query (string, length 9 to 9.05k) | document (string, length 10 to 222k) | negatives (list, length 19 to 20) | metadata (dict)
---|---|---|---|
Get the first n heptagonal numbers.
|
def get_heptagonals(num):
return [int(i * (5 * i - 3) / 2) for i in range(1, num + 1)]
|
[
"def H(n):\r\n if n <= -8:\r\n return H(n+5) + H(n+4) + H(n+2)\r\n elif -8 < n and n < 10:\r\n return n\r\n else: # n >= 10\r\n return H(n-8) + H(n-5) + H(n-3)",
"def pentagonal(n: int) -> int:\n # Find the pentagonal number to nth degree.\n pentagonal_number = (n * ((3 * n) - 1) // 2)\n\n # Find the total number of dots.\n dots = ((n-1) ** 2)\n dots += pentagonal_number\n return dots",
"def getHit(reuse, N):\n Phit = []\n for K in reuse.keys():\n if K<N:\n P = (1-1.0/N)**K\n for i in range(reuse[K]):\n Phit.append(P)\n else:\n for i in range(reuse[K]):\n Phit.append(0)\n return Phit",
"def hartmann_nd(x):\n x = np.array(x).reshape(-1)\n n_dim = len(x)\n f0 = hartmann6_single\n f_list = [f0(x[6 * i : 6 * i + 6]) for i in range(n_dim // 6)]\n return np.sum(f_list)",
"def ordinary_points(n):\n return [(x, y) for x in range(n) for y in range(n)]",
"def hamming(n):\n h = sorted(2 ** i * 3 ** j * 5 ** k for i in range(33) for j in range(21) for k in range(15))\n return h[n - 1]",
"def get_m_indices(n):\n m = np.arange(-n, n+1, 2)\n return m",
"def hailstone(n):\r\n seq = []\r\n while n != 1:\r\n seq.append(n)\r\n if n % 2 == 0: n = n/2\r\n else: n = 3*n + 1\r\n return seq",
"def HammingOrder(n):\n for i in range(0,15):\n N=2**i\n if N-i-1>=n: return i",
"def hailstone(n):\n print(n)\n if n == 1:\n return 1\n elif n % 2 == 0:\n return 1 + hailstone(n // 2)\n else:\n return 1 + hailstone((n * 3) + 1)",
"def first_n_harmonic_numbers_generator(n): \n sum = 0\n yield sum # The first harmonic number H_0 is 0\n for k in range(1, n):\n sum += 1/k\n yield sum",
"def circleOfNumbers(n, firstNumber):\n\n half = int(n/2)\n \n if firstNumber > half:\n return firstNumber - half # go backward \n \n elif firstNumber < half:\n return firstNumber + half # go forward\n \n else:\n return 0 # opposite of middle num is always 0",
"def HH(n):\n if (n<=0):return Context('1')\n else:\n LL1=LL(n-1)\n HH1=HH(n-1)\n r1 = C1(3**(n-1),2**(n-1)) - LL1 - HH1\n r2 = HH1 - HH1 - HH1\n return r1 + r2",
"def middle(X, Y, N):\n if N % 2 != 0:\n return X\n else:\n return (int(N/2) * Y) + X + (int(N/2) * Y)",
"def hailstone(n):\n if n == 1:\n print(1)\n return 1\n elif n % 2 == 0:\n print(n)\n return hailstone(n // 2) + 1\n else:\n print(n)\n return hailstone(n * 3 + 1) + 1",
"def horn(n):\n if n == 0:\n yield 'o', ()\n else:\n for k in range(0, n):\n for f, l in horn(k):\n for g, r in horn(n - 1 - k):\n yield g, ((f, l),) + r",
"def minOperations(n):\n h, cp, p = 1, 1, 1\n\n if (n < 2):\n return 0\n\n while p < n and p != n:\n if n % p == 0 and p != 1 and p + cp < n:\n cp = p\n h += 1\n p += cp\n else:\n p += cp\n h += 1\n return h",
"def nn(self, h, n=1):\n super(LinearHashIndex, self).nn(h, n)\n\n h_int = bit_vector_to_int_large(h)\n bits = len(h)\n #: :type: list[int|long]\n near_codes = \\\n heapq.nsmallest(n, self.index,\n lambda e: hamming_distance(h_int, e)\n )\n distances = map(hamming_distance, near_codes,\n [h_int] * len(near_codes))\n return [int_to_bit_vector_large(c, bits) for c in near_codes], \\\n [d / float(bits) for d in distances]",
"def genH():\n return [frac_bin(p ** (1/2.0)) for p in first_n_primes(8)]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
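For reference, a minimal usage sketch of the `get_heptagonals` document in the record above (the one-liner is repeated so the snippet runs on its own); heptagonal numbers follow n(5n - 3)/2, so the expected output is easy to verify by hand. The sample call is purely illustrative.

```python
# Illustrative check of get_heptagonals from the record above;
# heptagonal numbers are n * (5n - 3) / 2.
def get_heptagonals(num):
    return [int(i * (5 * i - 3) / 2) for i in range(1, num + 1)]

print(get_heptagonals(5))  # [1, 7, 18, 34, 55]
```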
Get the first n octagonal numbers.
|
def get_octagonals(num):
return [int(i * (3 * i - 2)) for i in range(1, num + 1)]
|
[
"def octaves_for_note(note):\n return sorted([n for n in range(note,minC-1,-12)] + [n for n in range(note,maxC+1,12)])",
"def pentagonal(n: int) -> int:\n # Find the pentagonal number to nth degree.\n pentagonal_number = (n * ((3 * n) - 1) // 2)\n\n # Find the total number of dots.\n dots = ((n-1) ** 2)\n dots += pentagonal_number\n return dots",
"def get_hexagonals(num):\n return [int(i * (2 * i - 1)) for i in range(1, num + 1)]",
"def _get_nth_digit_from_right(n, number):\n return number % (10 ** n) // (10 ** (n - 1))",
"def ith_digit(n, i=0):\n return (n/10**i) % 10",
"def champernowne(n):\n digit_count, next_integer = 0, 1\n while digit_count + len(str(next_integer)) < n:\n digit_count += len(str(next_integer))\n next_integer += 1\n return int(str(next_integer)[n - digit_count - 1])",
"def digitcat(n):\n return int(''.join(str(d) for d in n))",
"def min_multiplicand(digit_count: int) -> int:\n return int(pandigit_string[:digit_count])",
"def tonal_int(x):\n\n if len(x) == 2:\n x = _tonal_unmodulo(x)\n return x[1]\n\n d = x[0]\n c = x[1]\n base_c = MS[d].c\n\n # Example: Cb --- base=0 c=11 c-base=11 11 - 12 = -1\n\n if c - base_c > 3:\n c = c - C_LEN\n\n # Example: B# --- base=11 c=0 c-base=-11 c+C_LEN =12\n if c - base_c < -3:\n c = c + C_LEN\n\n return c + x[2]*(C_LEN)",
"def iter_digits(n):\n return (digit(n, i) for i in range(digit_count(n)))",
"def nth_lex_permutation(n, digits):\n\n perms = []\n result = \"\"\n\n # generate list of permutations\n for i in itertools.permutations(range(digits)):\n perms.append(i)\n\n # join the answer together\n for j in perms[n - 1]:\n result += str(j)\n\n return int(result)",
"def circleOfNumbers(n, firstNumber):\n\n half = int(n/2)\n \n if firstNumber > half:\n return firstNumber - half # go backward \n \n elif firstNumber < half:\n return firstNumber + half # go forward\n \n else:\n return 0 # opposite of middle num is always 0",
"def nthterm(first, n, c):\n\n if n == 0:\n return first\n\n return nthterm(first + c, n - 1, c)",
"def triple_digits(n):\n n= str(n)\n string = ''\n for i in n :\n string += i*3\n return int(string)",
"def pandigitals(N, base=1):\n\tNUMBERS = list(range(base,N+base))\n\tpandigits = []\n\tfor i in list(itertools.permutations(NUMBERS)):\n\t\tif i[0] != 0:\n\t\t\ttmp = \"\"\n\t\t\tfor j in i:\n\t\t\t\ttmp = tmp + str(j)\n\t\t\tpandigits.append(int(tmp))\n\treturn sorted(pandigits)",
"def getStartPointInOctave(self) -> retval:\n ...",
"def getMRnaCodon(i, sequence):\n return self.getMRna(sequence)[i * 3:i * 3 + 3]",
"def get_heptagonals(num):\n return [int(i * (5 * i - 3) / 2) for i in range(1, num + 1)]",
"def comptertous(n: int) -> list:\n compteurs = [0] * 10\n while True:\n chiffre = n % 10\n compteurs[chiffre] += 1\n n //= 10\n if n == 0:\n break\n return compteurs"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
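Likewise, a small sketch for the `get_octagonals` document above; octagonal numbers follow n(3n - 2), and the sample call is only illustrative.

```python
# Illustrative check of get_octagonals from the record above;
# octagonal numbers are n * (3n - 2).
def get_octagonals(num):
    return [int(i * (3 * i - 2)) for i in range(1, num + 1)]

print(get_octagonals(5))  # [1, 8, 21, 40, 65]
```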
Compute k-cliques in the graph.
|
def compute(self, k):
from collections import defaultdict
assert isinstance(k, int) and k >= 2
# look-up case
if k in self.cliques.keys():
return self.cliques[k]
k_cliques = set()
# base case: k = 2
if k == 2:
for i in range(0, self.num_vertices):
for j in self.adjacency_list[i]:
if i < j:
k_cliques.add((i, j))
# common case: recursion
else:
# find all the k-1 cliques
lower_cliques = self.compute(k - 1)
for _clique in lower_cliques:
_clique_set = set(_clique)
# use a dict to find vertices that are connected to everyone in the clique
degree = defaultdict(int)
for i in _clique:
for j in self.adjacency_list[i]:
if j not in _clique_set:
degree[j] += 1
for _key in degree.keys():
if degree[_key] == len(_clique):
new_clique = tuple(sorted(list(_clique) + [_key]))
k_cliques.add(new_clique)
self.cliques[k] = k_cliques
return k_cliques
|
[
"def run_k_cliques(self, smallest_clique):\n start = time.time()\n cliques = None\n if self.graph is not None:\n cliques = k_clique_communities(self.graph,smallest_clique)\n end = time.time()\n print(\"K-Cliques ime taken: {0}\".format(end-start))\n return cliques",
"def find_k_cliques(G, k):\n for clique in nx.find_cliques(G):\n if len(clique) >= k:\n for nodeset in combinations(clique, k):\n yield nodeset",
"def size_k_maximal_cliques(G, k):\n for clique in nx.find_cliques(G):\n if len(clique) == k:\n yield clique",
"def kks_graph(clique_size=4, num_cliques=5, silent=True):\n\n node_list = []\n for j in range(0, (num_cliques+1)*clique_size):\n node_list.append(j)\n\n inner_nodes = node_list[-clique_size:]\n\n if not silent:\n print(\"Clique nodes: \", node_list[:-clique_size])\n print(\"Independent set: \", inner_nodes)\n\n edge_list = []\n\n # Add edges fow the cliques\n for which_clique in range(0, num_cliques):\n first_node = which_clique*clique_size\n last_node = first_node + clique_size\n for endpoint_a in range(first_node, last_node):\n for endpoint_b in range(endpoint_a+1, last_node):\n edge_list.append((endpoint_a, endpoint_b))\n\n # Now add perfect matching from inner nodes to outer nodes\n for idx, inner_node in enumerate(inner_nodes):\n for which_clique in range(0, num_cliques):\n edge_list.append((inner_node, clique_size*which_clique+idx))\n\n G = nx.Graph()\n G.add_nodes_from(node_list)\n G.add_edges_from(edge_list)\n return G",
"def k_core(self, k):\n matrix = copy.deepcopy(self.matrix)\n node_degrees = [sum(row) for row in matrix]\n\n k_classes = []\n removed = 999\n while removed > 0:\n removed = 0\n for vertex, node_degree in enumerate(node_degrees):\n if node_degree < k:\n for index, col in enumerate(matrix[vertex]):\n if col == 1:\n removed += 1\n matrix[vertex][index] = matrix[index][vertex] = 0\n node_degrees = [sum(row) for row in matrix]\n for index, degree in enumerate(node_degrees):\n if degree > 0:\n k_classes.append(index)\n return k_classes",
"def generate_connected_K(nb):\n\n graph = nx.Graph()\n graph = nx.disjoint_union(graph, nx.complete_graph(nb))\n graph = nx.disjoint_union(graph, nx.complete_graph(nb))\n\n for i in range(nb - 2):\n graph.add_edge(i, nb + i)\n return graph",
"def compute_kcore(adj, nb_core):\n # Preprocessing on graph\n G = nx.from_scipy_sparse_matrix(adj)\n G.remove_edges_from(nx.selfloop_edges(G))\n # K-core decomposition\n core_number = nx.core_number(G)\n # nb_core subgraph\n kcore = nx.k_core(G, nb_core, core_number)\n # Get list of nodes from this subgraph\n nodes_kcore = kcore.nodes\n # Adjacency matrix of this subgraph\n adj_kcore = nx.adjacency_matrix(kcore)\n return adj_kcore, nodes_kcore",
"def kruskal(edges, noOfClusters, graph):\r\n\r\n # Sorting the edges of the graph by their distance as it is a requirement of the greedy approach\r\n edges.sort(key = lambda fn : fn.distance)\r\n\r\n # Variable to store the cost of the cluster graph\r\n cost = 0\r\n\r\n # For loop to go through the edges of the graph in ascending order of their distances\r\n for noOfEdgesSurpassed in range(len(edges)):\r\n edge = edges[noOfEdgesSurpassed]\r\n\r\n vertex1 = edge.u\r\n vertex2 = edge.v\r\n\r\n # Adding edge to cluster graph if and only if there does not\r\n # already exist a path between the two vertices\r\n if DFS(graph, vertex1, vertex2) == False:\r\n graph.booleanVerticeTraversed[vertex1].append(vertex2)\r\n graph.booleanVerticeTraversed[vertex2].append(vertex1)\r\n graph.addEdge(edge)\r\n cost += edge.distance\r\n\r\n # Finding the corresponding cluster to which the vertices belong\r\n i = graph.findClusterIndex(vertex1)\r\n j = graph.findClusterIndex(vertex2)\r\n\r\n # Not merging them if they belong to the same cluster\r\n if i == j:\r\n continue\r\n # Else merging their clusters\r\n else:\r\n graph.mergeClusters(i,j)\r\n\r\n # Breaking from the for loop if the clusters required are formed\r\n if len(graph.clusters) == noOfClusters + 1:\r\n print(\"\\n\\nFOLLOWING ARE THE CLUSTERS :- \\n \\n\")\r\n print(graph.clusters[1:])\r\n print(\"\\n\\nThe cost of the cluster formation is : \" + str(cost))\r\n break\r\n\r\n # Else ignoring that edge and going on to next one as that will create a loop otherwise\r\n else:\r\n continue\r\n\r\n return noOfEdgesSurpassed",
"def k_edge_subgraphs(self, k):\n if k < 1:\n raise ValueError('k cannot be less than 1')\n H = self.H\n A = self.A\n # \"traverse the auxiliary graph A and delete all edges with weights less\n # than k\"\n aux_weights = nx.get_edge_attributes(A, 'weight')\n # Create a relevant graph with the auxiliary edges with weights >= k\n R = nx.Graph()\n R.add_nodes_from(A.nodes())\n R.add_edges_from(e for e, w in aux_weights.items() if w >= k)\n\n # Return the components whose subgraphs are k-edge-connected\n for cc in nx.connected_components(R):\n if len(cc) < k:\n # Early return optimization\n for node in cc:\n yield {node}\n else:\n # Call subgraph solution to refine the results\n C = H.subgraph(cc)\n for sub_cc in k_edge_subgraphs(C, k):\n yield sub_cc",
"def k_edge_components(self, k):\n if k < 1:\n raise ValueError('k cannot be less than 1')\n A = self.A\n # \"traverse the auxiliary graph A and delete all edges with weights less\n # than k\"\n aux_weights = nx.get_edge_attributes(A, 'weight')\n # Create a relevant graph with the auxiliary edges with weights >= k\n R = nx.Graph()\n R.add_nodes_from(A.nodes())\n R.add_edges_from(e for e, w in aux_weights.items() if w >= k)\n\n # Return the nodes that are k-edge-connected in the original graph\n for cc in nx.connected_components(R):\n yield cc",
"def find_cliques(self) -> Iterator[List[dict]]:\n raise NotImplementedError",
"def top_k_betweenness_centrality(self):\n nodes=[]\n l=self.lbc()\n D={}\n for i in range(len(l)):\n D[vertices[i]]=l[i]\n #print('l=',l)\n #print('D=',D)\n D2=(sorted([[v, k] for k, v in D.items()], reverse=True))\n #print('D2=',D2)\n for i in range(len(D2)-1):\n if D2[i][0]==D2[i+1][0]:\n nodes.append(D2[i][1])\n return nodes\n return D2[0][0]",
"def jaccard(graph, node, k):\n neighbors = set(graph.neighbors(node))\n scores = []\n for n in sorted(graph.nodes(),reverse = True):\n if n != node:\n if not graph.has_edge(node,n):\n \n neighbors2 = set(graph.neighbors(n))\n scores.append(((node,n), 1. * len(neighbors & neighbors2) / len(neighbors | neighbors2)))\n \n return sorted(scores, key=lambda t: (-t[-1],t[0]))[:k]",
"def gen_cands(self, k):\n cands = set()\n if not k:\n for basket in self.baskets:\n for item in basket:\n cands.add(frozenset([item]))\n else:\n freqs = self.freq_list[k - 1]\n for i, freq_i in enumerate(freqs):\n for j, freq_j in enumerate(freqs):\n if i < j:\n cand = freq_i | freq_j\n if len(cand) == k + 1:\n cands.add(cand)\n if cands:\n self.cand_list.append(cands)",
"def findNumberOfClusters(X_train, kmin, kmax):\n bic = []\n for idx, _i in enumerate(range(kmin, kmax, 1)):\n model = GaussianMixture(n_components=_i, reg_covar=1e0, covariance_type='full', tol=1e-1).fit(X_train)\n bic.append(model.bic(X_train))\n bestk = list(range(kmin, kmax, 1))[int(np.argmin(bic))]\n print('Bestk: ', bestk)\n return bestk",
"def __knn_graph(self, k):\n A = np.zeros((self.X.shape[0], self.X.shape[0]))\n \n tree = spatial.KDTree(self.X)\n for i, x in enumerate(self.X):\n ds, idx = tree.query(x, k=k + 1)\n for d, j in zip(ds[1:], idx[1:]):\n A[i, j] = 1 / d\n A[j, i] = 1 / d\n \n return A",
"def _kcenters(metric, ptraj, k=None, distance_cutoff=None, seed=0, verbose=True):\n\n\n if k is None and distance_cutoff is None:\n raise ValueError(\"I need some cutoff criterion! both k and distance_cutoff can't both be none\")\n if k is None and distance_cutoff <= 0:\n raise ValueError(\"With k=None you need to supply a legit distance_cutoff\")\n if distance_cutoff is None:\n # set it below anything that can ever be reached\n distance_cutoff = -1\n if k is None:\n # set k to be the highest 32bit integer\n k = sys.maxint\n\n distance_list = np.inf * np.ones(len(ptraj), dtype=np.float32)\n assignments = -1 * np.ones(len(ptraj), dtype=np.int32)\n\n generator_indices = []\n for i in xrange(k):\n new_ind = seed if i == 0 else np.argmax(distance_list)\n #if k == sys.maxint:\n # print \"K-centers: Finding generator %i. Will finish when % .4f drops below % .4f\" % (i, distance_list[new_ind], distance_cutoff)\n #else:\n # print \"K-centers: Finding generator %i\" % i\n\n if distance_list[new_ind] < distance_cutoff:\n break\n new_distance_list = metric.one_to_all(ptraj, ptraj, new_ind)\n updated_indices = np.where(new_distance_list < distance_list)[0]\n distance_list[updated_indices] = new_distance_list[updated_indices]\n assignments[updated_indices] = new_ind\n generator_indices.append(new_ind)\n\n if verbose:\n print 'KCenters found %d generators' % (i + 1)\n\n return np.array(generator_indices), assignments, distance_list",
"def find_vertex_cover_sat_cnf_reduction(G, k, solver=solve_sat_cnf_pycosat):\n cnf = vertex_cover_to_sat_cnf(G, k)\n\n result = solver(cnf)\n if result is None:\n return None\n\n C = sat_cnf_solution_to_vertex_cover_solution(G, result)\n return C\n\n n = len(G)\n return set(x for x in result if 0 < x < n)",
"def kPar(k):\r\n \r\n k_par=ka*np.sqrt(1+(k/ks)**2)\r\n \r\n return k_par"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
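The `compute` document above is a method and relies on `self.cliques`, `self.num_vertices`, and `self.adjacency_list`, none of which are shown in the record. The sketch below wraps the same logic in a hypothetical `CliqueFinder` class so it can be run end to end; the class name, constructor, and sample graph are assumptions, and only the body of `compute` mirrors the document.

```python
# Hypothetical scaffolding around the compute() method from the record above.
from collections import defaultdict

class CliqueFinder:
    def __init__(self, num_vertices, edges):
        self.num_vertices = num_vertices
        self.adjacency_list = defaultdict(set)
        for u, v in edges:
            self.adjacency_list[u].add(v)
            self.adjacency_list[v].add(u)
        self.cliques = {}  # memo: k -> set of k-cliques

    def compute(self, k):
        assert isinstance(k, int) and k >= 2
        if k in self.cliques:
            return self.cliques[k]
        k_cliques = set()
        if k == 2:
            # base case: every edge (i, j) with i < j is a 2-clique
            for i in range(self.num_vertices):
                for j in self.adjacency_list[i]:
                    if i < j:
                        k_cliques.add((i, j))
        else:
            # extend every (k-1)-clique by a vertex adjacent to all its members
            for clique in self.compute(k - 1):
                clique_set = set(clique)
                degree = defaultdict(int)
                for i in clique:
                    for j in self.adjacency_list[i]:
                        if j not in clique_set:
                            degree[j] += 1
                for v, d in degree.items():
                    if d == len(clique):
                        k_cliques.add(tuple(sorted(clique + (v,))))
        self.cliques[k] = k_cliques
        return k_cliques

# Triangle 0-1-2 plus a pendant edge 2-3: the only 3-clique is (0, 1, 2).
finder = CliqueFinder(4, [(0, 1), (1, 2), (0, 2), (2, 3)])
print(finder.compute(3))  # {(0, 1, 2)}
```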
Decipher a message using XOR. text is a list of integers corresponding to the ASCII values of the characters; key is a list of characters used as the key.
|
def xor_decipher(text, key):
deciphered = []
key_length = len(key)
key_ascii = [ord(_k) for _k in key]
for i, _ascii in enumerate(text):
deciphered.append(chr(_ascii ^ key_ascii[i % key_length]))
return "".join(deciphered)
|
[
"def xor(text, key):\n #pad = (key * (len(text) // len(key) + 1))[:len(text)]\n pad = CycleStr(key)\n return ''.join([chr(a ^ b) for a, b in zip(text, pad)])",
"def decipher(ciphertext, key):\n return \"\".join(chr(ord(c)^ord(k)) for c, k in zip(ciphertext, cycle(key)))",
"def xor(msg: bytes, key: bytes) -> bytes:\n return bytes(x ^ y for x, y in zip(msg, infrep(key)))",
"def cipher(txt,k):\n\n k = k * len(txt)#keykeykey\n for i in xrange(1,len(txt)):\n txt = sxor(txt,k[i-1:len(txt)*i])\n return txt",
"def encrypt_with_xor(message, key):\n from itertools import cycle\n\n coded_message = ''\n for (message_char, key_char) in zip(message, cycle(key)):\n coded_char = chr(ord(message_char) ^ ord(key_char))\n coded_message += coded_char\n return coded_message",
"def get_repeating_xor(text, key):\n xor = []\n for i, char in enumerate(text):\n key_char = key[i%len(key)]\n xor.append(chr(ord(key_char) ^ ord(char)))\n return ''.join(xor)",
"def repeatedKeyXOR(ciphertext, key):\n\n k = 0\n\n plaintext = bytearray()\n\n # Compute the XOR byte-wise\n \n k = 0\n for i in range(len(ciphertext)):\n plainCharacter = ciphertext[i] ^ key[k]\n plaintext.append(plainCharacter)\n k += 1\n if k % len(key) == 0:\n k = 0\n \n return plaintext",
"def test_xor_decript(self):\n expected_text = 'I love cats! I want to pet all the cats in the world. I wish every cat could be my friend. MEOW!'\n encryption_key = 'cat'\n\n text = xor_decrypt(\n get_cipher('assets/test_cipher.txt'), encryption_key)\n\n self.assertEqual(text, expected_text)",
"def decode(message, key):\n decoded_message = ''\n print('decoding message...')\n\n inverse_key = modular_inverse(key, Cipher.alphabet_size)\n for letter in message:\n letter_num = (ord(letter) * inverse_key - 32) % Cipher.alphabet_size\n decoded_message += Cipher.alphabet[letter_num]\n return decoded_message",
"def decrypt(self, private_key, cipher_text):",
"def multi_byte_xor(data, key):\n assert isinstance(data, (bytes, bytearray))\n assert isinstance(key, (bytes, bytearray))\n\n if not len(data):\n raise MungerException('Data must not be zero length.')\n\n if not len(key):\n raise MungerException('Key must not be zero length.')\n\n if isinstance(data, bytes):\n data = bytearray(data)\n\n k = 0\n for i in range(len(data)):\n data[i] = data[i] ^ key[k]\n k += 1\n if k == len(key):\n k = 0\n\n return bytes(data)",
"def decode_chiper(cipher, key):\n decoded_text = ''\n for i in range(len(cipher)):\n decoded_text+=letters[np.where(key == cipher[i])][0]\n return decoded_text",
"def decrypt(enc_txt, key):\n\n return encrypt(enc_txt, -key)",
"def decrypt(cypher, key):\n\n return gluechops(cypher, key['d'], key['p']*key['q'], decrypt_int)",
"def xor_decryption(enc):\r\n enc = binascii.unhexlify(enc)\r\n cipher = Crypto.Cipher.XOR.XORCipher(xor_key)\r\n return cipher.decrypt(enc)",
"def break_repeating_key_xor(ciphertext : bytes):\n distances = []\n for KEYSIZE in range(2, 41):\n\n # Break the ciphertext into chunks the length of the keysize\n chunks = [ciphertext[i:i+KEYSIZE] for i in range(0, len(ciphertext), KEYSIZE)]\n \n # find scores for every pair and divide by keysize (ignore the dangling bit)\n # score is normalized hamming distance between adjacent pair of chunks\n scores = [get_hamming_distance(p1, p2) / KEYSIZE for p1 ,p2 in pairwise(chunks) if len(p2) == KEYSIZE] \n \n # append average score for each KEYSIZE \n if (len(scores) > 0):\n distances.append({\n \"average\": sum(scores) / len(scores),\n \"key\": KEYSIZE,\n })\n \n predicted_keysize = sorted(distances, key=lambda x: x['average'])[:3]\n\n # Will populate with a single character as each transposed \n # block has been single-byte XOR brute forced\n key = b''\n\n possible_keysize = predicted_keysize[0]['key']\n for i in range(possible_keysize):\n \n # break the ciphertext into blocks of keysize length\n block = b''\n # transpose the blocks\n for j in range(i, len(ciphertext), possible_keysize):\n block += bytes([ciphertext[j]])\n # Solve each block as if it was single-character XOR\n key += bytes([bruteforce_single_char_xor(block)['key']]) \n\n # returns decrypted text and key\n return (repeating_key_xor(key, ciphertext), key)",
"def otp_decrypt(ct, key):\n res = binary_to_string(\"0b\"+xor_compare(hex_to_binary(ct), hex_to_binary(key)))\n return res",
"def desencrypt(ciphertext):\n plainText = (ciphertext*privateKey) % publicKey[0]\n print(plainText)\n return plainText",
"def decrypt(ciphertext, key):\n return encrypt(ciphertext, key, strict=False, verbose=False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
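A quick round-trip sketch for the `xor_decipher` document above: since XOR is its own inverse, encoding a string with the key and feeding the resulting integers back through the function should recover the plaintext. The function is restated in a lightly condensed form so the snippet is self-contained, and the sample message and key are arbitrary.

```python
# Round-trip check of xor_decipher from the record above.
def xor_decipher(text, key):
    key_ascii = [ord(k) for k in key]
    return "".join(chr(c ^ key_ascii[i % len(key)]) for i, c in enumerate(text))

message, key = "hello world", "abc"
cipher = [ord(c) ^ ord(key[i % len(key)]) for i, c in enumerate(message)]
print(xor_decipher(cipher, key))  # hello world
```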
Given a triangle-shaped array, determine the maximum sum of elements along a downward path. arr is the input array; row_idx is the index of the row where the path terminates.
|
def max_sum_path_in_triangle(arr, row_idx=-1):
# dynamic programming: tile it up by cumulative scores, row by row
points = []
for i, _row in enumerate(arr):
# base case: the first row
if i == 0:
points.append(_row[:])
else:
tmp_row = []
last_idx = len(_row) - 1
for j, _num in enumerate(_row):
# special case: the left-most element of a row
if j == 0:
parent_value = points[i - 1][0]
# special case: the right-most element of a row
elif j == last_idx:
parent_value = points[i - 1][j - 1]
# common case: a middle element of a row
else:
parent_value = max(points[i - 1][j - 1], points[i - 1][j])
tmp_row.append(parent_value + _row[j])
points.append(tmp_row[:])
return max(points[row_idx])
|
[
"def findMaxPathDownTriangle(triangle):\n dp_table = TriangleOfNumbers()\n n_rows = len(triangle.data)\n\n for irow in xrange(0,n_rows):\n dp_table.add_row([0]*(irow+1))\n\n irow = 0\n icol = 0\n\n for irow in xrange(0, n_rows):\n n_cols = len(triangle.data[irow])\n\n for icol in xrange(0, n_cols):\n current_value = triangle.data[irow][icol]\n max_value_so_far = dp_table.get_max_value_going_into_cell(irow, icol)\n print current_value, max_value_so_far, current_value + max_value_so_far\n dp_table.data[irow][icol] = current_value + max_value_so_far\n\n return dp_table.get_max_value_in_row(n_rows-1)",
"def max_subarray_brute_1(array):\n n = len(array)\n max_sum = -float(\"inf\")\n for i in range(n):\n for j in range(i, n):\n curr_sum = sum(array[i:j + 1])\n max_sum = max(max_sum, curr_sum)\n return max_sum",
"def subarray_maximum_total(arr: list) -> int:\n len_arr = len(arr)\n\n to_right = distance_to_greater_value(arr, True)\n to_left = list(reversed([len_arr - x - 1 for x in distance_to_greater_value(list(reversed(arr)), False)]))\n\n total = 0\n for idx in range(len_arr):\n total += arr[idx] * (idx - to_left[idx]) * (to_right[idx] - idx)\n return total",
"def max_subarray_brute_3(array):\n n = len(array)\n # creating array of prefix sums\n prefix_sums = [0] * n\n prefix_sums[0] = array[0]\n for i in range(1, n):\n prefix_sums[i] = prefix_sums[i - 1] + array[i]\n # for convinience, when checking sum of all elements before first element\n prefix_sums.append(0)\n\n # looking for the maximum sum\n max_sum = -float(\"inf\")\n a, b = 0, 0 # start and end indices of max subarray\n for i in range(n):\n for j in range(i, n):\n curr_sum = prefix_sums[j] - prefix_sums[i - 1]\n if curr_sum > max_sum:\n max_sum = curr_sum\n a, b = i, j\n return max_sum, array[a:b + 1]",
"def max_subarray_brute_2(array):\n n = len(array)\n # creating array of prefix sums\n prefix_sums = [0] * n\n prefix_sums[0] = array[0]\n for i in range(1, n):\n prefix_sums[i] = prefix_sums[i - 1] + array[i]\n # for convinience, when checking sum of all elements before first element\n prefix_sums.append(0)\n\n # looking for the maximum sum\n max_sum = -float(\"inf\")\n for i in range(n):\n for j in range(i, n):\n curr_sum = prefix_sums[j] - prefix_sums[i - 1]\n max_sum = max(max_sum, curr_sum)\n return max_sum",
"def max_subarray(items):\n low, high, i = 0, 0, 0\n max_sum, max_right_sum = items[0], items[0]\n\n for j in range(len(items) - 1):\n if max_right_sum >= 0:\n max_right_sum += items[j+1]\n else:\n max_right_sum = items[j+1]\n i = j + 1\n\n if max_right_sum > max_sum:\n max_sum = max_right_sum\n low, high = i, j+1\n\n return (low, high, max_sum)",
"def find_max_crossing_subarray(arr,low,mid,high):\n \n # Look at the left \n left_sum = -Inf\n sm = 0 # sm for sum\n for i in range(mid,low-1,-1):\n sm += arr[i]\n if sm >= left_sum:\n left_sum = sm\n max_left = i\n \n # Look at the right\n right_sum = -Inf\n sm = 0\n for j in range(mid+1,high+1):\n sm += arr[j]\n if sm >= right_sum:\n right_sum = sm\n max_right = j\n\n return (max_left,max_right,left_sum+right_sum)",
"def maximumSubArray(arr):\n\tn = len(arr)\n\tmax_val = [-float('inf') for i in range(n)]\n\tmax_val[0] = arr[0]\n\tfor i in range(1, n):\n\t\tmax_val[i] = max(max_val[i-1]+arr[i], arr[i])\n\treturn max_val[n-1]",
"def maxPathSum(self, root):\n\n def dfs(node):\n \"\"\"\n Returns: max sum of one side path, max sum of path.\n :param node: node\n :return: int\n \"\"\"\n left = right = 0\n lside = rside = None\n\n if node.left:\n left, lside = dfs(node.left)\n left = max(left, 0)\n if node.right:\n right, rside = dfs(node.right)\n right = max(right, 0)\n return node.val + max(left, right), max(node.val + left + right, lside, rside)\n\n if root:\n return dfs(root)[1]\n return 0",
"def better_bottom_up_max_sum_contiguous_subsequence(s):\n _max = s[0]\n _sum = s[0]\n index = 0\n\n start = end = 0\n\n for i in range(1, len(s)):\n\n if _sum > 0:\n _sum = _sum + s[i]\n else:\n _sum = s[i]\n index = i\n\n if _sum > _max:\n _max = _sum\n end = i\n start = index\n\n return _max, start, end",
"def find_maximum_subarray(arr,low,high):\n \n # Base case\n if high == low:\n return (low,high,arr[low])\n \n else:\n # find middle point of array\n mid = (low + high)//2\n\n # find a max sub-array in left sub-array\n left_low,left_high,left_sum = find_maximum_subarray(arr,low,mid)\n\n # find a max sub-array in right sub-array\n right_low,right_high,right_sum = find_maximum_subarray(arr,mid+1,high)\n\n # find a max sub-array that crosses the mid-point\n cross_low,cross_high,cross_sum = find_max_crossing_subarray(arr,low,mid,high)\n\n # test if left sub-array contains a sub-array with the maximum sum\n if left_sum >= right_sum and left_sum >= cross_sum:\n return (left_low,left_high,left_sum)\n\n # test if right sub-array contains a sub-array with the maximum sum\n elif right_sum >= left_sum and right_sum >= cross_sum:\n return (right_low,right_high,right_sum)\n\n # if neither left nor right sub-arrays contain a sub-array with the maximum sum,\n # then a maximum sub-array must cross the mid-point\n else:\n return (cross_low,cross_high,cross_sum)",
"def hourglassSum(arr):\n sums = []\n for i in range(4):\n for j in range(4):\n sums.append(sum(arr[i][j:j+3]+[arr[i+1][j+1]]+arr[i+2][j:j+3]))\n return max(sums)",
"def maximum_crossing_subarray(A,low,mid,high):\r\n left_sum = A[mid]\r\n left_max = mid\r\n i = mid\r\n Sum = 0\r\n while(i >= low):\r\n Sum = Sum + A[i]\r\n if Sum > left_sum:\r\n left_sum = Sum\r\n left_max = i\r\n i -= 1\r\n right_sum = A[mid+1]\r\n right_max = mid + 1\r\n Sum = 0\r\n for j in range(mid+1,high):\r\n Sum = Sum + A[j]\r\n if Sum > right_sum:\r\n right_sum = Sum\r\n right_max = j\r\n return (left_max,right_max,left_sum + right_sum)",
"def find_max_adjacent(grid, agg, num):\n dp_grid = [[0 for i in xrange(len(grid[0]))] \n for j in xrange(len(grid))]\n\n for i in xrange(len(dp_grid)):\n for j in xrange(len(dp_grid[0])):\n max_up = 0 if i == 0 else dp_grid[i-1][j]\n max_left = 0 if j == 0 else dp_grid[i][j-1]\n if j <= len(grid[0]) - num:\n right = reduce(agg, grid[i][j:j+num])\n else:\n right = 0\n if i <= len(grid) - num:\n down_vector = [grid[i+k][j] for k in xrange(num)]\n down = reduce(agg, down_vector)\n else:\n down = 0\n if j <= len(grid[0]) - num and i <= len(grid) - num:\n diag_right_vector = [grid[i+k][j+k] for k in xrange(num)]\n diag_right = reduce(agg, diag_right_vector)\n else:\n diag_right = 0\n if j >= num and i <= len(grid) - num:\n diag_left_vector = [grid[i+k][j-k] for k in xrange(num)]\n diag_left = reduce(agg, diag_left_vector)\n else:\n diag_left = 0\n dp_grid[i][j] = max([max_up, max_left, right, down, diag_right, diag_left])\n return dp_grid[-1][-1]",
"def argmax( indices, A, column, f=abs ):\n i_max= indices[0]\n for i in indices[1:]:\n if f(A[i][column]) > f(A[i_max][column]):\n i_max= i\n return i_max",
"def get_min_sub_array_sum(arr):\n # to store the minimum value that is ending up to the current index\n min_sum_till_point = sys.maxsize\n\n # to store the minimum value encountered so far\n min_sum_globally = sys.maxsize\n\n global_start_index, start_index, global_end_index = 0, 0, 0\n\n # traverse the array elements\n for index in range(len(arr)):\n # if min_sum_till_point > 0, then it could not possibly\n # contribute to the minimum sum further\n if min_sum_till_point > 0:\n min_sum_till_point = arr[index]\n start_index = index\n\n # else add the value arr[index] to min_sum_till_point\n else:\n min_sum_till_point += arr[index]\n\n # update min_sum_globally and min sub-array indexes\n if min_sum_globally > min_sum_till_point:\n min_sum_globally = min_sum_till_point\n global_start_index = start_index\n global_end_index = index\n\n return global_start_index, global_end_index, min_sum_globally",
"def bottom_up_max_sum_contiguous_subsequence(s):\n indices = [0] * len(s)\n\n _sum = [0] * len(s)\n _sum[0] = s[0]\n _max = _sum[0]\n\n start = end = 0\n\n for i in range(1, len(s)):\n\n if _sum[i - 1] > 0:\n _sum[i] = _sum[i - 1] + s[i]\n indices[i] = indices[i - 1]\n\n else:\n _sum[i] = s[i]\n indices[i] = i\n\n if _sum[i] > _max:\n _max = _sum[i]\n end = i\n start = indices[i]\n\n return _max, start, end",
"def brute_force_max_sum_contiguous_subsequence(s):\n _sum = _max = s[0]\n start = end = 0\n\n for i in range(0, len(s)):\n\n for j in range(i, len(s)):\n\n _sum = 0\n\n for k in range(i, j + 1):\n _sum += s[k]\n\n if _sum > _max:\n _max = _sum\n start = i\n end = j\n\n return _max, start, end",
"def max_heapify(arr):\n parent = ((len(arr) - 1) - 1 ) // 2\n while parent >= 0:\n shift_down(arr, parent)\n parent -= 1\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
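A worked run of the `max_sum_path_in_triangle` document above on the classic four-row triangle, where the best path 3 -> 7 -> 4 -> 9 sums to 23. The function body is restated in lightly condensed form so the example is self-contained; the triangle itself is only illustrative.

```python
# Dynamic-programming max path sum over a triangle, as in the record above:
# accumulate the best score reaching each cell, row by row.
def max_sum_path_in_triangle(arr, row_idx=-1):
    points = []
    for i, row in enumerate(arr):
        if i == 0:
            points.append(row[:])
            continue
        prev = points[i - 1]
        tmp = []
        for j, num in enumerate(row):
            if j == 0:                   # left edge: single parent
                parent = prev[0]
            elif j == len(row) - 1:      # right edge: single parent
                parent = prev[j - 1]
            else:                        # interior: best of the two parents
                parent = max(prev[j - 1], prev[j])
            tmp.append(parent + num)
        points.append(tmp)
    return max(points[row_idx])

triangle = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
print(max_sum_path_in_triangle(triangle))  # 23
```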
Take the square root of a number, build its continued fraction sequence, and return it as a generator.
|
def sqrt_continued_fraction_generator(num):
import sympy
return sympy.ntheory.continued_fraction_iterator(sympy.sqrt(num))
|
[
"def continued_fraction_of_root(n, max_iter=1000):\n root = n ** .5 # root(23)\n a = int(root) # a_0\n yield a\n nm, dr = 1, -a\n for _ in xrange(max_iter):\n d = (n - dr**2) / nm\n a = int((root - dr) / d)\n dr, nm = -dr - a * d, d\n yield a",
"def continued_fraction(n):\n r = sqrt(n)\n a0 = int(r)\n cycle = []\n if a0 < r:\n a = a0\n b = 1\n # remainder = (sqrt(n) - a) / b\n while b != 1 or not cycle:\n b = (n - a ** 2) // b\n x = int((r + a) // b)\n cycle.append(x)\n a = x * b - a\n return (a0, tuple(cycle))",
"def compile_continued_fraction_representation(seq):\n from fractions import Fraction\n\n # sanity check\n assert seq\n # initialize the value to be returned by working backwards from the last number\n retval = Fraction(1, seq.pop())\n # keep going backwords till the start of the sequence\n while seq:\n retval = 1 / (seq.pop() + retval)\n return retval",
"def geometric_sequence(base=np.e, tau=1.0, start=1):\n\n i = 0.0\n while True:\n value = base**(start+i/tau)\n\n yield value\n i += 1",
"def pfactorsr( x ):\n def factor_n( x, n ):\n if n*n > x:\n yield x\n return\n if x % n == 0:\n yield n\n if x//n > 1:\n #for f in factor_n( x // n, n ): yield f\n yield from factor_n( x // n, n )\n else:\n #for f in factor_n( x, n+2 ): yield f\n yield from factor_n( x, n+2 )\n if x % 2 == 0:\n yield 2\n if x//2 > 1:\n #for f in pfactorsr( x//2 ): yield f\n yield from pfactorsr( x//2 )\n return\n #for f in factor_n( x, 3 ): yield f\n yield from factor_n( x, 3 )",
"def reciprocals(start, stop):\n for i in range(start, stop):\n yield 1 / i",
"def mod_sqrt(a, p):\n # Simple cases\n #\n if legendre_symbol(a, p) != 1:\n return 0\n elif a == 0:\n return 0\n elif p == 2:\n return p\n elif p % 4 == 3:\n return pow(a, (p + 1) // 4, p)\n\n # Partition p-1 to s * 2^e for an odd s (i.e.\n # reduce all the powers of 2 from p-1)\n #\n s = p - 1\n e = 0\n while s % 2 == 0:\n s //= 2\n e += 1\n\n # Find some 'n' with a legendre symbol n|p = -1.\n # Shouldn't take long.\n #\n n = 2\n while legendre_symbol(n, p) != -1:\n n += 1\n\n # Here be dragons!\n # Read the paper \"Square roots from 1; 24, 51,\n # 10 to Dan Shanks\" by Ezra Brown for more\n # information\n #\n\n # x is a guess of the square root that gets better\n # with each iteration.\n # b is the \"fudge factor\" - by how much we're off\n # with the guess. The invariant x^2 = ab (mod p)\n # is maintained throughout the loop.\n # g is used for successive powers of n to update\n # both a and b\n # r is the exponent - decreases with each update\n #\n x = pow(a, (s + 1) // 2, p)\n b = pow(a, s, p)\n g = pow(n, s, p)\n r = e\n\n while True:\n t = b\n m = 0\n for m in range(r):\n if t == 1:\n break\n t = pow(t, 2, p)\n\n if m == 0:\n return x\n\n gs = pow(g, 2 ** (r - m - 1), p)\n g = (gs * gs) % p\n x = (x * gs) % p\n b = (b * g) % p\n r = m",
"def p_sequence(start=1.0, p=1.0):\n\n i = start\n while True:\n value = 1.0 / (i**p)\n yield value\n i += 1",
"def prime_numbers_gen():\n number = 5\n while 1:\n limit = int(math.sqrt(number))\n for x in range(3, limit + 1):\n if number % x == 0:\n break\n else:\n yield number\n number += 2",
"def figurate_numbers(size):\n assert size >= 3\n step = size - 2\n n = 1\n d = n + step\n while True:\n yield n\n n += d\n d += step",
"def mulseq(root:Integral, base:Integral=2, terms:Integral=-1, start:Integral=0, step:Integral=1) -> Generator:\r\n counter = count(start=start, step=step)\r\n while terms:\r\n yield root * base**next(counter)\r\n terms -= 1",
"def square_root_babylonian(n):\n if n < 0:\n return None\n if n <= 1:\n return n\n sol = n / 2\n updated_sol = n / sol\n e = 0.000001\n while sol - updated_sol > e:\n sol = (sol + updated_sol) / 2\n updated_sol = n / sol\n return sol",
"def pi_generate():\r\n q, r, t, k, m, x = 1, 0, 1, 1, 3, 3\r\n while True:\r\n if 4 * q + r - t < m * t:\r\n yield m\r\n q, r, t, k, m, x = (10*q, 10*(r-m*t), t, k, (10*(3*q+r))//t - 10*m, x)\r\n else:\r\n q, r, t, k, m, x = (q*k, (2*q+r)*x, t*x, k+1, (q*(7*k+2)+r*x)//(t*x), x+2)",
"def c_root_p(a, p):\n if (a % p) == 0:\n return [0]\n if p == 2 or p == 3 :\n return [a % p]\n if (p % 3) == 2:\n return [pow(a, (((2 * p) - 1) // 3), p)]\n p_div_3, p_mod_3 = divmod((p - 1), 3)\n # Compute e,q\n e = 0\n temp = p_div_3\n tempmod = p_mod_3\n\n while tempmod == 0:\n e += 1\n temp, tempmod = divmod(temp, 3)\n q = (p - 1) // (3 ** e)\n search_range = (p - 1) >> 1\n h = 2\n while pow(h, p_div_3, p) == 1:\n h = random.randrange(2, search_range)\n sym = pow(h, p_div_3, p)\n g = pow(h, q, p)\n # Initialize\n y = g\n r = e\n if q % 3 == 2:\n x = pow(a, (q - 2) // 3, p)\n else:\n x = pow(a, (((2 * q) - 2) // 3), p)\n\n b = (pow(a, 2, p) * pow(x, 3, p)) % p\n x = (a * x) % p\n while (b % p) != 1:\n # Find exponent\n b_pow = pow(b, 3, p)\n m = 1\n while b_pow != 1:\n b_pow = (b_pow ** 3) % p\n m += 1\n if m == r:\n raise ValueError(\"there is no cubic root mod p\")\n # Reduce exponent\n if sym == pow(b, pow(3, m - 1, p), p):\n t = pow(y, 2, p)\n sym = pow(sym, 2, p)\n else:\n t = y\n t = pow(t, pow(3, r - m - 1, p), p)\n y = pow(t, 3, p)\n r = m\n x = (x * t) % p\n b = (b * y) % p\n return [ x, (x * sym) % p, (x * pow(sym, 2, p)) % p ]",
"def rational(x, q):\n return 1 / np.polyval(q, x)",
"def generateFactors(num):\n \n yield 1\n \n limit = int(math.sqrt(num) + 1)\n for i in xrange(2, limit):\n if not num % i:\n yield i\n yield num / i",
"def square_root(number):\n return pow(number, 0.5)",
"def convergents(cfrac,mode):\n \n if mode == \"f\":\n yield from _cfrac_convergents(cfrac)\n \n elif mode == \"n\":\n for i in _cfrac_convergents(cfrac):\n yield i.numerator\n \n elif mode == \"d\":\n for i in _cfrac_convergents(cfrac):\n yield i.denominator\n \n else:\n raise ValueError(\"mode must be f (fraction), n (numerator), or d (denominator)\")",
"def triangles():\n n = 1\n while True:\n yield n * (n + 1) / 2\n n += 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
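A short sketch exercising the `sqrt_continued_fraction_generator` document above. The continued fraction of sqrt(2) is [1; 2, 2, 2, ...], so slicing the generator should yield 1 followed by 2s; this assumes SymPy is installed.

```python
# Take the first few continued-fraction terms of sqrt(2) using the generator
# from the record above.
import itertools
import sympy

def sqrt_continued_fraction_generator(num):
    return sympy.ntheory.continued_fraction_iterator(sympy.sqrt(num))

print(list(itertools.islice(sqrt_continued_fraction_generator(2), 5)))  # [1, 2, 2, 2, 2]
```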
Compile an integer sequence (continued fraction representation) into its corresponding fraction.
|
def compile_continued_fraction_representation(seq):
from fractions import Fraction
# sanity check
assert seq
# initialize the value to be returned by working backwards from the last number
retval = Fraction(1, seq.pop())
    # keep going backwards till the start of the sequence
while seq:
retval = 1 / (seq.pop() + retval)
return retval
|
[
"def _cfrac_convergents(S):\n \n n0,n1 = 0,1\n d0,d1 = 1,0\n \n for c in S:\n n0,n1 = n1,c*n1 + n0\n d0,d1 = d1,c*d1 + d0\n \n yield Fraction(n1,d1)",
"def fractions():\n from fractions import Fraction\n return tuples(integers(), integers(min_value=1)).map(\n lambda t: Fraction(*t)\n )",
"def continued_fraction(n):\n r = sqrt(n)\n a0 = int(r)\n cycle = []\n if a0 < r:\n a = a0\n b = 1\n # remainder = (sqrt(n) - a) / b\n while b != 1 or not cycle:\n b = (n - a ** 2) // b\n x = int((r + a) // b)\n cycle.append(x)\n a = x * b - a\n return (a0, tuple(cycle))",
"def finite_cfrac_to_rational(S):\n \n if type(S) not in (list,tuple):\n raise Exception(\"finite_cfrac_to_rational accepts only lists and tuples that represent a continued fraction in order to ensure cacluation terminates\")\n \n n0,n1 = 0,1\n d0,d1 = 1,0\n \n for c in S:\n n0,n1 = n1,c*n1 + n0\n d0,d1 = d1,c*d1 + d0\n \n return Fraction(n1,d1)",
"def sqrt_continued_fraction_generator(num):\n import sympy\n\n return sympy.ntheory.continued_fraction_iterator(sympy.sqrt(num))",
"def gcd_seq(seq):\n return reduce(gcd, seq)",
"def as_integer(self):\n denom, coeffs = 1, []\n\n for coeff in self.iter_coeffs():\n if coeff.is_Rational:\n coeffs.append(coeff)\n\n if not coeff.is_Integer:\n denom = ilcm(denom, coeff.q)\n elif coeff.is_Real and int(coeff) == coeff:\n coeffs.append(Integer(int(coeff)))\n else:\n raise CoefficientError(\"%s is not a rational number\" % coeff)\n\n denom = sympify(denom)\n\n if denom is not S(1):\n coeffs = [ coeff * denom for coeff in self.iter_coeffs() ]\n\n return denom, self.__class__((coeffs, self.monoms),\n *self.symbols, **self.flags)",
"def cont_frac(n, c=1):\n if n==0:\n return \"1\"\n else: \n return \"(1 + ((e ** (-2*{} * pi)/{})))\".format(c, cont_frac(n-1, c+1))",
"def convergents(cfrac,mode):\n \n if mode == \"f\":\n yield from _cfrac_convergents(cfrac)\n \n elif mode == \"n\":\n for i in _cfrac_convergents(cfrac):\n yield i.numerator\n \n elif mode == \"d\":\n for i in _cfrac_convergents(cfrac):\n yield i.denominator\n \n else:\n raise ValueError(\"mode must be f (fraction), n (numerator), or d (denominator)\")",
"def adder(fraction):\n assert type(fraction)==list\n for n in fraction:\n if '/' not in n:\n raise AssertionError\n numbers = '0123456789/'\n for o in fraction:\n for p in o:\n if p not in numbers:\n raise AssertionError\n my_list1 = []\n my_list2 = []\n my_list3 = []\n my_list4 = []\n my_list5 = []\n #Separates the numerators from the denominators and adds them to corresponding lists.\n for i in fraction:\n my_list1.append(int(i[0:(i.index('/')):]))\n my_list2.append(int(i[(i.index('/'))+1:]))\n #Checks for zero division error.\n for i in my_list2:\n if i==0:\n raise AssertionError\n \n #Calculates the common denominator the fractions should have.\n LCM = 1\n for j in my_list2:\n LCM*=j\n #Changes the value of the numerator with respect to the common denominator they should share.\n for k in my_list2:\n for l in range(1,LCM+1):\n if k*l==LCM:\n my_list3.append(l)\n \n for m in range(len(my_list1)):\n my_list4.append(my_list1[m]*my_list3[m])\n \n numerator = sum(my_list4) #Adds the numerators together.\n #Loops through numbers from one to the common denominator and finds the highest common factor of the numerator.\n for p in range(1,LCM+1):\n if LCM%p==0:\n if numerator%p==0:\n my_list5.append(p)\n a = max(my_list5) #Highest common factor of the numerator.\n b = numerator//a #New numerator in simplest form\n c = LCM//a #New denominator in simplest form\n return str(b)+\"/\"+str(c)",
"def simplifyFraction(G,s):\n num,den = sympy.fraction(G.expand().simplify())\n num = sympy.Poly(num,s)\n den = sympy.Poly(den,s)\n \n return (num/den)",
"def make_frac(parts):\n # type: (List[str]) -> str\n\n return translate(\"(\" + parts[0] + \") / (\" + parts[1] + \")\")",
"def simplify(self):\n gcd1 = gcd(self.numerator, self.denominator)\n return Fraction(int(self.numerator / gcd1), int(self.denominator / gcd1))",
"def create_fraction_list():\n fractions = []\n for numerator in range(10, 101):\n for denominator in range(10, 101):\n temp = set(str(numerator) + str(denominator))\n if len(temp) == 4:\n continue\n if '0' in temp:\n continue\n if (numerator % 11) == 0:\n continue\n if (denominator % 11) == 0:\n continue\n if numerator < denominator:\n fractions.append((numerator, denominator))\n return fractions",
"def test_pgcd():\n assert pmisc.pgcd(48, 18) == 6\n assert pmisc.pgcd(3, 4) == 1\n assert pmisc.pgcd(0.05, 0.02) == 0.01\n assert pmisc.pgcd(5, 2) == 1\n assert pmisc.pgcd(Fraction(5, 3), Fraction(2, 3)) == Fraction(1, 3)",
"def continued_fraction(repList, depth = 20):\n if depth <= 0: return 0\n \n if type(repList) == list:\n if len(repList) == 1: return 0\n if len(repList) == 1: return repList[0]\n firstTerm = repList[0]\n repeatedTerms = repList[1:]\n gen = itertools.cycle(repeatedTerms)\n \n else:\n try:\n gen = repList\n firstTerm = gen.next()\n except:\n raise ValueError(\"repList must be a list or generator\")\n\n return firstTerm + c_f_helper(gen, depth - 1)",
"def simplify(self):\n new_numerator = self.numerator // self.euclid_gcd()\n new_denominator = self.denominator // self.euclid_gcd()\n return Fraction(new_numerator, new_denominator)",
"def with_fraction_coeff(self):\n return (mathify(1), self)",
"def fraction_to_decimal(numerator: int, denominator: int) -> str:\n result = [str(numerator//denominator) + \".\"]\n subresults = [numerator % denominator]\n numerator %= denominator\n while numerator != 0:\n numerator *= 10\n result_digit, numerator = divmod(numerator, denominator)\n result.append(str(result_digit))\n if numerator not in subresults:\n subresults.append(numerator)\n else:\n result.insert(subresults.index(numerator) + 1, \"(\")\n break\n return \"\".join(result)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
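A worked check of the `compile_continued_fraction_representation` document above: the helper folds the list from the right into 1/(s0 + 1/(s1 + ...)) and consumes the list as it goes. Feeding it the repeating tail [2, 2, 2] of sqrt(2)'s expansion and adding back the leading 1 gives the convergent 17/12; the choice of input is only illustrative.

```python
# The helper from the record above, exercised on the repeating tail of the
# continued fraction of sqrt(2) = [1; 2, 2, 2, ...].
from fractions import Fraction

def compile_continued_fraction_representation(seq):
    assert seq
    retval = Fraction(1, seq.pop())      # start from the last term
    while seq:
        retval = 1 / (seq.pop() + retval)
    return retval

tail = [2, 2, 2]
frac = compile_continued_fraction_representation(tail)
print(frac, 1 + frac)  # 5/12 17/12  (17/12 is a convergent of sqrt(2))
```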
Check if two numbers are related by digit permutation.
|
def related_by_digit_permutation(num_a, num_b):
from collections import Counter
return Counter(str(num_a)) == Counter(str(num_b))
|
[
"def is_permutation(a, b):\r\n \r\n return sorted(list(str(a))) == sorted(list(str(b)))",
"def two_adjacent_digits_same(number):\n digits = separate_digits(number)\n\n i = 0\n while i < len(digits) - 1:\n if digits[i] == digits[i+1]:\n return True\n i += 1\n return False",
"def two_adjacent_digits_same_2(number):\n digits = separate_digits(number)\n digit_counts = repeated_digit_counts(digits)\n return any(map(lambda x: x==2, digit_counts))",
"def same_length(a, b):\n return digits(a) == digits(b)",
"def same_length(a, b):\n\n a_digits = 0\n while a > 0:\n a = a // 10\n a_digits = a_digits + 1\n b_digits = 0\n while b > 0:\n b = b // 10\n b_digits = b_digits + 1\n return a_digits == b_digits",
"def same_frequency(num1, num2):\n num_1 = list(str(num1))\n num_2 = list(str(num2))\n digits = set(str(num1)) & set(str(num2))\n \n for digit in digits:\n \n digit1 = num_1.count(digit)\n digit2 = num_2.count(digit)\n\n if digit1 != digit2:\n return False\n \n return True",
"def is_permutation(a: str, b: str) -> bool:\n\n return True if sorted(a) == sorted(b) else False\n\n # Нужно проверить, являются ли строчки 'a' и 'b' перестановками",
"def compdig(x, y):\n a = intlist(x)\n b = intlist(y)\n i = 0\n if len(a) != len(b):\n return False\n elif complist(a, b):\n return True\n else:\n return False",
"def is_product_of_two_n_digit_numbers(number, digits):\n check_interval = range((10**digits)-1, 10**(digits-1), -1)\n for factor1 in check_interval:\n #check if the number is divisible by factor1\n if number % factor1 == 0:\n factor2_digits = get_number_of_digits(number/factor1)\n #does factor2 have as many digits as factor1? then we have a match\n if factor2_digits == digits:\n return True\n #is factor1 so small that factor2 now has more digits than factor1? then we can break\n elif factor2_digits > digits:\n break\n return False",
"def check_if_permutation_correct(perm_lst, lst1, lst2):\r\n index = 0\r\n len_lst = len(lst1)\r\n for permutation in perm_lst:\r\n permutation = int(permutation)\r\n while (lst1[index % len_lst] == lst2[(index + permutation) % len_lst]) \\\r\n and (index != len_lst):\r\n index += 1\r\n if index == len_lst:\r\n return True\r\n return False",
"def _offbyone_check(num1: int, num2: int) -> bool:\n return num1 == num2 or num1 + 1 == num2 or num1 - 1 == num2",
"def is_permutation(xs, ys):\n return sorted(xs) == sorted(ys)",
"def palindromo(numero):\n\treturn espejo(numero)==numero",
"def is_digit_sum(num):\n perms = get_permutations(str(num))\n for perm in perms:\n sum_of_other_digits = sum([int(x) for x in perm[1:]])\n if int(perm[0]) == sum_of_other_digits:\n return True\n\n return False",
"def check_control_number(id_code: str):\r\n numbers = [int(id_code[0]), int(id_code[1]), int(id_code[2]), int(id_code[3]), int(id_code[4]), int(id_code[5]),\r\n int(id_code[6]), int(id_code[7]), int(id_code[8]), int(id_code[9])]\r\n multip_1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1]\r\n multip_2 = [3, 4, 5, 6, 7, 8, 9, 1, 2, 3]\r\n result1 = sum([numbers[i] * multip_1[i] for i in range(10)]) % 11\r\n if result1 == 10:\r\n result2 = sum([numbers[i] * multip_2[i] for i in range(10)]) % 11 % 10\r\n return result2 == int(id_code[10])\r\n return result1 == int(id_code[10])",
"def jaccard_distance(a, b):\n a = set(a)\n b = set(b)\n try:\n # suppose that number2 is a float\n return 1.0 * len(a & b) / min(map(len, (a, b)))\n except ZeroDivisionError:\n return 0.0\n print(a)\n print(b)\n #return 1.0 * len(a & b) / min(map(len, (a, b)))",
"def is_permutation_v2(string1, string2):\n\tstring1_dict = str_count_dict(string1)\n\tstring2_dict = str_count_dict(string2)\n\n\tif string1_dict == string2_dict:\n\t\treturn True\n\treturn False",
"def is_permutation_v3(string1, string2):\n\n\tstring1_dict = str_count_dict(string1)\n\n\tfor c in string2:\n\t\tif c in string1_dict:\n\t\t\tstring1_dict[c] -= 1\n\t\telse:\n\t\t\treturn False\n\n\tfor char, count in string1_dict.iteritems():\n\t\tif count != 0:\n\t\t\treturn False\n\n\treturn True",
"def hasPalindromePermutation(input):\n\n asciiChars = [0 for i in range(128)]\n for c in input:\n asciiChars[ord(c)] += 1\n \n seenOneOdd = False\n\n for c in asciiChars:\n if c % 2 != 0:\n if seenOneOdd:\n return False\n\n seenOneOdd = True\n \n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
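Two spot checks for the `related_by_digit_permutation` document above, which compares the multisets of decimal digits via `collections.Counter`; the sample numbers are arbitrary.

```python
# Spot checks for related_by_digit_permutation from the record above.
from collections import Counter

def related_by_digit_permutation(num_a, num_b):
    return Counter(str(num_a)) == Counter(str(num_b))

print(related_by_digit_permutation(125874, 251748))  # True  (same digits, reordered)
print(related_by_digit_permutation(123, 1233))       # False (extra digit)
```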
Initialize the lattice by defining which vertices are connected without assuming the size of the lattice. neighbor_function(row_idx, col_idx, row_dim, col_dim) returns a list of (row_idx, col_idx) neighbors. weight_function(matrix, head_row_idx, head_col_idx, tail_row_idx, tail_col_idx) returns the weight of the edge from head to tail.
|
def __init__(self, matrix, neighbor_function, weight_function):
self.lattice = matrix
self.row_dim = len(self.lattice)
self.col_dim = len(self.lattice[0])
self.neighbor_function = neighbor_function
self.weight_function = weight_function
self.consistency_check()
self.build_adjacency_list()
|
[
"def periodic_lattice(node_number, neighbors):\n import numpy as num\n from kreveik import *\n from kreveik.classes import TopologicalNetwork \n adjacency_matrix = num.zeros((node_number,node_number))\n for i in range(node_number):\n for j in range(neighbors):\n adjacency_matrix[i][i-j-1]=1\n adjacency_matrix=adjacency_matrix + adjacency_matrix.transpose()\n new_network=TopologicalNetwork(adjacency_matrix)\n return new_network",
"def get_neighbourhood(self):\n\n mu, var = np.random.normal(loc=0,scale=4,size=2)\n var = np.abs(var)\n self.neigh_feats = np.random.normal(loc=mu,scale=var,size=(self.n_neighbours,self.n_ftrs))\n #self.neigh_feats = np.random.normal(loc=mu,scale=var,size=(np.random.uniform(int(self.n_neighbours*.8),int(self.n_neighbours*1.2),self.n_ftrs)))\n #conn_str = lambda x,y",
"def create_lattice(self):\n G = nx.Graph()\n nodes = list(range(self.n))\n G.add_nodes_from(nodes)\n h = ((self.n - 1) // self.k) # the number of the lowest row\n for node in nodes:\n row = node // self.k\n column = node % self.k\n # lower\n if node + self.k < self.n:\n G.add_edge(node, node + self.k)\n else:\n G.add_edge(node, column)\n # right\n if column == (self.k - 1): # rightmost column\n G.add_edge(node, node - self.k + 1)\n elif node + 1 < self.n:\n G.add_edge(node, node + 1)\n else:\n G.add_edge(node, h * self.k)\n # lower-right\n if column == (self.k - 1): # rightmost column\n if node + 1 == self.n: # last point\n G.add_edge(node, 0)\n else:\n G.add_edge(node, node + 1)\n else:\n if (node + self.k + 1) < self.n:\n G.add_edge(node, node + self.k + 1)\n else:\n G.add_edge(node, column + 1)\n # lower-left\n if column == 0: # leftmost column\n if row == h:\n G.add_edge(node, self.k)\n elif row == h - 1:\n G.add_edge(node, self.n - 1)\n else:\n G.add_edge(node, node + 2 * self.k - 1)\n elif (node + self.k - 1) < self.n:\n G.add_edge(node, node + self.k - 1)\n else:\n G.add_edge(node, (column - 1) % self.k)\n \"\"\"\n if node + self.k in nodes:\n G.add_edge(node, node + self.k)\n if node % self.k != (self.k - 1) and node + 1 in nodes:\n G.add_edge(node, node + 1)\n \"\"\"\n return G",
"def set_all_neighbours(self) :\n\n\t\tN = self.size\n\n\t\tfor row in range(N) :\n\t\t\tfor col in range(N) :\n\n\t\t\t\tnext_row = (row + 1) % self.size\n\t\t\t\tnext_col = (col + 1) % self.size\n\t\t\t\tprev_row = (row - 1) % self.size\n\t\t\t\tprev_col = (col - 1) % self.size\n\t\t\t\t\n\t\t\t\tneighbours = [self.lattice_array[prev_row, col], self.lattice_array[next_row, col], self.lattice_array[row, prev_col], self.lattice_array[row, next_col]]\n\t\t\t\t\n\t\t\t\tself.lattice_array[row, col].set_neighbours(neighbours)\n\t\t\t\tself.lattice_array[row, col].set_location(row, col)\n\n\t\treturn self.lattice_array",
"def create_neighbors(self):\n for row in self._currentGrid:\n for cell in row:\n row = cell.get_row()\n column = cell.get_column()\n if row == 0:\n # 1. upper left corner (3 neighbors)\n if column == 0:\n #print('upper left')\n cell.add_neighbor(self._currentGrid[row][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column + 1])\n # 2. rest of the top row (5 neighbors)\n elif column < (self._columns - 1):\n #print('upper')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column - 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column + 1])\n # upper right corner (3 neighbors)\n else:\n #print('upper right')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column - 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column])\n # middle row\n elif row < (self._rows - 1):\n #print('middle')\n # 1. middle left edge (5 neighbors)\n if column == 0:\n #print('middle left edge')\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n cell.add_neighbor(self._currentGrid[row - 1][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][self._columns - 1])\n # 2. rest of the middle row (8 neighbors)\n elif column < (self._columns - 1):\n #print('upper')\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n # 3. 
middle right edge (5 neighbors)\n else:\n #print('middle right edge')\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[row][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - self._columns - 1])\n # bottom row\n else:\n #print('lower')\n # 1. bottom left corner (3 neighbors)\n if column == 0:\n #print('lower left')\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[0][column])\n cell.add_neighbor(self._currentGrid[0][column + 1])\n cell.add_neighbor(self._currentGrid[row][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row - 1][self._columns - 1])\n cell.add_neighbor(self._currentGrid[0][column])\n # 2. rest of the bottom row (5 neighbors)\n elif column < (self._columns - 1):\n #print('upper')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[0][column - 1])\n cell.add_neighbor(self._currentGrid[0][column + 1])\n cell.add_neighbor(self._currentGrid[0][column])\n # bottom right corner (3 neighbors)\n else:\n #print('upper right')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[0][column - 1])\n cell.add_neighbor(self._currentGrid[0][column])\n cell.add_neighbor(self._currentGrid[row - 1][0])\n cell.add_neighbor(self._currentGrid[row][0])",
"def connectivity_matrix_neigh_feats(n_ftrs,n_neighbours,n_testNs):\n \n node_set = []\n for i in range(n_testNs):\n node_set.append(node(n_ftrs,n_neighbours))\n\n W = np.random.normal(size=n_ftrs*4)\n #nonLin = lambda x: np.tanh(x)\n conn_mtx_n = np.zeros([n_testNs,n_testNs])\n\n for i1,pair1 in enumerate(node_set):\n for i2,pair2 in enumerate(node_set):\n conn_mtx_n[i1,i2] = (W.dot(np.concatenate([pair1.ftrs,pair1.agg,pair2.ftrs,pair2.agg])))\n return conn_mtx_n, W, node_set",
"def __init__(self,featdim,mapdim,wtsintvl,wrap=False, var=1):\n self.featdim = featdim\n self.wrap = wrap\n self.mapdim = mapdim\n self.mapwidth, self.maplength = mapdim\n self.nnodes = np.prod(mapdim)\n self.mapc = np.array([[i,j] for i in range(self.mapdim[0]) for j in range(self.mapdim[1])])\n # compute distances between nodes\n self.mapdistances = np.zeros((self.nnodes,self.nnodes))\n if wrap: # if edges are wrapped make copies of lattice and find distances by taking minimum\n generators = [np.array([i,j]) for i in range(-1,2) for j in range(-1,2)]\n copies = {i: generators[i]*(self.mapdim) + self.mapc for i in range(len(generators))}\n for i in range(self.nnodes):\n for j in range(i+1, self.nnodes):\n self.mapdistances[i][j] = min(euclidean(self.mapc[i],copy[j],var) for copy in copies.values())\n self.mapdistances[j][i] = self.mapdistances[i][j]\n else:\n for i in range(self.nnodes):\n for j in range(i+1, self.nnodes):\n self.mapdistances[i][j] = euclidean(self.mapc[i],self.mapc[j],var)\n self.mapdistances[j][i] = self.mapdistances[i][j]\n # initialize weights using UNIF[-1,1]\n self.weights = (np.ones(self.nnodes*self.featdim)*wtsintvl[0]\n + (wtsintvl[1] - wtsintvl[0])*np.random.rand(self.nnodes*self.featdim)).reshape((self.nnodes, self.featdim))",
"def lattice_builder(edges):\n\n\n topEdge, bottomEdge, leftEdge, rightEdge = edges \n # initializes the lattice\n latticeList = Lattice(np.zeros((containerSize, containerSize, 6), np.int8))\n\n # top left corner and top right corner positions are set, they won't vary\n # if the container size is odd or even.\n latticeList.array[0][0] = (0, 2, 2, 2, 2, 0) # topLeft\n latticeList.array[containerSize-1][0] = (2, 2, 2, 0, 0, 2) # topRight\n\n\n # the following if/else statement sets the walls for the bottom corners, which vary\n # based on whether the container size is odd or even. If even, the final row is short,\n # if odd, the final row is the same as the top row.\n if containerSize % 2 == 0: \n latticeList.array[containerSize-2][containerSize-1] = (2, 0, 0, 0, 2, 2) # bottomRight\n latticeList.array[0][containerSize-1] = (0, 0, 0, 2, 2, 2) # bottomLeft\n \n else:\n latticeList.array[containerSize-1][containerSize-1] = (2, 2, 0, 0, 2, 2) # bottomRight \n latticeList.array[0][containerSize-1] = (0, 0, 2, 2, 2, 2) # bottomLeft\n\n\n # the following for loops declare the edges based on either the lists provided by the\n # user, or automatically produced by auto_square_edges().\n for i in range(0,len(topEdge)):\n column, row = topEdge[i]\n latticeList.array[column][row] = (0, 2, 2, 0, 0, 0)\n \n \n for i in range(0,len(bottomEdge)):\n column, row = bottomEdge[i]\n latticeList.array[column][row] = (0, 0, 0, 0, 2, 2) \n \n \n for i in range(0,len(leftEdge)):\n column, row = leftEdge[i]\n \n if i % 2 == 1:\n latticeList.array[column][row] = (0, 0, 2, 2, 2, 0)\n else:\n latticeList.array[column][row] = (0, 0, 0, 2, 0, 0)\n \n \n for i in range(0,len(rightEdge)):\n column, row = rightEdge[i]\n \n if i % 2 == 1:\n latticeList.array[column][row] = (2, 2, 0, 0, 0, 2)\n else:\n latticeList.array[column][row] = (2, 0, 0, 0, 0, 0)\n latticeList.array[column+1][row] = (2, 2, 2, 2, 2, 2)\n\n\n return latticeList",
"def __init__(self, n_neighbors):\n self.n_neighbors = n_neighbors\n self.data = ()",
"def build(self, edges):\n\n\n topEdge, bottomEdge, leftEdge, rightEdge = edges \n \n\n # top left corner and top right corner positions are set, they won't vary\n # if the container size is odd or even.\n latticeList.array[0][0] = (0, 2, 2, 2, 2, 0) # topLeft\n latticeList.array[containerSize-1][0] = (2, 2, 2, 0, 0, 2) # topRight\n\n\n # the following if/else statement sets the walls for the bottom corners, which vary\n # based on whether the container size is odd or even. If even, the final row is short,\n # if odd, the final row is the same as the top row.\n if containerSize % 2 == 0: \n latticeList.array[containerSize-2][containerSize-1] = (2, 0, 0, 0, 2, 2) # bottomRight\n latticeList.array[0][containerSize-1] = (0, 0, 0, 2, 2, 2) # bottomLeft\n \n else:\n latticeList.array[containerSize-1][containerSize-1] = (2, 2, 0, 0, 2, 2) # bottomRight \n latticeList.array[0][containerSize-1] = (0, 0, 2, 2, 2, 2) # bottomLeft\n\n\n # the following for loops declare the edges based on either the lists provided by the\n # user, or automatically produced by auto_square_edges().\n for i in range(0,len(topEdge)):\n column, row = topEdge[i]\n latticeList.array[column][row] = (0, 2, 2, 0, 0, 0)\n \n \n for i in range(0,len(bottomEdge)):\n column, row = bottomEdge[i]\n latticeList.array[column][row] = (0, 0, 0, 0, 2, 2) \n \n \n for i in range(0,len(leftEdge)):\n column, row = leftEdge[i]\n \n if i % 2 == 1:\n latticeList.array[column][row] = (0, 0, 2, 2, 2, 0)\n else:\n latticeList.array[column][row] = (0, 0, 0, 2, 0, 0)\n \n \n for i in range(0,len(rightEdge)):\n column, row = rightEdge[i]\n \n if i % 2 == 1:\n latticeList.array[column][row] = (2, 2, 0, 0, 0, 2)\n else:\n latticeList.array[column][row] = (2, 0, 0, 0, 0, 0)\n latticeList.array[column+1][row] = (2, 2, 2, 2, 2, 2)\n\n\n return latticeList",
"def _setup_nodes(self):\n\n # Hard Coded connections based on indices.\n # ([Tiles], [Neighbors])\n Connection = namedtuple('Connection', ['tiles', 'neighbors'])\n connections = {\n 0: Connection([0], [3, 4]),\n 1: Connection([1], [4, 5]),\n 2: Connection([2], [5, 6]),\n 3: Connection([0], [0, 7]),\n 4: Connection([0, 1], [0, 1, 8]),\n 5: Connection([1, 2], [1, 2, 9]),\n 6: Connection([2], [2, 10]),\n 7: Connection([0, 3], [3, 11, 12]),\n 8: Connection([0, 1, 4], [4, 12, 13]),\n 9: Connection([1, 2, 5], [5, 13, 14]),\n 10: Connection([2, 6], [6, 14, 15]),\n 11: Connection([3], [7, 16]),\n 12: Connection([0, 3, 4], [7, 8, 17]),\n 13: Connection([1, 4, 5], [8, 9, 18]),\n 14: Connection([2, 5, 6], [9, 10, 19]),\n 15: Connection([6], [10, 20]),\n 16: Connection([3, 7], [11, 21, 22]),\n 17: Connection([3, 4, 8], [12, 22, 23]),\n 18: Connection([4, 5, 9], [13, 23, 24]),\n 19: Connection([5, 6, 10], [14, 24, 25]),\n 20: Connection([6, 11], [15, 25, 26]),\n 21: Connection([7], [16, 27]),\n 22: Connection([3, 7, 8], [16, 17, 28]),\n 23: Connection([4, 8, 9], [17, 18, 29]),\n 24: Connection([5, 9, 10], [18, 19, 30]),\n 25: Connection([6, 10, 11], [19, 20, 31]),\n 26: Connection([11], [20, 32]),\n 27: Connection([7], [21, 33]),\n 28: Connection([7, 8, 12], [22, 33, 34]),\n 29: Connection([8, 9, 13], [23, 34, 35]),\n 30: Connection([9, 10, 14], [24, 35, 36]),\n 31: Connection([10, 11, 15], [25, 36, 37]),\n 32: Connection([11], [26, 37]),\n 33: Connection([7, 12], [27, 28, 38]),\n 34: Connection([8, 12, 13], [28, 29, 39]),\n 35: Connection([9, 13, 14], [29, 30, 40]),\n 36: Connection([10, 14, 15], [30, 31, 41]),\n 37: Connection([11, 15], [31, 32, 42]),\n 38: Connection([12], [33, 43]),\n 39: Connection([12, 13, 16], [34, 43, 44]),\n 40: Connection([13, 14, 17], [35, 44, 45]),\n 41: Connection([14, 15, 18], [36, 45, 46]),\n 42: Connection([15], [37, 46]),\n 43: Connection([12, 16], [38, 39, 47]),\n 44: Connection([13, 16, 17], [39, 40, 48]),\n 45: Connection([14, 17, 18], [40, 41, 49]),\n 46: Connection([15, 18], [41, 42, 50]),\n 47: Connection([16], [43, 51]),\n 48: Connection([16, 17], [44, 51, 52]),\n 49: Connection([17, 18], [45, 52, 53]),\n 50: Connection([18], [46, 53]),\n 51: Connection([16], [47, 48]),\n 52: Connection([17], [48, 49]),\n 53: Connection([18], [49, 50])\n }\n\n # Setup nodes w/ tiles.\n for i in range(54):\n self.nodes[i].tiles = [self.tiles[j]\n for j\n in connections[i].tiles]\n\n # Connect nodes to each other\n for i in range(54):\n self.nodes[i].neighbors = [self.nodes[j]\n for j\n in connections[i].neighbors]",
"def _build_neighbor_list(\n self,\n Z: torch.Tensor,\n positions: torch.Tensor,\n cell: torch.Tensor,\n pbc: torch.Tensor,\n cutoff: float,\n ):\n raise NotImplementedError",
"def cellular_automaton2d(rows, cols, r=1, neighbourhood='Moore', boundary=\"periodic\"):\n n = rows * cols\n if n < 9:\n raise Exception(\"There must be at least 9 cells\")\n adjacency_matrix = [[0. for j in range(n)] for i in range(n)]\n if boundary == \"periodic\":\n if neighbourhood == 'von Neumann':\n criteria = lambda a_i, b_i, a_o, b_o, radius, rownum: np.abs(a_i - a_o) + np.abs(b_i - b_o) <= radius\n elif neighbourhood == 'Moore':\n criteria = lambda a_i, b_i, a_o, b_o, radius, rownum: np.abs(a_i - a_o) <= radius and np.abs(b_i - b_o) <= radius\n elif neighbourhood == 'Hex':\n def hex_crit(a_i, b_i, a_o, b_o, radius, rownum):\n vn = np.abs(a_i - a_o) + np.abs(b_i - b_o) <= radius\n if rownum % 2 == 0:\n ex = (b_i - b_o) < radius\n else:\n ex = (b_o - b_i) < radius\n return vn or ex\n criteria = hex_crit\n else:\n raise Exception(\"neighbourhood type not supported: %s\" % neighbourhood)\n\n lattice = np.array(range(n)).reshape((rows, cols)).tolist()\n rownum = 0\n for a, row in enumerate(lattice):\n rownum += 1\n for b, _ in enumerate(row):\n adjacency_row_num = lattice[a][b]\n neighbourhood_points = _get_neighbourhood_points2d(a, b, r, criteria, rownum)\n for point in neighbourhood_points:\n x = point[0] if point[0] == -1 else point[0] % len(lattice)\n y = point[1] if point[1] == -1 else point[1] % len(lattice[a])\n adjacency_matrix[adjacency_row_num][lattice[x][y]] = 1.\n\n else:\n raise Exception(\"unsupported boundary condition: %s\" % boundary)\n return adjacency_matrix",
"def neighbor_indices(self):",
"def _initialize(self):\n # Prepare list of each elements\n verts_2d_list = [m.verts_2d for m in self.mesh_list]\n faces_list = [m.faces for m in self.mesh_list]\n edgemap_list = [torch.from_numpy(m.edgemap).unsqueeze(0) for m in self.mesh_list]\n adj_edges_list = [m.adj_edges for m in self.mesh_list]\n\n # Create BatchTensor\n self.verts_2d = BatchTensor(verts_2d_list, device=self.device)\n self.faces = BatchTensor(faces_list, device=self.device)\n self.adj_edges = BatchTensor(adj_edges_list, device=self.device)\n\n self.edgemaps = torch.stack(edgemap_list, 0)",
"def __init__(self, nx, ny, ix=0, iy=0):\r\n\r\n self.nx, self.ny = nx, ny\r\n self.ix, self.iy = ix, iy\r\n self.maze_map = [[Cell(x, y) for y in range(ny)] for x in range(nx)]",
"def initialize(rows=50, cols=50, prob=0.5):\n\n lattice = np.random.choice([1, -1], size=(rows, cols), p=[prob, 1 - prob])\n\n return lattice",
"def __init__(self, nx, ny, ix=0, iy=0):\n\n self.nx, self.ny = nx, ny\n self.ix, self.iy = ix, iy\n self.maze_map = [[Cell(x, y) for y in range(ny)] for x in range(nx)]\n self.make_maze()",
"def _get_neighbors(self, matrix):\n self.neighbors = list() # FIXME: name refactor to step_queue instead of neighbors\n \n matrix_Nrows = len(matrix)\n matrix_Ncols = len(matrix[0]) # FIXME: no numpy array shape to help us to validate this.\n\n # FIXME: use a doble loop with + and - instead or define the jump vectors maybe more general\n steps_list = [ (1, 0), (-1, 0), (0, 1), (0, -1)]\n if self.previous_cell is None:\n # TODO: ADD EVERYTHING\n # No previous step\n for step in steps_list:\n next_r = self.r + step[0] \n next_c = self.c + step[1] \n b_rows = (next_r < matrix_Nrows) and (next_r >= 0)\n b_cols = (next_c < matrix_Ncols) and (next_c >= 0)\n if b_rows and b_cols:\n if matrix[next_r][next_c] == 0:\n self.neighbors.append(step)\n else:\n # Append first the step back\n previous_step = (self.previous_cell.r - self.r, self.previous_cell.c - self.c)\n self.neighbors.append(previous_step)\n for step in steps_list:\n if step != previous_step:\n next_r = self.r + step[0] \n next_c = self.c + step[1] \n b_rows = (next_r < matrix_Nrows) and (next_r >= 0)\n b_cols = (next_c < matrix_Ncols) and (next_c >= 0)\n if b_rows and b_cols:\n if matrix[next_r][next_c] == 0:\n #print(\" aaa: \", step, matrix[next_r][next_c], next_r, next_c) \n self.neighbors.append(step)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check that (1) the lattice is indeed rectangular; (2) the neighbor function is callable; (3) the weight function is callable.
|
def consistency_check(self):
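    # Rectangularity check: every row of the lattice must have exactly
    # col_dim entries, so a single ragged row triggers the assertion below.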
for _row in self.lattice:
assert len(_row) == self.col_dim
assert callable(self.neighbor_function)
assert callable(self.weight_function)
|
[
"def testNeighborMasking(self):\n \"\"\"\n We create another object separated from the one of\n interest, which should be masked.\n \"\"\"\n self.checkCandidateMasking([(self.x+5, self.y, 1.0)])",
"def CheckBounds(self, ):\n ...",
"def test_just_inside():\n rmg = RasterModelGrid(4, 5, dx=2.0)\n\n assert_equal(rfuncs.is_coord_on_grid(rmg, (0., 4.)), True)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (8. - 1e-12, 4.)), True)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (3., 0.)), True)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (3., 6. - 1e-12)), True)",
"def test_neighbor_locations():\n des = Desert((2, 4))\n\n nt.assert_list_equal([(1, 4), (2, 5), (3, 4), (2, 3)],\n des.neighbour_locations(),\n \"Returns wrong locations for neighboring cells\")",
"def __check_neighbours(self):\n horizontal = False\n vertical = False\n x, y = self._coord.get_coord_tuple()\n if (x, y) in Ghost.neighbours_map.keys():\n return Ghost.neighbours_map.get((x, y))\n else:\n keys = self.__coord_dict.keys()\n if (x - 1, y) not in keys or (x + 1, y) not in keys or not self.__coord_dict.get(\n (x - 1, y)).is_wall() or not self.__coord_dict.get((x + 1, y)).is_wall():\n horizontal = True\n if (x, y + 1) not in keys or (x, y - 1) not in keys or not self.__coord_dict.get(\n (x, y - 1)).is_wall() or not self.__coord_dict.get((x, y + 1)).is_wall():\n vertical = True\n Ghost.neighbours_map[(x, y)] = horizontal and vertical\n return Ghost.neighbours_map.get((x, y))",
"def check_neighbor(self, input_ex: Example, neighbor_ex: Example,\n state: Any) -> bool:\n raise NotImplementedError",
"def testFaintNeighborMasking(self):\n \"\"\"\n We create another faint (i.e., undetected) object separated\n from the one of interest, which should be masked.\n \"\"\"\n self.checkCandidateMasking([(self.x+5, self.y, 0.5)], threshold=0.9, pixelThreshold=1.0)",
"def verify(self):\n for i in self.coords:\n if np.abs(6*i-int(6*i))>0.1: return False\n if np.abs(self.coords[2]+self.coords[0]+self.coords[1]) > 0.1: return False\n return True",
"def test_trustworthiness_n_neighbors_error():\n regex = \"n_neighbors .+ should be less than .+\"\n rng = np.random.RandomState(42)\n X = rng.rand(7, 4)\n X_embedded = rng.rand(7, 2)\n with pytest.raises(ValueError, match=regex):\n trustworthiness(X, X_embedded, n_neighbors=5)\n\n trust = trustworthiness(X, X_embedded, n_neighbors=3)\n assert 0 <= trust <= 1",
"def test_just_outside():\n rmg = RasterModelGrid(4, 5, dx=2.0)\n\n assert_equal(rfuncs.is_coord_on_grid(rmg, (0. - 1e-12, 4.)), False)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (8., 4.)), False)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (3., 0. - 1e-12)), False)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (3., 6.)), False)",
"def check_corners(x0,x1,y0,y1):\r\n pxl_corners = [[x0,y0],\r\n [x0,y1],\r\n [x1,y0],\r\n [x1,y1]]\r\n latlon_corners = np.zeros((4,2))\r\n for i,coord in enumerate(pxl_corners):\r\n latlon_corners[i,0],latlon_corners[i,1] = latlon_to_pixel(coord[0],coord[1],inverse=True,data_dir=SAT_DATA_DIR)\r\n return all(check_footprint(SAT_DATA_DIR,latlon_corners))",
"def isVis(self, c1, c2) :\n\t\tcellSize = self.cellSize\n\n\t\tx1 = float(cellSize*(c1%self.numg))\n\t\ty1 = float(cellSize*(c1/self.numg))\n\t\tx2 = float(cellSize*(c2%self.numg))\n\t\ty2 = float(cellSize*(c2/self.numg))\n\n\t\t# define points near cell corners in panda coordinates\n\t\tpoints1 = ( \\\n\t\t\tPoint3(x1+.001*cellSize,y1+.001*cellSize,0), \\\n\t\t\tPoint3(x1+.001*cellSize,y1+.999*cellSize,0), \\\n\t\t\tPoint3(x1+.999*cellSize,y1+.999*cellSize,0), \\\n\t\t\tPoint3(x1+.999*cellSize,y1+.001*cellSize,0))\n\t\tpoints2 = ( \\\n\t\t\tPoint3(x2+.001*cellSize,y2+.001*cellSize,0), \\\n\t\t\tPoint3(x2+.001*cellSize,y2+.999*cellSize,0), \\\n\t\t\tPoint3(x2+.999*cellSize,y2+.999*cellSize,0), \\\n\t\t\tPoint3(x2+.999*cellSize,y2+.001*cellSize,0))\n\n\t\t# Check sightlines between all pairs of \"corners\"\n\t\tfor p1 in points1 :\n\t\t\tfor p2 in points2 :\n\t\t\t\tif self.pWorld.canSee(p1,p2) :\n\t\t\t\t\treturn True\n\t\treturn False",
"def test_get_cells(self):\n assert type(self.island._cells[1, 1]).__name__ is 'Lowland'",
"def check_neighbors(self, data, map_size):\n unknowns = 0\n obstacles = 0\n\n for x in range(-3, 4):\n for y in range(-3, 4):\n row = x * 384 + y\n try:\n if data.data[map_size + row] == -1:\n unknowns += 1\n elif data.data[map_size + row] > 0.65:\n obstacles += 1\n except IndexError:\n pass\n if unknowns > 0 and obstacles < 2:\n return True\n else:\n return False",
"def test_is_cell_valid_false():\n \n # default value of dimensions is (6x7)\n board = Board()\n pos = [[0,-2],[9,11],[0,7],[1,8]]\n\n assert(board.is_cell_valid(pos[0][0], pos[0][1]) == False)\n assert(board.is_cell_valid(pos[1][0], pos[1][1]) == False)\n assert(board.is_cell_valid(pos[2][0], pos[2][1]) == False)\n assert(board.is_cell_valid(pos[3][0], pos[3][1]) == False)",
"def test_refpoints(self):\n self.ld.compute(self.box, self.pos)\n density = self.ld.density\n\n npt.assert_array_less(np.fabs(density - 10.0), 1.5)\n\n neighbors = self.ld.num_neighbors\n npt.assert_array_less(np.fabs(neighbors - 1130.973355292), 200)",
"def check_if_in_the_lattice(self, pt):\n if pt.x >= 0 and pt.x < self.dim.x and pt.y >= 0 and pt.y < self.dim.y and pt.z >= 0 and pt.z < self.dim.z:\n return True\n return False",
"def test_can_reach_square(self):\n start_row = 5\n start_col = 5\n start = Square(start_row, start_col)\n\n valid_dests = [\n Square(start_row + 1, start_col + 1),\n Square(start_row + 1, start_col - 1),\n Square(start_row - 1, start_col + 1),\n Square(start_row - 1, start_col - 1),\n Square(start_row + 4, start_col + 4),\n Square(start_row + 3, start_col - 3),\n Square(start_row - 3, start_col + 3),\n Square(start_row - 2, start_col - 2)\n ]\n for dest in valid_dests:\n self.assertTrue(self.bishop.can_reach_square(start, dest))\n\n invalid_dests = [\n Square(start_row, start_col + 1),\n Square(start_row + 3, start_col),\n Square(start_row + 2, start_col + 4),\n Square(start_row - 5, start_col + 4),\n Square(start_row - 3, start_col + 6),\n ]\n for dest in invalid_dests:\n self.assertFalse(self.bishop.can_reach_square(start, dest))",
"def test_get_neighbor(self):\n sample_game = Go(4)\n self.assertEqual([[1,0],[0,1]],sample_game.get_neighbor([0,0]))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Flatten a 2D index to a 1D index.
|
def flatten_index(self, i, j):
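    # Row-major flattening: cell (i, j) maps to i * col_dim + j, so e.g. with
    # col_dim == 4 (hypothetical value), cell (2, 3) maps to flat index 11.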
return i * self.col_dim + j
|
[
"def unflatten_index(self, idx):\n return idx // self.col_dim, idx % self.col_dim",
"def flatten(self) -> WordIndex:\n index = self.index.flatten()\n return self._from_index(index)",
"def flatten_idxs(idx_in, jaggedarray):\n if len(idx_in) == 0:\n return numpy.array([], dtype=numpy.int)\n idx_out = jaggedarray.starts[idx_in[0]]\n if len(idx_in) == 1:\n pass\n elif len(idx_in) == 2:\n idx_out += idx_in[1]\n else:\n raise Exception(\"jme_standard_function only works for two binning dimensions!\")\n\n flattened = awkward.flatten(jaggedarray)\n good_idx = idx_out < len(flattened)\n if (~good_idx).any():\n input_idxs = tuple(\n [idx_out[~good_idx]] + [idx_in[i][~good_idx] for i in range(len(idx_in))]\n )\n raise Exception(\n \"Calculated invalid index {} for\"\n \" array with length {}\".format(numpy.vstack(input_idxs), len(flattened))\n )\n\n return idx_out",
"def compress_2D_index_forward(xfft, index_forward):\n if index_forward == xfft.shape[-2]:\n return xfft\n n = index_forward - 1\n top_left = xfft[..., :n + 1, :n + 1, :]\n if n > 0:\n bottom_left = xfft[..., -n:, :n + 1, :]\n return torch.cat((top_left, bottom_left), dim=-3)\n else:\n return top_left",
"def crop_window_to_flattened_indices_torch(indices: torch.Tensor, shape: list):\n xind = torch.as_tensor(indices[-1]).view(1, len(indices[-1])) % shape[-1]\n yind = torch.as_tensor(indices[-2]).view(len(indices[-2]), 1) % shape[-2]\n return (xind + yind * shape[-1]).flatten().type(torch.LongTensor)",
"def project_outer(self, index):\n indices = torch.div(index.indices, self.inner_index.num_segments, rounding_mode=\"floor\").type(torch.long)\n return IndexMap(indices=indices, num_segments=self.outer_index.num_segments, batch_dims=index.batch_dims)",
"def flatten(x):\n return tf.reshape(x, [-1])",
"def flatten(tensor):\r\n c = tensor.size(1)\r\n # new axis order\r\n axis_order = (1, 0) + tuple(range(2, tensor.dim()))\r\n # Transpose: (N, C, D, H, W) -> (C, N, D, H, W)\r\n transposed = tensor.permute(axis_order)\r\n # Flatten: (C, N, D, H, W) -> (C, N * D * H * W)\r\n return transposed.contiguous().view(c, -1)",
"def doublelist2flatlistindex(listlist):\n flatlist = []\n indexlist = []\n for ind, entries in enumerate(listlist):\n flatlist += entries\n indexlist += [ind for j in entries]\n return flatlist, np.array(indexlist)",
"def flatlistindex2doublelist(flatlist, indexarray):\n Nlist = max(indexarray) + 1\n listlist = [[] for n in range(Nlist)]\n for entry, ind in zip(flatlist, indexarray):\n listlist[ind].append(entry)\n return listlist",
"def _2d(a):\n\tif len(a.shape) == 1:\n\t\ta.shape = (len(a), 1)\n\treturn a",
"def expand_index_like(index: torch.Tensor, tokens: torch.Tensor) -> torch.Tensor:\n dim = tokens.shape[-1]\n index = index.unsqueeze(-1).expand(-1, -1, dim)\n return index",
"def unravel_index(index: int, shape: torch.Tensor):\n out = []\n shape = torch.flip(shape, dims=(0,))\n for dim in shape:\n out.append(index % dim)\n index = index // dim\n out = torch.tensor([int(x.item()) for x in out])\n return torch.flip(out, dims=(0,))",
"def normalize_index(idx, shape):\n if not isinstance(idx, tuple):\n idx = (idx,)\n idx = replace_ellipsis(len(shape), idx)\n n_sliced_dims = 0\n for i in idx:\n if hasattr(i, \"ndim\") and i.ndim >= 1:\n n_sliced_dims += i.ndim\n elif i is None:\n continue\n else:\n n_sliced_dims += 1\n idx = idx + (slice(None),) * (len(shape) - n_sliced_dims)\n if len([i for i in idx if i is not None]) > len(shape):\n raise IndexError(\"Too many indices for array\")\n\n idx = tuple(map(sanitize_index, idx))\n return idx",
"def compress_2D_index_forward_full(xfft, index_forward):\n if index_forward == xfft.shape[-2] // 2 + 1:\n return xfft\n\n n = index_forward - 1\n top_left = xfft[:, :, :n + 1, :n + 1, :]\n if n > 0:\n bottom_left = xfft[:, :, -n:, :n + 1, :]\n top_right = xfft[:, :, :n + 1, -n:, :]\n bottom_right = xfft[:, :, -n:, -n:, :]\n\n # Combine along the H - height (vertical dimension).\n left = torch.cat((top_left, bottom_left), dim=2)\n right = torch.cat((top_right, bottom_right), dim=2)\n # Combine along the W - width (horizontal dimension).\n result = torch.cat((left, right), dim=3)\n return result\n else:\n # Return just a single coefficient.\n return top_left",
"def flatten_matrix(X):\n return X.swapaxes(1,2).reshape((X.shape[0], X.shape[1]*X.shape[2]))",
"def flatten_nd_array(pts_nd, axis=1):\n NDIM = pts_nd.ndim\n SHP = np.array(pts_nd.shape)\n nax = np.setdiff1d(np.arange(0, NDIM), np.array(axis)) # non axis indices\n NPTS = np.prod(SHP[nax])\n axorder = np.concatenate((nax, np.array(axis).flatten()), axis=0)\n pts_flt = pts_nd.transpose(axorder)\n pts_flt = pts_flt.reshape(NPTS, SHP[axis])\n return pts_flt",
"def Flatten1D(input_tensor):\n length = input_tensor.shape[1]\n hidden = input_tensor.shape[2]\n channels = input_tensor.shape[3]\n last_dim = length * hidden * channels\n return tf.reshape(input_tensor, [-1, last_dim])",
"def test_ravel_multi_index_2d(self):\n indices = [[0, 0], [0, 1], [2, 2], [3, 1]]\n expected = [0, 1, 10, 13]\n\n result = array_ops.ravel_multi_index(indices, [4, 4])\n self.assertAllEqual(result, expected)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Unflatten a 1D index to a 2D index.
|
def unflatten_index(self, idx):
return idx // self.col_dim, idx % self.col_dim
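
# Hypothetical, standalone round-trip sketch (not part of the original method
# pair): module-level equivalents of flatten_index / unflatten_index for an
# assumed column dimension of 4, showing that the two mappings are inverses.
COL_DIM = 4

def _flatten(i, j, col_dim=COL_DIM):
    # Row-major flattening, as in flatten_index above.
    return i * col_dim + j

def _unflatten(idx, col_dim=COL_DIM):
    # Integer division recovers the row, the remainder recovers the column.
    return idx // col_dim, idx % col_dim

assert _unflatten(_flatten(2, 3)) == (2, 3)   # (2, 3) -> 11 -> (2, 3)
assert _flatten(*_unflatten(11)) == 11        # 11 -> (2, 3) -> 11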
|
[
"def flatten_index(self, i, j):\n return i * self.col_dim + j",
"def flatten(self) -> WordIndex:\n index = self.index.flatten()\n return self._from_index(index)",
"def flatten_idxs(idx_in, jaggedarray):\n if len(idx_in) == 0:\n return numpy.array([], dtype=numpy.int)\n idx_out = jaggedarray.starts[idx_in[0]]\n if len(idx_in) == 1:\n pass\n elif len(idx_in) == 2:\n idx_out += idx_in[1]\n else:\n raise Exception(\"jme_standard_function only works for two binning dimensions!\")\n\n flattened = awkward.flatten(jaggedarray)\n good_idx = idx_out < len(flattened)\n if (~good_idx).any():\n input_idxs = tuple(\n [idx_out[~good_idx]] + [idx_in[i][~good_idx] for i in range(len(idx_in))]\n )\n raise Exception(\n \"Calculated invalid index {} for\"\n \" array with length {}\".format(numpy.vstack(input_idxs), len(flattened))\n )\n\n return idx_out",
"def ind2sub(shape, inds):\n if type(inds) is not np.ndarray:\n inds = np.array(inds)\n if len(inds.shape) != 1:\n raise ValueError(\"Indexing must be done as a 1D row vector, e.g. [3,6,6,...]\")\n return np.unravel_index(inds, shape, order=\"F\")",
"def unflatten(self, x):\n pass",
"def unravel_index(index: int, shape: torch.Tensor):\n out = []\n shape = torch.flip(shape, dims=(0,))\n for dim in shape:\n out.append(index % dim)\n index = index // dim\n out = torch.tensor([int(x.item()) for x in out])\n return torch.flip(out, dims=(0,))",
"def compress_2D_index_forward(xfft, index_forward):\n if index_forward == xfft.shape[-2]:\n return xfft\n n = index_forward - 1\n top_left = xfft[..., :n + 1, :n + 1, :]\n if n > 0:\n bottom_left = xfft[..., -n:, :n + 1, :]\n return torch.cat((top_left, bottom_left), dim=-3)\n else:\n return top_left",
"def _2d(a):\n\tif len(a.shape) == 1:\n\t\ta.shape = (len(a), 1)\n\treturn a",
"def unflatten_2d_array(pts_flt, pts_nd, axis=1, squeeze=False):\n NDIM = pts_nd.ndim\n SHP = np.array(pts_nd.shape)\n nax = np.setdiff1d(np.arange(0, NDIM), np.array(axis)) # non axis indices\n # NPTS = np.prod(SHP[nax])\n\n if squeeze:\n axorder = nax\n axorder_rev = np.argsort(axorder)\n M = pts_flt.shape[1]\n NEW_SHP = SHP[nax].tolist()\n pts_out = pts_flt.reshape(NEW_SHP)\n pts_out = pts_out.transpose(axorder_rev)\n else:\n axorder = np.concatenate((nax, np.array(axis).flatten()), axis=0)\n axorder_rev = np.argsort(axorder)\n M = pts_flt.shape[1]\n NEW_SHP = SHP[nax].tolist()\n NEW_SHP.append(M)\n pts_out = pts_flt.reshape(NEW_SHP)\n pts_out = pts_out.transpose(axorder_rev)\n\n return pts_out",
"def _triangulation_simplex_indices(self):\n disc = self.discretization\n simplices = self.triangulation.simplices\n new_simplices = np.empty_like(simplices)\n\n # Convert the points to out indices\n index_mapping = disc.state_to_index(self.triangulation.points +\n disc.offset)\n\n # Replace each index with out new_index in index_mapping\n for i, new_index in enumerate(index_mapping):\n new_simplices[simplices == i] = new_index\n return new_simplices",
"def unravel_index(indices: torch.LongTensor, shape) -> torch.LongTensor:\n\n coord = []\n\n for dim in reversed(shape):\n coord.append(indices % dim)\n indices = indices // dim\n\n coord = torch.stack(coord[::-1], dim=-1)\n\n return coord",
"def crop_window_to_flattened_indices_torch(indices: torch.Tensor, shape: list):\n xind = torch.as_tensor(indices[-1]).view(1, len(indices[-1])) % shape[-1]\n yind = torch.as_tensor(indices[-2]).view(len(indices[-2]), 1) % shape[-2]\n return (xind + yind * shape[-1]).flatten().type(torch.LongTensor)",
"def _gather_for_multidim_indexing(args: GatherArgs):\n # Guess the axis.\n axis = args.dnums.collapsed_slice_dims[0]\n squeezed_indices = tf.squeeze(args.start_indices, -1)\n op_shape = jax2tf._eval_shape(args.op_shape)\n start_indices = _clip((op_shape[axis],), squeezed_indices, (1,))\n return tf.gather(args.operand, start_indices, axis=axis, batch_dims=0)",
"def flatlistindex2doublelist(flatlist, indexarray):\n Nlist = max(indexarray) + 1\n listlist = [[] for n in range(Nlist)]\n for entry, ind in zip(flatlist, indexarray):\n listlist[ind].append(entry)\n return listlist",
"def unflatten(space: Space, x: np.ndarray) -> np.ndarray:\n raise NotImplementedError(f\"Unknown space: `{space}`\")",
"def _index(t, index):\n if not isinstance(index, (tuple, list)):\n index = list(index)\n for i in index:\n t = tf.gather(t, i)\n return t",
"def flatten_nd_array(pts_nd, axis=1):\n NDIM = pts_nd.ndim\n SHP = np.array(pts_nd.shape)\n nax = np.setdiff1d(np.arange(0, NDIM), np.array(axis)) # non axis indices\n NPTS = np.prod(SHP[nax])\n axorder = np.concatenate((nax, np.array(axis).flatten()), axis=0)\n pts_flt = pts_nd.transpose(axorder)\n pts_flt = pts_flt.reshape(NPTS, SHP[axis])\n return pts_flt",
"def flatten_2d(array, axis):\n if axis == 0:\n return array.reshape((array.size,), order='C')\n elif np.abs(axis) == 1:\n return array.reshape((array.size), order='F')\n else:\n raise ValueError(\"1-d or 2-d input data are only supported for \" +\n \"functions extracting multiple features per channel.\")",
"def unravel_index(indices: Tensor, shape: Shape) -> Tensor:\n\n shape = indices.new_tensor(shape + (1,))\n coefs = shape[1:].flipud().cumprod(dim=0).flipud()\n\n return torch.div(indices[..., None], coefs, rounding_mode='trunc') % shape[:-1]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Subroutine to update the shortest distance from node i to node j, and the associated maximum-index internal node.
|
def __update_single_pair(self, i, j):
update_value = (
self.__min_distance[i][self.__cap_internal]
+ self.__min_distance[self.__cap_internal][j]
)
    # Distance updates can be done in place: every value used to compute
    # update_value lies in the row or column of __cap_internal, neither of
    # which is itself updated in this iteration. This saves keeping a second
    # copy of self.__min_distance in memory.
if update_value < self.__min_distance[i][j]:
# there is a negative cycle if and only if node i has a negative path to itself
if i == j:
# sanity check that the cycle length is indeed negative; assertion error indicates a bug in the initialization of distances
assert update_value < 0
self.__negative_cycle = True
return
self.__min_distance[i][j] = update_value
self.__max_internal[i][j] = self.__cap_internal
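
# Hypothetical, standalone sketch (not the class above): the pairwise update is
# the inner step of Floyd-Warshall, normally driven by a triple loop in which
# the candidate internal node k plays the role of __cap_internal.
def floyd_warshall(weights):
    # weights: n x n matrix with weights[i][j] the edge weight (float("inf") if
    # there is no edge) and weights[i][i] == 0; returns None on a negative cycle.
    n = len(weights)
    dist = [row[:] for row in weights]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                via_k = dist[i][k] + dist[k][j]
                if via_k < dist[i][j]:
                    if i == j:
                        # a shorter path from i back to itself is a negative cycle
                        return None
                    dist[i][j] = via_k
    return dist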
|
[
"def update_short_dist_to_EI(graph_links, graph_dist, EI, dist_to_EI=0):\n if graph_dist[EI] > dist_to_EI:\n graph_dist[EI] = dist_to_EI\n for N in graph_links[EI]:\n update_short_dist_to_EI(graph_links, graph_dist, N, dist_to_EI+1)",
"def dijkstra(self):\n\n # Initialise the needed variables\n graphs, edges = self.maze_to_graph()\n start = graphs[str(self.maze.start[0]) + \":\" + str(self.maze.start[1])]\n target = graphs[str(self.maze.target[0]) + \":\" + str(self.maze.target[1])]\n\n # In actual_ay all possible next nodes are stored\n actual_way = {\n str(start): NodeGraph(start, None, None)\n }\n # node_way contains all already visited nodes\n node_way = {}\n\n while str(target) not in actual_way.keys():\n # Takes the node with smallest length, that isn't visited\n neares_node = actual_way[min(actual_way, key=lambda k: actual_way[k].get_length())]\n # Create all next possible Nodes, from the actual Node, with the edges that can be go from the actual node\n for edge in neares_node.itself.edges:\n node_to_add = neares_node.itself.edges[edge].node_two\n new_node = NodeGraph(node_to_add, neares_node, neares_node.itself.edges[edge])\n\n # Add only if not in nodes to visit and not in visited nodes so no node get's visited two times.\n # If it is already visited there is an shorter way to reach this Node and cause the algorithm looks for\n # the shortest way its not in need to visit this node again\n if str(new_node.itself) not in list(actual_way.keys()) and \\\n str(new_node.itself) not in list(node_way.keys()):\n new_node.add_length(neares_node.itself.edges[edge].get_length())\n actual_way[str(new_node.itself)] = new_node\n\n # Add the actual node to node_way and remove it from possible next waypoints\n node_way[str(neares_node.itself)] = neares_node\n actual_way.pop(str(neares_node.itself))\n\n # For visualisation makes. Start by target, because the linked List works with previous Nodes\n way = []\n point = actual_way[str(target)]\n\n # Starts to search for start of maze\n while str(point.itself) != str(start):\n way.append(point)\n point = point.privious\n\n # Add the start to way\n way.append(node_way[str(start)])\n\n # Change value of target, only for visualisation\n self.maze.maze[self.maze.target[0]][self.maze.target[1]] = 4\n\n # Reverse the list of waypoints and go through it, that means start at start and at end\n for node in way[::-1]:\n if node.itself and node.privious:\n # Visualise each edge with time delay.\n edge_way = node.edge.get_way()\n self.maze.maze[node.edge.node_one.y][node.edge.node_one.x] = 2\n for wp in edge_way:\n self.maze.maze[wp[0]][wp[1]] = 5\n time.sleep(self.maze.delay)",
"def dijkstra(graph, start, end):\n\n\n #init S ensemble with start_node inside\n S = [start]\n #defin V ensemble with all node of graph\n V = [x for x in range(len(graph))]\n #init distance dictionnary\n distance = {}\n #init previous history dictionnary\n previous = {}\n\n #init all of node distances to inf exept for start node\n for v in V:\n if v != start:\n distance[v] = inf\n\n #loop until S != V\n while len(S) != len(V):\n #for all element of V exept for the element which are in S\n for v in (set(V)-set(S)):\n #init uc as the last element added in S\n uc = S[-1]\n\n #if uc == 0 that signified we are in the start node\n if uc == 0:\n\n #add set uc as previous[v] if the new distance if shortest than the current\n if 0+graph[uc][v] < distance[v]:\n previous[v] = uc\n\n #set the v distance as the min beetween the current v distance and the edge of uc and v.\n distance[v] = min(distance[v], 0+graph[uc][v])\n\n else:\n #add set uc as previous[v] if the new distance if shortest than the current\n if distance[uc]+graph[uc][v] <distance[v]:\n previous[v] = uc\n #set the v distance as the min beetween the current v distance and the distance of u + the edge of uc and v.\n distance[v] = min(distance[v], distance[uc]+graph[uc][v])\n\n #find the node with the shortest distance\n #init vmin as inf\n vmin = inf\n x = inf\n #loop for all v in V / S\n for v in (set(V)-set(S)):\n #if v distance < vmin\n if distance[v] < vmin:\n vmin = distance[v]\n # x = the node with the shortest distance\n x = v\n\n\n # UPDATE STATEMENT\n # define new uc as x\n uc = x\n # add new uc to S\n S.append(uc)\n\n #define total_cost to cost of the ending distance\n total_cost= distance[end]\n #init shortest path\n path = []\n\n #loop to insert in path the previous node from end's node\n while(end != start):\n path.insert(0, end)\n end = previous[end]\n path.insert(0, start)\n\n #return the shortest_way and total cost of dijkstra from start to end\n return path, total_cost",
"def _update_optimum(self, node: Node) -> None:",
"def anneal(self):\n\n # starting from a random path may be more effective, because it could\n # cool very quickly with a nearestneighbor\n \n \"\"\"\n\n # take in a graph and run nnTSP on the graph \n currentBest = self.nnGraph.nn_best_reversed()\n if is_valid_tour(currentBest[0]): \n # the eventual ouput of cities, but intially the input list\n citiesBest = currentBest[0]\n # the prelimary weight of the path (second index of nn_graph tuple)\n currentBest_weight = currentBest[1]\n else: \n print(\"Error: NN Tour is invalid\") \n return None\n\n \"\"\"\n # naive solution to start\n naive = nnGraph(self.cities, self.colorList, self.numcities)\n citiesBest = naive.nn_best_reversed()[0]\n # print \"What is the length of citiesBest? --> \" + str(len(citiesBest))\n currentBest_weight = self.tour_cost(citiesBest)\n starting_weight = currentBest_weight\n \n distances_current = []\n distances_best = []\n\n try:\n for iteration in range(self.maxIterations):\n # search is restarted at every iteration from the best known solution\n temperature = self.start_temp\n cities_current = citiesBest\n cities_new = citiesBest\n distance_current = currentBest_weight\n distance_new = currentBest_weight\n \n ### TEST ###\n # print \"Initialized parameters: \"\n # print \"starting temperature --> \" , temperature\n \"\"\"\n print \"iteration #\" , iteration\n print \"input list of cities (should be a list of indices 0-49) -->\" + str(cities_current)\n print \"weight of the tour above --> \" , distance_current \n \"\"\"\n\n step = 0\n while temperature > self.end_temp:\n # computing indices of the two cities to swap\n # never move the first city (??)\n index = random.sample(xrange(self.numcities-1), 2)\n # print \"indices: \" , index\n ### TEST ###\n # print \"These are the indices of cities to be swapped \" + str(index)\n # why this? 
not sure that we need it\n # index[0] += 1\n # index[1] += 1\n # naming the swapped cities\n cityA = index[0]\n cityB = index[1]\n\n # optimize by recomputing only the changed distances\n \n ha = random.randint(0,1)\n\n # creating a new list of the swapped cities\n if (ha > .2):\n swap_before = self.distance_swap(cities_new, cityA, cityB)\n # ensure that this swap creates a valid path, otherwise start over\n if self.is_valid_tour(swap_before) == False:\n # print \"Does this part actually run?\"\n continue\n else:\n swap_before = self.reverse_cities(cities_new, cityA, cityB)\n if self.is_valid_tour(swap_before) == False:\n continue\n\n ### TESTING TO SEE IF THIS IS THE PROBLEM\n # cities_new[cityA], cities_new[cityB] = cities_new[cityB], cities_new[cityA]\n swap_after = cities_new\n \"\"\"\n print \"Step: \" , step\n print \"before: \" , str(swap_before)\n print \"after: \" , str(swap_after)\n \"\"\"\n\n # and their costs\n ### TEST ###\n # print \"Now, cities_current and cities_new should only differ in their indices, \" + str(cityA) + \", \" + str(cityB)\n # print \"cities_current / cost --> \" , cities_current , \" / \" , self.tour_cost(cities_current)\n # print \"cities_new / cost -->\" , cities_new , \" / \" , self.tour_cost(cities_new)\n\n # compute the distance of the swapped city list\n # not exactly sure why these additions and subtractions work this way\n distance_new = self.tour_cost(swap_before)\n distance_current = self.tour_cost(swap_after)\n \"\"\"\n print \"What are distance new and distance current?\"\n print \"current: \" , distance_current\n print \"new: \" , distance_new\n \"\"\"\n # Kirkpatrick acceptance probability\n \n diff = distance_new - distance_current\n \"\"\"\n current_cost = self.tour_cost(cities_current)\n new_cost = self.tour_cost(swap_before)\n\n diff = new_cost - current_cost\n \"\"\"\n\n # print \"What is diff? --> \" , diff\n if diff < 0 or math.exp( -diff / temperature ) > random.randint(0,1):\n # print \"Does this ever execute?\"\n cities_current = swap_before\n distance_current = distance_new\n\n \"\"\"\n else:\n # no improvement and worsened result not within alpha\n distance_new = distance_current\n cities_current = cities_current[:]\n \"\"\"\n\n # update the best known if solution is an improvement\n # not for the annealing, but for restarts (in which we start\n # with the best solution known)\n if distance_current < currentBest_weight:\n citiesBest = cities_current\n currentBest_weight = distance_current\n\n # decrease temperature by alpha, increment step counter\n distances_current.append(distance_current)\n distances_best.append(currentBest_weight)\n temperature = temperature * self.alpha\n step += 1\n\n self.bestScore = currentBest_weight\n self.bestTour = citiesBest\n\n except KeyboardInterrupt, e:\n print \"Interrupted on user demand\"\n print \"performed iterations: \" + str(iteration)\n print \"current best tour: \" + str(citiesBest)\n print \"cost of current best tour: \" + str(currentBest_weight)\n\n \n return citiesBest, distances_current, distances_best, starting_weight",
"def __expand_edge(self, graph, i, j):\n #add\n G = graph.clone_directed()\n if G.add_dedge(i, j):\n if G.is_acyclic() and self.__CE(G):\n if not self.__queue_has(G):\n score = self.__score(G)\n self._queue[G] = score\n #delete\n G = graph.clone_directed()\n if G.remove_dedge(i, j):\n if G.is_acyclic() and self.__CE(G):\n if not self.__queue_has(G):\n score = self.__score(G)\n self._queue[G] = score\n #reverse\n G = graph.clone_directed()\n if G.reverse_dedge(i, j):\n if G.is_acyclic() and self.__CE(G):\n if not self.__queue_has(G):\n score = self.__score(G)\n self._queue[G] = score",
"def dijkstra1(self, game, graph, start, player):\n graph = {key: value for (key, value) in graph.items()} # Create a new dict to avoid the orignal one be replaced\n shortest_distance = {} # In the following 18 line of codes, which are derived and adjused from the Ian Sullivan(2017)(start) \n unseenNodes = graph # the code source: Implementation of dijkstra in python https://www.youtube.com/watch?v=IG1QioWSXRI&t=1s\n inf = 5000 \n size_board = game.size\n\n for node in unseenNodes:\n shortest_distance[node] = inf\n shortest_distance[start] = 0\n while unseenNodes:\n minNode = -10\n for node in unseenNodes:\n if minNode == -10:\n minNode = node\n elif shortest_distance[node] < shortest_distance[minNode]:\n minNode = node\n\n for childNode, distance in graph[minNode].items():\n if distance + shortest_distance[minNode] < shortest_distance[childNode]:\n shortest_distance[childNode] = distance + shortest_distance[minNode]\n\n unseenNodes.pop(minNode) # In the upper 18 line of codes, which are derived and adjused from the Ian Sullivan(2017)(end)\n\n # In the below, all codes is to identify the smallest distnace for red/blue pieces to the two side border\n if player == HexBoard.RED: # red is vertical\n edgeupper1 = []\n edgelower2 = []\n\n for i in range(size_board):\n a_edge1 = (i, 0)\n a_edge2 = (i, size_board - 1)\n edgeupper1.append(a_edge1)\n edgelower2.append(a_edge2)\n else: # blue is horizontal\n edgeupper1 = []\n edgelower2 = []\n\n for i in range(size_board):\n a_edge1 = (0, i)\n a_edge2 = (size_board - 1, i)\n edgeupper1.append(a_edge1)\n edgelower2.append(a_edge2)\n target_upper = inf\n for candidate in edgeupper1:\n if shortest_distance[candidate] < target_upper:\n target_upper = shortest_distance[candidate]\n target_lower = inf\n for candidate2 in edgelower2:\n if shortest_distance[candidate2] < target_lower:\n target_lower = shortest_distance[candidate2]\n return target_lower + target_upper",
"def adjust_move_node(self,i,new_pnt,nbrs):\n\n # HERE -- not compatible with pure python code.\n \n # find existing constrained edges\n # for each constrained edge:\n # will the updated edge still be valid?\n # if not, update new_pnt to be halfway between the old and the new,\n # and loop again.\n\n for shorten in range(15): # maximum number of shortenings allowed\n all_good = True\n\n # Create a probe vertex so we can call check_line_is_clear()\n # sort of winging it here for a measure of close things are.\n if abs(self.points[i] - new_pnt).sum() / (1.0+abs(new_pnt).max()) < 1e-8:\n log.warning(\"adjust_move_node: danger of roundoff issues\")\n all_good = False\n break\n\n all_good=self.check_line_is_clear_batch(p1=new_pnt,n2=nbrs)\n if all_good:\n break\n else:\n new_pnt = 0.5*(self.points[i]+new_pnt)\n log.debug('adjust_move_node: adjusting') \n if all_good:\n return new_pnt\n else:\n return self.points[i]",
"def _update_indexes(self):\n ntemp = 0\n ntarg = 0\n for pos in self.positions:\n if pos.temp!='-':\n ntemp+=1\n if pos.targ!='-':\n ntarg+=1\n pos.ntemp = ntemp\n pos.ntarg = ntarg",
"def yens_shortest_paths(G, start, target, max_paths=10):\n letters = list(string.ascii_letters)\n shortestPaths = []\n k = 0\n try:\n paths = list(itertools.islice(nx.shortest_simple_paths(G, start, target), max_paths))\n except Exception:\n raise PyeMapShortestPathException(\"No paths between \" + str(start) + \" and \" + str(target) + \" were found.\")\n for k in range(0, len(paths)):\n path = paths[k]\n sum = 0\n weights = []\n for i in range(0, len(path) - 1): # sum up edge weights\n sum += (G[path[i]][path[i + 1]]['weight'])\n weights.append(G[path[i]][path[i + 1]]['weight'])\n path = ShortestPath(path, weights, sum)\n shortestPaths.append(path)\n shortestPaths = sorted(shortestPaths)\n for i in range(0, len(shortestPaths)):\n path = shortestPaths[i].path\n if i == 0: # shortest path gets bolder edges\n for j in range(len(path) - 1):\n G[path[j]][path[j + 1]]['penwidth'] = 6.0\n G[path[j]][path[j + 1]]['style'] = 'solid'\n G.nodes[path[j]]['penwidth'] = 6.0\n G.nodes[path[j + 1]]['penwidth'] = 6.0\n G[path[j]][path[j + 1]]['color'] = '#778899FF'\n # make the nodes look opaque if they are connected to the source\n if len(G.nodes[path[j]]['fillcolor']) != 9:\n G.nodes[path[j]]['fillcolor'] += 'FF'\n G.nodes[path[j]]['color'] = '#708090FF'\n if len(G.nodes[path[j + 1]]['fillcolor']) != 9:\n G.nodes[path[j + 1]]['fillcolor'] += 'FF'\n G.nodes[path[j + 1]]['color'] = '#708090FF'\n else:\n for j in range(len(path) - 1):\n G[path[j]][path[j + 1]]['penwidth'] = 6.0\n G[path[j]][path[j + 1]]['style'] = 'solid'\n G.nodes[path[j]]['penwidth'] = 6.0\n G.nodes[path[j + 1]]['penwidth'] = 6.0\n if G[path[j]][path[j + 1]]['color'] != '#778899FF':\n G[path[j]][path[j + 1]]['color'] = '#7788997F'\n # make the nodes look opaque if they are connected to the source\n if len(G.nodes[path[j]]['fillcolor']) != 9:\n G.nodes[path[j]]['fillcolor'] += '7F'\n G.nodes[path[j]]['color'] = '#7080907F'\n if len(G.nodes[path[j + 1]]['fillcolor']) != 9:\n G.nodes[path[j + 1]]['fillcolor'] += '7F'\n G.nodes[path[j + 1]]['color'] = '#7080907F'\n shortestPaths[i].set_id(\"1\" + letters[i])\n br = Branch(1, shortestPaths[0].path[-1])\n for pt in shortestPaths:\n br.add_path(pt)\n return [br]",
"def getBestMachingCell(j, i):\n cellIndex = np.ravel_multi_index((i, j*np.ones(np.shape(i), dtype=int)), (config.SM['M'], config.SM['N']), order='F')\n dendrites_temp = np.isin(config.SM['dendriteToCell'], cellIndex)\n [den_r, den_c] = np.nonzero(dendrites_temp)\n dendrites = np.ravel_multi_index((den_r, den_c), np.shape(config.SM['dendriteToCell']), order='F')\n lcChosen = False\n addNewSynapsesToDendrite = -1\n updateFlag = False\n\n if (np.shape(dendrites)!=0):\n id = np.argmax(config.SM['dendritePositive'][dendrites])\n val = config.SM['dendritePositive'][dendrites[id]][0]\n if (val > config.SM['minPositiveThreshold']):\n chosenCell = config.SM['dendriteToCell'][dendrites[id]][0]\n lcChosen = True\n if (val < config.SM['Theta']):\n addNewSynapsesToDendrite = dendrites[id]\n updateFlag = True\n\n # Add new dendrite if no dendrites of the active cells are above minimum threshold.\n if (lcChosen == False):\n # Randomly choose location to add a dendrite.\n ndpc_ind = np.unravel_index(cellIndex, (config.SM['M'], config.SM['N']), order='F')\n args = np.argsort(config.SM['numDendritesPerCell'][ndpc_ind])\n sorted_cellIndexs = np.unravel_index(cellIndex[args], (config.SM['M'], config.SM['N']), order='F')\n val = config.SM['numDendritesPerCell'][sorted_cellIndexs]\n tie = (val == val[0])\n rid = rd.randint(0, np.sum(tie))\n chosenCell = cellIndex[args[rid]]\n updateFlag = True\n return [chosenCell, addNewSynapsesToDendrite, updateFlag]",
"def _updateCost(self):\n\n self._numSyn = len(self._idxs)\n\n self._numSynBits = self._numSyn * (self._synFmt.numWgtBits +\n self._synFmt.numDlyBits)\n\n if self._synFmt.compression == Compression.SPARSE:\n self._numSynBits += self._numSyn * self._synFmt.numIdxBits\n elif self._synFmt.compression == Compression.RUNLENGTH:\n self._numSynBits += self._numSyn * self._synFmt.numSkipBits\n\n self._numPrefixBits = 4 + 6 # synMemFmtId + numSyn\n if self._synFmt.compression is not Compression.SPARSE:\n self._numPrefixBits += self._synFmt.numIdxBits\n\n self._numBits = self._numPrefixBits + self._numSynBits\n\n self._cost = self._numBits",
"def computeShortestPath(self):\n for row in range(len(self.graph)):\n # track row, which vertices to compute greedy Dijkstra\n v = self.graph[row][0][0] # key to sd list\n\n for ele in range(1, len(self.graph[row])):\n if len(self.graph[row][ele]) == 2:\n self.computeGreedyDijkstra(v, self.graph[row][ele])",
"def main():\n # Standard graph theory notation for number of nodes and edges. \n m = int(input(\"Enter the number of edges: \"))\n n = int(input(\"Enter the number of nodes: \"))\n graph = [[0 for i in range(n)] for j in range(n)]\n \n print(\"Enter the edges as vertex1 vertex2 weight: \") #Sample 0 1 3\n for i in range(m):\n x, y, w = input().split(\" \")\n x = int(x)\n y = int(y)\n w = int(w)\n graph[x][y] = w\n graph[y][x] = w\n\n source = int(input(\"Enter the source: \"))\n \n dist = djikstra(graph, n, source)\n dest = int(input(\"Enter the destination vertex: \"))\n print(\"Shortest distance =\", dist[dest])",
"def update_largest_index(self):\n while len(self.main_list[self.largest_affinity]) < 1:\n self.largest_affinity -= 1",
"def _compute_yen_shortest_paths(graph, target, n,\n distance, exclude_edge=False):\n pass",
"def removenode(self):\r\n if not self.head:\r\n return -1\r\n else:\r\n indexnode = self.head\r\n node = indexnode.head\r\n indexnode.cnt -= 1\r\n if node.next:\r\n indexnode.head = indexnode.head.next\r\n indexnode.head.pre = None\r\n del self.keymap[node.key]\r\n del node\r\n return -1\r\n else:\r\n f = node.f\r\n indexnode.head = None\r\n indexnode.tail = None\r\n del self.keymap[node.key]\r\n del node\r\n return f",
"def shortestReach(n, m, edges, s):\n\n nodes = [i for i in range(1, n + 1)]\n graph = AdjacencyList(nodes, edges).graph\n visited = {i : False for i in range(1, n + 1)}\n distances = {i : -1 for i in range(1, n + 1)}\n \n print(graph)\n\n root_level = [s]\n q = [root_level]\n curr_dist = 0\n while q:\n print(visited)\n level = q[0]\n del q[0]\n next_level = []\n for node in level:\n if not visited[node]:\n next_level += graph[node]\n visited[node] = True\n distances[node] = curr_dist\n if next_level:\n q.append(next_level)\n curr_dist += 6\n\n dists = list(distances.values())\n return dists[:s - 1] + dists[s : ]",
"def record_best_path_to(self, current, neighbor):\n # TODO: Record the best path to a node, by updating cameFrom, gScore, and fScore\n\n \n c=current\n gts=self.get_tenative_gScore(current,neighbor)\n calf=self.calculate_fscore(current)\n \n self.cameFrom[neighbor]=c\n self.gScore[neighbor]=gts\n self.fScore[current]=calf"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reverse an adjacency list. This is only relevant for directed graphs. For undirected graphs, the reverse is just the same as the original.
|
def reverse_adj_list(adjacency_list):
# determine the number of vertices
n = len(adjacency_list)
    # initialize the adjacency list to be returned
retlist = []
for i in range(0, n):
retlist.append([])
# loop over all nodes
for i, l in enumerate(adjacency_list):
# create an edge from node j to node i
for j in l:
retlist[j].append(i)
return retlist
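
# Hypothetical usage sketch: for the directed edges 0->1, 0->2, 1->2 the
# reversed list contains 1->0, 2->0 and 2->1.
assert reverse_adj_list([[1, 2], [2], []]) == [[], [0], [0, 1]]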
|
[
"def reverse(self):\n cur_node = self.getHead()\n prev_node = None\n\n while cur_node is not None:\n next_node = cur_node.getNext()\n cur_node.setNext(prev_node) # reverse Node link\n prev_node = cur_node\n cur_node = next_node\n\n self.setHead(prev_node) # update list head to last Node",
"def reverse(self):\n\n # if the head is empty return\n if self.head is None:\n raise ValueError(\"The list is empty\")\n\n # initalize a few of our variables\n last = self.head\n curr = self.head\n prev = self.end\n next = curr.next_node\n\n # reassign the last node to the head's next node\n curr.next_node = prev\n\n # the old previous now becomes the old current\n prev = curr\n\n # the old current now becomes the old next, the one after the head\n curr = next\n\n # keep going until you reach the last node\n while curr != last:\n # reassign next\n next = curr.next_node\n\n # do the same reassignment steps as upabove\n curr.next_node = prev\n prev = curr\n curr = next\n\n # one final reassignment, make sure the last node points to the head\n curr.next_node = prev\n\n # then redefine your head and tail.\n self.head = prev\n self.end = curr",
"def reverse_in_place(self):\n\n if self.head is None:\n # Cant reverse an empty deque\n return\n\n curr_node = self.head\n while curr_node is not None:\n old_next = curr_node.next\n curr_node.prev, curr_node.next = curr_node.next, curr_node.prev\n curr_node = old_next\n\n self.head, self.tail = self.tail, self.head",
"def deep_reverse(L):\n L.reverse()\n for i in L:\n i.reverse()",
"def reverse(self):\n prev = None\n current = self.head \n while(current is not None): \n next_ = current.next\n current.next = prev \n prev = current \n current = next_\n self.head = prev",
"def reverse(self):\n\n '''\n 1->2->3 .... 3->2->1\n '''\n\n # use deep copy because python is pass-by-assignment\n curr = copy.deepcopy(self.head)\n nextNode = None\n prevNode = None\n\n while(curr):\n nextNode = curr.next\n curr.next = prevNode\n prevNode = curr\n curr = nextNode\n\n return LinkedList(prevNode)",
"def reverse(self):\n self.flips.reverse()\n for e in self.flips:\n self.permute(e, False)\n self.flips = []",
"def reverse_linked_list1(list_to_reverse):\n\n my_new_list = LinkedList()\n all_elements = []\n\n while len(list_to_reverse) > 0:\n element = list_to_reverse.pop()\n all_elements.append(element)\n\n for e in all_elements[::-1]:\n my_new_list.add(e)\n\n return my_new_list",
"def reverselist(lista):\n return list(reversed(lista))",
"def reverse_list(l):\n new_l = l\n new_l.reverse()\n\n return new_l",
"def reverse(L):\r\n return L[::-1]",
"def deep_reverse(L):\n temp = list(L)\n for i in range(len(L)):\n # reverse top list\n L[len(L) - 1 - i] = temp[i]\n\n # reverse inner list\n inL = L[len(L) - 1 - i]\n temp2 = list(inL)\n for j in range(len(inL)):\n inL[len(inL) - 1 - j] = temp2[j]",
"def reverse_graph(self):\n if self._type == 'u':\n return self\n reversed_graph = KjGraph('d')\n for vertex in self:\n for edge in vertex.get_edges():\n reversed_graph.add_edge(edge, vertex.get_node_id())\n return reversed_graph",
"def getReverse(self):\n\t\treverse=copy.deepcopy(self)\n\t\tfor n in reverse.depGraph.nodes():\n\t\t\tfor s in reverse.depGraph.node[n]['cpt']:\n\t\t\t\ttemp=reverse.depGraph.node[n]['cpt'][s][0]\n\t\t\t\treverse.depGraph.node[n]['cpt'][s][0]=reverse.depGraph.node[n]['cpt'][s][1]\n\t\t\t\treverse.depGraph.node[n]['cpt'][s][1]=temp\n\t\treturn reverse",
"def reverse(self, in_place=False):\n pass",
"def reverse_vertices(vertices):\n reversed_vertices = []\n nv = len(vertices)\n for i in range(nv-1, -1, -1):\n reversed_vertices.append(vertices[i])\n return reversed_vertices",
"def reverse_list(l):\n\n return l[::-1]",
"def reverse_linked_list2(my_list):\n\n previous_element = None\n element = my_list.head\n\n while element:\n next_element = element.next\n element.next = previous_element\n previous_element = element\n element = next_element\n my_list.head = previous_element",
"def reverse(self):\n new_g = defaultdict(list)\n for i in self.__graph.keys():\n for j in self.__graph[i]:\n new_g[j].append(i)\n self.__graph = new_g"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Subproblem in dynamic programming. Count the number of descending sequences given a total and the head. Note that a one-term sequence is also considered a sequence.
|
def num_desc_seq_given_total_and_head(total, head):
if total < 1 or head < 1:
return 0
# base case: sequence has only one term
if total == head:
return 1
# recursive case: sequence has more than one term
    # the second term cannot exceed the head; by transitivity the rest of the sequence then stays descending
num_seq = 0
for _second in range(1, head + 1):
num_seq += num_desc_seq_given_total_and_head(total - head, _second)
return num_seq
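
# Hypothetical usage sketch: summing the subproblem over every possible head
# counts all descending (non-increasing) sequences with the given total, i.e.
# the integer partitions of the total.  For total = 5 there are 7 of them:
# [5], [4, 1], [3, 2], [3, 1, 1], [2, 2, 1], [2, 1, 1, 1], [1, 1, 1, 1, 1].
assert sum(num_desc_seq_given_total_and_head(5, head)
           for head in range(1, 6)) == 7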
|
[
"def num_desc_prime_seq_given_total_and_head(total, head, list_of_primes, set_of_primes):\n # sanity check\n assert head in set_of_primes, f\"total: {total}, head: {head}\"\n assert total >= head, f\"total: {total}, head: {head}\"\n\n # base case: sequence has only one term\n if total == head:\n return 1\n\n # recursive case: sequence has more than one term\n # the second term cannot exceed the head; take advantage of transitivity\n num_seq = 0\n for _second in list_of_primes:\n if _second > head or _second > total - head:\n break\n else:\n num_seq += num_desc_prime_seq_given_total_and_head(\n total - head, _second, list_of_primes, set_of_primes\n )\n\n return num_seq",
"def count(seq):\n return sum(1 for x in seq)",
"def count(seq): # real signature unknown; restored from __doc__\n pass",
"def middle_and_length(self, head):\n if head is None:\n return None, 0\n\n slower = head\n faster = head.next\n count = 1\n while faster:\n count += 1\n faster = faster.next\n if faster is None:\n return slower, count\n count += 1\n faster = faster.next\n slower = slower.next\n\n return slower, count",
"def numSubseq(self, nums: List[int], target: int) -> int:\n # while subsequence order does matter\n # here we only care about the min and max\n nums.sort()\n l, r = 0, len(nums) - 1\n res = 0\n # in case of overflow\n mod = 10**9 + 7\n while l <= r:\n if nums[l] + nums[r] > target:\n r -= 1\n else:\n # same as : res += 2 ** (r - l), res %= mod\n # write it this way otherwise will overflow\n res += pow(2, r - l, mod)\n l += 1\n return res % mod",
"def getlen(self, head: ListNode):\n n = head\n l = 0\n while n:\n l += 1\n n = n.next\n return l",
"def count(seq, predicate=None):\n i = 0\n if predicate is None:\n #if len works, use that- otherwise iterate over.\n try:\n return len(seq)\n except TypeError:\n pass\n for _ in seq:\n i += 1\n else:\n for item in seq:\n if predicate(item):\n i += 1\n return i",
"def count_inversion(sequence):\n count = 0\n for i, n in enumerate(sequence):\n if i or i < len(sequence):\n for x in range(1, i+1):\n if sequence[i-x] > sequence[i]:\n count += 1\n return count",
"def recursive_LIS(sequence):\n\n if type(sequence) == str:\n if os.path.isfile(sequence):\n f_out = True\n with open(sequence, 'r') as f:\n s = f.readlines()[1].replace(' ', '')\n else: f_out = False\n else:\n f_out = False\n try:\n s = [int(i) for i in sequence]\n except:\n raise ValueError(\"First arg must be an integer sequence.\")\n\n def rec_subseqs(seq):\n\n \"\"\"\n Takes a list of non-repeated integers and returns all\n increasing subsequences in the list if inc = True, all\n decreasing subsequences in the list if inc = False.\n \"\"\"\n ## Way too slow to work on large sequences u_u\n subseqs = []\n s_len = len(seq)\n if s_len == 0:\n return []\n elif s_len == 1:\n return [seq]\n elif s_len == 2:\n if seq[0] > seq[1]:\n return [[seq[0]]]\n else:\n return [seq]\n else:\n for i in range(s_len):\n head = seq[i]\n tails = list((n for n in seq[i+1:] if n > head))\n #tails = list(filter(lambda x: x > h, seq[i+1:]))\n #tails = [j for j in seq[i+1:] if j > head]\n for j in range(len(tails)):\n for subseq in rec_subseqs(tails[j:]):\n subseqs += [[head] + subseq]\n return subseqs\n\n max_inc, max_dec = [], []\n for seq in rec_subseqs(s):\n if len(seq) > len(max_inc):\n max_inc = seq\n for seq in rec_subseqs([-i for i in s]):\n if len(seq) > len(max_dec):\n max_dec = seq\n max_dec = [-i for i in max_dec]\n\n if f_out:\n with open('output_{}'.format(sequence), 'w') as fout:\n for seq in [max_inc, max_dec]:\n for i in seq:\n fout.write('{} '.format(i))\n fout.write('\\n')\n return max_inc, max_dec",
"def num_final(tree: HuffmanNode, count = 0) -> int:\n if tree.is_leaf():\n return count\n left_count = num_final(tree.left,count)\n new_count = num_final(tree.right, left_count)\n tree.number = new_count\n return new_count + 1",
"def ll_len(self) -> int:\n count = 0\n curr = self.head\n while curr:\n count += 1\n curr = curr.next\n return count",
"def count(S, n):\n if n == 0:\n print(S)\n else:\n print(S)\n count(increment(S), n - 1)",
"def _seq_len(seq):\n i = 0\n for item in seq:\n i += 1\n return i",
"def count_in_sorted(arr, target, target_inc):\n return lowest_index(arr, target_inc) - lowest_index(arr, target)",
"def count_longest_one_sequence(binary):\n longest_sequence = 0\n counting = True\n current = 0\n\n for b in binary:\n\n if b == '0' and counting:\n if current > longest_sequence:\n longest_sequence = current\n counting = False\n current = 0\n\n elif b == '1' and not counting:\n counting = True\n current = 1\n\n elif b == '1' and counting:\n current += 1\n\n if counting and current > longest_sequence:\n longest_sequence = current\n\n return longest_sequence",
"def max_subsequence(ilist, is_circular = False):\n n = len(ilist)\n if is_circular: ilist = ilist + ilist\n maxsum = None\n result = None\n for start in range(n):\n for slen in range(1,n+1):\n if start + slen <= len(ilist):\n test = sum(ilist[start:start+slen])\n if maxsum is None or test > maxsum:\n maxsum = test\n result = (start,(start+slen-1) % n)\n return result",
"def count_arrangements(jolts: List[int]) -> int:\n _BLOCK_SIZE_TO_NUM_ARRANGEMENTS = {\n 1: 1,\n 2: 2,\n 3: 4,\n 4: 7,\n }\n\n differences = [b - a for a, b in zip(jolts, jolts[1:])]\n one_block_lengths = [\n len(list(values))\n for difference, values in groupby(differences)\n if difference == 1\n ]\n\n num_arrangements_for_blocks = [\n _BLOCK_SIZE_TO_NUM_ARRANGEMENTS[block_size]\n for block_size in one_block_lengths\n ]\n\n return reduce(mul, num_arrangements_for_blocks)",
"def length(self):\n counter = 0\n current_node = self.head\n while current_node != None:\n counter += 1\n current_node = current_node.next\n return counter",
"def subarray_maximum_total(arr: list) -> int:\n len_arr = len(arr)\n\n to_right = distance_to_greater_value(arr, True)\n to_left = list(reversed([len_arr - x - 1 for x in distance_to_greater_value(list(reversed(arr)), False)]))\n\n total = 0\n for idx in range(len_arr):\n total += arr[idx] * (idx - to_left[idx]) * (to_right[idx] - idx)\n return total"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
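A minimal usage sketch for num_desc_seq_given_total_and_head above, assuming the function is in scope: summing the counts over every possible head recovers the number of integer partitions of the total, since each partition corresponds to exactly one non-increasing sequence with a fixed first term.

total = 5
count = sum(num_desc_seq_given_total_and_head(total, head) for head in range(1, total + 1))
print(count)  # 7: the partitions 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1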
Subproblem in dynamic programming. Using a precomputed list & set of primes, count the number of descending (non-increasing) prime sequences given a total and the head. Note that a one-term sequence is also considered a sequence.
|
def num_desc_prime_seq_given_total_and_head(total, head, list_of_primes, set_of_primes):
# sanity check
assert head in set_of_primes, f"total: {total}, head: {head}"
assert total >= head, f"total: {total}, head: {head}"
# base case: sequence has only one term
if total == head:
return 1
# recursive case: sequence has more than one term
# the second term cannot exceed the head; take advantage of transitivity
num_seq = 0
for _second in list_of_primes:
if _second > head or _second > total - head:
break
else:
num_seq += num_desc_prime_seq_given_total_and_head(
total - head, _second, list_of_primes, set_of_primes
)
return num_seq
|
[
"def num_desc_seq_given_total_and_head(total, head):\n if total < 1 or head < 1:\n return 0\n\n # base case: sequence has only one term\n if total == head:\n return 1\n\n # recursive case: sequence has more than one term\n # the second term cannot exceed the head; take advantage of transitivity\n num_seq = 0\n for _second in range(1, head + 1):\n num_seq += num_desc_seq_given_total_and_head(total - head, _second)\n\n return num_seq",
"def problem35(limit):\n\n result = 0\n for p in pelib.primes_sieve(limit):\n if is_circular_primes(p):\n result += 1\n\n return result",
"def optimus_prime(n):\n count = 0\n for num in range(1,n):\n if num <= 1:\n continue\n for i in range(2,num):\n if (num % i) == 0:\n break\n else:\n count += 1\n print(count)",
"def computeLenHammings(prime_list, max_num):\n hammings = [1]\n new_hammings = [1]\n while True:\n old_hammings = set(hammings)\n cur_hammings = set([])\n for prime in prime_list:\n cur_hammings = getNewHammings(prime, new_hammings, old_hammings, max_num).union(cur_hammings)\n if len(cur_hammings) == 0:\n return len(hammings)\n new_hammings = list(cur_hammings)\n hammings = old_hammings.union(cur_hammings)",
"def get2nb(nbprimes):\n \n if nbprimes>len(_primes):\n # start with highest prime so far\n i = _primes[-1]\n # and add...\n i = i + 1 + isodd(i)*1\n \n while len(_primes)<nbprimes:\n if divtrial(i): _primes.append(i)\n i=i+2\n\n return _primes[:nbprimes]",
"def problem():\n return sum(prime_numbers(2000000))",
"def solve():\r\n pgen = get_prime_gen() \r\n result = 0\r\n for p in pgen:\r\n if p < 2000000:\r\n print p\r\n result += p\r\n else:\r\n break\r\n return result",
"def computeP(list):\n P = []\n P.append(0)\n for counter in range(1, len(list)):\n currentPtr = counter - 1\n maxIndex = 0\n while(currentPtr >= 0):\n if list[counter][0] >= list[currentPtr][1]:\n maxIndex = currentPtr + 1\n break\n currentPtr -= 1\n P.append(maxIndex)\n return P",
"def get_ascending_primes(max):\n isprime = [True for _ in range(max)]\n isprime[0], isprime[1] = False, False\n current_val = 2\n primes = []\n\n #Apply a Sieve of Eroesthes (I have no idea how to spell this...)\n while current_val < max:\n if isprime[current_val]:\n primes.append(current_val)\n for multiple in range(current_val, max, current_val):\n if multiple == current_val:\n continue\n isprime[multiple] = False\n current_val += 1\n return primes",
"def count(seq):\n return sum(1 for x in seq)",
"def test_subsecventa_relPrime():\n assert subsecventa_relPrime([1, 2, 3]) == [1, 2, 3]\n assert subsecventa_relPrime([1, 5, 25]) == [1, 5]\n assert subsecventa_relPrime([]) == []\n assert subsecventa_relPrime([625, 5, 125]) == []\n assert subsecventa_relPrime([-1, 10, 22]) == []\n assert subsecventa_relPrime([100, 10, 20, 1, 2, 1, -3]) == [20, 1, 2, 1]",
"def Solution10():\n return sum(get_primes(2000000))",
"def numSubseq(self, nums: List[int], target: int) -> int:\n # while subsequence order does matter\n # here we only care about the min and max\n nums.sort()\n l, r = 0, len(nums) - 1\n res = 0\n # in case of overflow\n mod = 10**9 + 7\n while l <= r:\n if nums[l] + nums[r] > target:\n r -= 1\n else:\n # same as : res += 2 ** (r - l), res %= mod\n # write it this way otherwise will overflow\n res += pow(2, r - l, mod)\n l += 1\n return res % mod",
"def primesums():\n n = 0\n for p in primes():\n n += p\n yield n",
"def summation_of_primes():\n\tresult = 0\n\tn = 2000000\n\t# n = 10\n\tfor i in range(n):\n\t\tif Helpers.is_prime(i):\n\t\t\tresult += i\n\tHelpers.pr(result)",
"def solution(max_prime: int = 10**6) -> int:\n\n primes_count = 0\n cube_index = 1\n prime_candidate = 7\n while prime_candidate < max_prime:\n primes_count += is_prime(prime_candidate)\n\n cube_index += 1\n prime_candidate += 6 * cube_index\n\n return primes_count",
"def max_subsequence(ilist, is_circular = False):\n n = len(ilist)\n if is_circular: ilist = ilist + ilist\n maxsum = None\n result = None\n for start in range(n):\n for slen in range(1,n+1):\n if start + slen <= len(ilist):\n test = sum(ilist[start:start+slen])\n if maxsum is None or test > maxsum:\n maxsum = test\n result = (start,(start+slen-1) % n)\n return result",
"def circular_prime_answer(number, total):\n for i in range(1, number + 1):\n if circular_prime(i):\n total += i\n print(total)",
"def find_prime_sum_consecutive_prime(x):\n\n\tprime = return_primes(x)\n\tlength = len(prime)\n\tprime_set = set(prime)\n\tmax_prime = prime[-1]\n\tresult = []\n\tmin_length = 1\n\tfor start in range(0, length):\n\t\tfor window_len in range(start+min_length, length -start +1 ):\n\t\t\tcheck_prime = prime[start:window_len]\n\t\t\ts = sum(check_prime)\n\t\t\tif s in prime_set and len(check_prime) > 1:\n\t\t\t\tmin_length = len(check_prime)\n\t\t\t\tresult.append(s)\n\t\t\telif s > max_prime:\n\t\t\t\tbreak\n\treturn len(result)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
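A minimal usage sketch for num_desc_prime_seq_given_total_and_head above, assuming the function is in scope; the prime list and set are expected to be precomputed by the caller and are written out by hand here for a small bound. Summing over all prime heads not exceeding the total counts the ways to write the total as a sum of primes.

list_of_primes = [2, 3, 5, 7]
set_of_primes = set(list_of_primes)
total = 10
count = sum(
    num_desc_prime_seq_given_total_and_head(total, p, list_of_primes, set_of_primes)
    for p in list_of_primes
    if p <= total
)
print(count)  # 5: 7+3, 5+5, 5+3+2, 3+3+2+2, 2+2+2+2+2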
Generates coprime Pythagorean triplets where the greatest member of the triplet is under some bound; the ratio n/m of the generating (n, m) pairs can also optionally be bounded.
|
def pythagorean_triplets(
bound, ratio_lower_bound=0.0, ratio_upper_bound=1.0, coprime=True
):
from math import sqrt, ceil, floor
if coprime:
fac = Factorizer(bound)
bound_for_iteration = ceil(sqrt(bound))
triplets = []
# use the formula: (m^2 - n^2)^2 + (2mn)^2 = (m^2 + n^2)^2
for _m in tqdm(range(2, bound_for_iteration)):
_n_upper = min(_m, ceil(ratio_upper_bound * _m))
_n_lower = max(1, floor(ratio_lower_bound * _m))
for _n in range(_n_lower, _n_upper):
# calculate Pythagorean triplet
term_a = _m**2 - _n**2
term_b = 2 * _m * _n
term_c = _m**2 + _n**2
if coprime:
# skip triplets that are not coprime
a_factors = fac.factorize(term_a)
keep = True
for _factor in a_factors:
if term_b % _factor == 0:
keep = False
break
if not keep:
continue
if term_c <= bound:
_triplet = tuple(sorted([term_a, term_b, term_c]))
triplets.append(_triplet)
return triplets
|
[
"def pythagorean_triplets():\n c = 4\n while True:\n c += 1\n a_min = int(sqrt(2 * c - 1))\n a_max = int(c / sqrt(2)) + 1\n\n for a in range(a_min, a_max):\n b = int(sqrt(c * c - a * a))\n if a ** 2 + b ** 2 == c ** 2:\n yield (a, b, c)",
"def find_pythagorean_triplet(n):\n for c in range(n - 2, 0, -1):\n for a in range(n - c - 1, 0, -1):\n b = n - c - a\n # Check if a,b,c form a valid Pythagorean triplet.\n if a**2 + b**2 == c**2:\n return a, b, c",
"def pythagorean_triplet_product(n):\n for a in range(1, n):\n for b in range(1, n-a):\n c = (n - a - b)\n if a**2 + b**2 == c**2:\n return a*b*c",
"def special_pythagorean_triplet(val=1000):\n for a in xrange(1, val):\n for b in xrange(a + 1, val):\n for c in xrange(b + 1, val):\n if a + b + c == val:\n if a**2 + b**2 == c**2:\n # print \"Found it! a={} b={} c={}\".format(a, b, c)\n return a * b * c",
"def problem086():\n\n # solutions[k] is the set of all solutions where the largest side has length k.\n # A solution is a triple (x, y, z) such that 0 < x <= y <= z, and in the rectangular prism with dimensions x * y * z,\n # the shortest surface path from one vertex to the opposite vertex has an integral length.\n solutions = []\n\n # Generates all solutions where the largest side has length less than 'limit'.\n def generate_solutions():\n # Pythagorean triples theorem:\n # Every primitive Pythagorean triple with a odd and b even can be expressed as\n # a = st, b = (s^2-t^2)/2, c = (s^2+t^2)/2, where s > t > 0 are coprime odd integers.\n # Now generate all Pythagorean triples, including non-primitive ones.\n for s in itertools.count(3, 2):\n for t in range(s - 2, 0, -2):\n if s * s // 2 >= limit * 3:\n return\n\n if math.gcd(s, t) == 1:\n for k in itertools.count(1):\n a = s * t * k\n b = (s * s - t * t) // 2 * k\n c = (s * s + t * t) // 2 * k\n if a >= limit and b >= limit:\n break\n find_splits(a, b, c)\n find_splits(b, a, c)\n\n # Assumes that a^2 + b^2 = c^2.\n def find_splits(a, b, c):\n z = b\n for x in range(1, a):\n y = a - x\n if y < x:\n break\n if c * c == min(\n (x + y) * (x + y) + z * z,\n (y + z) * (y + z) + x * x,\n (z + x) * (z + x) + y * y,\n ):\n temp = max(x, y, z)\n if temp < limit:\n # Add canonical solution\n item = tuple(sorted((x, y, z)))\n solutions[temp].add(item)\n\n # cumulativesolutions[m] = len(solutions[0]) + len(solutions[1]) + ... + len(solutions[m]).\n cumulativesolutions = [0]\n\n limit = 1\n while True:\n # Extend the solutions list with blank sets\n while len(solutions) < limit:\n solutions.append(set())\n\n generate_solutions()\n\n # Compute the number of cumulative solutions up to and including a certain maximum size\n for i in range(len(cumulativesolutions), limit):\n sum = cumulativesolutions[i - 1] + len(solutions[i])\n cumulativesolutions.append(sum)\n if sum > 1000000:\n return i\n\n # Raise the limit and keep searching\n limit *= 2",
"def random_pythagorean_triple(max_hyp=1555):\n a = b = 0\n c = max_hyp + 1\n\n while (c > max_hyp):\n r = random.randint(1, 12) * 2\n rr = r * r // 2\n\n s = random.choice(factors(rr))\n t = rr // s\n\n a = r + s\n b = r + t\n c = r + s + t\n\n return a, b, c",
"def triples(limit):\n for tri, circ in fibonacci_triples(limit):\n k = 1\n while True:\n kcirc = k * circ\n if kcirc > limit: break\n yield (tri.mult(k), kcirc)\n k += 1",
"def gen_primes_upto(n):\r\n if n == 2:\r\n return\r\n\r\n table = [True] * n\r\n sqrtn = int(math.ceil(math.sqrt(n)))\r\n\r\n for i in range(2, sqrtn):\r\n if table[i]:\r\n for j in range(i * i, n, i):\r\n table[j] = False\r\n\r\n yield 2\r\n for i in range(3, n, 2):\r\n if table[i]:\r\n yield i",
"def cg_test(n):\n for i in range(2, n):\n if p.isprime(i):\n rest = n - i\n k = int(pow(rest/2, 0.5)) \n if 2*k*k == rest:\n return (i, k)\n return ()",
"def generate_solution(bounds):\n solution = []\n for i in range(3):\n solution.extend(generate_pair([bounds[i*2],bounds[i*2+1]],constraints[i]))\n \n assert len(solution)==6\n return solution",
"def _coprimeGen(n):\n max_iter = 1000\n i = 0\n p = randprime(1e4, min(n, 1e6))\n\n while p%n == 0:\n p = randprime(1e4, min(n, 1e6))\n if i > max_iter:\n raise RuntimeError(\"number coprime to {}\".format(n)\n + \" not found after max allowed iteration\")\n i += 1\n return p",
"def primes(upper_bound):\r\n if upper_bound >= 2:\r\n yield 2\r\n sieve_bound = (upper_bound - 1) // 2\r\n sieve = [True for _ in range(sieve_bound)]\r\n crosslimit = (round(upper_bound ** 0.5) - 1) // 2\r\n for i in range(crosslimit):\r\n if sieve[i]:\r\n n = 2 * i + 3\r\n\r\n j = 3\r\n m = (n * j - 3) // 2\r\n while m < sieve_bound:\r\n sieve[m] = False\r\n j += 2\r\n m = (n * j - 3) // 2\r\n\r\n for i in range(sieve_bound):\r\n if sieve[i]:\r\n yield 2 * i + 3",
"def get_combinations(n, r) :\n return list(itertools.combinations(range(n), r))",
"def mult_parities_python(bound, verbose=False):\n v = [None]*bound\n v[0] = None\n v[1] = int(0)\n P = [int(p) for p in prime_range(bound)]\n for p in P:\n v[p] = int(1)\n last = P\n last_parity = int(1)\n loops = floor(log(bound,2))+1\n bound = int(bound)\n for k in range(loops):\n cur = []\n cur_parity = (last_parity+int(1))%int(2)\n if verbose:\n print \"loop %s (of %s); last = %s\"%(k,loops, len(last))\n for n in last:\n for p in P:\n m = n * p\n if m >= bound:\n break\n if v[m] is None:\n v[m] = cur_parity\n cur.append(m)\n last_parity = cur_parity\n last = cur\n return v",
"def _gen_pair(min_digits, max_digits):\n n_digits = randrange(min_digits, max_digits + 1)\n a = randrange(10 ** (n_digits - 1), 10 ** n_digits - 1)\n b = randrange(10 ** (n_digits - 1), 10 ** n_digits - 1)\n if a > b:\n return a, b\n return b, a",
"def brute_force_triple_with_sum(n):\n for a in range(3, n // 3):\n b = a + 1\n while True:\n c = n - a - b\n if a**2 + b**2 == c**2:\n return (a, b, c)\n elif a**2 + b**2 > c**2:\n break\n\n b += 1",
"def pythagoreanTriples():\n\tcounter = 0\n\tfor side1 in range(1,501):\n\t for side2 in range(1,501):\n\t for side3 in range(1,501):\n\t if side3**2 == side1**2 + side2**2:\n\t print(side1,side2,side3)\n\t counter +=1\n\tprint(\"The number of such triples found is:\",counter)",
"def ways(n, k):\n res = 1\n for p in primes:\n c = 0\n while k % p == 0:\n k = k // p\n c += 1\n if c > 0:\n res *= comb(n + c - 1, c)\n if p > k:\n break\n if k != 1:\n res *= n\n return res % M",
"def findTriplets_inefficient(integerList,n): \n length=len(integerList)\n triplets=[]\n for i in range(0,length):\n for j in range(i+1,length):\n for k in range(j+1,length):\n if i!=j and j!=k and i!=k:\n if integerList[i]+integerList[j]+integerList[k]==n:\n triplets.append([i,j,k]) \n return triplets"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
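The entry above relies on two helpers that are not shown (Factorizer for the coprimality test and tqdm for progress display). As a rough, self-contained illustration of the same Euclid formula, the sketch below uses math.gcd and the opposite-parity condition to keep only primitive triples; it is a simplification, not the function's actual dependencies.

from math import gcd, isqrt

def primitive_triplets_sketch(bound):
    # (m^2 - n^2, 2mn, m^2 + n^2) is primitive iff gcd(m, n) == 1 and m, n have opposite parity
    triplets = []
    for m in range(2, isqrt(bound) + 1):
        for n in range(1, m):
            if (m - n) % 2 == 1 and gcd(m, n) == 1:
                a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
                if c <= bound:
                    triplets.append(tuple(sorted((a, b, c))))
    return triplets

print(primitive_triplets_sketch(30))
# [(3, 4, 5), (5, 12, 13), (8, 15, 17), (7, 24, 25), (20, 21, 29)]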
Recursive approach to generate all partitions of an array's elements into two lists.
|
def generate_all_partitions_from_list(arr):
if len(arr) == 0:
yield [], []
else:
for _l, _r in generate_all_partitions_from_list(arr[:-1]):
yield [*_l, arr[-1]], _r
yield _l, [*_r, arr[-1]]
|
[
"def partitionMemories(ir):\n for mems in partitions:\n partition_name=mems[0]\n dimention_to_partition=int(mems[1])\n settings=mems[2:][0]\n dim, dataType = getArrayInfo(partition_name)\n\n # Settings for fully partitioning\n if settings[0]=='*':\n print(\"\\tFully partitioning array \"+partition_name)\n partition_values=[[x] for x in xrange(dim[dimention_to_partition])]\n\n # Settings for block and cyclic partitions\n elif settings[0]==\"b\" or settings[0]==\"c\":\n blocks=int(settings[1])\n if blocks<1:\n blocks=1\n elif blocks>dim[dimention_to_partition]:\n blocks=dim[dimention_to_partition]\n\n # Splitting the array in blocks\n if settings[0]==\"b\":\n print(\"\\tPartitioning array \"+partition_name+\" in \"+str(blocks)+\" blocks of memory\")\n k, m = divmod(dim[dimention_to_partition], blocks)\n partition_values = [range(i*k+min(i, m),(i+1)*k+min(i+1,m)) for i in xrange(blocks)]\n\n # Splitting the array cyclically\n elif settings[0]==\"c\":\n print(\"\\tPartitioning array \"+partition_name+\" in \"+str(blocks)+\" memories cyclically\")\n partition_values = [range(dim[dimention_to_partition])[i::blocks] for i in xrange(blocks)]\n\n partition_dim=[dim[:dimention_to_partition]+[len(x)]+dim[dimention_to_partition+1:] for x in partition_values]\n createPartitions(partition_name,partition_values,partition_dim,dataType)\n updateGEP(partition_name,partition_values,partition_dim,dataType,dimention_to_partition)",
"def partitions_iter(elements,currentset=[]):\n if not elements:\n yield currentset\n else:\n for newset in powerset(elements):\n if newset:\n for p in partitions_iter([a for a in elements if a not in list(squash(currentset))+list(newset)],currentset+[list(newset)]):\n yield p",
"def _array_elements_sweep():\n\n # array_elements <= 8 noisy\n # larger array_elements extremely slow to synthesize (especially w/ nesting)\n for dim_size in range(12, 37, 12):\n yield dim_size",
"def partition(n):\n if n == 0:\n return [[]]\n\n partitions = []\n for k in [1, 2, 3]:\n if k > n:\n break\n for subpartition in partition(n - k):\n partitions.append([k] + subpartition)\n return partitions",
"def _partition(n, top):\n\n\tif top == 1:\n\t\treturn [[1] * n]\n\n\tif n == top:\n\t\treturn [[top]] + _partition(n, top - 1)\n\telse:\n\t\treturn [[top] + x for x in _partition(n - top, min(n - top, top))] + _partition(n, top - 1)",
"def _do_partition(total, maxelements, around=None, maxdz=None):\n if (around is None) != (maxdz is None):\n raise ValueError(\"Cannot define center or radius alone.\")\n\n if maxelements == 1:\n if around is not None and maxdz < abs(total - around[-maxelements]):\n return []\n else:\n return [[total]]\n res = []\n\n # get range to cover\n if around is None:\n first = 0\n last = total\n limit = None\n else:\n first = max(0, around[-maxelements] - maxdz)\n last = min(total, around[-maxelements] + maxdz)\n for x in range(first, last + 1):\n if around is not None:\n limit = maxdz - abs(x - around[-maxelements])\n for p in IntegerPartitions._do_partition(\n total - x, maxelements - 1, around, limit\n ):\n res.append([x] + p)\n return res",
"def integer_partition(size: int, nparts: int) -> Iterator[List[List[int]]]:\n for part in algorithm_u(range(size), nparts):\n yield part",
"def partitions(n):\n if n in xrange(11):\n return (1, 1, 2, 3, 5, 7, 11, 15, 22, 30, 42)[n]\n else:\n return sum(pow(-1, k + 1) * (partitions(n - k * (3 * k - 1) // 2) +\n partitions(n - k * (3 * k + 1) // 2)) for k in xrange(n, 0, -1))",
"def dynamic_partition(data: Tensor, partitions: Tensor, num_partitions: int):\n res = []\n for i in range(num_partitions):\n res += [data[(partitions == i).nonzero().squeeze(1)]]\n return res",
"def updateGEP(partition_name,partition_values,partition_dim,dataType,partitioned_dim):\n for idx,instr in enumerate(ir):\n if \"@\"+partition_name+\",\" in instr and \"getelementptr\"in instr:\n\n # Get the current which indexes of this array are being used in this instruction\n current_indexes=re.findall('\\i64 (.*?)[,\\s]', instr)[1:]\n \n # Now lets try to find out the subarray that this index belongs to\n for part_idx,_ in enumerate(partition_values):\n if current_indexes[partitioned_dim].isdigit():\n if int(current_indexes[partitioned_dim]) in partition_values[part_idx]:\n subarray_idx=part_idx\n break\n else:\n print(\"\\tUnable to partition \"+partition_name+\" - Please unroll it first\")\n shutil.copyfile(sys.argv[1],sys.argv[2])\n exit()\n\n # replace the array name\n ir[idx]=ir[idx].replace(\"@\"+partition_name+\",\",\"@\"+partition_name+\"_sub\"+str(subarray_idx)+\",\")\n\n # replace array dimension\n dim_text=generateDimText(partition_dim[subarray_idx],dataType)\n ir[idx]=ir[idx][:ir[idx].find(\"[\")-1]+dim_text+ir[idx][ir[idx].find(\"*\"):]\n\n # replace array index\n new_indexes=\" i64 0\"\n for i in range(len(partition_dim[0])):\n if i == partitioned_dim:\n for p in partition_values:\n if int(current_indexes[i]) in p:\n current_partition=p[:]\n break\n new_indexes=new_indexes+\", i64 \"+str(current_partition.index(int(current_indexes[i]))) \n else:\n new_indexes=new_indexes+\", i64 \"+current_indexes[i].replace(',','')\n ir[idx]=re.findall('(.*@\\S*,)',ir[idx])[0] + new_indexes +\"\\n\"",
"def createPartitions(partition_name,partition_values,partition_dim,dataType):\n for idx,instr in enumerate(ir):\n if \"@\"+partition_name+\" =\" in instr:\n del ir[idx]\n for i in range(len(partition_values)):\n print(\"\\tCreating partition \"+partition_name+\"_sub\"+str(i))\n dim_text=generateDimText(partition_dim[i],dataType)\n ir.insert(idx,\"@\"+partition_name+\"_sub\"+str(i)+\" = global\"+dim_text+\" zeroinitializer, align 8\\n\")\n break",
"def elements(dims, firstIdx=0):\n\n indcs = list(range(firstIdx, firstIdx + dims))\n\n blades = [()]\n\n for k in range(1, dims + 1):\n # k = grade\n\n if k == 1:\n for i in indcs:\n blades.append((i,))\n continue\n\n curBladeX = indcs[:k]\n\n for i in range(comb(dims, k)):\n if curBladeX[-1] < firstIdx + dims - 1:\n # increment last index\n blades.append(tuple(curBladeX))\n curBladeX[-1] = curBladeX[-1] + 1\n\n else:\n marker = -2\n tmp = curBladeX[:] # copy\n tmp.reverse()\n\n # locate where the steady increase begins\n for j in range(k - 1):\n if tmp[j] - tmp[j + 1] == 1:\n marker = marker - 1\n else:\n break\n\n if marker < -k:\n blades.append(tuple(curBladeX))\n continue\n\n # replace\n blades.append(tuple(curBladeX))\n curBladeX[marker:] = list(range(\n curBladeX[marker] + 1, curBladeX[marker] + 1 - marker))\n\n return blades",
"def partition_d1(start_value, end_value, partition_count):\n start_x = start_value\n dx = (end_value - start_value) / partition_count\n\n partitions = []\n for partition_i in range(1, partition_count + 1):\n if partition_i == partition_count:\n partitions.append((start_x, end_value))\n else:\n partitions.append((start_x, start_x + dx))\n\n start_x += dx\n return partitions",
"def a_partition(par):\n if par.m_q < 0:\n raise NotImplementedError(\"Q<0 not implemented.\")\n \n _parts = [_partition_gs, _partition_mq, _partition_left]\n for c_pairs in _parts:\n pairs = c_pairs(par)\n if is_valid(pairs, par) and not is_singular(pairs, par): \n return pairs\n\n # never get here\n raise RuntimeError(\"Failed to generate a_partition for %s\" % par)",
"def _partitions(self, s, k):\n if not k:\n yield [s]\n return\n for i in range(len(s) + 1):\n for tail in self._partitions(s[i:], k - 1):\n yield [s[:i]] + tail",
"def partitions(l, partition_size):\n for i in xrange(0, len(l), partition_size):\n yield l[i:i+partition_size]",
"def recursive_graph_partition(cost_s: csr_matrix, p_s: np.ndarray, idx2node: Dict, ot_hyperpara: Dict,\n max_node_num: int = 200) -> Tuple[List[np.ndarray], List[np.ndarray], List[Dict]]:\n costs_all = [cost_s]\n probs_all = [p_s]\n idx2nodes_all = [idx2node]\n costs_final = []\n probs_final = []\n idx2nodes_final = []\n n = 0\n while len(costs_all) > 0:\n costs_tmp = []\n probs_tmp = []\n idx2nodes_tmp = []\n for i in range(len(costs_all)):\n # print('Partition: level {}, leaf {}/{}'.format(n+1, i+1, len(costs_all)))\n p_t = estimate_target_distribution({0: probs_all[i]}, dim_t=2)\n # print(p_t[:, 0], probs_all[i].shape[0])\n cost_t = csr_matrix(np.diag(p_t[:, 0]))\n # cost_t = 1 / (1 + cost_t)\n ot_hyperpara['outer_iteration'] = probs_all[i].shape[0]\n trans, d_gw, p_s = Gwl.gromov_wasserstein_discrepancy(costs_all[i],\n cost_t,\n probs_all[i],\n p_t,\n ot_hyperpara)\n sub_costs, sub_probs, sub_idx2nodes = node_cluster_assignment(costs_all[i],\n trans,\n probs_all[i],\n p_t,\n idx2nodes_all[i])\n\n for key in sub_idx2nodes.keys():\n sub_cost = sub_costs[key]\n sub_prob = sub_probs[key]\n sub_idx2node = sub_idx2nodes[key]\n if len(sub_idx2node) > max_node_num:\n costs_tmp.append(sub_cost)\n probs_tmp.append(sub_prob)\n idx2nodes_tmp.append(sub_idx2node)\n else:\n costs_final.append(sub_cost)\n probs_final.append(sub_prob)\n idx2nodes_final.append(sub_idx2node)\n\n costs_all = costs_tmp\n probs_all = probs_tmp\n idx2nodes_all = idx2nodes_tmp\n n += 1\n return costs_final, probs_final, idx2nodes_final",
"def kPartitions(M, k):\n groups = [] # a list of lists, currently empty\n seq = range(M) ### indecies which we group\n\n def generate_partitions(i):\n if i >= M: ### we're out of elements, so just map them to tuples\n yield list(map(tuple, groups))\n\n else: ### still elements that are unassigned\n if M - i > k - len(groups): ### there are enough elements left to still get enough groups without them all be new groups\n for group in groups:\n group.append(seq[i])\n for x in generate_partitions(i + 1): ### recurse\n yield x\n group.pop() ### not exactly sure what this is doing, but it works.\n\n if len(groups) < k: ### we should add another group because we don't have enough\n groups.append([seq[i]])\n for x in generate_partitions(i + 1): ### recurse\n yield x\n groups.pop() ### not exactly sure what this is doing, but it works\n\n return generate_partitions(0)",
"def set_partitions(X):\r\n X = list(X)\r\n if len(X) == 1:\r\n yield [X]\r\n return\r\n \r\n x = X[0]\r\n for Y in powerset(X[1:]):\r\n Y_set = set(Y)\r\n Z = [z for z in X[1:] if z not in Y_set]\r\n \r\n if len(Z) == 0:\r\n yield [X]\r\n \r\n else:\r\n for p in set_partitions(Z):\r\n yield [[x] + list(Y)] + p"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
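A minimal usage sketch for generate_all_partitions_from_list above, assuming the generator is in scope: each element lands in either the left or the right list, so a two-element input yields 2^2 = 4 splits.

for left, right in generate_all_partitions_from_list([1, 2]):
    print(left, right)
# [1, 2] []
# [1] [2]
# [2] [1]
# [] [1, 2]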
Check if a set, represented as a sorted tuple, is a special sum set. Any set A is a special sum set iff, for any non-empty disjoint subsets B and C: S(B) != S(C), and if B contains more elements than C then S(B) > S(C).
|
def is_special_sum_set(set_as_sorted_tuple, verbose=False):
# basic set check: no duplicates
if len(set_as_sorted_tuple) != len(set(set_as_sorted_tuple)):
return False
# compute all the sums of non-empty subsets
# note that we do not need to check for disjointness because
# any common elements between B and C has no effect on the
# comparison between either S(B) vs. S(C) or len(B) vs. len(C)
subset_to_sum = nonempty_subset_sums(set_as_sorted_tuple)
sums = set()
size_to_sums = defaultdict(set)
for _tuple, _sum in subset_to_sum.items():
if _sum in sums:
return False
sums.add(_sum)
size_to_sums[len(_tuple)].add(_sum)
max_sum = 0
for _size in sorted(size_to_sums.keys()):
_sumset = size_to_sums[_size]
if min(_sumset) <= max_sum:
return False
max_sum = max(_sumset)
if verbose:
print(_size, max_sum, _sumset)
return True
|
[
"def subset_sum(S, total):\n\n if total == 0 and set:\n # Can use empty set\n return True\n\n if not set and total:\n # There are not elements in teh set and total > 0\n return False\n\n # Create a cache adn initiliaze all values as False\n cache = [[False for _ in range(total + 1)] for _ in range(len(S) + 1)]\n\n # Initiliaze the first column as True\n for i in range(len(S) + 1):\n cache[i][0] = True\n\n # Now iterate\n for i in range(1, len(S) + 1):\n for j in range(1, total + 1):\n if j < S[i-1]:\n cache[i][j] = cache[i-1][j]\n else:\n cache[i][j] = cache[i-1][j] or cache[i-1][j-S[i - 1]]\n\n # pprint.pprint(cache)\n return cache[-1][-1]",
"def isEqual_sum(arr: \"list[int]\", n: \"int\") -> bool:\n # find sum of all elements\n s = 0\n for el in arr:\n s += el\n if(s % 2 == 1):\n return False\n else:\n return isSubset_sum(arr, s//2)",
"def superset(big, little):\n for el in little:\n if big.count(el) < little.count(el):\n return False\n return True",
"def isSumSubset(arr, arrLen, requiredSum):\n # a subset value says 1 if that subset sum can be formed else 0\n # initially no subsets can be formed hence False/0\n subset = [[False for i in range(requiredSum + 1)] for i in range(arrLen + 1)]\n\n # for each arr value, a sum of zero(0) can be formed by not taking any element hence True/1\n for i in range(arrLen + 1):\n subset[i][0] = True\n\n # sum is not zero and set is empty then false\n for i in range(1, requiredSum + 1):\n subset[0][i] = False\n\n for i in range(1, arrLen + 1):\n for j in range(1, requiredSum + 1):\n if arr[i - 1] > j:\n subset[i][j] = subset[i - 1][j]\n if arr[i - 1] <= j:\n subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]\n\n # uncomment to print the subset\n for i in range(arrLen+1):\n print(subset[i])\n print(subset[arrLen][requiredSum])",
"def isEqual_sum_rec(arr: \"list[int]\", n: \"int\") -> bool:\n # find sum of all elements\n s = 0\n for el in arr:\n s += el\n if(s % 2 == 1):\n return False\n else:\n return isSubset_rec(arr, n, s//2)",
"def check_partial_sums(menge):\n summen = defaultdict(list)\n for length in range(len(menge), 0, -1):\n for comb in itertools.combinations(menge, length):\n summe = sum(comb)\n # first condition\n if summen[summe]:\n for item in summen[summe]:\n if any(i in item for i in comb):\n continue\n else:\n return False\n # second condition\n for i in range(summe // 2, summe + 1):\n if summen[i]:\n for item in summen[i]:\n if len(item) > length and not any(i in item for i in comb):\n return False\n summen[summe].append(comb)\n\n return True",
"def sperner(T):\n i = 0\n T = list(T)\n while(i < len(T)):\n if sum([a <= T[i] for a in T]) >= 2:\n T.pop(i)\n else:\n i += 1\n return frozenset(T)",
"def calcSubsetSumOver(nums, sum):\n return calcSubsetSum(nums, 0, sum, \"\")",
"def canPartition(self, nums):\n n = len(nums)\n total = sum(nums)\n \n # This problem is the equivalent to finding a set A that adds up to\n # half of the sum total of nums. Since the array is made up of integers,\n # this is impossible if the sum total is odd.\n if (total & 1) == 1:\n return False\n capacity = total // 2\n \n # dp[i][j], indicates whether or not a capacity j can be fulfilled\n # from a subset of the elements nums[0] to nums[i - 1], inclusive.\n # dp[i][0], will always be true since a capacity of zero can be\n # fulfilled by not including any elements\n dp = [[j == 0 for _ in range(n + 1)] for j in range(capacity + 1)]\n \n # An element is either a part of set A or it isn't; hence, this problem\n # reduces to the subset-sum problem.\n for cap in range(1, capacity + 1):\n for val in range(1, n + 1):\n dp[cap][val] = dp[cap][val- 1]\n if nums[val - 1] <= cap and not dp[cap][val]:\n dp[cap][val] = dp[cap - nums[val - 1]][val - 1]\n \n return dp[capacity][n]",
"def is_all_unique(x: Iterable[Any]) -> bool:\n return len(set(x)) == len(x)",
"def add_to_zero(nums):\n\n set_nums = set(nums)\n\n for num in nums: \n if -num in set_nums: \n return True\n\n return False",
"def two_sum(lst, k):\n seen = set()\n for num in lst:\n if k - num in seen:\n return True\n seen.add(num)\n return False",
"def test_issubset_single():\n should_be_valid = sets.is_subset([], [1, 2, 3, 4])\n assert should_be_valid is True",
"def all_have_quantization(self, nodes: Sequence[NNNodeBase]) -> bool:\n return all(node.name in self.qset for node in nodes)",
"def validSet(nums):\n combos = permutations(nums,2)\n for combo in combos:\n if not (int(str(combo[0])+str(combo[1])) in PrimesSet and int(str(combo[1])+str(combo[0])) in PrimesSet):\n return False\n return True",
"def has_sum(nums, k):\n\n for i, num in enumerate(nums[:-1]):\n for alt in nums[i + 1:]:\n if num + alt == k:\n return True\n\n return False",
"def all_non_sumable():\n abundant_num_list = find_abundant_num(28123)\n sumable = set([x + y for x, y in CR(abundant_num_list, 2)])\n non_sumable = set(range(1, 28124)) - sumable\n return sum(non_sumable)",
"def containsDuplicate(self, nums):\n return True if len(set(nums)) < len(nums) else False",
"def are_all_unique(seq):\n return len(seq) == len(set(seq))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
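The entry above depends on a helper nonempty_subset_sums and on collections.defaultdict being imported at module level; neither is shown. A possible stand-in for the helper (mapping each non-empty subset, as a tuple, to its sum) and a small check are sketched below.

from collections import defaultdict  # used by is_special_sum_set above
from itertools import combinations

def nonempty_subset_sums(values):
    # map every non-empty subset (as a tuple) to the sum of its elements
    return {
        subset: sum(subset)
        for size in range(1, len(values) + 1)
        for subset in combinations(values, size)
    }

print(is_special_sum_set((2, 3, 4)))  # True: all 7 subset sums are distinct and larger subsets sum higher
print(is_special_sum_set((1, 2, 3)))  # False: S({1, 2}) == S({3})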
Compute the number of ways to tile n black blocks with red paint. Each group of red paint must cover at least m consecutive blocks.
|
def block_tiling_flexible_1d(m, n):
end_in_red = [*[0 for _ in range(m - 1)], 1]
end_in_black = [1 for _ in range(m)]
for i in range(m, n):
_reds = end_in_red[i - 1] + end_in_black[i - m]
_blacks = end_in_red[i - 1] + end_in_black[i - 1]
end_in_red.append(_reds)
end_in_black.append(_blacks)
return end_in_red, end_in_black
|
[
"def block_tiling_fixed_1d(m, n):\n end_in_red_end = [*[0 for _ in range(m - 1)], 1]\n end_in_black = [1 for _ in range(m)]\n\n for i in range(m, n):\n _reds = end_in_red_end[i - m] + end_in_black[i - m]\n _blacks = end_in_red_end[i - 1] + end_in_black[i - 1]\n end_in_red_end.append(_reds)\n end_in_black.append(_blacks)\n\n return end_in_red_end, end_in_black",
"def block_tiling_multifixed_1d(m_values, n):\n m_values = sorted(m_values)\n m_min = m_values[0]\n end_in_red_end = [*[0 for _ in range(m_min - 1)], 1]\n end_in_black = [1 for _ in range(m_min)]\n\n for i in range(m_min, n):\n _reds, _blacks = 0, 0\n for _m in m_values:\n if i < _m - 1:\n pass\n elif i == _m - 1:\n _reds += 1\n else:\n _reds += end_in_red_end[i - _m] + end_in_black[i - _m]\n _blacks = end_in_red_end[i - 1] + end_in_black[i - 1]\n end_in_red_end.append(_reds)\n end_in_black.append(_blacks)\n\n return end_in_red_end, end_in_black",
"def get_ways(tiles,n,memo=[]):\r\n if len(memo) > tiles and memo[tiles] > -1:\r\n return memo[tiles]\r\n if n > tiles:\r\n # it's not possible to place a tile when the color is bigger than the black\r\n return 0\r\n res = 0\r\n for i in range(n,min(2*n,tiles+1)):\r\n # place a tile somewhere at the beginning from [1,2n-1]\r\n rest = get_ways(tiles-i,n,memo)\r\n res += rest+1 # add 1 for when the rest is empy\r\n res += get_ways(tiles-n,n,memo) # add when first n are empty\r\n if len(memo) > tiles:\r\n memo[tiles] = res\r\n return res",
"def count_paths(m,n,blocks):\n\n\tassert isinstance(m,int)\n\tassert isinstance(n,int)\n\tassert m > 0\n\tassert n > 0 \n\n\tassert isinstance(blocks, list)\n\n\tfor coord in blocks: \n\t\tassert coord != (0,0)\n\t\tassert coord != (m-1, n-1)\n\t\tassert isinstance(coord[0], int)\n\t\tassert m-1 >= coord[0] >= 0 \n\t\tassert isinstance(coord[1], int)\n\t\tassert n-1 >= coord[1] >= 0\n\n\n\n\n\tfrom collections import defaultdict\n\n\td = defaultdict()\n\n\n\tfor i in range(m):\n\t\tfor j in range(n):\t\t\n\t\t\tif (i,j) == (0,0):\n\t\t\t\td[(i,j)] = 1\n\t\t\t\n\t\t\t# print(i,j)\n\n\t\t\telif (i,j) in blocks: \n\t\t\t\td[(i,j)] = 0 \n\t\t\t\n\t\t\t# for co in blocks: \n\t\t\t# \tif co == (i,j):\n\t\t\t# \t\td[(i,j)] = 0 \n\n\n\t\t\telif i == 0: #Top row\n\t\t\t\td[(i,j)] = d[(i,j-1)]\n\t\t\telif j == 0: #left column\n\t\t\t\td[(i,j)] = d[(i-1,j)]\n\t\t\telse:\n\t\t\t\td[(i,j)] = d[(i-1,j)] + d[(i,j-1)]\n\n\t\n\treturn d[(m-1,n-1)]\n\t# return d \n\n\t# for keys, values in d.items():\n\t# \tprint(keys, values)",
"def numof_black_pieces(self):\n n = 0\n for rnum,piece in self._pieces.items():\n if piece.color == CheckersPiece.Color.BLACK:\n n += 1\n return n",
"def count_islands(rows,columns,tiles):\n numOfIslands = 0\n for i in range(0,rows):\n for j in range(0,columns):\n if tiles[i][j] == True:\n numOfIslands += 1\n find_all_parts_of_island(rows,columns,i,j,tiles)\n return numOfIslands",
"def find_num_carrots(n, m):\n\n # first draw the matrix, populated with random #s 0-4 (carrots) \n # write helper function to place bunny in center\n # figure out L R U D how many carrots it can eat",
"def _countPaths(self, cells, n, m, x, y):\n \n if (self.dp):\n if ((x, y) in self.dpMap): return self.dpMap[(x, y)]\n if (cells[x][y] == True): return 0\n if (x == n - 1 and y == m - 1): return 1\n \n \n k = 0\n if (x + 1 < n): k += self._countPaths(cells, n, m, x + 1, y)\n if (y + 1 < m): k += self._countPaths(cells, n, m, x, y + 1)\n\n if (self.dp):\n self.dpMap[(x, y)] = k # Memorization for dynamic programming\n return k",
"def getMinStep(n,m,board):\n step=0\n for i in range(n):\n for j in range(m):\n if board[i][j]=='Y':\n paintY(i,j,n,m,board)\n step+=1\n elif board[i][j]=='B':\n paintB(i,j,n,m,board)\n step+=1\n elif board[i][j]=='G':\n paintY(i,j,n,m,board)\n paintB(i,j,n,m,board)\n step+=2\n return step",
"def count_modes(m, nest=False):\n npix = len(m)\n nside = ah.npix_to_nside(npix)\n for nmodes in range(npix):\n nonzeroipix = np.flatnonzero(m)\n if len(nonzeroipix):\n flood_fill(nside, nonzeroipix[0], m, nest=nest)\n else:\n break\n return nmodes",
"def nr_of_blocks(self, img_arr_shape):\n\n # maximal coordinates that can be reached (inclusive)\n or_z = img_arr_shape[0] - 1\n or_y = img_arr_shape[1] - 1\n or_x = img_arr_shape[2] - 1\n\n # starting coordinates\n z, y, x = 0, 0, 0\n\n # centered starting coordinates in the padded image = origin of the original image\n cent_z, cent_y, cent_x = (self.block_size // 2), (self.block_size // 2), (self.block_size // 2)\n\n while cent_z <= or_z + (self.block_size // 2) - self.block_displacement:\n cent_z += self.block_displacement\n z += 1\n nr_block_z = z + 1\n\n while cent_y <= or_y + (self.block_size // 2) - self.block_displacement:\n cent_y += self.block_displacement\n y += 1\n nr_block_y = y + 1\n\n while cent_x <= or_x + (self.block_size // 2) - self.block_displacement:\n cent_x += self.block_displacement\n x += 1\n nr_block_x = x + 1\n\n return [nr_block_z, nr_block_y, nr_block_x]",
"def count_islands(grid):\n\tvisited = grid.copy() # copy the grid in order not to lose the real information.\n\tM = len(grid)\n\tN = len(grid[0])\n\tc = 0\n\tfor k in range(M):\n\t\tfor l in range(N):\n\t\t\tif visited[k][l]:\n\t\t\t\tc += 1 # found a new island\n\t\t\t\tvisit_island(visited, k, l, M, N) # visit the connected pieces\n\treturn c",
"def num_black(self, n):\n if n < 0:\n raise ValueError('The number of steps must be non-negative')\n B_incr = self.B[self.cutoff] - self.B[self.cutoff-self.period]\n if n <= self.cutoff:\n return self.B[n]\n periods, remaining_steps = divmod(n-self.cutoff, self.period)\n if remaining_steps:\n periods += 1\n remaining_steps = self.period-remaining_steps\n return self.B[self.cutoff-remaining_steps] + periods*B_incr",
"def test_every_n(self):\n N = 3\n self.color_list.configure(colors=self.colors, counts=N)\n for n in range(2):\n for color in self.colors:\n for m in range(N):\n self.assertEqual(color, self.color_list.color())\n self.color_list.advance()",
"def McNuggets(n):\r\n\r\n for a in range(n):\r\n for b in range(n):\r\n for c in range(n):\r\n if 6*a + 9*b + 20*c == n:\r\n return True \r\n return False",
"def test_returns_number_of_islands_in_large_matrix(self):\n matrix = [[1, 0, 0, 0, 0, 1],\n [0, 1, 1, 0, 0, 1],\n [1, 0, 1, 0, 0, 1],\n [0, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0]]\n result = island_counter(matrix)\n self.assertEqual(result, 7)",
"def count_lit_pixels(self):\n return sum(sum(row) for row in self.grid)",
"def paint(graph, n):\r\n if n < 1:\r\n return False\r\n\r\n import random\r\n\r\n palette = [\"#%06x\" % random.randint(0, 0xFFFFFF) for i in range(n)]\r\n nodes = get_nodes(graph)\r\n matrix = matrix_adjacency_undirected(graph)\r\n colors = {}\r\n adjacents = []\r\n\r\n def adjacents_have_diff_colors(nodes, color):\r\n return all(color != colors.get(n) for n in nodes)\r\n\r\n for row in matrix:\r\n adjacents.append([])\r\n for node in row:\r\n if node != 0:\r\n adjacents[-1].append(nodes[row.index(node)])\r\n row[row.index(node)] = 0\r\n\r\n nodes = get_nodes(graph)\r\n\r\n for node in range(len(nodes)):\r\n found_color = False\r\n for colr in (x for x in range(n) if x != node):\r\n if adjacents_have_diff_colors(adjacents[node], colr):\r\n found_color = True\r\n colors[nodes[node]] = colr\r\n break\r\n if not found_color:\r\n return False\r\n\r\n\r\n color_str = \"[\"\r\n for key in colors:\r\n color_str += \"[\" + str(key) + \", \" + \"{color: \" + \\\r\n str(palette[colors[key]]) + \"], \"\r\n color_str = color_str[:-2] + \"]\"\r\n\r\n return color_str",
"def getNumCleanedTiles(self):\n numClean = 0\n for t in self.tileStatus:\n numClean = numClean + sum(t)\n\n return numClean"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
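A quick check for block_tiling_flexible_1d above, assuming the function is in scope: the two returned lists count tilings of length i+1 ending in red and in black, so the total for a full row is the sum of the last entries. For m = 3 and n = 7 that total is 17, matching a hand enumeration of rows of seven blocks with red runs of at least three.

reds, blacks = block_tiling_flexible_1d(3, 7)
print(reds[-1] + blacks[-1])  # 17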
Compute the number of ways to tile n black blocks with red paint. Each group of red paint must cover exactly m consecutive blocks.
|
def block_tiling_fixed_1d(m, n):
end_in_red_end = [*[0 for _ in range(m - 1)], 1]
end_in_black = [1 for _ in range(m)]
for i in range(m, n):
_reds = end_in_red_end[i - m] + end_in_black[i - m]
_blacks = end_in_red_end[i - 1] + end_in_black[i - 1]
end_in_red_end.append(_reds)
end_in_black.append(_blacks)
return end_in_red_end, end_in_black
|
[
"def block_tiling_flexible_1d(m, n):\n end_in_red = [*[0 for _ in range(m - 1)], 1]\n end_in_black = [1 for _ in range(m)]\n\n for i in range(m, n):\n _reds = end_in_red[i - 1] + end_in_black[i - m]\n _blacks = end_in_red[i - 1] + end_in_black[i - 1]\n end_in_red.append(_reds)\n end_in_black.append(_blacks)\n\n return end_in_red, end_in_black",
"def block_tiling_multifixed_1d(m_values, n):\n m_values = sorted(m_values)\n m_min = m_values[0]\n end_in_red_end = [*[0 for _ in range(m_min - 1)], 1]\n end_in_black = [1 for _ in range(m_min)]\n\n for i in range(m_min, n):\n _reds, _blacks = 0, 0\n for _m in m_values:\n if i < _m - 1:\n pass\n elif i == _m - 1:\n _reds += 1\n else:\n _reds += end_in_red_end[i - _m] + end_in_black[i - _m]\n _blacks = end_in_red_end[i - 1] + end_in_black[i - 1]\n end_in_red_end.append(_reds)\n end_in_black.append(_blacks)\n\n return end_in_red_end, end_in_black",
"def get_ways(tiles,n,memo=[]):\r\n if len(memo) > tiles and memo[tiles] > -1:\r\n return memo[tiles]\r\n if n > tiles:\r\n # it's not possible to place a tile when the color is bigger than the black\r\n return 0\r\n res = 0\r\n for i in range(n,min(2*n,tiles+1)):\r\n # place a tile somewhere at the beginning from [1,2n-1]\r\n rest = get_ways(tiles-i,n,memo)\r\n res += rest+1 # add 1 for when the rest is empy\r\n res += get_ways(tiles-n,n,memo) # add when first n are empty\r\n if len(memo) > tiles:\r\n memo[tiles] = res\r\n return res",
"def count_paths(m,n,blocks):\n\n\tassert isinstance(m,int)\n\tassert isinstance(n,int)\n\tassert m > 0\n\tassert n > 0 \n\n\tassert isinstance(blocks, list)\n\n\tfor coord in blocks: \n\t\tassert coord != (0,0)\n\t\tassert coord != (m-1, n-1)\n\t\tassert isinstance(coord[0], int)\n\t\tassert m-1 >= coord[0] >= 0 \n\t\tassert isinstance(coord[1], int)\n\t\tassert n-1 >= coord[1] >= 0\n\n\n\n\n\tfrom collections import defaultdict\n\n\td = defaultdict()\n\n\n\tfor i in range(m):\n\t\tfor j in range(n):\t\t\n\t\t\tif (i,j) == (0,0):\n\t\t\t\td[(i,j)] = 1\n\t\t\t\n\t\t\t# print(i,j)\n\n\t\t\telif (i,j) in blocks: \n\t\t\t\td[(i,j)] = 0 \n\t\t\t\n\t\t\t# for co in blocks: \n\t\t\t# \tif co == (i,j):\n\t\t\t# \t\td[(i,j)] = 0 \n\n\n\t\t\telif i == 0: #Top row\n\t\t\t\td[(i,j)] = d[(i,j-1)]\n\t\t\telif j == 0: #left column\n\t\t\t\td[(i,j)] = d[(i-1,j)]\n\t\t\telse:\n\t\t\t\td[(i,j)] = d[(i-1,j)] + d[(i,j-1)]\n\n\t\n\treturn d[(m-1,n-1)]\n\t# return d \n\n\t# for keys, values in d.items():\n\t# \tprint(keys, values)",
"def numof_black_pieces(self):\n n = 0\n for rnum,piece in self._pieces.items():\n if piece.color == CheckersPiece.Color.BLACK:\n n += 1\n return n",
"def count_islands(rows,columns,tiles):\n numOfIslands = 0\n for i in range(0,rows):\n for j in range(0,columns):\n if tiles[i][j] == True:\n numOfIslands += 1\n find_all_parts_of_island(rows,columns,i,j,tiles)\n return numOfIslands",
"def find_num_carrots(n, m):\n\n # first draw the matrix, populated with random #s 0-4 (carrots) \n # write helper function to place bunny in center\n # figure out L R U D how many carrots it can eat",
"def getMinStep(n,m,board):\n step=0\n for i in range(n):\n for j in range(m):\n if board[i][j]=='Y':\n paintY(i,j,n,m,board)\n step+=1\n elif board[i][j]=='B':\n paintB(i,j,n,m,board)\n step+=1\n elif board[i][j]=='G':\n paintY(i,j,n,m,board)\n paintB(i,j,n,m,board)\n step+=2\n return step",
"def _countPaths(self, cells, n, m, x, y):\n \n if (self.dp):\n if ((x, y) in self.dpMap): return self.dpMap[(x, y)]\n if (cells[x][y] == True): return 0\n if (x == n - 1 and y == m - 1): return 1\n \n \n k = 0\n if (x + 1 < n): k += self._countPaths(cells, n, m, x + 1, y)\n if (y + 1 < m): k += self._countPaths(cells, n, m, x, y + 1)\n\n if (self.dp):\n self.dpMap[(x, y)] = k # Memorization for dynamic programming\n return k",
"def count_modes(m, nest=False):\n npix = len(m)\n nside = ah.npix_to_nside(npix)\n for nmodes in range(npix):\n nonzeroipix = np.flatnonzero(m)\n if len(nonzeroipix):\n flood_fill(nside, nonzeroipix[0], m, nest=nest)\n else:\n break\n return nmodes",
"def count_islands(grid):\n\tvisited = grid.copy() # copy the grid in order not to lose the real information.\n\tM = len(grid)\n\tN = len(grid[0])\n\tc = 0\n\tfor k in range(M):\n\t\tfor l in range(N):\n\t\t\tif visited[k][l]:\n\t\t\t\tc += 1 # found a new island\n\t\t\t\tvisit_island(visited, k, l, M, N) # visit the connected pieces\n\treturn c",
"def nr_of_blocks(self, img_arr_shape):\n\n # maximal coordinates that can be reached (inclusive)\n or_z = img_arr_shape[0] - 1\n or_y = img_arr_shape[1] - 1\n or_x = img_arr_shape[2] - 1\n\n # starting coordinates\n z, y, x = 0, 0, 0\n\n # centered starting coordinates in the padded image = origin of the original image\n cent_z, cent_y, cent_x = (self.block_size // 2), (self.block_size // 2), (self.block_size // 2)\n\n while cent_z <= or_z + (self.block_size // 2) - self.block_displacement:\n cent_z += self.block_displacement\n z += 1\n nr_block_z = z + 1\n\n while cent_y <= or_y + (self.block_size // 2) - self.block_displacement:\n cent_y += self.block_displacement\n y += 1\n nr_block_y = y + 1\n\n while cent_x <= or_x + (self.block_size // 2) - self.block_displacement:\n cent_x += self.block_displacement\n x += 1\n nr_block_x = x + 1\n\n return [nr_block_z, nr_block_y, nr_block_x]",
"def test_returns_number_of_islands_in_large_matrix(self):\n matrix = [[1, 0, 0, 0, 0, 1],\n [0, 1, 1, 0, 0, 1],\n [1, 0, 1, 0, 0, 1],\n [0, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0]]\n result = island_counter(matrix)\n self.assertEqual(result, 7)",
"def test_every_n(self):\n N = 3\n self.color_list.configure(colors=self.colors, counts=N)\n for n in range(2):\n for color in self.colors:\n for m in range(N):\n self.assertEqual(color, self.color_list.color())\n self.color_list.advance()",
"def McNuggets(n):\r\n\r\n for a in range(n):\r\n for b in range(n):\r\n for c in range(n):\r\n if 6*a + 9*b + 20*c == n:\r\n return True \r\n return False",
"def num_black(self, n):\n if n < 0:\n raise ValueError('The number of steps must be non-negative')\n B_incr = self.B[self.cutoff] - self.B[self.cutoff-self.period]\n if n <= self.cutoff:\n return self.B[n]\n periods, remaining_steps = divmod(n-self.cutoff, self.period)\n if remaining_steps:\n periods += 1\n remaining_steps = self.period-remaining_steps\n return self.B[self.cutoff-remaining_steps] + periods*B_incr",
"def paint(graph, n):\r\n if n < 1:\r\n return False\r\n\r\n import random\r\n\r\n palette = [\"#%06x\" % random.randint(0, 0xFFFFFF) for i in range(n)]\r\n nodes = get_nodes(graph)\r\n matrix = matrix_adjacency_undirected(graph)\r\n colors = {}\r\n adjacents = []\r\n\r\n def adjacents_have_diff_colors(nodes, color):\r\n return all(color != colors.get(n) for n in nodes)\r\n\r\n for row in matrix:\r\n adjacents.append([])\r\n for node in row:\r\n if node != 0:\r\n adjacents[-1].append(nodes[row.index(node)])\r\n row[row.index(node)] = 0\r\n\r\n nodes = get_nodes(graph)\r\n\r\n for node in range(len(nodes)):\r\n found_color = False\r\n for colr in (x for x in range(n) if x != node):\r\n if adjacents_have_diff_colors(adjacents[node], colr):\r\n found_color = True\r\n colors[nodes[node]] = colr\r\n break\r\n if not found_color:\r\n return False\r\n\r\n\r\n color_str = \"[\"\r\n for key in colors:\r\n color_str += \"[\" + str(key) + \", \" + \"{color: \" + \\\r\n str(palette[colors[key]]) + \"], \"\r\n color_str = color_str[:-2] + \"]\"\r\n\r\n return color_str",
"def count_lit_pixels(self):\n return sum(sum(row) for row in self.grid)",
"def count_set_bits(n: int) -> int:\n assert 0 <= n <= 255\n return sum(\n 1 for i in range(8)\n if (n >> i) % 2 == 1\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute the number of ways to tile n black blocks with red paint. Each group of red paint must cover exactly m consecutive blocks, where m may take any of several allowed values.
|
def block_tiling_multifixed_1d(m_values, n):
m_values = sorted(m_values)
m_min = m_values[0]
end_in_red_end = [*[0 for _ in range(m_min - 1)], 1]
end_in_black = [1 for _ in range(m_min)]
for i in range(m_min, n):
_reds, _blacks = 0, 0
for _m in m_values:
if i < _m - 1:
pass
elif i == _m - 1:
_reds += 1
else:
_reds += end_in_red_end[i - _m] + end_in_black[i - _m]
_blacks = end_in_red_end[i - 1] + end_in_black[i - 1]
end_in_red_end.append(_reds)
end_in_black.append(_blacks)
return end_in_red_end, end_in_black
|
[
"def block_tiling_flexible_1d(m, n):\n end_in_red = [*[0 for _ in range(m - 1)], 1]\n end_in_black = [1 for _ in range(m)]\n\n for i in range(m, n):\n _reds = end_in_red[i - 1] + end_in_black[i - m]\n _blacks = end_in_red[i - 1] + end_in_black[i - 1]\n end_in_red.append(_reds)\n end_in_black.append(_blacks)\n\n return end_in_red, end_in_black",
"def block_tiling_fixed_1d(m, n):\n end_in_red_end = [*[0 for _ in range(m - 1)], 1]\n end_in_black = [1 for _ in range(m)]\n\n for i in range(m, n):\n _reds = end_in_red_end[i - m] + end_in_black[i - m]\n _blacks = end_in_red_end[i - 1] + end_in_black[i - 1]\n end_in_red_end.append(_reds)\n end_in_black.append(_blacks)\n\n return end_in_red_end, end_in_black",
"def get_ways(tiles,n,memo=[]):\r\n if len(memo) > tiles and memo[tiles] > -1:\r\n return memo[tiles]\r\n if n > tiles:\r\n # it's not possible to place a tile when the color is bigger than the black\r\n return 0\r\n res = 0\r\n for i in range(n,min(2*n,tiles+1)):\r\n # place a tile somewhere at the beginning from [1,2n-1]\r\n rest = get_ways(tiles-i,n,memo)\r\n res += rest+1 # add 1 for when the rest is empy\r\n res += get_ways(tiles-n,n,memo) # add when first n are empty\r\n if len(memo) > tiles:\r\n memo[tiles] = res\r\n return res",
"def numof_black_pieces(self):\n n = 0\n for rnum,piece in self._pieces.items():\n if piece.color == CheckersPiece.Color.BLACK:\n n += 1\n return n",
"def test_every_n(self):\n N = 3\n self.color_list.configure(colors=self.colors, counts=N)\n for n in range(2):\n for color in self.colors:\n for m in range(N):\n self.assertEqual(color, self.color_list.color())\n self.color_list.advance()",
"def count_paths(m,n,blocks):\n\n\tassert isinstance(m,int)\n\tassert isinstance(n,int)\n\tassert m > 0\n\tassert n > 0 \n\n\tassert isinstance(blocks, list)\n\n\tfor coord in blocks: \n\t\tassert coord != (0,0)\n\t\tassert coord != (m-1, n-1)\n\t\tassert isinstance(coord[0], int)\n\t\tassert m-1 >= coord[0] >= 0 \n\t\tassert isinstance(coord[1], int)\n\t\tassert n-1 >= coord[1] >= 0\n\n\n\n\n\tfrom collections import defaultdict\n\n\td = defaultdict()\n\n\n\tfor i in range(m):\n\t\tfor j in range(n):\t\t\n\t\t\tif (i,j) == (0,0):\n\t\t\t\td[(i,j)] = 1\n\t\t\t\n\t\t\t# print(i,j)\n\n\t\t\telif (i,j) in blocks: \n\t\t\t\td[(i,j)] = 0 \n\t\t\t\n\t\t\t# for co in blocks: \n\t\t\t# \tif co == (i,j):\n\t\t\t# \t\td[(i,j)] = 0 \n\n\n\t\t\telif i == 0: #Top row\n\t\t\t\td[(i,j)] = d[(i,j-1)]\n\t\t\telif j == 0: #left column\n\t\t\t\td[(i,j)] = d[(i-1,j)]\n\t\t\telse:\n\t\t\t\td[(i,j)] = d[(i-1,j)] + d[(i,j-1)]\n\n\t\n\treturn d[(m-1,n-1)]\n\t# return d \n\n\t# for keys, values in d.items():\n\t# \tprint(keys, values)",
"def McNuggets(n):\r\n\r\n for a in range(n):\r\n for b in range(n):\r\n for c in range(n):\r\n if 6*a + 9*b + 20*c == n:\r\n return True \r\n return False",
"def find_num_carrots(n, m):\n\n # first draw the matrix, populated with random #s 0-4 (carrots) \n # write helper function to place bunny in center\n # figure out L R U D how many carrots it can eat",
"def count_islands(rows,columns,tiles):\n numOfIslands = 0\n for i in range(0,rows):\n for j in range(0,columns):\n if tiles[i][j] == True:\n numOfIslands += 1\n find_all_parts_of_island(rows,columns,i,j,tiles)\n return numOfIslands",
"def getMinStep(n,m,board):\n step=0\n for i in range(n):\n for j in range(m):\n if board[i][j]=='Y':\n paintY(i,j,n,m,board)\n step+=1\n elif board[i][j]=='B':\n paintB(i,j,n,m,board)\n step+=1\n elif board[i][j]=='G':\n paintY(i,j,n,m,board)\n paintB(i,j,n,m,board)\n step+=2\n return step",
"def _countPaths(self, cells, n, m, x, y):\n \n if (self.dp):\n if ((x, y) in self.dpMap): return self.dpMap[(x, y)]\n if (cells[x][y] == True): return 0\n if (x == n - 1 and y == m - 1): return 1\n \n \n k = 0\n if (x + 1 < n): k += self._countPaths(cells, n, m, x + 1, y)\n if (y + 1 < m): k += self._countPaths(cells, n, m, x, y + 1)\n\n if (self.dp):\n self.dpMap[(x, y)] = k # Memorization for dynamic programming\n return k",
"def solve():\n\n n = int(input())\n S = input()\n\n colors = ['U', 'R', 'Y', 'O', 'B', 'P', 'G', 'A']\n color_to_int = {c: i for i,c in enumerate(colors)}\n\n # loop through the squares and keep track of consecutive colors\n rflag, yflag, bflag = False, False, False\n\n res = 0 \n\n for c in S:\n # get red, yellow and blue values\n val = color_to_int[c]\n val, r = divmod(val, 2)\n val, y = divmod(val, 2)\n val, b = divmod(val, 2)\n # check if r, y, b are 1\n if r == 1:\n rflag = True\n elif rflag == True:\n res += 1\n rflag = False\n\n if y == 1:\n yflag = True\n elif yflag == True:\n res += 1\n yflag = False\n\n if b == 1:\n bflag = True\n elif bflag == True:\n res += 1\n bflag = False\n\n res += int(rflag) + int(yflag) + int(bflag)\n return res",
"def coveredsquares(game):\n bombs = 0\n covered_squares = 0\n for r in range(game['dimensions'][0]):\n for c in range(game['dimensions'][1]):\n if game['board'][r][c] == '.':\n if game['mask'][r][c] == True:\n bombs += 1\n elif game['mask'][r][c] == False:\n covered_squares += 1\n return bombs,covered_squares",
"def count_modes(m, nest=False):\n npix = len(m)\n nside = ah.npix_to_nside(npix)\n for nmodes in range(npix):\n nonzeroipix = np.flatnonzero(m)\n if len(nonzeroipix):\n flood_fill(nside, nonzeroipix[0], m, nest=nest)\n else:\n break\n return nmodes",
"def num_black(self, n):\n if n < 0:\n raise ValueError('The number of steps must be non-negative')\n B_incr = self.B[self.cutoff] - self.B[self.cutoff-self.period]\n if n <= self.cutoff:\n return self.B[n]\n periods, remaining_steps = divmod(n-self.cutoff, self.period)\n if remaining_steps:\n periods += 1\n remaining_steps = self.period-remaining_steps\n return self.B[self.cutoff-remaining_steps] + periods*B_incr",
"def count_set_bits(n: int) -> int:\n assert 0 <= n <= 255\n return sum(\n 1 for i in range(8)\n if (n >> i) % 2 == 1\n )",
"def nb_black_passings(self):\n return sum(1 for s in self.thumb_scores if s[1] == 1)",
"def paint(graph, n):\r\n if n < 1:\r\n return False\r\n\r\n import random\r\n\r\n palette = [\"#%06x\" % random.randint(0, 0xFFFFFF) for i in range(n)]\r\n nodes = get_nodes(graph)\r\n matrix = matrix_adjacency_undirected(graph)\r\n colors = {}\r\n adjacents = []\r\n\r\n def adjacents_have_diff_colors(nodes, color):\r\n return all(color != colors.get(n) for n in nodes)\r\n\r\n for row in matrix:\r\n adjacents.append([])\r\n for node in row:\r\n if node != 0:\r\n adjacents[-1].append(nodes[row.index(node)])\r\n row[row.index(node)] = 0\r\n\r\n nodes = get_nodes(graph)\r\n\r\n for node in range(len(nodes)):\r\n found_color = False\r\n for colr in (x for x in range(n) if x != node):\r\n if adjacents_have_diff_colors(adjacents[node], colr):\r\n found_color = True\r\n colors[nodes[node]] = colr\r\n break\r\n if not found_color:\r\n return False\r\n\r\n\r\n color_str = \"[\"\r\n for key in colors:\r\n color_str += \"[\" + str(key) + \", \" + \"{color: \" + \\\r\n str(palette[colors[key]]) + \"], \"\r\n color_str = color_str[:-2] + \"]\"\r\n\r\n return color_str",
"def nb_permutation(self):\n grid = [[self.state[x * self.size + y] for y in range(self.size)] for x in range(self.size)]\n nb_inv = 0\n for i in range(1, self.size ** 2):\n x_i, y_i = find_coordinates(self.goal, self.size, i)\n for j in list(range(i + 1, self.size ** 2)) + [0]:\n x_j, y_j = find_coordinates(self.goal, self.size, j)\n if grid[x_i][y_i] > 0 and grid[x_j][y_j] > 0:\n nb_inv += grid[x_i][y_i] > grid[x_j][y_j]\n return nb_inv"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Converts a datetime to the given timezone. The tz argument can be an instance of tzinfo or a string such as 'Europe/London' that will be passed to pytz.timezone. Naive datetimes are forced to the timezone. Wise datetimes are converted.
|
def force_tz(obj, tz):
if not isinstance(tz, tzinfo):
tz = pytz.timezone(tz)
if (obj.tzinfo is None) or (obj.tzinfo.utcoffset(obj) is None):
return tz.localize(obj)
else:
return obj.astimezone(tz)
|
[
"def dt_to_zone(dt, tzstring):\n return dt.astimezone(pytz.timezone(tzstring))",
"def ensure_timezone(dt, tz=None):\n if dt.tzinfo is None:\n return dt.replace(tzinfo=tz or tzlocal())\n else:\n return dt",
"def transform_timezone(dt, from_tz_str, to_tz_str):\n if from_tz_str == to_tz_str:\n return dt\n tz_from = pytz.timezone(from_tz_str)\n tz_to = pytz.timezone(to_tz_str)\n dt = parse_datetime(dt)\n local_dt = tz_from.localize(dt)\n ret = local_dt.astimezone(tz_to)\n return ret",
"def localize_dt(dt_obj, tz_name):\n\n return pytz.timezone(tz_name).localize(dt_obj)",
"def to_timezone(t, tzinfo):\n if tzinfo:\n if not t.tzinfo:\n t = pytz.utc.localize(t)\n return tzinfo.normalize(t.astimezone(tzinfo))\n elif t.tzinfo:\n return pytz.utc.normalize(t.astimezone(pytz.utc)).replace(tzinfo=None)\n else:\n return t",
"def convert_to_datetime(input, tz, arg_name):\n if input is None:\n return\n elif isinstance(input, datetime):\n datetime_ = input\n elif isinstance(input, date):\n datetime_ = datetime.combine(input, time())\n elif isinstance(input, six.string_types):\n m = _DATE_REGEX.match(input)\n if not m:\n raise ValueError('Invalid date string')\n\n values = m.groupdict()\n tzname = values.pop('timezone')\n if tzname == 'Z':\n tz = utc\n elif tzname:\n hours, minutes = (int(x) for x in tzname[1:].split(':'))\n sign = 1 if tzname[0] == '+' else -1\n tz = FixedOffset(sign * (hours * 60 + minutes))\n\n values = {k: int(v or 0) for k, v in values.items()}\n datetime_ = datetime(**values)\n else:\n raise TypeError('Unsupported type for %s: %s' % (arg_name, input.__class__.__name__))\n\n if datetime_.tzinfo is not None:\n return datetime_\n if tz is None:\n raise ValueError(\n 'The \"tz\" argument must be specified if %s has no timezone information' % arg_name)\n if isinstance(tz, six.string_types):\n tz = timezone(tz)\n\n try:\n return tz.localize(datetime_, is_dst=None)\n except AttributeError:\n raise TypeError(\n 'Only pytz timezones are supported (need the localize() and normalize() methods)')",
"def add_timezone(date, tz=None):\r\n return utils.add_timezone(date, tz)",
"def localtime_for_timezone(value, timezone):\n return adjust_datetime_to_timezone(value, 'UTC', timezone)",
"def setTimeZone(self, tz):\n raise NotImplementedError()",
"def create_pytz_timezone(time_zone_string):\n result_timezone = pytz.timezone(time_zone_string)\n return result_timezone",
"def filter_to_user_timezone(dt):\n return to_user_timezone(dt)",
"def tz_convert(dt_input, tz_target):\n tz_local = get_localzone()\n tz_target = timezone(tz_target)\n dt_target = tz_local.localize(dt_input).astimezone(tz_target)\n return tz_target.normalize(dt_target)",
"def convert_datetime_across_timezones(d, t1, t2):\n d_dt = t1.localize(d, is_dst=True)\n return d_dt.astimezone(t2).replace(tzinfo=None)",
"def __as_datetime_with_tz(datetime_string, dt_format=\"%d.%m.%Y\"):\n return datetime.strptime(datetime_string, dt_format).replace(tzinfo=timezone.utc)",
"def to_timezone(localized_datetime: datetime, target_timezone: Union[BaseTzInfo, str]) -> datetime:\n if isinstance(target_timezone, str):\n tz = timezone(target_timezone)\n return to_timezone(localized_datetime, tz)\n\n return datetime.fromtimestamp(localized_datetime.timestamp(), tz=target_timezone)",
"def set_timezone(unlocalized_datetime: datetime, target_timezone: Union[BaseTzInfo, str]) -> datetime:\n if unlocalized_datetime.tzinfo is not None:\n # remove current tz info and call this function again\n return set_timezone(unlocalized_datetime.replace(tzinfo=None), target_timezone)\n\n if isinstance(target_timezone, str):\n return set_timezone(unlocalized_datetime, timezone(target_timezone))\n\n return target_timezone.localize(unlocalized_datetime)",
"def tztime(self, timezone=None, format=None):\n self.writeCommand('tztime', [timezone, format])\n return self",
"def set_run_time_tz(self, tz: datetime.timezone | None):\n self._run_time = self._run_time.astimezone(tz=tz)",
"def as_utc_timezone(date_time: datetime) -> datetime:\n return date_time.astimezone(pytz.timezone('GMT'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the override for a specific flag, given a context.
|
def get_override(self, flag: Flag, **kwargs) -> Any:
raise NotImplementedError
|
[
"def get_override(self, flag: Flag, **kwargs) -> Any:\n return flag.cast_string(SiteFlagOverride.objects.get(name=flag.name).value)",
"def get_flag(flag = 'exit'):\n return flag in FLAG and FLAG[flag]",
"def Get(self, flag_name):\n return self.flags.get(flag_name)",
"def should_override(self, flag: Flag, **kwargs) -> bool:\n return SiteFlagOverride.objects.filter(name=flag.name).exists()",
"def get_flag(self, f):\n\t\ttry:\n\t\t\treturn self._flags[f]\n\t\texcept KeyError: pass",
"def get_flag(self):\r\n if self.flag is None:\r\n # Find the flag in the game objects list\r\n for obj in self.game_objects_list:\r\n if isinstance(obj, gameobjects.Flag):\r\n self.flag = obj\r\n break\r\n return self.flag",
"def SoOverrideElement_getFlags(state: 'SoState') -> \"uint32_t\":\n return _coin.SoOverrideElement_getFlags(state)",
"def GetGlobalOption(self, option, default=None):\n\n for opt, opt_arg in self.__global_options:\n if opt == option:\n return opt_arg\n return default",
"def get_option(cfg, base, opt):\n if cfg.has_option(base, opt):\n return cfg.get(base, opt)\n else:\n return None",
"def flags(self, **kw):\n for k, v in kw.iteritems():\n FLAGS.set_override(k, v)\n self._overridden_opts.append(k)",
"def flag(self, flag):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.flag\", \r\n self._object._eco_id, flag)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, int)",
"def getDrawOverrideInfo(*args, **kwargs):\n \n pass",
"def getFlags(state: 'SoState') -> \"uint32_t\":\n return _coin.SoOverrideElement_getFlags(state)",
"def override(val, **kwargs):\n return _OverridingValue(val, **kwargs)",
"def override(flag):\n var = '_override_{}'.format(flag.strip('-'))\n if var in os.environ or flag in sys.argv:\n if flag in sys.argv:\n sys.argv.remove(flag)\n os.environ[var] = ''\n return True",
"def get_custom(self, option, default):\n try:\n return self.__cp.get(SEC, \"custom-%s\" % option)\n except (ValueError, AttributeError), e:\n log.warning(\"config '%s' malformed (%s)\" % (option, e))\n return default\n except ConfigParser.NoOptionError:\n return default",
"def getDrawStyleOverride(state: 'SoState') -> \"SbBool\":\n return _coin.SoOverrideElement_getDrawStyleOverride(state)",
"def getCustom(self, section, option, default=''):\n if self.has_option(section, option):\n optval = self.get(section, option).strip()\n if optval == '':\n optval = default\n else:\n optval = default\n\n return optval",
"def getFlags(self) -> int:\n ..."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
make sure we can get a dfname with at least the filename attribute being correct, from a pagerange passed in
|
def test_make_dfname_from_pagerange(self):
converter = DFNamePageRangeConverter(self.en['wiki'], "pages-articles", "xml",
"bz2", verbose=False)
dfname = converter.make_dfname_from_pagerange((230, 295), 2)
expected_filename = 'enwiki-{today}-pages-articles2.xml-p230p295.bz2'.format(
today=self.today)
self.assertEqual(dfname.filename, expected_filename)
|
[
"def get_dfnames_from_pageranges(self, pageranges):\n dfnames = []\n for startpage, endpage, partnum in pageranges:\n dfname = DumpFilename(\n self.wiki, self.wiki.date, self.dumpname,\n self.filetype, self.file_ext, partnum,\n DumpFilename.make_checkpoint_string(startpage, endpage),\n False)\n dfnames.append(dfname)\n return dfnames",
"def get_pageranges_from_dfnames(dfnames):\n pageranges = []\n for dfname in dfnames:\n if not dfname.first_page_id or not dfname.last_page_id:\n return None\n pageranges.append((dfname.first_page_id, dfname.last_page_id, dfname.partnum))\n return pageranges",
"def create_df(self):\n alldf= pd.DataFrame()\n pdffiles= glob.glob(self.input_lib+'/**/*.pdf', recursive=True)\n for pdf_file in pdffiles:\n pdf_page_count= self.count_pages(pdf_file)\n for pg in range(1,pdf_page_count+1):\n pg = str(pg)\n cmd = ['pdftotext','-bbox-layout','-f', pg, pdf_file, pdf_file[:-4]+'_'+pg+'.html']\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) \n o, e = proc.communicate()\n page = open(pdf_file[:-4]+'_'+pg+'.html',encoding=\"utf8\")\n soup = BeautifulSoup(page.read(),'html.parser')\n out_html_file_path=pdf_file[:-4]+'_'+pg+'.html'\n lines = soup.find_all('line')\n pdf_file= pdf_file.replace(\"\\\\\",\"/\")\n path= pdf_file.split(\"/\")[-1]\n path= path[:-4]+\"-from-pdf-page\"+pg+'.jpg'\n td_list = []\n for line in lines:\n req_td_dict = {}\n req_td_dict['path'] =path\n #req_td_dict['page']= int(pg)\n req_td_dict['xmin'] = round(float(line['xmin']))\n req_td_dict['ymin'] = round(float(line['ymin']))\n req_td_dict['xmax'] = round(float(line['xmax']))\n req_td_dict['ymax'] = round(float(line['ymax']))\n req_td_dict['label'] = line.text.replace('\\n',' ')\n td_list.append(req_td_dict)\n df1 = pd.DataFrame(td_list)\n alldf= alldf.append(df1)\n alldf_multi= self.apply_multiple(alldf, self.multiple)\n return alldf_multi,alldf",
"def test_file_name(self):\n df_to_test = functions.invest_dataframe(FILE_NAME)\n out_file_name = list(df_to_test)[0]\n char_one = \"/\"\n char_two = \".\"\n break1 = [pos for pos, char in enumerate(FILE_NAME) if char == char_one]\n break2 = [pos for pos, char in enumerate(FILE_NAME) if char == char_two]\n in_file_name = FILE_NAME[break1[-1] + 1:break2[-1]]\n self.assertEqual(in_file_name, out_file_name)",
"def get_pdffilename(self):\n project_dir = os.path.dirname(self.template_file)\n #print yaml.load(open(os.path.join(project_dir, 'index.yaml')))\n\n pdfkeys = yaml.load(open(os.path.join(project_dir, 'index.yaml')))['pdffilename']\n filename = os.path.join(project_dir, 'reports',\n ''.join(['_'.join([self.vals[key] for key in pdfkeys]), '_', self.uniq_id, '.pdf']))\n\n #TODO: uniq_id is still not really unique and there is small theoretical possiblity\n # that filename may reflect older patient. However this will happen only if the\n # older record is deleted, so should not matter much.\n return filename",
"def get_name(self, col_name, df_slice=\"\", index_names=\"\"):\n # if slice is provided, use it\n if any(df_slice):\n df_slice = df_slice\n # if given index_names, grab a slice using fancy indexing\n elif index_names:\n df_slice = self.df.ix[index_names]\n # otherwise, use the full DataFrame\n else:\n df_slice = self.df\n # if the slice is empty, return \"\"\n if len(df_slice) == 0:\n return \"\"\n # if the column name isn't present in the slice, return \"\"\n if col_name not in df_slice.columns:\n return \"\"\n # otherwise, return the first value from that column\n first_val = df_slice[col_name].dropna()\n if any(first_val):\n return first_val[0]\n else:\n return \"\"\n #return df_slice[col_name].dropna()[0]",
"def _rap_filename(self):\n rap_files = np.array(sorted(glob(join(self.path, self.date.strftime('%Y%m%d'),'*'))))\n # print(\"rap_files\", rap_files)\n rap_dates = self._rap_file_dates(rap_files)\n # print(\"rap_dates\", rap_dates)\n date_diffs = np.abs(rap_dates - self.date)\n # print(\"Date_diffs\", date_diffs)\n file_index = np.where(date_diffs <= pd.Timedelta(\n minutes=self.time_range_minutes))[0]\n # print(\"File_index\", file_index, len(file_index))\n if len(file_index) == 0:\n diff = (date_diffs.total_seconds().values.min() /\n 60) if len(date_diffs) != 0 else float('inf')\n raise FileNotFoundError(\n f\"No RAP files within {self.time_range_minutes} minutes of \"\n f\"{self.date.strftime('%Y-%m-%d %H:%M:%S')}. Nearest file is within {diff:.3f} minutes\")\n else:\n filename = rap_files[np.argmin(date_diffs)]\n return filename",
"def check_df(self):\n if not os.path.exists(self.ran):\n os.makedirs(self.ran)\n df_file = os.path.join(self.ran, 'df.csv')\n if os.path.isfile(df_file):\n self.df = pd.read_csv(df_file, index_col=0, parse_dates=[-11])\n self.tags = pickle.load(open(os.path.join(self.ran, 'tags.txt'), 'rb'))\n self.df['upload_date'] = pd.to_datetime(self.df['upload_date'])\n else:\n self.df_from_files()",
"def parentFileName(*args, **kwargs):\n \n pass",
"def getFilename(self, frameNum):\n\t\treturn self.format % (self.dirname, self.frameName, frameNum)",
"def suggest_name( src ):\n date = src.split(os.sep)[-2]\n basename = os.path.basename(src).split('.', 2)[0]\n if basename in hpcparse.FS_MAP_REV:\n return hpcparse.FS_MAP_REV[basename] + \"_\" + date + \".hdf5\"\n else:\n return basename + \"_\" + date + \".hdf5\"",
"def get_name(self, i):\n over_sample_index = i // self.over_sample\n return self.files[over_sample_index]",
"def get_filename(table_name):\n table_data = TableIndex.query.filter_by(table_name=table_name).first()\n return table_data.filename",
"def get_file_name(self):\n data_file_name= os.path.join(self.data_path, \"{0}_to_{1}_{2}\".format(\n self.mars_dict['date'].split('/')[0],\n self.mars_dict['date'].split('/')[-1],\n self.mars_dict['levtype']))\n return data_file_name",
"def get_dataset_names(dbfilepath, dbroot='', dataset_names=[], pathinh5 = []): \n if is_string(dbfilepath) and (os.path.exists(dbfilepath)):\n h5file = h5py.File(dbfilepath,'r')\n item = h5file\n isdbfile = 1\n elif (isinstance(dbfilepath, h5py.File)) or (isinstance(dbfilepath, h5py.Group)): \n item = dbfilepath\n isdbfile = 0\n else:\n return dataset_names\n \n for key, val in iteritem(dict(item)):\n #printlog(key, val)\n try: \n subitem = dict(val)\n if ('mz' in subitem) or ('sp' in subitem) or ('sp_unfiltered_peaks' in subitem) or (('is_sample_dataset' in subitem.attrs) and (subitem.attrs['is_sample_dataset'] == True)):\n success = 1\n else:\n success = 0\n except Exception as inst:\n #printlog(inst)\n #traceback.print_exc()\n success = 0\n if success==1:\n if is_string(pathinh5):\n success = 0\n h5str = val.name.split('/')[0:2]\n for i in h5str:\n if '/'+i==pathinh5:\n datasetname = re.sub(pathinh5,'',val.name)\n dataset_names.append(datasetname)\n success=1\n break\n else:\n dataset_names.append(val.name)\n if success==0:\n if isinstance(val,h5py.Group):\n dbroot = dbroot + val.name\n dataset_names = get_dataset_names(val,dbroot,dataset_names,pathinh5=pathinh5)\n \n if isdbfile==1:\n h5file.close()\n\n return sorted(dataset_names)",
"def define_output_name(fname):\n phdr = pyfits.getheader(fname,ext=0)\n if 'D001DATA' in phdr:\n outname = phdr['D001DATA']\n if outname.find('['):outname = outname.split('[')[0]\n else:\n frootname = fname.split('_')[0]\n outname = fileutil.buildRootname(frootname)\n if outname is None:\n # make one from the header keywords\n outname = phdr['rootname']\n del phdr\n\n return outname",
"def find_file_by_index(idx):\n files = glob.glob(f'{RUN_FOLDER}/parameter_history*.csv')\n file_list = add_timestamps(files)\n if len(file_list) == 0:\n print('No files in folder')\n return None\n else:\n return file_list.iloc[idx].loc['Filename']",
"def filename_format(self):\n raise NotImplementedError",
"def get_layout_image_filename(self, document, page):\n tmp_image_name, _ = document.file.name.rsplit('.', 1)\n return '{}_{}.jpg'.format(tmp_image_name, page.number)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
make sure that for conf with checkpoints disabled, we get a good list of output files to be produced, with or without part numbers
|
def test_get_nochkpt_outputfiles(self):
# turn off checkpoints in the config but keep part numbers
self.en['wiki'].config.checkpoint_time = 0
pages_per_part = FilePartInfo.convert_comma_sep(
self.en['wiki'].config.pages_per_filepart_history)
content_job = XmlDump("articles", "articlesdump", "short description here",
"long description here",
item_for_stubs=None, item_for_stubs_recombine=None,
prefetch=True, prefetchdate=None,
spawn=True, wiki=self.en['wiki'], partnum_todo=False,
pages_per_part=pages_per_part,
checkpoints=True, checkpoint_file=None,
page_id_range=None, verbose=False)
dfnames = content_job.get_nochkpt_outputfiles(self.en['dump_dir'])
expected_files = [
"enwiki-{today}-pages-articles1.xml.bz2".format(today=self.today),
"enwiki-{today}-pages-articles2.xml.bz2".format(today=self.today),
"enwiki-{today}-pages-articles3.xml.bz2".format(today=self.today),
"enwiki-{today}-pages-articles4.xml.bz2".format(today=self.today)]
expected_dfnames = self.dfnames_from_filenames(expected_files)
self.assertEqual(dfnames, expected_dfnames)
# turn off part numbers now
self.en['wiki'].config.parts_enabled = 0
content_job = XmlDump("articles", "articlesdump", "short description here",
"long description here",
item_for_stubs=None, item_for_stubs_recombine=None,
prefetch=True, prefetchdate=None,
spawn=True, wiki=self.en['wiki'], partnum_todo=False,
pages_per_part=None,
checkpoints=True, checkpoint_file=None,
page_id_range=None, verbose=False)
dfnames = content_job.get_nochkpt_outputfiles(self.en['dump_dir'])
expected_files = [
"enwiki-{today}-pages-articles.xml.bz2".format(today=self.today)]
expected_dfnames = self.dfnames_from_filenames(expected_files)
self.assertEqual(dfnames, expected_dfnames)
|
[
"def missingoutputfiles(self):\n return self.getmissingoutputfiles(self.SlideID, **self.workflowkwargs)",
"def train_output():\n output = [config['out']+\"/{sample}/model.rda\", config['out']+\"/{sample}/variable_importance.tsv\"]\n if check_config('tune'):\n output.append(config['out']+\"/{sample}/tune_matrix.tsv\")\n return output",
"def clean_extra_output_destination():\n global extra_print_dests\n extra_print_dests = []",
"def has__no_valid_output_files(self):\r\n return not self.__has_valid_output_files",
"def get_output_filenames(output_path: str):\n now = datetime.datetime.now()\n now_string = now.strftime(\"%Y%m%d_%H%M%S\")\n filenames ={\n 'train': os.path.join(output_path, \"train_split_\"+now_string+\".csv\"),\n 'val': os.path.join(output_path, \"val_split_\"+now_string+\".csv\")\n }\n write_file(\"/tmp/train.txt\", filenames['train'])\n write_file(\"/tmp/val.txt\", filenames['val'])\n return filenames",
"def all_output_result_files(self):\n return [output for _, output in self.all_artifacts()\n if output.generation_type == output.PER_INPUT]",
"def expected_output_files(fname_prefix):\n expected_suffixes = ['.pdf', '.txt', '.mat'] # Suffixes of output files generated by kubios\n return [fname_prefix + x for x in expected_suffixes]",
"def test_output_exists():\n global out_dir\n assert_true(path.exists(path.join(out_dir, 'run.log')))\n assert_true(path.exists(path.join(out_dir, 'lsi.model')))\n assert_true(path.exists(path.join(out_dir, 'pre.model')))\n assert_true(path.exists(path.join(out_dir, 'lsi.model.npy')))",
"def get_output_files(description):\n log.info(\"fixing output files in description\")\n files = {}\n if description['outFiles'] and description['outFiles'] != \"NULL\":\n out_files = split(description[\"outFiles\"])\n l = len(out_files)\n ddm_endpoint = split(description.get(\"ddmEndPointOut\"), min_len=l)\n destination_se = split(description.get(\"fileDestinationSE\"), min_len=l)\n dblock_token = split(description.get(\"dispatchDBlockTokenForOut\"), min_len=l)\n dblock_tokens = split(description.get(\"prodDBlockTokenForOut\"), min_len=l)\n datasets = split(description.get(\"realDatasets\"), min_len=l)\n dblocks = split(description.get(\"destinationDblock\"), min_len=l)\n destination_dblock_token = split(description.get(\"destinationDBlockToken\"), min_len=l)\n scope = split(description.get(\"scopeOut\"), min_len=l, fill_last=True)\n\n for i, f in enumerate(out_files):\n if f is not None:\n files[f] = {\n \"ddm_endpoint\": ddm_endpoint[i],\n \"storage_element\": destination_se[i],\n \"dispatch_dblock_token\": dblock_token[i],\n \"destination_dblock_token\": destination_dblock_token[i],\n \"dblock_token\": dblock_tokens[i],\n \"dataset\": datasets[i],\n \"dblock\": dblocks[i],\n \"scope\": scope[i]\n }\n\n return fix_log(description, files)",
"def find_output_files(self):\n # find the base path to the files\n if self.input_uri.startswith(\"file\"):\n test_path = self.input_uri.split(\":\")[-1]\n if os.path.isdir(test_path):\n base_path = os.path.abspath(test_path)\n elif os.path.isdir(os.path.join(os.getcwd(), test_path)):\n base_path = os.path.join(os.getcwd(), test_path)\n else:\n raise ValueError(f\"output path {test_path} does not exist\")\n search_fits = f\"{base_path}/{self.ipppssoot.lower()[0:5]}*.fits\"\n # trailer files\n search_tra = f\"{base_path}/{self.ipppssoot.lower()[0:5]}*.tra\"\n # env file\n search_env = f\"{base_path}/{self.ipppssoot.lower()}_cal_env.txt\"\n\n else:\n base_path = os.getcwd()\n subfolder = os.path.join(base_path, \"inputs\", self.ipppssoot)\n search_fits = f\"{subfolder}/{self.ipppssoot.lower()[0:5]}*.fits\"\n search_tra = f\"{subfolder}/{self.ipppssoot.lower()[0:5]}*.tra\"\n search_env = f\"{subfolder}/{self.ipppssoot.lower()}_cal_env.txt\"\n\n self.divider(\"Finding output data for:\", repr(search_fits))\n files = glob.glob(search_fits)\n\n self.divider(\"Finding output trailers for:\", repr(search_tra))\n files.extend(glob.glob(search_tra))\n\n self.divider(\"Finding output cal env file for:\", repr(search_env))\n files.extend(glob.glob(search_env))\n\n return list(sorted(files))",
"def get_output_directories(self):\r\n pass",
"def set_stub_output_filenames(self, parts):\n stub_filenames = []\n for partnum in parts:\n stub_filenames.append(\n \"{wiki}-{date}-stub-articles{partnum}.xml.gz\".format(\n wiki=self.en['wiki'].db_name, date=self.today,\n partnum=partnum))\n return self.dfnames_from_filenames(stub_filenames)",
"def getCandidateFiles(self, outputList, outputLFNs, fileMask):\n fileInfo = {}\n for outputFile in outputList:\n if outputFile.has_key('outputFile') and outputFile.has_key('outputDataSE') and outputFile.has_key('outputPath'):\n fname = outputFile['outputFile']\n fileSE = outputFile['outputDataSE']\n filePath = outputFile['outputPath']\n fileInfo[fname] = {'path' : filePath, 'workflowSE' : fileSE}\n else:\n self.log.error('Ignoring malformed output data specification', str(outputFile))\n\n for lfn in outputLFNs:\n if os.path.basename(lfn) in fileInfo.keys():\n fileInfo[os.path.basename(lfn)]['lfn']=lfn\n self.log.verbose('Found LFN %s for file %s' %(lfn, os.path.basename(lfn)))\n if len(os.path.basename(lfn))>127:\n self.log.error('Your file name is WAAAY too long for the FileCatalog. Cannot proceed to upload.')\n return S_ERROR('Filename too long')\n if len(lfn)>256+127:\n self.log.error('Your LFN is WAAAAY too long for the FileCatalog. Cannot proceed to upload.')\n return S_ERROR('LFN too long')\n \n #Check that the list of output files were produced\n for fileName, metadata in fileInfo.items():\n if not os.path.exists(fileName):\n self.log.error('Output data file %s does not exist locally' % fileName)\n if not self.ignoreapperrors:\n return S_ERROR('Output Data Not Found')\n del fileInfo[fileName]\n #Check the list of files against the output file mask (if it exists)\n #candidateFiles = {}\n #if fileMask:\n ##nothing to do yet, as FileMask is not used\n #for fileName,metadata in fileInfo.items():\n # if metadata['type'].lower() in fileMask or fileName.split('.')[-1] in fileMask:\n # candidateFiles[fileName]=metadata\n # else:\n # self.log.info('Output file %s was produced but will not be treated (outputDataFileMask is %s)' %(fileName,\n # string.join(self.outputDataFileMask,', ')))\n\n #if not candidateFiles.keys():\n # return S_OK({}) #nothing to do\n # candidateFiles = fileInfo\n #else:\n #do not apply mask to files\n \n candidateFiles = fileInfo\n #Sanity check all final candidate metadata keys are present (return S_ERROR if not)\n mandatoryKeys = ['path', 'workflowSE', 'lfn'] #filedict is used for requests\n for fileName, metadata in candidateFiles.items():\n for key in mandatoryKeys:\n if not metadata.has_key(key):\n return S_ERROR('File %s has missing %s' % (fileName, key))\n \n return S_OK(candidateFiles)",
"def test_output_exists():\n global out_dir\n assert_true(path.exists(path.join(out_dir, 'run.log')))\n assert_true(path.exists(path.join(out_dir, 'info.pickle')))\n assert_true(path.exists(path.join(out_dir, 'articles.pickle')))",
"def _base_checkpoint_filenames(steps, checkpoint_type):\n if checkpoint_type == CheckpointType.CHECKPOINT_FLAX:\n results = []\n for step in steps:\n results.append(f'{CHECKPOINT_PREFIX}{step}')\n return results\n elif checkpoint_type == CheckpointType.CHECKPOINT_MULTI_HOST_FLAX:\n results = []\n for i in range(2):\n process_dir = f'{i:03d}'\n for step in steps:\n results.append(os.path.join(process_dir, f'{CHECKPOINT_PREFIX}{step}'))\n return results\n elif checkpoint_type in {\n CheckpointType.CHECKPOINT_PERSISTENCE,\n CheckpointType.CHECKPOINT_GDA,\n }:\n results = []\n for step in steps:\n results.append(f'{CHECKPOINT_PREFIX}{step:08d}')\n return results\n else:\n raise ValueError(f'Unsupported checkpoint_type `{checkpoint_type}`.')",
"def choose_rule_all(config):\n myout = []\n if config[\"to_multiqc\"] == \"TRUE\":\n myout.append(\"output/multiqc_report.html\")\n if config[\"to_bw\"] == \"TRUE\" and config[\"experiment\"] != \"rnaseq\":\n myout.append(\n expand('output/bw/{sample}.unique.sorted.rmdup.chr.bw', \n sample=SAMPLES))\n if config[\"to_bed\"] == \"TRUE\" and config[\"experiment\"] != \"rnaseq\":\n myout.append(\n expand('output/bed/{sample}.unique.sorted.rmdup.chr.bed', \n sample=SAMPLES))\n if config[\"to_tdf\"] == \"TRUE\" and config[\"experiment\"] != \"rnaseq\":\n myout.append(\n expand('output/tdf/{sample}.unique.sorted.rmdup.tdf', \n sample=SAMPLES))\n if config[\"experiment\"] == \"rnaseq\":\n myout.append(\"output/counts_matrix.txt\")\n return(myout)",
"def getBedOutFiles(args):\n bed = os.path.join(args.outDir, 'out.bed')\n bedDetails = os.path.join(args.outDir, 'out_details.bed')\n return bed, bedDetails",
"def getJoblines(targetList, faFiles, outDir, params={}, blatOpt=None, pslFilterOpt=None, splitTarget=True, noOocFile=False, altGenbankDir=None): \n\n if isinstance(targetList, str): # targetList can be a string\n targetList = targetList.split(\",\")\n\n #progDir = dirname(sys.argv[0]) # assume that jobscript is in same dir as we are\n progDir = dirname(__file__) # assume that jobscript is in same dir as we are\n bigBlatPath = join(progDir, bigBlatJobSrc)\n\n assert(isfile(bigBlatPath)) # job python script for cluster jobs must exist\n assert(isdir(outDir)) # output directory must exist\n\n faFiles = resolveToFiles(faFiles)\n\n jobLines = []\n for faFile in faFiles:\n for target in targetList:\n # make output dir for each target\n faBase = splitext(basename(faFile))[0] # strip path and ext\n # append target to outdirname only if several targets specified\n if len(targetList)>1:\n pslDir = join(outDir, faBase, basename(target))\n else:\n pslDir = join(outDir, faBase)\n if not isdir(pslDir):\n os.makedirs(pslDir)\n\n # run mark's splitter\n conf = None\n if splitTarget:\n conf = genbank.Config.Config(GBCONFFILE) \n splitter = GenomeSplitter(target, conf, params)\n windows = splitter.windows\n splitSpecs = []\n for win in windows:\n twoBitSpec = splitter.twoBitFname+\":\"+win.getSpec()\n chrom, startPos = win.seq.id, win.start\n splitSpecs.append( (twoBitSpec, chrom, startPos) )\n\n # try to get ooc file from config file\n oocFile = conf.getDbStrNone(target, \"ooc\")\n if oocFile==\"no\":\n oocFile = None\n\n # try various other paths to find ooc file\n if oocFile==None:\n oocFile = join(dirname(splitter.twoBitFname), \"11.ooc\")\n logging.debug(\"%s not found\" % oocFile)\n if not isfile(oocFile):\n oocFile = splitext(splitter.twoBitFname)[0]+\".11.ooc\"\n logging.debug(\"%s not found\" % oocFile)\n if not isfile(oocFile):\n oocFile = join(splitter.twoBitFname+\".ooc\")\n if not isfile(oocFile):\n logging.debug(\"%s not found\" % oocFile)\n raise Exception(\"no ooc statement in gbconf and %s not found\" % (oocFile))\n\n else:\n if isfile(target):\n twoBitFname = target\n oocFile = join(dirname(target), \"11.ooc\")\n if not isfile(oocFile):\n baseNoExt = splitext(basename(target))[0]\n oocFile = join(dirname(target), baseNoExt+\".ooc\")\n if not isfile(oocFile):\n raise Exception(\"could not find %s nor 11.ooc in same dir\" % oocFile)\n else:\n if conf==None:\n conf = genbank.Config.Config(GBCONFFILE) \n twoBitFname = conf.getDbStr(target, \"clusterGenome\")\n oocFile = conf.getDbStrNo(target, \"ooc\")\n splitSpecs = [ (twoBitFname, \"all\", 0) ]\n if noOocFile:\n oocFile=None\n assert(oocFile!=\"no\")\n\n #for win in windows:\n for twoBitSpec, chrom, startPos in splitSpecs:\n pslFile = join(pslDir, \"%s-%d.psl\" % (chrom, startPos))\n # prep blat/pslFilter options\n oocOpt=None\n if oocFile!=None:\n oocOpt = \"ooc=\"+oocFile\n blatOptString = prefixEmpty(\"-b\", blatOpt, oocOpt)\n filterOptString = prefixEmpty(\"-f\", pslFilterOpt)\n\n # assemble command line for joblist\n cmdParts = [bigBlatPath,\n twoBitSpec,\n \"{check in exists \"+faFile+\"}\",\n \"{check out exists \"+pslFile+\"}\",\n blatOptString,\n filterOptString]\n line = \" \".join(cmdParts)\n if len(line)>1500:\n raise Exception(\"jobList command '%s' is too long for parasol\" % line)\n jobLines.append(line)\n return jobLines",
"def make_loglist(jobs):\n log_files = []\n for ipst in jobs:\n log_files.append(f\"outputs/{ipst}/preview_metrics.txt\")\n log_files.append(f\"outputs/{ipst}/process_metrics.txt\")\n print(\"LogFiles: \", len(log_files))\n return log_files"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
make sure that we get good list of page ranges covered by stubs when we feed in fake tuples describing what the stubs cover
|
def test_get_ranges_covered_by_stubs(self,
mock_get_first_last_page_ids,
mock_list_outfiles_for_input):
mock_list_outfiles_for_input.return_value = self.set_stub_output_filenames([1, 2, 3, 4])
mock_get_first_last_page_ids.side_effect = self.get_fake_first_last_pageids
pages_per_part = FilePartInfo.convert_comma_sep(
self.en['wiki'].config.pages_per_filepart_history)
stubs_job = XmlStub("xmlstubsdump", "First-pass for page XML data dumps",
partnum_todo=False,
jobsperbatch=dumps.dumpitemlist.get_int_setting(
self.en['wiki'].config.jobsperbatch, "xmlstubsdump"),
pages_per_part=pages_per_part)
content_job = XmlDump("articles", "articlesdump", "short description here",
"long description here",
item_for_stubs=stubs_job, item_for_stubs_recombine=None,
prefetch=True, prefetchdate=None,
spawn=True, wiki=self.en['wiki'], partnum_todo=False,
pages_per_part=pages_per_part,
checkpoints=True, checkpoint_file=None,
page_id_range=None, verbose=False)
expected_stub_ranges = [(1, 100, 1), (101, 300, 2),
(301, 600, 3), (601, 3400, 4)]
stub_ranges = content_job.get_ranges_covered_by_stubs(self.en['dump_dir'])
self.assertEqual(stub_ranges, expected_stub_ranges)
|
[
"def get_ranges_covered_by_stubs(self, dump_dir):\n output_dfnames = self.oflister.get_reg_files_for_filepart_possible(\n self.oflister.makeargs(dump_dir, self.list_dumpnames(), self.get_fileparts_list()))\n stub_dfnames = [self.stubber.get_stub_dfname(dfname.partnum, dump_dir)\n for dfname in output_dfnames]\n stub_dfnames = sorted(stub_dfnames, key=lambda thing: thing.filename)\n\n stub_ranges = []\n for stub_dfname in stub_dfnames:\n # why do we do this instead of getting the theoretical page\n # ranges (which are used in page content files anyways, aren't they?)\n # via the wiki config? FIXME\n\n first_page, last_page = self.stubber.get_first_last_page_ids(\n stub_dfname, dump_dir, self._pages_per_part)\n stub_ranges.append((first_page, last_page, stub_dfname.partnum_int))\n\n return stub_ranges",
"def test_get_done_pageranges(self, mock_list_checkpt_files):\n pagerange_strings = {1: ['p1p48', 'p49p65', 'p66p82'],\n 2: ['p135p151', 'p152p168', 'p169p185', 'p203p295'],\n 3: ['p301p319', 'p320p384', 'p438p461', 'p577p599'],\n 4: ['p601p659', 'p660p690', 'p691p712', 'p713p735', 'p736p3024']}\n mock_list_checkpt_files.return_value = self.set_checkpt_filenames(pagerange_strings,\n wiki=self.en['wiki'])\n\n expected_pageranges = [(1, 48, 1), (49, 65, 1), (66, 82, 1),\n (135, 151, 2), (152, 168, 2),\n (169, 185, 2), (203, 295, 2),\n (301, 319, 3), (320, 384, 3),\n (438, 461, 3), (577, 599, 3),\n (601, 659, 4), (660, 690, 4),\n (691, 712, 4), (713, 735, 4),\n (736, 3024, 4)]\n\n content_job = XmlDump(\"articles\", \"articlesdump\", \"short description here\",\n \"long description here\",\n item_for_stubs=None, item_for_stubs_recombine=None,\n prefetch=True, prefetchdate=None,\n spawn=True, wiki=self.en['wiki'], partnum_todo=False,\n pages_per_part=FilePartInfo.convert_comma_sep(\n self.en['wiki'].config.pages_per_filepart_history),\n checkpoints=True, checkpoint_file=None,\n page_id_range=None, verbose=False)\n\n done_pageranges = content_job.get_done_pageranges(self.en['dump_dir'], self.en['wiki'].date)\n self.assertEqual(done_pageranges, expected_pageranges)",
"def test_determine_index_range(self):\n index_range = determine_index_range(2)\n self.assertEqual(index_range, range(101, 201))",
"def get_ranges(self):\r\n pass",
"def calculate_ranges(self):\n if (self.input_file_number_of_pages is None) or (self.input_file_number_of_pages < 20): # 20 to avoid unnecessary parallel operation\n return None\n #\n range_size = math.ceil(self.input_file_number_of_pages / self.cpu_to_use)\n number_of_ranges = math.ceil(self.input_file_number_of_pages / range_size)\n result = []\n for i in range(0, number_of_ranges):\n range_start = (range_size * i) + 1\n range_end = (range_size * i) + range_size\n # Handle last range\n if range_end > self.input_file_number_of_pages:\n range_end = self.input_file_number_of_pages\n result.append((range_start, range_end))\n # Check result\n check_pages = 0\n for created_range in result:\n check_pages += (created_range[1] - created_range[0]) + 1\n if check_pages != self.input_file_number_of_pages:\n raise ArithmeticError(\"Please check 'calculate_ranges' function, something is wrong...\")\n #\n return result",
"def verifyRanges(obj, data, ranges):\n ((minLower, minUpper), (maxLower, maxUpper)) = ranges\n obj.assertGreaterEqual(min(data), minLower)\n obj.assertLess(min(data), minUpper)\n obj.assertGreaterEqual(max(data), maxLower)\n obj.assertLess(max(data), maxUpper)",
"def test_list_cast(self):\n self.plist = PaginatedResourceList(int, self.endpoint)\n\n entire_list = list(self.plist)\n self.assertEqual(entire_list, list(range(self.total)))\n self.assertEqual(len(responses.calls), self.lazy_pages(self.total-1))",
"def test_slice_lazy(self):\n self.plist = PaginatedResourceList(int, self.endpoint)\n\n self.assertEqual(self.plist[1:3], list(range(1, 3)))\n self.assertEqual(len(responses.calls), self.lazy_pages(2))\n\n self.assertEqual(self.plist[1:7:2], list(range(1, 7, 2)))\n self.assertEqual(len(responses.calls), self.lazy_pages(6))\n\n self.assertEqual(self.plist[10:13], list(range(10, 13)))\n self.assertEqual(len(responses.calls), self.lazy_pages(12))",
"def test_both_bills_inside_range(self, fake_billed_org, fake_bills):\n bill_1, bill_2 = fake_bills\n\n assert list(fake_billed_org.bills_covering_period(\n weeks_ago(7)(),\n weeks_ago(6)(),\n )) == [bill_2, bill_1]",
"def test_pageList(self):\n pageList = self.positionController.pageList()\n assert len(pageList) == 4\n assert pageList == [0, 5, 10, 15]",
"def prepare_range(self, pages_names: List[str]) -> range:\r\n\r\n # if length is 0 then there is only 1 page\r\n if len(pages_names) != 0:\r\n last_page = int(pages_names[len(pages_names) - 1])\r\n else:\r\n last_page = 1\r\n\r\n return range(1, last_page + 1)",
"def _test_merge_port_ranges_helper(self, expected, result):\n self.assertEqual(len(expected), len(result))\n for (range_min, range_max, conj_ids), result1 in zip(\n expected, result):\n self.assertEqual(range_min, result1[0].get('port_range_min'))\n self.assertEqual(range_max, result1[0].get('port_range_max'))\n self.assertEqual(conj_ids, set(result1[1]))",
"def test_target_ranges(self):\r\n self.goto_mycare()\r\n self.goto_target_range_screen()\r\n highvalue = self.critical_high()\r\n self.set_critical_low()\r\n initialvalue = self.after_meal_overall_high(highvalue)\r\n self.before_meal_fasting_high(initialvalue)\r\n self.low()\r\n self.critical_low()\r\n self.summary_view()",
"def test_range_initialization(self):\n assert AddressFilter.Range(\"*\").get_range() == (0, 65535)\n assert AddressFilter.Range(\"5\").get_range() == (5, 5)\n assert AddressFilter.Range(\"0\").get_range() == (0, 0)\n assert AddressFilter.Range(\"3-5\").get_range() == (3, 5)\n assert AddressFilter.Range(\"5-3\").get_range() == (3, 5)\n assert AddressFilter.Range(\"-5\").get_range() == (0, 5)\n assert AddressFilter.Range(\"5-\").get_range() == (5, 65535)\n assert AddressFilter.Range(\"70-100\").get_range() == (70, 100)",
"async def test_range_types(self):\n\n cases = [\n ('int4range', [\n [(1, 9), asyncpg.Range(1, 10)],\n [asyncpg.Range(0, 9, lower_inc=False, upper_inc=True),\n asyncpg.Range(1, 10)],\n [(), asyncpg.Range(empty=True)],\n [asyncpg.Range(empty=True), asyncpg.Range(empty=True)],\n [(None, 2), asyncpg.Range(None, 3)],\n [asyncpg.Range(None, 2, upper_inc=True),\n asyncpg.Range(None, 3)],\n [(2,), asyncpg.Range(2, None)],\n [(2, None), asyncpg.Range(2, None)],\n [asyncpg.Range(2, None), asyncpg.Range(2, None)],\n [(None, None), asyncpg.Range(None, None)],\n [asyncpg.Range(None, None), asyncpg.Range(None, None)]\n ])\n ]\n\n for (typname, sample_data) in cases:\n st = await self.con.prepare(\n \"SELECT $1::\" + typname\n )\n\n for sample, expected in sample_data:\n with self.subTest(sample=sample, typname=typname):\n result = await st.fetchval(sample)\n self.assertEqual(result, expected)\n\n with self.assertRaisesRegex(\n asyncpg.DataError, 'list, tuple or Range object expected'):\n await self.con.fetch(\"SELECT $1::int4range\", 'aa')\n\n with self.assertRaisesRegex(\n asyncpg.DataError, 'expected 0, 1 or 2 elements'):\n await self.con.fetch(\"SELECT $1::int4range\", (0, 2, 3))\n\n cases = [(asyncpg.Range(0, 1), asyncpg.Range(0, 1), 1),\n (asyncpg.Range(0, 1), asyncpg.Range(0, 2), 2),\n (asyncpg.Range(empty=True), asyncpg.Range(0, 2), 2),\n (asyncpg.Range(empty=True), asyncpg.Range(empty=True), 1),\n (asyncpg.Range(0, 1, upper_inc=True), asyncpg.Range(0, 1), 2),\n ]\n for obj_a, obj_b, count in cases:\n dic = {obj_a: 1, obj_b: 2}\n self.assertEqual(len(dic), count)",
"def test_lowerbounds(self):\n tests = [{'pair': [256, 4], 'result': 5}, \n {'pair': [63, 3], 'result': 4}, \n {'pair': [46, 4], 'result': 4}]\n for i in range(len(tests)):\n with self.subTest(i=i):\n self.assertEqual(mami.find_lower_bound(*tests[i]['pair']),tests[i]['result'])",
"def test_get_range_dict():\n df = pd.read_csv(op.join(data_path,'faux_data.csv'))\n rd = mli.get_range_dict(df, max_pts=172)\n t1 = len(rd['int1']), len(rd['int2']) == (100, 172)\n t2 = len(rd['float1']) == 172\n t3 = len(rd['str1']), len(rd['str2']) == (172, 50)\n assert(t1 and t2 and t3)",
"def test_get_inbox_repliers(self):\n pass",
"def range_test(self, *args, **kwargs):\n import sys\n print('sys.maxsize:',sys.maxsize)\n # empty range\n print('list(range(0)):',list(range(0)))\n # using range(stop)\n print('list(range(10)):',list(range(10)))\n # using range(start, stop)\n print('list(range(1, 10)):',list(range(1, 10)))\n start = 2\n stop = 14\n step = 2\n print('list(range(start, stop, step)):',list(range(start, stop, step)))\n start = 2\n stop = -14\n step = -2\n print('list(range(start, stop, step)):',list(range(start, stop, step)))\n # value constraint not met\n print('list(range(start, 14, step)):',list(range(start, 14, step)))\n r = range(0, 20, 2)\n print('r:',r)\n print('11 in r:',11 in r)\n print('10 in r:',10 in r)\n print('r.index(10):',r.index(10))\n print('r[5]:',r[5])\n print('r[:5]:',r[:5])\n print('r[-1]:',r[-1])\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
make sure that we get a reasonable list of completed pageranges when we feed in a list of complete output files (supposedly found in the dump run output directory)
|
def test_get_done_pageranges(self, mock_list_checkpt_files):
pagerange_strings = {1: ['p1p48', 'p49p65', 'p66p82'],
2: ['p135p151', 'p152p168', 'p169p185', 'p203p295'],
3: ['p301p319', 'p320p384', 'p438p461', 'p577p599'],
4: ['p601p659', 'p660p690', 'p691p712', 'p713p735', 'p736p3024']}
mock_list_checkpt_files.return_value = self.set_checkpt_filenames(pagerange_strings,
wiki=self.en['wiki'])
expected_pageranges = [(1, 48, 1), (49, 65, 1), (66, 82, 1),
(135, 151, 2), (152, 168, 2),
(169, 185, 2), (203, 295, 2),
(301, 319, 3), (320, 384, 3),
(438, 461, 3), (577, 599, 3),
(601, 659, 4), (660, 690, 4),
(691, 712, 4), (713, 735, 4),
(736, 3024, 4)]
content_job = XmlDump("articles", "articlesdump", "short description here",
"long description here",
item_for_stubs=None, item_for_stubs_recombine=None,
prefetch=True, prefetchdate=None,
spawn=True, wiki=self.en['wiki'], partnum_todo=False,
pages_per_part=FilePartInfo.convert_comma_sep(
self.en['wiki'].config.pages_per_filepart_history),
checkpoints=True, checkpoint_file=None,
page_id_range=None, verbose=False)
done_pageranges = content_job.get_done_pageranges(self.en['dump_dir'], self.en['wiki'].date)
self.assertEqual(done_pageranges, expected_pageranges)
|
[
"def get_done_pageranges(self, dump_dir, date):\n chkpt_dfnames = self.oflister.list_checkpt_files(\n self.oflister.makeargs(dump_dir, [self.get_dumpname()],\n parts=PARTS_ANY, date=date))\n # get the page ranges covered by existing checkpoint files\n done_pageranges = [(dfname.first_page_id_int, dfname.last_page_id_int,\n dfname.partnum_int)\n for dfname in chkpt_dfnames]\n done_pageranges = sorted(done_pageranges, key=lambda x: int(x[0]))\n if self.verbose:\n print(\"done_pageranges:\", done_pageranges)\n return done_pageranges",
"def get_sorted_export_files():\n ...",
"def scan_output(self):\n import os\n import re\n\n import itertools\n\n def ranges(i):\n for a, b in itertools.groupby(enumerate(i), lambda x_y1: x_y1[1] - x_y1[0]):\n b = list(b)\n yield b[0][1], b[-1][1]\n\n expected = list(range(1, self.njobs + 1))\n existing = os.listdir(self.folder_out)\n found = [idx for idx in expected if self.outfile(idx) in existing]\n found = list(ranges(found))\n missing = [\n idx for idx in expected if self.outfile(idx) not in existing\n ]\n num_missing = len(missing)\n missing = list(ranges(missing))\n print('------------------------------')\n print('missing outputfiles:')\n print((',\\n'.join([\n '{:}-{:}'.format(*tup)\n if not tup[0] == tup[1] else '{:}'.format(tup[0])\n for tup in missing\n ])))\n print(('total missing files:', num_missing))\n print('------------------------------')\n return found, missing",
"def clean_extra_output_destination():\n global extra_print_dests\n extra_print_dests = []",
"def generateresultspage(self, redirlist, pagename, header, footer):\n #finalpage = header\n finalpage = u''\n if self.getOption('section'):\n finalpage += u'== ' + self.getOption('section') + u' ==\\n'\n #res = sorted(redirlist, key=redirlist.__getitem__, reverse=False)\n res = sorted(redirlist.keys())\n itemcount = 0\n totalcount = len(res)\n pagecount = 0\n\n if self.getOption('count'):\n self.savepart(finalpage,pagename,pagecount,header,self.generateprefooter(pagename,totalcount,pagecount)+footer)\n return(1)\n \n for i in res:\n\n title = i\n link = redirlist[i]\n\n #finalpage += u'\\n# [[' + title + u']]'\n linenumber = pagecount * int(self.getOption('maxlines')) + itemcount + 1\n if self.getOption('edit'):\n finalpage += u'\\n|-\\n| %i || {{Edytuj| %s | %s }} || %i || ' % (linenumber,title,title,len(link))\n else:\n finalpage += u'\\n|-\\n| %i || [[%s]] || %i || ' % (linenumber, title,len(link))\n if self.getOption('cite') and not self.getOption('negative'):\n #results are list\n if self.getOption('nowiki'):\n finalpage += u' - <nowiki>'\n firstlink = True\n for r in link:\n if not firstlink:\n finalpage += '<br />'\n finalpage += r['link']\n firstlink = False\n if self.getOption('nowiki'):\n finalpage += u'</nowiki>'\n\n itemcount += 1\n\n if itemcount > int(self.getOption('maxlines'))-1:\n pywikibot.output(u'***** saving partial results *****')\n self.savepart(finalpage,pagename,pagecount,header,self.generateprefooter(pagename,totalcount,pagecount)+footer)\n finalpage = u''\n itemcount = 0\n pagecount += 1\n\n #save remaining results\n pywikibot.output(u'***** saving remaining results *****')\n self.savepart(finalpage,pagename,pagecount,header,self.generateprefooter(pagename,totalcount,pagecount)+footer)\n\n\n return(pagecount)",
"def file_chunker(files, files_per_output=-1, events_per_output=-1, MB_per_output=-1, flush=False):\n \n num = 0\n chunk, chunks = [], []\n for f in files:\n # if the current file's nevents would push the chunk\n # over the limit, then start a new chunk\n if ((0 < files_per_output <= num) or \n (0 < events_per_output < num+f.get_nevents()) or\n (0 < MB_per_output < num+f.get_filesizeMB())\n ):\n chunks.append(chunk)\n num, chunk = 0, []\n chunk.append(f)\n if (files_per_output > 0): num += 1\n elif (events_per_output > 0): num += f.get_nevents()\n elif (MB_per_output > 0): num += f.get_filesizeMB()\n # push remaining partial chunk if flush is True\n if (len(chunk) == files_per_output) or (flush and len(chunk) > 0):\n chunks.append(chunk)\n chunk = []\n # return list of lists (chunks) and leftover (chunk) which should\n # be empty if flushed\n return chunks, chunk",
"def analyse_multiple_editing_percent_files(pileup_files, out_pileups, summary_files,total_summary_file=None, add_headers=False,\n summary_only=False, min_editing=0.0, max_editing=100.0, max_noise=100.0, min_reads=1,\n edit_tag=None, parallel_limit=2,Disable_parallel=True ):\n\n\n # collecting commands for parallel\n commands = []\n for in_file, out_file, summary_file in zip(pileup_files, out_pileups, summary_files):\n # False,False map to add_headers and summary_only\n command = [analyse_editing_percent,in_file,out_file,summary_file,\n add_headers,summary_only,min_editing,max_noise,min_reads,edit_tag]\n commands.append(command)\n\n # run parallel editing percent + summary printing for the pileup files\n parallel_commands(commands,parallel_limit=parallel_limit,Disable_parallel=Disable_parallel)\n\n\n # if total summary was requested,\n # get the summary strings to write to the summary output file\n if total_summary_file != None:\n lines_list = get_full_summary_str(summary_files)\n\n with open(total_summary_file, \"w\") as sumfile:\n sumfile.write(\"\\n\".join(lines_list))\n\n return",
"def run(self, runner):\n # here we will either clean up or not depending on how we were called\n # FIXME callers should set this appropriately and they don't right now\n self.cleanup_old_files(runner.dump_dir, runner)\n\n # clean up all tmp output files from previous attempts of this job\n # for this dump wiki and date; they may have been left around from\n # an interrupted or failed earlier run\n self.cleanup_tmp_files(runner.dump_dir, runner)\n\n # get the names of the output files we want to produce\n dfnames_todo = self.get_content_dfnames_todo(runner)\n\n # set up a prefetch arg generator if needed\n prefetcher = self.get_prefetcher(runner.wiki)\n\n # accumulate all the info about stub inputs, page content inputs\n # for prefetches, output files and so on\n wanted = self.get_wanted(dfnames_todo, runner, prefetcher)\n\n # figure out what temp stub files we need to write, if we\n # are producing output files covering page ranges (each\n # output file will cover the same content as its stub input\n # file)\n to_generate = self.get_to_generate_for_temp_stubs(wanted)\n\n # figure out how many stub input files we generate at once\n batchsize = self.get_batchsize(stubs=True)\n\n commands, output_dfnames = self.stubber.get_commands_for_temp_stubs(to_generate, runner)\n\n worker_type = self.doing_batch_jobs(runner)\n\n # secondary batch workers should not generate temp stubs, that should\n # be done only if we run without batches or by the primary worker\n if worker_type != 'secondary_batches':\n self.stubber.run_temp_stub_commands(runner, commands, batchsize)\n # check that the temp stubs are not garbage, though they may be empty so\n # we should (but don't yet) skip that check. FIXME\n self.stubber.check_temp_stubs(runner, self.move_if_truncated, output_dfnames)\n\n # if we had to generate or need to use temp stubs, skip over those with no pages in them;\n # it's possible a page range has nothing in the stub file because they were all deleted.\n # we have some projects with e.g. 35k pages in a row deleted!\n todo = [entry for entry in wanted if not entry['generate'] or\n not self.stubber.has_no_pages(entry['stub'], runner, tempdir=True)]\n\n # now figure out how many page content files we generate at once\n batchsize = self.get_batchsize()\n\n if worker_type == 'primary_batches':\n # main worker. do all the setup so other workers as well as this one\n # can claim and run batches\n return self.do_run_batches(todo, batchsize, 'batch_primary', runner)\n\n if worker_type == 'secondary_batches':\n # claim and run batches only. no index.html or status updates, that's\n # for the main worker\n\n # FIXME suppose there are no batch files yet? we exit and that's that?\n # do we sleep and loop a few times just in case or is there a point?\n return self.do_run_batches(todo, batchsize, 'batch_secondary', runner)\n\n if worker_type == 'regular':\n # the plain old boring 'do everything' code path\n commands = self.get_commands_for_pagecontent(todo, runner)\n self.run_page_content_commands(commands, runner, 'regular')\n return True\n\n # what kind of batch worker am I? WTF knows.\n return False",
"def get_dfnames_from_pageranges(self, pageranges):\n dfnames = []\n for startpage, endpage, partnum in pageranges:\n dfname = DumpFilename(\n self.wiki, self.wiki.date, self.dumpname,\n self.filetype, self.file_ext, partnum,\n DumpFilename.make_checkpoint_string(startpage, endpage),\n False)\n dfnames.append(dfname)\n return dfnames",
"def find_multi_mappers_pe(inputf,output,num_procs=1,keep_temp_files=False,append=False):\n sam_header = []\n file_handles = {}\n f = open(inputf,'r')\n cycle = itertools.cycle(range(0,num_procs))\n for file_num in xrange(0,num_procs):\n if append == False:\n file_handles[file_num]=open(output+\"_sorted_\"+str(file_num),'w')\n else:\n file_handles[file_num]=open(output+\"_sorted_\"+str(file_num),'a')\n for line in f:\n if line[0] == \"@\":\n continue\n\n fields = line.split(\"\\t\")\n\n #To deal with the way chromosomes were named in some of our older references\n fields[2] = fields[2].replace(\"_f\",\"\")\n fields[2] = fields[2].replace(\"_r\",\"\")\n\n if int(fields[1]) & 2 != 0:\n header = fields[0].split(\"!\")\n #BIG ASSUMPTION!! NO TABS IN FASTQ HEADER LINES EXCEPT THE ONES I ADD!\n if (int(fields[1]) & 16) == 16:\n strand = \"-\"\n else:\n strand = \"+\"\n if (int(fields[1]) & 128) == 128:\n is_R2 = True\n else:\n is_R2 = False\n seq = decode_converted_positions(fields[9],header[-1],strand,is_R2)\n file_handles[cycle.next()].write(\" \".join(header[:-1])+\"\\t\"+\"\\t\".join(fields[1:9])+\"\\t\"+seq+\"\\t\"+\"\\t\".join(fields[10:]))\n #file_handles[cycle.next()].write(\"\\t\".join(fields[0:9])+\"\\t\"+seq+\"\\t\"+\"\\t\".join(fields[10:]))\n f.close()\n if keep_temp_files == False:\n subprocess.check_call(shlex.split(\"rm \"+inputf))\n pass\n for file_num in xrange(0,num_procs):\n file_handles[file_num].close()",
"def main_split_urls():\n\n all_urls_path = \"data/urls/collected/all.csv\"\n url_path = \"data/urls/to_collect/set0_6100_8100.txt\"\n out_path = \"data/urls/to_collect/finish_set0_6100_8100.txt\"\n remaining_urls(all_urls_path, url_path, out_path)",
"def test_get_ranges_covered_by_stubs(self,\n mock_get_first_last_page_ids,\n mock_list_outfiles_for_input):\n mock_list_outfiles_for_input.return_value = self.set_stub_output_filenames([1, 2, 3, 4])\n mock_get_first_last_page_ids.side_effect = self.get_fake_first_last_pageids\n\n pages_per_part = FilePartInfo.convert_comma_sep(\n self.en['wiki'].config.pages_per_filepart_history)\n\n stubs_job = XmlStub(\"xmlstubsdump\", \"First-pass for page XML data dumps\",\n partnum_todo=False,\n jobsperbatch=dumps.dumpitemlist.get_int_setting(\n self.en['wiki'].config.jobsperbatch, \"xmlstubsdump\"),\n pages_per_part=pages_per_part)\n\n content_job = XmlDump(\"articles\", \"articlesdump\", \"short description here\",\n \"long description here\",\n item_for_stubs=stubs_job, item_for_stubs_recombine=None,\n prefetch=True, prefetchdate=None,\n spawn=True, wiki=self.en['wiki'], partnum_todo=False,\n pages_per_part=pages_per_part,\n checkpoints=True, checkpoint_file=None,\n page_id_range=None, verbose=False)\n\n expected_stub_ranges = [(1, 100, 1), (101, 300, 2),\n (301, 600, 3), (601, 3400, 4)]\n stub_ranges = content_job.get_ranges_covered_by_stubs(self.en['dump_dir'])\n self.assertEqual(stub_ranges, expected_stub_ranges)",
"def generateresultspage(self, redirlist, pagename, header, footer):\n finalpage = ''\n res = sorted(redirlist, key=redirlist.__getitem__, reverse=True)\n if self.getOption('test'):\n pywikibot.output('***** INPUT *****')\n pywikibot.output(redirlist)\n pywikibot.output('***** RESULT *****')\n pywikibot.output(res)\n linkcount = 0\n rangenumber = 0\n limit,subpage = self.ranges[rangenumber]\n for i in res:\n count = self.redirCount(redirlist[i])\n l = redirlist[i]['list']\n if self.getOption('resprogress'):\n pywikibot.output('i:[[%s]], count:%i, l:%s' % (i,count,l))\n\n if count < int(self.getOption('minlinks')):\n return\n while count < limit:\n self.savepart(finalpage,subpage,self.getOption('outpage'),header,footer)\n rangenumber += 1\n limit,subpage = self.ranges[rangenumber]\n finalpage = ''\n\n suffix = self.suffix(count)\n finalpage += u'\\n# [[%s]] ([[Specjalna:Linkujące/%s|%s link%s]]) ← [[%s]]' % (i,i,str(count),suffix,']], [['.join(l))\n #finalpage += u'\\n# [[%s]] ([[Specjalna:Linkujące/%s|%i link%s]])' % (i,i,count,self.suffix(count))\n return",
"def perform_extraction(dumpdir:str, outputdir:str, logger:logging.Logger) -> None:\n\n\tparagraphs_file_name = \"paragraphs.txt\"\n\tfulltexts_dir_name = \"fulltexts\"\n\tknowledgebase_file_name = \"incomlete-kb.txt\" # incomplete kb\n\tkb_file_name = \"knowledgebase.txt\" # complete\n\n\n\tparagraphs_file = open(os.path.join(outputdir, paragraphs_file_name), \"w\")\n\tfulltexts_dir = os.path.join(outputdir, fulltexts_dir_name)\n\tif not os.path.exists(fulltexts_dir):\n\t\tos.makedirs(fulltexts_dir)\n\tknowledgebase_file = open(os.path.join(outputdir, knowledgebase_file_name), \"w\")\n\n\n\tlogger.info(\"==== Performing extraction ====\")\n\n\t# variables for logging and progress tracking, begin with 'log_'\n\tlog_totalpagecount = 0\n\tlog_pagechunk = 10000 # display info after processing this many pages\n\n\n\t### WARNING!: iterates over overy file in every subdirectory of 'dumpdir'! \n\t### Make sure no other subdirectories or files are in there\n\tfor subdir in [os.path.join(dumpdir,node) for node in os.listdir(dumpdir) if os.path.isdir(os.path.join(dumpdir, node))]:\n\t\tfor file_path in [os.path.join(subdir, node) for node in os.listdir(subdir) if os.path.isfile(os.path.join(subdir, node))]:\n\t\t\thtml_file = open(file_path, \"r\")\n\t\t\twhile True:\n\t\t\t\tline = html_file.readline()\n\t\t\t\t\n\t\t\t\t# Found beginning of a wiki page\n\t\t\t\tif line.strip().startswith('<doc'): # process page\n\t\t\t\t\tdoc_lines = []\n\t\t\t\t\tdoc_lines.append(line.strip())\n\t\t\t\t\t\n\t\t\t\t\t# Read all lines of wikipage ( <doc ...> * </doc> )\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tnext_line = html_file.readline().strip()\n\t\t\t\t\t\tif not next_line == '': # discard blank lines\n\t\t\t\t\t\t\tdoc_lines.append(next_line)\n\t\t\t\t\t\tif next_line == \"</doc>\":\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\t# join by newline (which separates paragraphs in the html file) - \n\t\t\t\t\t# reading separate lines and then str.join() is faster than gradual concatenation)\n\t\t\t\t\tdoc_text = \"\\n\".join(doc_lines)\n\t\t\t\t\t# Html text of wiki page (from <doc ...> to </doc>) is in doc_text. 
Now convert into plain text and extract info\n\t\t\t\t\tpage_title, page_uri, page_id, page_first_paragraph, page_fulltext = extract_page_info(doc_text) \n\n\t\t\t\t\tif ' (rozcestník)' in page_title or page_title.lower() == 'hlavní strana':\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tpage_title = re.sub(r' \\([^\\)]*\\)$', '', page_title)\n\n\t\t\t\t\t# write data to specific files:\n\t\t\t\t\tparagraphs_file.write(page_uri + '\\t' + page_first_paragraph + '\\n')\n\n\t\t\t\t\t\n\t\t\t\t\t# replace '/' in the #title with %2F - its URL escape - because '/' is forbidden in filenames\n\t\t\t\t\tescaped_page_title = re.sub(r'/', r'%2F', page_title) \n\t\t\t\t\ttemp_filename = \"wp_\" + escaped_page_title # filename: wp_ (as wikipage) + page title\n\t\t\t\t\ttemp_dir = os.path.join(fulltexts_dir, \"d_\" + get_dir_name_fulltexts(escaped_page_title)) # dirname - use first two letters of the page title\n\t\t\t\t\tif not os.path.exists(temp_dir):\n\t\t\t\t\t\tos.makedirs(temp_dir)\n\n\t\t\t\t\ttemp_fulltext_file = open(os.path.join(temp_dir, temp_filename + '.txt'), \"w\")\n\t\t\t\t\ttemp_fulltext_file.write(page_fulltext) \n\t\t\t\t\ttemp_fulltext_file.close()\n\t\t\t\t\n\t\t\t\t\tentity_line = \"{}\\t{}\\t{}\\t{}\".format(page_id, page_uri, page_title, page_first_paragraph)\n\t\t\t\t\tknowledgebase_file.write(entity_line + '\\n')\n\n\t\t\t\t\tlog_totalpagecount += 1\n\t\t\t\t\t# logging\n\t\t\t\t\tif log_totalpagecount % log_pagechunk == 0:\n\t\t\t\t\t\tlogger.info(\"Processed {} pages\".format(log_totalpagecount))\n\n\n\t\t\t\telif line == \"\": # end of file reached\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\n\t# Close opened files:\n\tparagraphs_file.close()\n\tknowledgebase_file.close()\n\n\tlogger.info(\"==== Extraction complete : Pages processed: {} ====\".format(log_totalpagecount))",
"def find_multi_mappers(inputf,output,num_procs=1,keep_temp_files=False,append=False):\n sam_header = []\n file_handles = {}\n f = open(inputf,'r')\n cycle = itertools.cycle(range(0,num_procs))\n for file_num in xrange(0,num_procs):\n if append == False:\n file_handles[file_num]=open(output+\"_sorted_\"+str(file_num),'w')\n else:\n file_handles[file_num]=open(output+\"_sorted_\"+str(file_num),'a')\n for line in f:\n #To deal with the way chromosomes were named in some of our older references\n if line[0] == \"@\":\n continue\n\n fields = line.split(\"\\t\")\n fields[2] = fields[2].replace(\"_f\",\"\")\n fields[2] = fields[2].replace(\"_r\",\"\")\n if fields[2] != \"*\":\n header = fields[0].split(\"!\")\n #BIG ASSUMPTION!! NO TABS IN FASTQ HEADER LINES EXCEPT THE ONES I ADD!\n if (int(fields[1]) & 16) == 16:\n strand = \"-\"\n elif (int(fields[1]) & 16) == 0:\n strand = \"+\"\n seq = decode_c_positions(fields[9],header[-1],strand)\n file_handles[cycle.next()].write(\" \".join(header[:-1])+\"\\t\"+\"\\t\".join(fields[1:9])+\"\\t\"+seq+\"\\t\"+\"\\t\".join(fields[10:]))\n f.close()\n if keep_temp_files == False:\n subprocess.check_call(shlex.split(\"rm \"+inputf))\n for file_num in xrange(0,num_procs):\n file_handles[file_num].close()",
"def test_print_output_files_stats(self):\n self.create_simple_file(\"../output/alpha.txt\")\n self.create_simple_file(\"../output/beta.txt\")\n try: # redirect stdout to string\n old_stdout = sys.stdout\n sys.stdout = my_stdout = StringIO()\n self.my_emr.print_local_output_files_stats()\n finally: # always restore\n sys.stdout = old_stdout\n captured_output = my_stdout.getvalue()\n\n valid_content = False\n if (\n \"FILES CREATED\" in captured_output and\n \"alpha.txt\" in captured_output and\n \"beta.txt\" in captured_output\n ): valid_content = True\n self.assertTrue(valid_content, \"should have two file listings\")",
"def loader_page_chunker_bbm(ull):\n chunks = chunker(ull.find_all(\"li\"), 3)\n for chunk in chunks:\n loader_page_printer(chunk)",
"def process_pdfs(rootpath):\r\n\ttimeouts = []\r\n\terrors = []\r\n\tfor root, dirs, files in os.walk(rootpath):\r\n\t\tdirectory = Directory(root)\r\n\t\tdb_session.add(directory)\r\n\t\tdb_session.commit()\r\n\t\tprint 'Converting files in: %s' % (root)\r\n\t\tfor name in files:\r\n\t\t\tif name[-4:] == '.pdf':\r\n\t\t\t\ttry:\r\n\t\t\t\t\tif already_processed(name, root):\r\n\t\t\t\t\t\tprint 'Already converted: %s' % (name)\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\tprint 'Converting: %s' % (name)\r\n\t\t\t\t\tdoc = Document(name)\r\n\t\t\t\t\tdoc.directory_id = directory.id\r\n\t\t\t\t\tq = multiprocessing.Queue()\r\n\t\t\t\t\tp = multiprocessing.Process(target=extract_pages, args=(os.path.join(root, name), q,))\r\n\t\t\t\t\tp.start()\r\n\t\t\t\t\tp.join(timeout=5*60)\r\n\t\t\t\t\tif p.is_alive():\r\n\t\t\t\t\t\t# Timeout has expired\r\n\t\t\t\t\t\tp.terminate()\r\n\t\t\t\t\t\tprint 'Processing timeout: %s' % name\r\n\t\t\t\t\t\ttimeouts.append({'dir':root, 'file':name})\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\tpages = q.get()\r\n\t\t\t\t\tif not pages:\r\n\t\t\t\t\t\tprint 'Error processing: %s' % name\r\n\t\t\t\t\t\terrors.append({'dir':root, 'file':name})\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\tfor i, text in enumerate(pages):\r\n\t\t\t\t\t\tpage = Page(text, i)\r\n\t\t\t\t\t\tdoc.pages.append(page)\r\n\t\t\t\t\tdb_session.add(doc)\r\n\t\t\t\t\tdb_session.commit()\r\n\t\t\t\texcept:\r\n\t\t\t\t\tprint 'Error processing: %s' % name\r\n\t\t\t\t\terrors.append({'dir':root, 'file':name})\r\n\t\t\telse:\r\n\t\t\t\tprint 'Skipping: %s' % (name)\r\n\t\tdb_session.add(directory)\r\n\t\tdb_session.commit()\r\n\t\treturn {'timeouts':timeouts, 'errors':errors}",
"def merge_sorted_multimap_pe(files, output):\n lines = {}\n fields = {}\n output_handles = {}\n file_handles = {}\n\n total_unique = 0\n count= 0\n cycle = itertools.cycle(range(0,len(files)))\n\n for index,filen in enumerate(files):\n output_handles[index] = open(output+\"_no_multimap_\"+str(index),'a')\n file_handles[filen]=open(filen,'r')\n lines[filen]=file_handles[filen].readline()\n fields[filen] = lines[filen].split(\"\\t\")[0]#Read ID\n while True:\n all_fields = [field for field in fields.values() if field != \"\"]\n if len(all_fields) == 0:\n break\n min_field = min(all_fields)\n count_1 = 0\n count_2 = 0\n current_line_1 = \"\"\n current_line_2 = \"\"\n for key in fields:\n while fields[key] == min_field:\n #Need to modify this in order to deal with PE data\n if(int(lines[key].split(\"\\t\")[1]) & 64 == 64): #First in pair\n count_1 += 1\n current_line_1 = lines[key]\n else:\n count_2 += 1\n current_line_2 = lines[key]\n lines[key]=file_handles[key].readline()\n fields[key]=lines[key].split(\"\\t\")[0]\n #Check if there is only one valid alignment\n if count_1 == 1:\n index = cycle.next()\n #Output\n output_handles[index].write(current_line_1)\n output_handles[index].write(current_line_2)\n output_handles[index].flush()\n total_unique += 1\n\n for index,filen in enumerate(files):\n output_handles[index].close()\n file_handles[filen].close()\n\n ##Yupeng debug\n# exit()\n\n return total_unique"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
given a number of parts, put together a list of DumpFilenames for the corresponding stub output files and return them
|
def set_stub_output_filenames(self, parts):
stub_filenames = []
for partnum in parts:
stub_filenames.append(
"{wiki}-{date}-stub-articles{partnum}.xml.gz".format(
wiki=self.en['wiki'].db_name, date=self.today,
partnum=partnum))
return self.dfnames_from_filenames(stub_filenames)
|
[
"def getBedOutFiles(args):\n bed = os.path.join(args.outDir, 'out.bed')\n bedDetails = os.path.join(args.outDir, 'out_details.bed')\n return bed, bedDetails",
"def get_output_files(description):\n log.info(\"fixing output files in description\")\n files = {}\n if description['outFiles'] and description['outFiles'] != \"NULL\":\n out_files = split(description[\"outFiles\"])\n l = len(out_files)\n ddm_endpoint = split(description.get(\"ddmEndPointOut\"), min_len=l)\n destination_se = split(description.get(\"fileDestinationSE\"), min_len=l)\n dblock_token = split(description.get(\"dispatchDBlockTokenForOut\"), min_len=l)\n dblock_tokens = split(description.get(\"prodDBlockTokenForOut\"), min_len=l)\n datasets = split(description.get(\"realDatasets\"), min_len=l)\n dblocks = split(description.get(\"destinationDblock\"), min_len=l)\n destination_dblock_token = split(description.get(\"destinationDBlockToken\"), min_len=l)\n scope = split(description.get(\"scopeOut\"), min_len=l, fill_last=True)\n\n for i, f in enumerate(out_files):\n if f is not None:\n files[f] = {\n \"ddm_endpoint\": ddm_endpoint[i],\n \"storage_element\": destination_se[i],\n \"dispatch_dblock_token\": dblock_token[i],\n \"destination_dblock_token\": destination_dblock_token[i],\n \"dblock_token\": dblock_tokens[i],\n \"dataset\": datasets[i],\n \"dblock\": dblocks[i],\n \"scope\": scope[i]\n }\n\n return fix_log(description, files)",
"def __tempFilesFromDebs(self, deblist, arch, dir=''):\n local = []\n for item in deblist:\n data = item.split()\n try:\n dtype = data[0]\n url = data[1]\n dist = data[2]\n if len(data) == 3: pass #FIXME: Special case, append only the sections to the end\n for section in data[3:]:\n if section.find('#') != -1: break # If a comment is encountered skip the line\n main = lib.joinUrl(lib.joinUrl(lib.joinUrl(url, 'dists'), dist), section)\n main = self.__appendArch(arch, main)\n main = main[7:].replace('/','_')\n local.append(os.path.join(dir, main)) # Strips unnecessary characters and appends to list\n except: pass # Unable to parse deb entry\n return local",
"def get_file_list(self):\n disks = self.get_snap_files()\n timestamp = str(int(time.time()))\n ret = {}\n for disk, info in disks.items():\n img_info = qemu_utils.img_info(info['base'])\n cur = info['base']\n chain = {}\n # first build an accurate chain of any external snapshots\n # may need to loop through more than once in case the list items come back in an unexpected order.\n while True:\n found_base = False\n for img in img_info:\n\n if img['filename'] == cur:\n # format is base: top to we have easy access to the targeted image, and can count down for depth.\n try:\n chain[img['backing-filename']] = cur\n cur = img['backing-filename']\n except KeyError:\n #no backing file means base image\n chain['base'] = cur\n found_base = True\n if found_base:\n break\n # then generate filenames and filter out any backing files outside our depth.\n if self.job['depth'] <= 0:\n excl = abs(self.job['depth'])\n else:\n excl = len(img_info) - self.job['depth']\n for seq in range(len(img_info)):\n if seq == 0:\n key = chain['base']\n else:\n key = chain[key]\n if seq >= excl:\n ret[key] = f\"{disk}-{timestamp}-{seq}.qcow2\"\n\n return ret\n # todo: info file in staging directory to keep image metadata with the backed-up images",
"def dumpAllMaps(self, path, outPath):\n errors = {} # map name => Exception\n def isMap(name):\n return os.path.isfile(os.path.join(path, name, 'MODELS.bin'))\n maps = list(filter(isMap, os.listdir(path)))\n for i, name in enumerate(maps):\n try:\n printf(\"Extracting %3d/%3d: %s... \", i+1, len(maps), name)\n self.dumpMap(os.path.join(path, name),\n os.path.join(outPath, name))\n print(\"OK\")\n except Exception as ex:\n errors[name] = ex\n print(\"Failed\")\n for name, ex in errors.items():\n print(\"Failed extracting:\", name, ex)",
"def __filesFromDebs(self, deblist, arch, dir=''):\n local = []\n for item in deblist:\n data = item.split()\n try:\n dtype = data[0]\n url = data[1]\n dist = data[2]\n if len(data) == 3: \n pass #FIXME: Special case, append only the sections to the end\n for section in data[3:]:\n if section.find('#') != -1: break # If a comment is encountered skip the line\n main = lib.joinUrl(lib.joinUrl(lib.joinUrl(url, 'dists'), dist), section)\n main = self.__appendArch(arch, main)\n main = main[7:-3].replace('/','_')\n local.append(os.path.join(dir, main)) # Strips unnecessary characters and appends to list\n except: \n pass # Unable to parse deb entry\n return local",
"def _outputFileLists(self, idef=('slice1', 'slice2', 'slice3')):\n for id in idef:\n out = id + 'filelist'\n fh = open(out, 'w')\n for file in self.files:\n tmp = file[:-4] + id\n fh.write(tmp + '\\n')\n fh.close()\n self.log.info('Writing %s' % out)",
"def create_extract_files(self, object_list, target_dir=\"#default#\"):\n extract_file_list = []\n if target_dir == \"#default#\":\n # Allows direct calls to this function to be able to have a working base directory\n target_dir = self.args.basedir\n\n acl_list = self.build_type_object_list(object_list, [\"ACL\"])\n comment_list = self.build_type_object_list(object_list, [\"COMMENT\"])\n process_list = []\n process_count = 0\n\n # Objects extracted with pg_dump\n pgdump_list = self.build_type_object_list(object_list, [\"TABLE\", \"MATERIALIZED VIEW\", \"VIEW\", \"FOREIGN TABLE\"])\n if len(pgdump_list) > 0 and self.args and not self.args.quiet:\n print(\"Extracting tables...\")\n for o in pgdump_list:\n output_file = target_dir\n if self.args and self.args.schemadir:\n if o.get('objschema') != \"-\":\n output_file = self.create_dir(os.path.join(output_file, o.get('objschema')))\n\n if o.get('objtype') == \"TABLE\" or o.get('objtype') == \"FOREIGN TABLE\":\n output_file = self.create_dir(os.path.join(output_file, \"tables\"))\n elif o.get('objtype') == \"VIEW\" or o.get('objtype') == \"MATERIALIZED VIEW\":\n output_file = self.create_dir(os.path.join(output_file, \"views\"))\n else:\n print(\"Invalid dump type in create_extract_files() module\")\n sys.exit(2)\n\n # replace any non-alphanumeric characters with \",hexcode,\"\n objschema_filename = re.sub(r'\\W', self.replace_char_with_hex, o.get('objschema'))\n objname_filename = re.sub(r'\\W', self.replace_char_with_hex, o.get('objname'))\n output_file = os.path.join(output_file, objschema_filename + \".\" + objname_filename + \".sql\")\n extract_file_list.append(output_file)\n if self.args and self.args.jobs > 0:\n p = Process(target=self._run_pg_dump, args=([o, output_file]))\n if self.args and self.args.debug:\n self._debug_print(\"PG_DUMP PROCESS CREATED: \" + str(p.name))\n process_list.append(p)\n if (len(process_list) % self.args.jobs) == 0:\n if self.args and self.args.debug:\n self._debug_print(\"PG_DUMP PROCESS RUN JOB COUNT REACHED: \" + str(len(process_list)))\n self._start_jobs(process_list)\n self._wait_jobs(process_list)\n process_list = []\n process_count += 1\n else:\n self._run_pg_dump(o, output_file)\n # If --jobs value was not reached, finish off any that were left in the queue\n if len(process_list) > 0:\n if self.args and self.args.debug:\n self._debug_print(\"PG_DUMP PROCESS RUN REMAINING JOBS: \" + str(len(process_list)))\n self._start_jobs(process_list)\n self._wait_jobs(process_list)\n\n\n # Objects that can be overloaded\n process_list = []\n process_count = 0\n tmp_restore_list = None\n func_agg_list = self.build_type_object_list(object_list, [\"FUNCTION\", \"AGGREGATE\", \"PROCEDURE\"])\n dupe_list = func_agg_list\n if len(func_agg_list) > 0 and self.args and not self.args.quiet:\n print(\"Extracting functions & aggregates...\")\n for o in func_agg_list:\n output_file = target_dir\n if self.args and self.args.schemadir:\n if o.get('objschema') != \"-\":\n output_file = self.create_dir(os.path.join(output_file, o.get('objschema')))\n if o.get('objtype') == \"FUNCTION\":\n output_file = self.create_dir(os.path.join(output_file, 'functions'))\n elif o.get('objtype') == \"AGGREGATE\":\n output_file = self.create_dir(os.path.join(output_file, 'aggregates'))\n elif o.get('objtype') == \"PROCEDURE\":\n output_file = self.create_dir(os.path.join(output_file, 'procedures'))\n else:\n print(\"Invalid object type found while creating function/aggregate extraction files: \" + o.get('objtype'))\n # replace any 
non-alphanumeric characters with \",hexcode,\"\n objschema_filename = re.sub(r'\\W', self.replace_char_with_hex, o.get('objschema'))\n objbasename_filename = re.sub(r'\\W', self.replace_char_with_hex, o.get('objbasename'))\n output_file = os.path.join(output_file, objschema_filename + \".\" + objbasename_filename + \".sql\")\n extract_file_list.append(output_file)\n if self.args and self.args.temp != None:\n tmp_restore_list = tempfile.NamedTemporaryFile(prefix='pg_extractor_restore_list', dir=self.args.temp, delete=False)\n else:\n tmp_restore_list = tempfile.NamedTemporaryFile(prefix='pg_extractor_restore_list', delete=False)\n self.temp_filelist.append(tmp_restore_list.name)\n fh = open(tmp_restore_list.name, 'w', encoding='utf-8', newline='\\n')\n # loop over same list to find overloaded functions\n for d in dupe_list:\n if ( o.get('objschema') == d.get('objschema') and \n o.get('objbasename') == d.get('objbasename') ):\n fh.write(d.get('objid') + '\\n')\n # Should grab all overloaded ACL & COMMENTS since it's matching on basename\n for a in acl_list:\n if \"objbasename\" in a:\n if o.get('objschema') == a.get('objschema') and o.get('objbasename') == a.get('objbasename'):\n fh.write(a.get('objid') + '\\n')\n for c in comment_list:\n if re.match(r'(FUNCTION|AGGREGATE|PROCEDURE)', c.get('objsubtype')):\n if o.get('objschema') == c.get('objschema') and o.get('objbasename') == c.get('objbasename'):\n fh.write(c.get('objid') + '\\n')\n fh.close()\n if self.args and self.args.jobs > 0:\n p = Process(target=self._run_pg_restore, args=([tmp_restore_list.name, output_file]))\n if self.args and self.args.debug:\n self._debug_print(\"PG_RESTORE FUNCTIONS PROCESS CREATED: \" + str(p.name))\n process_list.append(p)\n if (len(process_list) % self.args.jobs) == 0:\n if self.args and self.args.debug:\n self._debug_print(\"PG_RESTORE FUNCTIONS PROCESS RUN JOB COUNT REACHED: \" + str(len(process_list)))\n self._start_jobs(process_list)\n self._wait_jobs(process_list)\n process_list = []\n process_count += 1\n else:\n self._run_pg_restore(tmp_restore_list.name, output_file)\n # If --jobs value was not reached, finish off any that were left in the queue\n if len(process_list) > 0:\n if self.args and self.args.debug:\n self._debug_print(\"PG_RESTORE FUNCTIONS PROCESS RUN REMAINING JOBS: \" + str(len(process_list)))\n self._start_jobs(process_list)\n self._wait_jobs(process_list)\n\n # Handle if --orreplace is set with --schemadir. 
This must be done after view & function files have been exported.\n if self.args.orreplace:\n schema_list = self.build_type_object_list(object_list, [\"SCHEMA\"])\n for o in schema_list:\n target_dir_funcs = os.path.join(target_dir, o.get('objname'), \"functions\")\n target_dir_views = os.path.join(target_dir, o.get('objname'), \"views\")\n self.or_replace(target_dir_funcs, target_dir_views)\n\n # Sequences are special little snowflakes\n process_list = []\n process_count = 0\n tmp_restore_list = None\n if self.args and self.args.getsequences:\n sequence_list = self.build_type_object_list(object_list, [\"SEQUENCE\"])\n dupe_list = self.build_type_object_list(object_list, [\"SEQUENCE SET\", \"SEQUENCE OWNED BY\"])\n if len(sequence_list) > 0 and self.args and not self.args.quiet:\n print(\"Extracting sequences...\")\n for o in sequence_list:\n output_file = target_dir\n if self.args and self.args.schemadir:\n if o.get('objschema') != \"-\":\n output_file = self.create_dir(os.path.join(output_file, o.get('objschema')))\n output_file = self.create_dir(os.path.join(output_file, 'sequences'))\n # replace any non-alphanumeric characters with \",hexcode,\"\n objschema_filename = re.sub(r'\\W', self.replace_char_with_hex, o.get('objschema'))\n objname_filename = re.sub(r'\\W', self.replace_char_with_hex, o.get('objname'))\n output_file = os.path.join(output_file, objschema_filename + \".\" + objname_filename + \".sql\")\n extract_file_list.append(output_file)\n if self.args and self.args.temp != None:\n tmp_restore_list = tempfile.NamedTemporaryFile(prefix='pg_extractor_restore_list', dir=self.args.temp, delete=False)\n else:\n tmp_restore_list = tempfile.NamedTemporaryFile(prefix='pg_extractor_restore_list', delete=False)\n self.temp_filelist.append(tmp_restore_list.name)\n fh = open(tmp_restore_list.name, 'w', encoding='utf-8', newline='\\n')\n fh.write(o.get('objid') + '\\n')\n for d in dupe_list:\n if o.get('objschema') == d.get('objschema') and o.get('objname') == d.get('objname'):\n fh.write(d.get('objid') + '\\n')\n for a in acl_list:\n if o.get('objschema') == a.get('objschema') and o.get('objname') == a.get('objname'):\n fh.write(a.get('objid') + '\\n')\n for c in comment_list:\n if re.search(r'SEQUENCE', c.get('objsubtype')):\n if o.get('objschema') == c.get('objschema') and o.get('objname') == c.get('objname'):\n fh.write(c.get('objid') + '\\n')\n fh.close()\n if self.args and self.args.jobs > 0:\n p = Process(target=self._run_pg_restore, args=([tmp_restore_list.name, output_file]))\n if self.args and self.args.debug:\n self._debug_print(\"PG_RESTORE SEQUENCE PROCESS CREATED: \" + str(p.name))\n process_list.append(p)\n if (len(process_list) % self.args.jobs) == 0:\n if self.args and self.args.debug:\n self._debug_print(\"PG_RESTORE SEQUENCE PROCESS RUN JOB COUNT REACHED: \" + str(process_count))\n self._start_jobs(process_list)\n self._wait_jobs(process_list)\n process_list = []\n process_count += 1\n else:\n self._run_pg_restore(tmp_restore_list.name, output_file)\n # If --jobs value was not reached, finish off any that were left in the queue\n if len(process_list) > 0:\n if self.args and self.args.debug:\n self._debug_print(\"PG_RESTORE SEQUENCE PROCESS RUN REMAINING JOBS: \" + str(len(process_list)))\n self._start_jobs(process_list)\n self._wait_jobs(process_list)\n\n\n process_list = []\n process_count = 0\n tmp_restore_list = None\n # Default privileges for roles\n if self.args and self.args.getdefaultprivs:\n acl_default_list = self.build_type_object_list(object_list, [\"DEFAULT 
ACL\"])\n dupe_list = acl_default_list\n if len(acl_default_list) > 0 and self.args and not self.args.quiet:\n print(\"Extracting default privileges...\")\n for o in acl_default_list:\n output_file = self.create_dir(os.path.join(target_dir, \"roles\"))\n output_file = os.path.join(output_file, o.get('objrole') + \".sql\")\n extract_file_list.append(output_file)\n if self.args and self.args.temp != None:\n tmp_restore_list = tempfile.NamedTemporaryFile(prefix='pg_extractor_restore_list', dir=self.args.temp, delete=False)\n else:\n tmp_restore_list = tempfile.NamedTemporaryFile(prefix='pg_extractor_restore_list', delete=False)\n self.temp_filelist.append(tmp_restore_list.name)\n fh = open(tmp_restore_list.name, 'w', encoding='utf-8', newline='\\n')\n for d in dupe_list:\n if o.get('objrole') == d.get('objrole'):\n fh.write(d.get('objid') + '\\n')\n fh.close()\n if self.args and self.args.jobs > 0:\n p = Process(target=self._run_pg_restore, args=([tmp_restore_list.name, output_file]))\n if self.args and self.args.debug:\n self._debug_print(\"PG_RESTORE DEFAULT PRIVS PROCESS CREATED: \" + str(p.name))\n process_list.append(p)\n if (len(process_list) % self.args.jobs) == 0:\n if self.args and self.args.debug:\n self._debug_print(\"PG_RESTORE DEFAULT PRIVS PROCESS RUN JOB COUNT REACHED: \" + str(len(process_list)))\n self._start_jobs(process_list)\n self._wait_jobs(process_list)\n process_list = []\n process_count += 1\n else:\n self._run_pg_restore(tmp_restore_list.name, output_file)\n # If --jobs value was not reached, finish off any that were left in the queue\n if len(process_list) > 0:\n if self.args and self.args.debug:\n self._debug_print(\"PG_RESTORE DEFAULT PRIVS PROCESS RUN REMAINING JOBS: \" + str(len(process_list)))\n self._start_jobs(process_list)\n self._wait_jobs(process_list)\n\n\n # All other objects extracted via _run_pg_restore()\n process_list = []\n process_count = 0\n tmp_restore_list = None\n other_object_list = self.build_type_object_list(object_list, [\"RULE\", \"SCHEMA\", \"TRIGGER\", \"TYPE\", \"EXTENSION\", \"DOMAIN\", \"SERVER\", \"USER MAPPING\"])\n if len(other_object_list) > 0:\n if self.args and not self.args.quiet:\n print(\"Extracting remaining objects...\")\n for o in other_object_list:\n output_file = target_dir\n if self.args and self.args.schemadir:\n if o.get('objschema') != \"-\":\n output_file = self.create_dir(os.path.join(output_file, o.get('objschema')))\n\n if o.get('objtype') == \"RULE\":\n output_file = self.create_dir(os.path.join(output_file, 'rules'))\n # replace any non-alphanumeric characters with \",hexcode,\"\n objschema_filename = re.sub(r'\\W', self.replace_char_with_hex, o.get('objschema'))\n objname_filename = re.sub(r'\\W', self.replace_char_with_hex, o.get('objname'))\n output_file = os.path.join(output_file, objschema_filename + \".\" + objname_filename + \".sql\")\n\n if o.get('objtype') == \"SCHEMA\":\n if self.args and self.args.schemadir:\n output_file = self.create_dir(os.path.join(output_file, o.get('objname')))\n else:\n output_file = self.create_dir(os.path.join(output_file, 'schemata'))\n # replace any non-alphanumeric characters with \",hexcode,\"\n objname_filename = re.sub(r'\\W', self.replace_char_with_hex, o.get('objname'))\n output_file = os.path.join(output_file, objname_filename + \".sql\")\n\n if o.get('objtype') == \"TRIGGER\":\n output_file = self.create_dir(os.path.join(output_file, 'triggers'))\n # replace any non-alphanumeric characters with \",hexcode,\"\n objschema_filename = re.sub(r'\\W', 
self.replace_char_with_hex, o.get('objschema'))\n objname_filename = re.sub(r'\\W', self.replace_char_with_hex, o.get('objname'))\n output_file = os.path.join(output_file, objschema_filename + \".\" + objname_filename + \".sql\")\n\n if o.get('objtype') == \"TYPE\" or o.get('objtype') == \"DOMAIN\":\n output_file = self.create_dir(os.path.join(output_file, 'types'))\n # replace any non-alphanumeric characters with \",hexcode,\"\n objschema_filename = re.sub(r'\\W', self.replace_char_with_hex, o.get('objschema'))\n objname_filename = re.sub(r'\\W', self.replace_char_with_hex, o.get('objname'))\n output_file = os.path.join(output_file, objschema_filename + \".\" + objname_filename + \".sql\")\n\n if o.get('objtype') == \"EXTENSION\":\n output_file = self.create_dir(os.path.join(output_file, 'extensions'))\n # replace any non-alphanumeric characters with \",hexcode,\"\n objname_filename = re.sub(r'\\W', self.replace_char_with_hex, o.get('objname'))\n output_file = os.path.join(output_file, objname_filename + \".sql\")\n\n if o.get('objtype') == \"SERVER\":\n output_file = self.create_dir(os.path.join(output_file, 'servers'))\n # replace any non-alphanumeric characters with \",hexcode,\"\n objname_filename = re.sub(r'\\W', self.replace_char_with_hex, o.get('objname'))\n output_file = os.path.join(output_file, objname_filename + \".sql\")\n\n if o.get('objtype') == \"USER MAPPING\":\n output_file = self.create_dir(os.path.join(output_file, 'user_mappings'))\n # replace any non-alphanumeric characters with \",hexcode,\"\n objusermapping_filename = re.sub(r'\\W', self.replace_char_with_hex, o.get('objusermapping'))\n objservername_filename = re.sub(r'\\W', self.replace_char_with_hex, o.get('objservername'))\n output_file = os.path.join(output_file, objusermapping_filename + \"_\" + objservername_filename + \".sql\")\n\n extract_file_list.append(output_file)\n if self.args and self.args.temp != None:\n tmp_restore_list = tempfile.NamedTemporaryFile(prefix='pg_extractor_restore_list', dir=self.args.temp, delete=False)\n else:\n tmp_restore_list = tempfile.NamedTemporaryFile(prefix='pg_extractor_restore_list', delete=False)\n self.temp_filelist.append(tmp_restore_list.name)\n fh = open(tmp_restore_list.name, 'w', encoding='utf-8', newline='\\n')\n fh.write(o.get('objid') + '\\n')\n for a in acl_list:\n if o.get('objschema') == a.get('objschema') and o.get('objname') == a.get('objname'):\n fh.write(a.get('objid') + '\\n')\n for c in comment_list:\n if re.search(r'(RULE|SCHEMA|TRIGGER|TYPE|EXTENSION|DOMAIN)', c.get('objsubtype')):\n if o.get('objschema') == c.get('objschema') and o.get('objname') == c.get('objname'):\n fh.write(c.get('objid') + '\\n')\n fh.close()\n if self.args and self.args.jobs > 0:\n p = Process(target=self._run_pg_restore, args=([tmp_restore_list.name, output_file]))\n if self.args and self.args.debug:\n self._debug_print(\"PG_RESTORE PROCESS CREATED: \" + str(p.name))\n process_list.append(p)\n if (len(process_list) % self.args.jobs) == 0:\n if self.args and self.args.debug:\n self._debug_print(\"PG_RESTORE PROCESS RUN JOB COUNT REACHED: \" + str(len(process_list)))\n self._start_jobs(process_list)\n self._wait_jobs(process_list)\n process_list = []\n process_count += 1\n else:\n self._run_pg_restore(tmp_restore_list.name, output_file)\n # If --jobs value was not reached, finish off any that were left in the queue\n if len(process_list) > 0:\n if self.args and self.args.debug:\n self._debug_print(\"PG_RESTORE PROCESS RUN REMAINING JOBS: \" + str(len(process_list)))\n 
self._start_jobs(process_list)\n self._wait_jobs(process_list)\n # end if block for other_object_list\n\n if self.args and self.args.debug:\n self._debug_print(\"\\nEXTRACT FILE LIST\")\n for f in extract_file_list:\n self._debug_print(f)\n\n return extract_file_list",
"def make_stuff(prefix_a, prefix_b, output_path, output_prefix, dump_range):\n\n # We want to combine images from A on top of B\n mask_a = prefix_a + \"%04d.png\"\n mask_b = prefix_b + \"%04d.png\"\n\n# rtp = lcse.rtplot_reader(project_path)\n# dumps = rtp.dump_map.keys()\n# dumps.sort()\n dumps = range(dump_range[0], dump_range[1] + 1)\n\n print \"Processing dummps %s\" % dumps\n\n path = os.path.join(os.path.abspath(output_path), output_prefix)\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n for dump in dumps:\n\n filename_out = os.path.join(path, '%s-%04d.png' % (output_prefix, dump))\n\n if os.path.exists(filename_out):\n continue\n\n print filename_out\n\n # load fv image\n try:\n # Open\n image_a = Image.open(mask_a % dump).convert(\"RGBA\")\n image_b = Image.open(mask_b % dump).convert(\"RGBA\")\n\n # Crop\n image_a = iu.square_crop(image_a, image_a.size[1])\n image_b = iu.square_crop(image_b, image_b.size[1])\n\n # Make the second image transparent\n image_b = iu.color_to_alpha(image_b, threshold=30)\n image_a = iu.alpha_composite(image_b, image_a)\n\n# draw_time(image_a, font, dump=dump, time=rtp.dump_map[dump]['T'])\n image_a.save(filename_out)\n\n except IOError as e:\n print e\n continue",
"def get_sorted_export_files():\n ...",
"def browseFiles(dump=False,factory=None):\n res=[]\n def sortkey(e):\n praw = e.priority\n if praw=='Unable':\n p=-2\n elif isinstance(praw,int):\n p=praw\n else:\n assert praw=='OnlyOnExplicitRequest'\n p=-1\n return (-p, e.factName,e.source,e.name)\n for n,s,f,p in _rawfct['ncrystal_get_filelist']():\n res.append( FileListEntry(name=n,source=s,factName=f,priority=p) )\n res.sort(key=sortkey)\n if dump:\n seen_names=set()\n groupfct = lambda e : (e.factName,e.source,e.priority)\n lastgroup = None\n pending=[]\n def print_pending():\n if not pending:\n return\n if factory is not None and lastgroup[0]!=factory:\n pending.clear()\n return\n n=len(pending) - 1\n pending[0] = pending[0]%('%s files'%n if n!=1 else '%s file'%n )\n for line in pending:\n print (line)\n pending.clear()\n for e in res:\n group = groupfct(e)\n if lastgroup != group:\n print_pending()\n lastgroup = group\n pending.append('==> %%s from \"%s\" (%s, priority=%s):'%group)\n hidden = e.name in seen_names\n seen_names.add(e.name)\n extra=''\n prname=e.name\n if e.priority=='OnlyOnExplicitRequest':\n prname='%s::%s'%(e.factName,e.name)\n elif hidden:\n extra=' <--- Hidden by higher priority entries (select as \"%s::%s\")'%(e.factName,e.name)\n pending.append( ' %s%s'%(prname,extra))\n print_pending()\n return #return None in this case, to avoid spurious printouts in an interactive session\n if factory is None:\n return res\n return [e for e in res if e.factName==factory]",
"def do_dump(self, *args): \n output_dir = self.root.PEC.config.get(\"GENERAL\", \"output_dir\")\n splitr = (self.loaded_dataflow.__name__).split(self.root.PEC.sep)\n if len(splitr) > 1:\n dname = splitr[1]\n else: dname = splitr[0]\n dname_withtime = dname + str(self.root._load_time).split('.')[0] \n if args[0] != '': #check if there is something in the string.\n #if so then we will use it as our filename\n fname = output_dir + self.root.PEC.sep+args[0]\n else:\n fname = output_dir + self.root.PEC.sep + dname_withtime +\\\n \"_\" + str(self.root.iter_count+1) + \".dump\"\n \n try:\n outf = open(fname, \"wb\")\n outf.write(self.root.get_bytes())\n outf.close()\n except IOError, msg:\n raise \"Proteus Dump Error %s\" % msg\n\n print \"Successfully write %s contents to %s.\" %\\\n (self.root.__class__.__name__ , fname)",
"def dumpPdosFilesFromStdOut_simple(stdOutObj, startDir):\n\t#Figure out where we dump it\n\tassert len(stdOutObj.label)==1\n\toutDict = dict()\n\tpathDict = getOutPathDictForPdosDump(stdOutObj, startDir)\n\toutPath = os.path.join(startDir, pathDict[\"pdos_path_ext\"], pathDict[\"pdos_filename\"])\n\n\t#Get the dict that we dump\n\toutDict = dict()\n\tpdosDict = stdOutObj.data[0][0].parsedFile.pdos\n\t\n\tif pdosDict.get(\"atomKinds\", None) is not None:\n\t\toutDict[\"atomKinds\"] = _turnListOfPdosObjsToListOfDicts( pdosDict[\"atomKinds\"] )\n\n\tif pdosDict.get(\"atomLists\", None) is not None:\n\t\toutDict[\"atomLists\"] = _turnListOfPdosObjsToListOfDicts( pdosDict[\"atomLists\"] )\n\n\tdictsDbHelp.dumpDictsToFilePath([outDict], outPath)",
"def create_all_bedfiles(poolref, numpools):\n # create bedfiles for varscan\n print(Bcolors.BOLD + \"\\ncreating bedfiles\" + Bcolors.ENDC)\n for ref in uni(poolref.values()):\n create_bedfiles.main(ref, numpools)",
"def _build_split_filenames(self, data_dir, split_info_list):\n\n filenames = []\n for split_info in split_info_list:\n filenames.extend(naming.filepaths_for_dataset_split(\n dataset_name=self.name,\n split=split_info.name,\n num_shards=split_info.num_shards,\n data_dir=data_dir,\n filetype_suffix=self._file_format_adapter.filetype_suffix,\n ))\n return filenames",
"def getpartnumbers(topd):\n parts = []\n\n for d in os.listdir(topd):\n if d in [\".git\", \".meta\"]:\n continue\n\n path = os.path.join(topd, d)\n\n if os.path.isdir(path):\n parts.extend(getpartnumbers(path))\n try:\n acode = d[d.rindex(\"-sr\")+3:]\n except:\n continue\n parts.append(assetcode.code_to_num(acode))\n elif os.path.isfile(path):\n\n if path[-1] == \"~\":\n \"Ignore temporary files from editors\"\n continue\n\n fname = os.path.basename(path)\n try:\n acode = fname[fname.rindex(\"-sr\")+3:]\n except:\n continue\n parts.append(assetcode.code_to_num(acode))\n\n return parts",
"def generate_assets(file, scale, output):\n scale_str = sketch_scale(scale)\n cmd = \"sketchtool export artboards \\\"%s\\\" --overwriting \\\n--formats=png --scales=%s --output=%s\" % (file, scale_str, output)\n result = subprocess.check_output(cmd, shell=True)\n\n generated = []\n for line in result.split('\\n'):\n if re.search('^Exported *', line):\n efile = line.split('Exported ')[1]\n efile = strip_sketch_scale_suffix(output, efile)\n generated.append(efile)\n return generated",
"def GetMicroDumps(dump_path):\n with open(dump_path, 'r') as d:\n data = d.read()\n all_dumps = []\n current_dump = None\n for line in data.splitlines():\n if current_dump is not None:\n if _MICRODUMP_END.match(line):\n current_dump.append(line)\n all_dumps.append(current_dump)\n current_dump = None\n else:\n current_dump.append(line)\n elif _MICRODUMP_BEGIN.match(line):\n current_dump = []\n current_dump.append(line)\n return all_dumps",
"def divide(self):\n divided = []\n for dbinfo in self.debuginfo:\n source = dbinfo['debuginfo']['filename']\n exists = False\n for src_infos in divided:\n if len(src_infos) > 0 and src_infos[0]['debuginfo']['filename'] == source:\n src_infos.append(dbinfo)\n exists = True\n break\n if not exists:\n divided.append([dbinfo])\n\n return divided"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
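For orientation, a minimal standalone sketch (in Python, like the surrounding examples) of what the set_stub_output_filenames helper above produces: one stub filename per part for a given wiki and date. The function name build_stub_filenames and the hard-coded wiki/date values are illustrative assumptions; the real helper wraps its result in DumpFilename objects via dfnames_from_filenames on the test class.

def build_stub_filenames(db_name, date, parts):
    """Return stub filenames such as 'enwiki-20240101-stub-articles1.xml.gz', one per part."""
    return [
        "{wiki}-{date}-stub-articles{partnum}.xml.gz".format(
            wiki=db_name, date=date, partnum=partnum)
        for partnum in parts
    ]

# Example with made-up values: four parts for a fake enwiki run.
print(build_stub_filenames("enwiki", "20240101", [1, 2, 3, 4]))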
given a DumpFilename, return a reasonable first and last page id for the file for testing
|
def get_fake_first_last_pageids(xml_dfname, _dump_dir, _parts):
page_id_info = {1: [1, 100],
2: [101, 300],
3: [301, 600],
4: [601, 3400]}
if xml_dfname.partnum_int is None or xml_dfname.partnum_int not in page_id_info:
return None, None
return page_id_info[xml_dfname.partnum_int][0], page_id_info[xml_dfname.partnum_int][1]
|
[
"def get_id_from_filename(html_filename):\n\treturn html_filename[ html_filename.rindex('_') + 1 : -len('.html') ]",
"def extract_visit(filename):\n # First, make sure we are only working with a filename, not a full path+filename\n filename = os.path.basename(filename)\n if filename.startswith('hst_'):\n rootname = filename.split('_')[-2]\n visit_id = rootname[:6]\n else:\n visit_id = filename[:6]\n\n return visit_id",
"def get_bookid(filename):\n id = re.sub('.rdf$', '', filename.split('/pg',1)[1])\n return int(id)",
"def get_latest_regular_dump(self, account_id='self'):\n last_two_dumps = self.get_dumps(account_id=account_id, limit=2)\n dump_files = self.get_file_urls(account_id=account_id, dump_id=last_two_dumps[0]['dumpId'])\n if dump_files['artifactsByTable']['requests']['partial']:\n # this is not a full requests dump - just a regular dump\n return last_two_dumps[0]['dumpId']\n else:\n # this is a full requests dump; return the second-most-recent dump ID instead\n return last_two_dumps[1]['dumpId']",
"def parse_id(filename):\n match = re.search('B[0-9]{2}-[0-9]{3}', filename) \n if match:\n return match.group()\n return None",
"def get_logpage_id(self, filename, archive_id):\n\n sql = ('SELECT logpage_id FROM {} '\n 'WHERE filename=%s AND archive_id=%s'\n .format(self.table_name('logpage')))\n logpage_id = self.db.execute_query(sql, (filename, archive_id))\n return logpage_id",
"def get_file_index(filename):\n\n substrings = filename.split('_')\n substring_isnumeric = [s.isdigit() for s in substrings]\n index_str = substrings[substring_isnumeric.index(True)]\n return int(index_str)",
"def get_start_stop_idx(filename):\n for part in filename.split(\"_\"):\n if \"-\" in part:\n [start_idx, stop_idx] = part.split(\"-\")\n return start_idx, stop_idx",
"def get_dumpname_base(cls):\n return 'pages-'",
"def max_file_offset(self):\t\t\n\t\treturn idaapi.get_fileregion_offset(MaxEA())",
"def getStepNumber(filename):\n match = re.search(r's\\d+\\.', filename)\n suffix = match.group()\n return int(suffix[1:-1])",
"def page_id(self):\n page_id = self.request.path_info\n page_id = page_id.lstrip('/')\n page_id = page_id.replace('/', '-')\n page_id = page_id.replace('+', '')\n page_id = page_id.rstrip('-')\n\n return 'page-' + (page_id or 'root')",
"def file_id(file_path):\n file_id = file_path.split(\"/\")[-1]\n file_id = file_id.split(\".\")[0]\n return file_id",
"def current_file_offset(self):\n\t\treturn idaapi.get_fileregion_offset(ScreenEA())",
"def _get_start_end_from_imgfp(self, imgfp):\n fn = os.path.basename(imgfp)\n start, end = fn.strip('.jpg').split('-')\n start, end = int(start), int(end)\n return start, end",
"def read_id(file):\n return file.read(4)",
"def getLastStep(reportfile):\n try:\n with open(reportfile, \"r\") as inp:\n report = inp.read()\n last_step = report.split(\"\\n\")[-2].split(\"\\t\")[0]\n except FileNotFoundError:\n last_step = 0\n return int(last_step)",
"def min_file_offset(self):\t\n\t\treturn idaapi.get_fileregion_offset(MinEA())",
"def getNumbering(fileName):\n fileName = fileName.split(\"/\")[-1]\n return fileName.split(\".\")[-2]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
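A self-contained sketch of how the fake page-id helper above behaves when wired in as a mock side_effect. SimpleNamespace stands in for the real DumpFilename, which only needs a partnum_int attribute here; the page-id table is copied from the helper, everything else is assumed.

from types import SimpleNamespace

PAGE_ID_INFO = {1: (1, 100), 2: (101, 300), 3: (301, 600), 4: (601, 3400)}

def fake_first_last_pageids(xml_dfname, _dump_dir=None, _parts=None):
    """Map a part number to a (first, last) page id pair, or (None, None) if unknown."""
    part = getattr(xml_dfname, "partnum_int", None)
    if part not in PAGE_ID_INFO:
        return None, None
    return PAGE_ID_INFO[part]

# Part 2 covers pages 101..300; an unknown part yields (None, None).
print(fake_first_last_pageids(SimpleNamespace(partnum_int=2)))
print(fake_first_last_pageids(SimpleNamespace(partnum_int=9)))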
create empty fake page content files in output directory for testing, with part numbers
|
def setup_empty_pagecontent_files_parts(self, partnums):
basedir = os.path.join(TestXmlDump.PUBLICDIR, 'enwiki', self.today)
for partnum in partnums:
filename = "{wiki}-{date}-pages-articles{partnum}.xml.bz2".format(
wiki=self.en['wiki'].db_name, date=self.today, partnum=partnum)
path = os.path.join(basedir, filename)
with open(path, "w") as output:
output.write("fake\n")
|
[
"def setup_empty_pagecontent_file(self):\n basedir = os.path.join(TestXmlDump.PUBLICDIR, 'enwiki', self.today)\n filename = \"{wiki}-{date}-pages-articles.xml.bz2\".format(\n wiki=self.en['wiki'].db_name, date=self.today)\n path = os.path.join(basedir, filename)\n with open(path, \"w\") as output:\n output.write(\"fake\\n\")",
"def generate_testfiles(self):\n print(\"Opening files...\")\n data = self.open_test_files()\n print(\"Assemble and concat...\")\n testdata, labels = self.assemble_and_concat(**data)\n print(\"Removing nans and saving...\")\n self.remove_nans(testdata, labels)\n data = None\n labels = None",
"def split_file():\r\n\r\n global files\r\n global index\r\n global ls_files\r\n global lsPages\r\n global ent_name\r\n pdf_writer = PdfFileWriter()\r\n\r\n for p in lsPages:\r\n pdf_writer.addPage(p)\r\n\r\n try:\r\n # Try to remove previous temporary files.\r\n os.remove('temp.pdf')\r\n except OSError as e:\r\n # If no temporary files exists give an error.\r\n print(\"Error: %s - %s.\" % (e.filename, e.strerror))\r\n # Create a new pdf file into temporary form.\r\n with open('temp.pdf', 'wb') as temp:\r\n pdf_writer.write(temp)\r\n\r\n pdf = Pdf(filepath='temp.pdf', filename=ent_name)\r\n files.append(pdf)\r\n ls_files.insert(index, ent_name.get())\r\n index += 1",
"def set_stub_output_filenames(self, parts):\n stub_filenames = []\n for partnum in parts:\n stub_filenames.append(\n \"{wiki}-{date}-stub-articles{partnum}.xml.gz\".format(\n wiki=self.en['wiki'].db_name, date=self.today,\n partnum=partnum))\n return self.dfnames_from_filenames(stub_filenames)",
"def _create_page_htmls(self):\n for page in self.pages:\n content = self.templates['page'].render(page=page, site=self.site)\n path = os.path.join(BASE_DIR, self.paths['output'],\n page.slug, 'index.html')\n write_to_path(content, path)",
"def createTestPage(self):\n import os\n path = self.dictPagePath()\n if os.path.exists(path):\n self.shouldDeleteTestPage = False\n raise TestSkiped(\"%s exists. Won't overwrite exiting page\" % \n self.dictPage)\n try:\n os.mkdir(path)\n revisionsDir = os.path.join(path, 'revisions')\n os.mkdir(revisionsDir)\n current = '00000001'\n file(os.path.join(path, 'current'), 'w').write('%s\\n' % current)\n text = u' ME:: %s\\n' % self.name\n file(os.path.join(revisionsDir, current), 'w').write(text)\n except Exception, err:\n raise TestSkiped(\"Can not be create test page: %s\" % err)",
"def test_get_nochkpt_outputfiles(self):\n # turn off checkpoints in the config but keep part numbers\n self.en['wiki'].config.checkpoint_time = 0\n\n pages_per_part = FilePartInfo.convert_comma_sep(\n self.en['wiki'].config.pages_per_filepart_history)\n\n content_job = XmlDump(\"articles\", \"articlesdump\", \"short description here\",\n \"long description here\",\n item_for_stubs=None, item_for_stubs_recombine=None,\n prefetch=True, prefetchdate=None,\n spawn=True, wiki=self.en['wiki'], partnum_todo=False,\n pages_per_part=pages_per_part,\n checkpoints=True, checkpoint_file=None,\n page_id_range=None, verbose=False)\n\n dfnames = content_job.get_nochkpt_outputfiles(self.en['dump_dir'])\n expected_files = [\n \"enwiki-{today}-pages-articles1.xml.bz2\".format(today=self.today),\n \"enwiki-{today}-pages-articles2.xml.bz2\".format(today=self.today),\n \"enwiki-{today}-pages-articles3.xml.bz2\".format(today=self.today),\n \"enwiki-{today}-pages-articles4.xml.bz2\".format(today=self.today)]\n expected_dfnames = self.dfnames_from_filenames(expected_files)\n self.assertEqual(dfnames, expected_dfnames)\n\n # turn off part numbers now\n self.en['wiki'].config.parts_enabled = 0\n\n content_job = XmlDump(\"articles\", \"articlesdump\", \"short description here\",\n \"long description here\",\n item_for_stubs=None, item_for_stubs_recombine=None,\n prefetch=True, prefetchdate=None,\n spawn=True, wiki=self.en['wiki'], partnum_todo=False,\n pages_per_part=None,\n checkpoints=True, checkpoint_file=None,\n page_id_range=None, verbose=False)\n\n dfnames = content_job.get_nochkpt_outputfiles(self.en['dump_dir'])\n expected_files = [\n \"enwiki-{today}-pages-articles.xml.bz2\".format(today=self.today)]\n expected_dfnames = self.dfnames_from_filenames(expected_files)\n self.assertEqual(dfnames, expected_dfnames)",
"def make_wp_files(self):\n self.call_sortie_function('generate_mission_file')",
"def create_scanned_files():\n file_paths = [\n os.path.join(STUBS_PATH, \"test3.h\"),\n os.path.join(STUBS_PATH, \"test4.h\"),\n os.path.join(STUBS_PATH, \"test5.h\"),\n os.path.join(STUBS_PATH, \"test6.h\")\n ]\n for file_path in file_paths:\n with open(file_path, \"w\") as new_file:\n if file_path in [os.path.join(STUBS_PATH, \"test3.h\")]:\n new_file.write(HEADER_WITHOUT_SPDX)\n elif file_path in [os.path.join(STUBS_PATH, \"test6.h\")]:\n new_file.write(HEADER_WITH_BINARY_LICENSE)\n else:\n new_file.write(HEADER_WITH_SPDX)\n yield\n for file_path in file_paths:\n os.remove(file_path)",
"def make_fake():\n print(\"\\nFake images created:\\n\")\n for i in range(1,31):\n fake_name = rename.random_name_maker()\n fake_name += rename.random_name_maker()\n fake_name += file_ending[random.randint(1,3)]\n print(fake_name)\n with open(\"./{0}\".format(fake_name), \"w\") as my_file:\n my_file.write('')\n print(\"\")",
"def create_testfile(remove_testdir, tmpdir, request):\n filename = getattr(request, 'param', generate_random_string())\n p = tmpdir.join(filename)\n p.write(generate_random_string(random.randint(1, 100)))\n\n yield tmpdir, filename",
"def generate_website(self):\n\n # check existing directories in output\n if not os.path.isdir(self.outdir) and self.pagelist:\n os.mkdir(self.outdir)\n for path in self.listdir:\n path=path.replace(self.srcdir,self.outdir)\n if not os.path.isdir(path):\n os.mkdir(path)\n\n # apply plugins\n self.log(\"Apply plugins:\")\n self.apply_plugins()\n\n # generate pages content using the selected makup langage\n self.get_pages_content()\n\n # apply plugins after content generation\n self.log(\"Apply plugins post generation:\")\n self.apply_plugins_post()\n\n\n self.log(\"Write pages:\")\n if self.pagelist:\n for page in self.pagelist:\n self.log(\"\\t\"+page['filename'])\n #print \"Generating page: {page}\".format(page=self.outdir+os.sep+page['filename']+'.html')\n\n template=self.templates[page['template']]\n page['raw_page']=template.render(pagelist=self.pagelist,postlist=self.postlist,postlist_lan=self.postlist_lan,ext=self.ext,**page)\n #print page['raw_page']\n f=codecs.open(self.outdir+os.sep+page['filename']+'.html',mode='w', encoding=\"utf8\")\n f.write(page['raw_page'])\n f.close()\n\n if self.config['General']['generate_posts']=='True':\n self.log(\"Write posts:\")\n for page in self.postlist:\n self.log(\"\\t\"+page['filename'])\n #print \"Generating post: {page}\".format(page=self.outdir+os.sep+page['filename']+'_post'+'.html')\n template=self.templates[page['template']]\n page['raw_page']=template.render(pagelist=self.pagelist,ext=self.ext,postlist=self.postlist,postlist_lan=self.postlist_lan,**page)\n #print page['raw_page']\n f=codecs.open(self.outdir+os.sep+page['filename']+'_post'+'.html',mode='w', encoding=\"utf8\")\n f.write(page['raw_page'])\n f.close()\n else:\n print('Warning : no pages generated')",
"def file_generate(path, content):\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n with open(path, 'w') as target:\n target.write(content)",
"def test_emptyFilePaging(self):\n filenameEmpty = self.mktemp()\n fd = file(filenameEmpty, 'w')\n fd.close()\n c, s, pump = connectedServerAndClient()\n pagerizer = FilePagerizer(filenameEmpty, None)\n s.setNameForLocal(\"bar\", pagerizer)\n x = c.remoteForName(\"bar\")\n l = []\n util.getAllPages(x, \"getPages\").addCallback(l.append)\n ttl = 10\n while not l and ttl > 0:\n pump.pump()\n ttl -= 1\n if not ttl:\n self.fail('getAllPages timed out')\n self.assertEquals(''.join(l[0]), '',\n \"Pages received not equal to pages sent!\")",
"def create_unsorted_file(outputs=[]):\n from random import randint\n file = open(outputs[0], 'w')\n for i in range(0,50):\n file.write(\"{0}\\n\".format(randint(1,100)))\n file.close()",
"def prepareOutput():\r\n\r\n os.removedirs(\"output\")\r\n os.mkdir(\"output\")",
"def write_pages_to_files(\n lab_files, gid_pages, exercise_num, lab_num, course_num, save_dir=None\n):\n if save_dir is None:\n save_dir = \"./\"\n\n if isinstance(exercise_num, (list, tuple)) and (len(exercise_num) != 1):\n exercise_num_str = \"\".join([f\"{x}\" for x in exercise_num])\n parser = get_exercises_from_lab\n elif isinstance(exercise_num, np.int):\n exercise_num_str = f\"{exercise_num}\"\n parser = get_exercise_from_lab\n else:\n print(\n f\"Attempting type coercion of exercise_num {exercise_num} \"\n f\"to int, even though it may be iterable\"\n )\n exercise_num = int(exercise_num)\n exercise_num_str = f\"{exercise_num}\"\n parser = get_exercise_from_lab\n\n if not isinstance(lab_num, str):\n lab_num = f\"{lab_num}\"\n if not isinstance(course_num, str):\n course_num = f\"{course_num}\"\n\n fname_html = f\"DSCI{course_num}_lab{lab_num}_exercise{exercise_num_str}\"\n fname_html = fname_html + \"_page{page_number}.html\"\n\n # Write paginated HTML pages\n print(f\"Writing to {save_dir}:\")\n for page_number, gid_page in gid_pages.items():\n fname_page = fname_html.format(page_number=page_number)\n fp = open(save_dir + fname_page, \"a\")\n fp.write(\n \"<head>\\n\"\n '\\t<link rel=\"stylesheet\" href=\"style0.css\">\\n'\n '\\t<link rel=\"stylesheet\" href=\"style1.css\">'\n \"\\n</head>\\n\"\n \"\\n<body>\\n\\n\"\n )\n for gid in gid_page:\n if gid not in lab_files:\n print(f\"gid {gid} not found in lab_files.keys().\")\n else:\n fp.write(f\"\\n\\n<h1>{gid}</h1>\\n\\n\")\n fp.write(\n parser(lab_files[gid], exercise_num, do_display=False)[0]\n )\n fp.write(\"</body>\")\n fp.close()\n print(\"\\t\" + f\"{fname_page}\")\n # Write the CSS files to the same folder\n _, resources = parser(\n list(lab_files.values())[0], exercise_num, do_display=False\n )\n print()\n for i, css_lines in enumerate(resources[\"inlining\"][\"css\"]):\n with open(save_dir + f\"style{i}.css\", \"w\") as fp:\n fp.write(css_lines)\n print(\"\\t\" + f\"style{i}.css\")\n return",
"def test_seq_files(self):\n logger.info(self.test_empty_files.__doc__)\n from storagetest.pkgs.pts.fio import FIO\n for x in range(0, self.dir_n):\n f_size_min, f_size_max = utils.to_int_list(self.file_size_range)\n f_size = random.randint(f_size_min, f_size_max)\n fio = FIO(os.path.join(self.test_path, 'seq_files_dir_{0}'.format(x)))\n self.assertTrue(fio.seq_write(self.file_n, str(f_size)))",
"def create_outline_html_files(self,context):\n for i, title in enumerate(self.titles):\n fp = os.path.join('torah', 'json', 'outline', f\"{title}.json\")\n data = json.loads(open(fp).read())\n context_two = {'title': title, 'title_number': i+1,'outline':data }\n filedata = render_to_string('static_outline.html', dict(context, **context_two))\n fp = open(f'../docs/{i+1}.html','w')\n fp.write(filedata)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
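A runnable sketch of what the per-part fixture above does: drop tiny placeholder page-content files, one per part, into a dump output directory. Using tempfile.mkdtemp() instead of the suite's PUBLICDIR/enwiki/<date> layout is my own simplification.

import os
import tempfile

def make_fake_pagecontent_files(basedir, db_name, date, partnums):
    """Write a placeholder per-part file so listing and size checks have something to find."""
    paths = []
    for partnum in partnums:
        filename = "{wiki}-{date}-pages-articles{partnum}.xml.bz2".format(
            wiki=db_name, date=date, partnum=partnum)
        path = os.path.join(basedir, filename)
        with open(path, "w") as output:
            output.write("fake\n")  # placeholder text, not real bzip2 data
        paths.append(path)
    return paths

# Example with throwaway values: four fake part files under a temp directory.
for p in make_fake_pagecontent_files(tempfile.mkdtemp(), "enwiki", "20240101", [1, 2, 3, 4]):
    print(p)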
create single empty fake page content file in output directory (covering all page content for the wiki) for testing
|
def setup_empty_pagecontent_file(self):
basedir = os.path.join(TestXmlDump.PUBLICDIR, 'enwiki', self.today)
filename = "{wiki}-{date}-pages-articles.xml.bz2".format(
wiki=self.en['wiki'].db_name, date=self.today)
path = os.path.join(basedir, filename)
with open(path, "w") as output:
output.write("fake\n")
|
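And the single-file counterpart as a short sketch: the same placeholder trick without part numbers, plus the kind of existence check a test could assert afterwards. Paths and values are illustrative only, not the suite's real directory layout.

import os
import tempfile

def make_fake_pagecontent_file(basedir, db_name, date):
    """Write one placeholder file standing in for the whole-wiki page content dump."""
    path = os.path.join(
        basedir, "{wiki}-{date}-pages-articles.xml.bz2".format(wiki=db_name, date=date))
    with open(path, "w") as output:
        output.write("fake\n")
    return path

# Illustrative post-fixture check with made-up values.
assert os.path.exists(make_fake_pagecontent_file(tempfile.mkdtemp(), "enwiki", "20240101"))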
[
"def setup_empty_pagecontent_files_parts(self, partnums):\n basedir = os.path.join(TestXmlDump.PUBLICDIR, 'enwiki', self.today)\n for partnum in partnums:\n filename = \"{wiki}-{date}-pages-articles{partnum}.xml.bz2\".format(\n wiki=self.en['wiki'].db_name, date=self.today, partnum=partnum)\n path = os.path.join(basedir, filename)\n with open(path, \"w\") as output:\n output.write(\"fake\\n\")",
"def createTestPage(self):\n import os\n path = self.dictPagePath()\n if os.path.exists(path):\n self.shouldDeleteTestPage = False\n raise TestSkiped(\"%s exists. Won't overwrite exiting page\" % \n self.dictPage)\n try:\n os.mkdir(path)\n revisionsDir = os.path.join(path, 'revisions')\n os.mkdir(revisionsDir)\n current = '00000001'\n file(os.path.join(path, 'current'), 'w').write('%s\\n' % current)\n text = u' ME:: %s\\n' % self.name\n file(os.path.join(revisionsDir, current), 'w').write(text)\n except Exception, err:\n raise TestSkiped(\"Can not be create test page: %s\" % err)",
"def file_generate(path, content):\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n with open(path, 'w') as target:\n target.write(content)",
"def write_page(self):\n self._write_page_stylesheet()\n self._copy_job_logs()\n webpath = os.path.join(self.config['AUTOCMS_WEBDIR'], self.testname)\n if not os.path.exists(webpath):\n os.makedirs(webpath)\n # write a 'index.html.new' file and then rename it to\n # 'index.html'. This prevents users from viewing a half\n # completed webpage when the page refreshes.\n newpagepath = os.path.join(webpath, 'index.html.new')\n pagepath = os.path.join(webpath, 'index.html')\n with open(newpagepath, 'w') as output_file:\n output_file.write(self.page)\n os.rename(newpagepath, pagepath)",
"def make_file(self, path):\n self.prepare()\n file_ = \"\".join(str(\"News\"))\n file_path = os.path.join(path, f\"{file_}.html\")\n self.html = self.html.to_html(escape=False)\n with open(f\"{file_path}\", \"w\", encoding=\"utf-8\") as file:\n file.write(self.html)",
"def generate_website(self):\n\n # check existing directories in output\n if not os.path.isdir(self.outdir) and self.pagelist:\n os.mkdir(self.outdir)\n for path in self.listdir:\n path=path.replace(self.srcdir,self.outdir)\n if not os.path.isdir(path):\n os.mkdir(path)\n\n # apply plugins\n self.log(\"Apply plugins:\")\n self.apply_plugins()\n\n # generate pages content using the selected makup langage\n self.get_pages_content()\n\n # apply plugins after content generation\n self.log(\"Apply plugins post generation:\")\n self.apply_plugins_post()\n\n\n self.log(\"Write pages:\")\n if self.pagelist:\n for page in self.pagelist:\n self.log(\"\\t\"+page['filename'])\n #print \"Generating page: {page}\".format(page=self.outdir+os.sep+page['filename']+'.html')\n\n template=self.templates[page['template']]\n page['raw_page']=template.render(pagelist=self.pagelist,postlist=self.postlist,postlist_lan=self.postlist_lan,ext=self.ext,**page)\n #print page['raw_page']\n f=codecs.open(self.outdir+os.sep+page['filename']+'.html',mode='w', encoding=\"utf8\")\n f.write(page['raw_page'])\n f.close()\n\n if self.config['General']['generate_posts']=='True':\n self.log(\"Write posts:\")\n for page in self.postlist:\n self.log(\"\\t\"+page['filename'])\n #print \"Generating post: {page}\".format(page=self.outdir+os.sep+page['filename']+'_post'+'.html')\n template=self.templates[page['template']]\n page['raw_page']=template.render(pagelist=self.pagelist,ext=self.ext,postlist=self.postlist,postlist_lan=self.postlist_lan,**page)\n #print page['raw_page']\n f=codecs.open(self.outdir+os.sep+page['filename']+'_post'+'.html',mode='w', encoding=\"utf8\")\n f.write(page['raw_page'])\n f.close()\n else:\n print('Warning : no pages generated')",
"def write_site_content(self):\n\n for root, dirs, files in os.walk(self.BASE_DIR):\n base = root.replace(self.BASE_DIR, '')\n base = base.lstrip(os.path.sep)\n\n for d in self.ignoreFilter(dirs):\n nd = os.path.join(self.DEPLOY_DIR, base, d)\n if not os.path.isdir(nd):\n os.makedirs(nd)\n dirs[:] = self.ignoreFilter(dirs)\n\n for f in self.ignoreFilter(files):\n if Page.transformable(f):\n Page(os.path.join(root, f),\n self.layouts,\n self.context).write()\n else:\n path = os.path.abspath(root)\n path = path.replace(os.path.abspath(self.BASE_DIR), '', 1)\n path = path.lstrip(os.path.sep)\n path = os.path.join(self.DEPLOY_DIR, path)\n if not os.path.isdir(path):\n os.makedirs(path)\n shutil.copy(os.path.join(root, f), os.path.join(path, f))",
"def _create_page_htmls(self):\n for page in self.pages:\n content = self.templates['page'].render(page=page, site=self.site)\n path = os.path.join(BASE_DIR, self.paths['output'],\n page.slug, 'index.html')\n write_to_path(content, path)",
"def test_export():\n molecule = mogli.read('examples/dna.xyz')[0]\n mogli.export(molecule, 'test.html')\n assert os.path.exists('test.html')",
"def make_index_html():\n\tif not os.path.exists('template.html'):\n\t\tindex_content = file('lib/html/template.html','r').read()\n\t\tindex = open('template.html', 'w')\n\t\tindex.write(index_content % {\"app_name\":conf.app_name})\n\t\tindex.close()\n\t\tprint \"template.html made\"",
"def create_testfile(remove_testdir, tmpdir, request):\n filename = getattr(request, 'param', generate_random_string())\n p = tmpdir.join(filename)\n p.write(generate_random_string(random.randint(1, 100)))\n\n yield tmpdir, filename",
"def write(self,pages):\n\n\t\t# Write web pages\n\t\tfor page in pages:\t\n\t\t\tbuildfile = os.path.join(self.directory,page['name'])\n\t\t\twith open(buildfile, 'wb') as f: \n\t\t\t\tf.write(page['data'])",
"def create_wiki_page(self, page_name, page_content):\n return_type = File(self.context)\n\n def _root_folder_loaded():\n page_url = self.root_folder.serverRelativeUrl + \"/\" + page_name\n wiki_props = WikiPageCreationInformation(page_url, page_content)\n Utility.create_wiki_page_in_context_web(self.context, wiki_props, return_type)\n self.ensure_property(\"RootFolder\", _root_folder_loaded)\n return return_type",
"def _create_empty_file() -> None:\n FileManager.write_data(data={})",
"def create_nojekyll():\n\n print('Creating .nojekyll file at {} ... '.format(os.getcwd()),\n end = '',\n flush = True)\n with open('.nojekyll', 'a'):\n pass\n print('done')",
"def main():\n dest_dir = \".public\"\n if os.path.isdir(dest_dir):\n shutil.rmtree(dest_dir)\n os.mkdir(dest_dir)\n\n env = Environment(\n loader=FileSystemLoader('templates'),\n autoescape=select_autoescape(['html'])\n )\n\n ignore_files = ignoreFile()\n files_in_dir = os.walk('templates')\n filenames = [filename for _, _, filename in files_in_dir]\n files = [filename for filename in filenames[0] if filename not in ignore_files]\n for i in files:\n template = env.get_template(i)\n final_html = template.render()\n\n\n write_prefix = glob.glob(\".public\")[0]\n write_path = os.path.join(write_prefix, i)\n print write_path\n try:\n html_file = codecs.open(write_path, 'w', 'utf8')\n html_file.write(final_html)\n finally:\n html_file.close()",
"def create_tweet_me_page(full_path, image_topic, headline, sayings,\n other_button='', image_host='localhost'):\n try:\n os.makedirs(os.path.dirname(full_path))\n except OSError, e:\n pass\n\n page = str(render.tweet_me(image_topic, headline, sayings, other_button,\n image_host))\n with open(full_path, 'w+') as f:\n f.write(page)\n\n print('written ', full_path)",
"def wiki_page_source(request, slug):\n try:\n page = WikiPage.objects.get(slug=slug)\n except WikiPage.DoesNotExist:\n raise Http404\n \n response = HttpResponse(mimetype=\"text\")\n response[\"Content-Disposition\"] = \"attachment; filename=%s\" % slug\n\n response.write(page.content)\n\n return response",
"def generatedocs():\n fe_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../fastestimator')\n save_dir = os.path.join(tempfile.gettempdir(), 'fe')\n #insert project path to system path to later detect the modules in project\n sys.path.insert(0, fe_path)\n #parent directory where all the markdown files will be stored\n\n for subdirs, dirs, files in os.walk(fe_path, topdown=True):\n for f in files:\n fname, ext = os.path.splitext(os.path.basename(f))\n if not f.startswith('_') and ext == '.py':\n #if f == 'pggan.py':\n f_path = os.path.join(subdirs, f)\n mod_dir = os.path.relpath(f_path, fe_path)\n mod = mod_dir.replace('/', '.')\n if subdirs == fe_path:\n save_path = os.path.join(*[save_dir, 'fe'])\n else:\n save_path = os.path.join(*[save_dir, os.path.relpath(subdirs, fe_path)])\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n mdtexts = extractmarkdown(mod, save_path)\n return save_dir"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read all sensors and publish the results to the MQTT broker
|
def publish():
print("Publishing Thread")
client = start_client(PUBLISH_CLIENT_ID)
while publishing:
illuminance = read_light_sensor()
temp, hum = read_temperature_humidity()
readings = {
'pi1_timestamp': datetime.now().isoformat(),
            'illuminance': illuminance,
'temperature': temp,
'humidity': hum,
'raspberry_pi': 1
}
client.publish(TOPIC, json.dumps(readings))
print('Published readings: ', readings)
client.loop(.1)
time.sleep(10)
print('Stop publishing.')
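
# A minimal sketch (an assumption, not part of the original snippet) of the
# start_client() helper that publish() relies on, using the paho-mqtt library;
# BROKER_HOST is a hypothetical configuration constant.
import paho.mqtt.client as mqtt

BROKER_HOST = "localhost"  # hypothetical broker address

def start_client(client_id):
    # Create a client with the given id, connect to the assumed broker, and
    # return it ready for the publish()/loop() calls used above.
    client = mqtt.Client(client_id)
    client.connect(BROKER_HOST, 1883, keepalive=60)
    return client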
|
[
"def run(self):\n time.sleep(5)\n while(1):\n time.sleep(5)\n temperature = SensorData_Object.getTemperature()\n self.temp_value.set_value(temperature) # Publish Temperature Sensor Data\n \n humidity = SensorData_Object.getHumidity()\n self.hum_value.set_value(humidity) # Publish Humidity Sensor Data\n \n flux = SensorData_Object.getMagFlux()\n self.flux_value.set_value(flux) # Publish MagneticFlux Data\n \n corona_level = SensorData_Object.getCorona()\n self.corona_level.set_value(corona_level) # Publish Corona Level Data\n \n Resistence = SensorData_Object.getResistence()\n self.resistance.set_value(Resistence) # Publish Resistence Data\n \n logging.info(\"All Data Published to OPC Server\")",
"def readTemplateSensorsLoop(self):\n self.log.info(u\"==> Thread for 'Template reading sensors' started\")\n templateinfo_nextread = {}\n while not self.stopplugin.isSet():\n for deviceid in self.templatedevices:\n devicetype = self.templatedevices[deviceid][\"devicetype\"]\n if devicetype == \"template.number\": \n name = self.templatedevices[deviceid][\"name\"]\n interval1 = self.templatedevices[deviceid][\"interval1\"]\n interval2 = self.templatedevices[deviceid][\"interval2\"]\n \n # Read sensor (in this template plugin, it's only un random generate numer)\n value = random.randint(interval1, interval2)\n \n self.log.info(u\"==> UPDATE Sensor for device '%s' with value '%s' \" % (name, value))\n self.send(deviceid, \"number-sensor_template\", value) # Update sensor value in Domogik, \"number-sensor_template\" is the sensorid_name in info.json\n self.log.info(u\"==> WAIT {0} seconds before the next sensor reading for device '{1}' \".format(self.updatetime, name))\n self.stopplugin.wait(self.updatetime) # Sleep \"self.updatetime\" seconds or exit if plugin is stopped.\n self.log.info(u\"==> Thread for 'Template reading sensors' stopped\")",
"def add_all_sensors(self):\n for x in self.configuration.sensors:\n self.add_sensor(x)",
"def get_all(self):\n host_url = '/'.join([pecan.request.host_url, 'v1', 'sensors'])\n return Sensors(uri=host_url,\n type='sensors',\n description='Collection of sensors',\n sensor_links=[])",
"def setupSensors(self) :\n for sensor in self.sensors :\n self.manager.addListener(sensor, self.gpioUpdate)",
"def get_readings_task():\n\n print('Getting readings')\n\n try:\n humidity, temperature = \\\n Adafruit_DHT.read_retry(SENSOR_TYPE, SENSOR_PIN_BCM)\n\n print('Humidity {0}, temperature {1}'.format(humidity,\n temperature))\n\n if USE_CLOUD:\n send_meas_cloud(temperature=float(temperature)\n , humidity=float(humidity))\n\n if USE_FILE:\n send_meas_filesystem(temperature=float(temperature)\n , humidity=float(humidity))\n\n except Exception as err:\n print('Exception while reading/sending measurements: {0}'.format(\n err\n ))\n\n # Reschedule.\n scheduler.enter(INTERVAL_SECONDS, 1, get_readings_task)",
"def _read_sensors(self):\n readings = {\"sensors\": []}\n\n for sensor in self.sensors:\n sensor_data = sensor.read()\n for data in sensor_data:\n readings[\"sensors\"].append(data)\n\n return readings",
"def all_sensors(self):\n return self._robot.GetAllSensors() + self._sensors",
"def start(self):\n for s in self.sensors:\n q = queue.Queue()\n self._queues.append(q)\n\n thread = Thread(target=self._measure_sensor, args=[s, q], daemon=True)\n self._threads.append(thread)\n thread.start()",
"def process_temperature():\n \n \"\"\"for mutliple Sensors\"\"\"\n\n for SENSOR in W1ThermSensor.get_available_sensors():\n\tlogging.info(\"Sensor %s has temperature %.2f\" % (SENSOR.id, SENSOR.get_temperature()))\n \tG.labels(\"%s\" % SENSOR.id).set(\"%.2f\" % SENSOR.get_temperature())",
"def get_sensor_data(self):\n trynum = 0\n sleep_time = 3\n self._valid_sensor_read = None\n self._total_process_time = None\n tprocess0 = time.perf_counter()\n while not self._valid_sensor_read and trynum < self.wmoptions['ERR_RETRIES']:\n trynum += 1\n if trynum >= 2:\n self.log.warning(f\"Try num.: {trynum}\")\n sleep_time = max(3, (min((2**trynum), self.wmoptions['MAXIMUM_BACKOFF_TIME']) - round(int(self._sensor_read_elapsed or 0))))\n self.log.debug(f\"Sleeping for {sleep_time} seconds\")\n time.sleep(sleep_time)\n\n self._valid_sensor_read = None\n self.log.debug(\"Requesting Sensor data...\")\n self.__read_sensor()\n self.log.debug(f\"Sensor read elapsed time: {self._sensor_read_elapsed:.4f} seconds at: {self._sensor_read_time_utc} (UTC)\")\n\n if self._humidity is None or self._temperature is None:\n self.log.error(\"Sensor didn't return anything\")\n self.__reset_sensor()\n elif self._temperature < -40 or self._temperature > 125 or self._humidity < 0 or self._humidity > 100:\n self.log.error(f\"Bogus values: Temperature: {self._temperature}, Humidity: {self._humidity}\")\n if trynum + 2 > self.wmoptions['ERR_RETRIES']:\n self.log.warning(\"Less then two retries remaining.\")\n self.__reset_sensor()\n else: self._valid_sensor_read = True\n\n self._total_process_time = time.perf_counter() - tprocess0\n self.log.debug(f\"Total sensor read process time: {self._total_process_time:.4f} seconds - Tries: {trynum}\")\n\n if self._valid_sensor_read:\n self.log.info(f\"Success reading sensor. Temperature: {self._temperature}, Humidity: {self._humidity}\")\n else: self.log.error(\"Could not read sensor. Aborting...\")\n\n return self._valid_sensor_read",
"def arduinoGetSensors(self):\r\n ret = {}\r\n if self.arduinoIsReady():\r\n self.flush()\r\n self.write(self.CHAR_READ)\r\n time.sleep(0.5)\r\n ret = self._processData(self.readline())\r\n return(ret)",
"def post(self):\n try:\n response = SensorDataController.get_all_sensor_data()\n return response\n except Exception as e:\n logs.logger.error(e)",
"def read_sensors(self):\n data = []\n for u in range(0, 8):\n mux = self.muxes[u]\n mux_data = []\n data.append(mux_data)\n if mux != '--' and self.mux_present[u]:\n mux = '0x' + mux\n mux = int(mux, 16)\n x2 = 1\n for s2 in range(0, 8):\n sensor_data = None\n # Choose sensor x on mux m\n self.bus.write_byte_data(mux, SensorReader.DEVICE_REG_MODE1, x2)\n if self.sensor_present[u][s2]:\n # Read from sensor\n rgb = self.stream_data()\n\n # Store data in proto\n sensor_data = singlepixel_pb2.SinglePixelSensorReading()\n sensor_data.red = self._normalize_reading(rgb['r'])\n sensor_data.green = self._normalize_reading(rgb['g'])\n sensor_data.blue = self._normalize_reading(rgb['b'])\n sensor_data.clear = self._normalize_reading(rgb['c'])\n sensor_data.ntp_capture_time.GetCurrentTime()\n\n x2 = x2 * 2\n mux_data.append(sensor_data)\n # Choose sensor x on mux m\n self.bus.write_byte_data(mux, SensorReader.DEVICE_REG_MODE1, 0x00)\n return data",
"def sample_sensors(self, callback = lambda sensors: None, **kwargs):\n try:\n for sensor in self.sensors:\n sensor.start()\n while self.is_alive:\n pass\n self.process_sensors(callback, **kwargs)\n except KeyboardInterrupt:\n self.process_sensors(callback, **kwargs)",
"def refresh_sensors():\n # We refresh the sensors display.\n display_sensors()",
"def sensors_listener():\n print(\"initializing sensors_listener node\")\n\n rospy.init_node('sensors_listener')\n \n # subscribe to sensors' topics, a subscriber is initialized by (topic, msg_type, callback_fn)\n _ = rospy.Subscriber('mavros/imu/data', Imu, sensors_callback, \"imu\", queue_size=100)\n _ = rospy.Subscriber('mavros/imu/mag', MagneticField, sensors_callback, \"mag\", queue_size=100)\n _ = rospy.Subscriber('mavros/altitude', Altitude, sensors_callback, \"alt\", queue_size=100)\n\n # subscribe to window maintainer and nn_predictor\n _ = rospy.Subscriber('deep_nav/sensors_ts', Float64, window_maintainer, queue_size=15)\n _ = rospy.Subscriber('deep_nav/features_window', ListOfLists, make_prediction, queue_size=15)\n \n # subscribe to EKF local position and velocity, used to calculate DeepNav error\n # and to align initial states\n _ = rospy.Subscriber('mavros/local_position/pose', PoseStamped, sensors_callback, \"ekf_position\", queue_size=100)\n _ = rospy.Subscriber('mavros/local_position/velocity_local', TwistStamped, sensors_callback, \"ekf_velocity\", queue_size=100)\n\n print(\"you can start publishing mavros sensor messages now\")\n\n # call the window maintaner at a fixed rate of 5 Hz\n window_maintainer_rate = 5\n rate = rospy.Rate(window_maintainer_rate)\n while not rospy.is_shutdown():\n windowing_invoker_pub.publish(time.time())\n rate.sleep()\n\n rospy.spin()",
"def sensors_thread():\n while True:\n global binary_sensors\n socketio.sleep(0.2)\n for s in binary_sensors:\n new_val = gpio.is_high(s.pin)\n if (s.old_val == False) and (new_val == True):\n print s.rising_event\n socketio.emit('binary_sensors', {'data': s.rising_event},\n namespace='/api/v1')\n elif (s.old_val == True) and (new_val == False):\n print s.falling_event\n socketio.emit('binary_sensors', {'data': s.falling_event},\n namespace='/api/v1')\n else:\n pass\n s.old_val = new_val",
"def sendMQTTData(temperature, humidity):\n timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime(time.time()))\n payload = (\"\"\"\n {\n \"deviceID\" : \"WeatherMap\",\n \"Data\" :{\n \"Temperature\" : {\n \"data\": \"%s\",\n \"unit\" : \"C\"\n },\n \"Humidity\" : {\n \"data\" : \"%s\",\n \"unit\" : \"%%\"\n },\n \"Timestamp\" : \"%s\"\n }\n }\n \"\"\"%(temperature, humidity, timestamp))\n client.publish(\"/RSU/remote/WeatherMap/json\", payload, 1)\n\n f = open(\"Receive/Weather.txt\", \"a+\")\n f.write(payload + \"\\n\")\n f.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Listen for new messages on subscribed topic, start the publisher and
|
def listen(publisher):
client = start_client(LISTEN_CLIENT_ID)
client.subscribe(SUBSCRIBER)
print('Subscribed to topic.')
while listening:
client.loop(.1)
|
[
"def listen(publisher):\n global client\n client.subscribe(SUBSCRIBER_TOPIC, 1, on_message)\n print('Subscribed to topic.')\n while listening:\n time.sleep(10)",
"def start_mqtt():\n with app.app_context():\n sub = Subscriber()\n sub.subscribe()",
"def subscribe(listener, topic):\n if topic not in _topics:\n _topics[topic] = Topic()\n\n _topics[topic].add_subscriber(listener)",
"def register(self, topic):\n LOGGER.info(f\"Subscriber registering to topic {topic} at address {self.address}\")\n\n self.topics.append(topic)\n\n self.registration.send_string(pubsub.REG_SUB, flags=zmq.SNDMORE)\n self.registration.send_string(topic, flags=zmq.SNDMORE)\n self.registration.send_string(self.address)\n\n self.message_sub.setsockopt_string(zmq.SUBSCRIBE, topic)\n\n # socket listening for new publishers should also subscribe to the topic\n # This allows it to only receive notifications about publishers it wants to\n # connect to\n self.publisher_sub.setsockopt_string(zmq.SUBSCRIBE, topic)\n\n broker_type = self.registration.recv_string()\n\n # process response from registration\n # If broker is ROUTING we need to the socket accepting messages to be bound\n # to this subscriber's address\n # If broker is DIRECT we need to connect the socket accepting messages to\n # each address received. Additionally, if we are using the DIRECT broker\n # we need to be able to receive notifications about new publishers so\n # we need to connect the appropriate socket to this subscriber's address\n if broker_type == BrokerType.ROUTE:\n if not self.message_sub_bound:\n self.publisher_sub.unbind(self.address)\n sleep(.5)\n self.message_sub.bind(self.address)\n self.message_sub_bound = True\n elif broker_type == BrokerType.DIRECT:\n\n # Ensure that publisher_sub is receiving new publishers\n # before we get the list of existing publishers otherwise\n # we could miss a publisher registration\n if not self.publisher_sub_ready:\n sleep(self.conn_sec)\n self.publisher_sub_ready = True\n\n has_addresses = self.registration.recv()\n if has_addresses == b'\\x01':\n addresses = self.registration.recv_multipart()\n for address in addresses:\n self.message_sub.connect(address.decode('utf-8'))\n\n LOGGER.info(f\"Connected to {broker_type} broker\")",
"def subscribe(self, topic):\n self.mqtt_client.subscribe(topic)",
"def receive_messages():\n\n subscriber = pubsub_v1.SubscriberClient()\n subscription_path = subscriber.subscription_path(\n PROJECT, SUBSCRIPTION)\n\n def callback(message):\n msg = json.loads(message.data.decode('utf-8'))\n print('Received message: {}'.format(msg))\n message.ack()\n\n subscriber.subscribe(subscription_path, callback=callback)\n\n print('Listening for messages on {}'.format(subscription_path))\n while True:\n time.sleep(3)",
"async def listen(self):\n \"\"\"\n # Recover registered prefix to enable hot restart\n if not self.register_root:\n self.recover_registered_prefixes()\n \"\"\"\n\n # Init PubSub\n self.insert_handle.pb.set_publisher_prefix(self.repo_prefix)\n await self.insert_handle.pb.wait_for_ready()\n\n await self.insert_handle.listen(self.repo_prefix)\n await self.delete_handle.listen(self.repo_prefix)",
"def startService(self):\n\t\tprint(\"starting MQTT Client Subscriber Service\")\n\t\t# invoke whenConnected() inherited method\n\t\tself.whenConnected().addCallback(self.connectToBroker)\n\t\tClientService.startService(self)",
"async def listen_for_subscriptions(self):\n ws = None\n while True:\n try:\n ws = await self._subscribe_to_order_book_streams()\n async for ws_response in ws.iter_messages():\n data = ws_response.data\n if \"result\" in data:\n continue\n event_type = data.get(\"m\")\n if event_type in [self.TRADE_TOPIC_ID, self.DIFF_TOPIC_ID]:\n self._message_queue[event_type].put_nowait(data)\n if event_type in [self.PING_TOPIC_ID]:\n await self._handle_ping_message(ws)\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Unexpected error occurred when listening to order book streams. \"\n \"Retrying in 5 seconds...\",\n exc_info=True)\n await self._sleep(5.0)\n finally:\n ws and await ws.disconnect()",
"def listen(self):\n try:\n # Send messages from shoutbox every few seconds\n self.start_message_loop()\n self.start_ping_loop()\n # Start the reactor\n reactor.run()\n except KeyboardInterrupt:\n self.close_connection()\n self.logprint(\"Exiting...\")",
"def start(self):\n super().start()\n self._order_book_stream_listener_task = safe_ensure_future(\n self._data_source.listen_for_subscriptions()\n )",
"def connect_and_subscribe():\n global client\n client = MQTTClient(machine_id, broker)\n client.set_callback(mqtt_callback)\n client.connect()\n print(\"Connected to {}\".format(broker))\n for topic in (b'config', b'set'):\n t = topic_name(topic)\n client.subscribe(t)\n print(\"Subscribed to {}\".format(t))",
"def run(self):\n\t\tself.client.loop_start()\n\t\tself.discover_and_notify()\n\t\tself.publish()",
"def test_subscribe_topics(self):\n test_is_done = threading.Event()\n func = self.func003\n data = [\n {'valid': False, 'pattern': ''},\n {'valid': False, 'pattern': None},\n {'valid': False, 'pattern': 1234},\n {'valid': False, 'pattern': func},\n {'valid': True, 'pattern': 'kittens'},\n {'valid': True, 'pattern': '/kittens'},\n {'valid': True, 'pattern': '+'},\n {'valid': True, 'pattern': '#'},\n {'valid': True, 'pattern': '/#'},\n {'valid': True, 'pattern': '/+'}\n ]\n\n def started(client):\n \"\"\"started listener\"\"\"\n try:\n for test in data:\n if test['valid']:\n client.subscribe(test['pattern'])\n else:\n with pytest.raises(InvalidArgumentError):\n client.subscribe(test['pattern'])\n except Exception as exc:\n pytest.fail('Unexpected Exception ' + str(exc))\n finally:\n client.stop()\n test_is_done.set()\n client = mqlight.Client('amqp://host',\n 'test_subscribe_topics',\n on_started=started)\n test_is_done.wait(self.TEST_TIMEOUT)\n assert test_is_done.is_set()",
"def wait_for_registration(self):\n # receive notification of new publisher\n message = self.publisher_sub.recv_multipart()\n address = message[1].decode('utf-8')\n\n # connect message receiving socket to new publisher address\n self.message_sub.connect(address)",
"def receive(self):\n \n\n try:\n if not self.session:\n return\n \n t = time.time()\n str_types = [str, unicode]\n for name, topic in self.topics.items():\n\n # if no user has requested data for this topic in a while\n # close the topic\n if topic.inactive(t):\n print \"Closing topic %s for inactivity, %s\" % (name, topic.config)\n topic.close()\n del self.topics[name]\n continue\n\n while topic.available() > 0:\n msg = topic.receive()\n data = msg.content\n \n if type(data) == str:\n data = json.loads(data)\n if type(data) == unicode:\n data = json.loads(data)\n\n for callback in topic.callbacks:\n callback(msg, data)\n \n self.store(msg.subject, data)\n #print \"Got data for %s: %s\" % (msg.subject, data)\n\n self.session.acknowledge()\n except qmsg.exceptions.ConnectionError, inst:\n self.dbg(traceback.format_exc())\n self.disconnect()\n except qmsg.exceptions.SessionClosed, inst:\n self.dbg(traceback.format_exc())\n self.disconnect()",
"def pub_callback(topic, payload, qos, retain):\n mqtt.async_publish(topic, payload, qos, retain)",
"def subscribe(self):\n self.vision_sub()\n self.my_pos_sub()\n self.my_role_sub()",
"def subscribe(self, topics):\n logging.info(\"Consuming from Kafka at %(target)s\", {\"target\": self.target,})\n consumer = Consumer(\n {\n \"bootstrap.servers\": self.target,\n \"group.id\": \"mygroup\",\n \"default.topic.config\": {\"auto.offset.reset\": \"smallest\",},\n }\n )\n\n logging.info(\"Subscribing Kafka consumer to %(topics)s\", {\"topics\": topics,})\n consumer.subscribe(topics)\n return KafkaSubscriber(self.loop, topics, consumer)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
function to run parallel training with flags from command line
|
def main(arg):
del arg
params_list = None
model_ids = None
nb_jobs = FLAGS.NB_JOBS
if FLAGS.params:
params_list = eval("config."+FLAGS.params)
nb_jobs = min(FLAGS.NB_JOBS, len(params_list))
print('combinations: {}'.format(len(params_list)))
elif FLAGS.model_ids:
try:
model_ids = eval("config."+FLAGS.model_ids)
except Exception:
model_ids = eval(FLAGS.model_ids)
nb_jobs = min(FLAGS.NB_JOBS, len(model_ids))
print('combinations: {}'.format(len(model_ids)))
overwrite_params = None
if FLAGS.overwrite_params:
try:
overwrite_params = eval("config."+FLAGS.overwrite_params)
except Exception:
overwrite_params = eval(FLAGS.overwrite_params)
get_training_overview_dict = None
if FLAGS.get_overview:
get_training_overview_dict = eval("config."+FLAGS.get_overview)
plot_paths_dict = None
if FLAGS.plot_paths:
plot_paths_dict = eval("config."+FLAGS.plot_paths)
crossval = None
if FLAGS.crossval:
crossval = eval("config."+FLAGS.crossval)
plot_conv_study = None
if FLAGS.plot_conv_study:
plot_conv_study = eval("config."+FLAGS.plot_conv_study)
print('nb_jobs: {}'.format(nb_jobs))
if params_list is not None or model_ids is not None:
parallel_training(
params=params_list, model_ids=model_ids,
first_id=FLAGS.first_id, nb_jobs=nb_jobs,
saved_models_path=FLAGS.saved_models_path,
overwrite_params=overwrite_params
)
if get_training_overview_dict is not None:
extras.get_training_overview(
send=FLAGS.SEND, **get_training_overview_dict)
if plot_paths_dict is not None:
extras.plot_paths_from_checkpoint(
send=FLAGS.SEND, **plot_paths_dict)
if crossval is not None:
extras.get_cross_validation(send=FLAGS.SEND, **crossval)
if plot_conv_study is not None:
extras.plot_convergence_study(send=FLAGS.SEND, **plot_conv_study)
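
# A minimal sketch (an assumption, not from the original module) of how the absl
# flags referenced above could be declared and how main() would be dispatched;
# only a few representative flags are shown.
from absl import app, flags

flags.DEFINE_integer("NB_JOBS", 1, "Maximum number of parallel training jobs.")
flags.DEFINE_string("params", None, "Name of a hyper-parameter list defined in config.")
flags.DEFINE_string("model_ids", None, "Name of a model-id list defined in config.")
flags.DEFINE_bool("SEND", False, "Whether to send generated overviews/plots.")
FLAGS = flags.FLAGS

if __name__ == "__main__":
    app.run(main)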
|
[
"def main():\n\n # TODO: define:\n # step+noize\n # log scale instead of uniform\n\n # Define parametter: [min, max]\n dictParams = {\n \"batchSize\": [int, [1, 3]],\n \"learningRate\": [float, [1, 3]]\n }\n\n # Training multiple times with different parametters\n for i in range(10):\n # Generate the command line arguments\n trainingArgs = \"\"\n for keyArg, valueArg in dictParams:\n value = str(random(valueArg[0], max=valueArg[1]))\n trainingArgs += \" --\" + keyArg + \" \" + value\n\n # Launch the program\n os.run(\"main.py\" + trainingArgs)\n\n # TODO: Save params/results ? or already inside training args ?",
"def elaspic_train(args):\n _train_predictor('core')\n _train_predictor('interface')",
"def run_train():\n parser = argparse.ArgumentParser(description=\"GPT training\")\n parser.add_argument('--device_id', type=int, default=0, help=\"Device id, default is 0.\")\n parser.add_argument(\"--device_num\", type=int, default=1, help=\"Use device nums, default is 1.\")\n parser.add_argument(\"--distribute\", type=str, default=\"false\", choices=[\"true\", \"false\"],\n help=\"Run distribute, default is false.\")\n parser.add_argument(\"--optimizer\", type=str, default=\"adam\", choices=[\"adam\", \"lamb\"],\n help=\"select which optimizer to be used, default adam\")\n parser.add_argument(\"--epoch_size\", type=int, default=10, help=\"Epoch size, default is 10.\")\n parser.add_argument(\"--warmup_step\", type=int, default=10000, help=\"Warmup step, default is 10000.\")\n parser.add_argument(\"--data_path\", type=str, default=\"\", help=\"Data path of your MindRecord files.\")\n parser.add_argument(\"--start_lr\", type=float, default=\"5e-5\", help=\"Start learning rate, default is 5e-5.\")\n parser.add_argument(\"--end_lr\", type=float, default=\"1e-10\", help=\"End learning rate, default is 1e-10.\")\n parser.add_argument(\"--sink_size\", type=int, default=100, help=\"Sink size for every iteration, default is 100\")\n parser.add_argument(\"--model_parallel_num\", type=int, default=8, help=\"Num of model parallel, default is 8\")\n\n\n args_opt = parser.parse_args()\n device_id = int(os.getenv(\"DEVICE_ID\", '0'))\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\", device_id=device_id)\n if args_opt.distribute == \"true\":\n D.init()\n device_num = args_opt.device_num\n rank = device_id % device_num\n print(\"device_id is {}, rank_id is {}\".format(device_id, rank))\n\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,\n device_num=device_num)\n\n else:\n rank = 0\n device_num = 1\n\n config = GPTConfig(batch_size=4,\n seq_length=1024,\n vocab_size=50257,\n embedding_size=1024,\n num_layers=24,\n num_heads=16,\n expand_ratio=4,\n post_layernorm_residual=False,\n dropout_rate=0.1,\n compute_dtype=mstype.float16,\n use_past=False)\n gpt = GPT(config)\n model_parallel_num = args_opt.model_parallel_num\n data_parallel_num = int(device_num / model_parallel_num)\n parallel_config = TransformerOpParallelConfig(data_parallel=data_parallel_num,\n model_parallel=model_parallel_num)\n loss = CrossEntropyLoss(parallel_config.dp_mp_config)\n gpt_with_loss = GPTWithLoss(gpt, loss)\n\n ds = create_dataset(config.batch_size, data_path=args_opt.data_path, device_num=device_num, rank=rank)\n\n\n epoch_num = args_opt.epoch_size\n step_per_epoch = ds.get_dataset_size()\n\n lr = LearningRate(learning_rate=args_opt.start_lr,\n end_learning_rate=args_opt.end_lr,\n warmup_steps=args_opt.warmup_step,\n decay_steps=epoch_num*step_per_epoch)\n\n decay_filter = lambda x: 'layernorm' not in x.name.lower() and \"bias\" not in x.name.lower()\n params = gpt.trainable_params()\n decay_params = list(filter(decay_filter, params))\n other_params = list(filter(lambda x: not decay_filter(x), params))\n group_params = [{'params': decay_params, 'weight_decay': 1e-2},\n {'params': other_params, 'weight_decay': 0.0},\n {'order_params': params}]\n\n if args_opt.optimizer == \"lamb\":\n optimizer = nn.Lamb(group_params, learning_rate=lr)\n else:\n optimizer = nn.AdamWeightDecay(group_params, learning_rate=lr)\n\n callback_size = args_opt.sink_size\n actual_epoch_num = int(epoch_num * step_per_epoch/callback_size)\n 
callback = [TimeMonitor(callback_size), LossMonitor(callback_size)]\n\n config_ck = CheckpointConfig(save_checkpoint_steps=step_per_epoch, keep_checkpoint_max=1)\n ckpoint_cb = ModelCheckpoint(prefix=\"GPT2\", config=config_ck)\n callback.append(ckpoint_cb)\n\n\n update_cell = DynamicLossScaleUpdateCell(loss_scale_value=1024,\n scale_factor=2,\n scale_window=1000)\n\n gpt_with_grads = GPTTrainOneStepWithLossScaleCell(gpt_with_loss, optimizer=optimizer,\n scale_update_cell=update_cell)\n\n\n model = Model(gpt_with_grads)\n model.train(actual_epoch_num, ds, callbacks=callback, dataset_sink_mode=True, sink_size=callback_size)",
"def run_training_cli(config_file: str, verbose: bool = True) -> None:\n run_training(config_file=config_file, verbose=verbose)",
"def main():\n\n config = SimCLRConfig.parse_arguments()\n os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(gpu) for gpu in config.gpus])\n num_gpus_per_node = len(config.gpus)\n world_size = config.num_nodes * num_gpus_per_node\n distributed = world_size > 1\n setattr(config, 'num_gpus_per_node', num_gpus_per_node)\n setattr(config, 'world_size', world_size)\n setattr(config, 'distributed', distributed)\n \n rich.print(config.__dict__)\n config.save()\n\n if config.distributed:\n rich.print(f\"Distributed training on {world_size} GPUs.\")\n mp.spawn(\n main_worker,\n nprocs=config.num_gpus_per_node,\n args=(config, )\n )\n else:\n rich.print(f\"Single GPU training.\")\n main_worker(0, config=config) # single machine, single gpu",
"def do_training():\n train_cls = Train()\n train_cls.run()",
"def train(args):\n # ce\n if args.enable_ce:\n SEED = 102\n fluid.default_main_program().random_seed = SEED\n fluid.default_startup_program().random_seed = SEED\n\n cat_feat_dims_dict = OrderedDict()\n for line in open(args.cat_feat_num):\n spls = line.strip().split()\n assert len(spls) == 2\n cat_feat_dims_dict[spls[0]] = int(spls[1])\n dcn_model = DCN(args.cross_num, args.dnn_hidden_units, args.l2_reg_cross,\n args.use_bn, args.clip_by_norm, cat_feat_dims_dict,\n args.is_sparse)\n dcn_model.build_network()\n dcn_model.backward(args.lr)\n\n # config dataset\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_use_var(dcn_model.data_list)\n pipe_command = 'python reader.py {}'.format(args.vocab_dir)\n dataset.set_pipe_command(pipe_command)\n dataset.set_batch_size(args.batch_size)\n dataset.set_thread(args.num_thread)\n train_filelist = [\n os.path.join(args.train_data_dir, fname)\n for fname in next(os.walk(args.train_data_dir))[2]\n ]\n dataset.set_filelist(train_filelist)\n num_epoch = args.num_epoch\n if args.steps:\n epoch = args.steps * args.batch_size / 41000000\n full_epoch = int(epoch // 1)\n last_epoch = epoch % 1\n train_filelists = [train_filelist for _ in range(full_epoch)] + [\n random.sample(train_filelist, int(\n len(train_filelist) * last_epoch))\n ]\n num_epoch = full_epoch + 1\n print(\"train epoch: {}\".format(num_epoch))\n\n # Executor\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n\n for epoch_id in range(num_epoch):\n start = time.time()\n sys.stderr.write('\\nepoch%d start ...\\n' % (epoch_id + 1))\n dataset.set_filelist(train_filelists[epoch_id])\n exe.train_from_dataset(\n program=fluid.default_main_program(),\n dataset=dataset,\n fetch_list=[\n dcn_model.loss, dcn_model.avg_logloss, dcn_model.auc_var\n ],\n fetch_info=['total_loss', 'avg_logloss', 'auc'],\n debug=False,\n print_period=args.print_steps)\n model_dir = os.path.join(args.model_output_dir,\n 'epoch_' + str(epoch_id + 1), \"checkpoint\")\n sys.stderr.write('epoch%d is finished and takes %f s\\n' % (\n (epoch_id + 1), time.time() - start))\n fluid.save(fluid.default_main_program(), model_dir)",
"def start():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"output\", type=str, help=\"Path to the results folder\")\n parser.add_argument(\"--run\", type=str, default=\"PPO\", help=\"Deep RL algorithm to train the agent\")\n parser.add_argument(\"--num-cpus\", type=int, default=0, help=\"Number of CPUs when training\")\n parser.add_argument(\"--num-workers\", type=int, default=16, help=\"Number of workers during the training\")\n parser.add_argument(\"--env-per-workers\", type=int, default=5, help=\"Number of environments per worker\")\n parser.add_argument(\"--as-test\", action=\"store_true\", help=\"Check if learning was successful\")\n parser.add_argument(\"--use-prev-action-reward\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--stop-iters\", type=int, default=None, help=\"Stop training after specific iterations\")\n parser.add_argument(\"--stop-timesteps\", type=int, default=None, help=\"Stop training after specific timesteps\")\n parser.add_argument(\"--stop-reward\", type=float, default=None, help=\"Stop training after a specific return-value\")\n parser.add_argument(\"--pl\", type=float, default=None, help=\"Factor of the paddle length\")\n parser.add_argument(\"--restore\", action=\"store_true\", help=\"Continue training by restoring the latest checkpoint\")\n parser.add_argument(\"--checkpoint-freq\", type=int, default=20, help=\"Frequency of saving the next checkpoint\")\n parser.add_argument(\"--play\", action=\"store_true\", help=\"Start the parameter analysis by testing the environment\")\n parser.add_argument(\"--play-steps\", type=int, default=10, help=\"Number of playing steps\")\n parser.add_argument(\"--session\", type=int, default=1, help=\"Training session for a more precise analysis\")\n parser.add_argument(\"--env-name\", type=str, default=\"jupong2d\", help=\"Name of the Gym-Environment to register\")\n args = parser.parse_args()\n\n runner = RunRLlib(args.output, args.num_cpus, args.env_name, args.pl, session=args.session, checkpoint_frequency=args.checkpoint_freq, train_algorithm=args.run, num_workers=args.num_workers, env_per_worker=args.env_per_workers, stop_reward=args.stop_reward, stop_iters=args.stop_iters, stop_timesteps=args.stop_timesteps, restore=args.restore, as_test=args.as_test, play=args.play, play_steps=args.play_steps)\n runner.start()",
"def train_split_run(args):\n # pylint: disable=unused-argument\n from .prepare.train_split import run\n\n run(equal_splits=args.equalsplits)",
"def train(args):\r\n\r\n\r\n # Load the VQA training set\r\n print('Loading data...')\r\n dataset = VQA_Dataset(args.data_dir, args.emb)\r\n loader = DataLoader(dataset, batch_size=args.bsize,\r\n shuffle=True, num_workers=0, collate_fn=collate_fn)\r\n\r\n # Load the VQA validation set\r\n dataset_test = VQA_Dataset(args.data_dir, args.emb, train=False)\r\n loader_val = DataLoader(dataset_test,\r\n batch_size=args.bsize,\r\n shuffle=False,\r\n num_workers=0,\r\n collate_fn=collate_fn)\r\n\r\n n_batches = len(dataset) // args.bsize\r\n question_vocab = pickle.load(open('/mnt/data/xiaojinhui/wangtan_MM/vqa-project/data/train_q_dict.p', 'rb'))\r\n\r\n # Print data and model parameters\r\n print('Parameters:\\n\\t'\r\n 'vocab size: %d\\n\\tembedding dim: %d\\n\\tfeature dim: %d'\r\n '\\n\\thidden dim: %d\\n\\toutput dim: %d' % (dataset.n_answers, args.emb,\r\n dataset.feat_dim,\r\n args.hid,\r\n dataset.n_answers))\r\n\r\n print('Initializing model')\r\n model_gcn = conditional_GCN(nfeat=options['gcn']['nfeat'],\r\n nhid=options['gcn']['nhid'],\r\n nclass=options['gcn']['nclass'],\r\n emb = options['gcn']['fliter_emb'],\r\n dropout=options['gcn']['dropout'])\r\n # model_gcn_nofinding = layer_vqg.conditional_GCN_1(nfeat=options['gcn']['nfeat'],\r\n # nhid=options['gcn']['nhid'],\r\n # nclass=options['gcn']['nclass'],\r\n # emb = options['gcn']['fliter_emb'],\r\n # dropout=options['gcn']['dropout'])\r\n model_vqg = question_gen(vocab=question_vocab['wtoi'], vocab_i2t=question_vocab['itow'], opt=options['vqg'])\r\n # no_finding = layer_vqg.no_finding_area_top(in_feature=2652, hidden_feature=512, dropout=options['gcn']['dropout'])\r\n\r\n\r\n\r\n criterion = nn.CrossEntropyLoss()\r\n\r\n # Move it to GPU\r\n model_gcn = model_gcn.cuda()\r\n model_vqg = model_vqg.cuda()\r\n # model_gcn_nofinding = model_gcn_nofinding.cuda()\r\n # no_finding = no_finding.cuda()\r\n criterion = criterion.cuda()\r\n\r\n\r\n\r\n # Define the optimiser\r\n optimizer = torch.optim.Adam([\r\n {'params': model_gcn.parameters()},\r\n {'params': model_vqg.parameters()}], lr=args.lr)\r\n\r\n # Continue training from saved model\r\n start_ep = 0\r\n if args.model_path and os.path.isfile(args.model_path):\r\n print('Resuming from checkpoint %s' % (args.model_path))\r\n ckpt = torch.load(args.model_path)\r\n start_ep = ckpt['epoch']\r\n model_gcn.load_state_dict(ckpt['state_dict_gcn'])\r\n model_vqg.load_state_dict(ckpt['state_dict_vqg'])\r\n optimizer.load_state_dict(ckpt['optimizer'])\r\n\r\n # Update the learning rate\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = args.lr\r\n\r\n # Learning rate scheduler\r\n scheduler = MultiStepLR(optimizer, milestones=[30], gamma=0.5)\r\n scheduler.last_epoch = start_ep - 1\r\n\r\n # Train iterations\r\n print('Start training.')\r\n bleu_best= [0.0, 0.0, 0.0, 0.0]\r\n cider_best = 0.0\r\n meteor_best = 0.0\r\n rouge_best = 0.0\r\n for ep in range(start_ep, start_ep + args.ep):\r\n\r\n adjust_learning_rate(optimizer, ep)\r\n scheduler.step()\r\n ep_loss = 0.0\r\n ep_top3 = 0.0\r\n ep_top1 = 0.0\r\n ave_loss = 0.0\r\n ave_top3 = 0.0\r\n ave_top1 = 0.0\r\n iter_time_all = 0.0\r\n\r\n for step, next_batch in enumerate(loader):\r\n\r\n model_gcn.train()\r\n model_vqg.train()\r\n # Move batch to cuda\r\n target_q, an_feat, img_feat, adj_mat = \\\r\n utils.batch_to_cuda(next_batch, volatile=True)\r\n\r\n # forward pass\r\n torch.cuda.synchronize()\r\n start = time.time()\r\n\r\n # img_feat_no = torch.mul(img_feat[:,:,None,:], img_feat[:,None,:,:]).view(-1, 2652)\r\n # 
adj_mat = no_finding(img_feat_no).view(-1,36,36)\r\n # adj_mat += torch.eye(36).cuda()\r\n # adj_mat = torch.clamp(adj_mat, max=1)\r\n feat_gcn, adj_new = model_gcn(img_feat, adj_mat)\r\n # feat_gcn, adj_new = model_gcn_nofinding(img_feat, adj_mat)\r\n output = model_vqg(feat_gcn, an_feat, target_q)\r\n # for i in range(256):\r\n # dataset.drawarea[i]['adj'] = adj_new[i].detach().cpu().numpy().tolist()\r\n # dataset.drawarea[i]['adj_diag'] = np.diag(adj_new[i].detach().cpu().numpy()).tolist()\r\n #\r\n # json.dump(dataset.drawarea, open('/mnt/data/xiaojinhui/wangtan_MM/vqa-project/draw/new_adj_t.json', 'w'))\r\n # output_bs = model_vqg.beam_search(feat_gcn[0].unsqueeze(0),\r\n # an_feat[0].unsqueeze(0))\r\n target_q = target_q[:, 1:].contiguous()\r\n\r\n loss = criterion(output.view(output.size(0)*output.size(1), output.size(2)), target_q.view(target_q.size(0)*target_q.size(1)))\r\n\r\n\r\n\r\n # Compute batch accu\r\n\r\n top1 = utils.accuracy(output, target_q, 1)\r\n top3 = utils.accuracy(output, target_q, 3)\r\n\r\n ep_top1 += top1\r\n ep_top3 += top3\r\n ep_loss += loss.item()\r\n ave_top1 += top1\r\n ave_top3 += top3\r\n ave_loss += loss.item()\r\n\r\n\r\n # This is a 40 step average\r\n if step % 40 == 0 and step != 0:\r\n print(' Epoch %02d(%03d/%03d), ave loss: %.7f, top1: %.2f%%, top3: %.2f%%, iter time: %.4fs' %\r\n (ep + 1, step, n_batches, ave_loss / 40,\r\n ave_top1/40, ave_top3 / 40, iter_time_all/40))\r\n\r\n ave_top1 = 0\r\n ave_top3 = 0\r\n ave_loss = 0\r\n iter_time_all = 0\r\n\r\n # Compute gradient and do optimisation step\r\n optimizer.zero_grad()\r\n loss.backward()\r\n # clip_grad_norm_(model_gcn.parameters(), 2.)\r\n # clip_grad_norm_(model_gcn.parameters(), 2.)\r\n # clip_grad_norm_(no_finding.parameters(), 2.)\r\n optimizer.step()\r\n\r\n end = time.time()\r\n iter_time = end - start\r\n iter_time_all += iter_time\r\n\r\n\r\n\r\n # save model and compute validation accuracy every 400 steps\r\n if step == 0:\r\n with torch.no_grad():\r\n epoch_loss = ep_loss / n_batches\r\n epoch_top1 = ep_top1 / n_batches\r\n epoch_top3 = ep_top3 / n_batches\r\n\r\n # compute validation accuracy over a small subset of the validation set\r\n model_gcn.train(False)\r\n model_vqg.train(False)\r\n model_gcn.eval()\r\n model_vqg.eval()\r\n\r\n output_all = []\r\n output_all_bs = {}\r\n ref_all = []\r\n\r\n flag_val = 0\r\n\r\n for valstep, val_batch in tqdm(enumerate(loader_val)):\r\n # test_batch = next(loader_test)\r\n target_q, an_feat, img_feat, adj_mat = \\\r\n utils.batch_to_cuda(val_batch, volatile=True)\r\n # img_feat_no = torch.mul(img_feat[:, :, None, :], img_feat[:, None, :, :]).view(-1, 2652)\r\n # adj_mat = no_finding(img_feat_no).view(-1, 36, 36)\r\n # adj_mat += torch.eye(36).cuda()\r\n # adj_mat = torch.clamp(adj_mat, max=1)\r\n # feat_gcn, _ = model_gcn_nofinding(img_feat, adj_mat)\r\n feat_gcn, adj_new = model_gcn(img_feat, adj_mat)\r\n output = model_vqg.generate(feat_gcn, an_feat)\r\n\r\n for j in range(feat_gcn.size(0)):\r\n output_bs = model_vqg.beam_search(feat_gcn[j].unsqueeze(0),\r\n an_feat[j].unsqueeze(0))\r\n output_all_bs[flag_val] = output_bs\r\n flag_val += 1\r\n\r\n\r\n output_all.append(output.cpu().numpy())\r\n ref_all.append(target_q[:, :-1].cpu().numpy())\r\n\r\n\r\n\r\n gen, ref = utils.idx2question(np.concatenate(output_all, 0), np.concatenate(ref_all, 0), question_vocab['itow'])\r\n print(gen.values()[:10])\r\n\r\n # save the best\r\n bleu, cider, meteor, rouge = main.main(ref, gen)\r\n bleu_best, cider_best, meteor_best, rouge_best, choice = 
utils.save_the_best(bleu, cider, meteor, rouge,\r\n bleu_best, cider_best,\r\n meteor_best, rouge_best)\r\n if choice:\r\n utils.save(model_gcn, model_vqg, optimizer, ep, epoch_loss, epoch_top1,\r\n dir=args.save_dir, name=args.name + '_' + str(ep + 1))\r\n\r\n print('use beam search...')\r\n bleu, cider, meteor, rouge = main.main(ref, output_all_bs)\r\n bleu_best, cider_best, meteor_best, rouge_best, choice = utils.save_the_best(bleu, cider, meteor, rouge,\r\n bleu_best, cider_best,\r\n meteor_best, rouge_best)\r\n if choice:\r\n utils.save(model_gcn, model_vqg, optimizer, ep, epoch_loss, epoch_top1,\r\n dir=args.save_dir, name=args.name + '_' + str(ep + 1))\r\n\r\n\r\n print('the best bleu: %s, cider: %.6s, meteor: %.6s, rouge: %.6s'\r\n % (bleu_best, cider_best, meteor_best, rouge_best))\r\n print(output_all_bs.values()[:10])\r\n\r\n\r\n\r\n model_gcn.train(True)\r\n model_vqg.train(True)",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', default=\"configs.baseline\",\n help='experiment configuration dict')\n parser.add_argument('--train', action='store_true',\n help='whether to train')\n parser.add_argument('--test', action='store_true', help='whether to test')\n args = parser.parse_args()\n\n config_module = importlib.import_module(args.config)\n trainer = Trainer(config_module.config)\n if args.train:\n trainer.run()\n if args.test:\n test_report = trainer.test()\n print(test_report)",
"def shell_train(global_profile, profiles):\n run_shell(global_profile, profiles, 'pyspark_train')",
"def run_multiple_threads(num_threads, naming_function, label_function, feature_function_list, \n\t\t\t\t\t\tmaker_function, make_file, make_db, use_gold):\n\tmake_file = True\n\tmake_db = False\n\tnum_threads_training = num_threads - 1\n\tsections_per_threads = 20 / (num_threads_training)\n\tprocs = []\n\tfor i in range(num_threads_training):\n\t\tstart_sec = 2 + (i * sections_per_threads)\n\t\tif i == num_threads_training - 1:\n\t\t\tend_sec = 20 + 1\n\t\t\t#end_sec = 21 + 1\n\t\telse:\n\t\t\tend_sec = 2 + ((i + 1) * sections_per_threads)\n\t\tfile_name = 'sec_%s_%s_training_set.csv' % (start_sec, end_sec - 1)\n\t\tp = Process(target=maker_function, \\\n\t\t\t\targs= (naming_function, label_function, feature_function_list, range(start_sec, end_sec), file_name, make_file, make_db, use_gold))\n\t\tprocs.append(p)\n\tp = Process(target=maker_function, \\\n\t\t\targs= (naming_function, label_function, feature_function_list, [21, 22], 'sec_21_22_test_set.csv', make_file, make_db, use_gold))\n\tprocs.append(p)\n\tfor p in procs: p.start()\n\tfor p in procs: p.join()\n\t_merge_data_files()",
"def run_benchmark(args: Config) -> None:\n\n # sanity checks\n if args.gpus is None:\n click.echo(\"Error: --num_gpus is not given\")\n exit()\n if args.gpus <= 1:\n click.echo(\"Warning: tensor parallel will be activated with at least 2 devices.\")\n\n click.echo(\"=== Benchmarking Parameters ===\")\n for k, v in args.items():\n click.echo(f'{k}: {v}')\n click.echo('')\n\n config_list = find_all_configs(args.gpus)\n\n avail_ports = [free_port() for _ in range(len(config_list))]\n run_func = partial(run_dist_profiling,\n world_size=args.gpus,\n port_list=avail_ports,\n config_list=config_list,\n hyperparams=args)\n mp.spawn(run_func, nprocs=args.gpus)",
"def test_parallel_pipeline_tuner(clf_binary):\n # TODO: Add test\n pass",
"def test_run_detext_multitask_ranking(self):\n output = os.path.join(DataSetup.out_dir, \"multitask_model\")\n args = self.multitask_args + [\"--task_ids\", \"0\", \"1\",\n \"--task_weights\", \"0.2\", \"0.8\",\n \"--out_dir\", output]\n sys.argv[1:] = args\n main(sys.argv)\n self._cleanUp(output)",
"def run_with_args():\n model_path = sys.argv[1]\n data_folder = sys.argv[2]\n output_folder = sys.argv[3]\n if len(sys.argv) >= 5:\n intensity_correction = sys.argv[4]\n else:\n intensity_correction = 0.0\n\n predict_and_evaluate(model_path, data_folder, output_folder, intensity_correction=intensity_correction)",
"def main(args):\n model, ensemble = setup_gnina_model(args.cnn, args.dimension, args.resolution)\n model.eval() # Ensure models are in evaluation mode!\n\n device = utils.set_device(args.gpu)\n model.to(device)\n\n example_provider = setup.setup_example_provider(args.input, args, training=False)\n grid_maker = setup.setup_grid_maker(args)\n\n # TODO: Allow average over different rotations\n loader = dataloaders.GriddedExamplesLoader(\n example_provider=example_provider,\n grid_maker=grid_maker,\n random_translation=0.0, # No random translations for inference\n random_rotation=False, # No random rotations for inference\n device=device,\n grids_only=True,\n )\n\n for batch in loader:\n if not ensemble:\n log_pose, affinity = model(batch)\n else:\n log_pose, affinity, affinity_var = model(batch)\n\n pose = torch.exp(log_pose[:, -1])\n\n for i, (p, a) in enumerate(zip(pose, affinity)):\n print(f\"CNNscore: {p:.5f}\")\n print(f\"CNNaffinity: {a:.5f}\")\n if ensemble:\n print(f\"CNNvariance: {affinity_var[i]:.5f}\")\n print(\"\")",
"def main():\n log_file = os.path.join(ARGS.result_dir, ARGS.experiment_name, \"log.txt\")\n print(\"Result dir: %s\", ARGS.result_dir)\n print(\"Log file: %s\", log_file)\n\n # Setup logging in base_dir/log.txt\n setup_logging(level=ARGS.log_level, filename=log_file)\n logger.info(\" -- MNIST Multilabel -- Started \")\n tstart = time.time()\n\n try:\n if not ARGS.cuda:\n # Set number of CPU threads\n torch.set_num_threads(ARGS.njobs)\n\n # Create and run experiment\n run_multilabel_mnist(ARGS)\n except Exception as e:\n logger.exception(\"Experiment crashed.\")\n logger.exception(\"Exception: %s\", str(e))\n\n # Measure time\n tstr = time_delta_now(tstart)\n logger.info(\" -- MNIST -- Finished, took %s\", tstr)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update the randomizer's domain parameter distribution for one domain parameter.
|
def adapt_one_distr_param(
self, domain_param_name: str, domain_distr_param: str, domain_distr_param_value: Union[float, int]
):
for dp in self.domain_params:
if dp.name == domain_param_name:
if domain_distr_param in dp.get_field_names():
# Set the new value
if not isinstance(domain_distr_param_value, (int, float, bool)):
                        raise pyrado.TypeErr(given=domain_distr_param_value, expected_type=[int, float, bool])
dp.adapt(domain_distr_param, domain_distr_param_value)
else:
raise pyrado.KeyErr(
msg=f"The domain parameter {dp.name} does not have a domain distribution parameter "
f"called {domain_distr_param}!"
)
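
# Usage sketch (hypothetical names, not from the original source): assuming
# `randomizer` is an instance of the surrounding randomizer class and holds a
# normal domain parameter called "mass", the following would set the std of its
# sampling distribution to 0.2:
#     randomizer.adapt_one_distr_param("mass", "std", 0.2)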
|
[
"def _get_wrapper_domain_param(self, domain_param: dict):\n # Cast the factor value to int, since randomizer yields ndarrays or Tensors\n self._factor = int(domain_param.get(\"downsampling\", self._factor))",
"def rescale_distr_param(self, param: str, scale: float):\n if not scale >= 0:\n raise pyrado.ValueErr(given=scale, ge_constraint=\"0\")\n\n for dp in self.domain_params:\n if hasattr(dp, param):\n # Scale the param attribute of the domain parameters object\n setattr(dp, param, scale * getattr(dp, param))\n\n # Also scale the distribution (afterwards)\n if isinstance(dp, UniformDomainParam):\n dp.distr = Uniform(dp.mean - dp.halfspan, dp.mean + dp.halfspan)\n if isinstance(dp, NormalDomainParam):\n dp.distr = Normal(dp.mean, dp.std)\n if isinstance(dp, MultivariateNormalDomainParam):\n dp.distr = MultivariateNormal(dp.mean, dp.cov)\n if isinstance(dp, BernoulliDomainParam):\n dp.distr = Bernoulli(dp.prob_1)",
"def _set_wrapper_domain_param(self, domain_param: dict):\n # Cast to integer for consistency\n domain_param[\"downsampling\"] = int(self._factor)",
"def set_distribution(self, distribution):\n if self.distribution == distribution:\n return\n self.distribution = distribution\n self.random_list = self.get_random_list()\n np.random.shuffle(self.random_list)\n self.random_pr = 0\n self.random_list_length = len(self.random_list)",
"def _update_policy_and_distribution(self):\n self._policy = self.get_softmax_policy()\n self._distribution = distribution_std.DistributionPolicy(\n self._game, self._policy)",
"def _randomize_parameters(self):\n for parameter in self.parameters:\n param_ranges = self.parameters[parameter]['range']\n\n # check if parameter has multiple values (=> multiple ranges) like for curves\n if isinstance(self.parameters[parameter]['value'], list): \n values = []\n for param_range in param_ranges:\n values.append(self._new_value(param_range))\n self.parameters[parameter]['value'] = values\n else: # simple 1-value parameter\n self.parameters[parameter]['value'] = self._new_value(param_ranges)",
"def update(self):\n self.domain.update()",
"def update(self,lr):\n self.sample_minibatch(lr)\n # Calculate gradients at current point\n dlogbeta = lr.dlogpost(self)\n lr.grad_sample[self.iter-1,:] = dlogbeta\n\n # Update parameters using SGD\n eta = np.random.normal( scale = self.epsilon )\n lr.beta += self.epsilon / 2 * dlogbeta + eta",
"def domain_param(self, param: dict):\n self._load_domain_param(param)\n self._wrapped_env.domain_param = param",
"def update_distr(self, distr, index, count_vec):\n\n length = len(distr)\n\n #update size. Function of learning rate and relative count\n # The more frequently is seen, the lower the smaller the update\n #update = self.learn_rate*(1 - count_vec[index]/sum(count_vec))\n update = self.learn_rate\n\n if np.isnan(update): #handles division by zero\n update = self.learn_rate\n\n distr[index] *= 1 + update\n distr /= sum(distr)\n\n assert abs(sum(distr) - 1) < 1e-9 #allows for rounding errors\n\n return",
"def update_param_dist(self, x, y):\n sinv0 = self.sinv\n mean0 = self.mean\n\n phi_x = self.phi(x)\n sinv_new = sinv0 + self.beta * np.dot(phi_x.T, phi_x)\n mean_new = np.linalg.solve(sinv_new, np.dot(sinv0, mean0) + self.beta * np.dot(phi_x.T, y))\n\n self.mean = mean_new\n self.sinv = sinv_new\n self.s = np.linalg.inv(sinv_new)",
"def uniform_probability(self, args = []):\n\t\tself.probability = 1",
"def randomise_proposed_value(self):\n if self.parameter_type is MMCParameterType.UNIFORM_DIST:\n (a, b) = self.static_dist_or_list\n self.proposed_value = random.uniform(a, b)\n elif self.parameter_type is MMCParameterType.NORMAL_DIST:\n (mu, sigma) = self.static_dist_or_list\n self.proposed_value = random.normalvariate(mu, sigma)\n elif self.parameter_type is MMCParameterType.DISCRETE_RANGE:\n (min_v, max_v, step) = self.static_dist_or_list\n self.proposed_value = random.choice(\n numpy.arange(min_v, max_v, step))\n elif self.parameter_type is MMCParameterType.LIST:\n self.proposed_value = random.choice(self.static_dist_or_list)\n elif self.parameter_type is MMCParameterType.STATIC_VALUE:\n raise TypeError('This value is static, it cannot be mutated.')\n else:\n raise TypeError(\n 'Cannot randomise this parameter, unknown parameter type.')\n return",
"def test_post_parameter_update_regularizer(self):\n if not hasattr(self.instance, \"regularizer\"):\n self.skipTest(\"no regularizer\")\n\n # set regularizer term to something that isn't zero\n self.instance.regularizer.regularization_term = torch.ones(1, dtype=torch.float, device=self.instance.device)\n\n # call post_parameter_update\n self.instance.post_parameter_update()\n\n # assert that the regularization term has been reset\n expected_term = torch.zeros(1, dtype=torch.float, device=self.instance.device)\n assert self.instance.regularizer.regularization_term == expected_term",
"def update_algo_parameter(self, parameter_name, new_parameter_value):\n if hasattr(self, parameter_name):\n setattr(self, parameter_name, new_parameter_value)\n if parameter_name == \"lr\":\n for param_group in self.pi_optimizer.param_groups:\n param_group['lr'] = new_parameter_value\n for param_group in self.q_optimizer.param_groups:\n param_group['lr'] = new_parameter_value\n for param_group in self.alpha_optimizer.param_groups:\n param_group['lr'] = new_parameter_value",
"def init_params_random(self) -> None:\n self.probs = Dirichlet(self.prior).sample()",
"def update_params(self):\n # todo: sample theta and phi\n\n #sample theta from dirichlet (A_{d,k}+alpha), since dim(theta)=ndoc * ntopic , we need to update for each d, so for each row\n for d in range(self.n_docs):\n self.theta[d,:] = np.random.dirichlet(self.A_dk[d,:] + self.alpha)\n\n #sample phi from dirichlet (B_{k,w}+beta), dim(phi) = ntopics * nwords\n for k in range(self.n_topics):\n self.phi[k,:] = np.random.dirichlet(self.B_kw[k,:] + self.beta)\n\n\n self.update_topic_doc_words()\n #print('thishif',self.topic_doc_words_distr[0,0,:])\n self.sample_counts() #update A and B",
"def test_post_parameter_update(self):\n # do one optimization step\n opt = optim.SGD(params=self.instance.parameters(), lr=1.0)\n batch = self.factory.mapped_triples[: self.batch_size, :].to(self.instance.device)\n scores = self.instance.score_hrt(hrt_batch=batch, mode=self.mode)\n fake_loss = scores.mean()\n fake_loss.backward()\n opt.step()\n\n # call post_parameter_update\n self.instance.post_parameter_update()\n\n # check model constraints\n self._check_constraints()",
"def reset_parameters(self):\n logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__)\n # see https://github.com/pytorch/fairseq/blob/master/fairseq/models/transformer.py\n # embedding\n nn.init.normal_(self.embed.weight, mean=0., std=self.d_model**-0.5)\n nn.init.constant_(self.embed.weight[self.pad], 0.)\n # output layer\n nn.init.xavier_uniform_(self.output.weight)\n nn.init.constant_(self.output.bias, 0.)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Rescale a parameter for all distributions.
|
def rescale_distr_param(self, param: str, scale: float):
    if not scale >= 0:
        raise pyrado.ValueErr(given=scale, ge_constraint="0")
    for dp in self.domain_params:
        if hasattr(dp, param):
            # Scale the param attribute of the domain parameters object
            setattr(dp, param, scale * getattr(dp, param))
            # Also scale the distribution (afterwards)
            if isinstance(dp, UniformDomainParam):
                dp.distr = Uniform(dp.mean - dp.halfspan, dp.mean + dp.halfspan)
            if isinstance(dp, NormalDomainParam):
                dp.distr = Normal(dp.mean, dp.std)
            if isinstance(dp, MultivariateNormalDomainParam):
                dp.distr = MultivariateNormal(dp.mean, dp.cov)
            if isinstance(dp, BernoulliDomainParam):
                dp.distr = Bernoulli(dp.prob_1)
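
A minimal usage sketch, not taken from pyrado itself: it mimics the same pattern with a single, hypothetical normal-distributed parameter, scaling the stored attribute first and rebuilding the torch distribution afterwards. `ToyNormalParam` and `rescale_param` are illustrative stand-ins, not the original API.

# Sketch only -- ToyNormalParam / rescale_param are hypothetical stand-ins
# for NormalDomainParam and the method above.
from torch.distributions import Normal


class ToyNormalParam:
    def __init__(self, mean: float, std: float):
        self.mean = mean
        self.std = std
        self.distr = Normal(mean, std)


def rescale_param(dp: ToyNormalParam, param: str, scale: float):
    if not scale >= 0:
        raise ValueError("scale must be >= 0")
    if hasattr(dp, param):
        # Scale the stored attribute, then rebuild the attached distribution
        setattr(dp, param, scale * getattr(dp, param))
        dp.distr = Normal(dp.mean, dp.std)


dp = ToyNormalParam(mean=1.0, std=0.1)
rescale_param(dp, "std", 2.0)
print(dp.distr)  # distribution rebuilt with the std scaled to 0.2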
|
[
"def scale(self, value):\n\t\tfor val in self.dilutions:\n\t\t\tself.dilutions[val] /= value",
"def _normalize_param_scaling(self):\n\n if 'original_units_in_meter' in self.properties: # pattern was scaled\n scaling = 100 / self.properties['original_units_in_meter']\n for parameter in self.parameters:\n if self.parameters[parameter]['type'] == 'additive_length': \n self.parameters[parameter]['value'] = scaling * self.parameters[parameter]['value']\n self.parameters[parameter]['range'] = [\n scaling * elem for elem in self.parameters[parameter]['range']\n ]\n\n # now we have cm everywhere -- no need to keep units info\n self.properties.pop('original_units_in_meter', None)\n\n print('Warning: Parameter units were converted to cm')",
"def parameters_scaled(self) -> OptimizationVariableList:\n return self._nlp.parameters.scaled",
"def setKernelParam(self, alpha, scale) -> None:\n ...",
"def scale(self, x):\n # assume a scalar that can multiply values\n for k in tuple(self.keys()): # I think the tuple() might speed things up (why?)\n self[k] *= x",
"def _parameter_scale(self, var):\n return tf.maximum(reduce_rms(var), self.epsilon2)",
"def reset_rescale(self):\n\n for name in self.names:\n self.rescale_parameters[name] = None",
"def _rescale(self, samp, **kwargs):\n \"\"\"\n Here is where the subclass where overwrite rescale method\n \"\"\"\n return samp",
"def rescaling(scores, base):\n return (scores - base) / (1 - base)",
"def set_scale_values(self):\n\n for param_n in self.log_ids:\n log_data = self.__log_scale(param_n, self.channel)\n self.scale[param_n] = log_data\n\n if self.gain_ids:\n for param_n in self.gain_ids:\n gain_data = self.__gain_scale(param_n, self.channel)\n self.scale[param_n] = gain_data",
"def applyScale(self, scale):\n pass",
"def rescaled_image():",
"def rescale(x, min=0, max=1):\n n_features = x.shape[-1]\n x_copy = np.copy(x)\n for f in range(n_features):\n c = x.obs[:, :, f]\n c_min = np.min(c)\n c_max = np.max(c)\n x_copy[:, :, f] = (c - c_min) / (c_max - c_min) * (max - min) + min\n return x_copy",
"def normalize_parameters(self, optimized_params):\n for rosparam_name, bounds in self.design_space.items():\n assert(optimized_params[rosparam_name] >= bounds[0] and optimized_params[rosparam_name] <= bounds[1])\n old_val = optimized_params[rosparam_name] # TODO remove after test\n param_range = bounds[1] - bounds[0]\n optimized_params[rosparam_name] = (optimized_params[rosparam_name] - bounds[0]) / param_range\n print(\"Normalizing\", rosparam_name, \"from\", old_val, \"to\", optimized_params[rosparam_name]) # TODO remove after test\n\n return optimized_params",
"def rescale(set: np.array,maxiter:int, interpolation:str):\n return colorInterpolations[interpolation](set,maxiter)",
"def normalize_distribution(x):\n norm = 1 / np.sum(x)\n return x * norm",
"def control_scale(self):\n\t\tfor bac in self.bacteria():\n\t\t\tval = np.mean(self.map[\"control\"][str(bac)].values())\n\t\t\tfor pref in self.map[str(bac)].keys():\n\t\t\t\tself.map[str(bac)][pref].scale(val)",
"def set_scaling(self, factor=None):\n if factor is None:\n factor = self.get_default_scaling_factor()\n\n factor = float(factor)\n if np.isnan(factor) or factor == 1:\n return\n log.debug(f\"Applying scaling factor {factor:.3f}\")\n self.gain /= factor",
"def Scale(self, w):\n\t\tfor key in self._map.keys():\n\t\t\tvalue = self.GetValue(key)\n\t\t\terror = self.GetErrorStat(key)\n\t\t\trms = self.GetRMS(key)\n\n\t\t\tself.Set(key, value * w, error * w, rms * w)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Print the application banner.
|
def print_banner():
    print(
        """\033[92m
 _____ _ _ _ _____ _____
| | | | |_ _| __|
| | | | | | | | | __|
|_____|_____| |_| |__|
        @owtfp
    http://owtf.org
    Version: {0}
\033[0m""".format(
            __version__
        )
    )
|
[
"def display_banner():\n\n banner = \"\"\"\n __ _____ ___ ___ __ \n / / / / _ \\/ _ \\ ____/ (_)__ ___ / /_\n / /_/ / // / ___/ / __/ / / -_) _ \\/ __/\n \\____/____/_/ \\__/_/_/\\__/_//_/\\__/ \n\n \"\"\"\n print(banner, flush=True)",
"def _banner() -> None:\n print(\"\"\"\\nThe Game of\n ______ ______ __ __ ______ __ __ ______\n /\\ __ \\ /\\__ _\\ /\\ \\_\\ \\ /\\ ___\\ /\\ \\ /\\ \\ /\\ __ \\\\\n \\ \\ \\/\\ \\ \\/_/\\ \\/ \\ \\ __ \\ \\ \\ __\\ \\ \\ \\____ \\ \\ \\____ \\ \\ \\/\\ \\\\\n \\ \\_____\\ \\ \\_\\ \\ \\_\\ \\_\\ \\ \\_____\\ \\ \\_____\\ \\ \\_____\\ \\ \\_____\\\\\n \\/_____/ \\/_/ \\/_/\\/_/ \\/_____/ \\/_____/ \\/_____/ \\/_____/\"\"\")",
"def print_banner(self):\n print('<===================')\n if self.state:\n for addr in self.watch_addrs:\n nice_addr, val = self.nice_format_addr(addr), self.state.memory[addr]\n print('{} = {:x}'.format(nice_addr, val))\n\n if self.state.halt:\n print('CPU HALTED')\n\n print('\\n====================\\n')\n\n for cmd in self.autocmds:\n try:\n self.runcmd(cmd, user=False)\n except Exception as e:\n pass\n else:\n print('Not running!')\n print('===================>')",
"def get_banner():\n return \"** Shell prompt **\\n\"",
"def show_banner(message):\n click.clear()\n click.echo('')\n click.secho(' {} '.format(message), bg='blue', fg='black')\n click.echo('')\n click.secho('n = Next\\nb = Beginning\\nx = Exit\\nl = Next Lyric\\nv = Previous Song', bg='yellow', fg='black')\n click.echo('')",
"def banner(message, border=\"*\"):\n print((len(message)+4) * border)\n print(border, message, border)\n print((len(message)+4) * border)",
"def banner(self, irc, msg, args, text):\n if not ircdb.checkCapability(msg.prefix, 'admin'):\n self.log.warning(\"Permission Denied!\")\n return\n resp = figlet_format(text, font='banner')\n lines = resp.split(\"\\n\")\n for line in lines:\n if len(line) == 0:\n continue\n irc.reply(line)",
"def pyre_interactiveBanner(self):\n # just saying hi...\n return \"entering interactive mode...\\n\"",
"def print_botball_logo():\n\n print \" ____ ____ __________ ___ __ __ \"\n print \" / __ )/ __ \\\\/_ __/ __ )/ | / / / / \"\n print \" / __ / / / / / / / __ / /| | / / / / \"\n print \" / /_/ / /_/ / / / / /_/ / ___ |/ /___/ /___ \"\n print \"/_____/\\\\____/ /_/ /_____/_/ |_/_____/_____/\"\n print \"============================================\\n\"",
"def banner(self):\n # The default initially sets 'kludge' mode, which does not warrant\n # any reply and is always compatible with any client NVT.\n #\n # Notably, a request to negotiate TTYPE is made. If sucessful,\n # the callback ``request_advanced_opts()`` is fired.\n self.echo ('Welcome to {}! '.format(__file__,))\n self.stream.iac(telopt.WILL, telopt.SGA)\n self.stream.iac(telopt.WILL, telopt.ECHO)\n self.stream.iac(telopt.DO, telopt.TTYPE)",
"def show_msg_on_startup(self):\n print(\" \")\n print(\" ██████╗ ███████╗████████╗████████╗███████╗██████╗ ███████╗██╗███████╗ \") \n print(\" ██╔══██╗██╔════╝╚══██╔══╝╚══██╔══╝██╔════╝██╔══██╗██╔════╝██║██╔════╝ \")\n print(\" ██████╔╝█████╗ ██║ ██║ █████╗ ██████╔╝███████╗██║███████╗ \")\n print(\" ██╔══██╗██╔══╝ ██║ ██║ ██╔══╝ ██╔══██╗╚════██║██║╚════██║ \")\n print(\" ██████╔╝███████╗ ██║ ██║ ███████╗██║ ██║███████║██║███████║ \")\n print(\" ╚═════╝ ╚══════╝ ╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝╚══════╝╚═╝╚══════╝ \")\n print(\" \")\n print(\" ===================================================================== \")\n print(\" \")\n print(\" DISCLAIMER: \")\n print(\" BetterSIS, this software, controls SIS in the background and tries to \")\n print(\" provide modern features \")\n print(\" (such as command history, suggestions and autocompletion) \")\n print(\" and small improvements to SIS itself. (like the simulate improvement) \")\n print(\" > SIS is a tool for synthesis and optimization of sequential circuits \")\n print(\" \")\n print(\" I'm not affiliated with the SIS developers in any way. \")\n print(\" You can read more about SIS here: \")\n print(\" https://jackhack96.github.io/logic-synthesis/sis.html \")\n print(\" \")\n print(\" ===================================================================== \")\n print(\" \")\n print(\" BetterSIS version: {}\".format(__version__) )\n print(\" BetterSIS repository: https://github.com/mario33881/bettersis \")\n print(\" Siswrapper version: {}\".format(siswrapper.__version__) )\n print(\" Running in the background: \", self.sis.res[\"stdout\"] )",
"def print_header():\n\n print('------------------------------------')\n print(' CAT FACTORY')\n print('------------------------------------')",
"def print_intro():\n globals.clear_screen()\n print(\n \"THE DESTROYER'S DESTINY\\n\"\n \"\\tCSC 11300 Projects 1 & 2\\n\"\n \"\\tBy: Vishnu Nair\\n\\n\"\n \"(C) 2015 Vishnu Nair. All rights reserved.\\n\"\n )",
"def printBeskjed():\n print(\"Hvilken kolonne er tallet ditt i? (v/m/h) \") #Printer ut en beskjed.",
"def inbg():\n return render_template(\n 'inbg.html',\n title='In bg',\n year=datetime.now().year,\n message='Your in bg page.'\n )",
"def print_greeting():\n print(\n \"\\nHi there! \\nI can help you figure out the notes of a scale or mode of your choice!\"\n )",
"def banner(message, char='#', indent=4):\n return '{} {} {}'.format(char * indent, message, char * (80 - (indent + 2 + len(message))))",
"def welcome():\r\n print(''' _ _ \r\n | | | | \r\n | |__| | __ _ _ __ __ _ _ __ ___ __ _ _ __ \r\n | __ |/ _` | '_ \\ / _` | '_ ` _ \\ / _` | '_ \\ \r\n | | | | (_| | | | | (_| | | | | | | (_| | | | |\r\n |_| |_|\\__,_|_| |_|\\__, |_| |_| |_|\\__,_|_| |_|\r\n __/ | \r\n |___/''')",
"def capturing(self):\n self.status_label.setText('Capturing ......')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
graph: GraphWithV8 to find embeddings on. This code finds all the jumps for the given GraphWithV8. A jump is an edge between 1) two non-cofacial edges, 2) one vertex and one edge that are not cofacial, or 3) two non-cofacial vertices. THIS RETURNS A LIST OF NEW GRAPHS WITH JUMPS, EACH ONE A NEW JUMP.
|
def findJumps(graph, embedding):
    # check with stage2EmbeddingTest whether the current graph embeds in firstCurrentlyEmbedding
    vertices = graph.getVertices()
    vertexFaces = getVertexFaces(vertices, embedding)
    vertexJumps = findVertexJumps(graph, vertices, vertexFaces)
    edges = graph.getEdges()
    edgeFaces = getEdgeFaces(edges, embedding)
    edgeJumps = findEdgeJumps(graph, edges, edgeFaces, embedding)
    vertexEdgeJumps = findVertexEdgeJumps(graph, vertices, vertexFaces, edges, edgeFaces)
    nonIsomorphicJumps = []
    nonIsomorphicJumpLabels = []
    for currGraph in vertexJumps + vertexEdgeJumps + edgeJumps:
        currGraphLabel = currGraph.getCanonicalLabel()
        for currNonIsomorphicGraphLabel in nonIsomorphicJumpLabels:
            if currNonIsomorphicGraphLabel == currGraphLabel:
                break
        else:
            # for-else: runs only when no matching canonical label was found
            nonIsomorphicJumps.append(currGraph)
            nonIsomorphicJumpLabels.append(currGraphLabel)
    return nonIsomorphicJumps
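
A side note rather than part of the routine above: the duplicate filtering at the end can also be written with a set of labels, which replaces the quadratic inner loop with a constant-time lookup. `canonical_label` is a hypothetical stand-in for Graph.getCanonicalLabel().

# Sketch only: generic "keep one representative per canonical label" filter.
def dedup_by_label(graphs, canonical_label):
    seen = set()
    unique = []
    for g in graphs:
        label = canonical_label(g)
        if label not in seen:
            seen.add(label)
            unique.append(g)
    return unique


# Example with strings standing in for graphs and sorted characters as "label":
print(dedup_by_label(["abc", "bca", "xyz"], lambda s: "".join(sorted(s))))
# -> ['abc', 'xyz']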
|
[
"def graph_to_cycles(graph: list, breakpoint_graph=False) -> list:\n nodes = defaultdict(list)\n\n for i in graph:\n nodes[i[0]].append(i[1])\n nodes[i[1]].append(i[0])\n\n # If the graph is the breakpoint graph, two ends of the synteny block is connected to each other\n if not breakpoint_graph:\n for i in nodes.keys():\n nodes[i].append(i + 1 if i % 2 == 1 else i - 1)\n\n chromosomes = []\n prev_node = list(nodes.keys())[0]\n next_node = nodes[prev_node][0]\n # Remove the edge by removing the ending and starting nodes from the possible transitions in respective nodes\n nodes[prev_node].remove(next_node)\n nodes[next_node].remove(prev_node)\n chromosome = [next_node]\n prev_node = next_node\n while True:\n try:\n next_node = nodes[prev_node][0]\n chromosome.append(next_node)\n nodes[prev_node].remove(next_node)\n nodes[next_node].remove(prev_node)\n nodes.pop(prev_node)\n prev_node = next_node\n # When the starting node is reached there will be no outgoing edges\n except IndexError:\n chromosomes.append(chromosome)\n nodes.pop(prev_node)\n if len(nodes) == 0:\n break\n else:\n prev_node = list(nodes.keys())[0]\n next_node = nodes[prev_node][0]\n nodes[prev_node].remove(next_node)\n nodes[next_node].remove(prev_node)\n chromosome = [next_node]\n prev_node = next_node\n return chromosomes",
"def BFS_01(graph,start_v):\n Double_Queue = []\n #We can skip the part of allocating this tabs but for convinience leave it as it is\n distance = [float(\"inf\")] * graph.get_vertex()\n parents = [-1] * graph.get_vertex()\n visited = [False] * graph.get_vertex()\n distance[start_v] = 0\n visited[start_v] = True\n parents[start_v] = start_v\n Double_Queue.append(start_v)\n while len(Double_Queue)!=0:\n u = Double_Queue.pop(0)\n for v in graph.get_incidence(u):\n w = v[1]\n v = v[0]\n if not visited[v] or distance[v] > distance[u] + w:\n visited[v] = True\n distance[v] = distance[u] + w\n parents[v] = u\n if (w==0):\n Double_Queue.insert(0,v)\n else:\n Double_Queue.append(v)\n return distance",
"def make_contigs(db_graph):\n\n paths = []\n\n for node in db_graph:\n #if NOT a 1-in-1-out node...\n if len(node.followers)!=1 or node.num_incoming!=1:\n # and it has a follower...\n if len(node.followers)>0:\n # go through these followers...\n for kmer_follower in node.followers:\n # start a contig\n current_node = kmer_node_to_db_node(kmer_follower, db_graph)\n current_path = [node, current_node]\n # extend the contig if followers are 1-in-1-out\n while len(current_node.followers)==1 and current_node.num_incoming==1:\n #update the current node\n #always be the first and only follower of current_node\n #[0] because you need an int, not a list\n current_node = kmer_node_to_db_node(current_node.followers[0], db_graph)\n current_path.append(current_node)\n paths.append(current_path)\n\n return paths",
"def copy_links(G,node1,node2,node3,direction):\n\tif direction=='out':\n\t\tedge_list = G.successors(node2)\n\t\tsource_node = node2\n\telif direction == 'in':\n\t\tedge_list = G.predecessors(node1)\n\t\ttarget_node = node1\n\telse:\n\t\traise ValueError(\"direction can only be 'in' or 'out'.\")\n\t\n\t# store the variable outside of the graph to access it faster\n\tedge_paths = copy.deepcopy(G[node1][node2]['paths'])\n\tedge_paths_tmp = {}\n\ttext_ids = [x for x in edge_paths.keys()]\n\tset1 = set(text_ids)\n\tedges_copied = []\n\tedge_paths_tmp = {}\n\tfor n_neighbors in edge_list:\n\t\tif direction=='out':\n\t\t\ttarget_node = n_neighbors\n\t\telse:\n\t\t\tsource_node = n_neighbors\n\t\tpositions_sourcetarget = G[source_node][target_node]['paths']\n\t\t#print(direction,source_node,target_node,positions_sourcetarget)\n\t\tset2 = set([x for x in G[source_node][target_node]['paths'].keys()])\n\t\tcommon_elems = set1 & set2\n\t\tfor text_id in common_elems:\n\t\t\tif not text_id in edge_paths_tmp:\n\t\t\t\tedge_paths_tmp[text_id] = {}\n\t\t\tfor idx in edge_paths[text_id]['word_positions']:\n\t\t\t\tif not idx in edge_paths_tmp[text_id]:\n\t\t\t\t\tedge_paths_tmp[text_id][idx] = {}\n\t\t\t\tif not 'closest_idx' in edge_paths_tmp[text_id][idx]:\n\t\t\t\t\tedge_paths_tmp[text_id][idx]['closest_idx'] = -1\n\t\t\t\tif not 'neighbor_node' in edge_paths_tmp[text_id][idx]:\n\t\t\t\t\tedge_paths_tmp[text_id][idx]['neighbor_node'] = ''\n\t\t\t\tif direction=='out':\n\t\t\t\t\tclosest_idx_s_node = find_next_idx_in_node(positions_sourcetarget,text_id,idx)\n\t\t\t\t\tif closest_idx_s_node >0:\n\t\t\t\t\t\tif edge_paths_tmp[text_id][idx]['closest_idx']==-1 or closest_idx_s_node < edge_paths_tmp[text_id][idx]['closest_idx']:\n\t\t\t\t\t\t\tedge_paths_tmp[text_id][idx]['closest_idx'] = closest_idx_s_node\n\t\t\t\t\t\t\tedge_paths_tmp[text_id][idx]['neighbor_node'] = n_neighbors\n\t\t\t\telse:\n\t\t\t\t\tclosest_idx_p_node = find_previous_idx_in_node(positions_sourcetarget,text_id,idx)\n\t\t\t\t\tif closest_idx_p_node > edge_paths_tmp[text_id][idx]['closest_idx']:\n\t\t\t\t\t\t\tedge_paths_tmp[text_id][idx]['closest_idx'] = closest_idx_p_node\n\t\t\t\t\t\t\tedge_paths_tmp[text_id][idx]['neighbor_node'] = n_neighbors\n\t\t\t\t#print(idx,edge_paths_tmp[text_id][idx]['closest_idx'],edge_paths_tmp[text_id][idx]['neighbor_node'])\n\n\tfor text_id in edge_paths_tmp.keys():\n\t\tfor idx in edge_paths_tmp[text_id]:\n\t\t\tidx2 = edge_paths_tmp[text_id][idx]['closest_idx']\n\t\t\tn_neighbors = edge_paths_tmp[text_id][idx]['neighbor_node']\n\t\t\tif idx2>=0:\n\t\t\t\tif idx2 not in edge_paths[text_id]['word_positions']: # rule for self-loops (repeating a word n times)\n\t\t\t\t\t#connect to new node\n\t\t\t\t\tif direction == 'out':\n\t\t\t\t\t\tadd_connection_node(G,node3,n_neighbors,text_id,idx2)\n\t\t\t\t\telse:\n\t\t\t\t\t\tadd_connection_node(G,n_neighbors,node3,text_id,idx2)\n\t\t\t\t\t# save copied edges to disconnect them from previous nodes (later on)\n\t\t\t\t\tedges_copied.append((n_neighbors,text_id,idx2))\n\t#print(edges_copied)\n\treturn edges_copied",
"def graph_to_clause(graph: nx.graph) -> list:\n\n # num_vert = len(graph)\n clause_list = []\n\n # for i in range(num_vert):\n # for j in range(i, num_vert):\n # if graph[i,j] != 0:\n # clause_list.append((-0.5, (i, j)))\n\n for edge in graph.edges:\n i, j = edge\n clause_list.append((-0.5, (i, j)))\n return clause_list",
"def findAllTogiticalPaths(dictionary):\n alphabet_graph = Graph()\n\n if len(dictionary) == 0:\n return []\n \n first_word = dictionary[0] \n for v in first_word:\n alphabet_graph.add_vertex(v)\n \n for word_index in range(1,len(dictionary)): \n word = dictionary[word_index]\n if word is None:\n print(\"Invalid Input: one of the inputs is None\")\n return\n prev_word = dictionary[word_index-1]\n find_Adj_succ = False\n # try to get an edge between to characters in alphabet_graph from two adj words in the dict\n for v in range(len(word)):\n alphabet_graph.add_vertex(word[v])\n if not find_Adj_succ and len(word) <= len(prev_word):\n if word[v] is not prev_word[v]:\n alphabet_graph.add_Edge(word[v], prev_word[v])\n find_Adj_succ = True\n return alphabet_graph.topologicalSort()",
"def searchHillClimbing(graph, start, goal):\n\n # Initialise the came_from dictionary\n came_from = {}\n came_from[start] = None\n\n # BEGIN HERE #\n if start==goal:\n return came_from\n visited={}\n parent={}\n stack=[]\n stack.append(start)\n visited[start]=1\n current=None\n while(len(stack)):\n next_node = stack.pop()\n if next_node == goal:\n break\n current=next_node\n sc_neighbors=sorted([[heuristic(x,goal),x] for x in graph.neighboursOf(current)])\n for h,neighbor in sc_neighbors[::-1]:\n if neighbor == goal:\n parent[neighbor]=current\n if not visited.get(neighbor,0):\n visited[neighbor]=1\n parent[neighbor]=current\n stack.append(neighbor)\n if goal not in parent.keys():\n return came_from\n temp=goal\n while(1):\n came_from[temp]=parent[temp]\n temp=parent[temp]\n if(temp==start):\n return came_from\n # END HERE #\n\n return came_from",
"def BFS(g,s,discovered):\n level = [s] # first level includes only s\n while len(level) > 0:\n next_level = [] # prepare to gather newly found vertices\n for u in level:\n for e in g.incident_edges(u): # for every outgoing edge from u\n v = e.opposite(u)\n if v not in discovered: # v is an unvisited vertex\n discovered[v] = e # e is the tree edge that discovered v\n next_level.append(v)# v will be further consider in next pass\n level = next_level",
"def out_edges(self, v):\n\n es = []\n for w in self[v]:\n e = self[v][w]\n if e not in es:\n es.append(e)\n return es",
"def _graph_traversal_handler(self, g, src, dst, data, blockaddr_to_function, known_functions, all_edges):\n\n src_addr = src.addr\n src_function = self._addr_to_function(src_addr, blockaddr_to_function, known_functions)\n\n if src_addr not in src_function.block_addrs_set:\n n = self.model.get_any_node(src_addr)\n if n is None:\n node = src_addr\n else:\n node = self._to_snippet(n)\n self.kb.functions._add_node(src_function.addr, node)\n\n if data is None:\n # it's a single node only\n return\n\n jumpkind = data[\"jumpkind\"]\n\n if jumpkind == \"Ijk_Ret\":\n n = self.model.get_any_node(src_addr)\n if n is None:\n from_node = src_addr\n else:\n from_node = self._to_snippet(n)\n self.kb.functions._add_return_from(src_function.addr, from_node, None)\n\n if dst is None:\n return\n\n dst_addr = dst.addr\n\n # get instruction address and statement index\n ins_addr = data.get(\"ins_addr\", None)\n stmt_idx = data.get(\"stmt_idx\", None)\n\n if jumpkind == \"Ijk_Call\" or jumpkind.startswith(\"Ijk_Sys\"):\n is_syscall = jumpkind.startswith(\"Ijk_Sys\")\n\n # It must be calling a function\n dst_function = self._addr_to_function(dst_addr, blockaddr_to_function, known_functions)\n\n n = self.model.get_any_node(src_addr)\n if n is None:\n src_snippet = self._to_snippet(addr=src_addr, base_state=self._base_state)\n else:\n src_snippet = self._to_snippet(cfg_node=n)\n\n # HACK: FIXME: We need a better way of representing unresolved calls and whether they return.\n # For now, assume UnresolvedTarget returns if we're calling to it\n\n # If the function doesn't return, don't add a fakeret!\n if not all_edges or (dst_function.returning is False and not dst_function.name == \"UnresolvableCallTarget\"):\n fakeret_node = None\n else:\n fakeret_node = self._one_fakeret_node(all_edges)\n\n if fakeret_node is None:\n fakeret_snippet = None\n else:\n fakeret_snippet = self._to_snippet(cfg_node=fakeret_node)\n\n if isinstance(dst_addr, SootAddressDescriptor):\n dst_addr = dst_addr.method\n\n self.kb.functions._add_call_to(\n src_function.addr,\n src_snippet,\n dst_addr,\n fakeret_snippet,\n syscall=is_syscall,\n ins_addr=ins_addr,\n stmt_idx=stmt_idx,\n )\n\n if dst_function.returning and fakeret_node is not None:\n returning_target = src.addr + src.size\n if returning_target not in blockaddr_to_function:\n if returning_target not in known_functions:\n blockaddr_to_function[returning_target] = src_function\n else:\n self._addr_to_function(returning_target, blockaddr_to_function, known_functions)\n\n to_outside = blockaddr_to_function[returning_target] is not src_function\n\n n = self.model.get_any_node(returning_target)\n if n is None:\n try:\n returning_snippet = self._to_snippet(addr=returning_target, base_state=self._base_state)\n except SimEngineError:\n # it may not exist\n returning_snippet = None\n else:\n returning_snippet = self._to_snippet(cfg_node=n)\n\n if returning_snippet is not None:\n self.kb.functions._add_fakeret_to(\n src_function.addr, src_snippet, returning_snippet, confirmed=True, to_outside=to_outside\n )\n\n elif jumpkind in (\"Ijk_Boring\", \"Ijk_InvalICache\", \"Ijk_Exception\"):\n # convert src_addr and dst_addr to CodeNodes\n n = self.model.get_any_node(src_addr)\n if n is None:\n src_node = src_addr\n else:\n src_node = self._to_snippet(cfg_node=n)\n\n n = self.model.get_any_node(dst_addr)\n if n is None:\n dst_node = dst_addr\n else:\n dst_node = self._to_snippet(cfg_node=n)\n\n if self._skip_unmapped_addrs:\n # pre-check: if source and destination do not belong to the same section, 
it must be jumping to another\n # function\n belong_to_same_section = self._addrs_belong_to_same_section(src_addr, dst_addr)\n if not belong_to_same_section:\n _ = self._addr_to_function(dst_addr, blockaddr_to_function, known_functions)\n\n if self._detect_tail_calls:\n if self._is_tail_call_optimization(\n g, src_addr, dst_addr, src_function, all_edges, known_functions, blockaddr_to_function\n ):\n l.debug(\"Possible tail-call optimization detected at function %#x.\", dst_addr)\n # it's (probably) a tail-call optimization. we should make the destination node a new function\n # instead.\n blockaddr_to_function.pop(dst_addr, None)\n _ = self._addr_to_function(dst_addr, blockaddr_to_function, known_functions)\n self.kb.functions._add_outside_transition_to(\n src_function.addr, src_node, dst_node, to_function_addr=dst_addr\n )\n self._tail_calls.add(dst_addr)\n\n # is it a jump to another function?\n if isinstance(dst_addr, SootAddressDescriptor):\n is_known_function_addr = dst_addr.method in known_functions and dst_addr.method.addr == dst_addr\n else:\n is_known_function_addr = dst_addr in known_functions\n\n if (is_known_function_addr and dst_addr != src_function.addr) or (\n dst_addr in blockaddr_to_function and blockaddr_to_function[dst_addr] is not src_function\n ):\n # yes it is\n dst_function_addr = (\n blockaddr_to_function[dst_addr].addr if dst_addr in blockaddr_to_function else dst_addr\n )\n\n self.kb.functions._add_outside_transition_to(\n src_function.addr,\n src_node,\n dst_node,\n to_function_addr=dst_function_addr,\n is_exception=jumpkind == \"Ijk_Exception\",\n )\n\n _ = self._addr_to_function(dst_addr, blockaddr_to_function, known_functions)\n else:\n # no it's not\n # add the transition code\n\n if dst_addr not in blockaddr_to_function:\n blockaddr_to_function[dst_addr] = src_function\n\n self.kb.functions._add_transition_to(\n src_function.addr,\n src_node,\n dst_node,\n ins_addr=ins_addr,\n stmt_idx=stmt_idx,\n is_exception=jumpkind == \"Ijk_Exception\",\n )\n\n elif jumpkind == \"Ijk_FakeRet\":\n # convert src_addr and dst_addr to CodeNodes\n n = self.model.get_any_node(src_addr)\n if n is None:\n src_node = src_addr\n else:\n src_node = self._to_snippet(n)\n\n n = self.model.get_any_node(dst_addr)\n if n is None:\n dst_node = dst_addr\n else:\n dst_node = self._to_snippet(n)\n\n if dst_addr not in blockaddr_to_function:\n if isinstance(dst_addr, SootAddressDescriptor):\n if dst_addr.method not in known_functions:\n blockaddr_to_function[dst_addr] = src_function\n target_function = src_function\n else:\n target_function = self._addr_to_function(dst_addr, blockaddr_to_function, known_functions)\n else:\n if dst_addr not in known_functions:\n blockaddr_to_function[dst_addr] = src_function\n target_function = src_function\n else:\n target_function = self._addr_to_function(dst_addr, blockaddr_to_function, known_functions)\n else:\n target_function = blockaddr_to_function[dst_addr]\n\n # Figure out if the function called (not the function returned to) returns.\n # We may have determined that this does not happen, since the time this path\n # was scheduled for exploration\n called_function = None\n called_function_addr = None\n # Try to find the call that this fakeret goes with\n for _, d, e in all_edges:\n if e[\"jumpkind\"] == \"Ijk_Call\":\n if d.addr in blockaddr_to_function:\n called_function = blockaddr_to_function[d.addr]\n called_function_addr = d.addr\n break\n # We may have since figured out that the called function doesn't ret.\n # It's important to assume that all 
unresolved targets do return\n if called_function is not None and called_function.returning is False:\n return\n\n to_outside = target_function is not src_function\n\n confirmed = called_function is None or called_function.returning is True\n self.kb.functions._add_fakeret_to(\n src_function.addr,\n src_node,\n dst_node,\n confirmed=confirmed,\n to_outside=to_outside,\n to_function_addr=called_function_addr,\n )\n\n else:\n l.debug(\"Ignored jumpkind %s\", jumpkind)",
"def _find_cycles(graph):\n # This uses Tarjan's strongly-connected components algorithm, as described by\n # Wikipedia. This is a depth-first traversal of the graph with a node stack\n # that is independent of the call stack; nodes are added to the stack when\n # they are first encountered, but not removed until all nodes they can reach\n # have been checked.\n next_index = [0]\n node_indices = {}\n node_lowlinks = {}\n nodes_on_stack = set()\n stack = []\n nontrivial_components = set()\n\n def strong_connect(node):\n \"\"\"Implements the STRONGCONNECT routine of Tarjan's algorithm.\"\"\"\n node_indices[node] = next_index[0]\n node_lowlinks[node] = next_index[0]\n next_index[0] += 1\n stack.append(node)\n nodes_on_stack.add(node)\n\n for destination_node in graph[node]:\n if destination_node not in node_indices:\n strong_connect(destination_node)\n node_lowlinks[node] = min(node_lowlinks[node],\n node_lowlinks[destination_node])\n elif destination_node in nodes_on_stack:\n node_lowlinks[node] = min(node_lowlinks[node],\n node_indices[destination_node])\n\n strongly_connected_component = []\n if node_lowlinks[node] == node_indices[node]:\n while True:\n popped_node = stack.pop()\n nodes_on_stack.remove(popped_node)\n strongly_connected_component.append(popped_node)\n if popped_node == node:\n break\n if (len(strongly_connected_component) > 1 or\n strongly_connected_component[0] in\n graph[strongly_connected_component[0]]):\n nontrivial_components.add(frozenset(strongly_connected_component))\n\n for node in graph:\n if node not in node_indices:\n strong_connect(node)\n return nontrivial_components",
"def in_edges(self, v):\n\n es = []\n for w in self.reverse_graph[v]:\n e = self.reverse_graph[v][w]\n if e not in es:\n es.append(e)\n return es",
"def unknown_neighbors_of_8(mapdata, x, y, visited):\n\n # CREATES AN EMPTY OUTPUT LIST\n unknown_neighbors = []\n # LISTS ALL NEIGHBORS\n xa = int(x - 1)\n xb = int(x + 1)\n ya = int(y - 1)\n yb = int(y + 1)\n neighbors = [(x, yb), (xa, yb), (xa, y), (xa, ya), (x, ya), (xb, ya), (xb, y), (xb, yb)]\n # ITERATES THROUGH LIST OF NEIGHBORS\n for coord in neighbors:\n # ADDS UNKNOWN NEIGHBORS TO THE OUTPUT LIST\n unknown_index = FindFrontier.grid_to_index(mapdata, coord[0], coord[1])\n if mapdata.data[unknown_index] == -1 and unknown_index not in visited:\n unknown_neighbors.append(coord)\n # RETURNS OUTPUT LIST\n return unknown_neighbors",
"def to_junction_tree(g: nx.Graph):\n return nx.maximum_spanning_tree(g, weight='weight', algorithm='kruskal')",
"def _compute_all_shortest_paths(graph, source, target, exclude_edge=False):",
"def pull_graph(self):\n res = self.neo4j_run_cypher_query('match (n)-[r]-(m) where m<>n and type(r)<>\"contraindicated_for\" and type(r)<>\"indicated_for\" with distinct n as node1, m as node2 return node1.id as source, node2.id as target')\n df = pd.DataFrame(res.data())\n return df",
"def dijkstra1(self, game, graph, start, player):\n graph = {key: value for (key, value) in graph.items()} # Create a new dict to avoid the orignal one be replaced\n shortest_distance = {} # In the following 18 line of codes, which are derived and adjused from the Ian Sullivan(2017)(start) \n unseenNodes = graph # the code source: Implementation of dijkstra in python https://www.youtube.com/watch?v=IG1QioWSXRI&t=1s\n inf = 5000 \n size_board = game.size\n\n for node in unseenNodes:\n shortest_distance[node] = inf\n shortest_distance[start] = 0\n while unseenNodes:\n minNode = -10\n for node in unseenNodes:\n if minNode == -10:\n minNode = node\n elif shortest_distance[node] < shortest_distance[minNode]:\n minNode = node\n\n for childNode, distance in graph[minNode].items():\n if distance + shortest_distance[minNode] < shortest_distance[childNode]:\n shortest_distance[childNode] = distance + shortest_distance[minNode]\n\n unseenNodes.pop(minNode) # In the upper 18 line of codes, which are derived and adjused from the Ian Sullivan(2017)(end)\n\n # In the below, all codes is to identify the smallest distnace for red/blue pieces to the two side border\n if player == HexBoard.RED: # red is vertical\n edgeupper1 = []\n edgelower2 = []\n\n for i in range(size_board):\n a_edge1 = (i, 0)\n a_edge2 = (i, size_board - 1)\n edgeupper1.append(a_edge1)\n edgelower2.append(a_edge2)\n else: # blue is horizontal\n edgeupper1 = []\n edgelower2 = []\n\n for i in range(size_board):\n a_edge1 = (0, i)\n a_edge2 = (size_board - 1, i)\n edgeupper1.append(a_edge1)\n edgelower2.append(a_edge2)\n target_upper = inf\n for candidate in edgeupper1:\n if shortest_distance[candidate] < target_upper:\n target_upper = shortest_distance[candidate]\n target_lower = inf\n for candidate2 in edgelower2:\n if shortest_distance[candidate2] < target_lower:\n target_lower = shortest_distance[candidate2]\n return target_lower + target_upper",
"def vertex_cover_to_sat_cnf(G, k):\n n = len(G)\n\n def y(i, j):\n return (i+j)*(i+j+1)//2+i + n+1\n\n cnf_x = edgeList(G)\n\n cnf_y_i0 = [\n [y(i, 0)]\n for i in range(n+1)\n ]\n cnf_y_0j = [\n [-y(0, j)]\n for j in range(1, n+1)\n ]\n cnf_impl1 = [\n [-y(i-1, j), y(i, j)]\n for i in range(1, n+1)\n for j in range(1, n+1)\n ]\n cnf_impl2 = [\n [-y(i-1, j-1), -i, y(i, j)]\n for i in range(1, n+1)\n for j in range(1, n+1)\n ]\n cnf_last = [\n [-y(n, k+1)]\n ]\n\n cnf = cnf_x + cnf_y_i0 + cnf_y_0j + cnf_impl1 + cnf_impl2 + cnf_last\n return cnf",
"def translocation_walk(G):\n\n ref_seq_uid_set = set([G.ref_intervals.seq_uid])\n\n contig_seq_uid_set = set(ci.seq_uid for ci in\n G.contig_intervals_list.values())\n\n def ref_neighbors(vert):\n if vert not in G:\n return []\n return [v for v in G.neighbors(vert) if v.seq_uid in ref_seq_uid_set]\n\n def contig_neighbors(vert):\n if vert not in G:\n return []\n return [v for v in G.neighbors(vert) if v.seq_uid in\n contig_seq_uid_set]\n\n forward_edges = []\n back_edges = []\n\n dset = set()\n for exit_ref in G.ref_intervals.vertices:\n for enter_contig in contig_neighbors(exit_ref):\n queue = set([enter_contig])\n visited = []\n while queue:\n exit_contig = queue.pop()\n for enter_ref in ref_neighbors(exit_contig):\n\n iv = InsertionVertices(\n exit_ref, enter_contig, exit_contig,\n enter_ref)\n dset.add(iv)\n\n if exit_ref.pos < enter_ref.pos:\n forward_edges.append(iv)\n else:\n back_edges.append(iv)\n\n visited.append(exit_contig)\n queue.update([n for n in contig_neighbors(exit_contig)\n if n not in visited])\n\n sorted_by_exit_ref = sorted(forward_edges + back_edges,\n key=lambda x: x.exit_ref.pos)\n\n sorted_by_enter_ref = sorted(forward_edges + back_edges,\n key=lambda x: x.enter_ref.pos)\n\n iv_pairs = []\n\n if len(sorted_by_enter_ref):\n i = 0\n for enter_iv in sorted_by_exit_ref:\n\n while (sorted_by_enter_ref[i].enter_ref.pos <\n enter_iv.exit_ref.pos - ME_TRANSLOCATION_OVERLAP_TOLERANCE\n and i < len(sorted_by_enter_ref) - 1):\n i += 1\n\n j = i\n exit_iv = sorted_by_enter_ref[j]\n deletion = exit_iv.enter_ref.pos - enter_iv.exit_ref.pos\n while deletion < MAX_TRANS_DELETION:\n\n # Length of translocation sequence\n trans_length = exit_iv.exit_ref.pos - enter_iv.enter_ref.pos\n\n if (all(v.seq_uid in ref_seq_uid_set for v in\n [exit_iv.exit_ref, exit_iv.enter_ref,\n enter_iv.exit_ref, enter_iv.enter_ref]) and (\n MIN_TRANS_LENGTH < trans_length < MAX_TRANS_LENGTH)):\n iv_pairs.append((enter_iv, exit_iv))\n\n if j == len(sorted_by_enter_ref) - 1:\n break\n\n j += 1\n exit_iv = sorted_by_enter_ref[j]\n deletion = exit_iv.enter_ref.pos - enter_iv.exit_ref.pos\n\n filtered = match_region_filter(G, iv_pairs)\n\n return filtered"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check if the diff for a given file name contains a change of the serialVersionUID.
|
def was_serial_id_changed(file_name):
    result = subprocess.run(['git', 'diff', '--cached', '--unified=0', file_name], stdout=subprocess.PIPE)
    lines = result.stdout.decode('utf_8').split("\n")
    for line in lines:
        if 'serialVersionUID' in line:
            print("found")
            return True
    return False
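
A hypothetical driver, shown only for illustration: list the staged files with git and run the check above on each of them. The hook wiring is an assumption, not taken from a specific project; only standard `git diff --cached --name-only` output is used.

# Illustrative pre-commit style driver (assumption: run inside a git work tree).
import subprocess


def staged_files():
    result = subprocess.run(['git', 'diff', '--cached', '--name-only'],
                            stdout=subprocess.PIPE)
    return [name for name in result.stdout.decode('utf_8').split("\n") if name]


if __name__ == "__main__":
    flagged = [name for name in staged_files() if was_serial_id_changed(name)]
    if flagged:
        print("serialVersionUID changed in:", flagged)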
|
[
"def _is_changed(self, concatted_file):\r\n tmp_concatted = '%s.tmp' % concatted_file\r\n if (os.path.exists(concatted_file) and\r\n os.path.getsize(concatted_file) == os.path.getsize(tmp_concatted)):\r\n orig_hash = self._file_hash(concatted_file)\r\n temp_hash = self._file_hash(tmp_concatted)\r\n return orig_hash != temp_hash\r\n return True # Different filesize, so it was definitely changed\r",
"def contains_file_serial_id(file_name):\n with open(file_name, 'rb', 0) as file, \\\n mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) as file_contents:\n return file_contents.find(b'serialVersionUID') != -1",
"def filecmp(filename_a, filename_b):\n size_a = FileIO(filename_a, \"rb\").size()\n size_b = FileIO(filename_b, \"rb\").size()\n if size_a != size_b:\n return False\n\n # Size is the same. Do a full check.\n crc_a = file_crc32(filename_a)\n crc_b = file_crc32(filename_b)\n return crc_a == crc_b",
"def _validate_version(self):\n cf_version = callo([self.path, \"--version\"])\n\n if CLANG_FORMAT_VERSION in cf_version:\n return True\n\n print(\"WARNING: clang-format with incorrect version found at \" + self.path + \" version: \" +\n cf_version)\n\n return False",
"def can_contain_diff(self):\n return Html5Definition.get_diff_type(self.get_current_element()) == DiffBehaviour.internally",
"def __lt__(self, other):\n\t\tif self.__class__ != other.__class__:\n\t\t\traise NotImplementedError\n\n\t\t# Sort by file type as defined by file_type_lt().\n\t\tif self.file_type_lt(self, other):\n\t\t\treturn True\n\t\telif self.file_type_lt(other, self):\n\t\t\treturn False\n\n\t\t# Files have the same type.\n\t\tif self.file_type == \"EBUILD\":\n\t\t\t# Sort by version. Lowest first.\n\t\t\tver = \"-\".join(pkgsplit(self.file_name[:-7])[1:3])\n\t\t\tother_ver = \"-\".join(pkgsplit(other.file_name[:-7])[1:3])\n\t\t\treturn vercmp(ver, other_ver) < 0\n\t\telse:\n\t\t\t# Sort lexicographically.\n\t\t\treturn self.file_name < other.file_name",
"def verify_versions(cls, block_structure):\n outdated_transformers = []\n for transformer in TransformerRegistry.get_registered_transformers():\n version_in_block_structure = block_structure._get_transformer_data_version(transformer) # pylint: disable=protected-access\n if transformer.READ_VERSION > version_in_block_structure:\n outdated_transformers.append(transformer)\n\n if outdated_transformers:\n raise TransformerDataIncompatible( # lint-amnesty, pylint: disable=raising-format-tuple\n \"Collected Block Structure data for the following transformers is outdated: '%s'.\",\n [(transformer.name(), transformer.READ_VERSION) for transformer in outdated_transformers],\n )\n return True",
"def has_changed(self, fpath):\n before = open(fpath,'r').read()\n after = open((self^fpath),'r').read()\n file_changed = not( before == after )\n ast_changed = compiler.parse(before,'exec')==compiler.parse(after,'exec')\n\n out = dict( file_changed=file_changed,\n ast_changed=ast_changed )\n return out",
"def assertConffileDiffs(self, nlines_diff):\n comm_output = fcomparator(orig_conffile, conffile)\n actual_nlines_diff = len(comm_output.split(\"\\n\"))\n if nlines_diff == 0 and comm_output == \"\":\n return\n if actual_nlines_diff != nlines_diff:\n raise AssertionError((\"original and modified config files \" +\n \"differ in %d lines instead of %d:\\n\" +\n \"\\n\\\"%s\\\"\") % (actual_nlines_diff, nlines_diff,\n comm_output))",
"def has_been_modified_and_reason (self, file) :\n res = True\n reason = None\n \n if file not in self.copyFiles :\n reason = \"new\"\n res = True\n else :\n obj = self.copyFiles[file]\n st = os.stat(file)\n if st.st_size != obj.size :\n reason = \"size %s != old size %s\" % (str(st.st_size), str(obj.size))\n res = True\n else :\n l = obj.mdate\n _m = st.st_mtime\n d = convert_st_date_to_datetime(_m)\n if d != l :\n # dates are different but files might be the same\n if obj.checksum is not None :\n ch = checksum_md5 (file)\n if ch != obj.checksum :\n reason = \"date/md5 %s != old date %s md5 %s != %s\" % (str(l), str(d), obj.checksum, ch)\n res = True\n else :\n res = False\n else :\n # we cannot know, we do nothing\n res = False\n else :\n # mda.... no expected modification (dates did not change)\n res = False\n \n if res :\n self.modifiedFile.append( (file, reason) )\n return res, reason",
"def _is_book_modified(book):\n\n from booki.editor.views import getVersion\n from time import mktime\n bv = getVersion(book, None)\n created = mktime(book.created.timetuple())\n for chapter in models.Chapter.objects.filter(version=bv):\n logWarning(\"chapter %s created %s mod %s\" % (chapter.id, book.created, chapter.modified))\n #5 seconds grace before a chapter is deemed modified\n if created + 5 < mktime(chapter.modified.timetuple()):\n return True\n return False",
"def __is_old(self, file):\n changed = os.path.getctime(file)\n now = time.time()\n return now - changed > timestring_to_seconds(self.backlog)",
"def isValidPatch(patchFile):\n \n if tarfile.is_tarfile(patchFile) == False:\n print >>sys.stderr,'Error: File is not a valid tar file'\n return False\n \n # there are 2 kinds of patches now... \n # the \"old\" kind, which has a well defined tar file name: patch-650SP1-1234-name.tar\n # the \"new\" kind has a less well defined name: PSP_jobname_br650_be650SP1-123_patchname.tar\n # --> where the jobname and patchname may or may not contain dashes \"-\"\n # --> therefore, splitting on dashes will not work for these\n # --> furthermore, the patch number alone is no longer unique, rather patchnumber_patchname is unique.\n\n # so, check patch to see if it's old or new:\n if (os.path.basename(patchFile).startswith('patch-')):\n # old type of patch\n info = os.path.basename(patchFile).split('-',4)\n try:\n patchBase = info[1]\n patchNum = info[2]\n except:\n print >>sys.stderr,'Error: File name does not match standard patch conventions'\n return False\n else:\n # new type of patch\n name = os.path.basename(patchFile)\n try:\n parts = re.compile(\"-\\d+_\").split(name)\n num = re.findall('-(\\d+)_', name)\n patchNum = num[0] + '_' + parts[1]\n patchNum = patchNum[:-4] # remove .tar\n moreparts = parts[0].split('_')\n patchBase = moreparts[len(moreparts)-1] # be650SP1\n patchBase = patchBase[2:] # 650SP1\n except:\n print >>sys.stderr,'Error: File name does not match standard patch conventions.'\n return False\n\n try:\n fil = tarfile.open(patchFile,'r')\n except Exception,e:\n print >>sys.stderr, 'Unable to open file:'+e\n return False\n \n # The internal structure of the patch must match the base and number\n manifestfound = False\n pathfound = False\n rootdir = 'patches/%s-%s' % (patchBase,patchNum)\n\n # if it was new style:\n if patchNum.find(\"_\"):\n rootdir = 'patches/%s-%s' % (patchBase, patchNum.split('_')[0])\n\n for item in fil:\n if item.name == rootdir+'/manifest':\n manifestfound = True\n if item.name.startswith(rootdir+'/target/') or item.name.startswith(rootdir+'/host/') or item.name.startswith(rootdir+'/deployment/'):\n pathfound = True\n \n if manifestfound == False or pathfound == False:\n print >>sys.stderr, 'Error: Internal structure does not conform to a standard patch'\n return False\n\n return True",
"def verify(f: IO) -> bool:\n total_bytes_no_crc: int = f.tell()\n crc: int = read_u32(f, endianess=Endianess.LITTLE_ENDIAN)\n f.seek(0)\n computed_crc = zlib.crc32(f.read(total_bytes_no_crc))\n return crc == computed_crc",
"def version_checking(self,meta):\n if meta[0] == self._valid_metadata:\n pass\n else:\n raise Exception('Incorrect Metadata format')",
"def check_modified(self):\n return bool(self._modified)",
"def check_consistency(self):\n log.debug(f'Check consistency of {self.abbreviation()}, input was {self._input}')\n\n # check for modifications depending on class\n if self.lipidclass.name in CLASS_DEFAULT_MODIFICATION:\n default_modification = CLASS_DEFAULT_MODIFICATION[self.lipidclass.name]\n\n if not self.residueslist[0].modification == default_modification:\n log.debug(\n f'Default modification not correct. Expected {default_modification}, found {self.residueslist[0].modification}')\n self.residueslist[0].modification = default_modification",
"def is_old_format(self, peerfile=''):\n if not peerfile:\n peerfile = self.peerfile\n with open(peerfile, 'r') as f:\n line = f.readline(1)\n return '(' in line",
"def was_modified_since_last_sync(self):\n info = self.get_sync_info()\n if not info:\n return None\n if self.size != info[\"s\"]:\n return True\n if self.mtime > info[\"m\"]:\n return True\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check if the given file contains a serialVersionUID.
|
def contains_file_serial_id(file_name):
    with open(file_name, 'rb', 0) as file, \
            mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) as file_contents:
        return file_contents.find(b'serialVersionUID') != -1
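
A hedged usage sketch: walk a source tree and report .java files that never declare a serialVersionUID. The directory name is an assumed, typical Maven layout; note that the function above needs `import mmap` (included here) and that mmap cannot map empty files, hence the size guard.

# Usage sketch only; "src/main/java" is an assumption, not a required path.
import mmap  # required by contains_file_serial_id above
import os

if __name__ == "__main__":
    for root, _, files in os.walk("src/main/java"):
        for name in files:
            if name.endswith(".java"):
                path = os.path.join(root, name)
                # mmap cannot map zero-length files, so skip empty ones
                if os.path.getsize(path) > 0 and not contains_file_serial_id(path):
                    print("missing serialVersionUID:", path)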
|
[
"def was_serial_id_changed(file_name):\n result = subprocess.run(['git', 'diff', '--cached', '--unified=0', file_name], stdout=subprocess.PIPE);\n lines = result.stdout.decode('utf_8').split(\"\\n\")\n for line in lines:\n if 'serialVersionUID' in line:\n print(\"found\")\n return True\n return False",
"def should_check_file(self, filename):\n raise NotImplementedError",
"def verify(f: IO) -> bool:\n total_bytes_no_crc: int = f.tell()\n crc: int = read_u32(f, endianess=Endianess.LITTLE_ENDIAN)\n f.seek(0)\n computed_crc = zlib.crc32(f.read(total_bytes_no_crc))\n return crc == computed_crc",
"def _validate_version(self):\n cf_version = callo([self.path, \"--version\"])\n\n if CLANG_FORMAT_VERSION in cf_version:\n return True\n\n print(\"WARNING: clang-format with incorrect version found at \" + self.path + \" version: \" +\n cf_version)\n\n return False",
"def is_pickle(filename):\n filename = os.path.basename(filename)\n if filename.endswith(\".pkl\"):\n return True\n return False",
"def check_cpacker_file(file):\n\n signature = file.read(6)\n if signature != b'\\x99CPack':\n raise ValueError('File is not an CPack archive')\n\n version = struct.unpack('>H', file.read(2))[0]\n if version != 0:\n raise ValueError('Unsupported CPack archive version')",
"def is_valid_file(prv_file: str) -> bool:\n\n # Checks if the file is accessible\n try:\n opened_prv_file = open(prv_file, 'r')\n except FileNotFoundError:\n print(f'==ERROR== Could not open the file {prv_file}')\n return False\n\n # Checks if file's header contains Paraver's signature\n prv_header = opened_prv_file.readline()\n opened_prv_file.close()\n\n if PARAVER_MAGIC_HEADER not in prv_header:\n print(f'==ERROR== The file {prv_file} doesn not contain a valid header.')\n return False\n\n return True",
"def is_datafile_valid(datafile):\n\n try:\n datafile_json = json.loads(datafile)\n datafile_version = datafile_json.get('version')\n except:\n return False\n\n json_schema = None\n\n if datafile_version == project_config.V1_CONFIG_VERSION:\n json_schema = constants.JSON_SCHEMA_V1\n if datafile_version == project_config.V2_CONFIG_VERSION:\n json_schema = constants.JSON_SCHEMA_V2\n\n if not json_schema:\n return False\n\n try:\n jsonschema.Draft4Validator(json_schema).validate(datafile_json)\n except:\n return False\n\n return True",
"def version_checking(self,meta):\n if meta[0] == self._valid_metadata:\n pass\n else:\n raise Exception('Incorrect Metadata format')",
"def check_labels_file_header(self,filename):\n\t\twith tf.io.gfile.GFile(filename, 'rb') as f:\n\t\t\tmagic = self.read32(f)\n\t\t\tself.read32(f) # num_items, unused\n\t\t\tif magic != 2049:\n\t\t\t\traise ValueError('Invalid magic number %d in MNIST file %s' % (magic,\n\t\t\t\t f.name))",
"def _check_valid_file(file: werkzeug.datastructures.FileStorage):\n allowed_extensions = current_app.config['ALLOWED_MOLECULE_SET_EXTENSIONS']\n user_filename = file.filename\n if not ('.' in user_filename and user_filename.rsplit('.', 1)[1].lower() in allowed_extensions):\n raise ValueError(\"Please upload a .smi or .smiles file!\")\n\n try:\n lines = [line.decode('utf-8') for line in file.readlines()]\n print('\\n'.join(lines))\n except UnicodeError:\n raise ValueError(\n 'Could not decode file as UTF8 text! Are you sure this is a molecule file?'\n )\n\n nof_molecule_lines = len([line for line in lines if not line.startswith('#')])\n max_molecules = current_app.config['MAX_UPLOADED_MOLECULE_NUMBER']\n if nof_molecule_lines > max_molecules:\n raise ValueError(\n f\"You seem to have uploaded {nof_molecule_lines} molecules. \"\n f\"Please upload a file with {max_molecules} molecules or less!\"\n )\n file.seek(0)\n return file",
"def is_file_signed(f, filelen):\n if filelen < MODULE_SIG_STRING_LEN:\n return False\n\n f.seek(-MODULE_SIG_STRING_LEN, os.SEEK_END)\n sigmarker = f.read()\n return sigmarker == MODULE_SIG_STRING",
"def CanReadFile(filename, magic):",
"def IsSerializable(self) -> bool:",
"def _should_file_be_ignored(file_path: str) -> bool:\n return \"lib/rucio/vcsversion.py\" in file_path",
"def _is_ascii_file(data):\n # Skip header...\n data.seek(BINARY_HEADER)\n size = struct.unpack('<I', data.read(4))[0]\n # Use seek() method to get size of the file.\n data.seek(0, os.SEEK_END)\n file_size = data.tell()\n # Reset to the start of the file.\n data.seek(0)\n\n if size == 0: # Odds to get that result from an ASCII file are null...\n print(\"WARNING! Reported size (facet number) is 0, assuming invalid binary STL file.\")\n return False # Assume binary in this case.\n\n return (file_size != BINARY_HEADER + 4 + BINARY_STRIDE * size)",
"def verify(filename):\n\n path, fn = os.path.split(filename)\n catobj = None\n\n if fn.startswith(\"catalog\"):\n if fn.endswith(\"attrs\"):\n catobj = CatalogAttrs(meta_root=path)\n else:\n catobj = CatalogPart(fn, meta_root=path)\n elif fn.startswith(\"update\"):\n catobj = CatalogUpdate(fn, meta_root=path)\n else:\n # Unrecognized.\n raise api_errors.UnrecognizedCatalogPart(fn)\n\n # With the else case above, this should never be None.\n assert catobj\n\n catobj.validate(require_signatures=True)",
"def is_encrypted(self):\n try:\n with open(self._filepath, 'rb') as fp:\n first_bline = fp.readline() # Including trailing newline\n except (OSError, IOError) as exc:\n new_exc = EasyVaultFileError(\n \"Cannot open vault file {fn} for reading: {exc}\".\n format(fn=self._filepath, exc=exc))\n new_exc.__cause__ = None\n raise new_exc # EasyVaultFileError\n # On Windows, match() does not tolerate CRLF line endings\n first_bline_s = first_bline.strip(b'\\n').strip(b'\\r')\n m = HEADER_PATTERN.match(first_bline_s)\n if m is None:\n return False\n if m.group('moniker') != HEADER_MONIKER:\n return False\n return True",
"def is_library(self, file):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check if a file name has a .java ending
|
def is_java_file(file_name):
    return file_name.endswith(".java")
|
[
"def is_python_filename(name: str) -> bool:\n return os.path.exists(name) and name.endswith(\".py\")",
"def isCFile(filename):\n return filename.endswith(\".c\") or filename.endswith(\".cpp\") or filename.endswith(\".cc\")",
"def _check_name(self, filename: str) -> bool:\n pattern = r'[\\/\\\\\\:\\<\\>]'\n if re.search(pattern, filename):\n return False\n return True",
"def is_proper_file(ele, path ):\n return not (re.match('(.*).py', ele) or re.match('\\.(.*)', ele)) and os.path.isfile(path + '/' + ele)",
"def check_filename(self, filename):\n if self.filename.endswith(\".tsp\"):\n return True\n else:\n return False",
"def file_type(self, file_name):\n string = \"\"\n check = False\n for i in range(len(file_name) - 1, 0, -1):\n if file_name[i] != \".\":\n string = file_name[i] + string\n else:\n check = True\n break\n if check is True and len(string) > 0:\n return string\n return False",
"def _is_with_extension(self, filename):\n return self._extension in filename",
"def issourcefile(fname):\n return hasextension(fname) and True not in [fname.endswith(x) for x in ['.o','.exe','.a'] ]",
"def check_file_name(file_name, file_type=\"\", extension=\"\"):\n\n file_name = check_string(file_name, -1, '.', extension)\n file_name = check_string(file_name, -1, '_', file_type)\n\n return file_name",
"def ends_in_ext(fname, ext):\n return fname[-len(ext):] == ext",
"def is_python_file(filename: str) -> bool:\n if filename.endswith(\".py\"):\n return True\n\n max_python_file_detection_bytes = 1024\n try:\n with open(filename, \"rb\") as f:\n text = f.read(max_python_file_detection_bytes)\n if not text:\n return False\n first_line = text.splitlines()[0]\n except (OSError, IndexError):\n return False\n\n if not Regex.PYTHON_SHEBANG.match(first_line):\n return False\n\n return True",
"def _is_python_file(filename):\n if filename.endswith('.py'):\n return True\n else:\n with open(filename, 'r') as file_handle:\n first_line = file_handle.readline()\n return 'python' in first_line and '#!' in first_line",
"def _is_source(file_path):\n _, ext = os.path.splitext(file_path)\n return ext in KEEP",
"def is_source(filename: str) -> bool:\n\n if (\n \".virtualenvs\" in filename\n or \"site-packages\" in filename\n or re.search(\"python[0-9]\\.[0-9]\", filename) is not None\n ):\n return False\n else:\n return True",
"def check_file(self, input_file):\n\n if os.path.isfile(input_file):\n work_dir = os.path.dirname(input_file)\n file = input_file[len(work_dir)+1:]\n\n return file\n\n else:\n work_dir = input_file\n file = \"*.py\"\n\n return file",
"def is_valid_file_name(name:str, has_ext:bool=False) -> bool:\r\n return is_valid_file_name_linux(name) and is_valid_file_name_win(name, has_ext=has_ext)",
"def looks_comicy(filename):\n return os.path.splitext(filename)[1].lower() in COMIC_EXT",
"def allowed_file(filename):\n\n\tif '.' in filename and filename.rsplit('.', 1)[1] == 'dae':\n\t\t# Extract the file extension and return true if dae\n\t\treturn True\n\n\treturn False",
"def isFileNameMatchPatten(file_name, patten):\n match= re.match(patten, file_name)\n\n if match:\n return True\n else:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Setup the matrix of the linear system with Dirichlet boundary conditions to be solved. The matrix is of the form

    A = [ T I 0 0 0 ... 0 0 0
          I T I 0 0 ... 0 0 0
          0 I T I 0 ... 0 0 0
          ...
          0 0 0 0 0 ... 0 I T ]

where

    T = [ 4 1 0 0 0 ... 0 0 0
          1 4 1 0 0 ... 0 0 0
          0 1 4 1 0 ... 0 0 0
          ...
          0 0 0 0 0 ... 0 1 4 ]

and I is the identity matrix.
|
def setup_A(self):
    # first column of the Toeplitz matrices that make up the block diagonal of
    # the matrix
    column = scipy.concatenate((scipy.array([-4, 1]),
                                scipy.zeros(self.northwall.len - 2)))
    # set up the Toeplitz matrix that makes up the block diagonal of the matrix
    T = scipy.linalg.toeplitz(column)
    # tuple of Toeplitz matrices to place on the block diagonal
    Ttuple = tuple((T for _ in range(self.westwall.len)))
    # set up the matrix, using T and inserting the ones of the identity matrices
    A = scipy.linalg.block_diag(*Ttuple) \
        + \
        scipy.eye(self.northwall.len * self.westwall.len,
                  k=self.northwall.len) \
        + \
        scipy.eye(self.northwall.len * self.westwall.len,
                  k=-self.northwall.len)
    return (1 / self.h ** 2) * A
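
A small standalone check of the structure described in the docstring, for a hypothetical 3x3 grid of interior points with h = 1. It uses NumPy for the array helpers, since recent SciPy releases no longer re-export them as scipy.concatenate, scipy.zeros and scipy.eye.

# Sketch only: rebuild the block matrix for n = 3 interior points per row.
import numpy as np
from scipy.linalg import block_diag, toeplitz

n = 3
column = np.concatenate((np.array([-4.0, 1.0]), np.zeros(n - 2)))
T = toeplitz(column)                       # tridiagonal block: -4 on the diagonal, 1 off it
A = block_diag(*(T for _ in range(n))) \
    + np.eye(n * n, k=n) \
    + np.eye(n * n, k=-n)                  # identity blocks next to the block diagonal
print(A.astype(int))                       # 9x9 block-tridiagonal matrix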
|
[
"def LinearSystem(self):\n # assembly matrix of linear system\n # to solve u(t) based on u(t-1) and u(t-2)\n # the matrix includes all future values of u\n # in the entire grid, so size is the number of cells\n # start with zeros that is also the boundary condition u(t)=0\n self.mUt = np.zeros([self.Nz*self.Nx, self.Nz*self.Nx])\n\n # assembly linear system, the linear system\n # ignores external part of the grid = locked boundary\n # ln go through all the cells in the grid Ut\n # each cell gives one equation (line)\n for Ln in range(0, self.Nz*self.Nx, 1):\n # 1.0*u(x-1,z) + Gamma(x,z)*u(x,z) + 1.0*u(x+1,z) + 1.0*u(x,z-1) + 1.0*u(x,z+1)\n # turn the indices to the one of original matrix\n i = Ln%self.Nx\n k = Ln/self.Nx\n\n self.mUt[Ln][Ln] = self.Gamma(k, i)\n #is this right?\n if(i-1 >= 0): # u(x-1,z) inside grid in I\n self.mUt[Ln][Ln-1] = 1.0\n if(i+1 < self.Nx): # u(x+1,z) inside grid in I\n self.mUt[Ln][Ln+1] = 1.0\n if(k-1 >= 0): #u(x,z-1)\n self.mUt[Ln][Ln-self.Nx]= 1.0\n if(k+1 < self.Nz): #u(x,z+1)\n self.mUt[Ln][Ln+self.Nx]= 1.0\n\n return self.mUt",
"def setup_linear_problem(self, A_name, b_name):\n self.A = self.mesh.matrix_manager.get_matrix(A_name)\n self.b = self.mesh.matrix_manager.get_vector(b_name)\n\n self.x = Epetra.Vector(\n self.mesh.matrix_manager.std_map[self.solution_dim])\n\n self.linearProblem = Epetra.LinearProblem(self.A, self.x, self.b)\n self.solver = AztecOO.AztecOO(self.linearProblem)\n self.solver.SetAztecOption(AztecOO.AZ_output, AztecOO.AZ_last)",
"def stiffness_matrix_local(self):\n\n E = self.properties['E']\n G = self.properties['G']\n A = self.properties['A']\n Iy = self.properties['Iy']\n Iz = self.properties['Iz']\n J = self.properties['J']\n\n EA = E*A\n EIy = E*Iy\n EIz = E*Iz\n GJ = G*J\n L = self.length\n L2 = L**2\n L3 = L**3\n\n k_elem = np.zeros((12, 12))\n\n k_elem[0, 0] = EA/L\n k_elem[0, 6] = -EA/L\n\n k_elem[1, 1] = 12*EIz/L3\n k_elem[1, 5] = 6*EIz/L2\n k_elem[1, 7] = -12*EIz/L3\n k_elem[1, 11] = 6*EIz/L2\n\n k_elem[2, 2] = 12*EIy/L3\n k_elem[2, 4] = -6*EIy/L2\n k_elem[2, 8] = -12*EIy/L3\n k_elem[2, 10] = -6*EIy/L2\n\n k_elem[3, 3] = GJ/L\n k_elem[3, 9] = -GJ/L\n\n k_elem[4, 4] = 4*EIy/L\n k_elem[4, 8] = 6*EIy/L2\n k_elem[4, 10] = 2*EIy/L\n\n k_elem[5, 5] = 4*EIz/L\n k_elem[5, 7] = -6*EIz/L2\n k_elem[5, 11] = 2*EIz/L\n\n k_elem[6, 6] = EA/L\n\n k_elem[7, 7] = 12*EIz/L3\n k_elem[7, 11] = -6*EIz/L2\n\n k_elem[8, 8] = 12*EIy/L3\n k_elem[8, 10] = 6*EIy/L2\n\n k_elem[9, 9] = GJ/L\n\n k_elem[10, 10] = 4*EIy/L\n\n k_elem[11, 11] = 4*EIz/L\n\n k_elem += np.triu(k_elem, k=1).T\n return k_elem",
"def init_Q(self):\n self.Q = np.matrix(np.tril(self.A))",
"def __init__(self, matrix, neighbor_function, weight_function):\n self.lattice = matrix\n self.row_dim = len(self.lattice)\n self.col_dim = len(self.lattice[0])\n self.neighbor_function = neighbor_function\n self.weight_function = weight_function\n self.consistency_check()\n self.build_adjacency_list()",
"def init_input(self):\n m1 = np.diagflat([-1] * (self.n - 1), -1)\n m2 = np.diagflat([-1] * (self.n - 1), 1)\n m3 = np.diagflat([self.gamma] * self.n)\n self.A = np.matrix((m1 + m2 + m3).astype(np.double))\n\n self.b = np.matrix(\n np.full((self.n, 1), self.gamma - 2).astype(np.double)\n )\n self.b[0] = self.gamma - 1\n self.b[self.n - 1] = self.gamma - 1\n\n self.x0 = np.matrix(\n np.full((self.n, 1), 0).astype(np.double)\n )",
"def init_Q(self):\n self.Q = np.matrix(np.diagflat(np.diag(self.A)))",
"def calculate_BH_matrix_elements(self):\n # first handle diagonal (site number-operator commuting) elements\n self.sum_nnm1=(self.hilbert_space.fock_basis*(self.hilbert_space.fock_basis-1)).sum(axis=1)\n self.sum_n=self.hilbert_space.fock_basis.sum(axis=1)\n self.muU=self.U*self.sum_nnm1 - self.mu*self.sum_n\n # now handle off-diagonal tunneling elements - tunneling preserves (commutes with) number\n for n in numpy.unique(self.sum_n):\n # pick out the subspace at occupancy n\n n_subspace=self.hilbert_space.fock_basis[self.sum_n==n].astype(numpy.int32)\n # difference all fock basis states by local number, sum their absolute values across sites\n # tunnel-coupled states must have one site raised and one site lowered, thus\n # the sum of absolute value of differences must be 2 - use these to mark possible\n # tunnel-coupled sites in a list of basis state i <-> basis state j , (i,j) pairs\n fock_diffs=n_subspace[:,numpy.newaxis]-n_subspace\n single_swap_subspace=(numpy.abs(fock_diffs).sum(axis=2)==2).nonzero()\n # for each candidate tunnel-coupled pair, check the connectivity graph\n # what we want is essentially a graph to graph mapping; the connection \n # of basis state i to basis state j is a lowering of site s and raising of site t\n # so potential matrix element (i,j) depends on lattice connectivity (s,t)\n # start by finding raised and lowered sites for each (i,j) basis state pair\n raised=(fock_diffs[single_swap_subspace]==1).nonzero()[1]\n lowered=(fock_diffs[single_swap_subspace]==-1).nonzero()[1]\n pdb.set_trace()\n \n self.K=numpy.empty((self.hilbert_space.fock_basis.shape[0],)*2,dtype=numpy.float64) \n pass",
"def setUpTripartiteSystem(d):\n rho = setUpNQudits(3, d)\n POVM = setUpPOVMElements(d)\n tau = TensorProduct(rho[0], rho[1], rho[2])\n M = TensorProduct(POVM[0], POVM[1], sympy.eye(d,d))\n return (M * tau).trace()",
"def __init__(self, model, A, B, **kwargs):\n super(LinearSystem, self).__init__(model)\n self._opts = {}\n self._mats = {'A': np.asarray(A), 'B': np.asarray(B)}\n if 'C' in kwargs:\n self._mats['C'] = np.asarray(kwargs['C'])\n else:\n self._mats['C'] = np.eye(self._mats['A'].shape[1])\n self._dims = {'x': self._mats['A'].shape[1],\n 'u': self._mats['B'].shape[1],\n 'y': self._mats['C'].shape[0]}\n if 'D' in kwargs:\n self._mats['D'] = np.asarray(kwargs['D'])\n else:\n self._mats['D'] = np.zeros((self._dims['y'], self._dims['u']))\n if 'E' in kwargs:\n self._mats['E'] = np.asarray(kwargs['E'])\n self._dims['v'] = self._mats['E'].shape[1]\n if 'x0' in kwargs:\n self.x0 = kwargs['x0']\n self._changed = {'T': True}",
"def LKB_matrix(self, variables='x,y'):\n return self.parent()._LKB_matrix_(self.Tietze(), variab=variables)",
"def initialize(self):\n self.lattice = 2 * np.random.randint(2, size=(self.N, self.N)) - 1",
"def _init_A(self, factor, module):\n self.m_A[module] = torch.diag(factor.new(factor.shape[0]).fill_(1))\n self.m_dA[module] = factor.new_zeros(factor.shape[0])\n self.m_QA[module] = factor.new_zeros(factor.shape)",
"def _build_j_mtx(self):\n size = 2*self.n\n j_mtx = np.zeros((size,size))\n # selecting indices\n inds = np.arange(size-1)\n # selecting upper-diagonal indices\n j_mtx[inds, inds+1] = -1\n # selecting lower-diagonal indices\n j_mtx[inds+1, inds] = 1\n return j_mtx",
"def initialize(self):\n F = len(self.inputs[0])\n min_val = np.min(self.inputs)\n max_val = np.max(self.inputs)\n \n np.random.seed(1)\n if self.init=='random':\n # create 3D array storing initial models\n self.M = np.random.uniform(min_val, max_val, size=(self.J*self.K, F))\n self.M = np.array(self.M)",
"def Set_to_diagonal_matrix_unb_var1(d, A):\n \n dT, \\\n dB = flame.part_2x1(d, \\\n 0, 'TOP')\n\n ATL, ATR, \\\n ABL, ABR = flame.part_2x2(A, \\\n 0, 0, 'TL')\n\n while dT.shape[0] < d.shape[0]:\n\n d0, \\\n delta1, \\\n d2 = flame.repart_2x1_to_3x1(dT, \\\n dB, \\\n 1, 'BOTTOM')\n\n A00, a01, A02, \\\n a10t, alpha11, a12t, \\\n A20, a21, A22 = flame.repart_2x2_to_3x3(ATL, ATR, \\\n ABL, ABR, \\\n 1, 1, 'BR')\n\n laff.zerov(a01)\n laff.copy(delta1, alpha11)\n laff.zerov(a21)\n\n dT, \\\n dB = flame.cont_with_3x1_to_2x1(d0, \\\n delta1, \\\n d2, \\\n 'TOP')\n\n ATL, ATR, \\\n ABL, ABR = flame.cont_with_3x3_to_2x2(A00, a01, A02, \\\n a10t, alpha11, a12t, \\\n A20, a21, A22, \\\n 'TL')\n\n flame.merge_2x1(dT, \\\n dB, d)\n\n flame.merge_2x2(ATL, ATR, \\\n ABL, ABR, A)",
"def setBoundaryCondition(self):\n \n \n if self.grid.bc == 'constant' and self.t == 0.0:\n # conditions are fixed to their starting values at edges\n self.__qR__ = np.array([[self.q[0][-1]],[self.q[1][-1]],[self.q[2][-1]]])\n self.__qL__ = np.array([[self.q[0][0]] ,[self.q[1][0]] ,[self.q[2][0]]])\n \n self.__fR__ = np.array([[self.f[0][-1]],[self.f[1][-1]],[self.f[2][-1]]])\n self.__fL__ = np.array([[self.f[0][0]] ,[self.f[1][0]] ,[self.f[2][0]]])\n \n \n elif self.grid.bc == 'periodic':\n self.__qR__ = np.array([[self.q[0][0]],[self.q[1][0]],[self.q[2][0]]])\n self.__qL__ = np.array([[self.q[0][-1]],[self.q[1][-1]],[self.q[2][-1]]])\n\n self.__fR__ = np.array([[self.f[0][0]],[self.f[1][0]],[self.f[2][0]]])\n self.__fL__ = np.array([[self.f[0][-1]],[self.f[1][-1]],[self.f[2][-1]]])\n \n elif not self.grid.bc == 'constant':\n print \"nothing set with boundary conditions... check bc settings\"",
"def Set_to_diagonal_matrix_unb_var2(d, A):\n \n dT, \\\n dB = flame.part_2x1(d, \\\n 0, 'TOP')\n\n ATL, ATR, \\\n ABL, ABR = flame.part_2x2(A, \\\n 0, 0, 'TL')\n\n while dT.shape[0] < d.shape[0]:\n\n d0, \\\n delta1, \\\n d2 = flame.repart_2x1_to_3x1(dT, \\\n dB, \\\n 1, 'BOTTOM')\n\n A00, a01, A02, \\\n a10t, alpha11, a12t, \\\n A20, a21, A22 = flame.repart_2x2_to_3x3(ATL, ATR, \\\n ABL, ABR, \\\n 1, 1, 'BR')\n\n laff.zerov(a10t)\n laff.copy(delta1, alpha11)\n laff.zerov(a12t)\n\n dT, \\\n dB = flame.cont_with_3x1_to_2x1(d0, \\\n delta1, \\\n d2, \\\n 'TOP')\n\n ATL, ATR, \\\n ABL, ABR = flame.cont_with_3x3_to_2x2(A00, a01, A02, \\\n a10t, alpha11, a12t, \\\n A20, a21, A22, \\\n 'TL')\n\n flame.merge_2x1(dT, \\\n dB, d)\n\n flame.merge_2x2(ATL, ATR, \\\n ABL, ABR, A)",
"def _initialize_mask(self):\n if 'locally_connected' in self.mask_type:\n assert self.neighbour_matrix is not None\n L = self.neighbour_matrix.T\n assert L.shape == (self.in_joints, self.in_joints)\n if 'learnable' not in self.mask_type:\n self.mask = tf.constant(L)\n else:\n if self.init_type == 'same':\n initializer = L\n elif self.init_type == 'ones':\n initializer = tf.initializers.ones\n elif self.init_type == 'random':\n initializer = tf.random.uniform\n var_mask = tf.Variable(\n name='mask', shape=[self.in_joints, self.out_joints] if self.init_type != 'same' else None,\n dtype=tf.float32, initial_value=initializer\n )\n var_mask = tf.nn.softmax(var_mask, axis=0)\n self.mask = var_mask * tf.constant(L != 0, dtype=tf.float32)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
str > str Get hashcode from URL
|
def get_hash(url):
response = requests.get(url)
return response.text
|
[
"def get_hash(link, *, hash_type = 'md5'):\n data = requests.get(link).content\n m = eval('hashlib.{}(data)'.format(hash_type))\n return m.hexdigest()",
"def path_hash(path_string: str):\n hashable = path_string.replace(\"\\\\\", \"/\")\n if not hashable.startswith(\"/\"):\n hashable = \"/\" + hashable\n h = 0\n for i, s in enumerate(hashable):\n h += i * 37 + ord(s)\n return h",
"def url_hash_fragment(distribution_url: str) -> Tuple[str, str]:\n algo_type, hash_value = distribution_url.split('#')[1].split('=')\n return algo_type, hash_value",
"def hash_string(string):\r\n return hashlib.sha256(string.encode('utf-8')).hexdigest()",
"def get_hashed_string(s):\n return hashlib.sha1(s.encode('utf-8')).hexdigest()",
"def get_hash(text):\n if text:\n stripped = \" \".join(unicode(text).translate(tbl).lower().split())\n return hashlib.sha1(stripped.encode('utf-8')).hexdigest()",
"def strhashfn(*args):\n return _wali.strhashfn(*args)",
"def cache_hash(url):\n try:\n with DB_HASHER.atomic():\n Hasher.create(hash=hashlib.sha1(url.encode('ascii', 'ignore')).hexdigest())\n return 0\n except IntegrityError:\n return 1\n except:\n return -1",
"def hashGeneretor(inputString):\n\treturn hashlib.sha256(inputString.encode('utf-8')).hexdigest()",
"def create_hash(secret: str, url: str) -> str:\n s = f'{secret}{url}'\n return str(hashlib.md5(s.encode()).hexdigest()[0:8])",
"def hash(bytes):\n return unpack(sha256(bytes).digest())",
"def find_sha256_hash(db, url):\n script, url_id = find_script(db, url, want_code=False)\n if script:\n return (script.get('sha256'), url_id)\n return (None, None)",
"def hash_code(s):\n mask = (1 << 32) - 1 # limit to 32-bit integers\n h = 0\n for character in s:\n h = (h << 5 & mask) | (h >> 27) # 5-bit cyclic shift of running sum\n h += ord(character) # add in value of next character\n return h",
"def extract_hash(magnet):\n query = urlparse.urlparse(magnet).query\n exact_topic = urlparse.parse_qs(query)['xt'][0]\n sha1 = exact_topic.split(':')[-1]\n return sha1.lower()",
"def hash(*args) -> \"uint32_t\":\n return _coin.SbString_hash(*args)",
"def hash_string_to_hue(string):\n\treturn str(int(ord(hashlib.md5(string).digest()[0]) / 255 * 360))",
"def hash_string(string_to_hash: str) -> str:\n return hashlib.sha256(string_to_hash.encode(\"utf-8\")).hexdigest()",
"def SbString_hash(*args) -> \"uint32_t\":\n return _coin.SbString_hash(*args)",
"def getHash(name):\n return hashlib.md5(name).hexdigest()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
int > str Builds a 7-character string from an int
|
def int_to_7char_str(i):
#the pins always have 7 digits
pin = str(i)
l = len(pin)
if (l < 7):
zeros = ""
for j in range(7-l):
zeros += "0"
pin = zeros + pin
return pin
|
[
"def convert_int(n: int) -> str:\n\n return str(n)",
"def _str(i: int) -> str:\n if i < 0 or i > 999:\n raise ValueError(\"0 <= i <= 999\")\n if 0 <= i <= 9:\n s = \"__\" + str(i)\n elif 10 <= i <= 99:\n s = \"_\" + str(i)\n else:\n s = str(i)\n return s",
"def IntStr( num, dec=None ):\n num = int(num)\n if not dec: return str(num)\n if dec <= len(str(num)):\n return str(num)\n ans = \"0\" * (dec - (len(str(num)))) + str(num)\n return ans",
"def convertToBase7(self, num: int) -> str:\n base = 7\n out = []\n sign = 1\n if num < 0:\n sign = -1\n num *= sign\n while num >= base:\n x = num // base\n rest = num % (x * base)\n out.insert(0, str(rest))\n num = x\n out.insert(0, str(num))\n if sign < 0:\n out.insert(0, '-')\n return ''.join(out)",
"def convert_to_string(num_int, base=10):\n base_chars = BASES[base-1]\n if num_int >= base:\n mod = num_int % base\n return convert_to_string(num_int // base, base=base) + base_chars[mod]\n else:\n return base_chars[num_int]",
"def convert_to_two_char_string(number):\n\tif number < 10:\n\t\treturn '0%s' % number\n\telse:\n\t\treturn '%s' % number",
"def get_tilestr(tileid):\n tileid=int(tileid)\n return '%03d' % tileid",
"def from10to32(string):\r\n \r\n res = \"\"\r\n remainder = int(string)\r\n while remainder > 31:\r\n char = AIC_TABLE[remainder%32].upper()\r\n remainder = remainder//32\r\n res = res + char\r\n res = res + AIC_TABLE[remainder].upper()\r\n res = res[::-1]\r\n return res.zfill(6)",
"def to_str(number, base):\n convert_string = \"0123456789ABCDEF\"\n if number < base:\n return convert_string[number]\n else:\n return to_str(number // base, base) + convert_string[number % base]",
"def concatenateDigits(lengthLimit):\n i = 1\n intString = \"\"\n while len(intString) < lengthLimit:\n intString += str(i)\n i += 1\n return intString",
"def int_conv(string):\n try:\n intstring=int(string)\n except:\n intstring=999\n return intstring",
"def get_digit_string(num, base):\n remainder = num % base\n if base == 16 or base == 32:\n return to_char(remainder)\n else:\n return str(remainder)",
"def convert_any_base_to_base_10(s: str, base: int) -> int:\n assert(1 < base < 37)\n return int(s, base)",
"def int2ascii(i: int) -> str:\n if i > 127:\n raise ValueError('The passed integer value must be <= 127.')\n return chr(i)",
"def int2str(val, max_dec=1024):\n if val > max_dec:\n return \"0x%x\" % val\n else:\n return \"%d\" % val",
"def convert_base_10_to_any_base(x: int, base: int) -> str:\n assert(x >= 0)\n assert(1< base < 37)\n r = ''\n import string\n while x > 0:\n r = string.printable[x % base] + r\n x //= base\n return r",
"def int_to_string( long_int, padto=None ):\n if long_int > 0:\n octet_string = \"\"\n while long_int > 0:\n long_int, r = divmod( long_int, 256 )\n octet_string = chr( r ) + octet_string\n elif long_int == 0:\n octet_string = chr(0)\n else:\n raise ValueError('int_to-string unable to convert negative numbers')\n \n if padto:\n padlen = padto - len(octet_string)\n assert padlen >= 0\n octet_string = padlen*chr(0) + octet_string\n return octet_string",
"def convert_base(s: str, frombase: int, tobase: int) -> str:\n if frombase == 10:\n x = int(s)\n else:\n x = convert_any_base_to_base_10(s, frombase)\n if tobase == 10:\n return str(x)\n return convert_base_10_to_any_base(x, tobase)",
"def champernowne(n):\n digit_count, next_integer = 0, 1\n while digit_count + len(str(next_integer)) < n:\n digit_count += len(str(next_integer))\n next_integer += 1\n return int(str(next_integer)[n - digit_count - 1])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
dic > void builds a dictionary storing all 10^7 seven-digit PINs, keyed by hash code
|
def build_hash_dictionary(hash_dic):
for i in range(10**7):
pin = int_to_7char_str(i)
hash_code = encode_hash(pin)
hash_dic[hash_code] = pin
|
[
"def generateDictionary(n=4):\r\n m = 2**n\r\n d = dict()\r\n for i in range(m):\r\n format = \"{0:0\"+str(n)+\"b}\"\r\n binary = list(format.format(i))\r\n d[i] = [int(binary[j]) for j in range(len(binary))]\r\n\r\n return d",
"def generate_num_dict(hand: list[str]) -> dict:\n nums_dict = defaultdict(list[str])\n\n for card in hand.split():\n nums_dict[CARDS.get(card[:-1])].append(card[-1])\n\n return nums_dict",
"def getPostCodeMapping():\n postcode_mapping = {\n \"110031v\" : \"110031\", #removed the extra v in the end\n \"2242\" : \"122001\", # manually scanned the OSM file for pincode for same place\n \"10089\" : \"110085\", #checked manually on internet\n \"1100002\" : \"110002\",\n \"1100049\" : \"110049\",\n \"2010\" : \"201010\",\n \"1100016\" : \"110016\"\n }\n return postcode_mapping",
"def generate_hash_map(self):\n\n # clear the hash map\n self._hash_map.clear()\n\n for line in self._document_content:\n\n line = line.encode('utf-8')\n\n line = str(line).translate(PUNCTUATION_TRANS)\n words = line.split()\n\n for word in words:\n\n word = word.decode('utf-8-sig')\n word = PorterStemmer().stem(word)\n word = word.lower()\n\n if word.isalpha():\n if not self._is_stop_word(word):\n\n # if the word is not in hash\n if word not in self._hash_map:\n self._hash_map[word] = 1\n else:\n self._hash_map[word] += 1",
"def initializeRegisterDict(self):\n for day in DAYSOFWEEK: \n self.registerDict[day] = {}",
"def __init__(self, zipreader):\n self.ziphash = { z[\"zip_code\"] : ZipCodeMappingModel(z) for z in zipreader }",
"def __hash__(self):\n return hash(self.kim_code)",
"def build_pin_dict(fp, filepath):\n\n dom = parse(filepath) # load .pic file\n\n pinlist = {} # new dictionary\n i = 1 # pin number\n for pin in dom.getElementsByTagName(\"edc:Pin\"): # select pin nodes\n aliaslist = [] # new aliaslist this pin\n for vpin in pin.getElementsByTagName(\"edc:VirtualPin\"):\n alias = vpin.getAttribute(\"edc:name\") # raw alias\n alias = alias.upper().strip(\"_\").split()[0] # first word\n aliaslist.append(alias) # add alias!\n\n pinlist[i] = aliaslist # add aliaslist this pin\n i += 1\n\n for alias in aliaslist:\n if (re.match(portpin, alias) or re.match(gpiopin, alias)): # select Rxy or GPx\n portbit = alias\n if portbit != aliaslist[0]: # not first in list\n aliaslist.remove(portbit) # remove it\n aliaslist.insert(0, portbit) # add it to front\n break\n\n picname = os.path.splitext(os.path.split(filepath)[1])[0][3:].upper() # pic type\n print(picname) # progress signal\n fp.write(picname + \"\\n\")\n if len(pinlist) > 0: # any pins in list\n list_pic_pins(fp, pinlist) # list pinmap this pic\n else:\n print(\" No pinlist!\")\n fp.write(\" No pinlist\\n\")",
"def barcode_data(barcodes_dict):\n file = open(barcodes_file, \"r\")\n for line in file:\n if not line.startswith(\"#\"):\n values = line.split()\n\n key = int(values[1])\n subject = int(values[0])\n # landmarks have numbers 6 -> 20\n if subject >= 6:\n # key is the barcode number\n # element if the subject number\n barcodes_dict.update({key : subject})\n\n file.close()",
"def dict_values():\n d_list = defaultdict(lambda: False)\n n = 256\n for i in range(8):\n n = n/2\n d_list[n] = i\n return d_list",
"def getAircraftCodeDict():\n\n d1 = d2 = {}\n \n f = shelve.open(filename2) \n d1 = f\n for sk in d1.keys():\n k = int(sk) # convert the string key in the shelve\n d2[k] = d1[sk] # to an int for the dictionary\n \n\n return d2",
"def hash(self, pages):\n hashes = {}\n for addr, page in pages.items():\n hash = hashlib.sha1(page).hexdigest()\n hashes[addr] = hash\n return hashes",
"def __create_info_dict(self):\n d = ['mtype', 'stype', 'sval']\n keys = ['_'.join(i) for n in range(5) for i in itertools.permutations(d, n) if not len(i) == 0]\n out = {i: {} for i in keys}\n return out",
"def slot_key_db() -> Dict[str, List]:\n\n return {\n \"q50\": \"second_person_plural\",\n \"q28\": \"cot_caught\",\n \"q80\": \"rain_sun\",\n \"q66\": \"crawfish\",\n \"q110\": \"halloween\",\n \"q64\": \"sandwich\",\n \"q90\": \"side_road\",\n \"q105\": \"beverage\",\n \"q73\": \"shoes\",\n \"q79\": \"highway\",\n \"q58\": \"yard_sale\",\n \"q107\": \"rubbernecking\",\n \"q94\": \"frosting\",\n \"q14\": \"lawyer\",\n \"q76\": \"kitty_corner\",\n \"q65\": \"firefly\",\n \"q60\": \"verge\",\n \"q118\": \"brew_thru\",\n \"q103\": \"water_fountain\",\n }",
"def generate_dictionary(self, combinations):\n dictionary = dict()\n\n for combination in combinations:\n dictionary[combination] = 0\n #for combination -ends\n return dictionary",
"def createTask6Dictionary():\n \n \n myDict = {'PA' : [1.0, 346], 'NY' : [-0.5, 234], 'NJ' : [-1.0, 45], \\\n 'VA' : [0.0, 101], 'MD' : [0.3, 401]}\n\n return myDict",
"def primer_dict( db, plates ):\n dd = defaultdict(list)\n for well, primer in MASTER_MIX_TEMPLATE.items():\n dd[primer].append(well)\n for plate in plates:\n primers, _ = plate_to_custom_primers( db, plate )\n primer_counts = [ (primer.name, primers[pp])\n for pp in sorted(primers)\n for primer in (pp.fwd_primer, pp.rev_primer) ]\n for primer_name, lines in rows_for_custom_primers( primer_counts ):\n for wells in lines:\n dd[primer_name].extend( wells )\n return dict( (primer_name, sorted(l)) for primer_name, l in dd.items() )",
"def get_hash(self, descriptor):",
"def converter(L):\r\n picobotDict = {}\r\n for item in L:\r\n key = (int(item[0]), str(item[2:6]))\r\n value = (str(item[10]), int(item[12]))\r\n picobotDict[key] = value\r\n return picobotDict"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
str > str hashes a PIN with MD5, for testing purposes
|
def encode_hash(pin):
return hashlib.md5(pin.encode()).hexdigest()
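
The int_to_7char_str, build_hash_dictionary and encode_hash rows above appear to come from the same PIN-recovery exercise. The following self-contained sketch (an editorial illustration, not part of the dataset) shows how the three pieces fit together, with the range reduced from 10^7 to 10^4 so it runs in a moment.

import hashlib

def int_to_7char_str(i):
    # zero-pad to 7 digits, equivalent to the loop in the dataset row
    return str(i).zfill(7)

def encode_hash(pin):
    return hashlib.md5(pin.encode()).hexdigest()

def build_hash_dictionary(hash_dic, limit=10**4):    # reduced from 10**7 for the sketch
    for i in range(limit):
        pin = int_to_7char_str(i)
        hash_dic[encode_hash(pin)] = pin

lookup = {}
build_hash_dictionary(lookup)
target = encode_hash("0001234")
print(lookup[target])    # -> 0001234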
|
[
"def Radius_User_Password(password, secret, authenticator):\r\n password_length = struct.pack(\"!B\",len(password))\r\n padd_size = 16 - (len(password) % 16)\r\n \r\n try:\r\n p = password.encode(\"utf-8\")\r\n except AttributeError:\r\n p = password\r\n \r\n while padd_size > 0:\r\n p = p + b'\\x00'\r\n padd_size = padd_size - 1\r\n \r\n S = secret.encode(\"utf-8\")\r\n I = authenticator\r\n \r\n result = b'' \r\n c = I\r\n while p:\r\n h = hashlib.md5()\r\n h.update(S)\r\n h.update(c)\r\n b = h.digest()\r\n\r\n for i in range(16):\r\n result += bytes((b[i] ^ p[i],))\r\n\r\n c = result[-16:]\r\n p = p[16:]\r\n \r\n return result",
"def encrypt(self, password, assoc=None):",
"def md5(string):\n return hashlib.md5(string.encode('utf-8')).hexdigest()",
"def encodePassword(password):",
"def encrypt(password):\n m = hashlib.sha256()\n m.update(password)\n return m.hexdigest()",
"def test_str(self):\n key = objects.SecretData(self.bytes_a, enums.SecretDataType.PASSWORD)\n expected = str(binascii.hexlify(self.bytes_a))\n observed = str(key)\n self.assertEqual(expected, observed)",
"def encrypt_password(password):\n mid = ''.join([hex(ord(w))[2:] for w in password])\n return hashlib.sha1(mid).hexdigest()",
"def test_derive_user_password(self):\n assert derive_user_password(\"secret\") == bytes.fromhex(\n \"03 fc ed b6 66 60 25 1e c8 1a 1a 71 69 01 69 6a\"\n )",
"def encryptString(PT, key):\n # convert key to sha256 and then base64 encode per fernet spec\n\n key = sha256(key)\n key = base64.urlsafe_b64encode(key)\n f = Fernet(key)\n CT = f.encrypt(PT)\n return CT",
"def md5sum(s):\n m = hashlib.md5()\n m.update(s)\n return m.hexdigest()",
"def test_encryption():\n expected=\"Ymnx nx 956 u~ymts htzwxj\"\n actual=encrypt(\"This is 401 python course\",5)\n assert expected==actual",
"def address_hasher(address):\n return hashlib.md5(address).hexdigest()",
"def encrypt_string(string, key):\n f = Fernet(key)\n return f.encrypt(string)",
"def get_md5(string):\r\n byte_string = string.encode(\"utf-8\")\r\n md5 = hashlib.md5()\r\n md5.update(byte_string)\r\n result = md5.hexdigest()\r\n return result",
"def md5hex(data):\n return md5(data).hexdigest()",
"def encrypt(self, unencrypted, secret, tenant):",
"def mysql_password(_str):\n\tpass1 = hashlib.sha1(_str.encode()).digest()\n\tpass2 = hashlib.sha1(pass1).hexdigest()\n\treturn '*' + pass2.upper()",
"def key_to_md5(key):\n m = hashlib.md5()\n m.update(key.encode('utf-8'))\n hd = m.hexdigest()\n shorthd = hd[:6]\n if shorthd.isdigit():\n return shorthd + 'a'\n return shorthd",
"def digest(string):\n return sha512(string.encode('utf-8')).hexdigest()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test accuracy of the learned model using unary features only (i.e., just predicting each pixel independently). Implemented via a separate logistic regression.
|
def test_unary_only(self):
num_features = 65
num_states = 2
all_pixel, all_label = load_all_images_and_labels(os.path.join(os.path.dirname(__file__), 'train_data'), num_features, 3)
initial_w = np.zeros(num_features * num_states)
res = minimize(objective, initial_w, method="L-BFGS-B", args=(all_pixel, all_label, num_features, num_states),
jac=gradient)
weights = res.x
accuracy_training = accuracy(weights, all_pixel, all_label, num_features, num_states)
print ("accuracy on training set: %f" % accuracy_training)
assert (accuracy_training >= 0.9), "Unary classification accuracy on training data is less than 0.9"
all_pixel, all_label = load_all_images_and_labels(os.path.join(os.path.dirname(__file__), 'test_data'), num_features, 1)
accuracy_testing = accuracy(weights, all_pixel, all_label, num_features, num_states)
print ("accuracy on testing set: %f" % accuracy_testing)
assert (accuracy_testing >= 0.7), "Unary classification accuracy on testing data is less than 0.7"
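
The objective, gradient, accuracy and load_all_images_and_labels helpers used above are defined elsewhere in the repository this row was scraped from. As a rough, self-contained illustration of the same unary-only baseline (an independent per-pixel logistic regression), here is a sketch on synthetic data; the data and labels are made up for the example and only the feature count matches the row.

import numpy as np
from sklearn.linear_model import LogisticRegression

num_features = 65    # matches the dataset row; the data itself is synthetic
rng = np.random.default_rng(0)
all_pixel = rng.normal(size=(1000, num_features))
all_label = (all_pixel[:, 0] + 0.1 * rng.normal(size=1000) > 0).astype(int)

clf = LogisticRegression(max_iter=1000).fit(all_pixel, all_label)
print("accuracy on training set: %f" % clf.score(all_pixel, all_label))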
|
[
"def logistic_regression(x_train, x_test, y_train, y_test):\n\tlog_reg_model = LogisticRegression()\n\tlog_reg_model = log_reg_model.fit(x_train, y_train)\n\n\tpredicted_labels = log_reg_model.predict(x_test)\n\tprint(log_reg_model.score(x_test, y_test))",
"def logistic_regression():\n train_x, test_x, train_y, test_y = prepare_data()\n train_y = train_y.reshape((train_y.shape[0], ))\n\n clf = LogisticRegression(random_state=0, solver='liblinear', max_iter=300,\n multi_class='ovr')\n start = time.time()\n clf.fit(train_x, train_y)\n end = time.time()\n \n y_pred = clf.predict(test_x)\n\n time_ = end - start\n accuracy = 100 * accuracy_score(test_y, y_pred)\n\n print(\"### LR ###\\n\")\n print(\"Training lasted %.2f seconds\" % time_)\n print(\"Accuracy = %.2f\" % (accuracy))\n\n return(time_, accuracy)",
"def test_l1logistic_binary():\n n_inf = 10\n X, y, w, b = make_classification(n_samples=200,\n random_state=6,\n n_informative=n_inf,\n n_features=20,\n w_scale=4.,\n include_intercept=True)\n\n l1log = UoI_L1Logistic(random_state=10).fit(X, y)\n assert (np.sign(abs(w)) == np.sign(abs(l1log.coef_))).mean() >= .8",
"def test_l1logistic_multiclass():\n n_features = 20\n n_inf = 10\n X, y, w, b = make_classification(n_samples=200,\n random_state=10,\n n_classes=5,\n n_informative=n_inf,\n n_features=n_features,\n shared_support=True,\n w_scale=4.)\n l1log = UoI_L1Logistic().fit(X, y)\n assert (np.sign(abs(w)) == np.sign(abs(l1log.coef_))).mean() >= .8",
"def test_l1logistic_multiclass_not_shared():\n n_features = 20\n n_inf = 10\n X, y, w, b = make_classification(n_samples=400,\n random_state=10,\n n_classes=5,\n n_informative=n_inf,\n n_features=n_features,\n shared_support=False,\n w_scale=4.)\n l1log = UoI_L1Logistic(shared_support=False).fit(X, y)\n assert (np.sign(abs(w)) == np.sign(abs(l1log.coef_))).mean() >= .8",
"def logistic_regression(df):\n X_train, X_test, y_train, y_test, X, y = split(df)\n log_reg = LogisticRegression()\n log_reg.fit(X_train, y_train)\n y_pred = log_reg.predict(X_test)\n print(\"Coefficients:\",log_reg.coef_) # determine most important questions\n print(\"Confusion Matrix:\", confusion_matrix(y_test, y_pred))\n print('Logistic Regression Accuracy: ', log_reg.score(X, y))\n print(\"Precision:\", precision_score(y_test, y_pred))\n print(\"Recall:\", recall_score(y_test, y_pred))",
"def logistic_regression(self, X, y):\n raise NotImplementedError",
"def binary_logistic_regression_loss(labels: np.array, predictions: np.array) -> np.array:\n return -(labels * np.log(predictions)) - ((1 - labels) * np.log(1 - predictions))",
"def test_logistic_regression_importances(self):\n # Setting up classifier\n clf = LogisticRegression(C=1., solver='lbfgs')\n clf.fit(X, y)\n\n # Setting up lorax\n lrx = TheLorax(clf, data, id_col='entity_id')\n lrx_out = lrx.explain_example(idx=1, pred_class=1, graph=False)\n\n feature1_contrib = lrx_out.contribution.loc['feature1']\n feature5_contrib = lrx_out.contribution.loc['feature5']\n\n # Test cases for correct feature importances\n self.assertEqual(feature1_contrib, 2.186415806126551)\n self.assertEqual(feature5_contrib, -3.228614405467005)\n\n # Test case if we can recover lr prediction\n # Can't use all of sample because it now contains intercept as last element\n sample = lrx.X_test.loc[1, ].values[:-1]\n lr_pred = clf.predict_proba(sample.reshape(1, -1))[0][1]\n lrx_pred = 1 / (1 + np.exp(-lrx_out.contribution.sum()))\n\n self.assertEqual(lrx_pred, lr_pred)",
"def accuracy(model, X_test, y_test):\n predictions = model.predict(X_test)\n return (np.array(predictions) == np.array(y_test)).mean()",
"def logistic_regression_loss_naive(W, X, y, reg):\n # Set the loss to a random number\n loss = 0\n # Initialize the gradient to zero\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n #############################################################################\n # START OF YOUR CODE #\n #############################################################################\n dim = X.shape[1]\n num_train = X.shape[0]\n f_mat = np.zeros_like(W)\n h_mat = np.zeros_like(W)\n loss1 = np.zeros_like(W)\n grad = 0\n y_ = np.zeros([y.shape[0],W.shape[1]])\n for i in range(y.shape[0]):\n y_[i,y[i]] = 1 \n \n for i in range(num_train):\n sample_x = X[i,:]\n for cate in range(W.shape[1]):\n grad = 0\n f_x = 0\n for index in range(dim):\n f_x += W[index,cate]*sample_x[index]\n \n f_mat[i,cate] = f_x\n h_x = sigmoid(f_x)\n loss += y_[i,cate]*np.log(h_x) + (1 - y_[i,cate]) * np.log(1 - h_x)\n grad += (h_x - y_[i,cate]) * sample_x\n h_mat[i,cate] = h_x - y_[i,cate]\n dW[:,cate] = grad.T\n \n loss = (-1 / num_train )* loss + 0.5 * reg * np.sum(W * W)\n dW = 1/ num_train * dW + reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW",
"def test_logistic_regr(in_dim=2):\n X, y = make_linear_cls_testdata(in_dim=in_dim)\n\n compare_scikt_and_tf(\n sk_LogisticRegression,\n LogisticRegression,\n X, y,\n sk_params={'penalty':'l2', 'C':100.0})",
"def __check_model_accuracy(self, model: Pipeline, test_data: DataList) -> float:\n predictions = model.predict(test_data.texts)\n return f1_score(test_data.labels, predictions, average='micro')",
"def logistic_regression(self,trainingData,testData,fileName):\n lr = LogisticRegression(maxIter=10, regParam=0.3, elasticNetParam=0)\n model = lr.fit(trainingData)\n model.save(fileName)\n return model.transform(testData)",
"def accuracy(self,X_test,Y_test): #returns the accuracy of the model for a given testing data set(X_test,Y_test), both should be provided\n predictions=self.predict(X_test)\n temp=np.abs(Y_test-predictions)\n temp=float(np.squeeze(np.sum(temp)))\n m=X_test.shape[1]\n accuracy=100-((temp/m)*100)\n return accuracy",
"def predictOneVsAll(all_theta, X,y):\n\n # Add ones to the X data matrix\n m=X.shape[0]\n X = np.column_stack((np.ones((m, 1)), X))\n print \"X shape:\",X.shape\n\n# ====================== YOUR CODE HERE ======================\n# Instructions: Complete the following code to make predictions using\n# your learned logistic regression parameters (one-vs-all).\n# You should set p to a vector of predictions (from 1 to\n# num_labels).\n#\n# Hint: This code can be done all vectorized using the max function.\n# In particular, the max function can also return the index of the \n# max element, for more information see 'help max'. If your examples \n# are in rows, then, you can use max(A, [], 2) to obtain the max \n# for each row.\n# =========================================================================\n pred =np.zeros(y.shape)\n n_correct, n_total = 0., 0.\n incorrect_indices = []\n for irow in xrange(X.shape[0]):\n n_total += 1\n pred[irow]=(predictOneVsAllHelper(all_theta,X[irow]))\n if predictOneVsAllHelper(all_theta,X[irow]) == y[irow]: \n n_correct += 1\n else: incorrect_indices.append(irow)\n\n print \"Training set accuracy: %0.1f%%\"%(100*(n_correct/n_total))\n return pred",
"def test_prediction_with_negative_values(model_obj):\n X_test = [-2, -4, -6, -8]\n y_test = \"setosa\"\n y_preds = model_obj.classify(X_test)\n assert y_test == y_preds",
"def Logistic_Regression(x, y):\n\n model = LogisticRegression()\n model.fit(X=x, y=y)\n\n Predict_y = model.predict(x)\n\n Results = FT.MachineLearning.Metrics.ModelEvaluation(y, Predict_y)\n\n return model, Results",
"def train_log_linear_with_one_hot():\n data_manager = DataManager(ONEHOT_AVERAGE, batch_size=BATCH_SIZE)\n model = LogLinear(data_manager.get_input_shape()[0])\n train_model(model, data_manager, N_EPOCHS, LEARNING_RATE, WEIGHT_DECAY)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the spanning-tree edge appearance probability computations (used for TRBP) are correct.
|
def test_tree_probability_calculation(self):
height = 3
width = 3
tree_prob = ImageLoader.calculate_tree_probabilities_snake_shape(width, height)
assert (tree_prob[(0, 0), (0, 1)] == 0.75), "side edge probability does not equal to 0.75"
assert (tree_prob[(0, 1), (0, 0)] == 0.75), "side edge probability does not equal to 0.75"
assert (tree_prob[(1, 1), (1, 0)] == 0.5), "center edge probability does not equal to 0.5"
side_edge_count = 0
center_edge_count = 0
for keys in tree_prob:
if tree_prob[keys] == 0.75:
side_edge_count += 1
else:
center_edge_count += 1
assert (side_edge_count == 16), "number of side edges not correct: %d" % (side_edge_count)
assert (center_edge_count == 8), "number of center edges not correct"
|
[
"def test_random_spanning_tree_multiplicative_large():\n from math import exp\n from random import Random\n\n pytest.importorskip(\"numpy\")\n stats = pytest.importorskip(\"scipy.stats\")\n\n gamma = {\n (0, 1): -0.6383,\n (0, 2): -0.6827,\n (0, 5): 0,\n (1, 2): -1.0781,\n (1, 4): 0,\n (2, 3): 0,\n (5, 3): -0.2820,\n (5, 4): -0.3327,\n (4, 3): -0.9927,\n }\n\n # The undirected support of gamma\n G = nx.Graph()\n for u, v in gamma:\n G.add_edge(u, v, lambda_key=exp(gamma[(u, v)]))\n\n # Find the multiplicative weight for each tree.\n total_weight = 0\n tree_expected = {}\n for t in nx.SpanningTreeIterator(G):\n # Find the multiplicative weight of the spanning tree\n weight = 1\n for u, v, d in t.edges(data=\"lambda_key\"):\n weight *= d\n tree_expected[t] = weight\n total_weight += weight\n\n # Assert that every tree has an entry in the expected distribution\n assert len(tree_expected) == 75\n\n # Set the sample size and then calculate the expected number of times we\n # expect to see each tree. This test uses a near minimum sample size where\n # the most unlikely tree has an expected frequency of 5.15.\n # (Minimum required is 5)\n #\n # Here we also initialize the tree_actual dict so that we know the keys\n # match between the two. We will later take advantage of the fact that since\n # python 3.7 dict order is guaranteed so the expected and actual data will\n # have the same order.\n sample_size = 1200\n tree_actual = {}\n for t in tree_expected:\n tree_expected[t] = (tree_expected[t] / total_weight) * sample_size\n tree_actual[t] = 0\n\n # Sample the spanning trees\n #\n # Assert that they are actually trees and record which of the 75 trees we\n # have sampled.\n #\n # For repeatability, we want to take advantage of the decorators in NetworkX\n # to randomly sample the same sample each time. However, if we pass in a\n # constant seed to sample_spanning_tree we will get the same tree each time.\n # Instead, we can create our own random number generator with a fixed seed\n # and pass those into sample_spanning_tree.\n rng = Random(37)\n for _ in range(sample_size):\n sampled_tree = nx.random_spanning_tree(G, \"lambda_key\", seed=rng)\n assert nx.is_tree(sampled_tree)\n\n for t in tree_expected:\n if nx.utils.edges_equal(t.edges, sampled_tree.edges):\n tree_actual[t] += 1\n break\n\n # Conduct a Chi squared test to see if the actual distribution matches the\n # expected one at an alpha = 0.05 significance level.\n #\n # H_0: The distribution of trees in tree_actual matches the normalized product\n # of the edge weights in the tree.\n #\n # H_a: The distribution of trees in tree_actual follows some other\n # distribution of spanning trees.\n _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values()))\n\n # Assert that p is greater than the significance level so that we do not\n # reject the null hypothesis\n assert not p < 0.05",
"def test_random_spanning_tree_additive_large():\n from random import Random\n\n pytest.importorskip(\"numpy\")\n stats = pytest.importorskip(\"scipy.stats\")\n\n edges = {\n (0, 1): 1,\n (0, 2): 1,\n (0, 5): 3,\n (1, 2): 2,\n (1, 4): 3,\n (2, 3): 3,\n (5, 3): 4,\n (5, 4): 5,\n (4, 3): 4,\n }\n\n # Build the graph\n G = nx.Graph()\n for u, v in edges:\n G.add_edge(u, v, weight=edges[(u, v)])\n\n # Find the additive weight for each tree.\n total_weight = 0\n tree_expected = {}\n for t in nx.SpanningTreeIterator(G):\n # Find the multiplicative weight of the spanning tree\n weight = 0\n for u, v, d in t.edges(data=\"weight\"):\n weight += d\n tree_expected[t] = weight\n total_weight += weight\n\n # Assert that every tree has an entry in the expected distribution\n assert len(tree_expected) == 75\n\n # Set the sample size and then calculate the expected number of times we\n # expect to see each tree. This test uses a near minimum sample size where\n # the most unlikely tree has an expected frequency of 5.07.\n # (Minimum required is 5)\n #\n # Here we also initialize the tree_actual dict so that we know the keys\n # match between the two. We will later take advantage of the fact that since\n # python 3.7 dict order is guaranteed so the expected and actual data will\n # have the same order.\n sample_size = 500\n tree_actual = {}\n for t in tree_expected:\n tree_expected[t] = (tree_expected[t] / total_weight) * sample_size\n tree_actual[t] = 0\n\n # Sample the spanning trees\n #\n # Assert that they are actually trees and record which of the 75 trees we\n # have sampled.\n #\n # For repeatability, we want to take advantage of the decorators in NetworkX\n # to randomly sample the same sample each time. However, if we pass in a\n # constant seed to sample_spanning_tree we will get the same tree each time.\n # Instead, we can create our own random number generator with a fixed seed\n # and pass those into sample_spanning_tree.\n rng = Random(37)\n for _ in range(sample_size):\n sampled_tree = nx.random_spanning_tree(\n G, \"weight\", multiplicative=False, seed=rng\n )\n assert nx.is_tree(sampled_tree)\n\n for t in tree_expected:\n if nx.utils.edges_equal(t.edges, sampled_tree.edges):\n tree_actual[t] += 1\n break\n\n # Conduct a Chi squared test to see if the actual distribution matches the\n # expected one at an alpha = 0.05 significance level.\n #\n # H_0: The distribution of trees in tree_actual matches the normalized product\n # of the edge weights in the tree.\n #\n # H_a: The distribution of trees in tree_actual follows some other\n # distribution of spanning trees.\n _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values()))\n\n # Assert that p is greater than the significance level so that we do not\n # reject the null hypothesis\n assert not p < 0.05",
"def uas(gold_tree, predicted_tree):\n # Exercise 5.5\n assert gold_tree.n == predicted_tree.n and gold_tree.is_tree() and predicted_tree.is_tree()\n deplist1 = set(gold_tree.edges())\n correct = 0\n for dep2 in predicted_tree.edges():\n if dep2 in deplist1:\n correct += 1\n return correct / len(deplist1)",
"def test_equal_apportionment_zero_children(self):\r\n\r\n self.assertTrue(self.DUT.equal_apportionment(0, 0.95))",
"def test_random_spanning_tree_additive_small():\n pytest.importorskip(\"scipy\")\n\n edges = {\n (0, 1): 1,\n (0, 2): 1,\n (0, 5): 3,\n (1, 2): 2,\n (1, 4): 3,\n (2, 3): 3,\n (5, 3): 4,\n (5, 4): 5,\n (4, 3): 4,\n }\n\n # Build the graph\n G = nx.Graph()\n for u, v in edges:\n G.add_edge(u, v, weight=edges[(u, v)])\n\n solution_edges = [(0, 2), (1, 2), (2, 3), (3, 4), (3, 5)]\n solution = nx.Graph()\n solution.add_edges_from(solution_edges)\n\n sampled_tree = nx.random_spanning_tree(\n G, weight=\"weight\", multiplicative=False, seed=37\n )\n\n assert nx.utils.edges_equal(solution.edges, sampled_tree.edges)",
"def test_basics(self):\n self.report('Testing adding data, evaluation and marginal likelihood.' +\n ' Probabilistic test, might fail.')\n num_coeffs_vals = [2, 1, 4, 5] * 5\n num_tests = 0\n num_successes = 0\n for dataset in self.datasets:\n for dist_type in self.dist_types:\n for kernel_type in self.kernel_types[dist_type]:\n curr_num_coeffs = num_coeffs_vals.pop(0)\n curr_gp = build_nngp_with_dataset(dataset, kernel_type, curr_num_coeffs,\n dist_type)\n # Predictions & Marginal likelihood\n curr_preds, _ = curr_gp.eval(dataset[2], 'std')\n curr_gp_err = compute_average_prediction_error(dataset, curr_preds)\n const_err = compute_average_prediction_error(dataset, dataset[1].mean())\n lml = curr_gp.compute_log_marginal_likelihood()\n is_success = curr_gp_err < const_err\n num_tests += 1\n num_successes += is_success\n self.report(('(%s, ntr=%d, nte=%d):: GP-lml=%0.4f, GP-err=%0.4f, ' +\n 'Const-err=%0.4f. succ=%d')%(dataset[-1][:5], len(dataset[0]),\n len(dataset[2]), lml, curr_gp_err, const_err, is_success),\n 'test_result')\n succ_frac = num_successes / float(num_tests)\n self.report('Summary: num_successes / num_floats = %d/%d = %0.4f'%(num_successes,\n num_tests, succ_frac), 'test_result')\n assert succ_frac > 0.5",
"def test_n_observed_nodes(self):\n self.assertEqual(self.sp_graph.n_observed_nodes, 2)",
"def test_probe_count(self):\n ratio = float(len(self.probes_75)) / len(self.probes_100)\n self.assertTrue(1.95 < ratio < 2.05)",
"def test_loss_density_approximation(self):\n nb_points_per_interval = 10\n nb_itvs_per_side = 3\n nb_intervals_per_side = tf.constant(nb_itvs_per_side, dtype=tf.int64)\n \n nb_points = 2*nb_points_per_interval*nb_itvs_per_side + 1\n grid = numpy.linspace(-nb_itvs_per_side,\n nb_itvs_per_side,\n num=nb_points)\n parameters_0 = numpy.expand_dims(scipy.stats.uniform.pdf(grid, loc=-1., scale=2.).astype(numpy.float32),\n axis=0)\n parameters_1 = numpy.expand_dims(scipy.stats.triang.pdf(grid, 0.5, loc=0., scale=2.).astype(numpy.float32),\n axis=0)\n parameters = tf.Variable(numpy.concatenate((parameters_0, parameters_1), axis=0),\n dtype=tf.float32,\n trainable=False)\n samples_0 = numpy.random.uniform(low=-1.,\n high=1.,\n size=(1, 5000)).astype(numpy.float32)\n samples_1 = numpy.random.triangular(0.,\n 1.,\n 2.,\n size=(1, 5000)).astype(numpy.float32)\n samples = numpy.concatenate((samples_0, samples_1), axis=0)\n node_samples = tf.placeholder(tf.float32, shape=(2, 5000))\n node_approximate_prob = tfuls.approximate_probability(node_samples,\n parameters,\n nb_points_per_interval,\n nb_intervals_per_side)\n \n # For the piecewise linear function with parameters `parameters_0`,\n # the loss is the sum of two terms. The 1st term must be equal to\n # -1.0. The 2nd term (the integral of the square of the p.d.f of the\n # continuous uniform distribution of support [-1.0, 1.0]) must be\n # equal to 0.5.\n # For the piecewise linear function with parameters `parameters_1`,\n # the loss is the sum of two terms. The 1st term must be equal to\n # -4/3. The 2nd term (the integral of the square of the p.d.f of the\n # triangular distribution with lower limit 0.0, upper limit 2.0 and\n # mode 1.0) must be equal to 2/3.\n # The loss below is the sum of the two previous losses.\n node_loss_density_approx = tfuls.loss_density_approximation(node_approximate_prob,\n parameters,\n nb_points_per_interval)\n with tf.Session() as sess:\n if tf.__version__.startswith('0'):\n tf.initialize_all_variables().run()\n else:\n tf.global_variables_initializer().run()\n loss_density_approx = sess.run(node_loss_density_approx, feed_dict={node_samples:samples})\n print('Loss computed by the function: {}'.format(loss_density_approx))\n print('Loss computed by hand: {}'.format(-1.166667))",
"def test_agree_apportionment_zero_children(self):\r\n\r\n self.assertTrue(self.DUT.agree_apportionment(0, 0.95))",
"def test_gaussian_node(self):\n means = [0.0, 0.5, 1.0]\n stds = [1.0, 2.0, 3.0]\n gauss0 = GaussianNode(mean=means[0], std=stds[0], scope=0)\n gauss1 = GaussianNode(mean=means[1], std=stds[1], scope=1)\n gauss2 = GaussianNode(mean=means[2], std=stds[2], scope=2)\n sample1 = np.array([1, 2, 3])\n sample2 = np.array([10, 20, 30])\n x = torch.Tensor([sample1, sample2])\n\n # Get results\n res_gauss0 = gauss0(x)\n res_gauss1 = gauss1(x)\n res_gauss2 = gauss2(x)\n\n # Expect results from normal distributions\n normal0 = torch.distributions.Normal(loc=means[0], scale=stds[0])\n normal1 = torch.distributions.Normal(loc=means[1], scale=stds[1])\n normal2 = torch.distributions.Normal(loc=means[2], scale=stds[2])\n\n exp_gauss0 = normal0.log_prob(torch.Tensor([1, 10]))\n exp_gauss1 = normal1.log_prob(torch.Tensor([2, 20]))\n exp_gauss2 = normal2.log_prob(torch.Tensor([3, 30]))\n\n # Assertions\n self.assertEqual(len(res_gauss0.tolist()), 2)\n self.assertEqual(len(res_gauss1.tolist()), 2)\n self.assertEqual(len(res_gauss2.tolist()), 2)\n\n # Assert that results are numerically equal\n self.assertTrue(np.isclose(res_gauss0.tolist(), exp_gauss0, atol=DELTA).all())\n self.assertTrue(np.isclose(res_gauss1.tolist(), exp_gauss1, atol=DELTA).all())\n self.assertTrue(np.isclose(res_gauss2.tolist(), exp_gauss2, atol=DELTA).all())",
"def test_vertex_edge_count1(self):\n sum_of_the_degrees = sum( [ len( list( self.G[v] ) ) for v in self.G ] )\n number_of_edges = len( self.G.edges() )\n assert sum_of_the_degrees == number_of_edges * 2, \"sum of degrees: %i, num of edges: %i does not satisfy relationship\" % ( sum_of_the_degrees, number_of_edges )",
"def test_using_ego_graph(self):\n # This is the triangle graph with one additional edge.\n G = nx.lollipop_graph(3, 1)\n assert_equal(nx.local_efficiency(G), 23 / 24)",
"def test_notax_over_edge():\n ans = tax_table(18201)\n numpy.testing.assert_allclose(ans, 0.19)",
"def testReproducibleBackpropToBoxes(self):\n self._testReproducibleBackprop(test_image_not_boxes=False)",
"def test_refpoints(self):\n self.ld.compute(self.box, self.pos)\n density = self.ld.density\n\n npt.assert_array_less(np.fabs(density - 10.0), 1.5)\n\n neighbors = self.ld.num_neighbors\n npt.assert_array_less(np.fabs(neighbors - 1130.973355292), 200)",
"def testing_step(E_arr, I_arr, TP_arr,\n prob_infected_detected,\n prob_neighbor_detected,\n prob_exposed_detected,\n adj_mat=None):\n # TODO: Testing for other populations (high degree nodes).\n # Infected group is tested, detected with some probability.\n new_TP_arr = random_subset(I_arr & (1 - TP_arr), prob_infected_detected)\n # Assumes 0 negatives (false & true).\n n_infected_tested = new_TP_arr.sum()\n\n # Random subset of entire population tested, carriers (Exposed or Infected)\n # detected with some probability.\n carrier_arr = E_arr | I_arr\n new_TP_arr |= random_subset(carrier_arr & (1 - TP_arr), prob_exposed_detected)\n N = len(E_arr)\n # N, since entire population was tested, other than known positives.\n # Assumes 0 negatives (false & true). Divide by P(test=positive) for more realistic estimate.\n n_general_tested = (N - TP_arr.sum()) * prob_exposed_detected\n\n # Neighbors of those who tested positive are themselves tested.\n # Check if neighbors are tested, to avoid matrix multiplication if not required.\n if prob_neighbor_detected > 0:\n connections_to_positive = number_of_edges_to_group(new_TP_arr, adj_mat)\n neighbors_tested = random_subset(connections_to_positive > 0, prob_neighbor_detected).astype(int)\n # Don't double count those who are already known positives, they aren't retested.\n neighbors_tested &= (1 - (new_TP_arr | TP_arr))\n # Assumes 0 negatives (false & true). Divide by P(test=positive) for more realistic estimate.\n n_neighbors_tested = neighbors_tested.sum()\n # Of those who were tested, the ones who tested positive.\n neighbors_detected = neighbors_tested & carrier_arr\n new_TP_arr |= neighbors_detected\n else:\n n_neighbors_tested = 0\n\n TP_arr |= new_TP_arr\n\n return TP_arr, n_infected_tested, n_neighbors_tested, n_general_tested",
"def test_CbGpPWpGaD_traversal():\n graph = get_bupropion_subgraph()\n compound = 'DB01156' # Bupropion\n disease = 'DOID:0050742' # nicotine dependence\n metapath = graph.metagraph.metapath_from_abbrev('CbGpPWpGaD')\n rows, cols, pc_matrix, t = dwpc(graph, metapath, damping=0)\n rows, cols, dwpc_matrix, t = dwpc(graph, metapath, damping=0.4)\n i = rows.index(compound)\n j = cols.index(disease)\n assert pc_matrix[i, j] == 142\n assert dwpc_matrix[i, j] == pytest.approx(0.03287590886921623)",
"def test_create_Gamma_eta_tree_more_per_regression():\n\n\n # random - structure output check\n # data creation\n n = 200\n min_size_leaf = 1\n\n X = np.random.uniform(size = (n, 510), low = -1,high = 1)\n y = 10 * np.sin(np.pi * X[:,0]*X[:,1]) + 20 * ( X[:,2] - .5)**2 +\\\n 10 * X[:,3] + 5 * X[:,4] + np.random.normal(size = n)\n\n rf_class = sklearn.ensemble.RandomForestRegressor(n_estimators = 2,\n min_samples_leaf = min_size_leaf)\n random_forest = rf_class.fit(X = X,\n y = y.ravel())\n\n tree = random_forest.estimators_[0]\n\n max_depth_range = np.max(smooth_rf.depth_per_node(tree)) + 1\n\n G, n, ln, ld, li, fd, fi = smooth_rf.create_Gamma_eta_tree_more_per(tree)\n\n assert G.shape == (np.sum(tree.tree_.children_left == -1),\n max_depth_range), \\\n \"Gamma returned does not have the correct shape\"\n\n assert n.shape == G.shape, \\\n \"eta returned does not have the correct shape\"\n\n assert np.all(n >= 0), \\\n \"eta returned has negative values\"\n\n assert np.all(n[:,0] ==\n tree.tree_.weighted_n_node_samples[tree.tree_.children_left == -1]),\\\n \"eta structure doesn't match up with number of observes per leaf\"\n\n # new tests (ln,ld,li)\n assert ln.shape[0] == G.shape[0] and ld.shape[0] == G.shape[0] and \\\n li.shape[0] == G.shape[0], \\\n \"leaf based outputs should have same number of leaves and Gamma\"\n\n assert np.all(np.ceil(ln) == ln) and np.all(ln > 0), \\\n \"leaf counts should be strictly positive and integers\"\n\n assert np.all(ln ==\n tree.tree_.weighted_n_node_samples[tree.tree_.children_left == -1]), \\\n \"number of obs in each leaf not matching tree structure\"\n\n assert np.all(np.ceil(ld) == ld) and np.all(ld >= 0), \\\n \"leaf depth should be positive and integers\"\n\n assert np.all(li >= - 1e-10), \\\n \"leaf impurity (mse) should be non-negative\"\n\n # newest tests (fd, fi)\n assert fd.shape == G.shape and fi.shape == G.shape, \\\n \"shapes of full depth and impurity should make shape of Gamma\"\n\n assert np.all(fd[:,0] == ld) and np.all(np.ceil(fd) == fd) and \\\n np.all(fd >= 0), \\\n \"full depth shape should mirror leaf depth structure\"\n\n assert np.all(fi[:,0] == li) and np.all(fi >= - 1e-10), \\\n \"full impurity (mse) should mirror leaf impurity structure\"\n\n # for c_idx in range(fi.shape[1] - 1):\n # assert np.all(fi[:,c_idx] - fi[:,c_idx + 1] <= 1e-10), \\\n # \"impurity should be increasing (mse)\"\n\n # static check\n\n # tree structure:\n # ~upper: left, lower: right~\n # num obs depth\n # |--1 10 1\n # -0-| 34 0\n # | |--3 9 2\n # |-2-| 24 1\n # | |--5 8 3\n # |-4-| 15 2\n # |--6 7 3\n\n\n # eta\n # (1) 10 | 24 | 0 | 0\n # (3) 9 | 15 | 10 | 0\n # (5) 8 | 7 | 9 | 10\n # (6) 7 | 8 | 9 | 10\n\n # Gamma\n # (1) 10 | 18+24+28 = 70 | 0 | 0\n # (3) 9 * 2 = 18 | 24+28 = 52 | 10 | 0\n # (5) 8 * 3 = 24 | 28 | 18 | 10\n # (6) 7 * 4 = 28 | 24 | 18 | 10\n\n\n\n class inner_fake_tree():\n def __init__(self, nn, cl, cr, v):\n self.weighted_n_node_samples = nn\n self.children_left = cl\n self.children_right = cr\n self.value = v\n self.impurity = np.zeros(v.shape[0]) # this isn't a good test\n\n class fake_tree():\n def __init__(self, nn, cl, cr, v):\n self.tree_ = inner_fake_tree(nn, cl, cr, v)\n self.__class__ = sklearn.tree.tree.DecisionTreeRegressor\n\n weighted_n_node_samples = np.array([34,10,24,9,15,8,7], dtype = np.int)\n children_left = np.array([2,-1,4,-1,6,-1,-1], dtype = np.int)\n children_right = np.array([1,-1,3,-1,5,-1,-1], dtype = np.int)\n value = np.array([-99, 1, -99, 2, -99, 3, 4]).reshape((-1,1,1))\n\n test = 
fake_tree(weighted_n_node_samples,\n children_left,\n children_right,\n value)\n\n n_leaf = 4\n\n g_static, n_static, ln_static, ld_static, li_static, \\\n fd_static, fi_static = \\\n smooth_rf.create_Gamma_eta_tree_more_per(test)\n\n n_expected = np.array([[10,24,0,0],\n [9,15,10,0],\n [8,7,9,10],\n [7,8,9,10]])\n g_expected = np.array([[10,70,0,0],\n [18,52,10,0],\n [24,28,18,10],\n [28,24,18,10]])\n ln_expected = n_expected[:,0]\n ld_expected = np.array([1,2,3,3])\n fd_expected = np.array([[1,0,0,0],\n [2,1,0,0],\n [3,2,1,0],\n [3,2,1,0]])\n\n assert np.all(g_static == g_expected), \\\n \"static test's Gamma failed to reproduce correct solutions\"\n assert np.all(n_static == n_expected), \\\n \"static test's eta failed to reproduce correct solutions\"\n assert np.all(ln_static == ln_expected), \\\n \"static test's leaf count failed to reproduce correct solutions\"\n assert np.all(ld_static == ld_expected), \\\n \"static test's leaf depth failed to reproduce correct solutions\"\n assert np.all(fd_static == fd_expected), \\\n \"static test's full depth failed to reproduce correct solutions\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the loaded model has the correct matrix structure.
|
def test_model_matrix_structure(self):
loader = ImageLoader(10, 10)
train_dir = os.path.join(os.path.dirname(__file__), 'train_data')
images, models, labels, names = loader.load_all_images_and_labels(train_dir, 2, 1)
model = models[0]
model.create_matrices()
for edge, i in model.message_index.items():
from_index = model.var_index[edge[0]]
to_index = model.var_index[edge[1]]
assert model.message_from[i] == from_index, "Message sender index is wrong"
assert model.message_to[i] == to_index, "Message receiver index is wrong"
assert model.message_to_map.getrow(i).getcol(to_index) == 1, "Message receiver matrix map is wrong"
assert np.all(np.sum(model.message_to_map.todense(), axis=1) == 1), \
"Message sender map has a row that doesn't sum to 1.0"
assert np.allclose(model.edge_pot_tensor[:, :, :model.num_edges],
model.edge_pot_tensor[:, :, model.num_edges:]), "Edge tensor structure is wrong"
assert np.allclose(model.edge_pot_tensor[:, :, :model.num_edges],
model.edge_pot_tensor[:, :, model.num_edges:].transpose(1, 0, 2)), \
"Edge tensor is not symmetric"
|
[
"def test_model(model, datamodule):\n # TODO - replace expected shape\n expected_shape = (4, 1, 1)\n assert model(*next(datamodule.val_dataloader())).shape == expected_shape",
"def testModel(self, path):\n l = ObjLoader()\n self.model = l.loadModel(path)\n self.model.setCanvas(self.mainwindow.mainframe.canvas)\n self.model.calculate(MATRIX_VIEW)\n self.model.render()\n self.models.append(self.model)",
"def checkModel(path):\n # The following return needs to be removed once\n # the unittest related changes in Sasmodels are commited\n # return True\n # try running the model\n from sasmodels.sasview_model import load_custom_model\n Model = load_custom_model(path)\n model = Model()\n q = np.array([0.01, 0.1])\n _ = model.evalDistribution(q)\n qx, qy = np.array([0.01, 0.01]), np.array([0.1, 0.1])\n _ = model.evalDistribution([qx, qy])\n\n # check the model's unit tests run\n from sasmodels.model_test import run_one\n # TestSuite module in Qt5 now deletes tests in the suite after running,\n # so suite[0] in run_one() in sasmodels/model_test.py will contain [None] and\n # test.info.tests will raise.\n # Not sure how to change the behaviour here, most likely sasmodels will have to\n # be modified\n result = run_one(path)\n\n return result",
"def test_load_data_set(self):\n X, Y = self.engine.load_data_set()\n print(X[0].shape)\n print(Y[0])",
"def verifyRead(self, filename, numCells, numGroups, validator):\n\n print('\\n\\nFile: {fname}'.format(fname=filename))\n\n mod = smtk.model.Model(self.read(filename)[0])\n\n print(' {mt} model'.format(\n mt=smtk.model.ModelGeometryStyleName(mod.geometryStyle())))\n print('\\nFree cells:\\n %s' %\n '\\n '.join([x.name() for x in mod.cells()]))\n print('\\nGroups:\\n %s\\n' %\n '\\n '.join([x.name() for x in mod.groups()]))\n if (numCells >= 0 and len(mod.cells()) != numCells) or (numGroups >= 0 and len(mod.groups()) != numGroups):\n print(smtk.io.SaveJSON.fromModelResource(self.resource))\n\n self.assertEqual(\n mod.geometryStyle(), smtk.model.DISCRETE,\n 'Expected a discrete model, got a {mt} model'.format(\n mt=smtk.model.ModelGeometryStyleName(mod.geometryStyle())))\n if numCells >= 0:\n self.assertEqual(len(mod.cells()), numCells,\n 'Expected {nc} free cells'.format(nc=numCells))\n if numGroups >= 0:\n self.assertEqual(len(mod.groups()), numGroups,\n 'Expected {ng} groups'.format(ng=numGroups))\n if validator:\n validator(mod)",
"def test_mmlut_initialization(self):\n mml = self.cal.mmlut\n assert np.all(mml.origin == np.zeros(3))\n assert mml.nr == 0\n assert mml.nz == 0\n assert mml.rw == 0\n assert mml.data is None\n # assert isinstance(mml.data, np.ndarray)\n # assert mml.data.shape == (3,)",
"def test_create_grid_matrix(self):\n test_cases = ((8, (0, 0)), (48, (19, 19)))\n grid = e0011.import_grid_from_file('grid.txt')\n for test_case in test_cases:\n self.assertEqual(e0011.create_grid_matrix(grid)[test_case[1]], test_case[0])",
"def test_has_matrix_false_concrete_template(self):\n\n rng = qml.numpy.random.default_rng(seed=42)\n shape = qml.StronglyEntanglingLayers.shape(n_layers=2, n_wires=2)\n params = rng.random(shape)\n op = qml.StronglyEntanglingLayers(params, wires=range(2))\n assert not op.has_matrix",
"def test_create_zero_matrix_1_1(self):\n expected = [[0]]\n actual = create_zero_matrix(1, 1)\n self.assertEqual(expected, actual)",
"def verif_matrix(self):\n masks = get_training_masks()[:200]\n fichier = open(\"output/matrice.txt\", \"w\")\n\n print(\"Training model\")\n res_t, res_th = load_histograms(masks=masks)\n\n print(\"Testing model\")\n test_files = get_test_masks()[:20]\n recall = np.zeros((10, 10))\n precision = np.zeros((10, 10))\n accuracy = np.zeros((10, 10))\n\n distance = 400\n\n for w in range(50, 201, 50):\n for h in range(50, 201, 50):\n print(\"w\", w, \"h\", h)\n Y_pred = np.array([])\n Y_true = np.array([])\n proba = np.array([])\n for name, mask in test_files:\n image_test = cv2.imread(name)\n prediction = get_predicted_masks(image_test, mask, w, h, 0.15, res_t, res_th, distance)\n Y_pred = np.append(Y_pred, prediction.flatten())\n Y_true = np.append(Y_true, mask.flatten())\n fichier.write(str(w) + \" \" + str(h) + \" \" + str(met.get_confusion_matrix(Y_true, Y_pred))+\"\\n\\n\")",
"def check_model(path):\n # try running the model\n from sasmodels.sasview_model import load_custom_model\n Model = load_custom_model(path)\n model = Model()\n q = np.array([0.01, 0.1])\n Iq = model.evalDistribution(q)\n qx, qy = np.array([0.01, 0.01]), np.array([0.1, 0.1])\n Iqxy = model.evalDistribution([qx, qy])\n\n # check the model's unit tests run\n from sasmodels.model_test import run_one\n result = run_one(path)\n\n return result",
"def test_load_micromodels(self):\n self.orchestrator.build_micromodels()\n self.orchestrator.flush_cache()\n self.orchestrator.load_micromodels()\n for micromodel_name in self.micromodel_names:\n self.assertIn(micromodel_name, self.orchestrator.cache)",
"def test_model_load_succeeds_with_missing_data_key_when_data_exists_at_default_path(\n tf_keras_model, model_path, data\n):\n mlflow.tensorflow.save_model(\n tf_keras_model, path=model_path, keras_model_kwargs={\"save_format\": \"h5\"}\n )\n shutil.move(os.path.join(model_path, \"data\", \"model.h5\"), os.path.join(model_path, \"model.h5\"))\n model_conf_path = os.path.join(model_path, \"MLmodel\")\n model_conf = Model.load(model_conf_path)\n flavor_conf = model_conf.flavors.get(mlflow.tensorflow.FLAVOR_NAME, None)\n assert flavor_conf is not None\n del flavor_conf[\"data\"]\n model_conf.save(model_conf_path)\n\n model_loaded = mlflow.tensorflow.load_model(model_path)\n assert all(model_loaded.predict(data[0]) == tf_keras_model.predict(data[0]))",
"def test_model_version():\n\n assert model_version() == '0.19.0'",
"def test_save_and_load(self):\n\n with test_util.TempDirectory() as f:\n self.model.save(f)\n self.model = tc.load_model(f)\n loaded_model = tc.load_model(f)\n\n self.test__list_fields()\n print(\"Saved model list fields passed\")\n\n self.test_get()\n print(\"Saved model get passed\")\n\n self.test_summaries()\n print(\"Saved model summaries passed\")",
"def test_model_load_failure(self, mkey, mout, caplog):\n\n def model_load_rout(stime, verr=False, ierr=False):\n if verr:\n raise ValueError('Acceptable model load error')\n elif ierr:\n raise IOError('Acceptable model load error')\n else:\n raise TypeError('Unacceptable model load error')\n\n self.required_kwargs['model_load_rout'] = model_load_rout\n\n if mkey in ['verr', 'ierr']:\n self.required_kwargs['model_load_kwargs'] = {mkey: True}\n\n with caplog.at_level(logging.INFO, \"pysatModels\"):\n self.out = match.collect_inst_model_pairs(\n *self.input_args, **self.required_kwargs)\n lout = caplog.text\n\n assert self.out is mout\n assert lout.find('unable to load model data at') >= 0\n else:\n self.required_kwargs['model_load_kwargs'] = {}\n with pytest.raises(TypeError, match=mout):\n match.collect_inst_model_pairs(*self.input_args,\n **self.required_kwargs)\n return",
"def test_save_load(self):\n n_samples = 10\n n_features = 3\n n_tasks = 1\n\n # Create a dataset and an input function for processing it.\n\n np.random.seed(123)\n X = np.random.rand(n_samples, 2, n_features)\n y = np.zeros((n_samples, n_tasks))\n dataset = deepchem.data.NumpyDataset(X, y)\n\n model = deepchem.models.ScScoreModel(n_features, dropouts=0)\n\n model.fit(dataset, nb_epoch=1)\n pred1 = model.predict(dataset)\n\n model.save()\n model = TensorGraph.load_from_dir(model.model_dir)\n\n pred2 = model.predict(dataset)\n for m1, m2 in zip(pred1, pred2):\n self.assertTrue(np.all(m1 == m2))",
"def test_fit_classes():\n model, _ = load_iris_mdoel()\n np.testing.assert_array_equal(model.classes, np.array([0, 1, 2]))",
"def test_save_mat(fname, save_dict):\n\n test_dict = load_mat(fname)\n convert_1darray_to_item(save_dict, FLOWCASE_KW) # Loaded state for save_dict\n\n # Compare keys of dictionaries\n if save_dict.keys() == test_dict.keys():\n for key in save_dict.keys():\n if isinstance(save_dict[key], int):\n if save_dict[key] == test_dict[key]:\n continue\n else:\n sys.exit('ERROR in save_dict_to_mat: '\n 'Saved and loaded dictionary are incompatible in key ' + str(key))\n elif isinstance(save_dict[key], str):\n if save_dict[key] == test_dict[key]:\n continue\n else:\n sys.exit('ERROR in save_dict_to_mat: '\n 'Saved and loaded dictionary are incompatible in key ' + str(key))\n elif isinstance(save_dict[key], np.ndarray):\n if all(save_dict[key].flatten() == test_dict[key].flatten()):\n continue\n else:\n sys.exit('ERROR in save_dict_to_mat: '\n 'Saved and loaded dictionary are incompatible in key ' + str(key))\n else:\n return -1\n\n return 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Displays the main report from llvm-cov
|
def show_summary(cov_executable):
    print(
        subprocess.run(
            ["llvm-cov", "report", "--instr-profile=default.profdata", cov_executable]
        )
    )
|
[
"def coverage_report(c):\n c.run('coverage html && open htmlcov/index.html', pty=True)",
"def test():\r\n from spyderlib.utils.qthelpers import qapplication\r\n app = qapplication()\r\n widget = CoverageWidget(None)\r\n widget.show()\r\n widget.analyze(__file__)\r\n sys.exit(app.exec_())",
"def coverage_report():\n sh(\"coverage combine\")\n sh(\"coverage report\")\n sh(\"coverage html\")\n info(\"WRITTEN TO: build/coverage.html/\")\n # -- DISABLED: sh(\"coverage xml\")",
"def view():\n coverage.launch_report(always=True)\n return True",
"def coverage_report(self):\n verbose = '--quiet' not in sys.argv\n self.cov.stop()\n if verbose:\n log.info(\"\\nCoverage Report:\")\n try:\n include = ['%s*' % package for package in self.packages]\n omit = ['*tests*']\n self.cov.report(include=include, omit=omit)\n self.cov.save()\n self.cov.xml_report(include=include, omit=omit)\n except misc.CoverageException as e:\n log.info(\"Coverage Exception: %s\" % e)",
"def get_lcov(self, fr, analysis, outfile=None):\n outfile.write(\"TN:\\n\")\n outfile.write(f\"SF:{fr.relative_filename()}\\n\")\n source_lines = fr.source().splitlines()\n\n for covered in sorted(analysis.executed):\n # Note: Coverage.py currently only supports checking *if* a line\n # has been executed, not how many times, so we set this to 1 for\n # nice output even if it's technically incorrect.\n\n # The lines below calculate a 64-bit encoded md5 hash of the line\n # corresponding to the DA lines in the lcov file, for either case\n # of the line being covered or missed in coverage.py. The final two\n # characters of the encoding (\"==\") are removed from the hash to\n # allow genhtml to run on the resulting lcov file.\n if source_lines:\n line = source_lines[covered-1].encode(\"utf-8\")\n else:\n line = b\"\"\n hashed = base64.b64encode(md5(line).digest()).decode().rstrip(\"=\")\n outfile.write(f\"DA:{covered},1,{hashed}\\n\")\n\n for missed in sorted(analysis.missing):\n assert source_lines\n line = source_lines[missed-1].encode(\"utf-8\")\n hashed = base64.b64encode(md5(line).digest()).decode().rstrip(\"=\")\n outfile.write(f\"DA:{missed},0,{hashed}\\n\")\n\n outfile.write(f\"LF:{len(analysis.statements)}\\n\")\n outfile.write(f\"LH:{len(analysis.executed)}\\n\")\n\n # More information dense branch coverage data.\n missing_arcs = analysis.missing_branch_arcs()\n executed_arcs = analysis.executed_branch_arcs()\n for block_number, block_line_number in enumerate(\n sorted(analysis.branch_stats().keys())\n ):\n for branch_number, line_number in enumerate(\n sorted(missing_arcs[block_line_number])\n ):\n # The exit branches have a negative line number,\n # this will not produce valid lcov. Setting\n # the line number of the exit branch to 0 will allow\n # for valid lcov, while preserving the data.\n line_number = max(line_number, 0)\n outfile.write(f\"BRDA:{line_number},{block_number},{branch_number},-\\n\")\n\n # The start value below allows for the block number to be\n # preserved between these two for loops (stopping the loop from\n # resetting the value of the block number to 0).\n for branch_number, line_number in enumerate(\n sorted(executed_arcs[block_line_number]),\n start=len(missing_arcs[block_line_number]),\n ):\n line_number = max(line_number, 0)\n outfile.write(f\"BRDA:{line_number},{block_number},{branch_number},1\\n\")\n\n # Summary of the branch coverage.\n if analysis.has_arcs():\n branch_stats = analysis.branch_stats()\n brf = sum(t for t, k in branch_stats.values())\n brh = brf - sum(t - k for t, k in branch_stats.values())\n outfile.write(f\"BRF:{brf}\\n\")\n outfile.write(f\"BRH:{brh}\\n\")\n\n outfile.write(\"end_of_record\\n\")",
"def lcov_summary(context, tracefile):\n cmd = ['lcov',\n '--gcov-tool', GCOV_EXECUTABLE,\n '--config-file', str(context.args.lcov_config_file),\n '--summary', tracefile]\n rc = subprocess.run(cmd, stderr=subprocess.STDOUT) # lcov prints to stderr for some reason\n return rc",
"def html_it():\n import coverage\n cov = coverage.coverage()\n cov.start()\n import tabbed # pragma: nested\n cov.stop() # pragma: nested\n cov.html_report(tabbed, directory=\"../html_tabbed\")",
"def determine_proper_llvm_cov(self):\n version_regex = re.compile('.*LLVM version ([\\d]+)\\.([\\d]+).*')\n return self.find_acceptable_binary('llvm-cov', version_regex, 3, 5)",
"def print_report(args: Args, blocks: List[FencedBlock]) -> None:\n report = []\n filename = click.format_filename(args.markdown_file)\n title1 = filename + \" fenced blocks\"\n if blocks:\n text1 = fenced_block_report(blocks, title=title1)\n report.append(text1)\n\n roles = [b.role.name for b in blocks]\n counts = Counter(roles)\n\n number_of_test_cases = counts[\"CODE\"] + counts[\"SESSION\"]\n report.append(\"{} test cases.\".format(number_of_test_cases))\n if counts[\"SKIP_CODE\"] > 0:\n report.append(\"{} skipped code blocks.\".format(counts[\"SKIP_CODE\"]))\n if counts[\"SKIP_SESSION\"] > 0:\n report.append(\n \"{} skipped interactive session blocks.\".format(counts[\"SKIP_SESSION\"])\n )\n\n num_missing_output = counts[\"CODE\"] - counts[\"OUTPUT\"]\n if num_missing_output:\n report.append(\"{} code blocks with no output block.\".format(num_missing_output))\n\n # del blocks are blocks that will be ignored.\n num_del = counts[\"DEL_CODE\"] + counts[\"DEL_OUTPUT\"]\n if num_del:\n report.append('{} blocks marked \"del-\". They are not tested.'.format(num_del))\n\n # Note if caller wanted --setup and its not happening.\n # Note if caller wanted --setup-doctest and its not happening.\n # This occurs if:\n # no --setup option\n # setup block was not found\n # setup block was skipped\n if args.setup_doctest and not counts[\"SETUP\"]:\n report.append(\"No setup block found, not honoring --setup-doctest.\")\n else:\n if args.setup and not counts[\"SETUP\"]:\n report.append(\"No setup block found.\")\n\n # Note if caller wanted --teardown and its not happening.\n if args.teardown and not counts[\"TEARDOWN\"]:\n report.append(\"No teardown block found.\")\n\n if args.skips:\n report.append(\"\")\n title2 = \"skip pattern matches (blank means no match)\"\n text2 = skips_report(args.skips, blocks, title=title2)\n report.append(text2)\n print(\"\\n\".join(report))",
"def runCoverage(self):\n cov = \"\"\n notes = []\n dele = True\n wid = \"\"\n\n self.__ifVerbose(\"Running Genome Coverage Statistics\")\n samDir = self.outdir + \"/SamTools\"\n i = datetime.now()\n self.__CallCommand(['coverage estimator', self.fOut + \"/\" + self.name + '_Coverage.txt'],\n ['python', self.__coverage_estimator, samDir + '/coverage.txt', self.name])\n self.__CallCommand(['genome region coverage estimator', samDir + '/genome_region_coverage_1.txt'],\n ['python', self.__resis_parser, samDir + '/bed_1_sorted_coverage.txt', samDir + '/coverage.txt', self.name])\n self.__CallCommand(['genome region coverage estimator', samDir + '/genome_region_coverage_2.txt'],\n ['python', self.__resis_parser, samDir + '/bed_2_sorted_coverage.txt', samDir + '/coverage.txt', self.name])\n self.__CallCommand(['cat' , samDir + '/genome_region_coverage.txt'],['cat', samDir + '/genome_region_coverage_1.txt', samDir + '/genome_region_coverage_2.txt'])\n self.__CallCommand(['sort', self.fOut + \"/\" + self.name + '_genome_region_coverage.txt' ],['sort', '-nk', '3', samDir + '/genome_region_coverage.txt'])\n self.__CallCommand('sed',['sed', '-i', '1d', self.fOut + \"/\" + self.name + '_genome_region_coverage.txt'])\n \n if os.path.isfile(self.fOut + \"/\" + self.name + '_Coverage.txt'):\n fh2 = open(self.fOut + \"/\" + self.name + '_Coverage.txt','r')\n for line in fh2:\n if line.startswith(\"Sample\"):\n continue\n cov_str = line.split(\"\\t\")\n cov = cov_str[1]\n wid = cov_str[2]\n if cov != '' and int(cov) < 10:\n self.__low = \"positive\"\n self.__logFH2.write(i.strftime('%Y/%m/%d %H:%M:%S') + \"\\t\" + \"Input:\" + \"\\t\" + self.name + \"\\t\" + \"low genome coverage depth\\n\")\n if wid != '' and float(wid) < 94.99:\n self.__low = \"positive\"\n self.__logFH2.write(i.strftime('%Y/%m/%d %H:%M:%S') + \"\\t\" + \"Input:\" + \"\\t\" + self.name + \"\\t\" + \"low genome coverage width\\n\")\n fh2.close()",
"def generate_html_report(self):\n coverage_bin = os.path.join(self.source_directory, 'tools', 'code_coverage',\n 'coverage.py')\n report_directory = os.path.join(self.output_directory, 'HTML')\n\n coverage_args = ['-p', self.prof_data]\n coverage_args += ['-b', self.build_directory]\n coverage_args += ['-o', report_directory]\n coverage_args += self.build_targets\n\n # Only analyze the directories of interest.\n coverage_args += ['-f', 'core']\n coverage_args += ['-f', 'fpdfsdk']\n coverage_args += ['-f', 'fxbarcode']\n coverage_args += ['-f', 'fxjs']\n coverage_args += ['-f', 'public']\n coverage_args += ['-f', 'samples']\n coverage_args += ['-f', 'xfa']\n\n # Ignore test files.\n coverage_args += ['-i', '.*test.*']\n\n # Component view is only useful for Chromium\n coverage_args += ['--no-component-view']\n\n return self.call([coverage_bin] + coverage_args) == 0",
"def coverage():\n sh('pytest --cov -- tests/unit')\n sh('coverage xml')",
"def coverage():\n local(\"nosetests --with-coverage --cover-package=botan\")",
"def show_configs(ini, toml):\n ini, ini_vals = _read_config(ini, \"covrc\")\n toml, toml_vals = _read_config(toml, \"covrc.toml\")\n for key, val in ini_vals.items():\n if val != toml_vals[key]:\n cog.error(f\"Mismatch! {key}: {val!r} vs {toml_vals[key]!r}\")\n\n ini2 = re.sub(r\"(?m)^\\[\", \"[coverage:\", ini)\n print()\n print(\".. tabs::\\n\")\n for name, syntax, text in [\n (\".coveragerc\", \"ini\", ini),\n (\"pyproject.toml\", \"toml\", toml),\n (\"setup.cfg, tox.ini\", \"ini\", ini2),\n ]:\n print(f\" .. code-tab:: {syntax}\")\n print(f\" :caption: {name}\")\n print()\n print(textwrap.indent(text, \" \" * 8))",
"def print_report(self):\n # print out the number and list of overall deleted files\n print(f\"{len(self.deleted_files)} deleted:\")\n for source, deleted_file in self.deleted_files:\n print(f\"{source} {deleted_file}\")\n print()\n\n # find added/updated input files: in this case the source # will consist of\n # the test name and the input test name separated by '/'.\n updated_input_files = [\n (source, updated_file) for (source, updated_file) in self.updated_files if \"/\" in source\n ]\n\n # print out the number and list of overall added/updated non-model files\n print(f\"{len(self.updated_files)} added/updated:\")\n for source, updated_file in self.updated_files:\n print(f\"{source} {updated_file}\")\n print()\n\n # now print out missing and/or empty updated output directories\n print(f\"{len(self.missing_or_empty_sources)} missing/empty sources in updated outputs:\")\n for source in self.missing_or_empty_sources:\n print(f\"{source}\")\n print()\n\n # if we updated any input files, let the user know that they need to\n # re-run the tests and update test outputs\n if len(updated_input_files) > 0:\n print(\n f\"WARNING: {len(updated_input_files)} input files for rsmcompare/rsmsummarize \"\n f\"tests have been updated. You need to re-run these tests and update test outputs\"\n )",
"def bundle_coverage(opts):\n info = firmware_pb2.FirmwareArtifactInfo()\n info.bcs_version_info.version_string = opts.bcs_version\n bundle_dir = get_bundle_dir(opts)\n zephyr_dir = pathlib.Path(__file__).parent\n platform_ec = zephyr_dir.resolve().parent\n build_dir = platform_ec / 'build/zephyr-coverage'\n tarball_name = 'coverage.tbz2'\n tarball_path = bundle_dir / tarball_name\n cmd = ['tar', 'cvfj', tarball_path, 'lcov.info']\n subprocess.run(cmd, cwd=build_dir, check=True)\n meta = info.objects.add()\n meta.file_name = tarball_name\n meta.lcov_info.type = firmware_pb2.FirmwareArtifactInfo.LcovTarballInfo.LcovType.LCOV\n\n write_metadata(opts, info)",
"def runCoverage(self):\n cov = \"\"\n wid = \"\"\n notes = []\n dele = True\n\n self.__ifVerbose(\"Running Genome Coverage Statistics\")\n samDir = self.outdir + \"/SamTools\"\n i = datetime.now()\n self.__CallCommand(['coverage estimator', self.fOut + \"/\" + self.name + '_Coverage.txt'],\n [self.__coverage_estimator, samDir + '/coverage.txt'])\n self.__CallCommand(['genome region coverage estimator', self.fOut + \"/\" + self.name + '_genome_region_coverage.txt'],\n [self.__resis_parser, samDir + '/bed_sorted_coverage.txt', samDir + '/coverage.txt'])\n\n if os.path.isfile(self.fOut + \"/\" + self.name + '_Coverage.txt'):\n fh2 = open(self.fOut + \"/\" + self.name + '_Coverage.txt','r')\n for line in fh2:\n if line.startswith(\"Average\"):\n cov_str = line.split(\":\")\n cov = cov_str[1].strip(\" \")\n if line.startswith(\"Percentage\"):\n wid_str = line.split(\":\")\n wid = wid_str[1].strip(\" \")\n if cov != '' and float(cov.strip()) < 10:\n self.__low = \"positive\"\n self.__logFH2.write(i.strftime('%Y/%m/%d %H:%M:%S') + \"\\t\" + \"Input:\" + \"\\t\" + self.name + \"\\t\" + \"low genome coverage depth\\n\")\n if wid != '' and float(wid) < 94.99:\n self.__low = \"positive\"\n self.__logFH2.write(i.strftime('%Y/%m/%d %H:%M:%S') + \"\\t\" + \"Input:\" + \"\\t\" + self.name + \"\\t\" + \"low genome coverage width\\n\")\n fh2.close()\n self.__CallCommand(['loci deletion parser', self.fOut + \"/\" + self.name + '_deleted_loci.txt'],\n [self.__del_parser, self.fOut + \"/\" + self.name, self.name, self.__bedlist])\n if os.path.isfile(self.fOut + \"/\" + self.name + '_deleted_loci.txt'):\n fh3 = open(self.fOut + \"/\" + self.name + '_deleted_loci.txt','r')\n for line in fh3:\n fields = line.rstrip(\"\\r\\n\").split(\"\\t\")\n notes.append(fields[6])\n for keys in notes:\n if \"Complete\" in keys or \"Partial\" in keys:\n dele = False\n if dele == False:\n fh3.close()\n else:\n fh3.close()\n self.__CallCommand('rm', ['rm', self.fOut + \"/\" + self.name + '_deleted_loci.txt'])",
"def test_Matrix_coverage(self):\n\n coverage = \"not found\"\n for l in check_output([\"python3\", \"coverage.py\", \"-r\", \".\", \"-f\", \"Matrix.cpp\"]).split(\"\\n\"):\n if l.startswith(\"Matrix.cpp\"):\n coverage = l.split()[3]\n\n self.assertEqual(coverage, \"100%\", msg=\"Test coverage is not 100%\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Expand mpi sequences for each omp
|
def expand_mpi(self, mpi):
    for omp in self.omp:
        max_ranks = int(self.max_cores / omp)
        if mpi is None:
            self.mpi[omp] = tools.expand_power_sequence(largest=max_ranks)
        elif isinstance(mpi, int):
            self.mpi[omp] = tools.expand_power_sequence(largest=mpi)
        else:
            self.mpi[omp] = np.array(mpi)
|
[
"def moveSequence(i : int, seq): # TEST\n for elt in seq:\n elt.moveBy(i)",
"def addGapsToHMMSeqs(self):\n for seq in self.records:\n seq.seq.insertAllGaps(self.total_gaps)",
"def seq_solve(cols,rows,board,processfirst,seq_processor,seq_priority_stgy,\n cache):\n \n # create stack\n procstack = []\n \n # push each column onto processing stack\n for i in range(len(cols)):\n procstack.append((COL,i))\n \n # push each row onto processing stack\n for i in range(len(rows)):\n procstack.append((ROW,i))\n\n # re-add priority rows/cols to top of stack\n for c in processfirst:\n procstack.remove(c)\n procstack.append(c)\n\n # pop items from processing stack until empty\n while len(procstack) > 0:\n rc,num = seq_priority_stgy(procstack,board,cols,rows)\n # TODO: DRY!\n if rc == ROW:\n # process row\n procres = None\n cells,hints = [c for c in board[num]],rows[num]\n # attempt cache lookup\n if cache is not None and (tuple(cells),hints) in cache:\n procres = cache[tuple(cells),hints]\n else:\n proc = seq_processor(cells,hints)\n while procres is None:\n procres = proc.next()\n yield \n if cache is not None:\n cache[tuple(cells),hints] = procres\n for i,v in procres:\n # (re)push altered col onto top of stack to re-process\n if (COL,i) in procstack:\n procstack.remove((COL,i))\n procstack.append((COL,i))\n # update board\n board[num][i] = v\n elif rc==COL:\n # process col\n procres = None\n cells,hints = [r[num] for r in board],cols[num]\n # attempt cache lookup\n if cache is not None and (tuple(cells),hints) in cache:\n procres = cache[tuple(cells),hints]\n else:\n proc = seq_processor([r[num] for r in board], cols[num])\n while procres is None:\n procres = proc.next()\n yield\n if cache is not None:\n cache[tuple(cells),hints] = procres\n for i,v in procres:\n # (re)push altered row onto top of stack to re-process\n if (ROW,i) in procstack:\n procstack.remove((ROW,i))\n procstack.append((ROW,i))\n # update board\n board[i][num] = v\n\n # check whether the board was solved\n solved = True\n for row in board:\n for col in row:\n if col is None:\n # found unknown - board is unsolved\n solved = False\n break\n if not solved: break\n \n # return whether solved or not\n yield solved",
"def extend(self, sequence):\n for item in sequence:\n self.append(item)",
"def _expand_to_particles(self, inputs):\n # [B, ...] -> [B*p, ...]\n inputs = torch.repeat_interleave(\n inputs, self._particles_per_replica, dim=0)\n if self._num_dynamics_replicas > 1:\n # [B*p, ...] -> [B*p, n, ...]\n inputs = inputs.unsqueeze(1).expand(\n -1, self._num_dynamics_replicas, *inputs.shape[1:])\n\n return inputs",
"def expandIupac(seq):\n # http://stackoverflow.com/questions/27551921/how-to-extend-ambiguous-dna-sequence\n d = {'A': 'A', 'C': 'C', 'B': 'CGT', 'D': 'AGT', 'G': 'G', \\\n 'H': 'ACT', 'K': 'GT', 'M': 'AC', 'N': 'GATC', 'S': 'CG', \\\n 'R': 'AG', 'T': 'T', 'W': 'AT', 'V': 'ACG', 'Y': 'CT', 'X': 'GATC'}\n seqs = []\n for i in product(*[d[j] for j in seq]):\n seqs.append(\"\".join(i))\n return seqs",
"def plant(sequences, motifs, sl, sc, ml):\n locations = [0] * sc\n for i in range(sc):\n loc = random.randint(0, sl - ml - 1)\n locations[i] = loc\n sequences[i][loc:loc+ml] = motifs[i]\n return sequences, locations",
"def interSequence(seq_maps):\n seq_map = {}\n \n return seq_map",
"def map(self, seqs, ids):\n return PoolIterator(self, seqs, ids, self.nproc * 2 + 10)",
"def random_sequence_qmc(size_mv, i, n=1, randomized=True):\n size_mv = np.int(size_mv)\n n = np.int(n)\n random_seed = random.randrange(10**9)\n #u = np.array(randtoolbox.sobol(n=n, dim=size_mv, init=(i==0), scrambling=0, seed=random_seed)).reshape((n,size_mv))\n if randomized:\n shift = np.random.rand(1,size_mv)\n u = np.mod(np.array(randtoolbox.sobol(n=n, dim=size_mv, init=(i==0))).reshape((n,size_mv)) + shift, 1)\n else: \n u = np.array(randtoolbox.sobol(n=n, dim=size_mv, init=(i==0))).reshape((n,size_mv))\n \n while test_random(u):\n random_seed = random.randrange(10**9)\n #u = np.array(randtoolbox.sobol(n=n, dim=size_mv, init=(i==0), scrambling=0, seed=random_seed)).reshape((n,size_mv))\n if randomized:\n shift = np.random.rand(1,size_mv)\n u = np.mod(np.array(randtoolbox.sobol(n=n, dim=size_mv, init=(i==0))).reshape((n,size_mv)) + shift, 1)\n else: \n u = np.array(randtoolbox.sobol(n=n, dim=size_mv, init=(i==0))).reshape((n,size_mv))\n \n return(u)",
"def multistream_force_align(orig_streams, mode='fill'):\n INPUT_IDX = 0\n TARGET_IDX = 1\n LEN_IDX = 2\n new_streams = [([], [], s[2]) for s in orig_streams]\n curr_idxs = [0]*len(orig_streams)\n inputs, targets, input_lens = extract_stream_elements(orig_streams)\n # for each sequence\n for i, l1 in enumerate(input_lens[0]):\n # compute the lens, find stream with longest length\n lens = [input_len_vec[i] for input_len_vec in input_lens]\n max_idx = np.argmax(lens)\n # compute the number of copies to generate\n copies_to_make = [input_lens[max_idx][i] - l[i] for l in input_lens]\n # for each stream, append the original stream and copies to make to new stream\n for j in range(len(orig_streams)):\n input_vec = inputs[j]\n target_vec = targets[j]\n l = lens[j] # length of sequence for current stream\n for k in range(l):\n new_streams[j][INPUT_IDX].append(input_vec[curr_idxs[j] + k])\n new_streams[j][TARGET_IDX].append(target_vec[curr_idxs[j] + k])\n copies = copies_to_make[j]\n # make copies to fill shorter streams\n for k in range(copies):\n last_element = input_vec[curr_idxs[j] + l - 1]\n last_element_target = target_vec[curr_idxs[j] + l - 1]\n new_streams[j][INPUT_IDX].append(np.copy(last_element))\n new_streams[j][TARGET_IDX].append(np.copy(last_element_target))\n new_streams[j][LEN_IDX][i] = l + copies\n curr_idxs[j] += l\n # convert the lists to numpy arrays\n new_streams = [(np.array(x[INPUT_IDX]), np.array(x[TARGET_IDX]), x[LEN_IDX]) for x in new_streams]\n return new_streams",
"def create_long_sequence():\n\n return final_sequences('long')",
"def _set_seq(self,sequence,start=0):\n if start+len(sequence) > self._slen: \n sys.stderr.write(\"Error not long enough to add\\n\")\n sys.exit()\n z = 0\n for i in xrange(start, start+len(sequence)):\n self._set_nt(sequence[z],i)\n z+=1",
"def collapse_sequences(overlay):\n sequences = []\n for node in overlay.nodes:\n if any([node in seq for seq in sequences]): continue\n seq = [node]\n while len(node.consumers) == 1 and len(list(node.consumers)[0].inputs) == 1:\n node = list(node.consumers)[0]\n seq.append(node)\n if len(seq) > 1:\n sequences.append(seq)\n\n structure_map = {}\n for seq in sequences:\n structure_map[seq[-1]] = OverlayStructure(\"Sequence\", {\"sequence\": seq})\n\n return overlay.collapse_structures(structure_map)",
"def generate_random_seq(seq_len, number_seq, number_mutations, list_all_20_aa, probabilities_all_20_aa):\n\n # seq_list = []\n # sublist = ''.join(np.random.choice(list_all_20_aa, p=probabilities_all_20_aa) for _ in range(subset_num))\n # subdict = { my_key: prob_table[my_key] for my_key in sublist }\n # pick_list = []\n # for key, prob in subdict.items():\n # pick_list.extend([key] * int((prob * 100)))\n\n # generate a reference sequence based on the aa propensity of TM or non-TM region\n\n orig_seq = \"\".join(np.random.choice(list_all_20_aa, p=probabilities_all_20_aa) for _ in range(int(seq_len)))\n\n # generate sequence cluster by randomly replacing predetermined number of residues in reference seq\n seq_matrix = []\n # firstly, choose a set of positions whoose aa will be replaced\n for n in range(number_seq):\n # sys.write something to show that the programming is still running\n if n != 0 and n % 200 == 0:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n # create indices (list of positions)\n inds = list(range(seq_len))\n # number of mutations is calculated beforehand. E.g. if ident=0.9, seqlen=100, number_mutations = 10)\n # create a sample of positions to mutate, e.g. [77, 81, 18, 46, 42, 53, 65, 2, 89, 69, ..... and so on\n list_of_aa_positions_to_be_mutated = random.sample(inds, number_mutations)\n orig_seq_as_list = list(orig_seq)\n # based on aa propensity, replace the residue at each chosen position\n for pos in list_of_aa_positions_to_be_mutated:\n orig_seq_as_list[pos] = np.random.choice(list_all_20_aa, p=probabilities_all_20_aa)\n seq_incl_mutations = \"\".join(orig_seq_as_list)\n\n # append each new sequence to the seq_matrix\n seq_matrix.append(list(seq_incl_mutations))\n\n # convert the seq_matrix into a np.array to ease further steps (slicing columns)\n seq_matrix = np.array(seq_matrix)\n\n return orig_seq, seq_matrix",
"def append_tomography_sequence( orig_sequence, quorum ):\n # This is kind of like a tensor product for sequences\n # The situation is complicated in that the original waveform list is stored part of a segment which is part of a sequence,\n # and we have to drill down to get to the appropriate lists.\n \n # When the pulse is \"sampled\" (ie converted from a descriptive list to a series of samples that are programmed into the AWG)\n # the list that describes the pulses is traversed backwards. This means that the tomography pulses that have been appended\n # push the original pulses further back in time relative to the start of the measurement. We don't need to make any further\n # adjustments.\n \n # The outer loop effectively refers to AWG channel numbers,\n # so the two inputs have to have this outer loop unrolled together\n \n for this_sequence, appendix_seq in zip(orig_sequence.sequences, quorum.sequences):\n \n new_segment_list = []\n\n new_counts = []\n new_wfm_counter = 0\n \n new_marker_masks = []\n new_markers_counter = 0\n\n new_adv_modes = []\n\n # Each sequence has separate lists for the pulse control structure\n # We need to unroll these together to be able to build extended versions.\n # This is all because the sequence uses a structure of lists rather than a list of structure\n \n for this_segment, this_count, this_marker_mask, this_adv_mode in zip(\n this_sequence.segments,\n this_sequence.counts,\n this_sequence.marker_masks,\n this_sequence.adv_modes):\n\n # The reason for the following assertion is that the tomography pulse is appended\n # to the segment and that whole segment will be repeated.\n assert (this_count == 1), \"We can't adapt a sequence that uses replicated waveforms\"\n\n # For this segment, we are going to build several new segments in the new segment list\n for segment_id, this_appendix_segment in enumerate(appendix_seq.segments):\n # copy the original segment non-destructively (shallow copy is not sufficient)\n new_segment = deepcopy(this_segment)\n # Append the appendix waveform(s) and update the pulse count summaries\n new_segment.waveform.extend(this_appendix_segment.waveform)\n \n # Special case for the sequence start marker. We only want the pulse to be copied for\n # segment zero. Otherwise force the first marker to blank (if it exists at all)\n\n if (segment_id > 0):\n if new_segment.markers != []:\n new_segment.markers[0] = []\n \n # add this to the new segment list we're building\n new_segment_list.append(new_segment)\n new_counts.append(this_count)\n new_marker_masks.append(this_marker_mask)\n new_adv_modes.append(this_adv_mode)\n \n new_wfm_counter += 1 # Not sure what these are supposed to count so it's difficult to make it right.\n new_markers_counter += 1 # Also there seems to be a bug in the original\n \n # now attach the new extended lists back onto the original sequence channel\n this_sequence.segments = new_segment_list\n this_sequence.counts = new_counts\n this_sequence.adv_modes = new_adv_modes\n this_sequence.marker_masks = new_marker_masks\n this_sequence._markers_counter = new_markers_counter\n this_sequence._wfm_counter = new_wfm_counter",
"def transform_multiple_evolve(self, n_iterations, p):\n sequence = self.axiom\n for _ in range(n_iterations):\n self.mutate_transformations(p)\n sequence = self.transform_sequence(sequence)\n return sequence",
"def seqProcessing(sp,sample_keys,mlineage,size_par,mean_depth,purity):\n all_cur_id = []\n all_mut_id = []\n for key in sample_keys:\n smuts = list(sp[key].neutral + sp[key].advant)\n all_cur_id += smuts\n sample_size = 10000 ## the number of cells for sequencing analysis\n sample_id = random.sample(all_cur_id,sample_size)\n id_count = Counter(sample_id)\n for x in id_count.keys():\n xlineage = traceLineage(mlineage,x)\n all_mut_id += xlineage*id_count[x]\n mut_count = Counter(all_mut_id)\n prob_par=size_par*1.0/(size_par+mean_depth)\n sampleAF = {}\n for x in mut_count.keys():\n true_af = mut_count[x]*0.5*purity/sample_size\n if true_af > 0.005:\n site_depth = np.random.negative_binomial(size_par,prob_par)\n if site_depth >= 10:\n var_reads = np.random.binomial(site_depth,true_af)\n seq_af = var_reads*1.0/site_depth\n if var_reads >= 3:\n sampleAF[str(x)] = (site_depth,seq_af)\n #sampleAF[str(x)] = seq_af\n return sampleAF",
"def composite_sequence(self):\n\n augmenters = self.augmenters\n \n from imgaug import augmenters as iaa\n self.seq = iaa.Sequential([\n #pick up one affine transformation\n iaa.OneOf([\n augmenters[\"fliplr\"],\n augmenters[\"flipud\"],\n augmenters[\"rotate\"] \n ]),\n\n #pick up one or tow CLAHE \n iaa.OneOf([\n augmenters[\"CLAHE\"],\n iaa.Sequential([\n augmenters[\"CLAHE\"],\n augmenters[\"CLAHE\"]\n ])\n ]),\n\n iaa.OneOf([\n iaa.OneOf([\n augmenters[\"impulse_noise\"], \n augmenters[\"poisson_noise\"], \n augmenters[\"gaussian_noise\"], \n augmenters[\"dropout\"]\n ]),\n iaa.OneOf([\n augmenters[\"gaussian_blur\"],\n augmenters[\"median_blur\"]\n ])\n ])\n ])\n \n return self.seq"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Expand leaf sequences for each omp
|
def expand_leaf(self, leaf):
    for omp in self.omp:
        if leaf is None:
            if self.scaling_type == 'strong':
                max_ranks = int(self.max_cores / omp)
                self.leaf[omp] = max_ranks * np.array(self.leaf_per_max_rank)
            elif self.scaling_type == 'weak':
                self.leaf[omp] = np.array(self.leaf_per_rank)
        else:
            self.leaf[omp] = tools.ensure_sequence(leaf)
|
[
"def expand_mpi(self, mpi):\n for omp in self.omp:\n max_ranks = int(self.max_cores / omp)\n\n if mpi is None:\n self.mpi[omp] = tools.expand_power_sequence(largest=max_ranks)\n elif isinstance(mpi, int):\n self.mpi[omp] = tools.expand_power_sequence(largest=mpi)\n else:\n self.mpi[omp] = np.array(mpi)",
"def expand_all(self):\n for node in self.nodes:\n node.expand_all()",
"def _collapse_helper(self) -> None:\n self._expanded = False\n for subtree in self._subtrees:\n subtree._collapse_helper()",
"def expand(self, node):\n for mv in node.pos.legal():\n child_pos = node.pos.copy()\n child_pos.make_move(mv)\n child = MCTSNode(child_pos)\n self.children[node].add(child)",
"def collapse_sequences(overlay):\n sequences = []\n for node in overlay.nodes:\n if any([node in seq for seq in sequences]): continue\n seq = [node]\n while len(node.consumers) == 1 and len(list(node.consumers)[0].inputs) == 1:\n node = list(node.consumers)[0]\n seq.append(node)\n if len(seq) > 1:\n sequences.append(seq)\n\n structure_map = {}\n for seq in sequences:\n structure_map[seq[-1]] = OverlayStructure(\"Sequence\", {\"sequence\": seq})\n\n return overlay.collapse_structures(structure_map)",
"def expandIupac(seq):\n # http://stackoverflow.com/questions/27551921/how-to-extend-ambiguous-dna-sequence\n d = {'A': 'A', 'C': 'C', 'B': 'CGT', 'D': 'AGT', 'G': 'G', \\\n 'H': 'ACT', 'K': 'GT', 'M': 'AC', 'N': 'GATC', 'S': 'CG', \\\n 'R': 'AG', 'T': 'T', 'W': 'AT', 'V': 'ACG', 'Y': 'CT', 'X': 'GATC'}\n seqs = []\n for i in product(*[d[j] for j in seq]):\n seqs.append(\"\".join(i))\n return seqs",
"def expand(self, state):\n successor_function = self.graph.successor(state.content)\n new_nodes_list = []\n for successor in successor_function:\n new_node = State(\n content=successor[0],\n total_cost=state.cost + successor[1],\n depth=state.depth + 1,\n parent=state\n )\n new_nodes_list.append(new_node)\n self.closure.add(state)\n return new_nodes_list",
"def __expand_concat(self, param_parent_node, concat_seq):\n\n # Dict that contains key-value mappings of pulse/sequence shorthand notations to their complete notations.\n self.xml_tags = {\"A\": \"ATOMICSEQUENCE\", \"C\": \"ConcatSequence\", \"D\": \"DELAYATOMICSEQUENCE\",\n \"SincRFPulse\": \"HARDRFPULSE\", \"TrapGradPulse\": \"TRAPGRADPULSE\", \"ADC\": \"TRAPGRADPULSE\"}\n seq_tree = ET.SubElement(param_parent_node, 'ConcatSequence', attrib=concat_seq.make_attrib())\n for event in concat_seq.events:\n if event.type == 'C':\n self.__expand_concat(seq_tree, event)\n elif event.type == 'A':\n atomic_seq = ET.SubElement(seq_tree, self.xml_tags[event.type], attrib=event.make_attrib())\n for pulse in event.events:\n if pulse.type == 'SincRFPulse':\n ET.SubElement(atomic_seq, self.xml_tags[pulse.type], attrib=pulse.make_attrib())\n elif pulse.type == 'TrapGradPulse':\n ET.SubElement(atomic_seq, self.xml_tags[pulse.type], attrib=pulse.make_attrib())\n elif pulse.type == 'ADC':\n ET.SubElement(atomic_seq, self.xml_tags[pulse.type], attrib=pulse.make_attrib())\n elif event.type == 'D':\n ET.SubElement(seq_tree, self.xml_tags[event.type], attrib=event.make_attrib())\n\n return seq_tree",
"def sequence_nodes(self, parent_id, ids):\n pass",
"def expand_iSeq_sSeq_Layer_to_Network(iSeq_set, sSeq_set, network):\n iSeq_set_exp = []\n sSeq_set_exp = []\n for i, layer in enumerate(network.layers):\n num_nodes = 0\n if isinstance(layer, system_parameters.ParamsSFASuperNode):\n if layer.pca_node_class:\n num_nodes += 1\n if layer.ord_node_class:\n num_nodes += 1\n if layer.exp_funcs != [identity] and layer.exp_funcs:\n num_nodes += 1\n if layer.red_node_class:\n num_nodes += 1\n if layer.clip_func or layer.clip_inv_func:\n num_nodes += 1\n if layer.sfa_node_class:\n num_nodes += 1\n elif isinstance(layer, system_parameters.ParamsSFALayer):\n num_nodes += 1 # For the switchboard\n if layer.pca_node_class:\n num_nodes += 1\n if layer.ord_node_class:\n num_nodes += 1\n if layer.exp_funcs != [identity] and layer.exp_funcs:\n num_nodes += 1\n if layer.red_node_class:\n num_nodes += 1\n if layer.clip_func or layer.clip_inv_func:\n num_nodes += 1\n if layer.sfa_node_class:\n num_nodes += 1\n # ## Modified to support iSeq_set, sSeqSet not lists\n # #if isinstance(iSeq_set,list):\n j = min(i, len(iSeq_set) - 1)\n # ##else:\n # ## j = i\n for _ in range(num_nodes):\n iSeq_set_exp.append(iSeq_set[j])\n sSeq_set_exp.append(sSeq_set[j])\n # print iSeq_set_exp\n return iSeq_set_exp, sSeq_set_exp",
"def expand(self):\n if self.Length <= 1:\n return False\n \n children = self.Children[:]\n self.Children[:] = []\n curr_pair = self\n start = self.Start\n end = self.End\n for i in range(1, self.Length):\n new_pair = StructureNode(Data=Stem(start+i, end-i, 1))\n curr_pair.Children.append(new_pair)\n new_pair.Parent = curr_pair\n curr_pair = new_pair\n new_pair.Children[:] = children\n for c in new_pair.Children:\n c.Parent = new_pair\n self.Length = 1\n return True",
"def collect(self):\n\n for i, t in enumerate(ndindex(2, 2, 2)):\n rshift = (t - array([0.5, 0.5, 0.5])) * self.lengths / 2\n M_child = mpolar.shift(rshift, OUTWARD, self.children[i].outward)\n \n self.outward += M_child",
"def create_long_sequence():\n\n return final_sequences('long')",
"def expand(self, policy):\n if self.children != {}: return\n actionWeights = policy(self.state)\n for action in actionWeights:\n succ = self.state.generateSuccessor(self.state.agent, action)\n self.children[action] = TreeNode(succ, actionWeights[action], self)",
"def expand(leaderboard, frequent_aa):\n return [i+(j,) for i in leaderboard for j in frequent_aa]",
"def rebuildPlusM(self):\n temp_path = []\n for process in self.path:\n if process[2][3] == \"+M_target\":\n temp_path.append([process[0], process[1], self.matrix_field(process[2][0]+1, process[2][1], self.match, \"M\")])\n temp_path.append([process[0]-1, process[1]-1, self.matrix_field(process[2][0], process[2][1], self.delete, \"I\")])\n elif process[2][3] == \"+M_input\":\n temp_path.append([process[0], process[1], self.matrix_field(process[2][0], process[2][1]+1, self.match, \"M\")])\n temp_path.append([process[0]-1, process[1]-1, self.matrix_field(process[2][0], process[2][1], self.delete, \"D\")])\n else:\n temp_path.append(process)\n self.path = temp_path",
"def merge_children(self):\n self.active = []\n for ch in self.children:\n self.active += ch.active",
"def __advanced_concat(self):\n self.number_suffix_tokens = self.__first_level_merge()\n expanded_nums = {idx: self.__expand(seq)\n for idx, seq in enumerate(self.number_suffix_tokens)}\n\n solutions = set(''.join(x) for x in itertools.product(\n *list(expanded_nums.values())))\n return solutions",
"def expandMultiElementItems(self):\n for model_idx in self.iterTopLevelElements(0):\n if len(model_idx.internalPointer().subnodes) > 1:\n self.setExpanded(model_idx, True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extract multidimensional table of model timing data
|
def extract_data(self):
    print('Extracting performance data')
    omp_dict = {}
    for omp, omp_set in self.models.items():
        leaf_dict = {}
        for leaf, leaf_set in omp_set.items():
            mpi_dict = {}
            for mpi, mod in leaf_set.items():
                mpi_dict[mpi] = mod.table.to_xarray()
            leaf_xr = xr.concat(mpi_dict.values(), dim='mpi')
            leaf_xr.coords['mpi'] = list(mpi_dict.keys())
            leaf_dict[leaf] = leaf_xr
        omp_xr = xr.concat(leaf_dict.values(), dim='leaf')
        omp_xr.coords['leaf'] = list(leaf_dict.keys())
        omp_dict[omp] = omp_xr
    full_xr = xr.concat(omp_dict.values(), dim='omp')
    full_xr.coords['omp'] = list(omp_dict.keys())
    self.data = full_xr
    self.extract_zupcs()
|
[
"def generate_time_tables(self):\n from dbanalysis.classes import weather_getter\n self.total_routes = 0\n self.failed_routes = 0\n w_getter = weather_getter.weather_getter()\n weather = w_getter.get_weather()\n import datetime\n dt = datetime.datetime.now()\n count = 0\n for route in self.routes:\n if len(route) < 1:\n continue \n times = self.time_tabler.get_dep_times_five_days(route,dt)\n \n for variation in times:\n self.total_routes +=1\n #if not self.selector.get_unavailable(route,int(variation)):\n try: \n count +=1\n print(count,route+'_'+str(variation))\n X=times[variation]\n \n # merge with weather data to add weather features.\n #X['matrix'] = pd.merge(X['matrix'],weather[['day','hour','rain','temp','vappr']],on = ['day','hour'])\n X['matrix']['rain']=0.08\n X['matrix']['temp']=10.0\n X['matrix']['vappr']=10.0\n \n \n \n self.run_route(X['matrix'],X['pattern'])\n try:\n pass\n except Exception as e:\n print(e)\n \n except Exception as e:\n print(e)\n self.failed_routes +=1\n try:\n pass \n except Exception as e:\n \n print(e,'broken timetabler',route)\n pass",
"def make_moment_table(mesh_data,key_descriptor,observable,qn,key_list=None,prune=False):\n # process results into dictionaries\n results_dict = make_results_dict(mesh_data,key_descriptor)\n\n # find common keys\n if (key_list is not None):\n common_key_list = key_list\n else:\n common_key_list = common_keys([results_dict])\n\n # tabulate values\n key_function = make_key_function(key_descriptor)\n table_data = []\n for key in common_key_list:\n results_data = results_dict[key]\n value = results_data.get_moment(observable,qn)\n if (prune and np.isnan(value)):\n continue\n table_data += [\n key + (value,)\n ]\n\n # convert to structured array\n table = np.array(\n table_data,\n dtype = list(key_descriptor)+[(\"value\",float)]\n )\n return table",
"def time_values_overview(self) -> pd.DataFrame:\n\n table = pd.DataFrame(\n [self._tsc_df.time_interval(_id) for _id in self._tsc_df.ids],\n index=self._tsc_df.ids,\n columns=[\"start\", \"end\"],\n )\n\n table[\"delta_time\"] = self._tsc_df.delta_time\n return table",
"def extract_timers(self):\n raw = self.json[self.fields.timers]\n final = {}\n for step in raw:\n del raw[step][self.fields.order]\n for k,v in raw[step].items():\n final[str(step)] = float(v)\n return final",
"def get_all_timesteps(self):\n for h in self.real_handles:\n h.seek(0, SEEK_SET)\n nx = self.para['nx']\n ny = self.para['ny']\n\n ms = []\n ret_lst = []\n while True:\n try:\n m, data = self.get_next_timestep()\n except ff.NoMoreRecords:\n break\n ms.append(m)\n ret_lst.append(data)\n\n ret = np.stack(ret_lst, axis=0)\n return np.array(ms), ret",
"def multi_temp_task_table_calc(non_empty_rows):\n multi_task_data = []\n for task in ['work','meditation','movement','break','total']: \n row = {}\n row['task']=task\n row[task+'_numeric']=sum([session[task+'_numeric'] for session in non_empty_rows])\n if row[task+'_numeric']==0:\n continue\n row['string']=timedelta_to_string(datetime.timedelta(minutes=np.float64(row[task+'_numeric'])))\n multi_task_data.append(row)\n return multi_task_data",
"def construct_time_array():\n global time_diffs\n time_diffs = np.zeros((no_epochs, no_epochs))\n for m in range(0, no_epochs):\n t11 = time.Time(str(fields[m].header['STARTMJD']), format='mjd')\n t12 = time.Time(str(fields[m].header['ENDMJD']), format='mjd')\n t1 = t11+(t12-t11)/2\n for n in range(m+1, no_epochs):\n t21 = time.Time(str(fields[n].header['STARTMJD']), format='mjd')\n t22 = time.Time(str(fields[n].header['ENDMJD']), format='mjd')\n t2 = t21 + (t22 - t21) / 2\n time_delta = (t2-t1).to_value('sec')\n time_diffs[m, n] = time_delta\n time_diffs[n, m] = -1*time_delta",
"def readStationTimetbl(filename):\r\n with open(filename) as csvfile:\r\n reader = csv.reader(csvfile, delimiter=',');\r\n stationTimetbl = [];\r\n for row in reader:\r\n platformTime = [];\r\n for time in row:\r\n platformTime.append(utilities.convertTimeFormatToSecs(time));\r\n stationTimetbl.append(platformTime);\r\n \r\n return stationTimetbl;",
"def _build_metrics_times_data(self, time_metrics):\n to_return = [{'name': name, 'latencies': latencies}\n for name, latencies in iteritems(time_metrics)]\n return to_return",
"def createTimeseriesLifeCycle(survey_records):\n survey_timeseries = [[key]+[col for col in value] for key, value in {k:[g['Age'].tolist(),g['deck'].tolist(),g['superstructure'].tolist(),g['substructure'].tolist(), g['stateCode'].tolist(), g['yearBuilt'].tolist(),g[\"yearReconstructed\"].tolist()] for k, g in survey_records.groupby('structureNumber')}.items()]\n # for key, value in {k:[g['Age'].tolist(),g['ADT Type'].tolist(),g['Category'],g['superstructure'].tolist()] for k, g in survey_records.groupby('structureNumber')}.items():\n return survey_timeseries",
"def kappa_tau_table(models,returns):\n kappa = []\n tau = []\n tuples = []\n table = np.array(['kappa','tau'])\n \n for model in models:\n for index in ['FTSE','S&P']:\n res = new_garch_model(returns=returns,index=index,**model).fit()\n table = np.vstack((table,[res.x[0],res.x[1]]))\n tuples.append((*model.values(),index))\n \n index = pd.MultiIndex.from_tuples(tuples, names=['ticker','sample size','mean process','p','q','index'])\n table = pd.DataFrame(table[1:,:],columns=table[0,:],index=index)\n \n return table",
"def extract_interaction_time_and_information(cell):\n\n alist = []\n for screen in cell:\n adict = {}\n infor_arr = np.array([ele[0:6] for ele in screen['cor']]) # drop information about coordinates\n\n def build_cor_arr(list_cor):\n cor_arr = []\n for ele in list_cor:\n if len(ele[6]) == 0:\n cor_arr.append([ele[0], ele[1], ele[0], ele[1]])\n else:\n cor_arr.append([ele[0], ele[1], ele[6][-1][-2], ele[6][-1][-1]])\n return np.array(cor_arr)\n\n cor_arr = build_cor_arr(screen['cor'])\n \n if infor_arr.ndim == 2:\n responsive = infor_arr[infor_arr[:,4] != 0] # drop non-responsive interactions\n cor_arr_res = cor_arr[infor_arr[:,4] != 0]\n\n if len(responsive) != 0: \n time_arr = responsive[:,5]\n gesture_arr = responsive[:,3]\n orientation_arr = responsive[:,2]\n \n # use the time array to remove swipe-trail recording issue (swipe time > trail time)\n true_arr = np.append([np.diff(time_arr) > 0],[True])\n true_time_arr = time_arr[true_arr]\n true_gesture_arr = gesture_arr[true_arr]\n true_coor_arr = cor_arr_res[true_arr]\n true_orientation_arr = orientation_arr[true_arr]\n\n else:\n true_time_arr, true_gesture_arr, true_coor_arr, true_orientation_arr = [], [], [], []\n\n elif (infor_arr.ndim == 1) and (len(infor_arr) > 1) and (infor_arr[4] != 0):\n true_time_arr = [infor_arr[5]]\n true_gesture_arr = [infor_arr[3]]\n true_coor_arr = cor_arr\n true_orientation_arr = [infor_arr[2]]\n else:\n true_time_arr, true_gesture_arr, true_coor_arr, true_orientation_arr = [], [], [], []\n\n adict['_screen'] = screen['an'] # use _screen (with _) to ensure it appears first when print\n adict['interaction_count'] = len(true_time_arr)\n adict['interaction_times'] = np.array(true_time_arr)\n adict['interaction_labels'] = [GESTURE_CODES_MOBILE[i] for i in true_gesture_arr]\n adict['interaction_coors'] = np.array(true_coor_arr)\n adict['orientations'] = np.array(true_orientation_arr)\n adict['start_time'] = screen['at']\n adict['view_time'] = screen['vt']\n\n alist.append(adict)\n\n return alist",
"def make_rme_table(mesh_data,key_descriptor,observable,qnf,qni):\n\n # tabulate values\n key_function = make_key_function(key_descriptor)\n table_data = [\n key_function(results_data) + (results_data.get_rme(observable,(qnf,qni)),)\n for results_data in mesh_data\n ]\n\n # convert to structured array\n table = np.array(\n table_data,\n dtype = list(key_descriptor)+[(\"value\",float)]\n )\n return table",
"def parse_trials(trials):\n # Initialize\n trials_dict = {'tid': [],\n 'loss': [],\n 'trajectories': [],\n 'mc': []}\n for tidx in range(len(trials)):\n # Main\n trials_dict['tid'] += [trials.trials[tidx]['tid']]\n trials_dict['loss'] += [trials.trials[tidx]['result']['loss']]\n trials_dict['trajectories'] += [trials.trials[tidx]['result']['trajectories']]\n\n # Model Configs\n mc = trials.trials[tidx]['result']['mc']\n trials_dict['mc'] += [mc]\n \n trials_df = pd.DataFrame(trials_dict)\n return trials_df",
"def _extract_data_to_dataframe_at_time(t):\n print(\"Publishing data for day {} (index {})\".format(t[1], t[0]))\n itime = t[0]\n nb_cells = nc.dimensions['n_cells'].size\n npst = np.ma.column_stack((\n np.arange(start=1, stop=nb_cells + 1, dtype='i4'),\n vfunc_jd_to_dt(np.full((nb_cells), nc.variables['time'][itime])),\n nc.variables['water_elevation_catchment_mean'][itime, :],\n nc.variables['water_elevation_catchment_median'][itime, :],\n nc.variables['water_elevation_catchment_std'][itime, :],\n nc.variables['water_elevation_catchment_mad'][itime, :],\n nc.variables['streamflow_catchment_mean'][itime, :],\n nc.variables['streamflow_catchment_median'][itime, :],\n nc.variables['streamflow_catchment_std'][itime, :],\n nc.variables['streamflow_catchment_mad'][itime, :],\n vfunc_jd_to_dt(np.full((nb_cells), nc.variables['time_added_to_hydb'][itime])),\n np.full((nb_cells), nc.variables['is_analysis'][itime])\n ))\n\n df = pd.DataFrame(npst,\n index=np.arange(start=1, stop=nb_cells + 1, dtype='i4'),\n columns=['cell_id', 'date', 'elevation_mean', 'elevation_median', 'elevation_stddev', 'elevation_mad',\n 'flow_mean', 'flow_median', 'flow_stddev', 'flow_mad', 'update_time', 'is_analysis']\n )\n\n # force cell_id type to smallint\n df = df.astype({\n 'cell_id': 'int16',\n 'is_analysis': 'boolean'\n })\n print(df)\n return df",
"def load_traj_MOP( fname):\n time, lat, lon, elev, brightness = [],[],[],[],[]\n x,y,z = [],[],[]\n #load all the comments and find the event names\n tab1 = []\n tab2 = []\n with open( fname, 'rt') as f:\n ef1 = f.readline()\n ef2 = f.readline()\n loc1 = f.readline()\n header1 = f.readline().split(',')\n while True:\n dat = f.readline()\n if dat.startswith('#'):\n break\n tab1.append( dat)\n loc2 = dat\n header2 = f.readline().split(',')\n while True:\n dat = f.readline()\n if not dat or dat.startswith('#'):\n break\n tab2.append( dat)\n #extract out named columns\n header1[0] = header1[0].lstrip('#')\n header2[0] = header2[0].lstrip('#')\n header1 = [a.strip() for a in header1]\n header2 = [a.strip() for a in header2]\n tabd1 = []\n for row in tab1:\n dd = {}\n dat = row.split(',')\n dat = [a.strip() for a in dat]\n for b in range(len(dat)):\n dd[ header1[b]] = dat[b]\n tabd1.append( dd)\n tabd2 = []\n for row in tab2:\n dd = {}\n dat = row.split(',')\n dat = [a.strip() for a in dat]\n for b in range(len(dat)):\n dd[ header2[b]] = dat[b]\n tabd2.append( dd)\n #tabd = list of dicts\n for tab in (tabd1,tabd2):\n for row in tab:\n #print(row)\n time.append( row['datetime']) #iso string\n lat.append( row['latitude']) #in deg\n lon.append( row['longitude']) # in deg\n elev.append( float(row['height'])) #in m\n x.append( float( row['X_geo'])) #in km\n y.append( float( row['Y_geo'])) #in km\n z.append( float( row['Z_geo'])) #in km\n if 'brightness' in row:\n brightness.append( row['brightness']) # float\n else:\n brightness.append( 255.0 ) # float\n logger.debug('split_traj_finished, ' + str(fname))\n # 2 sets of data from 2 cameras here\n # try globally sorting by time\n lat.sort( key=dict(zip(lat,time)).get)\n lon.sort( key=dict(zip(lon,time)).get)\n elev.sort( key=dict(zip(elev,time)).get)\n x.sort( key=dict(zip(x,time)).get)\n y.sort( key=dict(zip(y,time)).get)\n z.sort( key=dict(zip(z,time)).get)\n brightness.sort( key=dict(zip(brightness,time)).get)\n time.sort() #keep time as str\n return time, lat, lon, elev, x,y,z, brightness",
"def get_vehicle_state_table(self, key, steps=None):\n table = []\n # Get list of all vehicles, sorted by id:\n vehicles = sorted(self.all_vehicles, key=lambda vehicle: vehicle.id)\n for vehicle in vehicles:\n df = vehicle.get_state_table(keys=['step','time',key], steps=steps)\n df = df.rename(columns={key:vehicle.id}).set_index(['step','time'])\n table.append( df ) \n table = pd.concat(table, axis=1)\n table.columns.name = 'vehicle_id'\n return table",
"def extract_time_variants(self):\n\n for scenario in self.scenarios_to_run:\n self.scaleup_data[scenario] = {}\n for parameter in self.time_variant_parameters:\n self.scaleup_data[scenario][parameter] = copy.copy(self.time_variant_parameters[parameter])",
"def build_t(self, result):\n def g2i(k):\n return int(m.group(k))\n\n def hours(dateObj):\n seconds = (dateObj - firstDate).total_seconds()\n return seconds / 3600\n\n xref = {}\n firstDate = None\n self.dates = set()\n for k, row in enumerate(result):\n if k == 0: continue\n dateText = row[0]\n m = self.reDate.match(dateText)\n if not m:\n raise ValueError(sub(\n \"Couldn't parse '{}' as a date!\", dateText))\n thisDate = date(g2i(1), g2i(2), g2i(3))\n if firstDate is None:\n firstDate = thisDate\n self.dates.add(thisDate)\n if thisDate not in xref:\n xref[thisDate] = {k}\n else: xref[thisDate].add(k)\n # Convert set to sorted list\n self.dates = sorted(list(self.dates))\n # Build array t\n self.t = np.array([hours(x)/24 for x in self.dates], dtype=float)\n return xref"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return timing table of specific model
|
def get_model_table(self, leaf, omp, mpi):
    m = self.models[omp][leaf][mpi]
    return m.table
|
[
"def get_model_time(db_name, img_num, model_name):\n #print(\"Connecting to database...\")\n connection = connect_db(db_name)\n cursor = connection.cursor()\n \n cmd = 'SELECT performance FROM exec_data WHERE model_name=\\''+model_name+'\\' and img_num=\\''+str(img_num)+'\\''\n cursor.execute(cmd)\n result = cursor.fetchall()\n return result[0][0]",
"def init_timing_log(metadata):\n return Table(\"timing_log\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(cn.task_name, Text),\n Column(cn.timing_tag, Text),\n Column(cn.task_time, Float))",
"def create_table_for(self, model):",
"def generate_time_tables(self):\n from dbanalysis.classes import weather_getter\n self.total_routes = 0\n self.failed_routes = 0\n w_getter = weather_getter.weather_getter()\n weather = w_getter.get_weather()\n import datetime\n dt = datetime.datetime.now()\n count = 0\n for route in self.routes:\n if len(route) < 1:\n continue \n times = self.time_tabler.get_dep_times_five_days(route,dt)\n \n for variation in times:\n self.total_routes +=1\n #if not self.selector.get_unavailable(route,int(variation)):\n try: \n count +=1\n print(count,route+'_'+str(variation))\n X=times[variation]\n \n # merge with weather data to add weather features.\n #X['matrix'] = pd.merge(X['matrix'],weather[['day','hour','rain','temp','vappr']],on = ['day','hour'])\n X['matrix']['rain']=0.08\n X['matrix']['temp']=10.0\n X['matrix']['vappr']=10.0\n \n \n \n self.run_route(X['matrix'],X['pattern'])\n try:\n pass\n except Exception as e:\n print(e)\n \n except Exception as e:\n print(e)\n self.failed_routes +=1\n try:\n pass \n except Exception as e:\n \n print(e,'broken timetabler',route)\n pass",
"def timetable(db_path, filters,\n\tdefault_join=\"outer\",\n\tjoin_types=[],\n\t):\n\n\tcol_entries=[\n\t\t(\"Animal\",\"id\"),\n\t\t(\"Animal\",\"death_date\"),\n\t\t(\"Treatment\",),\n\t\t(\"FMRIMeasurement\",\"date\"),\n\t\t(\"OpenFieldTestMeasurement\",\"date\"),\n\t\t(\"ForcedSwimTestMeasurement\",\"date\"),\n\t\t(\"TreatmentProtocol\",\"code\"),\n\t\t(\"CageStay\",\"start_date\"),\n\t\t(\"Cage\",\"id\"),\n\t\t(\"Cage\",\"Treatment\",\"\"),\n\t\t(\"Cage\",\"TreatmentProtocol\",\"code\"),\n\t\t(\"SucrosePreferenceMeasurement\",\"date\"),\n\t\t]\n\tjoin_entries=[\n\t\t(\"Animal.treatments\",),\n\t\t(\"FMRIMeasurement\",),\n\t\t(\"OpenFieldTestMeasurement\",\"Animal.measurements\"),\n\t\t(\"ForcedSwimTestMeasurement\",\"Animal.measurements\"),\n\t\t(\"Treatment.protocol\",),\n\t\t(\"Animal.cage_stays\",),\n\t\t(\"CageStay.cage\",),\n\t\t(\"Cage_Treatment\",\"Cage.treatments\"),\n\t\t(\"Cage_TreatmentProtocol\",\"Cage_Treatment.protocol\"),\n\t\t(\"SucrosePreferenceMeasurement\",\"Cage.measurements\"),\n\t\t]\n\n\t# setting outerjoin to true will indirectly include controls\n\tdf = query.get_df(db_path, col_entries=col_entries, join_entries=join_entries, filters=filters, default_join=default_join, join_types=join_types)\n\n\treturn df",
"def _getTimelineModel(self):\n return timeline_model.GCITimeline",
"def get_table(self, key):\n if self.sens and self.has_key(key):\n return self.tbdict_sen[key]\n if self.has_key(key):\n return self.tbdict[key]",
"def get_timings(self):\r\n return self.times",
"def get_history_model(\n target: attributes.InstrumentedAttribute) -> bases.TemporalProperty:\n assert hasattr(target.class_, 'temporal_options')\n\n return target.class_.temporal_options.history_tables[target.property]",
"def _get_table(self, obj):\r\n if isinstance(obj, Marble):\r\n return obj\r\n else:\r\n return obj.table",
"def get_timings(prog_name) :\n\n [prob_sizes, timings] = db.get_timings(prog_name)\n \n return [prob_sizes, timings]",
"def get_vehicle_state_table(self, key, steps=None):\n table = []\n # Get list of all vehicles, sorted by id:\n vehicles = sorted(self.all_vehicles, key=lambda vehicle: vehicle.id)\n for vehicle in vehicles:\n df = vehicle.get_state_table(keys=['step','time',key], steps=steps)\n df = df.rename(columns={key:vehicle.id}).set_index(['step','time'])\n table.append( df ) \n table = pd.concat(table, axis=1)\n table.columns.name = 'vehicle_id'\n return table",
"def get(self, timeout) -> TPModel:",
"def kappa_tau_table(models,returns):\n kappa = []\n tau = []\n tuples = []\n table = np.array(['kappa','tau'])\n \n for model in models:\n for index in ['FTSE','S&P']:\n res = new_garch_model(returns=returns,index=index,**model).fit()\n table = np.vstack((table,[res.x[0],res.x[1]]))\n tuples.append((*model.values(),index))\n \n index = pd.MultiIndex.from_tuples(tuples, names=['ticker','sample size','mean process','p','q','index'])\n table = pd.DataFrame(table[1:,:],columns=table[0,:],index=index)\n \n return table",
"def forecast_table_name():\n return None",
"def show_table(models: typing.List[Model]):\n if not models:\n click.echo(\"Empty!\")\n return\n\n headers = list(flatten_dict(models[0].to_dict()).keys())\n table = Texttable(MAX_TABLE_WIDTH)\n\n table.add_rows([headers] + [_convert_model_values(md) for md in models])\n click.echo(table.draw() + \"\\n\")",
"def ts_describe(self, transport, table):\n t = table\n if isinstance(t, six.string_types):\n t = Table(self, table)\n return transport.ts_describe(t)",
"def time(self, name, **kwargs):\n return self.column('time', name, **kwargs)",
"def timeFlow(self):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
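As a usage sketch for the get_model_table record above: the lookup assumes a nested dict keyed omp -> leaf -> mpi, where each entry exposes its timing data as a .table attribute. The layout and names below are illustrative, not taken from the source.

from types import SimpleNamespace

# Illustrative nested layout: models[omp][leaf][mpi] -> object with a .table attribute
models = {
    4: {                                    # OMP threads
        512: {                              # leaf blocks
            16: SimpleNamespace(table={'walltime': [3.1, 2.9, 2.8]}),   # MPI ranks
        },
    },
}

def get_model_table(models, leaf, omp, mpi):
    # same triple lookup as the method above, written as a free function
    return models[omp][leaf][mpi].table

print(get_model_table(models, leaf=512, omp=4, mpi=16))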
Return array of total leaf blocks versus mpi
|
def get_leaf_blocks(self, leaf, omp):
if self.scaling_type == 'strong':
return np.full_like(self.mpi[omp], leaf)
elif self.scaling_type == 'weak':
return leaf * self.mpi[omp]
|
[
"def total_nodes(self):\n return np.vstack((self.nodes, self.hanging_nodes))",
"def num_blocks(self):\n return self._num_blocks",
"def _get_block_ids_for_sparse_super(self, total_block_count, blocks_per_group) \\\r\n -> []:\r\n block_ids = []\r\n total_block_group_count = int(math.ceil(total_block_count / blocks_per_group))\r\n\r\n if total_block_group_count>1:\r\n block_ids.append(blocks_per_group)\r\n\r\n # 3^x\r\n block_group_id = 3\r\n while block_group_id < total_block_group_count:\r\n block_id = (block_group_id * blocks_per_group)\r\n block_id = block_id\r\n block_ids.append(block_id)\r\n block_group_id = 3 * block_group_id\r\n\r\n # 5^x\r\n block_group_id = 5\r\n while block_group_id < total_block_group_count:\r\n block_id = (block_group_id * blocks_per_group)\r\n block_id = block_id\r\n block_ids.append(block_id)\r\n block_group_id = 5 * block_group_id\r\n\r\n # 7^x\r\n block_group_id = 7\r\n while block_group_id < total_block_group_count:\r\n block_id = (block_group_id * blocks_per_group)\r\n block_id = block_id\r\n block_ids.append(block_id)\r\n block_group_id = 7 * block_group_id\r\n\r\n return block_ids",
"def _get_block_ids_for_non_sparse_super(self, total_block_count, blocks_per_group) \\\r\n -> []:\r\n block_ids = []\r\n block_group_id = 1\r\n total_block_group_count = int(math.ceil(total_block_count / blocks_per_group))\r\n while block_group_id < total_block_group_count:\r\n block_id = (block_group_id * blocks_per_group)\r\n block_id = block_id\r\n block_ids.append(block_id)\r\n block_group_id += 1\r\n\r\n return block_ids",
"def n_splittable_leaf_nodes(tree: Tree) -> int:\n output = len(tree.splittable_leaf_nodes)\n return output",
"def find_clu_size_seq(self):\n if np.all([type(i)==int for i in self.clusters]):\n sorted_cluster = sorted(self.clusters)\n else:\n sorted_cluster = sorted(self.clusters, key=lambda v: str(v))\n return [len(self.clu2elm_dict[clu]) for clu in sorted_cluster]",
"def get_numeros_blocs_non_vides(vtkMultiBlockDataSet):\r\n list_of_blocks = []\r\n for numbloc in range(vtkMultiBlockDataSet.GetNumberOfBlocks()):\r\n if vtkMultiBlockDataSet.GetBlock(numbloc) != None:\r\n if vtkMultiBlockDataSet.GetBlock(numbloc).GetNumberOfPoints() != 0:\r\n list_of_blocks.append(numbloc)\r\n return list_of_blocks",
"def sizes_of_cluster_orbits(self):\n return [block_['diameter'] for block_ in self._all_cluster_blocks]",
"def nr_of_blocks(self, img_arr_shape):\n\n # maximal coordinates that can be reached (inclusive)\n or_z = img_arr_shape[0] - 1\n or_y = img_arr_shape[1] - 1\n or_x = img_arr_shape[2] - 1\n\n # starting coordinates\n z, y, x = 0, 0, 0\n\n # centered starting coordinates in the padded image = origin of the original image\n cent_z, cent_y, cent_x = (self.block_size // 2), (self.block_size // 2), (self.block_size // 2)\n\n while cent_z <= or_z + (self.block_size // 2) - self.block_displacement:\n cent_z += self.block_displacement\n z += 1\n nr_block_z = z + 1\n\n while cent_y <= or_y + (self.block_size // 2) - self.block_displacement:\n cent_y += self.block_displacement\n y += 1\n nr_block_y = y + 1\n\n while cent_x <= or_x + (self.block_size // 2) - self.block_displacement:\n cent_x += self.block_displacement\n x += 1\n nr_block_x = x + 1\n\n return [nr_block_z, nr_block_y, nr_block_x]",
"def group_children_by_parent(bin_routing_map):\n \n tmp = np.where(np.transpose(bin_routing_map))\n children_per_parent = np.reshape(tmp[1],[bin_routing_map.shape[1], -1])\n \n return children_per_parent",
"def n_blocks(self) -> int:\n return self.GetNumberOfBlocks()",
"def get_bottom_blocks(tower):\n g = nx.subgraph(tower.graph.copy(), np.arange(1, len(tower) + 1))\n ids = []\n for i in tower.ordered_blocks:\n if len(g.pred[i]) == 0:\n ids.append(i)\n return ids",
"def get_number_of_blocks(image_size, max_block_size):\n\n return [int(ceil(image_size_element / max_block_size_element)) for image_size_element, max_block_size_element in\n zip(image_size, max_block_size)]",
"def create_num_bases(self):\r\n if self.dimension==1:\r\n KV_xi=self.knot_vector(self.number_elements,self.order,self.mp)\r\n self.num_bases=len(KV_xi)-(self.order+1)\r\n return\r\n \"\"\"\r\n Generates knot vectors for each patch\r\n \"\"\"\r\n# print(self.number_elements)\r\n KV_xi=lambda patch_num: self.knot_vector(self.number_elements[patch_num,0],self.order[patch_num,0],self.mp[patch_num,0])\r\n KV_eta=lambda patch_num: self.knot_vector(self.number_elements[patch_num,1],self.order[patch_num,1],self.mp[patch_num,1])\r\n \r\n \"\"\"\r\n Finds number of bases in knot vectors\r\n \"\"\"\r\n num_basis_xi=lambda patch_num: len(KV_xi(patch_num))-(self.order[patch_num,0]+1)\r\n num_basis_eta=lambda patch_num: len(KV_eta(patch_num))-(self.order[patch_num,1]+1)\r\n \r\n if np.array_equal(self.order[1,:],np.ones(2)*-1)!=1:\r\n self.num_bases=np.array([ (num_basis_xi(patch_num),num_basis_eta(patch_num)) \\\r\n for patch_num in range(len(self.order))]) \r\n else:\r\n self.num_bases=np.vstack((np.array([num_basis_xi(0),num_basis_eta(0)]),np.zeros(2)))",
"def __len__(self):\n totalLength = 0\n for node in self.grid.iter():\n totalLength += len(node.get('grid'))\n\n return totalLength",
"def compute_all_leaf_transition_probs(self):\n self.P = {}\n with tqdm(total=self.num_leaves) as pbar:\n for nint in self.leaf_nints:\n self.P[nint] = self.get_leaf_transition_probs(nint)\n pbar.update(1)",
"def get_leaves_per_sample(self, bufferx, param_space):\n leaf_per_sample = np.array([tree.apply(bufferx) for tree in self])\n return leaf_per_sample",
"def n_bscans(self) -> int:\n return len(self.substructure)",
"def getNumParallels(self) -> \"int\":\n return _coin.ScXMLScxmlElt_getNumParallels(self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
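A self-contained sketch of the strong/weak branch in the get_leaf_blocks record above, assuming the mpi values form a NumPy array of rank counts (function and variable names here are illustrative):

import numpy as np

def leaf_blocks(mpi_ranks, leaf, scaling_type):
    # strong scaling: total work (leaf blocks) stays fixed as ranks grow
    if scaling_type == 'strong':
        return np.full_like(mpi_ranks, leaf)
    # weak scaling: work per rank stays fixed, so total blocks grow with ranks
    elif scaling_type == 'weak':
        return leaf * mpi_ranks
    raise ValueError(f"unknown scaling_type: {scaling_type}")

ranks = np.array([1, 2, 4, 8])
print(leaf_blocks(ranks, leaf=512, scaling_type='strong'))  # [512 512 512 512]
print(leaf_blocks(ranks, leaf=512, scaling_type='weak'))    # [512 1024 2048 4096]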
Set up fig and ax, checking whether ax was already provided
|
def _setup_fig_ax(self, ax):
fig = None
if ax is None:
fig, ax = plt.subplots()
return fig, ax
|
[
"def _validate_axes_instance(ax):\n if ax is None:\n ax = plt.gca()\n elif not isinstance(ax, mpl.axes.Axes):\n raise TypeError(\"ax must be a matplotlib.axes.Axes instance.\")\n\n return ax",
"def _setup_figure(self):\n if self.fig_size:\n size = self.fig_size\n else:\n size = 6, 1.0 + 0.42 * len(self.designs) * self.rows\n\n self.fig = plt.figure(figsize=size)",
"def _set_ax_subplot(self, axes, x_var, y_var, row, col, omp,\n x_scale, y_scale):\n ax = axes[row, col]\n nrows = axes.shape[0]\n ncols = axes.shape[1]\n\n if col == 0:\n self._set_ax_text(ax=ax, omp=omp, fixed_var='omp')\n if self.scaling_type == 'strong':\n self._set_ax_legend(ax=ax)\n\n if row == 0:\n self._set_ax_title(ax=ax)\n if self.scaling_type == 'weak':\n self._set_ax_legend(ax=ax)\n\n if row == nrows - 1:\n ax.set_xlabel(self.config['plot']['labels'][x_var])\n\n ax.set_ylabel(self.config['plot']['labels'][y_var])\n\n self._set_ax_scale(ax=ax, x_var=x_var, y_var=y_var,\n x_scale=x_scale, y_scale=y_scale)\n self._set_ax_xticks(ax=ax)\n self._set_ax_dashed(ax=ax, y_var=y_var)",
"def passed_or_new_ax(func):\n @wraps(func)\n def inner(*args, **kwargs):\n if 'ax' in kwargs:\n return func(*args, **kwargs)\n else:\n fig, ax = plt.subplots()\n kwargs.update({'ax':ax})\n return func(*args, **kwargs)\n return inner",
"def _plot(self, datum, ax):\n raise NotImplementedError",
"def __init__(self, ax):\n super(TimeSeriesTester, self).__init__(ax)",
"def _implicit_ax(plot_func, params=None):\n\n @wraps(plot_func)\n def wrapper(*args, **kwargs):\n fig = None\n ax = kwargs.get('ax', None)\n if ax is None and len(args) == 1:\n fig, ax = matplotlib_utils.get_figure(params=params)\n kwargs['ax'] = ax\n res = plot_func(*args, **kwargs)\n if fig:\n matplotlib_utils.plot_style(fig=fig, ax=ax)\n return res\n\n return wrapper",
"def make_figure():\n fig, ax = plt.subplots()\n\n return fig, ax",
"def _prepare_plot_package(self):\n pass",
"def plot_all(self, ax=None):\n if ax is None:\n fig, ax = plt.subplots()\n\n ax = self.plot_principal_radii_of_gyration(ax=ax)\n ax = self.plot_principal_inertia_ellipsoids(ax=ax)\n ax = self.plot_geometry(ax=ax)\n ax = self.plot_mass_centers(ax=ax)\n\n return ax",
"def __init__(self, fig=None, *args, **kwargs):\n # Retrieve the series ...................\n _series = kwargs.pop('series',None)\n Subplot.__init__(self,fig,*args,**kwargs)\n# # Force fig to be defined .....\n# if fig is None:\n# fig = TSFigure(_series)\n # Process options .......................\n if _series is not None:\n assert hasattr(_series, \"dates\")\n self._series = _series.ravel()\n self.xdata = _series.dates\n self.freq = _series.dates.freq\n self.xaxis.set_major_locator\n\n else:\n self._series = None\n self.xdata = None\n self.freq = None\n self._austoscale = False\n # Get the data to plot\n self.legendsymbols = []\n self.legendlabels = []",
"def setupAxes(widget=None):\n\n if widget is None:\n return None\n\n for axe in widget.figure.axes:\n axe.cla()\n del axe\n gc.collect()\n\n widget.figure.clf()\n # used constrained_layout = True instead\n # figure.subplots_adjust(left=0.075, right=0.95, bottom=0.1, top=0.975)\n axe = widget.figure.add_subplot(1, 1, 1, facecolor=None)\n\n axe.set_facecolor((0, 0, 0, 0))\n axe.set_xlim(0, 360)\n axe.set_ylim(0, 90)\n axe.grid(True, color='#404040')\n axe.tick_params(axis='x',\n bottom=True,\n colors='#2090C0',\n labelsize=12)\n axeTop = axe.twiny()\n axeTop.set_facecolor((0, 0, 0, 0))\n axeTop.set_xlim(0, 360)\n axeTop.tick_params(axis='x',\n top=True,\n colors='#2090C0',\n labelsize=12)\n axeTop.set_xticks(np.arange(0, 361, 45))\n axeTop.grid(axis='both', visible=False)\n axeTop.set_xticklabels(['N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW', 'N'])\n axeTop.spines['bottom'].set_color('#2090C0')\n axeTop.spines['top'].set_color('#2090C0')\n axeTop.spines['left'].set_color('#2090C0')\n axeTop.spines['right'].set_color('#2090C0')\n axe.set_xticks(np.arange(0, 361, 45))\n axe.set_xticklabels(['0', '45', '90', '135', '180', '225', '270', '315', '360'])\n axe.tick_params(axis='y',\n colors='#2090C0',\n which='both',\n labelleft=True,\n labelright=True,\n labelsize=12)\n axe.set_xlabel('Azimuth in degrees',\n color='#2090C0',\n fontweight='bold',\n fontsize=12)\n axe.set_ylabel('Altitude in degrees',\n color='#2090C0',\n fontweight='bold',\n fontsize=12)\n return axe",
"def get_fig_axes(axes=None):\n if axes is None:\n fig = plt.figure()\n ax = plt.gca()\n else:\n ax = axes\n fig = ax.figure\n\n return fig, ax",
"def _handle_axes(self, drawable, option):\n # If we already have an axes object, ignore this one\n if self._axes_object is not None:\n return\n\n # Grab the histogram used for axes style/range manipulation\n if is_stack(drawable) or is_graph(drawable):\n axes_histogram = drawable.GetHistogram()\n else:\n axes_histogram = drawable\n self._axes_object = axes_histogram\n\n # Grab the histogram used for title manipulation\n if is_stack(drawable):\n title_histogram = drawable.GetHists()[0]\n else:\n title_histogram = drawable\n\n # Grab axes\n x_axis, y_axis = axes_histogram.GetXaxis(), axes_histogram.GetYaxis()\n\n # Grab titles from first histogram if not set explicitly\n if self._x_title is None:\n self._x_title = title_histogram.GetXaxis().GetTitle()\n if self._y_title is None:\n self._y_title = title_histogram.GetYaxis().GetTitle()\n\n if self._x_range is not None:\n #x_axis.SetRangeUser(*self._x_range)\n x_axis.SetLimits(*self._x_range)\n\n # Style x-axis, or hide it if this plot has a ratio plot\n if self._ratio_plot:\n x_axis.SetLabelOffset(999)\n x_axis.SetTitleOffset(999)\n else:\n x_axis.SetTitle(self._x_title)\n x_axis.SetTitleSize(self.PLOT_X_AXIS_TITLE_SIZE)\n x_axis.SetTitleOffset(self.PLOT_X_AXIS_TITLE_OFFSET)\n\n # Style y-axis\n if self._ratio_plot:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE_WITH_RATIO)\n y_axis.SetLabelOffset(self.PLOT_Y_AXIS_LABEL_OFFSET)\n y_axis.SetTitle(self._y_title)\n y_axis.SetTitleSize(\n (self.PLOT_Y_AXIS_TITLE_SIZE_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_SIZE)\n )\n y_axis.SetTitleOffset(\n (self.PLOT_Y_AXIS_TITLE_OFFSET_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_OFFSET)\n )\n\n # Redraw the drawable with the new style\n drawable.Draw(option)",
"def _plot_ax_sig2(self, ax, sig, panel, signame='', plot_errors=False, units=None, label=None, annot=True):\n # type(ax[panel]) might want to make sure its correct\n shape = self.data[sig]['data'].shape\n if len(shape) == 2:\n print('Data format of {} has another dimension -> taking mean'.format(sig))\n time = self.data[sig]['time']\n data = np.nanmean(self.data[sig]['data'],axis=1)\n try:\n errors = np.nanmean(self.data[sig]['errors'],axis=1)\n except:\n print('No errors found in {}'.format(sig))\n errors = self.data[sig]['errors']\n \n else: \n time = self.data[sig]['time']\n data = self.data[sig]['data']\n errors = self.data[sig]['errors'] # may be None, deal with later\n \n if not units:\n units = self.data[sig]['units']\n else:\n pass\n \n # correct other units\n if sig == 'ANE_DENSITY':\n data = data/1e20\n if sig == 'WMHD':\n data = data/1e6\n units = r'$MW$'\n if sig == 'AYC_TE0':\n data = data/1e3\n units = r'$keV$'\n if sig == 'Ploss':\n data = data/1e6\n units = r'$MW$'\n errors = errors/1e6\n if sig == 'AIM_DA_TO':\n data = data/1e19\n \n if signame!='': \n label=signame\n \n # Decide how to plot depending on plot_error call\n if plot_errors == True:\n #linestyle = '.'\n ax[panel].errorbar(time,data, yerr=errors,ecolor=\"red\",label=label)\n elif plot_errors == 'fill':\n try:\n if errors == None: \n ax[panel].plot(time,data,label=label)\n except:\n ax[panel].plot(time,data,label=label)\n ax[panel].fill_between(time, data-np.nan_to_num(errors), data + np.nan_to_num(errors), alpha=0.3)\n else: \n ax[panel].plot(time,data,label=label)\n \n if annot:\n ax[panel].annotate(r'$%s \\ [%s]$' %(signame, units), xy=(0.01,0.65), xycoords='axes fraction', fontsize=11)\n\n #ax[panel].legend()",
"def setup_plot():\n style.use('fivethirtyeight')\n mondays = WeekdayLocator(MONDAY)\n alldays = DayLocator()\n weekFormatter = DateFormatter('%b %d') # e.g., Jan 12\n dayFormatter = DateFormatter('%b %d')\n\n plt.ion()\n fig = plt.figure()\n plt.xlabel(\"Time\")\n plt.ylabel(\"BTC Price ($)\")\n ax = fig.add_subplot(111)\n ax.xaxis.set_major_locator(mondays)\n ax.xaxis.set_minor_locator(alldays)\n ax.xaxis.set_major_formatter(weekFormatter)\n\n fig.canvas.draw()\n plt.setp(plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right')\n plt.show(block=False)\n\n return fig, ax",
"def setup_time_series_acf_figure():\n fig = plt.figure(figsize=(15, 10))\n ax1a = plt.subplot2grid((3, 4), (0, 0), colspan=3)\n ax1b = plt.subplot2grid((3, 4), (0, 3), colspan=1)\n ax2a = plt.subplot2grid((3, 4), (1, 0), colspan=3)\n ax2b = plt.subplot2grid((3, 4), (1, 3), colspan=1)\n ax3a = plt.subplot2grid((3, 4), (2, 0), colspan=3)\n ax3b = plt.subplot2grid((3, 4), (2, 3), colspan=1)\n\n axl = [ax1a, ax2a, ax3a]\n axr = [ax1b, ax2b, ax3b]\n ax = fig.get_axes()\n return fig, ax, axl, axr",
"def create_ax(x_dim, y_dim, shx=False, shy=False):\n\n fig,axarr = plt.subplots( y_dim, x_dim, sharex=shx, sharey=shy, squeeze=False )\n\n return fig,axarr",
"def test_no_data(self):\n # Setup\n paxfig = paxplot.pax_parallel(n_axes=3)\n\n # Setting limits not supported\n with self.assertRaises(AttributeError):\n paxfig.set_lim(ax_idx=0, bottom=-1.0, top=3.0)\n\n # Setting ticks (raises set_lim error)\n with self.assertRaises(AttributeError):\n paxfig.set_ticks(ax_idx=0, ticks=[0.0, 1.0, 2.0])\n\n # Setting labels supported\n paxfig.set_label(ax_idx=1, label='foo')\n\n # axis inversion\n with self.assertRaises(AttributeError):\n paxfig.invert_axis(ax_idx=2)\n\n # Legend won't fail but just creates blank legend\n paxfig.add_legend(labels=[])\n\n # Adding colorbar supported\n paxfig.add_colorbar(\n ax_idx=0,\n cmap='viridis',\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
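The ax=None pattern in the _setup_fig_ax record above is the usual matplotlib idiom for letting callers supply their own axes; a minimal free-function sketch of the same idea, with illustrative names:

import matplotlib.pyplot as plt

def setup_fig_ax(ax=None):
    # Create a new figure only when the caller did not supply an axes object;
    # returning fig=None signals that the caller owns the figure.
    fig = None
    if ax is None:
        fig, ax = plt.subplots()
    return fig, ax

# Caller-owned axes: the helper reuses it and returns fig=None
fig, axes = plt.subplots(1, 2)
_, ax = setup_fig_ax(ax=axes[0])
assert ax is axes[0]

# No axes supplied: the helper creates its own figure
new_fig, new_ax = setup_fig_ax()
assert new_fig is not None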
Check for valid x_var and args
|
def _check_x_var(self, x_var, omp, mpi):
x_map = {'omp': mpi, 'mpi': omp}
name_map = {'omp': 'mpi', 'mpi': 'omp'}
if x_map[x_var] is None:
raise ValueError(f"must specify {name_map[x_var]} if x_var='{x_var}'")
|
[
"def check_arguments(self, args, func_obj, called_func):\n # variable, default_value: UnitXObject\n args_without_default = []\n for variable, default_value in self.defined_args:\n if not default_value:\n args_without_default.append([variable, None])\n\n #\n # An error which arguments are not enougn.\n #\n if len(args) < len(args_without_default):\n msg = Constants.TYPE_ERR_ARGS % (self.name, len(args_without_default), len(args))\n if args: \n last_unitx_obj = args[-1]\n self.mediator.get_parser().notifyErrorListeners(msg, last_unitx_obj.token, Exception(msg))\n else: \n self.mediator.get_parser().notifyErrorListeners(msg, self.ctx.start, Exception(msg))\n\n #\n # An error which arguments are too much.\n #\n if len(args) > len(self.defined_args):\n msg = Constants.TYPE_ERR_ARGS % (self.name, len(self.defined_args), len(args))\n last_unitx_obj = args[-1]\n self.mediator.get_parser().notifyErrorListeners(msg, last_unitx_obj.token, Exception(msg))\n\n return",
"def check_args(self, test, args):\n\n if variables.VariableSetManager.has_deferred(args):\n return\n\n self._check_args(test, **args)",
"def _check_args(self):\n if not self.wm_class and not self.wm_instance and not self.wm_title:\n raise RaiseorlaunchError(\n \"You need to specify \" '\"wm_class\", \"wm_instance\" or \"wm_title.'\n )\n if (self.workspace or self.target_workspace) and self.scratch:\n raise RaiseorlaunchError(\n \"You cannot use the scratchpad on a specific workspace.\"\n )\n if not check_positive(self.event_time_limit):\n raise RaiseorlaunchError(\n \"The event time limit must be a positive integer or float!\"\n )\n if self.workspace and self.target_workspace:\n if not self.workspace == self.target_workspace:\n raise RaiseorlaunchError(\n \"Setting workspace and initial workspace is ambiguous!\"\n )",
"def validate_arguments(self, args):\n\t\tif args.org == None:\n\t\t\tprint('Please specify Organization name. Exiting.')\n\t\t\tsys.exit(0)\n\t\tif args.repo == None:\n\t\t\tprint('Please specify Repositories name. Exiting.')\n\t\t\tsys.exit(0)\n\t\tif args.event_type == None:\n\t\t\tprint('Please specify type of the event. Exiting.')\n\t\t\tsys.exit(0)\n\t\tif args.event_type == 'commits' and args.branch == None:\n\t\t\tprint('Please specify branch name. Exiting.')\n\t\t\tsys.exit(0)\n\t\tif args.event_type == 'issues' and args.state == None:\n\t\t\tprint('Please specify state of the issues. Exiting.')\n\t\t\tsys.exit(0)\n\t\tif args.event_type == 'pullRequests' and args.branch == None and args.state == None:\n\t\t\tprint('Please specify branch and state of the pulls. Exiting.')\n\t\t\tsys.exit(0)\n\t\t\t\n\t\treturn",
"def check_display(self, args):\n \n if len(args) < 1 or args[0] not in self._display_opts:\n self.help_display()\n raise self.InvalidCmd\n \n if args[0] == 'variable' and len(args) !=2:\n raise self.InvalidCmd('variable need a variable name')",
"def test_ignorearg(self):\n self.assertEqual(check_args(self.ignorearg), {})",
"def check_args_for_required(self):\n for arg in self._storage['input']:\n if arg['value'] == '__required__':\n raise ValueError(\"Required argument {} missing in node {}\".format(arg['name'], self._storage['name']))",
"def check_args(args):\n if args.edit:\n edit_configs()\n sys.exit(0)\n\n if not any([args.TV, args.Movie, args.Music]):\n print('No media type flag set')\n sys.exit(1)\n\n if len(args.files) == 0:\n print('No files given to tag')\n sys.exit(2)",
"def validate(self, *args):\n pass",
"def check_no_arguments_passed(args) -> bool:\n for arg in vars(args):\n if getattr(args, arg) is not None:\n return False\n return True",
"def vararg(self):\n return _core.LLVMIsFunctionVarArg(self.ptr) != 0",
"def _validate_arguments(kw):\n X = OMIC.parse(kw.get('X'))\n group_by = kw.get('group_by')\n if group_by is not None:\n group_by = OMIC.parse(group_by).name\n else:\n group_by = 'none'\n rank_genes = kw.get('rank_genes')\n clustering = kw.get('clustering')\n log = kw.get('log')\n if rank_genes:\n assert X == OMIC.transcriptomic, \\\n f\"Only visualize transcriptomic in case of rank_genes>0, but given: {X.name}\"\n title = '_'.join(i for i in [\n X.name, group_by,\n str(clustering), ('rank' if rank_genes else ''), ('log' if log else 'raw')\n ] if len(i) > 0)\n return title",
"def _check_args(self, **kwds):\n # Danger: accessing locals creates a hidden cache of references\n # https://bugs.python.org/issue6116\n caller_locals = inspect.currentframe().f_back.f_locals\n updates = {}\n for kwd, types in kwds.items():\n if not isinstance(types, tuple):\n types = (types,)\n \n val = caller_locals[kwd]\n \n # If the arg is not already one of the required types, then see if\n # it can be converted.\n if not isinstance(val, types):\n for ctype in (float, int):\n if ctype in types:\n try:\n val = ctype(val)\n break\n except Exception:\n pass\n \n # If no conversions were possible, then raise TypeError\n if not isinstance(val, types):\n names = tuple([typ.__name__ for typ in types])\n if len(names) > 2:\n names = ', '.join(names[:-1]) + ', or ' + names[-1]\n else:\n names = ' or '.join(names)\n raise TypeError(\"Argument %s must be %s (got %s).\" % \n (kwd, names, type(caller_locals[kwd]).__name__))",
"def _argcheck(self, mu, eta):\n return mu >= 0.0 and eta >= 0.0 and eta <= 1.0",
"def test_neoxargs_duplicates(self):\n self.assertTrue(NeoXArgs.validate_keys())",
"def check_args(self, *args):\n if self._nb_args >= 0 and len(args) != self._nb_args:\n raise ValueError(\n \"Incorrect number of parameters specified. \"\n \"Got {}, expected {}.\".format(len(args), self._nb_args)\n )",
"def cli_inputs_check(args):\n \n# list_args = [\"--grants\", \"--affiliations\", \"--cc_email\"]\n# \n# for arg in list_args:\n# if args[arg]:\n# args[arg] = args[arg].split(\",\")\n# \n# int_args = [\"--cutoff_year\"]\n# \n# for arg in int_args:\n# if args[arg]:\n# try:\n# args[arg] = int(args[arg])\n# except:\n# pass\n \n tracker_validate(instance=args, schema=tracker_schema.cli_schema, format_checker=jsonschema.FormatChecker())",
"def test_bad_cosmo(self):\n self.assertEqual(check_args(self.bad_cosmo), {})",
"def _check_qargs(self, qargs):\n if not all(isinstance(i, tuple) and\n isinstance(i[0], QuantumRegister) and\n isinstance(i[1], int) for i in qargs):\n raise QiskitError(\"qarg not (QuantumRegister, int) tuple\")\n if not all(self.has_register(i[0]) for i in qargs):\n raise QiskitError(\"register not in this circuit\")\n for qubit in qargs:\n qubit[0].check_range(qubit[1])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
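A standalone sketch of the cross-check in the _check_x_var record above: whichever variable is plotted on the x-axis, the other one must be pinned to a value. Names are illustrative and the function is written free-standing rather than as a method:

def check_x_var(x_var, omp=None, mpi=None):
    # the variable *not* plotted on the x-axis must be supplied
    required = {'omp': ('mpi', mpi), 'mpi': ('omp', omp)}
    if x_var not in required:
        raise ValueError(f"x_var must be 'omp' or 'mpi', got {x_var!r}")
    name, value = required[x_var]
    if value is None:
        raise ValueError(f"must specify {name} if x_var='{x_var}'")

check_x_var('omp', mpi=16)        # ok: mpi is pinned
try:
    check_x_var('omp', omp=4)     # missing mpi -> error
except ValueError as err:
    print(err)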
Set axis properties for subplot (see plot_multiple)
|
def _set_ax_subplot(self, axes, x_var, y_var, row, col, omp,
x_scale, y_scale):
ax = axes[row, col]
nrows = axes.shape[0]
ncols = axes.shape[1]
if col == 0:
self._set_ax_text(ax=ax, omp=omp, fixed_var='omp')
if self.scaling_type == 'strong':
self._set_ax_legend(ax=ax)
if row == 0:
self._set_ax_title(ax=ax)
if self.scaling_type == 'weak':
self._set_ax_legend(ax=ax)
if row == nrows - 1:
ax.set_xlabel(self.config['plot']['labels'][x_var])
ax.set_ylabel(self.config['plot']['labels'][y_var])
self._set_ax_scale(ax=ax, x_var=x_var, y_var=y_var,
x_scale=x_scale, y_scale=y_scale)
self._set_ax_xticks(ax=ax)
self._set_ax_dashed(ax=ax, y_var=y_var)
|
[
"def configure_axes(fig, x_axis_kwargs, y_axis_kwargs):\n fig.update_xaxes(showline=True, linewidth=1, linecolor='black',\n ticks='outside')\n if x_axis_kwargs:\n fig.update_xaxes(**x_axis_kwargs)\n fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#CCC')\n fig.update_yaxes(showline=True, linewidth=1, linecolor='black',\n ticks='outside')\n if y_axis_kwargs:\n fig.update_yaxes(**y_axis_kwargs)",
"def set_axes(self, axes, index):\r\n try:\r\n self.sub_plots[index].axes = axes\r\n except IndexError:\r\n raise IndexError, \"No sub-plot exists at index:{0!s}\".format(index)\r\n \r\n # Call the reload to load the proper settings to the new axes object\r\n self.sub_plots[index].reload()",
"def setup_axes(*axes, **kwargs):\n\n # Handle some shortcut syntaxes\n if not_empty(kwargs, 'xticks') and not not_empty(kwargs, 'xticklabels'):\n kwargs['xticklabels'] = kwargs['xticks']\n\n if not_empty(kwargs, 'yticks') and not not_empty(kwargs, 'yticklabels'):\n kwargs['yticklabels'] = kwargs['yticks']\n\n if not_empty(kwargs, 'xticks'):\n try:\n kwargs['xticks'] = [float(x) for x in kwargs['xticks'].split()]\n except (AttributeError):\n pass\n\n if not_empty(kwargs, 'xticklabels'):\n try:\n kwargs['xticklabels'] = kwargs['xticklabels'].split()\n except (AttributeError, KeyError):\n pass\n\n if not_empty(kwargs, 'yticks'):\n try:\n kwargs['yticks'] = [float(x) for x in kwargs['yticks'].split()]\n except (AttributeError, KeyError):\n pass\n if not_empty(kwargs, 'yticklabels'):\n try:\n kwargs['yticklabels'] = kwargs['yticklabels'].split()\n except (AttributeError, KeyError):\n pass\n \n if not_empty(kwargs, 'rticks'):\n try:\n kwargs['rticks'] = [float(x) for x in kwargs['rticks'].split()]\n except (AttributeError, KeyError):\n pass\n if not_empty(kwargs, 'rticklabels'):\n try:\n kwargs['rticklabels'] = kwargs['rticklabels'].split()\n except (AttributeError, KeyError):\n pass\n \n # Setup each axis\n for ax in axes:\n if not_empty(kwargs, 'title') and ax.primary:\n # Only put title on the top set of axes\n ax.set_title(kwargs['title'])\n\n if ax.ratio:\n # Use ratio y-axis values\n if not_empty(kwargs, 'rmin'):\n ax.set_ylim(bottom=kwargs['rmin'])\n if not_empty(kwargs, 'rmax'):\n ax.set_ylim(top=kwargs['rmax'])\n if not_empty(kwargs, 'rlabel'):\n ax.set_ylabel(kwargs['rlabel'], y=1, ha='right')\n else:\n # Use normal y-axis values\n if not_empty(kwargs, 'ymin'):\n ax.set_ylim(bottom=float(kwargs['ymin']))\n if not_empty(kwargs, 'ymax'):\n ax.set_ylim(top=float(kwargs['ymax']))\n if not_empty(kwargs, 'ylabel'):\n ax.set_ylabel(kwargs['ylabel'], y=1, ha='right')\n if not_empty(kwargs, 'yticks'):\n ax.set_yticks(kwargs['yticks'])\n if not_empty(kwargs, 'yticklabels'):\n if 'ytickrot' in kwargs:\n ax.set_yticklabels(kwargs['yticklabels'], rotation=kwargs['ytickrot'])\n else:\n ax.set_yticklabels(kwargs['yticklabels'])\n\n if not_empty(kwargs, 'xmin'):\n ax.set_xlim(left=float(kwargs['xmin']))\n if not_empty(kwargs, 'xmax'):\n ax.set_xlim(right=float(kwargs['xmax']))\n\n if not_empty(kwargs, 'xlabel'):\n ax.set_xlabel(kwargs['xlabel'], x=1, ha='right') \n if not_empty(kwargs, 'xticks'):\n ax.set_xticks(kwargs['xticks'])\n if not_empty(kwargs, 'xticklabels'):\n if 'xtickrot' in kwargs:\n ax.set_xticklabels(kwargs['xticklabels'], rotation=kwargs['xtickrot'])\n else:\n ax.set_xticklabels(kwargs['xticklabels'])\n\n if ax.get_xscale() != 'log':\n ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())\n if ax.get_yscale() != 'log':\n if 'yticks' not in kwargs or not kwargs['yticks']:\n ax.yaxis.set_major_locator(ticker.MaxNLocator(5, prune='upper' if (not ax.ratio and not ax.primary) else None))\n ax.yaxis.set_minor_locator(ticker.AutoMinorLocator())\n ax.yaxis.set_label_coords(-0.15,1)\n for spine in ax.spines.values(): \n spine.set_zorder(100)\n\n # Clean up for ratio plots\n if axes[-1].ratio:\n for ax in axes[:-1]:\n for tick in ax.get_xticklabels():\n tick.set_visible(False)\n ax.set_xlabel(\"\")\n axes[-1].yaxis.set_major_locator(ticker.MaxNLocator(4, prune='upper'))\n # Show a line at 1.0 if it's in range\n if axes[-1].get_ylim()[0] < 1.0 < axes[-1].get_ylim()[1]:\n axes[-1].plot(axes[-1].get_xlim(), [1.0,1.0], color='#B3B3B3', linewidth=2.0,linestyle='--')\n axes[-1].yaxis.grid(True) # 
Show lines on tick marks\n if not_empty(kwargs, 'ratio_sb') and kwargs['ratio_sb']:\n axes[-1].yaxis.get_major_formatter().set_powerlimits((-1,1))\n if not_empty(kwargs, 'rticks'):\n axes[-1].set_yticks(kwargs['rticks'])\n if not_empty(kwargs, 'rticklabels'):\n axes[-1].set_yticklabels(kwargs['rticklabels'])",
"def subplot(self, *args, **kwargs):\n self.axes = plt.subplot(*args, **kwargs)\n return self",
"def __set_subplots(self):\n self.logger.debug(\"running\")\n if len(self.__plot_names) < 1:\n return\n r = len(self.__plot_names)\n c = 1\n for i in range(0, r):\n self.__plots[self.__plot_names[i]] = [(r, c, i + 1), True]\n self.logger.debug(\"done\")",
"def set_axes_parameters(self):\n # update date, values, and name to the current device\n self.dates = self.plots[self.curr_pos][0]\n self.values = self.plots[self.curr_pos][1]\n\n # set the title of the figure and axes\n self.fig.canvas.set_window_title(self.title)\n name = self.names[self.curr_pos]\n self.ax.set_title(name)\n self.ax.set_xlabel('Time (hour)')\n self.ax.set_ylabel('Frame Size (bytes)')\n\n # setup formatting for datetime axes\n self.ax.xaxis.set_major_locator(self.hours)\n self.ax.xaxis.set_major_formatter(self.hourFmt)\n self.ax.format_xdata = mdates.DateFormatter(self.date_format)\n self.ax.set_xlim(self.dates.min()-.001, self.dates.max()+.001)\n self.ax.grid(True)\n self.fig.autofmt_xdate()",
"def setupAxes(widget=None):\n\n if widget is None:\n return None\n\n for axe in widget.figure.axes:\n axe.cla()\n del axe\n gc.collect()\n\n widget.figure.clf()\n # used constrained_layout = True instead\n # figure.subplots_adjust(left=0.075, right=0.95, bottom=0.1, top=0.975)\n axe = widget.figure.add_subplot(1, 1, 1, facecolor=None)\n\n axe.set_facecolor((0, 0, 0, 0))\n axe.set_xlim(0, 360)\n axe.set_ylim(0, 90)\n axe.grid(True, color='#404040')\n axe.tick_params(axis='x',\n bottom=True,\n colors='#2090C0',\n labelsize=12)\n axeTop = axe.twiny()\n axeTop.set_facecolor((0, 0, 0, 0))\n axeTop.set_xlim(0, 360)\n axeTop.tick_params(axis='x',\n top=True,\n colors='#2090C0',\n labelsize=12)\n axeTop.set_xticks(np.arange(0, 361, 45))\n axeTop.grid(axis='both', visible=False)\n axeTop.set_xticklabels(['N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW', 'N'])\n axeTop.spines['bottom'].set_color('#2090C0')\n axeTop.spines['top'].set_color('#2090C0')\n axeTop.spines['left'].set_color('#2090C0')\n axeTop.spines['right'].set_color('#2090C0')\n axe.set_xticks(np.arange(0, 361, 45))\n axe.set_xticklabels(['0', '45', '90', '135', '180', '225', '270', '315', '360'])\n axe.tick_params(axis='y',\n colors='#2090C0',\n which='both',\n labelleft=True,\n labelright=True,\n labelsize=12)\n axe.set_xlabel('Azimuth in degrees',\n color='#2090C0',\n fontweight='bold',\n fontsize=12)\n axe.set_ylabel('Altitude in degrees',\n color='#2090C0',\n fontweight='bold',\n fontsize=12)\n return axe",
"def adjust_axes(self):\n\n # reduce number of ticks\n self.ax.yaxis.set_major_locator(MaxNLocator(2))\n self.ax.xaxis.set_major_locator(MaxNLocator(2))\n\n # log format for tick labels\n self.ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))\n\n self.ax.set_xlabel('Time')\n self.ax.set_ylabel('Intensity')\n\n # despine for aesthetics\n self.ax.spines['right'].set_visible(False)\n self.ax.spines['top'].set_visible(False)",
"def EditAxes(self, event=None):\n from terapy.core.axedit import AxesPropertiesDialog\n old_labels = [x.copy() for x in self.labels]\n dlg = AxesPropertiesDialog(self,axlist=old_labels)\n if dlg.ShowModal() == wx.ID_OK:\n labels = dlg.GetValue()\n dlg.Destroy()\n ConvertUnits(self.labels, labels)\n for x in self.plots:\n x.array.Rescale(new_labels=self.labels, defaults=old_labels)\n x.SetData(x.array)\n \n wx.CallAfter(self.Update)\n else:\n dlg.Destroy()",
"def set_fancy_plot(plt):\n\n plt.rcParams.update({\n 'font.size': 14, \n 'font.family': 'serif',\n\n 'lines.linewidth': 1,\n 'lines.markersize': 8.0,\n 'figure.subplot.wspace': 0.,\n 'axes.linewidth': 0.5,\n 'axes.formatter.use_mathtext': True,\n\n 'axes.edgecolor': '#111',\n 'axes.facecolor': '#fafafa',\n\n\n 'axes.xmargin': 0.1,\n 'xtick.direction': 'in',\n 'xtick.major.size': 9.,\n 'xtick.major.pad': 5.,\n 'xtick.minor.size': 4.,\n 'xtick.top': True,\n 'xtick.minor.visible': True,\n 'xtick.major.width': 0.5,\n 'xtick.minor.width': 0.5,\n\n 'axes.ymargin': 0.1,\n 'ytick.direction': 'in',\n 'ytick.major.size': 9,\n 'ytick.major.pad': 5.,\n 'ytick.minor.size': 4,\n 'ytick.right': True,\n 'ytick.minor.visible': True,\n 'ytick.major.width': 0.5,\n 'ytick.minor.width': 0.5,\n })",
"def _handle_axes(self, drawable, option):\n # If we already have an axes object, ignore this one\n if self._axes_object is not None:\n return\n\n # Grab the histogram used for axes style/range manipulation\n if is_stack(drawable) or is_graph(drawable):\n axes_histogram = drawable.GetHistogram()\n else:\n axes_histogram = drawable\n self._axes_object = axes_histogram\n\n # Grab the histogram used for title manipulation\n if is_stack(drawable):\n title_histogram = drawable.GetHists()[0]\n else:\n title_histogram = drawable\n\n # Grab axes\n x_axis, y_axis = axes_histogram.GetXaxis(), axes_histogram.GetYaxis()\n\n # Grab titles from first histogram if not set explicitly\n if self._x_title is None:\n self._x_title = title_histogram.GetXaxis().GetTitle()\n if self._y_title is None:\n self._y_title = title_histogram.GetYaxis().GetTitle()\n\n if self._x_range is not None:\n #x_axis.SetRangeUser(*self._x_range)\n x_axis.SetLimits(*self._x_range)\n\n # Style x-axis, or hide it if this plot has a ratio plot\n if self._ratio_plot:\n x_axis.SetLabelOffset(999)\n x_axis.SetTitleOffset(999)\n else:\n x_axis.SetTitle(self._x_title)\n x_axis.SetTitleSize(self.PLOT_X_AXIS_TITLE_SIZE)\n x_axis.SetTitleOffset(self.PLOT_X_AXIS_TITLE_OFFSET)\n\n # Style y-axis\n if self._ratio_plot:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE_WITH_RATIO)\n y_axis.SetLabelOffset(self.PLOT_Y_AXIS_LABEL_OFFSET)\n y_axis.SetTitle(self._y_title)\n y_axis.SetTitleSize(\n (self.PLOT_Y_AXIS_TITLE_SIZE_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_SIZE)\n )\n y_axis.SetTitleOffset(\n (self.PLOT_Y_AXIS_TITLE_OFFSET_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_OFFSET)\n )\n\n # Redraw the drawable with the new style\n drawable.Draw(option)",
"def subplot(self, *args, **kwargs):\n plt.subplot(*args, **kwargs)\n return self",
"def build_axes(ratio=False, double=False, **kwargs):\n # Two axes and ratio\n if ratio and double:\n fig = plt.figure(figsize=(8.0, 8.0))\n gs = gridspec.GridSpec(3,1,height_ratios=[2,2,1], hspace=0.0, wspace=0.0)\n ax = plt.subplot(gs[0])\n axes = [ax, plt.subplot(gs[1], sharex=ax), plt.subplot(gs[2], sharex=ax)]\n\n # Single axes and ratio\n elif ratio:\n fig = plt.figure(figsize=(9.0, 6.0))\n gs = gridspec.GridSpec(2,1,height_ratios=[3,1], hspace=0.0, wspace=0.0)\n ax = plt.subplot(gs[0])\n axes = [ax, plt.subplot(gs[1], sharex=ax)]\n\n # Single axis\n else:\n if 'customization' in kwargs and kwargs['customization'] == 'jes_cor':\n fig = plt.figure(figsize=(8.0,7.0))\n else:\n fig = plt.figure()\n axes = [fig.add_subplot(111)]\n\n for ax in axes:\n ax.ratio = False\n ax.primary = False\n ax.secondary = False\n ax.extra_handles = []\n axes[0].primary = True\n if double: axes[-2].secondary = True\n if ratio: axes[-1].ratio = True\n\n return fig, axes",
"def _setup_subplots(self):\n\n # 1. Initialize subplots\n # ----------------------\n\n # squeeze keyword: https://stackoverflow.com/questions/44598708/\n # do not share axes, that makes problems if the grid is incomplete\n subplots_args = {\n \"nrows\": self._nrows,\n \"ncols\": self._ncols,\n # todo: this is somewhat problematic, because this won't add space\n # for titles etc. Please do this differently\n \"figsize\": (\n self._ncols * self.figsize[0],\n self._nrows * self.figsize[1],\n ),\n \"squeeze\": False,\n }\n if self._ndim == 3:\n subplots_args[\"subplot_kw\"] = {\"projection\": \"3d\"}\n\n self._fig, self._axs = plt.subplots(**subplots_args)\n\n # 2. Setup frames\n # ---------------\n\n for isubplot in range(self._nrows * self._ncols):\n if self._ndim == 1:\n for loc in [\"top\", \"left\", \"right\"]:\n self._axli[isubplot].spines[loc].set_visible(False)\n self._axli[isubplot].spines[\"bottom\"].set_position(\"center\")\n\n # 3. Hide plots\n # -------------\n\n # Since we initialize a grid of subplots, but might have less\n # subplots to actually show, we hide some of them here.\n\n for isubplot in range(self._nsubplots, self._nrows * self._ncols):\n self._axli[isubplot].set_visible(False)\n\n # 4. Setup labels\n # ---------------\n\n # Number of hidden plots\n ihidden = self._nrows * self._ncols - self._nsubplots + 1\n\n # Column number from which we have to start hiding plots\n # (note that all hidden plots are in the last row)\n icol_hidden = self._ncols - ihidden\n\n self.log.debug(\"ihidden = {}\".format(ihidden))\n self.log.debug(\"icol_hidden = {}\".format(icol_hidden))\n\n if self._ndim == 1:\n for isubplot in range(self._nrows * self._ncols):\n self._axli[isubplot].set_yticks([])\n\n irow = isubplot // self._ncols\n icol = isubplot % self._ncols\n\n if irow == self._nrows - 2 and icol >= icol_hidden:\n self._axli[isubplot].set_xlabel(\n self.data._get_axis_label(self._axis_columns[0])\n )\n elif irow == self._nrows - 1 and icol <= icol_hidden:\n self._axli[isubplot].set_xlabel(\n self.data._get_axis_label(self._axis_columns[0])\n )\n else:\n self._axli[isubplot].set_xticklabels([])\n\n elif self._ndim == 2:\n for isubplot in range(self._nrows * self._ncols):\n irow = isubplot // self._ncols\n icol = isubplot % self._ncols\n\n # Set labels and ticks:\n\n if icol == 0:\n self._axli[isubplot].set_ylabel(\n self.data._get_axis_label(self._axis_columns[1])\n )\n else:\n self._axli[isubplot].set_yticklabels([])\n\n if irow == self._nrows - 2 and icol >= icol_hidden:\n self._axli[isubplot].set_xlabel(\n self.data._get_axis_label(self._axis_columns[0])\n )\n elif irow == self._nrows - 1 and icol <= icol_hidden:\n self._axli[isubplot].set_xlabel(\n self.data._get_axis_label(self._axis_columns[0])\n )\n else:\n self._axli[isubplot].set_xticklabels([])\n\n elif self._ndim == 3:\n for isubplot in range(self._nsubplots):\n self._axli[isubplot].set_xlabel(\n self.data._get_axis_label(self._axis_columns[0])\n )\n self._axli[isubplot].set_ylabel(\n self.data._get_axis_label(self._axis_columns[1])\n )\n self._axli[isubplot].set_zlabel(\n self.data._get_axis_label(self._axis_columns[2])\n )\n\n else:\n raise ValueError(\"Unsupported dimension {}\".format(self._ndim))\n\n # 3. Add title to subplots\n # ------------------------\n\n for isubplot in range(self._nsubplots - int(self.draw_legend)):\n self._axli[isubplot].set_title(self._plot_title(isubplot))\n\n # 4. 
Set ranges\n # --------------\n\n # Set the xrange explicitly in order to not depend\n # on which get_clusters are shown etc.\n\n for isubplot in range(self._nsubplots):\n if self._ndim == 1:\n self._axli[isubplot].set_ylim([-1, 1])\n if self._ndim >= 1:\n self._axli[isubplot].set_xlim(self._get_lims(0))\n if self._ndim >= 2:\n self._axli[isubplot].set_ylim(self._get_lims(1))\n if self._ndim >= 3:\n self._axli[isubplot].set_zlim(self._get_lims(2))",
"def config_axis(self, axis, ticks, labels=None):\n if axis == \"x\":\n self.ax.set_xticks(ticks)\n if labels:\n self.ax.set_xticklabels(labels, minor=False)\n if axis == \"y\":\n self.ax.set_yticks(ticks)\n if labels:\n self.ax.set_yticklabels(labels, minor=False)",
"def set_subplots(prefix=None, hue=\"\", confidence=\"\", force=True):\n\n Subplotting._last_args = (prefix, hue, confidence, force)\n\n if Subplotting.subplotting_declared and not force:\n return\n\n if prefix:\n hue = prefix + \"Preference\"\n confidence = prefix + \"Selectivity\"\n\n for name in Subplotting.plotgroups_to_subplot:\n if name in plotgroups.keys():\n pg = plotgroups[name]\n if name in pg.plot_templates.keys():\n pt = pg.plot_templates[name]\n pt[\"Hue\"] = hue\n pt[\"Confidence\"] = confidence\n else:\n Subplotting().warning(\"No template %s defined for plotgroup\"\n \" %s\" % (name, name))\n else:\n Subplotting().warning(\"No plotgroup %s defined\" % name)\n\n Subplotting.subplotting_declared = True",
"def plotSubplot(ax,EOverEcs, GammaNum,GammaAn1,GammaAn2, iD0, iD1, iAr, iNe, setLeg=False, setXLabel=False, setYLabel=False, fig=None):\n\n l1,=ax.plot(EOverEcs,GammaNum[:,iD0,iD1,iAr,iNe], 'b' )\n l2,=ax.plot(EOverEcs,GammaAn1[:,iD0,iD1,iAr,iNe], 'r' )\n l3,=ax.plot(EOverEcs,GammaAn2[:,iD0,iD1,iAr,iNe], 'g' )\n\n if setXLabel:\n ax.set_xlabel(r'$E/E_{c,\\mathrm{tot}}$')\n if setYLabel:\n ax.set_ylabel(r'$\\Gamma$ [s$^{-1}$]')\n\n if setLeg and fig:\n ax.legend([l1,l2,l3],['DREAM kinetic','DREAM formula','NF 2019 formula'], loc=\"upper left\")",
"def show(self):\r\n # Initialise the spider plot\r\n plt.clf()\r\n axis_pic = plt.subplot(111, polar=True)\r\n axis_pic.spines['polar'].set_visible(False)\r\n axis_pic.set_yticklabels([])\r\n\r\n # If you want the first axis to be on top:\r\n axis_pic.set_theta_offset(pi / 2)\r\n axis_pic.set_theta_direction(-1)\r\n\r\n # Draw one axe per variable + add labels labels yet\r\n plt.xticks(self._angles[:-1], self._metrics_name)\r\n\r\n # Draw y labels\r\n axis_pic.set_rlabel_position(0)\r\n if self._scale == 'hide':\r\n plt.yticks([0.0], color=\"grey\", size=7)\r\n elif self._scale == 'norm':\r\n plt.yticks([0.2, 0.4, 0.6, 0.8],\r\n [\"0.2\", \"0.4\", \"0.6\", \"0.8\"],\r\n color=\"grey\", size=7)\r\n elif self._scale == 'sparse':\r\n plt.yticks([0.5], [\"0.5\"], color=\"grey\", size=7)\r\n elif self._scale == 'dense':\r\n ticks = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\r\n labels = [\"0.1\", \"0.2\", \"0.3\", \"0.4\", \"0.5\", \"0.6\",\r\n \"0.7\", \"0.8\", \"0.9\"]\r\n plt.yticks(ticks, labels, color=\"grey\", size=7)\r\n else:\r\n # default\r\n plt.yticks([0.0], color=\"grey\", size=7)\r\n plt.ylim(0, 1)\r\n\r\n # plot border\r\n axis_pic.plot(self._angles, [1]*(self._nb_var + 1), color='grey',\r\n linewidth=1, linestyle='solid')\r\n\r\n for i in range(len(self._labels)):\r\n axis_pic.plot(self._angles, self._metrics_data[i], linewidth=1,\r\n linestyle='solid', label=self._labels[i])\r\n axis_pic.fill(self._angles, self._metrics_data[i], alpha=0.1)\r\n\r\n # Add legend\r\n plt.legend(loc='upper right', bbox_to_anchor=(0., 0.))\r\n plt.title(self._title, y=1.1, color='g')\r\n plt.show()",
"def reload(self):\r\n # recreate all the settings, as a new axes object has been set\r\n self.axes.grid(self.parent.grid, color=self.parent.grid_color)\r\n self.axes.grid(self.parent.grid)\r\n self.axes.set_title(self.title)\r\n self.axes.set_xlabel(self.parent.x_label)\r\n self.axes.set_ylabel(self.parent.y_label)\r\n self.axes.xaxis.set_major_formatter(self.formatter[0])\r\n self.axes.yaxis.set_major_formatter(self.formatter[1])\r\n if self.parent.secondary_y_axis:\r\n self.create_y2_axis(self.y2_label)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
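Stripped of the class internals, the row/column checks in the _set_ax_subplot record above follow a common grid-labelling pattern: titles only on the top row, x-labels only on the bottom row, y-labels only on the left column. A minimal sketch of that pattern, not the method itself; the labels and grid shape below are made up:

import matplotlib.pyplot as plt

nrows, ncols = 3, 2
fig, axes = plt.subplots(nrows, ncols, sharex=True)

for row in range(nrows):
    for col in range(ncols):
        ax = axes[row, col]
        if row == 0:
            ax.set_title(f"column {col}")    # titles only on the top row
        if row == nrows - 1:
            ax.set_xlabel("MPI ranks")       # x-labels only on the bottom row
        if col == 0:
            ax.set_ylabel("time (s)")        # y-labels only on the left column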
Plot dashed line on axis
|
def _set_ax_dashed(self, ax, y_var):
x = [1, self.max_cores]
if y_var == 'efficiency':
ax.plot(x, [100, 100], ls='--', color='black')
elif y_var == 'speedup':
ax.plot(x, x, ls='--', color='black')
|
[
"def draw_dashed_line(surf, color, start_pos, end_pos, width=1, dash_length=10):\r\n origin = Point(start_pos)\r\n target = Point(end_pos)\r\n displacement = target - origin\r\n length = len(displacement)\r\n slope = displacement/length\r\n\r\n for index in range(0, int(length/dash_length), 2):\r\n start = origin + (slope * index * dash_length)\r\n end = origin + (slope * (index + 1) * dash_length)\r\n pygame.draw.line(surf, color, start.get(), end.get(), width)",
"def dashed(self):\n self.ps('[1 2] .5 setdash')",
"def dashed_line(img, pt1, pt2, color, dash_length=15, dash_interval=15, **kwargs):\n\n np_pt1 = np.array(pt1)\n np_pt2 = np.array(pt2)\n line_length = np.sqrt(((np_pt1 - np_pt2) ** 2).sum())\n get_point = lambda s: np_pt1 + s / line_length * (np_pt2 - np_pt1)\n # returns a point which is s units away from pt1\n\n covered = 0\n segments = []\n while covered < line_length:\n p1 = get_point(covered)\n covered = min(line_length, covered + dash_length)\n p2 = get_point(covered)\n covered += dash_interval\n segments.append((tuple(p1.astype(int)), tuple(p2.astype(int))))\n\n # making last dash go until the end\n\n segments[-1] = (segments[-1][0], tuple(get_point(line_length).astype(int)))\n\n for p1, p2 in segments:\n cv2.line(img, p1, p2, color, **kwargs)",
"def draw_line(outfile):\n x = numpy.arange(10)\n y = x\n\n pylab.plot(x,y)\n pylab.xlabel('X')\n pylab.ylabel('Y')\n pylab.title('Straight line')\n pylab.draw()\n pylab.savefig(outfile)",
"def plotline(ax, z1, z2, *args, **kwargs):\n return ax.plot((z1.real, z2.real), (z1.imag, z2.imag), *args, **kwargs)",
"def plot_line(line, symbol='k-', **kwargs):\n x, y = zip(*line.GetPoints())\n plt.plot(x, y, symbol, **kwargs)",
"def line_dash_set(self):\n return self._data.get(b'strokeStyleLineDashSet')",
"def addOriginPointer(ax):\n ax.axhline(0, c='#606060', linestyle='dashed', linewidth=0.2)\n ax.axvline(0, c='#606060', linestyle='dashed', linewidth=0.2)\n ax.scatter(0, 0, 25, c='#606060')",
"def plotly_line_plot(data, title, xaxis_label, yaxis_label):",
"def line_plot(self, observable):\n\n data = getattr(self, observable)\n\n fig, ax = plt.subplots(figsize=(20,10))\n ax.plot(data)\n\n observable_title = (observable\n .replace('_', ' ')\n .title()\n )\n ax.set_title(f\"Timeseries: {observable_title}\", fontsize=28)\n ax.set_xlabel(\"Time\", fontsize=16)\n ax.set_ylabel(self.xlabels[observable], fontsize=16)",
"def draw_line(p1, p2, *varargin, **others):\n \n plt.plot([p1[0], p2[0]], [p1[1], p2[1]], *varargin, **others)",
"def print_dashed_line(line_length):\n print '\\n', '-' *line_length",
"def plot_line(self, x, y, data, fig=None):\n\n if fig is None:\n fig = plt.figure(self._figure_name)\n fig.clf()\n fig.add_subplot(111)\n ax = fig.gca()\n ax.set_title(\n \"{0:s} line={1:d}\".format(\n self.lineplot_pars[\"title\"][0], int(\n y + 1)))\n ax.set_xlabel(self.lineplot_pars[\"xlabel\"][0])\n ax.set_ylabel(self.lineplot_pars[\"ylabel\"][0])\n\n if not self.lineplot_pars[\"xmax\"][0]:\n xmax = len(data[y, :])\n else:\n xmax = self.lineplot_pars[\"xmax\"][0]\n ax.set_xlim(self.lineplot_pars[\"xmin\"][0], xmax)\n\n if self.lineplot_pars[\"logx\"][0]:\n ax.set_xscale(\"log\")\n if self.lineplot_pars[\"logy\"][0]:\n ax.set_yscale(\"log\")\n\n if self.lineplot_pars[\"pointmode\"][0]:\n ax.plot(data[y, :], self.lineplot_pars[\"marker\"][0])\n else:\n ax.plot(data[y, :])\n\n plt.draw()\n plt.show(block=False)\n time.sleep(self.sleep_time)",
"def plot_line(slope, intercept):\n axes = plt.gca()\n x_vals = np.array(axes.get_xlim())\n y_vals = intercept + slope * x_vals\n plt.plot(x_vals, y_vals, '--')",
"def show_lineplot(self, *args, **kwargs):\n raise NotImplementedError()",
"def create_horizontal_line():\n d = Drawing(100, 1)\n d.add(Line(0, 0, 1000, 0))\n return d",
"def lateralBracingVisualize(x_g,y_g):\n for i in range(len(x_g)-1):\n plt.plot([x_g[i],x_g[i+1]],[y_g[i],-y_g[i+1]], color='black',linestyle='--')\n plt.plot([x_g[i],x_g[i+1]],[-y_g[i],y_g[i+1]], color='black',linestyle='--')",
"def _draw_line(self, ax, origin, end, **kwargs):\n try:\n if origin in self.weyl_points:\n o1, o2, o3 = self.weyl_points[origin]\n else:\n o1, o2, o3 = origin\n except ValueError: # pragma: nocover\n raise ValueError(\"origin '%s' is not in weyl_points \"\n \"or a list (c1, c2, c3)\" % origin)\n try:\n if end in self.weyl_points:\n c1, c2, c3 = self.weyl_points[end]\n else:\n c1, c2, c3 = end\n except ValueError: # pragma: nocover\n raise ValueError(\"origin '%s' is not in weyl_points \"\n \"or a list (c1, c2, c3)\" % origin)\n ax.plot([o1, c1], [o2, c2], [o3, c3], **kwargs)",
"def plot_time_lines(ax, zz):\n for day in [\"2020-03-09\"]:\n ax.axvline(day, ls=\"--\", color=\"blue\", lw=1.5)\n plt.text(day, zz, \"LOCKDOWN\", fontsize=14, rotation=45)\n for day in [\"2020-05-04\"]:\n ax.axvline(day, ls=\"--\", color=\"blue\", lw=1.5)\n plt.text(day, zz, \"PHASE 2\", fontsize=14, rotation=45)\n for day in [\"2020-06-03\"]:\n ax.axvline(day, ls=\"--\", color=\"blue\", lw=1.5)\n plt.text(day, zz, \"PHASE 3\", fontsize=14, rotation=45)\n for day in [\"2020-11-06\"]:\n ax.axvline(day, ls=\"--\", color=\"blue\", lw=1.5)\n plt.text(day, zz, \"LOCKDOWN\", fontsize=14, rotation=45)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
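The dashed lines drawn in the _set_ax_dashed record above mark the ideal targets: a flat 100% line for parallel efficiency and y = x for speedup. A standalone sketch of the speedup case; the core counts and measurements below are made up:

import matplotlib.pyplot as plt

max_cores = 64
cores = [1, 2, 4, 8, 16, 32, 64]
speedup = [1, 1.9, 3.6, 6.8, 12.5, 22.0, 35.0]   # illustrative measurements

fig, ax = plt.subplots()
ax.plot(cores, speedup, marker='o', label='measured')
# ideal-speedup reference: y = x, drawn as a dashed black line
ax.plot([1, max_cores], [1, max_cores], ls='--', color='black', label='ideal')
ax.set_xlabel('cores')
ax.set_ylabel('speedup')
ax.legend()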
Create a fake user id. This is used when FakeIdentServer is active and userid was initially set to None; it generates a random UUID as specified by RFC 4122.
|
def generate_fake_userid():
return str(uuid.uuid4())
|
[
"def generate_user_uid(user):\n\n return urlsafe_base64_encode(force_bytes(user.pk))",
"def new_id():\n bs = uuid4().bytes\n return urlsafe_b64encode(bs).strip().replace('=', '')",
"def create_unique_id():\n from uuid import uuid4\n\n return str(uuid4())",
"def generate_uuid(self, user_name):\n\t\timport random\n\t\tuuid = \"\"\n\t\tfirst_name, last_name = user_name.split()\n\t\tuuid += first_name[0]",
"def _gen_uuid(self):\r\n return uuid.uuid4().hex",
"def random_id():\n return str(uuid.uuid5(uuid.uuid4(), socket.gethostname())).replace(\"-\", \"\")",
"def gen_uuid():\n return str(uuid.uuid1().hex)",
"def _random_uuid():\n uuid = \"\".join(random.choices(string.hexdigits, k=32))\n return UUID(uuid)",
"def new_system_id():\n return uuid.uuid1().hex",
"def _new_session_id(self):\n return os.urandom(32).encode('hex')",
"def get_uuid():\n return str(UUID(int=random.randint(0, 2**128 - 1))) # nosec",
"def generate_uuid():\n return uuid1(node=random.randint(0, 2**31 - 1))",
"def generate_user_id(num_char):\n letters = string.ascii_lowercase + string.ascii_uppercase + string.digits\n return ''.join(random.choice(letters) for i in range(num_char))",
"def new_id(self) -> str:\n return uuid.uuid4().hex",
"def create_user(self):\n sid = \"#{}\".format(uuid.uuid4()) # the public facing session ID\n uid = \"#{}\".format(uuid.uuid4()) # the private user ID\n user = User(uid)\n\n self.users[uid] = user\n self.sid_to_uid[sid] = [uid, int(time.time())]\n return sid",
"def _generate_id(cls, user_id: str, exploration_id: str) -> str:\n return '%s.%s' % (user_id, exploration_id)",
"def _createIdentifier(bits=160, _urandom=urandom):\n return urandom(bits // 8).encode(\"hex\")",
"def gen_id():\n return \"{:04x}\".format(random.randint(0, int(0xFFFF)))",
"def generate_random_username():\n return os.urandom(100).hex()[:RANDOM_USERNAME_LENGTH]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
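The generate_fake_userid record above leans entirely on uuid.uuid4(), which already yields an RFC 4122 version-4 UUID; a quick standalone check of that property:

import uuid

fake_id = str(uuid.uuid4())
parsed = uuid.UUID(fake_id)

print(fake_id)           # e.g. '4f3c2d4e-...'
print(parsed.version)    # 4 -> random UUID per RFC 4122
print(parsed.variant)    # 'specified in RFC 4122'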
Dispatch a request onto an _IdentChannel instance.
|
def handle_accept(self):
_IdentChannel(self.userid, *self.accept())
|
[
"def dispatch(self, req):\n check_type(req, Request)\n self.messages.put(req)",
"def handle(self) -> None:\n while True:\n raw_command = self.request.recv(1024)\n if not raw_command:\n break\n result = dispatch(self.state, raw_command)\n self.request.send(result)",
"def call(self, request):\n return self.wait(self.send(request))",
"def custom_dispatch(self, *args, **kw):\n self.request.rest_keys = self.request.route_kwargs\n\n action_name = self.request.path.rstrip('/').rsplit('/', 1)[-1]\n generic = not self.resource.get_pk(self.request)\n\n if self.find_action(action_name, generic, self.request.method):\n result = self.call_action_handler(\n self.request.method,\n self.request,\n action_name,\n generic\n )\n else:\n result = self.call_rest_handler(self.request.method, self.request)\n\n return self.response_from_result(result)",
"def dispatch(self, alias, rpc_command, source=None, filter=None):\n session = self._cache.switch(alias)\n try:\n logger.info(\"alias: %s, rpc_command: %s, source: %s, filter: %s\" \n % (alias, rpc_command, source, filter))\n session.dispatch(rpc_command, source, filter)\n except NcclientException as e:\n logger.error(str(e))\n raise str(e)",
"async def dispatch(self, event_name: str, *args, **kwargs) -> Any:\r\n name = \"on_\" + event_name\r\n\r\n log.info(f\"Dispatching event {name!r}, client: {self!r}\")\r\n\r\n try:\r\n method = getattr(self, name)\r\n\r\n except AttributeError:\r\n return\r\n\r\n return await utils.maybe_coroutine(method, *args, **kwargs)",
"def dispatch_request(self,req):\r\n try:\r\n while True:\r\n handler = self.active_handlers.popleft()\r\n try:\r\n return self.send_request_to_handler(req,handler)\r\n finally:\r\n self.active_handlers.append(handler)\r\n except IndexError:\r\n return False",
"def _DiscoveryHandler(self,conn,request):\n node=request.getQuerynode()\n if node:\n nodestr=node\n else:\n nodestr='None'\n handler=self.getDiscoHandler(node,request.getTo())\n if not handler:\n self.DEBUG(\"No Handler for request with jid->%s node->%s ns->%s\"%(request.getTo().__str__().encode('utf8'),nodestr.encode('utf8'),request.getQueryNS().encode('utf8')),'error')\n conn.send(Error(request,ERR_ITEM_NOT_FOUND))\n raise NodeProcessed\n self.DEBUG(\"Handling request with jid->%s node->%s ns->%s\"%(request.getTo().__str__().encode('utf8'),nodestr.encode('utf8'),request.getQueryNS().encode('utf8')),'ok')\n rep=request.buildReply('result')\n if node: rep.setQuerynode(node)\n q=rep.getTag('query')\n if request.getQueryNS()==NS_DISCO_ITEMS:\n # handler must return list: [{jid,action,node,name}]\n if type(handler)==dict: lst=handler['items']\n else: lst=handler(conn,request,'items')\n if lst==None:\n conn.send(Error(request,ERR_ITEM_NOT_FOUND))\n raise NodeProcessed\n for item in lst: q.addChild('item',item)\n elif request.getQueryNS()==NS_DISCO_INFO:\n if type(handler)==dict: dt=handler['info']\n else: dt=handler(conn,request,'info')\n if dt==None:\n conn.send(Error(request,ERR_ITEM_NOT_FOUND))\n raise NodeProcessed\n # handler must return dictionary:\n # {'ids':[{},{},{},{}], 'features':[fe,at,ur,es], 'xdata':DataForm}\n for id in dt['ids']: q.addChild('identity',id)\n for feature in dt['features']: q.addChild('feature',{'var':feature})\n if dt.has_key('xdata'): q.addChild(node=dt['xdata'])\n conn.send(rep)\n raise NodeProcessed",
"def __call__(self, obj, *args, **kwargs):\n if self._can_dispatch(obj, *args, **kwargs):\n self.handler(obj)",
"async def _dispatch(self, client, subcommand=None, **kwargs):\n self.log.debug(\"Got command %s args: %s\", subcommand, kwargs)\n if not subcommand:\n self.machine.bcp.transport.send_to_client(client, \"vpcom_bridge_response\", error=\"command missing\")\n try:\n method = getattr(self, \"vpx_\" + subcommand)\n except AttributeError:\n self.machine.bcp.transport.send_to_client(client, \"vpcom_bridge_response\",\n error=\"Unknown command {}\".format(subcommand))\n return\n\n try:\n result = method(**kwargs)\n # pylint: disable-msg=broad-except\n except Exception as e:\n self.machine.bcp.transport.send_to_client(client, \"vpcom_bridge_response\",\n error=\"Exception: {}\".format(e))\n return\n self.machine.bcp.transport.send_to_client(client, \"vpcom_bridge_response\", result=result)",
"def post(self, *args, **kw):\n return self.custom_dispatch(*args, **kw)",
"async def handle_request(\n self, request: JsonRpcRequest, result_channel: trio.MemorySendChannel,\n ) -> None:\n try:\n handler = self.get_handler(request.method)\n params = request.params\n if isinstance(params, list):\n result = await handler(*params)\n elif isinstance(params, dict):\n result = await handler(**params)\n else:\n result = await handler()\n except JsonRpcException as jre:\n result = jre\n except Exception as exc:\n logger.exception(\n 'An unhandled exception occurred in handler \"%s\"', handler.__name__,\n )\n result = JsonRpcInternalError(\"An unhandled exception occurred.\")\n await result_channel.send((request, result))",
"def _call_method(self) -> None:\n req = self.request\n\n path = req.path[:]\n if not path.startswith(\"/\"):\n path = urlsplit(path).path\n\n target = path[1:].split(\"/\", 1)[0]\n method = getattr(self, target, self.index)\n\n resp = method(req)\n\n if dict(resp.headers).get(\"Connection\") == \"close\":\n # FIXME: Can we kill the connection somehow?\n pass\n\n resp(self)",
"def execute(self, request: PlcRequest) -> Awaitable[PlcResponse]:\n pass",
"async def _identify(self):\n voice_client = self.client\n \n data = {\n 'op' : self.IDENTIFY,\n 'd' : {\n 'server_id' : str(voice_client.channel.guild.id),\n 'user_id' : str(voice_client.client.id),\n 'session_id' : voice_client._session_id,\n 'token' : voice_client._token,\n },\n }\n await self.send_as_json(data)",
"def consume_req(self, request):\n # Spawn a greenlet to sleep briefly with each request and\n # then respond with a result through the remote client.\n log.debug('Remote endpoint got request: %s', str(request))\n greenlet = gevent.spawn(self.process_remote_request, request)\n self._workers.append(greenlet)",
"def dispatch_request(self, urls, request):\n if request.path.startswith('/socket.io'):\n try:\n socketio_manage(request.environ, self._namespaces, request)\n except:\n print(\"Exception while handling socketio connection\")\n return Response()\n else:\n response = urls.dispatch(\n lambda e, v: self._routes[e](\n request, **v))\n if isinstance(response, (unicode, str)):\n headers = {'Content-type': 'text/html'}\n response = Response(response, headers=headers)\n if not response:\n headers = {'Content-type': 'text/html'}\n response = Response('404 Not Found', headers=headers)\n response.status_code = 404\n return response",
"def dispatch_event(self, event):\n self.redis_client.store_event(event)",
"def sendRequest(self, channel, requestType, data, wantReply=0):\n if channel.localClosed:\n return\n log.msg('sending request %r' % (requestType))\n self.transport.sendPacket(MSG_CHANNEL_REQUEST, struct.pack('>L',\n self.channelsToRemoteChannel[channel])\n + common.NS(requestType)+chr(wantReply)\n + data)\n if wantReply:\n d = defer.Deferred()\n self.deferreds.setdefault(channel.id, []).append(d)\n return d"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
List all instances of CollegeCoach
|
def collegecoachs_get(label=None, page=None, per_page=None): # noqa: E501
return query_manager.get_resource(
label=label,
page=page,
per_page=per_page,
rdf_type_uri=COLLEGECOACH_TYPE_URI,
rdf_type_name=COLLEGECOACH_TYPE_NAME,
kls=CollegeCoach)
|
[
"def online_colleges():\n college_list = tasks.json_list(os.path.join(pathlib.Path(__file__).parent.absolute(),'static/college_info.json'))\n return render_template('online_colleges.html',title='collegeSMART - Online Colleges',colleges=college_list)",
"def collegecoachs_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=COLLEGECOACH_TYPE_URI,\n rdf_type_name=COLLEGECOACH_TYPE_NAME, \n kls=CollegeCoach)",
"def search_courses(session):\n page = session.get(URL)\n bs = BeautifulSoup(page.text, 'lxml')\n colleges = get_college(bs)\n for college in colleges:\n terms = get_term(session, bs, college)\n for term in terms[1:]:\n majors = get_majors(session, bs, college, term)\n for major in majors:\n for career in CAREER:\n doc_ref = db.collection('colleges').document(college) \\\n .collection('majors').document(major) \\\n .collection('terms').document(term) \\\n .collection('career').document(career)\n\n values = get_param_for_courses(bs, college, term, career, major)\n page = session.post(URL, data=values, headers=headers)\n bs1 = BeautifulSoup(page.text, 'lxml')\n try:\n get_courses(bs1, doc_ref)\n except AttributeError as ex:\n print('No course found')\n time.sleep(randint(0, 1))",
"def get_courses(self):\n return self.q(css='ul.listing-courses .course-item')",
"def test_list(self):\n col = baker.make(\"College\", active=True)\n colI = baker.make(\"College\",active=False)\n response = self.client.get(reverse('makeReports:college-list'))\n self.assertContains(response,col.name)\n self.assertNotContains(response,colI.name)",
"def api_courses_get():\n\tpass",
"def all_courses():\r\n categories = db.session.query(Category).all()\r\n courses = db.session.query(Course).all()\r\n\r\n return render_template('course_list.html',\r\n categories=categories,\r\n courses=courses)",
"def courses():\n current_date = date.today()\n if current_date.month > 6: # Jul, Aug, Sep, Oct, Nov, Dec\n start_year = current_date.year\n else: # Jan, Feb, Mar, Apr, May, Jun\n start_year = current_date.year - 1\n ending = (start_year + 1) % 100 # last two digits of end_date\n school_year = f\"{start_year}-{ending}\"\n return (\n db_session.query(CurriculumCourse)\n .filter(CurriculumCourse.school_year == school_year)\n .filter(CurriculumCourse.course_code != '000')\n .filter(CurriculumCourse.course_active == True)\n .filter(CurriculumCourse.school_code.in_(school_codes))\n .filter(CurriculumCourse.sections.any(CourseSection.assigned_seats > 0))\n )",
"def coaches(self):\n return Coach.objects.filter(_node__parent=self._node)",
"def list_course_histories(self, request, pk):\n return self.list(request, pk)",
"def find_classes_by_school(school_name):\n\n list_of_classes_by_school = (db.session.query(Class).join(School).filter(School.name == school_name).all())\n\n \n return list_of_classes_by_school",
"def public_courses_list():\n\n # Get the (possibly cached) courses\n # that the current user is in.\n courses = get_courses(current_user.netid)\n\n # Give back the courses that the\n # student is in. This information\n # is possibly cached.\n return success_response({\n \"courses\": courses\n })",
"def list_courses(name, command_list, StudentList, Commands):\r\n\r\n try:\r\n student_obj = get_student(name, StudentList)\r\n\r\n except StudentDNE_Error:\r\n print('ERROR: Student', name, 'does not exist.')\r\n return\r\n\r\n if student_obj.courses == []:\r\n print(name, 'is not taking any courses.')\r\n return\r\n\r\n print(name, 'is taking', print_items(student_obj.list_courses()))\r\n Commands.push(command_list)",
"def courses(self):\n \n if \"_courses\" not in self.__dict__:\n self._courses = []\n courses_to_exclude = []\n\n course_df = pd.read_pickle(\"courses.pkl\")\n\n for parameter in [\"faculty\", \"institute\"]:\n if parameter in self._course_parameters:\n for element in self._course_parameters[parameter]:\n if element[0] == \"-\":\n course_list = courses_to_exclude\n parameter_value = element[1:]\n else:\n course_list = self._courses\n parameter_value = element\n \n indexes = course_df[parameter] == parameter_value\n course_list.extend(\n list(course_df.loc[indexes, \"coursecode\"].values)\n )\n\n if \"coursecode\" in self._course_parameters:\n for course in self._course_parameters[\"coursecode\"]:\n if course[0] == \"-\":\n courses_to_exclude.append(course[1:])\n else:\n self._courses.append(course)\n\n # Remove duplicates\n self._courses = list(dict.fromkeys(self._courses))\n\n for course_to_exclude in courses_to_exclude:\n if course_to_exclude in self._courses:\n self._courses.remove(course_to_exclude)\n\n if \"search\" in self._course_parameters:\n for search_query in self._course_parameters[\"search\"]:\n if search_query[0] == \"-\":\n course_list = courses_to_exclude\n regex = self.regexpify(search_query[1:])\n else:\n course_list = self._courses\n regex = self.regexpify(search_query)\n\n indexes = course_df[\"coursecode\"].str.contains(regex)\n course_list.extend(list(course_df[\"coursecode\"].loc[indexes]))\n\n return self._courses",
"def build_courses(browser, course_pair_list):\n return [Course(pair[0], pair[1], browser) for pair in course_pair_list]",
"def getPeople(self):\n\n secman = getSecurityManager()\n \n #There *has* to be a better way to do this...\n localPeople = self.getReferences(relationship='classifications_people')\n\n #Get the intersection of people referenced to this classification and people within/referenced to the parent\n classificationPeople = list(set(localPeople) & set(self.aq_parent.getPeople()))\n \n #Determine the valid people to show\n visiblePeople = []\n currentDateTime = DateTime()\n for person in classificationPeople:\n if currentDateTime >= person.getEffectiveDate() and (currentDateTime < person.getExpirationDate() or person.getExpirationDate() is None):\n if secman.checkPermission(View, person):\n visiblePeople.append(person)\n \n #Return only the visible people\n return visiblePeople",
"def get_memberships(self):\n pass",
"def teachers_schools():\n class_school = db.session.query(TeacherSchool.teacher_school_id,\n School.name, Teacher.teacher_name).join(School).join(Teacher).all()\n \n return class_school",
"def my_teaching_courses(request):\n\tif request.user.is_authenticated() and RegProfessor.objects.get(user=request.user).active:\n\t\treg_professor = RegProfessor.objects.get(user=request.user)\n\t\tcourses = Course.objects.filter(instructor=reg_professor)\n\telse:\n\t\tmessages.error(request, 'Your account is not active yet, please conatct admin.')\n\treturn render_to_response(\"professor/myteachingcourses.html\", locals(), context_instance=RequestContext(request))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get a single CollegeCoach by its id
|
def collegecoachs_id_get(id): # noqa: E501
return query_manager.get_resource(id=id,
rdf_type_uri=COLLEGECOACH_TYPE_URI,
rdf_type_name=COLLEGECOACH_TYPE_NAME,
kls=CollegeCoach)
|
[
"def get_course(course_id):\r\n try:\r\n course = db.session.query(Course).filter_by(id=course_id).one()\r\n return course\r\n except NoResultFound:\r\n return None",
"def getCourse(self, courseId):\n courseList = self.getCourseList()\n for course in courseList:\n if course['courseInfoId'] == int(courseId):\n return course",
"def get_course(course_id, depth=0):\n course = modulestore().get_course(course_id, depth=depth)\n if course is None:\n raise ValueError(u\"Course not found: {0}\".format(course_id))\n return course",
"def get_course_from_db(course_id):\n\n try:\n return Course.objects.get(courseId=course_id)\n except Course.DoesNotExist:\n print(\"Course: \" + str(course_id) + \" could not be found in Course when searched in get_course_from_db\")\n return None\n except Course.MultipleObjectsReturned:\n print(\"Course: \" + str(course_id) + \" has multiple entries in Course when searched in get_course_from_db\")\n return None",
"def collegecoachs_get(label=None, page=None, per_page=None): # noqa: E501\n\n\n return query_manager.get_resource(\n label=label,\n page=page,\n per_page=per_page,\n rdf_type_uri=COLLEGECOACH_TYPE_URI,\n rdf_type_name=COLLEGECOACH_TYPE_NAME, \n kls=CollegeCoach)",
"def getChapter(self, courseId, chapterId):\n chapterList = self.getChapterList(courseId)\n for chapter in chapterList:\n if chapter['id'] == int(chapterId):\n return chapter",
"def get(self, id): \n student = get(id)\n return student",
"def get(cert_id):\n return database.get(Certificate, cert_id)",
"def get_by_id(cls, centro_id):\n c = cls.query.get(centro_id)\n if c is None:\n raise NoResultFound('no existe un Centro con id {}'.format(centro_id))\n return c",
"def get_champion_by_id(id):\n raw = get_static_data(\"champion.json\")\n champion_raw = next((x for x in raw['data'].values() if x['key'] == str(id)), None)\n if champion_raw is None:\n raise ValueError(\"No champion found with ID: {}\".format(id))\n\n return Champion(champion_raw)",
"def soccerleagues_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=SOCCERLEAGUE_TYPE_URI,\n rdf_type_name=SOCCERLEAGUE_TYPE_NAME, \n kls=SoccerLeague)",
"def get_category(ses,id_to_find):\n\n category = ses.query(Category).filter_by(id=id_to_find).one()\n return category",
"def get_all_colleague_data_by_id(self, id):\n colleague = DBSession.query(Colleague).filte(Colleague.colleague_id == id).first()\n return colleague",
"def get(self, id):\r\n catergory = get_a_catergory(id)\r\n if not catergory:\r\n api.abort(404)\r\n else:\r\n return catergory",
"async def get_customer_by_id(self,id):\n async with self._db.acquire() as conn:\n data= await conn.execute(Customer.select().where((Customer.c.customer_id == id)))\n return await data.fetchone()",
"def get_chapter_by_id(chapterId): # noqa: E501\n return \"do some magic!\"",
"def findCourse(self, courses, searchString):\n searchString = searchString.lower()\n for c in courses:\n try:\n if c['name'].lower() == searchString or \\\n str(c['id']) == searchString:\n return c\n except:\n pass\n return None",
"def getByID( self, company_id, load_level = 'light', hide = True ):\n qry = \"\"\"SELECT * FROM `%s`.`companies` WHERE `id` = \"%s\" \"\"\" % ( self.db_name, company_id )\n if hide:\n qry += \" AND `display` = 1;\"\n else:\n qry += \";\"\n company = Mysql.ex( qry )\n if len( company ) == 0:\n return False\n company = self.getLoadLevel( company[0], load_level )\n return company",
"def lookupCity(self, _id):\n cityname=\"\"\n cityresult=self._citiesTable.loc[self._citiesTable[self.ID]==_id]\n\n if(len(cityresult)==1):\n cityname=cityresult[self.CITYNAME].iloc[0]\n\n return(cityname)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get an approximation for the amount of power the sail derives from the wind.
|
def get_sail_power(angle_to_wind):
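    # closest_starboard, closest_port, and tau are presumably module-level
    # constants defined elsewhere in the original module; sin and cos come from math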
if closest_starboard < angle_to_wind % tau < closest_port:
return 0.2
s = sin(angle_to_wind)
return (
0.4 * s * s +
0.1 * cos(angle_to_wind) +
0.6 # get a little bit anyway
)
|
[
"def get_solar_generator_power(self):\n return self._get_content_of_own_consumption()[5]",
"def getTerminalPower(self):\n return float(self.query(\"MEAS:POW?\"))",
"def estimate_power(self):\n p = self._solve_power_for_pct(.50)\n p05 = norm._cdf(self._compute_stouffer_z_at_power(.051))\n p99 = norm._cdf(self._compute_stouffer_z_at_power(.99))\n if p05 <= .95:\n lbp = .05\n elif p99 >= .95:\n lbp = .99\n else:\n lbp = self._solve_power_for_pct(.95)\n\n if p05 <= .05:\n ubp = .05\n elif p99 >= .05:\n ubp = .99\n else:\n ubp = self._solve_power_for_pct(.05)\n return p, (lbp, ubp)",
"def getTerminalPower(self):\n return float(self.instr.query(\"MEAS:POW?\"))",
"def get_power(self):\n return self.power_total",
"def get_power(self):\r\n return self._power",
"def power(self):\n return self.curr * self.emf",
"def measure_p(self):\n self._ser.write('MEAS:POW?')\n __value = float(self._ser.read()[:-1])\n print(f'IT6861A OUT Power: {__value}W')\n return __value",
"def _get_power_at_freq(self) -> float:\n\t\toriginal_span = self.span()\n\t\toriginal_rbw = self.rbw()\n\t\tneeds_reset = False\n\t\tif not (original_span == 0.25e6 and original_rbw == 1e3):\n\t\t\tneeds_reset = True\n\t\t\tself.span(0.25e6)\n\t\t\tself.rbw(1e3)\n\t\tif not self._parameters_synced:\n\t\t\t# call configure to update both\n\t\t\t# the parameters on the device and the\n\t\t\t# setpoints and units\n\t\t\tself.configure()\n\t\tdata = self._get_sweep_data()\n\t\tmax_power = np.max(data)\n\t\tif needs_reset:\n\t\t\tself.span(original_span)\n\t\t\tself.rbw(original_rbw)\n\t\t\tself.configure()\n\t\tsleep(2*self.sleep_time.get())\n\t\treturn max_power",
"def water_weight(gal):\n return gal * 8.338",
"def measurePower(self,low):\n if math.fabs(low[0]) > 2.0:\n return 100.0\n self._awg.setOffset(self._awgChannel,low[0])\n minimum = self.measureAveragePower()\n print \"Measuring power at %g : %g\" % (low[0],minimum)\n self.d.set(minimum=minimum, offset=low[0])\n self.d.commit()\n linpower = math.pow(10.0,minimum/10.0)/10.0\n return minimum",
"def weight_exp(self):\n return torch.ceil(torch.log2(torch.sqrt(self.running_var + self.eps)))",
"def measure_p(self):\n self._ser.write('MEAS:POW?')\n __value = float(self._ser.read()[:-1])\n print(f'C62012P OUT Power: {__value}W')\n return __value",
"def getEnergy(self) -> float:\n ...",
"def get_power(self, wsi, level: int) -> float:\n objective_power = wsi.properties.get(\"openslide.objective-power\")\n if objective_power:\n downsample_ratio = self.get_downsample_ratio(wsi, level)\n return float(objective_power) / downsample_ratio\n\n raise ValueError(\"Objective `power` cannot be obtained for this file. Please use `level` (or `mpp`) instead.\")",
"def bass_power(self, filtered=True):\n return self.get_freq_power(1, filtered)",
"def purchase_power(inflation, usdegp):\n power = usdegp / (usdegp * (1 + inflation))\n return power",
"def lows_power(self, filtered=True):\n return (\n self.get_freq_power(0, filtered) + self.get_freq_power(1, filtered)\n ) * 0.5",
"def analyte_injected_pmol(self):\n return (self.analyte_injected_ng()/self.molweight)*1000"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get an approximation for the heeling moment at a given angle to the wind.
|
def get_heeling_moment(angle_to_wind):
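    # tau, pi, and sin are presumably imported from math; closest_starboard and
    # closest_port are presumably module-level angle limits defined elsewhere
    # the wind angle is first normalized into (-pi, pi] before the range check below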
a = angle_to_wind % tau
if a > pi:
a -= tau
if closest_starboard < a < closest_port:
return 0
return sin(0.5 * a) - 0.25 * sin(1.5 * a)
|
[
"def solar_angle(hour):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.results.solar.angle\", \r\n hour)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, float)",
"def haversin(angle):\n return ((1.0 - math.cos(angle*math.pi/180.0))/2.0)",
"def angle_to_wind(self):\n wd = self.wind_direction\n if wd > 180:\n wd -= 360\n return -wd",
"def hour_from_ang(self, angle):\r\n pi = math.pi\r\n hr = math.ceil((((angle / (2 * pi)) * 12) - 3.5)) % 12\r\n if(hr == 0):\r\n hr = 12\r\n return hr",
"def haversinrad(angle):\n return ((1.0 - math.cos(angle))/2.0)",
"def degrees_to_hours(angle):\n return angle / 15.",
"def altitude_angle(self):\n\t\ta = math.sin(math.radians(self.latitude)) * math.sin(math.radians(self.declination_angle()))\n\t\tb = math.cos(math.radians(self.latitude)) * math.cos(math.radians(self.declination_angle())) * math.cos(math.radians(self.hour_angle()))\n\t\tc = a+b\n\t\td = math.asin(c)\n\t\treturn math.degrees(d) #units = degress",
"def hour_angle(self):\n\n\t\t#turn the solar time into total seconds (since midnight)\n\t\tseconds_solartime = self.solar_time().hour*3600 + self.solar_time().minute*60 + self.solar_time().second\n\t\tseconds_from_solar_noon = abs(seconds_solartime - 12*3600)#noon in seconds\t\t\n\t\treturn (float(seconds_from_solar_noon)/60)/4 #units = degrees",
"def radians_to_hours(angle):\n return degrees_to_hours(degrees(angle))",
"def get_sail_power(angle_to_wind):\n if closest_starboard < angle_to_wind % tau < closest_port:\n return 0.2\n s = sin(angle_to_wind)\n return (\n 0.4 * s * s +\n 0.1 * cos(angle_to_wind) +\n 0.6 # get a little bit anyway\n )",
"def _ascent_angle_manager(self, altitude):\n TURN_START_ALTITUDE = 1000\n TURN_START_ANGLE = 80\n TURN_END_ALTITUDE = 50*1000\n\n if altitude > TURN_START_ALTITUDE and altitude < TURN_END_ALTITUDE:\n frac = ((TURN_END_ALTITUDE - altitude) /\n (TURN_END_ALTITUDE - TURN_START_ALTITUDE))\n turn_angle = frac * TURN_START_ANGLE\n elif altitude >= TURN_END_ALTITUDE:\n turn_angle = 0\n else:\n turn_angle = 90\n self.vessel.auto_pilot.target_pitch = turn_angle\n return",
"def hours_to_degrees(angle):\n return angle * 15.",
"def local_hour_angle(model_time, longitude, right_ascension):\n return local_mean_sidereal_time(model_time, longitude) - right_ascension",
"def relh(temperature, _dewpoint):\n # Get temperature in Celsius\n tmpc = temperature.value(\"C\")\n dwpc = _dewpoint.value(\"C\")\n\n e = 6.112 * np.exp((17.67 * dwpc) / (dwpc + 243.5))\n es = 6.112 * np.exp((17.67 * tmpc) / (tmpc + 243.5))\n _relh = (e / es) * 100.00\n return dt.humidity(_relh, \"%\")",
"def hours_to_radians(angle):\n return radians(hours_to_degrees(angle))",
"def azimuth_angle(self):\n\t\tdiv = math.cos(math.radians(self.declination_angle())) * (math.sin(math.radians(self.hour_angle())) / math.cos(math.radians(self.altitude_angle())))\n\t\treturn math.degrees(math.asin(div))",
"def ApproxHeel(self, Fs, gammas, Fk, gammak, deltaFs, deltagammas):\n tanheel = (Fs * self.hs * np.sin(gammas) + Fk * self.hk * np.sin(gammak)) / (self.hb * self.wb)\n heel = np.arctan(tanheel)\n dheel = self.hs * (deltaFs * np.sin(gammas) + Fs * np.cos(gammas) * deltagammas) \\\n / ((1.0 + tanheel ** 2) * self.hb * self.wb)\n return heel, dheel",
"def altitude(self,has,dec):\n delta = np.deg2rad(dec)\n has = np.deg2rad(has)\n latrad = np.deg2rad(self.lat) \n h = np.arcsin(np.sin(latrad)*np.sin(delta) + np.cos(latrad) * \n np.cos(delta)*np.cos(latrad))\n h = np.rad2deg(h)\n return h",
"def get_steering_angle(lowpoint, highpoint):\n delx = float(highpoint[0])-float(lowpoint[0])\n dely = float(highpoint[1])-float(lowpoint[1])\n hyp = math.sqrt(delx**2 + dely**2)\n theta = -1*math.asin(delx/hyp)\n return theta"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
looks through keys_list and picks the entries out of the database that have an objType in the passed list; return order is arbitrary
|
def searchObjTypeList(self,keys_list=None,objType_list=[".obj.pub",".obj.pub.article",".obj.pub.book"]):
if not keys_list:
keys_list = self.getEntryList()
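		# keep only keys that are actually in the database and whose objType matches one of the requested types exactly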
return [k for k in keys_list if k in self.getEntryList() and self.entries[k].objType in objType_list]
|
[
"def searchObjTypeDerive(self,keys_list=None,query_objType=\".obj.pub\"):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\treturn [k for k in keys_list if k in self.getEntryList() and self.entries[k].objType[:len(query_objType)] == query_objType]",
"def get_objects(obj_type):\n return _db_content[obj_type].values()",
"def searchKeywords(self,keys_list=None,keyword_list=[]):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\t#we make the query set case insensitive by converting all the strings to lowercase\n\t\tlist_of_keywords_lower = map(lambda x: x.lower(), keyword_list)\n\n\t\t#we define a function that checks how many elements are in common between the query set and the keywords set \n\t\tdef numInCommon(list1,list2):\n\t\t\treturn len(set(list1)&set(list2))\n\t\t\n\t\t#get keys whose value has some intersection with the query set \n\t\tr_keys = [k for k in self.searchObjTypeDerive(keys_list,query_objType=\".obj.pub\") if numInCommon(self.entries[k].keywords,list_of_keywords_lower)>0 ]\n\t\tr_keys.sort(key=lambda x: numInCommon(self.entries[x].keywords,list_of_keywords_lower), reverse=True)\n\t\treturn r_keys",
"def searchAuthors(self,keys_list=None,author_list=[]):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\t#only select keys for which all the authors in the query list are in the obj.authors set\n\t\treturn [k for k in self.searchObjTypeDerive(keys_list) if set(author_list).issubset(self.entries[k].authors)]",
"def comp_obj_lists(db_obj_list, bpt_obj_list):\n\n def make_search_crit_list(search_crit_list_str, obj_list):\n \"\"\"takes in list of search criteria as strings, like [\"first_name\",\"last_name\"] and makes tuple of that object's values\n returns list of all tuples made\"\"\"\n search_crit_list=[]\n dict_connection={}\n\n for obj in obj_list:\n list2btuple=[]\n for crit_str in search_crit_list_str:\n list2btuple.append(getattr(obj, crit_str))\n this_tup=tuple(list2btuple)\n search_crit_list.append(this_tup)\n dict_connection[this_tup]= obj\n\n return search_crit_list, dict_connection\n\n def trim_obj_list(in_db, bpt_obj_list,search_crit_list_str):\n \"\"\"takes list of which objects are in the db as of moment and which should be but haven't been found yet,\n makes search criteria list of db entries and filters should be in by, returns\n modified lists of who is in d and who may not be, after that filter\"\"\"\n print \"trim_obj_list(\"\n\n in_db_search_crit_list,in_db_dict_connection=make_search_crit_list(search_crit_list_str, in_db)\n bpt_search_crit_list,bpt_dict_connection=make_search_crit_list(search_crit_list_str, bpt_obj_list)\n\n bpt_set = set(bpt_search_crit_list)\n db_set = set(in_db_search_crit_list)\n intersect=bpt_set.intersection(db_set)\n\n just_found=[]\n\n for obj_tup in intersect:\n if obj_tup is not None:\n obj=bpt_dict_connection.get(obj_tup)\n if obj is not None:\n bpt_obj_list.remove(obj)\n just_found.append(obj)\n\n #reminder: make shallow copies, not pointers.\n return list(in_db), list(bpt_obj_list),just_found\n\n\n in_db=list(db_obj_list)\n not_found_yet=list(bpt_obj_list)\n\n all_found=[]\n print \"all_found len: \",len(all_found)\n\n in_db, not_found_yet,just_found=trim_obj_list(in_db, not_found_yet,[\"email\", \"first_name\",\"last_name\",\"sk8name\"])\n print \"pass 1: %s found in DB by emial, fname, lname,sk8name, %s not found, %s total \"%( str(len(just_found)), str(len(not_found_yet)), str((len(in_db)+len(not_found_yet))) )\n all_found+=just_found\n print \"all_found len: \",len(all_found)\n\n #next, email and fname and lname NOT sk8name\n in_db, not_found_yet,just_found=trim_obj_list(in_db, not_found_yet,[\"email\", \"first_name\",\"last_name\"])\n print \"pass 2: %s found in DB by email, fname, lname,but NOT sk8name, %s not found, %s total \"%( str(len(just_found)), str(len(not_found_yet)), str((len(in_db)+len(not_found_yet))) )\n all_found+=just_found\n print \"all_found len: \",len(all_found)\n############this looks like it could be problematic, prodice false positives. 
commented out########\n\n #next, email and fname and lname NOT sk8name\n # in_db, not_found_yet,just_found=trim_obj_list(in_db, not_found_yet,[\"email\",\"last_name\"])\n # print \"pass 3: %s found in DB by email, lname, %s not found, %s total \"%( str(len(just_found)), str(len(not_found_yet)), str((len(in_db)+len(not_found_yet))) )\n #all_found+=just_found\n #print \"all_found len: \",len(all_found)\n#######################################\n\n\n #next, email and fname and lname NOT sk8name\n in_db, not_found_yet,just_found=trim_obj_list(in_db, not_found_yet,[\"email\", \"first_name\",])\n print \"pass 4: %s found in DB by email, fname, %s not found, %s total \"%( str(len(just_found)), str(len(not_found_yet)), str((len(in_db)+len(not_found_yet))) )\n all_found+=just_found\n print \"all_found len: \",len(all_found)\n\n #next, email and sk8nme\n in_db, not_found_yet,just_found=trim_obj_list(in_db, not_found_yet,[\"email\", \"sk8name\"])\n print \"pass 5: %s found in DB by email, and sk8name, %s not found, %s total \"%( str(len(just_found)), str(len(not_found_yet)), str((len(in_db)+len(not_found_yet))) )\n all_found+=just_found\n print \"all_found len: \",len(all_found)\n\n #next, email and fname and lname and sk8name\n in_db, not_found_yet,just_found=trim_obj_list(in_db, not_found_yet,[\"first_name\",\"last_name\",\"sk8name\"])\n print \"pass 6: %s found in DB by fname, lname,sk8name, NOT email %s not found, %s total \"%( str(len(just_found)), str(len(not_found_yet)), str((len(in_db)+len(not_found_yet))) )\n all_found+=just_found\n print \"all_found len: \",len(all_found)\n\n #next, email and fname and lname and sk8name\n in_db, not_found_yet,just_found=trim_obj_list(in_db, not_found_yet,[\"first_name\",\"sk8name\"])\n print \"pass 7: %s found in DB by fname,sk8name %s not found, %s total \"%( str(len(just_found)), str(len(not_found_yet)), str((len(in_db)+len(not_found_yet))) )\n all_found+=just_found\n print \"all_found len: \",len(all_found)\n\n #next, email and fname and lname and sk8name\n in_db, not_found_yet,just_found=trim_obj_list(in_db, not_found_yet,[\"last_name\",\"sk8name\"])\n print \"pass 8: %s found in DB by lname, sk8name, %s not found, %s total \"%( str(len(just_found)), str(len(not_found_yet)), str((len(in_db)+len(not_found_yet))) )\n all_found+=just_found\n print \"all_found len: \",len(all_found)\n\n return in_db, not_found_yet,all_found",
"def searchAttribute(self,keys_list=None,attribute=\"objType\",value=\".obj.pub\"):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\treturn [k for k in keys_list if k in self.getEntryList() and hasattr(self.entries[k],attribute) and getattr(self.entries[k],attribute) == value ]",
"def get_objs_with_key_and_typeclass(self, oname, otypeclass_path, candidates=None):\r\n cand_restriction = candidates != None and Q(pk__in=[_GA(obj, \"id\") for obj in make_iter(candidates) if obj]) or Q()\r\n return self.filter(cand_restriction & Q(db_key__iexact=oname, db_typeclass_path__exact=otypeclass_path))",
"def get_objs_with_key_or_alias(self, ostring, exact=True,\r\n candidates=None, typeclasses=None):\r\n if not isinstance(ostring, basestring):\r\n if hasattr(ostring, \"key\"):\r\n ostring = ostring.key\r\n else:\r\n return []\r\n if is_iter(candidates) and not len(candidates):\r\n # if candidates is an empty iterable there can be no matches\r\n # Exit early.\r\n return []\r\n\r\n # build query objects\r\n candidates_id = [_GA(obj, \"id\") for obj in make_iter(candidates) if obj]\r\n cand_restriction = candidates != None and Q(pk__in=make_iter(candidates_id)) or Q()\r\n type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()\r\n if exact:\r\n # exact match - do direct search\r\n return self.filter(cand_restriction & type_restriction & (Q(db_key__iexact=ostring) |\r\n Q(db_tags__db_key__iexact=ostring) & Q(db_tags__db_tagtype__iexact=\"alias\"))).distinct()\r\n elif candidates:\r\n # fuzzy with candidates\r\n key_candidates = self.filter(cand_restriction & type_restriction)\r\n else:\r\n # fuzzy without supplied candidates - we select our own candidates\r\n key_candidates = self.filter(type_restriction & (Q(db_key__istartswith=ostring) | Q(db_tags__db_key__istartswith=ostring))).distinct()\r\n candidates_id = [_GA(obj, \"id\") for obj in key_candidates]\r\n # fuzzy matching\r\n key_strings = key_candidates.values_list(\"db_key\", flat=True)\r\n index_matches = string_partial_matching(key_strings, ostring, ret_index=True)\r\n if index_matches:\r\n return [obj for ind, obj in enumerate(key_candidates) if ind in index_matches]\r\n else:\r\n alias_candidates = self.filter(id__in=candidates_id, db_tags__db_tagtype__iexact=\"alias\")\r\n alias_strings = alias_candidates.values_list(\"db_key\", flat=True)\r\n index_matches = string_partial_matching(alias_strings, ostring, ret_index=True)\r\n if index_matches:\r\n return [alias.db_obj for ind, alias in enumerate(alias_candidates) if ind in index_matches]\r\n return []",
"def fetch_rows(db_keys, _bq_key):\n return [\n _convert(e) for e in ndb.get_multi(ndb.Key(urlsafe=k) for k in db_keys)\n if e\n ]",
"def build_type_object_list(self, object_list, list_types):\n type_object_list = []\n for o in object_list:\n for t in list_types:\n # Ensure it matches only the exact type given (ex. \"SEQUENCE\", not \"SEQUENCE SET\"\n if re.match(r'^' + t + '$', o.get('objtype')):\n type_object_list.append(o)\n\n if self.args and self.args.debug:\n self._debug_print(\"\\nTYPE OBJECT LIST \" + str(list_types))\n for o in type_object_list:\n self._debug_print(o)\n\n return type_object_list",
"def get_objs_with_tag(self, key=None, category=None, model=\"objects.objectdb\", tagtype=None):\r\n objclass = ContentType.objects.get_by_natural_key(*model.split(\".\", 1)).model_class()\r\n key_cands = Q(db_tags__db_key__iexact=key.lower().strip()) if key is not None else Q()\r\n cat_cands = Q(db_tags__db_category__iexact=category.lower().strip()) if category is not None else Q()\r\n tag_crit = Q(db_tags__db_model=model, db_tags__db_tagtype=tagtype)\r\n return objclass.objects.filter(tag_crit & key_cands & cat_cands)",
"def make_search_crit_list(search_crit_list_str, obj_list):\n search_crit_list=[]\n dict_connection={}\n\n for obj in obj_list:\n list2btuple=[]\n for crit_str in search_crit_list_str:\n list2btuple.append(getattr(obj, crit_str))\n this_tup=tuple(list2btuple)\n search_crit_list.append(this_tup)\n dict_connection[this_tup]= obj\n\n return search_crit_list, dict_connection",
"def get_object_refs(obj_type):\n for obj in _db_content[obj_type].values():\n yield obj.obj",
"def get_key_types(obj):\n if isinstance(obj, union_type.UnionType):\n return obj.get_contained_type()\n\n keys = obj.keys()\n return union_type.UnionType.create_from_type_list(keys)",
"def object_dicts(self,\n qbbo_list = [],\n requery=False,\n params={},\n query_tail=\"\"):\n\n object_dicts = {} #{qbbo:[object_list]}\n\n for qbbo in qbbo_list:\n\n if qbbo == \"TimeActivity\":\n #for whatever reason, this failed with some basic criteria, so\n query_tail = \"\"\n elif qbbo in self._NAME_LIST_OBJECTS and query_tail == \"\":\n #just something to avoid confusion from 'deleted' accounts later\n query_tail = \"WHERE Active IN (true,false)\"\n\n object_dicts[qbbo] = self.get_objects(qbbo,\n requery,\n params,\n query_tail)\n\n return object_dicts",
"def get_all_scripts_on_obj(self, obj, key=None):\r\n if not obj:\r\n return []\r\n obj = obj.dbobj\r\n player = _GA(_GA(obj, \"__class__\"), \"__name__\") == \"PlayerDB\"\r\n if key:\r\n dbref = self.dbref(key)\r\n if dbref or dbref == 0:\r\n if player:\r\n return self.filter(db_player=obj, id=dbref)\r\n else:\r\n return self.filter(db_obj=obj, id=dbref)\r\n elif player:\r\n return self.filter(db_player=obj, db_key=key)\r\n else:\r\n return self.filter(db_obj=obj, db_key=key)\r\n elif player:\r\n return self.filter(db_player=obj)\r\n else:\r\n return self.filter(db_obj=obj)",
"def get_first_object(obj_type):\n return next(iter(_db_content[obj_type].values()))",
"def get_interesting_instances_class_list(self, model_class):\n filtered_list = []\n #We will need all instances to determine type so fetch all at once\n instances = db.get(self.interesting_instances)\n for i in instances:\n #If no exception then key is of right type.\n if (i and (i.class_name() == model_class.class_name())):\n filtered_list.append(i)\n return filtered_list",
"def find(self, oid, limit=0):\n ext = []\n if isinstance(oid, str):\n oid = OID(oid)\n keyset = sorted(self.keys())\n for k in keyset:\n v = self[k]\n if k.match(oid):\n ext.append((k, v))\n if limit and len(ext) == limit:\n break\n return ext"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
searches through keys_list and selects the keys for which the entry in the database has an objType string that begins with the string query_objType; return order is arbitrary
|
def searchObjTypeDerive(self,keys_list=None,query_objType=".obj.pub"):
if not keys_list:
keys_list = self.getEntryList()
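		# prefix match: keep keys whose objType string starts with query_objType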
return [k for k in keys_list if k in self.getEntryList() and self.entries[k].objType[:len(query_objType)] == query_objType]
|
[
"def searchObjTypeList(self,keys_list=None,objType_list=[\".obj.pub\",\".obj.pub.article\",\".obj.pub.book\"]):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\n\t\treturn [k for k in keys_list if k in self.getEntryList() and self.entries[k].objType in objType_list]",
"def searchKeywords(self,keys_list=None,keyword_list=[]):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\t#we make the query set case insensitive by converting all the strings to lowercase\n\t\tlist_of_keywords_lower = map(lambda x: x.lower(), keyword_list)\n\n\t\t#we define a function that checks how many elements are in common between the query set and the keywords set \n\t\tdef numInCommon(list1,list2):\n\t\t\treturn len(set(list1)&set(list2))\n\t\t\n\t\t#get keys whose value has some intersection with the query set \n\t\tr_keys = [k for k in self.searchObjTypeDerive(keys_list,query_objType=\".obj.pub\") if numInCommon(self.entries[k].keywords,list_of_keywords_lower)>0 ]\n\t\tr_keys.sort(key=lambda x: numInCommon(self.entries[x].keywords,list_of_keywords_lower), reverse=True)\n\t\treturn r_keys",
"def get_objs_with_key_or_alias(self, ostring, exact=True,\r\n candidates=None, typeclasses=None):\r\n if not isinstance(ostring, basestring):\r\n if hasattr(ostring, \"key\"):\r\n ostring = ostring.key\r\n else:\r\n return []\r\n if is_iter(candidates) and not len(candidates):\r\n # if candidates is an empty iterable there can be no matches\r\n # Exit early.\r\n return []\r\n\r\n # build query objects\r\n candidates_id = [_GA(obj, \"id\") for obj in make_iter(candidates) if obj]\r\n cand_restriction = candidates != None and Q(pk__in=make_iter(candidates_id)) or Q()\r\n type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()\r\n if exact:\r\n # exact match - do direct search\r\n return self.filter(cand_restriction & type_restriction & (Q(db_key__iexact=ostring) |\r\n Q(db_tags__db_key__iexact=ostring) & Q(db_tags__db_tagtype__iexact=\"alias\"))).distinct()\r\n elif candidates:\r\n # fuzzy with candidates\r\n key_candidates = self.filter(cand_restriction & type_restriction)\r\n else:\r\n # fuzzy without supplied candidates - we select our own candidates\r\n key_candidates = self.filter(type_restriction & (Q(db_key__istartswith=ostring) | Q(db_tags__db_key__istartswith=ostring))).distinct()\r\n candidates_id = [_GA(obj, \"id\") for obj in key_candidates]\r\n # fuzzy matching\r\n key_strings = key_candidates.values_list(\"db_key\", flat=True)\r\n index_matches = string_partial_matching(key_strings, ostring, ret_index=True)\r\n if index_matches:\r\n return [obj for ind, obj in enumerate(key_candidates) if ind in index_matches]\r\n else:\r\n alias_candidates = self.filter(id__in=candidates_id, db_tags__db_tagtype__iexact=\"alias\")\r\n alias_strings = alias_candidates.values_list(\"db_key\", flat=True)\r\n index_matches = string_partial_matching(alias_strings, ostring, ret_index=True)\r\n if index_matches:\r\n return [alias.db_obj for ind, alias in enumerate(alias_candidates) if ind in index_matches]\r\n return []",
"def get_objects(obj_type):\n return _db_content[obj_type].values()",
"def searchAuthors(self,keys_list=None,author_list=[]):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\t#only select keys for which all the authors in the query list are in the obj.authors set\n\t\treturn [k for k in self.searchObjTypeDerive(keys_list) if set(author_list).issubset(self.entries[k].authors)]",
"def searchAttribute(self,keys_list=None,attribute=\"objType\",value=\".obj.pub\"):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\treturn [k for k in keys_list if k in self.getEntryList() and hasattr(self.entries[k],attribute) and getattr(self.entries[k],attribute) == value ]",
"def getAllQueryItemKeys(*args, **kwargs):\n \n pass",
"def get_objs_with_key_and_typeclass(self, oname, otypeclass_path, candidates=None):\r\n cand_restriction = candidates != None and Q(pk__in=[_GA(obj, \"id\") for obj in make_iter(candidates) if obj]) or Q()\r\n return self.filter(cand_restriction & Q(db_key__iexact=oname, db_typeclass_path__exact=otypeclass_path))",
"def search_items(self, query, firstonly=False):\n results = []\n for keyname in self.keynames():\n todokey = self.get_key(keyname)\n founditems = todokey.search_items(query)\n if founditems:\n if firstonly:\n return [(keyname, founditems)]\n results.append((keyname, founditems))\n return results",
"def get_key_types(obj):\n if isinstance(obj, union_type.UnionType):\n return obj.get_contained_type()\n\n keys = obj.keys()\n return union_type.UnionType.create_from_type_list(keys)",
"def searchable_keys():\n return lass.model_base.DBSession.query(\n lass.metadata.models.Key\n ).filter(\n lass.metadata.models.Key.searchable\n ).order_by(\n sqlalchemy.asc(lass.metadata.models.Key.plural)\n ).all()",
"def search_items(keywords, meta_types=None):",
"def search_items(self, query, firstonly=False):\n if firstonly:\n keyresult = self.find_item(query)\n if keyresult:\n return [keyresult]\n debug('Falsey key result: {}'.format(keyresult))\n return []\n # Find multiple matches.\n intval, querypat = self.parse_query(query)\n found = []\n for index, item in enumerate(self.data):\n itemtext = item.to_str(color=False)\n if (intval is not None) and (intval == index):\n found.append(self.TodoKeyResult(index, item))\n elif (querypat is not None) and querypat.search(itemtext):\n found.append(self.TodoKeyResult(index, item))\n\n return found",
"def make_search_crit_list(search_crit_list_str, obj_list):\n search_crit_list=[]\n dict_connection={}\n\n for obj in obj_list:\n list2btuple=[]\n for crit_str in search_crit_list_str:\n list2btuple.append(getattr(obj, crit_str))\n this_tup=tuple(list2btuple)\n search_crit_list.append(this_tup)\n dict_connection[this_tup]= obj\n\n return search_crit_list, dict_connection",
"def get_objs_with_tag(self, key=None, category=None, model=\"objects.objectdb\", tagtype=None):\r\n objclass = ContentType.objects.get_by_natural_key(*model.split(\".\", 1)).model_class()\r\n key_cands = Q(db_tags__db_key__iexact=key.lower().strip()) if key is not None else Q()\r\n cat_cands = Q(db_tags__db_category__iexact=category.lower().strip()) if category is not None else Q()\r\n tag_crit = Q(db_tags__db_model=model, db_tags__db_tagtype=tagtype)\r\n return objclass.objects.filter(tag_crit & key_cands & cat_cands)",
"def get_keywords_in_KB(KB):\n query = KB[\"memcached\"].find_one({\"object\": \"homepageKeys\"})\n if query is None: # create the dictionary of keywords and cache\n indexing = dict()\n #query =\n #{\"$or\": [\n #{\"$and\": [\n #{\"chronos:hasKeyword\": {\"$exists\": True, \"$ne\": []}},\n #{\"chronos:group\": \"missions\"}\n #]},\n query = {\"$and\": [\n {\"schema:about\": {\"$exists\": True, \"$ne\": []}},\n {\"chronos:group\": \"urls\"}\n ]}\n #]\n #}\n projection = { # \"chronos:hasKeyword\": True,\n \"schema:about\": True,\n \"skos:prefLabel\": True,\n \"schema:headline\": True,\n \"schema:description\": True}\n objects = KB['webpages'].find(query, projection)\n\n #pprint(objects[5])\n\n for o in objects:\n for k in o[\"schema:about\"]:\n doc = KB['base'].find_one({\"_id\": ObjectId(k[\"_id\"])})\n print(doc[\"_id\"])\n if str(doc[\"_id\"]) in indexing.keys():\n # append\n indexing[str(doc[\"_id\"])][\"linked\"].append(str(o[\"_id\"]))\n else:\n # create key > value\n try:\n q = KB[\"base\"].find_one({\"_id\": ObjectId(doc[\"skos:exactMatch\"][0][\"_id\"])})\n indexing[str(doc[\"_id\"])] = {\n \"broader\": q[\"skos:prefLabel\"],\n \"pref_label\": doc[\"skos:prefLabel\"],\n \"linked\": [str(o[\"_id\"])]\n }\n except KeyError:\n print(\"Passed: This is a subject: \" + str(doc[\"_id\"]))\n pass\n\n index = indexing\n indexing = json.dumps(indexing)\n KB[\"memcached\"].insert({\"object\": \"homepageKeys\", \"time\": time.time(), \"value\": indexing})\n else: # retrieve from cache\n index = json.loads(query[\"value\"])\n\n sort = []\n for k, v in sorted(index.items(), key=lambda x: len(x[1][\"linked\"]), reverse=True):\n sort.append([k, len(index[k][\"linked\"]), index[k][\"pref_label\"], index[k][\"broader\"]])\n\n return sort",
"def object_dicts(self,\n qbbo_list = [],\n requery=False,\n params={},\n query_tail=\"\"):\n\n object_dicts = {} #{qbbo:[object_list]}\n\n for qbbo in qbbo_list:\n\n if qbbo == \"TimeActivity\":\n #for whatever reason, this failed with some basic criteria, so\n query_tail = \"\"\n elif qbbo in self._NAME_LIST_OBJECTS and query_tail == \"\":\n #just something to avoid confusion from 'deleted' accounts later\n query_tail = \"WHERE Active IN (true,false)\"\n\n object_dicts[qbbo] = self.get_objects(qbbo,\n requery,\n params,\n query_tail)\n\n return object_dicts",
"def fetch_rows(db_keys, _bq_key):\n return [\n _convert(e) for e in ndb.get_multi(ndb.Key(urlsafe=k) for k in db_keys)\n if e\n ]",
"def _query_names(bbdb, query, *, useclass=True, exclude_bases=None):\n if query.lower() == \"all\":\n return [db[\"file\"] for db in bbdb._alldb]\n query, subq = query.split(\":\") if query.count(\":\") else (query, None)\n if subq is None:\n c_hits = [db[\"file\"] for db in bbdb._alldb if query in db[\"class\"]]\n n_hits = [db[\"file\"] for db in bbdb._alldb if query == db[\"name\"]]\n t_hits = [db[\"file\"] for db in bbdb._alldb if query == db[\"type\"]]\n if not c_hits and not n_hits:\n return t_hits\n if not c_hits and not t_hits:\n return n_hits\n if not t_hits and not n_hits:\n return c_hits\n if not n_hits:\n return c_hits if useclass else t_hits\n assert False, f\"invalid database or query\"\n else:\n excon = None\n if subq.lower().endswith(\"x\"):\n excon = True\n if subq.lower().endswith(\"y\"):\n excon = False\n hits = list()\n assert query == \"Het\"\n for db in bbdb._alldb:\n if not query in db[\"class\"]:\n continue\n nc = [_ for _ in db[\"connections\"] if _[\"direction\"] == \"C\"]\n nn = [_ for _ in db[\"connections\"] if _[\"direction\"] == \"N\"]\n nc, tc = len(nc), subq.count(\"C\")\n nn, tn = len(nn), subq.count(\"N\")\n if nc >= tc and nn >= tn:\n if nc + nn == tc + tn and excon is not True:\n hits.append(db[\"file\"])\n elif nc + nn > tc + tn and excon is not False:\n hits.append(db[\"file\"])\n if exclude_bases is not None:\n hits0, hits = hits, []\n for h in hits0:\n base = bbdb._dictdb[h][\"base\"]\n if base == \"\" or base not in exclude_bases:\n hits.append(h)\n print(\"exclude_bases\", len(hits0), len(hits))\n return hits"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
searches through keys_list and picks the entries out of the database that have keywords in the list_of_keywords set; return order is sorted from most matching keywords to fewest; search is case insensitive
|
def searchKeywords(self,keys_list=None,keyword_list=[]):
if not keys_list:
keys_list = self.getEntryList()
#we make the query set case insensitive by converting all the strings to lowercase
		list_of_keywords_lower = [x.lower() for x in keyword_list]  # a list, so it can be iterated more than once below
#we define a function that checks how many elements are in common between the query set and the keywords set
def numInCommon(list1,list2):
return len(set(list1)&set(list2))
#get keys whose value has some intersection with the query set
r_keys = [k for k in self.searchObjTypeDerive(keys_list,query_objType=".obj.pub") if numInCommon(self.entries[k].keywords,list_of_keywords_lower)>0 ]
r_keys.sort(key=lambda x: numInCommon(self.entries[x].keywords,list_of_keywords_lower), reverse=True)
return r_keys
|
[
"def join_strings_by_keywords(list, keywords, join=' '):\n res = []\n append = False\n for i, elem in enumerate(list):\n if (append):\n try:\n res[-1] = res[-1] + join + elem\n except:\n res.append(elem)\n append = False\n continue\n else:\n if any(elem.lower() in s.lower() for s in keywords):\n if (i > 0 and i < len(list)-1):\n append = True\n else:\n if(i == 0):\n append = True\n else:\n if (i < len(list)-1):\n append = True\n else:\n res.append(elem)\n\n return res",
"def filter_keywords(\n self, input_keywords: List[str], keyword_group: str\n ) -> List[str]:\n try:\n table = self._keyword_groups[keyword_group]\n except KeyError:\n raise ValueError(\n f\"Keyword group {keyword_group} is unknown. Available groups \"\n f\"are: {self._keyword_groups.keys()}.\"\n )\n\n # Normalize the input keywords\n input_keywords = [k.lower().strip() for k in input_keywords]\n\n output_keywords: List[str] = []\n\n for input_keyword in input_keywords:\n if input_keyword in table.keys():\n # Keyword is in group and already the canonical form\n output_keywords.append(input_keyword)\n else:\n # See if the keyword is an alternative form\n for keyword, alternates in table.items():\n if input_keyword in alternates:\n output_keywords.append(keyword)\n\n return output_keywords",
"def _parse_by_key_word(self, words_list):\n start = self._find_word(words_list, config.START_KEYWORDS)\n if start:\n stop = self._find_word(\n words_list, config.END_KEYWORDS, start=start)\n if not stop:\n stop = len(words_list)+1\n return words_list[start+1:stop]\n return []",
"def word_search(doc_list, keyword):\n tmp = []\n tmpindex = []\n for h,i in zip(range(len(doc_list)),doc_list):\n tmp = [j.rstrip('.,').lower() for j in i.split()]\n if keyword in tmp:\n tmpindex.append(h)\n\n return tmpindex",
"def test_priority_keyword_merge():\n kw_list_1 = [(\"A\", \"not-keyword\"), (\"B\", \"non-reserved\")]\n\n kw_list_2 = [(\"A\", \"reserved\"), (\"C\", \"non-reserved\")]\n\n result = priority_keyword_merge(kw_list_1, kw_list_2)\n\n expected_result = [(\"A\", \"reserved\"), (\"B\", \"non-reserved\"), (\"C\", \"non-reserved\")]\n\n assert sorted(result) == sorted(expected_result)\n\n kw_list_1 = [(\"A\", \"not-keyword\"), (\"B\", \"non-reserved\")]\n\n kw_list_2 = [(\"A\", \"reserved\"), (\"C\", \"non-reserved\")]\n\n result_2 = priority_keyword_merge(kw_list_2, kw_list_1)\n\n expected_result_2 = [\n (\"A\", \"not-keyword\"),\n (\"B\", \"non-reserved\"),\n (\"C\", \"non-reserved\"),\n ]\n\n assert sorted(result_2) == sorted(expected_result_2)\n\n kw_list_1 = [(\"A\", \"not-keyword\"), (\"B\", \"non-reserved\")]\n\n kw_list_2 = [(\"A\", \"reserved\"), (\"C\", \"non-reserved\")]\n\n kw_list_3 = [(\"B\", \"reserved\")]\n\n result_3 = priority_keyword_merge(kw_list_2, kw_list_1, kw_list_3)\n\n expected_result_3 = [(\"A\", \"not-keyword\"), (\"B\", \"reserved\"), (\"C\", \"non-reserved\")]\n\n assert sorted(result_3) == sorted(expected_result_3)\n\n kw_list_1 = [(\"A\", \"not-keyword\"), (\"B\", \"non-reserved\")]\n\n result_4 = priority_keyword_merge(kw_list_1)\n\n expected_result_4 = kw_list_1\n\n assert sorted(result_4) == sorted(expected_result_4)",
"def test_priority_keyword_merge() -> None:\n kw_list_1 = [(\"A\", \"not-keyword\"), (\"B\", \"non-reserved\")]\n\n kw_list_2 = [(\"A\", \"reserved\"), (\"C\", \"non-reserved\")]\n\n result = priority_keyword_merge(kw_list_1, kw_list_2)\n\n expected_result = [(\"A\", \"reserved\"), (\"B\", \"non-reserved\"), (\"C\", \"non-reserved\")]\n\n assert sorted(result) == sorted(expected_result)\n\n kw_list_1 = [(\"A\", \"not-keyword\"), (\"B\", \"non-reserved\")]\n\n kw_list_2 = [(\"A\", \"reserved\"), (\"C\", \"non-reserved\")]\n\n result_2 = priority_keyword_merge(kw_list_2, kw_list_1)\n\n expected_result_2 = [\n (\"A\", \"not-keyword\"),\n (\"B\", \"non-reserved\"),\n (\"C\", \"non-reserved\"),\n ]\n\n assert sorted(result_2) == sorted(expected_result_2)\n\n kw_list_1 = [(\"A\", \"not-keyword\"), (\"B\", \"non-reserved\")]\n\n kw_list_2 = [(\"A\", \"reserved\"), (\"C\", \"non-reserved\")]\n\n kw_list_3 = [(\"B\", \"reserved\")]\n\n result_3 = priority_keyword_merge(kw_list_2, kw_list_1, kw_list_3)\n\n expected_result_3 = [(\"A\", \"not-keyword\"), (\"B\", \"reserved\"), (\"C\", \"non-reserved\")]\n\n assert sorted(result_3) == sorted(expected_result_3)\n\n kw_list_1 = [(\"A\", \"not-keyword\"), (\"B\", \"non-reserved\")]\n\n result_4 = priority_keyword_merge(kw_list_1)\n\n expected_result_4 = kw_list_1\n\n assert sorted(result_4) == sorted(expected_result_4)",
"def __order_search_list(self, search_list: list):\n hashtags_collides = []\n search_list.sort(key=lambda x: x.name)\n # create another list, that count the number of hashtags collides according to the rules\n for j in search_list:\n temp_count = 0\n for i in (hashtag for item in self._shopping_cart.get_cart_list() for hashtag in item.hashtags):\n for h in j.hashtags:\n if h == i:\n temp_count += 1\n hashtags_collides.append(temp_count)\n\n # short the original list based on the second list\n search_list[:] = [x for _, x in sorted(zip(hashtags_collides, search_list), key=lambda x: x[0], reverse=True)]\n return search_list",
"def order_by_key(results_list: list, order_key: str) -> list:\n reordered_results = sorted(results_list, key=itemgetter(order_key))\n return reordered_results",
"def GetTopKeywords(self, num_keywords):\n pairs = [(key,val) for key,val in self.all_word_freq_dict.iteritems()]\n pairs = sorted(pairs, cmp=self.decrease_sort)\n\n #return keywords that aren't too common; those that aren't stop words\n return filter(lambda word: word not in Profile.stop_words, map(lambda (k,v): k, pairs))[:num_keywords]",
"def predict_another_keywords(self, keywords, limit=5):\n new_keywords = []\n category = self.predict_category(keywords)\n category_keywords = {k: v for k, v in sorted(self.category_keyword_score[category].items(),\n key=lambda item: item[1],\n reverse=True)}\n for keyword in category_keywords:\n if keyword not in keywords:\n new_keywords.append(keyword)\n if len(new_keywords) >= limit:\n break\n return new_keywords",
"def search_results_sorted(self, key, document_list, reverse=False):\n if document_list:\n values_list = []\n empty_values_list = []\n documents = self.get_sorting_docs_indexes(document_list)\n # Selecting proper sorting couch query params\n if key in [\"metadata_created_date\", \"metadata_description\", \"metadata_doc_type_rule_id\"]:\n doc_field = key\n else:\n doc_field = 'mdt_indexes'\n # Creating special, sorting capable list of tuples (Docname, Sorting field value)\n for doc in documents:\n if doc_field == 'mdt_indexes':\n if key in doc[doc_field]:\n value = doc[doc_field][key]\n else:\n value = ''\n else:\n value = doc[key]\n if value:\n values_list.append((doc.get_id, value))\n else:\n empty_values_list.append(doc.get_id)\n try:\n document_list = sorted(values_list, key=itemgetter(1), reverse=reverse)\n document_list = map(lambda doc: doc[0], document_list)\n document_list = document_list + empty_values_list\n except TypeError, e:\n log.error('sorting TypeError error in indexes: %s, in documents_list: %s' % (e, document_list))\n pass\n return document_list",
"def search_by_keywords(keywords, reldate, database='pubmed', \n sort_type='Most Recent', retmax=20, datetype='edat'):\n results_by_term = OrderedDict()\n for term in keywords:\n formatted_term = '+'.join(term.split()) + \"[Title/Abstract]\"\n results_by_term[term] = entrezSearch(db=database, sort=sort_type, \n retmode='xml', retmax=retmax, term=formatted_term, reldate=reldate, \n datetype=datetype)\n return results_by_term",
"def get_keywords(list_item_ids, tfidf_matrix, tfidf_feature_names, number, itemID):\n ind = list_item_ids.index(itemID)\n tf_idf_vector = tfidf_matrix[ind]\n sorted_items = sort_coo(tf_idf_vector.tocoo())\n keywords = extract_topn_from_vector(\n tfidf_feature_names, sorted_items, number)\n mean_keys = np.array(list(keywords.values())).mean()\n return keywords, round(mean_keys, 3)",
"def keyword_list(self, keyword_list):\n\n self._keyword_list = keyword_list",
"def searchAuthors(self,keys_list=None,author_list=[]):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\t#only select keys for which all the authors in the query list are in the obj.authors set\n\t\treturn [k for k in self.searchObjTypeDerive(keys_list) if set(author_list).issubset(self.entries[k].authors)]",
"def sort(list_in, sort_key=lambda s: s.lower()):\n return sorted(list_in, key=sort_key)",
"def fetch_keyword_set(file_name):\n keywords = set()\n with open(file_name, \"r\") as f:\n for line in f:\n keywords.add(line.replace(\"-\", \"\").strip())\n\n keywords = nlp(\" \".join(keywords))\n keywords = [\n preprocess_token(token) for token in keywords if is_token_allowed(token)\n ]\n\n return set(keywords)",
"def matched_keys(key_path: Any, all_keys: Sequence, case_ignored: bool, space_trimmed: bool = False) -> List:\n normalized = normalize(key_path, case_ignored, space_trimmed)\n keys = [k for k in all_keys if key_matches(k, normalized, case_ignored)]\n\n if len(keys) > 1:\n logger.warning(f\"Multiple matching of '{key_path}': {','.join((str(k) for k in keys))}\")\n return keys",
"def analyze_keyphrases(posts):\n if type(posts) is not str:\n text = combine_posts(posts)\n else:\n text = posts\n keywords = RAKE.rake.run(text)\n high_freq_keywords = []\n for pair in keywords:\n \"\"\" set threshold to select keyphrases \"\"\"\n try:\n if pair[1] > 3.5:\n high_freq_keywords.append([pair[0], pair[1]])\n except IndexError:\n continue\n return high_freq_keywords"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
searches through keys_list and picks the entries out of the database that have all the authors in the specified list; return order is arbitrary; search is case sensitive
|
def searchAuthors(self,keys_list=None,author_list=[]):
    if not keys_list:
        keys_list = self.getEntryList()
    #only select keys for which all the authors in the query list are in the obj.authors set
    return [k for k in self.searchObjTypeDerive(keys_list) if set(author_list).issubset(self.entries[k].authors)]
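A minimal standalone sketch of the matching rule used above (entry keys and author names are invented for illustration): an entry is returned only when every queried author appears in its author set, and the comparison is exact, so case must match.
entries = {
    "smith2020": {"Smith, J.", "Doe, A."},  # entry key -> set of author strings
    "doe2019": {"Doe, A."},
}
query = ["Smith, J.", "Doe, A."]
matches = [k for k, authors in entries.items() if set(query).issubset(authors)]
print(matches)  # ['smith2020'] -- 'doe2019' lacks Smith, and 'smith, j.' would not match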
|
[
"def searchKeywords(self,keys_list=None,keyword_list=[]):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\t#we make the query set case insensitive by converting all the strings to lowercase\n\t\tlist_of_keywords_lower = map(lambda x: x.lower(), keyword_list)\n\n\t\t#we define a function that checks how many elements are in common between the query set and the keywords set \n\t\tdef numInCommon(list1,list2):\n\t\t\treturn len(set(list1)&set(list2))\n\t\t\n\t\t#get keys whose value has some intersection with the query set \n\t\tr_keys = [k for k in self.searchObjTypeDerive(keys_list,query_objType=\".obj.pub\") if numInCommon(self.entries[k].keywords,list_of_keywords_lower)>0 ]\n\t\tr_keys.sort(key=lambda x: numInCommon(self.entries[x].keywords,list_of_keywords_lower), reverse=True)\n\t\treturn r_keys",
"def sortByFirstAuthor(self,keys_list=None):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\tr_list = self.searchObjTypeDerive(keys_list,query_objType=\".obj.pub\")\n\t\tr_list.sort(key = lambda x: self.entries[x].authors[0] )\n\t\treturn r_list",
"def search_author(self, in_author):\n author = in_author.lower()\n list_of_authors = []\n if author and not author.isspace():\n no_author=True\n for book in self.booklist:\n if book.author == author:\n list_of_authors.append(Library.return_book_string(self, book))\n no_author=False\n if no_author:\n list_of_authors.append(\"Author not found\")\n else:\n list_of_authors.append(\"Write in Author\")\n return list_of_authors",
"def getSortedInstituteCodes(authorList, authorInstituteCodes):\n instituteCodes = []\n for author in authorList:\n codes = authorInstituteCodes[author]\n if not hasattr(codes, '__iter__'):\n codes = [codes]\n for code in codes:\n if not code in instituteCodes:\n instituteCodes.append(code)\n return instituteCodes",
"def search_by_authors(authors, reldate, database='pubmed', \n sort_type='Most Recent', retmax=20, datetype='edat'):\n results_by_term = OrderedDict()\n for term in authors:\n formatted_term = term + '[Author]'\n results_by_term[term] = entrezSearch(db=database, sort=sort_type, \n retmode='xml', retmax=retmax, term=formatted_term, reldate=reldate, \n datetype=datetype)\n return results_by_term",
"def filter_by_author(contributions, authors):\n filtered_contributions = []\n for contribution in contributions:\n if contribution[\"author\"] in authors:\n filtered_contributions.append(contribution)\n return filtered_contributions",
"def save_authors_list(authors_list):\n \n client = MongoClient(mongo_constants['server_name'], mongo_constants['port_number'])\n db = client[mongo_constants['database']]\n coll = db[mongo_constants['author_list']]\n\n for author in authors_list:\n if coll.find({'orcid': author['orcid']}).count() == 0:\n author['worksFound'] = 0\n coll.insert(author)\n print 'Inserted Author: ' + author['orcid']",
"def search_for_books(self, query):\n books = []\n book = Book(self.db)\n\n for author in self.db.cursor().execute('SELECT author_id FROM authors WHERE ' + query):\n books.extend(self.get_books(author[0]))\n\n return books",
"def test_get_authors_list(self):\n self.assertEqual(Author.get_authors_list('bEst'), ['Best Author 1'])\n self.assertEqual(Author.get_authors_list('1'), ['Best Author 1'])\n self.assertEqual(Author.get_authors_list(' '), ['Best Author 1', 'zlast author'])\n self.assertEqual(Author.get_authors_list('new'), ['trueAuthorNew'])\n self.assertEqual(Author.get_authors_list('TRUE'), ['trueAuthorNew'])\n self.assertEqual(Author.get_authors_list('Best Author 1'), ['Best Author 1'])\n self.assertEqual(Author.get_authors_list('trueAuthorNew'), ['trueAuthorNew'])",
"def find_in_author_search_list(author_name):\n \n client = MongoClient(mongo_constants['server_name'], mongo_constants['port_number'])\n db = client[mongo_constants['database']]\n coll = db[mongo_constants['author_search_list']]\n \n doc = {'author_name': author_name}\n\n return coll.find(doc).count()",
"def getAuthorNamesAndEmail(authorInitialsList):\n db = DiaryDatabaseWrapper.DiaryDatabaseWrapper() \n authorNameList = list()\n authorEmailList = list()\n for authorInitials in authorInitialsList:\n authorRows = db.selectFromTable('authors',('name','email'),\\\n 'WHERE initials=\\'' + authorInitials + '\\'')\n authorNameList.append(authorRows[0][0])\n authorEmailList.append(authorRows[0][1])\n db.close()\n return authorNameList, authorEmailList",
"def search_authors():\n insert_query = request.args.get('q')\n if not query_author:\n abort(400, \"Bad Request: Not valid search\")\n res = query_author.query_handler_author(insert_query)\n return jsonify(res), 201",
"def searchObjTypeList(self,keys_list=None,objType_list=[\".obj.pub\",\".obj.pub.article\",\".obj.pub.book\"]):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\n\t\treturn [k for k in keys_list if k in self.getEntryList() and self.entries[k].objType in objType_list]",
"def get_all_authors():\n \n client = MongoClient(mongo_constants['server_name'], mongo_constants['port_number'])\n db = client[mongo_constants['database']]\n coll = db[mongo_constants['author_list']]\n\n return coll.find()",
"def get_authors(self):\n if self.anyauthor in self['author']:\n return None\n return [self['author']]\n #return [a for a in self['author'] if a != self.anyauthor]",
"def matched_keys(key_path: Any, all_keys: Sequence, case_ignored: bool, space_trimmed: bool = False) -> List:\n normalized = normalize(key_path, case_ignored, space_trimmed)\n keys = [k for k in all_keys if key_matches(k, normalized, case_ignored)]\n\n if len(keys) > 1:\n logger.warning(f\"Multiple matching of '{key_path}': {','.join((str(k) for k in keys))}\")\n return keys",
"def get_authors():\n\n _authors = mongo.db.books.find({}, {\"author\": 1, \"_id\": 0})\n author_list = [author[\"author\"] for author in _authors]\n\n authors = []\n for author in author_list:\n if author not in authors:\n authors.append(author)\n return authors",
"def lookup_authorities_for_urns(self, urns):\n raise GFedv2NotImplementedError(\"Method not implemented\")",
"def print_authors(bib_sorted,trim=0):\n authors={}\n for key,value in bib_sorted:\n author_list = value.fields['author'].split(' and ')\n if len(author_list) <= trim :\n for author in author_list :\n try:\n authors[author].append(key)\n except:\n authors[author] = [key] \n i = 0\n for author in sorted(authors.keys()):\n i = i+1\n print i,author,authors[author]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
searches through keys_list and picks the entries for which the object in the database has the attribute defined and equal to value; return order is arbitrary; default is to search attribute='objType' with value '.obj.pub'
|
def searchAttribute(self,keys_list=None,attribute="objType",value=".obj.pub"):
    if not keys_list:
        keys_list = self.getEntryList()
    return [k for k in keys_list if k in self.getEntryList() and hasattr(self.entries[k],attribute) and getattr(self.entries[k],attribute) == value ]
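A small hedged illustration of the hasattr/getattr filter above (the Entry class and values are hypothetical): only entries that define the attribute and match the value exactly are kept, so derived types such as '.obj.pub.article' are not returned by this exact-match search.
class Entry:
    def __init__(self, objType):
        self.objType = objType

entries = {"a": Entry(".obj.pub"), "b": Entry(".obj.pub.article"), "c": Entry(".obj.note")}
keys = [k for k, e in entries.items()
        if hasattr(e, "objType") and getattr(e, "objType") == ".obj.pub"]
print(keys)  # ['a'] -- exact match only, unlike the prefix-based searchObjTypeDerive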
|
[
"def searchObjTypeDerive(self,keys_list=None,query_objType=\".obj.pub\"):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\treturn [k for k in keys_list if k in self.getEntryList() and self.entries[k].objType[:len(query_objType)] == query_objType]",
"def searchObjTypeList(self,keys_list=None,objType_list=[\".obj.pub\",\".obj.pub.article\",\".obj.pub.book\"]):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\n\t\treturn [k for k in keys_list if k in self.getEntryList() and self.entries[k].objType in objType_list]",
"def searchAuthors(self,keys_list=None,author_list=[]):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\t#only select keys for which all the authors in the query list are in the obj.authors set\n\t\treturn [k for k in self.searchObjTypeDerive(keys_list) if set(author_list).issubset(self.entries[k].authors)]",
"def searchKeywords(self,keys_list=None,keyword_list=[]):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\t#we make the query set case insensitive by converting all the strings to lowercase\n\t\tlist_of_keywords_lower = map(lambda x: x.lower(), keyword_list)\n\n\t\t#we define a function that checks how many elements are in common between the query set and the keywords set \n\t\tdef numInCommon(list1,list2):\n\t\t\treturn len(set(list1)&set(list2))\n\t\t\n\t\t#get keys whose value has some intersection with the query set \n\t\tr_keys = [k for k in self.searchObjTypeDerive(keys_list,query_objType=\".obj.pub\") if numInCommon(self.entries[k].keywords,list_of_keywords_lower)>0 ]\n\t\tr_keys.sort(key=lambda x: numInCommon(self.entries[x].keywords,list_of_keywords_lower), reverse=True)\n\t\treturn r_keys",
"def filter_search(self,search_key, search_value, element_list):\n filter_result=[]\n for element in element_list:\n for element_key in element:\n if element_key == search_key and element[element_key] == search_value:\n #print(element)\n filter_result.append(element)\n\n return filter_result",
"def search(data, **attrs):\n return filter(lambda x: attrs.items() <= x.items(), data)",
"def findall(self, **kwargs):\r\n found = []\r\n searches = kwargs.items()\r\n\r\n for obj in self.list():\r\n try:\r\n if all(getattr(obj, attr) == value\r\n for (attr, value) in searches):\r\n found.append(obj)\r\n except AttributeError:\r\n continue\r\n return found",
"def cmp_ents(ent1, ent2, attrlist):\n for attr in attrlist:\n try:\n assert ent1[attr] == ent2[attr]\n except AssertionError as error:\n logging.error(\"key:%s, ent1:%s, ent2:%s, error:%s, attrlist:%s\",\n str(attr), str(ent1), str(ent2), str(error), str(attrlist))\n raise error",
"def make_search_crit_list(search_crit_list_str, obj_list):\n search_crit_list=[]\n dict_connection={}\n\n for obj in obj_list:\n list2btuple=[]\n for crit_str in search_crit_list_str:\n list2btuple.append(getattr(obj, crit_str))\n this_tup=tuple(list2btuple)\n search_crit_list.append(this_tup)\n dict_connection[this_tup]= obj\n\n return search_crit_list, dict_connection",
"def attr_matches(self, text):\n import re\n m = re.match(r\"(\\w+(\\.\\w+)*)\\.(\\w*)\", text)\n if not m:\n return []\n expr, attr = m.group(1, 3)\n try:\n thisobject = eval(expr, self.namespace)\n except Exception:\n return []\n\n # get the content of the object, except __builtins__\n words = set(dir(thisobject))\n words.discard(\"__builtins__\")\n\n if hasattr(thisobject, '__class__'):\n words.add('__class__')\n words.update(get_class_members(thisobject.__class__))\n matches = []\n n = len(attr)\n for word in words:\n if word[:n] == attr:\n try:\n val = getattr(thisobject, word)\n except Exception:\n continue # Exclude properties that are not set\n word = self._callable_postfix(val, \"%s.%s\" % (expr, word))\n matches.append(word)\n matches.sort()\n return matches",
"def get_objs_with_key_or_alias(self, ostring, exact=True,\r\n candidates=None, typeclasses=None):\r\n if not isinstance(ostring, basestring):\r\n if hasattr(ostring, \"key\"):\r\n ostring = ostring.key\r\n else:\r\n return []\r\n if is_iter(candidates) and not len(candidates):\r\n # if candidates is an empty iterable there can be no matches\r\n # Exit early.\r\n return []\r\n\r\n # build query objects\r\n candidates_id = [_GA(obj, \"id\") for obj in make_iter(candidates) if obj]\r\n cand_restriction = candidates != None and Q(pk__in=make_iter(candidates_id)) or Q()\r\n type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()\r\n if exact:\r\n # exact match - do direct search\r\n return self.filter(cand_restriction & type_restriction & (Q(db_key__iexact=ostring) |\r\n Q(db_tags__db_key__iexact=ostring) & Q(db_tags__db_tagtype__iexact=\"alias\"))).distinct()\r\n elif candidates:\r\n # fuzzy with candidates\r\n key_candidates = self.filter(cand_restriction & type_restriction)\r\n else:\r\n # fuzzy without supplied candidates - we select our own candidates\r\n key_candidates = self.filter(type_restriction & (Q(db_key__istartswith=ostring) | Q(db_tags__db_key__istartswith=ostring))).distinct()\r\n candidates_id = [_GA(obj, \"id\") for obj in key_candidates]\r\n # fuzzy matching\r\n key_strings = key_candidates.values_list(\"db_key\", flat=True)\r\n index_matches = string_partial_matching(key_strings, ostring, ret_index=True)\r\n if index_matches:\r\n return [obj for ind, obj in enumerate(key_candidates) if ind in index_matches]\r\n else:\r\n alias_candidates = self.filter(id__in=candidates_id, db_tags__db_tagtype__iexact=\"alias\")\r\n alias_strings = alias_candidates.values_list(\"db_key\", flat=True)\r\n index_matches = string_partial_matching(alias_strings, ostring, ret_index=True)\r\n if index_matches:\r\n return [alias.db_obj for ind, alias in enumerate(alias_candidates) if ind in index_matches]\r\n return []",
"def find(self, oid, limit=0):\n ext = []\n if isinstance(oid, str):\n oid = OID(oid)\n keyset = sorted(self.keys())\n for k in keyset:\n v = self[k]\n if k.match(oid):\n ext.append((k, v))\n if limit and len(ext) == limit:\n break\n return ext",
"def node_with_attr(attr_name,value):\n result = [ node for node in NODE if attr_name in NODE[node] and NODE[node][attr_name] == value ]\n BuiltIn().log(\"Found %d nodes with condition `%s`=`%s`\" % (len(result),attr_name,value))\n return result",
"def find_objects(self, **criteria):\n\n matchers = self._get_matchers(criteria)\n\n found = []\n for node in self._root.iter('node'):\n match = True\n for matcher in matchers:\n rhs_value = node.get(matcher.key, '')\n if not matcher(rhs_value):\n match = False\n break\n if match:\n found.append(self._get_attrs(node))\n\n return found",
"def comp_obj_lists(db_obj_list, bpt_obj_list):\n\n def make_search_crit_list(search_crit_list_str, obj_list):\n \"\"\"takes in list of search criteria as strings, like [\"first_name\",\"last_name\"] and makes tuple of that object's values\n returns list of all tuples made\"\"\"\n search_crit_list=[]\n dict_connection={}\n\n for obj in obj_list:\n list2btuple=[]\n for crit_str in search_crit_list_str:\n list2btuple.append(getattr(obj, crit_str))\n this_tup=tuple(list2btuple)\n search_crit_list.append(this_tup)\n dict_connection[this_tup]= obj\n\n return search_crit_list, dict_connection\n\n def trim_obj_list(in_db, bpt_obj_list,search_crit_list_str):\n \"\"\"takes list of which objects are in the db as of moment and which should be but haven't been found yet,\n makes search criteria list of db entries and filters should be in by, returns\n modified lists of who is in d and who may not be, after that filter\"\"\"\n print \"trim_obj_list(\"\n\n in_db_search_crit_list,in_db_dict_connection=make_search_crit_list(search_crit_list_str, in_db)\n bpt_search_crit_list,bpt_dict_connection=make_search_crit_list(search_crit_list_str, bpt_obj_list)\n\n bpt_set = set(bpt_search_crit_list)\n db_set = set(in_db_search_crit_list)\n intersect=bpt_set.intersection(db_set)\n\n just_found=[]\n\n for obj_tup in intersect:\n if obj_tup is not None:\n obj=bpt_dict_connection.get(obj_tup)\n if obj is not None:\n bpt_obj_list.remove(obj)\n just_found.append(obj)\n\n #reminder: make shallow copies, not pointers.\n return list(in_db), list(bpt_obj_list),just_found\n\n\n in_db=list(db_obj_list)\n not_found_yet=list(bpt_obj_list)\n\n all_found=[]\n print \"all_found len: \",len(all_found)\n\n in_db, not_found_yet,just_found=trim_obj_list(in_db, not_found_yet,[\"email\", \"first_name\",\"last_name\",\"sk8name\"])\n print \"pass 1: %s found in DB by emial, fname, lname,sk8name, %s not found, %s total \"%( str(len(just_found)), str(len(not_found_yet)), str((len(in_db)+len(not_found_yet))) )\n all_found+=just_found\n print \"all_found len: \",len(all_found)\n\n #next, email and fname and lname NOT sk8name\n in_db, not_found_yet,just_found=trim_obj_list(in_db, not_found_yet,[\"email\", \"first_name\",\"last_name\"])\n print \"pass 2: %s found in DB by email, fname, lname,but NOT sk8name, %s not found, %s total \"%( str(len(just_found)), str(len(not_found_yet)), str((len(in_db)+len(not_found_yet))) )\n all_found+=just_found\n print \"all_found len: \",len(all_found)\n############this looks like it could be problematic, prodice false positives. 
commented out########\n\n #next, email and fname and lname NOT sk8name\n # in_db, not_found_yet,just_found=trim_obj_list(in_db, not_found_yet,[\"email\",\"last_name\"])\n # print \"pass 3: %s found in DB by email, lname, %s not found, %s total \"%( str(len(just_found)), str(len(not_found_yet)), str((len(in_db)+len(not_found_yet))) )\n #all_found+=just_found\n #print \"all_found len: \",len(all_found)\n#######################################\n\n\n #next, email and fname and lname NOT sk8name\n in_db, not_found_yet,just_found=trim_obj_list(in_db, not_found_yet,[\"email\", \"first_name\",])\n print \"pass 4: %s found in DB by email, fname, %s not found, %s total \"%( str(len(just_found)), str(len(not_found_yet)), str((len(in_db)+len(not_found_yet))) )\n all_found+=just_found\n print \"all_found len: \",len(all_found)\n\n #next, email and sk8nme\n in_db, not_found_yet,just_found=trim_obj_list(in_db, not_found_yet,[\"email\", \"sk8name\"])\n print \"pass 5: %s found in DB by email, and sk8name, %s not found, %s total \"%( str(len(just_found)), str(len(not_found_yet)), str((len(in_db)+len(not_found_yet))) )\n all_found+=just_found\n print \"all_found len: \",len(all_found)\n\n #next, email and fname and lname and sk8name\n in_db, not_found_yet,just_found=trim_obj_list(in_db, not_found_yet,[\"first_name\",\"last_name\",\"sk8name\"])\n print \"pass 6: %s found in DB by fname, lname,sk8name, NOT email %s not found, %s total \"%( str(len(just_found)), str(len(not_found_yet)), str((len(in_db)+len(not_found_yet))) )\n all_found+=just_found\n print \"all_found len: \",len(all_found)\n\n #next, email and fname and lname and sk8name\n in_db, not_found_yet,just_found=trim_obj_list(in_db, not_found_yet,[\"first_name\",\"sk8name\"])\n print \"pass 7: %s found in DB by fname,sk8name %s not found, %s total \"%( str(len(just_found)), str(len(not_found_yet)), str((len(in_db)+len(not_found_yet))) )\n all_found+=just_found\n print \"all_found len: \",len(all_found)\n\n #next, email and fname and lname and sk8name\n in_db, not_found_yet,just_found=trim_obj_list(in_db, not_found_yet,[\"last_name\",\"sk8name\"])\n print \"pass 8: %s found in DB by lname, sk8name, %s not found, %s total \"%( str(len(just_found)), str(len(not_found_yet)), str((len(in_db)+len(not_found_yet))) )\n all_found+=just_found\n print \"all_found len: \",len(all_found)\n\n return in_db, not_found_yet,all_found",
"def get_objs_with_tag(self, key=None, category=None, model=\"objects.objectdb\", tagtype=None):\r\n objclass = ContentType.objects.get_by_natural_key(*model.split(\".\", 1)).model_class()\r\n key_cands = Q(db_tags__db_key__iexact=key.lower().strip()) if key is not None else Q()\r\n cat_cands = Q(db_tags__db_category__iexact=category.lower().strip()) if category is not None else Q()\r\n tag_crit = Q(db_tags__db_model=model, db_tags__db_tagtype=tagtype)\r\n return objclass.objects.filter(tag_crit & key_cands & cat_cands)",
"def index(objects, attr):\n return {getattr(obj, attr): obj for obj in objects}",
"def find_and_cmp(list1, list2, cmp_function, id_attr=None, cmp_length=True):\n id_attr = id_attr or ['system_name']\n if cmp_length:\n assert len(list1) == len(list2)\n queue = []\n for ent1 in list1:\n for ent2 in list2:\n if all(ent1.entity[r] == ent2.entity[r] for r in id_attr):\n queue.append((ent1, ent2))\n list2.remove(ent2)\n break\n for ent1, ent2 in queue:\n assert len(ent1.keys()) == len(ent2.keys())\n cmp_function(ent1, ent2)",
"def search(cls, ignore_case=False, **kw):\n\n result = []\n\n cls._initialize_cache()\n\n for k, v in kw.items():\n for obj in cls.__cache:\n ob_value = getattr(obj, k, None)\n if ignore_case:\n if isinstance(v, str) and isinstance(ob_value, str):\n r = (v.lower() == ob_value.lower())\n else:\n r = (v == ob_value)\n if r:\n if obj in result:\n continue\n else:\n result.append(obj)\n else:\n continue\n\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
sorts publications in keys_list by the year of the corresponding database entry; return order is such that [0] is the newest and [-1] is the oldest, unless invert is specified as True (default is False)
|
def sortByYear(self,keys_list=None,invert=False):
    if not keys_list:
        keys_list = self.getEntryList()
    r_list = self.searchObjTypeDerive(keys_list,query_objType=".obj.pub")
    r_list.sort(key=lambda x : self.entries[x].year,reverse=not invert)
    return r_list
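A brief standalone sketch of the ordering convention (sample data is invented): sorting with reverse=not invert puts the newest entry at index 0 by default, and passing invert=True flips that to oldest-first.
years = {"p1": 1999, "p2": 2015, "p3": 2007}
invert = False
ordered = sorted(years, key=lambda k: years[k], reverse=not invert)
print(ordered)  # ['p2', 'p3', 'p1'] -- index 0 is the newest entry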
|
[
"def sort_by_year(sort_list):\n sort_list.sort(key=lambda song: song.year)",
"def sortByTitle(self,keys_list=None):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\tr_list = self.searchObjTypeDerive(keys_list,query_objType=\".obj.pub\")\n\t\tr_list.sort(key = lambda x: self.entries[x].title )\n\t\treturn r_list",
"def sort_key_by_date(keys):\n list_of_keys = []\n for key in keys:\n id, date = key.split(' ')\n list_of_keys.append([id, datetime.strptime(date, '%m%d%Y')])\n sorted_list = sorted(list_of_keys)\n dates = [date_to_string(x[1]) for x in sorted_list]\n ids = [x[0] for x in sorted_list]\n sorted_list = [id + ' ' + date for id, date in zip(ids, dates)]\n return sorted_list",
"def _sorted_keys(self, keys):\n sorted_keys = []\n if ('epoch' in keys) and ('epoch' not in self.keys_ignored):\n sorted_keys.append('epoch')\n\n for key in sorted(keys):\n if not (\n (key in ('epoch', 'dur')) or\n (key in self.keys_ignored) or\n key.startswith(self.keys_ignored_start_with) or\n key.endswith('_best')\n ):\n sorted_keys.append(key)\n\n if ('dur' in keys) and ('dur' not in self.keys_ignored):\n sorted_keys.append('dur')\n return sorted_keys",
"def get_projects_sorted_by_contrib(self, filter_out_projs_with_zero_pubs=True):\n map_project_name_to_tuple = dict() # tuple is (count, most_recent_pub_date, project)\n #publications = self.publication_set.order_by('-date')\n\n # Go through all the projects by this person and track how much\n # they've contributed to each one (via publication)\n #print(\"******{}*******\".format(self.get_full_name()))\n for pub in self.publication_set.all():\n for proj in pub.projects.all():\n #print(\"pub\", pub, \"proj\", proj)\n if proj.name not in map_project_name_to_tuple:\n most_recent_date = proj.start_date\n if most_recent_date is None:\n most_recent_date = pub.date\n if most_recent_date is None:\n most_recent_date = datetime.date(2012, 1, 1) # when the lab was founded\n\n map_project_name_to_tuple[proj.name] = (0, most_recent_date, proj)\n\n tuple_cnt_proj = map_project_name_to_tuple[proj.name]\n most_recent_date = tuple_cnt_proj[1]\n if pub.date is not None and pub.date > most_recent_date:\n most_recent_date = pub.date\n\n map_project_name_to_tuple[proj.name] = (tuple_cnt_proj[0] + 1, # pub cnt\n most_recent_date, # most recent pub date\n tuple_cnt_proj[2]) # project\n\n list_tuples = list([tuple_cnt_proj for tuple_cnt_proj in map_project_name_to_tuple.values()])\n list_tuples_sorted = sorted(list_tuples, key=lambda t: (t[0], t[1]), reverse=True)\n\n #print(\"list_tuples_sorted\", list_tuples_sorted)\n\n ordered_projects = []\n if len(list_tuples_sorted) > 0:\n list_cnts, list_dates, ordered_projects = zip(*list_tuples_sorted)\n\n if len(ordered_projects) <= 0 and not filter_out_projs_with_zero_pubs:\n # if a person hasn't published but is still on projects\n # default to this\n ordered_projects = self.get_projects()\n\n return ordered_projects",
"def sortByFirstAuthor(self,keys_list=None):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\tr_list = self.searchObjTypeDerive(keys_list,query_objType=\".obj.pub\")\n\t\tr_list.sort(key = lambda x: self.entries[x].authors[0] )\n\t\treturn r_list",
"def years_movies_released():\n reader = initialize_reader()\n years_list = [row[23] for row in reader]\n years_dicts = [{\"year\": i, \"movies_released\": years_list.count(i)} for i in years_list]\n new_list = sorted(years_dicts, key=lambda i: i['movies_released'])\n year_less_movies = new_list[:1]\n print(f\"The year {year_less_movies[0].get('year')} had less movies released with {year_less_movies[0].get('movies_released')}\")\n new_list = sorted(years_dicts, key=lambda i: i['movies_released'], reverse=True)\n year_more_movies = new_list[:1]\n print(f\"The year {year_more_movies[0].get('year')} had more movies released with {year_more_movies[0].get('movies_released')}\")",
"def __sort(self):\n\n f_logger.debug('Feed.__sort')\n\n for index in range(1, len(self.__list_of_articles)):\n current_article = self.__list_of_articles[index]\n position = index\n\n while position > 0 \\\n and self.__list_of_articles[position - 1].published_date < current_article.published_date:\n self.__list_of_articles[position] = self.__list_of_articles[position - 1]\n position -= 1\n\n self.__list_of_articles[position] = current_article",
"def canonsort_keys(keys, canonical_order=None):\r\n canonical_map = dict((k, i) for i, k in enumerate(canonical_order or []))\r\n head = [k for k in keys if k in canonical_map]\r\n tail = [k for k in keys if k not in canonical_map]\r\n return sorted(head, key=lambda k: canonical_map[k]) + sorted(tail)",
"def sorted_database(database):\n artistkeys = sorted(list(database.keys()))\n return_list = []\n for key in artistkeys:\n artworkkeys = sorted(list(database[key].keys()))\n temp_artwork_list = []\n for art_piece_key in artworkkeys:\n temp_artwork_list.append(\n (art_piece_key, database[key][art_piece_key]))\n return_list.append((key, temp_artwork_list))\n return return_list",
"def sort_events(event_list):\n #Again, I basically stole this function from an older lab. The only thing I had to change was adding a 2nd for loop to sort by both date and month.\n #It took me a while to actually figure out what order to put these for loops, but I found that sorting by day first was better.\n for i in range(1, len(event_list)):\n cur = event_list[i]\n previndex = i - 1\n while previndex >= 0 and cur[1] < event_list[previndex][1]:\n event_list[previndex + 1] = event_list[previndex]\n previndex -= 1\n event_list[previndex + 1] = cur\n for i in range(1, len(event_list)):\n cur = event_list[i]\n previndex = i - 1\n while previndex >= 0 and cur[0] < event_list[previndex][0]:\n event_list[previndex + 1] = event_list[previndex]\n previndex -= 1\n event_list[previndex + 1] = cur\n return event_list",
"def sorted_stories_list(hnList):\r\n return sorted(hnList,key=lambda x:x['votes'],reverse=True)",
"def sort(self):\n self.data = self.data.sort_values(by=['year', 'month', 'day'], ascending=True)",
"def get_sorted_shares_list(shares_list: list[dict]) -> list[dict]:\n return sorted(shares_list, key=lambda share: share['roi'], reverse=True)",
"def _sort_lines(self, lines):\n def sort_key_func(item):\n try:\n return datetime.strptime(item[0], ARCHIVE_DT_FORMAT)\n except ValueError as err:\n self.log.error(str(err))\n raise ValueError\n\n return list(sorted(lines, key=sort_key_func))",
"def test_feed_entries_date_sort(self):\n urls = feedsreader.load_feeds_urls()\n self.assertTrue(len(urls) > 1)\n\n feeds_by_date = feedsreader.read_feeds_sort_pub_date(urls)\n entry_0 = feeds_by_date[0].get('entry_date')\n entry_1 = feeds_by_date[1].get('entry_date')\n self.assertTrue(entry_0 > entry_1)",
"def sort_by_date(events: List[Event]) -> List[Event]:\n\n temp = events.copy()\n return sorted(temp, key=attrgetter(\"start\"))",
"def _sort_last_ten(self, values, years):\n # Pair together values and years with a dictionary.\n last_ten = {}\n for value, year in zip(values, years):\n last_ten[value] = year\n\n last_ten_vals = sorted(values)[::-1]\n last_ten_years = [last_ten[value] for value in last_ten_vals]\n return last_ten_vals, last_ten_years",
"def search_results_by_date(self, documents):\n newlist = sorted(documents, key=itemgetter('metadata_created_date'))\n return newlist"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
sorts publications in keys_list alphabetically by the first author of the corresponding database entry; return order is 0 to 9, then A to Z, then a to z, then special characters
|
def sortByFirstAuthor(self,keys_list=None):
    if not keys_list:
        keys_list = self.getEntryList()
    r_list = self.searchObjTypeDerive(keys_list,query_objType=".obj.pub")
    r_list.sort(key = lambda x: self.entries[x].authors[0] )
    return r_list
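The stated ordering falls out of plain Python string comparison, which compares code points, so digits sort before uppercase letters and uppercase before lowercase; a tiny illustration with invented author names:
first_authors = ["delta, D.", "Alpha, A.", "42group", "Ólafsson, O."]
print(sorted(first_authors))  # ['42group', 'Alpha, A.', 'delta, D.', 'Ólafsson, O.']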
|
[
"def sortByTitle(self,keys_list=None):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\tr_list = self.searchObjTypeDerive(keys_list,query_objType=\".obj.pub\")\n\t\tr_list.sort(key = lambda x: self.entries[x].title )\n\t\treturn r_list",
"def sortByYear(self,keys_list=None,invert=False):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\tr_list = self.searchObjTypeDerive(keys_list,query_objType=\".obj.pub\")\n\n\t\tr_list.sort(key=lambda x : self.entries[x].year,reverse=not invert)\n\t\treturn r_list",
"def test_sort_nonbuilder(self):\n cmd = \"mkauthlist -f --doc %(csv)s %(tex)s -sn\"%self.files\n print(cmd)\n subprocess.check_output(cmd,shell=True)\n\n with open(self.tex,'r') as f:\n authors = [l for l in f.readlines() if l.startswith('\\\\author')]\n self.assertEqual(authors[0],'\\\\author{A.~Drlica-Wagner}\\n')\n self.assertEqual(authors[-1],'\\\\author{T.~M.~C.~Abbott}\\n')\n self.assertEqual(authors[4],'\\\\author{Y.~Zhang}\\n')",
"def canonsort_keys(keys, canonical_order=None):\r\n canonical_map = dict((k, i) for i, k in enumerate(canonical_order or []))\r\n head = [k for k in keys if k in canonical_map]\r\n tail = [k for k in keys if k not in canonical_map]\r\n return sorted(head, key=lambda k: canonical_map[k]) + sorted(tail)",
"def print_authors(bib_sorted,trim=0):\n authors={}\n for key,value in bib_sorted:\n author_list = value.fields['author'].split(' and ')\n if len(author_list) <= trim :\n for author in author_list :\n try:\n authors[author].append(key)\n except:\n authors[author] = [key] \n i = 0\n for author in sorted(authors.keys()):\n i = i+1\n print i,author,authors[author]",
"def arrange(l: Dict[str, List[str]]) -> None:\n for key in l:\n l[key].sort()",
"def sort(list_in, sort_key=lambda s: s.lower()):\n return sorted(list_in, key=sort_key)",
"def getSortedInstituteCodes(authorList, authorInstituteCodes):\n instituteCodes = []\n for author in authorList:\n codes = authorInstituteCodes[author]\n if not hasattr(codes, '__iter__'):\n codes = [codes]\n for code in codes:\n if not code in instituteCodes:\n instituteCodes.append(code)\n return instituteCodes",
"def pid_sort_key(pid_key):\n order = {\n 'p': 'a',\n 'i': 'b',\n 'd': 'c'\n }\n new_key = list(pid_key)\n new_key[4] = order[pid_key[4]]\n return ''.join(new_key)",
"def _sort_key(self, author):\n if (\n self.config(\"show_line_count\")\n or self.config(\"show_contribution\")\n or self.config(\"sort_authors_by\") == \"contribution\"\n ):\n key = \"contribution\"\n else:\n key = \"name\"\n\n func = getattr(author, key)\n return func()",
"def sorted_database(database):\n artistkeys = sorted(list(database.keys()))\n return_list = []\n for key in artistkeys:\n artworkkeys = sorted(list(database[key].keys()))\n temp_artwork_list = []\n for art_piece_key in artworkkeys:\n temp_artwork_list.append(\n (art_piece_key, database[key][art_piece_key]))\n return_list.append((key, temp_artwork_list))\n return return_list",
"def sort_subject_list() -> None:\n with open(\"resources/subject_list.txt\", \"r+\") as outfile:\n lines = outfile.readlines()\n lines.sort()",
"def _sort(list_in, sort_key=None):\n if sort_key is None:\n sort_key = lambda s: s.lower()\n return sorted(list_in, key=sort_key)",
"def git_annotate_author_order(commits, git_actor_dedupe_table):\n author_commits = collections.defaultdict(list)\n\n for k, c in commits.items():\n if 'order' in c:\n author = git_actor_dedupe_table[c['author']]['standard_actor']\n author_commits[author].append((c['order'], k))\n\n for author, val in author_commits.items():\n for i, (order, c) in enumerate(sorted(val, key=lambda x: x[0])):\n commits[c]['author_order'] = i + 1",
"def sort_key_by_date(keys):\n list_of_keys = []\n for key in keys:\n id, date = key.split(' ')\n list_of_keys.append([id, datetime.strptime(date, '%m%d%Y')])\n sorted_list = sorted(list_of_keys)\n dates = [date_to_string(x[1]) for x in sorted_list]\n ids = [x[0] for x in sorted_list]\n sorted_list = [id + ' ' + date for id, date in zip(ids, dates)]\n return sorted_list",
"def key_sort(l, *keys):\r\n l = list(l)\r\n for key in keys:\r\n #Find out if we want a reversed ordering\r\n if key.startswith('-'):\r\n reverse = True\r\n key = key[1:]\r\n else:\r\n reverse = False\r\n\r\n attrs = key.split('.')\r\n def fun(x):\r\n # Calculate x.attr1.attr2...\r\n for attr in attrs:\r\n x = getattr(x, attr)\r\n # If the key attribute is a string we lowercase it\r\n if isinstance(x, basestring):\r\n x = x.lower()\r\n return x\r\n l.sort(key=fun, reverse=reverse)\r\n return l",
"def sort(li):\n #first sort on document id\n li = sorted(li,key=lambda x: x[0])\n \n #then sort on document ranking\n li = sorted(li,key=lambda x: x[1], reverse=True)\n \n #sort on window length\n # li = sorted(li,key=lambda x: x[3])\n \n #then sort on number of present words\n # li = sorted(li,key=lambda x: x[2], reverse=True)\n return li",
"def sort_by_name(fathers_of_the_founders):\n sorting = sorted(fathers_of_the_founders.items(), key=lambda t: t[0])\n return print(sorting)",
"def sortCaseInsensitive():\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
sorts publications in keys_list alphabetically by the title of the corresponding database entry; return order is 0 to 9, then A to Z, then a to z, then special characters
|
def sortByTitle(self,keys_list=None):
    if not keys_list:
        keys_list = self.getEntryList()
    r_list = self.searchObjTypeDerive(keys_list,query_objType=".obj.pub")
    r_list.sort(key = lambda x: self.entries[x].title )
    return r_list
|
[
"def sortByFirstAuthor(self,keys_list=None):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\tr_list = self.searchObjTypeDerive(keys_list,query_objType=\".obj.pub\")\n\t\tr_list.sort(key = lambda x: self.entries[x].authors[0] )\n\t\treturn r_list",
"def sortByYear(self,keys_list=None,invert=False):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\tr_list = self.searchObjTypeDerive(keys_list,query_objType=\".obj.pub\")\n\n\t\tr_list.sort(key=lambda x : self.entries[x].year,reverse=not invert)\n\t\treturn r_list",
"def arrange(l: Dict[str, List[str]]) -> None:\n for key in l:\n l[key].sort()",
"def pid_sort_key(pid_key):\n order = {\n 'p': 'a',\n 'i': 'b',\n 'd': 'c'\n }\n new_key = list(pid_key)\n new_key[4] = order[pid_key[4]]\n return ''.join(new_key)",
"def sortCaseInsensitive():\n pass",
"def sort(list_in, sort_key=lambda s: s.lower()):\n return sorted(list_in, key=sort_key)",
"def key_sort(l, *keys):\r\n l = list(l)\r\n for key in keys:\r\n #Find out if we want a reversed ordering\r\n if key.startswith('-'):\r\n reverse = True\r\n key = key[1:]\r\n else:\r\n reverse = False\r\n\r\n attrs = key.split('.')\r\n def fun(x):\r\n # Calculate x.attr1.attr2...\r\n for attr in attrs:\r\n x = getattr(x, attr)\r\n # If the key attribute is a string we lowercase it\r\n if isinstance(x, basestring):\r\n x = x.lower()\r\n return x\r\n l.sort(key=fun, reverse=reverse)\r\n return l",
"def title_insertion_sort(unsorted_object):\n\n all_videos = unsorted_object\n\n for i in range(1, len(all_videos)):\n key = all_videos[i]\n j = i - 1\n\n while j >= 0 and str(all_videos[j].title) > str(key.title):\n # Shift elements upwards\n all_videos[j+1] = all_videos[j]\n j -= 1\n\n # Insert key into position\n all_videos[j + 1] = key\n\n return all_videos",
"def sort_plugs(plugs):\n sorted_plugs = OrderedDict()\n for i in sorted(plugs, key=lambda x: x.lower()):\n sorted_plugs[i] = plugs[i]\n return sorted_plugs",
"def _sort(list_in, sort_key=None):\n if sort_key is None:\n sort_key = lambda s: s.lower()\n return sorted(list_in, key=sort_key)",
"def sort_key_by_date(keys):\n list_of_keys = []\n for key in keys:\n id, date = key.split(' ')\n list_of_keys.append([id, datetime.strptime(date, '%m%d%Y')])\n sorted_list = sorted(list_of_keys)\n dates = [date_to_string(x[1]) for x in sorted_list]\n ids = [x[0] for x in sorted_list]\n sorted_list = [id + ' ' + date for id, date in zip(ids, dates)]\n return sorted_list",
"def make_alphabetic(hits, processname, sortnames=False, lang=\"sv\"):\n def fix_lastname(name):\n vonaf_pattern = re.compile(r\"^(%s) \" % \"|\".join(VONAV_LIST))\n name = re.sub(vonaf_pattern, r\"\", name)\n return name.replace(\" \", \"z\")\n\n results = []\n for hit in hits:\n processname(hit, results)\n\n letter_results = {}\n # Split the result into start letters\n for first_letter, result in results:\n if first_letter == \"Ø\":\n first_letter = \"Ö\"\n if first_letter == \"Æ\":\n first_letter = \"Ä\"\n if first_letter == \"Ü\":\n first_letter = \"Y\"\n if lang == \"en\" and first_letter == \"Ö\":\n first_letter = \"O\"\n if lang == \"en\" and first_letter in \"ÄÅ\":\n first_letter = \"A\"\n if first_letter not in letter_results:\n letter_results[first_letter] = [result]\n else:\n letter_results[first_letter].append(result)\n\n # Sort result dictionary alphabetically into list\n if lang == \"en\":\n collator = icu.Collator.createInstance(icu.Locale(\"en_EN.UTF-8\"))\n else:\n collator = icu.Collator.createInstance(icu.Locale(\"sv_SE.UTF-8\"))\n for _n, items in list(letter_results.items()):\n if sortnames:\n items.sort(key=lambda x: collator.getSortKey(fix_lastname(x[0]) + \" \" + x[1]))\n else:\n items.sort(key=lambda x: collator.getSortKey(x[0]))\n\n letter_results = sorted(list(letter_results.items()), key=lambda x: collator.getSortKey(x[0]))\n return letter_results",
"def sort(li):\n #first sort on document id\n li = sorted(li,key=lambda x: x[0])\n \n #then sort on document ranking\n li = sorted(li,key=lambda x: x[1], reverse=True)\n \n #sort on window length\n # li = sorted(li,key=lambda x: x[3])\n \n #then sort on number of present words\n # li = sorted(li,key=lambda x: x[2], reverse=True)\n return li",
"def canonsort_keys(keys, canonical_order=None):\r\n canonical_map = dict((k, i) for i, k in enumerate(canonical_order or []))\r\n head = [k for k in keys if k in canonical_map]\r\n tail = [k for k in keys if k not in canonical_map]\r\n return sorted(head, key=lambda k: canonical_map[k]) + sorted(tail)",
"def natural_sort_key(key):\n def convert(text):\n return int(text) if text.isdigit() else text\n return [convert(c) for c in re.split('([0-9]+)', key)]",
"def sorted_database(database):\n artistkeys = sorted(list(database.keys()))\n return_list = []\n for key in artistkeys:\n artworkkeys = sorted(list(database[key].keys()))\n temp_artwork_list = []\n for art_piece_key in artworkkeys:\n temp_artwork_list.append(\n (art_piece_key, database[key][art_piece_key]))\n return_list.append((key, temp_artwork_list))\n return return_list",
"def sort(settings):\n\tfilter = settings.format(settings.content)\n\tfilter.sort()\n\tsettings.content = filter.content",
"def sortfunc(pt1, pt2):\n return cmp(pt1.title, pt2.title)",
"def sort_subject_list() -> None:\n with open(\"resources/subject_list.txt\", \"r+\") as outfile:\n lines = outfile.readlines()\n lines.sort()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Make an access token request and get new token(s). If auth_code is passed then both access and refresh tokens will be requested, otherwise the existing refresh token is used to request an access token.
|
def google_token_request(self, auth_code=None):
    # Build request parameters. Order doesn't seem to matter, hence using dict.
    token_request_data = {
        'client_id': self.client_id,
        'client_secret': self.client_secret,
    }
    if auth_code is None:
        # Use existing refresh token to get new access token.
        token_request_data['refresh_token'] = self.refresh_token
        token_request_data['grant_type'] = 'refresh_token'
    else:
        # Request new access and refresh token.
        token_request_data['code'] = auth_code
        token_request_data['grant_type'] = 'authorization_code'
        # 'urn:ietf:wg:oauth:2.0:oob' signals to the Google Authorization
        # Server that the authorization code should be returned in the
        # title bar of the browser, with the page text prompting the user
        # to copy the code and paste it in the application.
        token_request_data['redirect_uri'] = 'urn:ietf:wg:oauth:2.0:oob'
        token_request_data['access_type'] = 'offline'
    # Make token request to Google.
    oauth2_token_request_url = 'https://www.googleapis.com/oauth2/v4/token'
    resp = requests.post(oauth2_token_request_url, data=token_request_data)
    # If request is successful then Google returns values as a JSON array
    values = resp.json()
    self.access_token = values['access_token']
    if auth_code:  # Need to save value of new refresh token
        self.refresh_token = values['refresh_token']
    self.token_expiry = dt.datetime.now() + dt.timedelta(seconds=int(values['expires_in']))
    logging.info('Access token expires on %s', self.token_expiry.strftime("%Y/%m/%d %H:%M"))
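A hedged caller-side sketch (the helper name and the 60-second margin are assumptions, not from the source): because the method records token_expiry, a caller can refresh lazily, only requesting a new access token when the cached one is close to expiring.
import datetime as dt

def get_valid_access_token(client, margin_seconds=60):
    # Refresh via the stored refresh token only when the cached token is stale or missing.
    expiry = getattr(client, 'token_expiry', None)
    if expiry is None or expiry <= dt.datetime.now() + dt.timedelta(seconds=margin_seconds):
        client.google_token_request()  # no auth_code -> 'refresh_token' grant
    return client.access_token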
|
[
"def get_access_token(self, auth_code: str):\n api_url = f'{self.root}/token'\n params = {\n 'client_id': self.client_id,\n 'client_secret': self.secret,\n 'code': auth_code,\n 'grant_type': 'authorization_code',\n 'redirect_uri': self.callback_url,\n }\n auth_header = self.security_obj.generate_authorization_header(\n url=api_url, params=params, method='POST', app_id=self.client_id\n )\n log.info('auth_header: %s', auth_header)\n\n resp = self.request(api_url, method='POST', auth_header=auth_header, data=params)\n\n return resp",
"def exchange_code_for_token(self, code):\n\n u = UP_API_OAUTH_TOKEN_HOST + '?client_id=' + self.client_id\n u += '&client_secret=' + self.app_secret\n u += '&grant_type=authorization_code'\n u += '&code=' + code\n\n res = requests.get(u)\n\n if res.status_code == 200:\n self.refresh_token = res.json()['refresh_token']\n self.access_token = res.json()['access_token']\n return self.access_token\n return None",
"def get_access_token():\n\n demisto.debug(\"Generate a new access token\")\n\n integration_context: dict = get_integration_context()\n\n data: dict = {\n 'code': AUTH_CODE,\n 'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n 'redirect_uri': REDIRECT_URI,\n 'grant_type': 'authorization_code'\n }\n\n response: requests.Response = requests.post(\n ACCESS_TOKEN_URL,\n data=data,\n verify=DISABLE_SSL\n )\n\n if not response.ok:\n error = error_parser(response)\n raise ValueError(f'Failed to get access token [{response.status_code}] - {error}')\n\n response_json: dict = response.json()\n access_token = response_json.get('access_token', '')\n expires_in: int = response_json.get('expires_in', 3595)\n refresh_token = response_json.get('refresh_token', '')\n\n time_now: int = epoch_seconds()\n time_buffer = 5 # seconds by which to shorten the validity period\n if expires_in - time_buffer > 0:\n expires_in -= time_buffer\n integration_context['refresh_token'] = refresh_token\n integration_context['access_token'] = access_token\n integration_context['valid_until'] = time_now + expires_in\n set_integration_context(integration_context)\n\n return access_token",
"def finish_oauth(self, code):\n r = requests.post(\n self._login_uri(\"/oauth/token\"),\n data={\n \"code\": code,\n \"client_id\": self.client_id,\n \"client_secret\": self.client_secret,\n },\n )\n\n if r.status_code != 200:\n raise ApiError(\n \"OAuth token exchange failed\",\n status=r.status_code,\n json=r.json(),\n )\n\n token = r.json()[\"access_token\"]\n scopes = OAuthScopes.parse(r.json()[\"scopes\"])\n expiry = datetime.now() + timedelta(seconds=r.json()[\"expires_in\"])\n refresh_token = r.json()[\"refresh_token\"]\n\n return token, scopes, expiry, refresh_token",
"def exchange_code_for_token(\n self,\n app_config,\n http_client,\n code,\n form_encode=False,\n redirect_suffix=\"\",\n client_auth=False,\n ):\n json_data = self.exchange_code(\n app_config, http_client, code, form_encode, redirect_suffix, client_auth\n )\n\n access_token = json_data.get(\"access_token\", None)\n if access_token is None:\n logger.debug(\n \"Got successful get_access_token response with missing token: %s\", json_data\n )\n raise OAuthExchangeCodeException(\"Missing `access_token` in OAuth response\")\n\n return access_token",
"def exchange(authCode=None):\n payload = {'response_type': RESPONSE_TYPE,\n 'client_id': CLIENT_ID,\n 'grant_type': GRANT_TYPE,\n 'code': authCode,\n 'client_secret': CLIENT_SECRET,\n 'redirect_uri': url_for('authorized', _external=True)}\n print payload\n r = requests.post(carepass.access_token_url, params=payload)\n return r.json()",
"def get_access_token(grant_type, client_id, client_secret, redirect_uri, code=None, refresh_token=None):\n oauth_token_url = ACCESS_TOKEN_URL\n post_params = {\n 'grant_type': grant_type, # Use 'authorization_code' for new tokens\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'redirect_uri': redirect_uri,\n }\n\n # Need to add in code or refresh_token, depending on the grant_type\n if grant_type == 'authorization_code':\n post_params['code'] = code\n else:\n post_params['refresh_token'] = refresh_token\n\n r = requests.post(oauth_token_url, post_params)\n if r.status_code != 200:\n return HttpResponse(status=401)\n\n # Parse the response for the access_token, expiration time, and (possibly)\n # the refresh token\n response_data = r.json()\n print(\"response_data\", response_data)\n access_token = response_data['access_token']\n seconds_to_expire = response_data['expires_in']\n # Convert the expiration time in seconds to a DateTime\n expires = timezone.now() + timedelta(seconds=seconds_to_expire)\n # Whether a refresh token is included in the response depends on the\n # grant_type - it only appears to be returned for 'authorization_code',\n # but to be safe check the response_data for it\n refresh_token = None\n if 'refresh_token' in response_data:\n refresh_token = response_data['refresh_token']\n\n return (access_token, str(expires), refresh_token)",
"def get_token(request, refresh=False):\n api_url = \"https://ssl.reddit.com/api/v1/access_token\"\n is_expired = request.session.get(\"expires\", 0) < int(unixtime())\n headers = settings.OAUTH_REDDIT_BASE_HEADERS\n client_auth = requests.auth.HTTPBasicAuth(\n settings.OAUTH_REDDIT_CLIENT_ID, settings.OAUTH_REDDIT_CLIENT_SECRET\n )\n\n if is_expired and request.GET.get(\"code\", None):\n # Received an access code to get a new access_token. Use\n # this above anything else.\n post_data = {\n \"grant_type\": \"authorization_code\",\n \"code\": request.GET.get(\"code\"),\n \"redirect_uri\": settings.OAUTH_REDDIT_REDIRECT_URI,\n }\n response = requests.post(\n api_url, auth=client_auth, headers=headers, data=post_data\n )\n t = response.json()\n request.session[\"access_token\"] = t.get(\"access_token\", \"\")\n request.session[\"refresh_token\"] = t.get(\"refresh_token\", \"\")\n request.session[\"token_type\"] = t.get(\"token_type\", \"\")\n request.session[\"expires\"] = int(unixtime()) + int(t.get(\"expires_in\", 0))\n request.session[\"scope\"] = t.get(\"scope\", \"\")\n if settings.DEBUG:\n print(\"Initial access_token acquired.\")\n\n elif (refresh or is_expired) and request.session.get(\"refresh_token\", False):\n\n # The previous access_token is expired, use refresh_token to\n # get a new one.\n post_data = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": request.session.get(\"refresh_token\"),\n }\n response = requests.post(\n api_url, auth=client_auth, headers=headers, data=post_data\n )\n t = response.json()\n request.session[\"access_token\"] = t.get(\"access_token\", \"\")\n request.session[\"token_type\"] = t.get(\"token_type\", \"\")\n request.session[\"expires\"] = int(unixtime()) + int(t.get(\"expires_in\", 0))\n request.session[\"scope\"] = t.get(\"scope\", \"\")\n if settings.DEBUG:\n print(\"New access_token acquired.\")\n else:\n if settings.DEBUG:\n if request.session.get(\"access_token\", False):\n print(\"Re-using cached access_token.\")\n else:\n print(\"No access_token found anywhere!\")\n\n # If there is an access_token now, return it. Or wipe session vals.\n if request.session.get(\"access_token\", False):\n return request.session.get(\"access_token\")\n else:\n request.session[\"access_token\"] = None\n request.session[\"refresh_token\"] = None\n request.session[\"token_type\"] = None\n request.session[\"expires\"] = 0\n request.session[\"scope\"] = None\n return False",
"def access_token(self):\n if not self._access_token:\n headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n\n payload = urllib.urlencode({\n 'grant_type': 'refresh_token',\n 'client_id': OAUTH_CLIENT_ID,\n 'refresh_token': self.refresh_token\n })\n\n request = urllib2.Request(OAUTH_URL, headers=headers, data=payload)\n request.get_method = lambda: 'POST'\n\n try:\n response = urllib2.urlopen(request)\n data = json.load(response)\n self._access_token = data['access_token']\n except urllib2.HTTPError:\n # the refresh token has expired or become invalid\n self._refresh_token = None\n self.get_oauth_tokens()\n\n return self._access_token",
"def request_access_token(code: str) -> AccessToken:\n logger.info(\"Requesting an access token\")\n webex_teams_access_token = teams_api.access_tokens.get(\n client_id=WEBEX_TEAMS_CLIENT_ID,\n client_secret=WEBEX_TEAMS_CLIENT_SECRET,\n code=code,\n redirect_uri=WEBEX_TEAMS_REDIRECT_URI,\n )\n\n return AccessToken.from_webex_access_token(webex_teams_access_token)",
"def exchange_code_for_token(code):\n url = '{domain}/oauth2/token'.format(domain=cognito_config.domain)\n if cognito_config.client_secret:\n authorization_string = cognito_config.client_id + ':' + cognito_config.client_secret\n authorization = 'Basic ' + base64.b64encode(authorization_string.encode('utf-8')).decode('utf-8')\n headers = {'Content-type': 'application/x-www-form-urlencoded', 'Authorization': authorization}\n else:\n headers = {'Content-type': 'application/x-www-form-urlencoded'}\n data = {'grant_type': 'authorization_code',\n 'client_id': cognito_config.client_id,\n 'redirect_uri': cognito_config.redirect_uri,\n 'code': code\n }\n try:\n response = requests.post(url=url, headers=headers, data=data)\n tokens = json.loads(response.text)\n except requests.exceptions.HTTPError as e:\n raise AuthorizationExchangeError(str(e)) from e\n # check token expiry\n published_time = datetime(*eut.parsedate(response.headers['Date'])[:6])\n expiry = tokens.pop('expires_in')\n expiry_time = published_time + timedelta(int(expiry))\n if datetime.now() > expiry_time:\n raise AuthorizationExchangeError(\"Request is expired\")\n # check token type bearer\n token_type = tokens.pop('token_type')\n if token_type != 'Bearer':\n raise AuthorizationExchangeError(\"Invalid token type\")\n return tokens",
"def UseCode(code):\n access_token = client.exchange_code_for_token(client_id=client_id, client_secret=secret, code=code)\n client.access_token = access_token\n open(\"access_token\", \"w\").write(access_token)\n callback(client)",
"def refresh_access_token():\n client = Client(sm.access_token)\n auth_dict = client.refresh_access_token(\n client_id=sm.client_id,\n client_secret=sm.client_secret,\n refresh_token=sm.refresh_token)\n logger.debug('Auth Dict: %s', auth_dict)\n\n # Save the dict back to Secret Manager\n sm.set_auth_dict(auth_dict)",
"def request_tokens(self):\n # Generate auth request and extract returned value\n _log.debug(\"Requesting new auth tokens from Ecobee.\")\n url = 'https://api.ecobee.com/token'\n params = {\n 'grant_type': 'ecobeePin',\n 'code': self.authorization_code,\n 'client_id': self.api_key\n }\n response = make_ecobee_request(\"POST\", url, data=params)\n for token in [\"access_token\", \"refresh_token\"]:\n if token not in response:\n raise RuntimeError(f\"Request tokens response did not contain {token}: {response}\")\n self.access_token = response.get('access_token')\n self.refresh_token = response.get('refresh_token')\n self.authorization_stage = \"AUTHORIZED\"",
"def test_request_another_access_token(self):\r\n request_token = self._obtain_request_token()\r\n self._request_authorization(request_token)\r\n request_token = self._update_token_from_db(request_token)\r\n self._obtain_access_token(request_token)\r\n\r\n parameters = self._make_access_token_parameters(request_token)\r\n response = self.c.get(\"/oauth/access_token/\", parameters)\r\n self.assertEqual(response.status_code, 400)\r\n self.assertEqual(response.content, 'Invalid request token.')",
"def DoLazadaResetAccessToken(config, auth_code):\n oauth2_service = Oauth2Service()\n with oauth2_service:\n lazada_oauth2_dict = oauth2_service.GetOauth2Tokens(_SYSTEM_LAZADA)\n\n lazada_client = LazadaClient(\n domain=config.get('Lazada', 'Domain'),\n app_key=config.get('Lazada', 'AppKey'),\n app_secret=config.get('Lazada', 'AppSecret'),\n with_refresh=False)\n\n CreateLazadaOauth2Tokens(oauth2_service, lazada_client, code=auth_code)",
"def get_access_information(self, code):\n if self.config.grant_type == 'password':\n data = {'grant_type': 'password',\n 'username': self.config.user,\n 'password': self.config.pswd}\n else:\n data = {'code': code, 'grant_type': 'authorization_code',\n 'redirect_uri': self.redirect_uri}\n retval = self._handle_oauth_request(data)\n return {'access_token': retval['access_token'],\n 'refresh_token': retval.get('refresh_token'),\n 'scope': set(retval['scope'].split(' '))}",
"def add_token(code):\n tokens = authorize(code)\n re_token = tokens['refresh_token']\n acc_token = tokens['access_token']\n exp_in = int(tokens['expires_in'])\n\n token: SSOToken = SSOToken(refresh_token=re_token, access_token=acc_token,\n accountID=current_user.id,\n access_token_expires=(datetime.utcnow() + timedelta(seconds=exp_in)))\n\n auth_info = who_am_i(token)\n char_name = auth_info['CharacterName']\n char_id = auth_info['CharacterID']\n if char_name != current_user.get_eve_name():\n flask.abort(409, 'You did not grant authorization for the right character \"' + current_user.get_eve_name() +\n '\". Instead you granted it for \"' + char_name + '\"')\n\n scopenames = auth_info['Scopes'].split(' ')\n\n token.characterID = char_id\n\n for scope_name in scopenames:\n token.scopes.append(EveApiScope(scopeName=scope_name))\n\n current_user.add_sso_token(token)\n\n db.session.commit()",
"def exchange_code_for_credentials(code):\n # Build OAuth2 web server flow from authorization code\n flow = OAuth2WebServerFlow(\n CLIENT_ID,\n CLIENT_SECRET,\n GOOGLE_CALENDAR_API)\n flow.redirect_uri = request.base_url\n\n # Get credentials from authorization code\n try:\n credentials = flow.step2_exchange(code)\n return credentials\n except Exception as e:\n error_message = \"Unable to get a Google access token because {0}\"\n gcal_logger.warning(error_message.format(e.message))\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return an instance of BlackjackMDP where peeking is the optimal action at least 10% of the time.
|
def peekingMDP():
    # BEGIN_YOUR_CODE (our solution is 2 lines of code, but don't worry if you deviate from this)
    # raise Exception("Not implemented yet")
    return BlackjackMDP(cardValues = [16, 5, 4], multiplicity = 3, threshold = 20, peekCost = 1)
    # END_YOUR_CODE
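A short hedged note on why these parameters work (reasoning added here, not from the source): with card values spread far apart (16 versus 4 and 5) and a threshold of 20, knowing the next card often changes whether taking is safe, so paying peekCost=1 is frequently worthwhile. One way to check the 10% condition, assuming the assignment's usual util.ValueIteration solver, its .pi policy dict, and the mdp.states set exist as in that framework (names are assumptions):
import util  # assumed assignment helper module

mdp = peekingMDP()
solver = util.ValueIteration()  # assumed solver class; verify the name before relying on it
solver.solve(mdp)
peek_states = sum(1 for s in mdp.states if solver.pi[s] == 'Peek')
print(peek_states / float(len(mdp.states)))  # should be at least 0.1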
|
[
"def trainCFR(deck,history,players,reachProbs,currentPlayer,sets,limit,absLevel,forgetful,probabilistic):\n his = deepcopy(history)\n #if game over, return payoff and halt recursion\n if isTerminal(his,players):\n #if last player folded, current p gets pot\n if his[-1] == \"Fold\":\n return payoff(players)\n \n #if final round, winner of showdown gets pot\n else:\n commCards = players[0].communityCards\n #gets both players best ranks\n rankList = [poker.getBest(p.holeCards,commCards) for p in players]\n winners = poker.getWinningHands(rankList)\n\n #if tie, no payoff (both have bets returned)\n if len(winners) == 2:\n return 0\n\n #if current player won, get full payoff\n elif winners[0] == currentPlayer:\n return payoff(players)\n\n #return negative payoff if loser\n else:\n return -payoff(players)\n\n #----- non-terminal, game continuing -----\n\n #checks if previous betting round is over, draws new cards if so\n if roundOver(his):\n his+=[\"Round\"]\n #flop\n if len(players[0].communityCards) < 3:\n newCards = poker.drawX(3,deck)\n #turn & river\n else:\n newCards = poker.drawX(1,deck)\n\n #updates com cards\n for p in players:\n p.communityCards += newCards\n\n #if bet limit reached, ban raising\n if his[len(his) - limit : len(his)] == [\"Raise\"]*limit:\n actions = [\"Call\",\"Fold\"]\n #prevents index error\n elif len(his) == 0:\n actions = [\"Call\",\"Fold\",\"Raise\"]\n #necessary response actions, fold removed when check is possible\n elif his[-1] == \"Raise\":\n actions = [\"Call\",\"Fold\",\"Raise\"]\n elif his[-1] == \"Check\" or his[-1] == \"Call\" or his[-1]==\"Round\":\n actions = [\"Check\",\"Raise\"]\n \n #converts cards for this player to number value\n if probabilistic: \n cardValue = getCardAbstraction(players[currentPlayer].holeCards,players[0].communityCards,probabilistic=probabilistic)\n else:\n cardValue = getCardAbstraction(players[currentPlayer].holeCards,players[0].communityCards,absLevel)\n #calculates position of opponent (next player)\n opponent = (currentPlayer + 1) % 2\n \n #creates/gets infoset object for this game state and retrieves strategy\n if forgetful:\n iSet = sets.getInfoSet((forgetfulHistory(his),cardValue),actions)\n else:\n iSet = sets.getInfoSet((getHistoryString(his),cardValue),actions)\n strat = iSet.getStrat(reachProbs[currentPlayer]) \n\n #stores regrets for each possible action evaluated\n newRegrets = [0 for i in range(len(actions))]\n\n for i in range(len(actions)):\n #gets each action and its probability of being chosen\n actionProb = strat[i]\n action = actions[i]\n #modifies current player's reach probability\n newReachProbs = reachProbs.copy()\n newReachProbs[currentPlayer] *= actionProb\n\n #gets copy of players for each scenario\n pl = deepcopy(players)\n if action == \"Raise\":\n pl[currentPlayer].bet = pl[opponent].bet + 20\n elif action == \"Call\":\n pl[currentPlayer].bet = pl[opponent].bet\n\n d = deepcopy(deck)\n\n #recursive call, passes updated values after processing of this action\n newRegrets[i] = -trainCFR(d,his+[action],pl,newReachProbs,opponent,sets,limit,absLevel,forgetful,probabilistic)\n\n #value is regrets weighted by action probability\n nodeValue = 0\n for i in range(len(strat)):\n nodeValue += strat[i] * newRegrets[i]\n\n #updates cumulative regrets\n for i in range(len(strat)):\n iSet.cumRegrets[i] += reachProbs[opponent]*(newRegrets[i] - nodeValue)\n\n return nodeValue",
"def _takeActionAdaptive(self, state, epsilon):\n self.eps_list.append(epsilon)\n if np.random.rand() <= epsilon:\n # greedy\n return self.env.action_space.sample()\n # not greedy\n return np.argmax(self.Q_table[state, :])",
"def greedy_policy(current_state: tuple, eps: float):\n prob = [eps / actions_set_len] * actions_set_len\n arg_min_index = np.where(state_action_values[current_state] == max(\n state_action_values[current_state]))[0]\n prob[np.random.choice(arg_min_index)] = 1 - eps + eps / actions_set_len\n return prob",
"def epsilon_greedy(env, Q, epsilon):\n def policy(obs):\n P = np.ones(env.action_space.n, dtype=float) * epsilon / env.action_space.n #initiate with same prob for all actions\n best_action = np.argmax(Q[obs]) #get best action\n P[best_action] += (1.0 - epsilon)\n return P\n return policy",
"def create_greedy_policy(Q):\n\n def policy_fn(observation):\n A = np.zeros_like(Q[observation], dtype=float) #probabilities of two actions\n a_greedy = np.argmax(Q[observation])\n A[a_greedy] = 1.0 # set probability of greedy action to 1 and other remains 0.\n return A\n return policy_fn",
"def greedy_heuristic(protocol_obj):\n if protocol_obj.happyness == 0:\n return protocol_obj.exp_extraction * 1.15 # 15% increase\n else: \n return protocol_obj.exp_extraction",
"def create_greedy_policy(Q):\n\n def policy_fn(state):\n # All actions that available in the given state\n actions = np.arange(len(Q[state]))\n best_action = np.random.choice(actions[Q[state] == np.max(Q[state])])\n A = np.where(actions == best_action, 1.0, 0.0)\n return A\n\n return policy_fn",
"def create_epsilon_greedy_policy(Q, epsilon):\n\n def policy_fn(state):\n # All actions that available in the given state\n actions = np.arange(len(Q[state]))\n best_action = np.random.choice(actions[Q[state] == np.max(Q[state])])\n ramdomProb = epsilon / len(Q[state])\n A = np.where(actions == best_action, 1 - epsilon + ramdomProb, ramdomProb)\n return A\n\n return policy_fn",
"def treePolicy(self, exploration_coeff = math.sqrt(2)):\n best_child = random.choice(self.children) \n\n if self.state[\"p\"] == self.getRoot().state[\"p\"]:\n best_val = 0\n else: \n best_val = 999\n\n for c in self.children:\n if c.num_visits != 0:\n if self.state[\"p\"] == self.getRoot().state[\"p\"]:\n #Maximize wins for root-player\n val = c.num_wins/c.num_visits + exploration_coeff * math.sqrt(math.log2(self.num_visits) / c.num_visits)\n if val > best_val:\n best_val = val\n best_child = c\n else:\n #Minimize wins for root-player\n val = c.num_wins/c.num_visits - exploration_coeff * math.sqrt(math.log2(self.num_visits) / c.num_visits)\n if val < best_val:\n best_val = val\n best_child = c\n \n return best_child",
"def get_optimal_policy(self):\n # first compute the solution to the DARE: P_optimal\n self.compute_optimal_cost_matrix()\n # policy K = - gamma * (R + gamma* B'PB)^-1 * B' * P * A\n # action can be computed as K * x\n return - self.gamma * np.linalg.pinv(self.R+self.gamma*self.B.T.dot(self.P_optimal).dot(self.B)).dot(self.B.T).dot(self.P_optimal).dot(self.A)",
"def make_greedy_policy():\n policy_improvement() # make policy greedy with respect to V~V*",
"def bestActionFor(mdp,state,Q):\r\n\r\n\r\n ### YOUR CODE HERE\r\n ### YOUR CODE HERE\r\n ### YOUR CODE HERE\r",
"def get_optimal_policy(self):\n # first compute the solution to the CARE: P_optimal\n self.compute_optimal_cost_matrix()\n # policy K = - gamma * R^-1 * B' * P\n # action can be computed as K * x\n return - self.gamma * np.linalg.pinv(self.R).dot(self.B.T).dot(self.P_optimal)",
"def _takeAction(self, state, episode):\n epsilon = 1 - episode / 5000\n self.eps_list.append(epsilon)\n if np.random.rand() <= epsilon:\n # greedy\n return self.env.action_space.sample()\n # not greedy\n return np.argmax(self.Q_table[state, :])",
"def test_reward_score_odd_col():\n\n # Create an istance of panther env reach top for testing\n # env = panther_env_reach_top.PantherEnvReachTop(config_file_path='/Components/plark-game/plark_game/game_config/10x10/balanced.json',panther_start_col=5)\n env = gym.make('panther-env-reach-top-v0', panther_start_col=5)\n\n model = PPO2('CnnPolicy', env)\n\n # Manually move the panther up and down and check that it gets the correct reward score\n # Move the panther up\n obs, reward, done, info = env.step(0)\n assert reward == 0.5\n # Move the panther up and right\n obs, reward, done, info = env.step(1)\n assert reward == 0.5\n # Move the panther down and right\n obs, reward, done, info = env.step(2)\n assert reward == -0.2\n # Move the panther down\n obs, reward, done, info = env.step(3)\n assert reward == -0.2\n # Move the panther down and left\n obs, reward, done, info = env.step(4)\n assert reward == -0.2\n # Move the panther up and left\n obs, reward, done, info = env.step(5)\n assert reward == 0.5",
"def get_action_to_play(self, epsilon):\n if random.random() < epsilon:\n #play random action\n return random.randint(0, self.n - 1)\n else:\n #play greedy action\n m = max(self.action_value_estimates)\n max_indices = [i for i, j in enumerate(self.action_value_estimates) if j == m]\n return random.choice(max_indices)",
"def P(n,X,Y,Z):\n\n\t# Check if state has already been calculated\n\tif M[n][X][Y][Z] >= 0.0:\n\t\treturn M[n][X][Y][Z]\n\n\t# If the deck is empty or all ones, all twos and all threes have been picked up, the game is won\n\tif n==0 or (X==0 and Y==0 and Z==0):\n\t\treturn 1.0\n\n\t# Probability to win in the follow up game\n\twin_prob = 0\n\n\t# Add probability to pick nor a one, nor two nor three, times the probability of winning the game with n-1 cards.\n\t# It is >0, not >=0, since if n-X-Y-Z==0 there aren't cards other than 1s, 2s or 3s\n\tif n-X-Y-Z > 0:\n\t\twin_prob += ((n-X-Y-Z)/n) * P(n-1,X,Y,Z)\n\n\t# Add probability (X/n) to pick a one (only if I am not saying \"one\" during the peek numbered N-n)\n\t# times the probability of winning the game with n-1 cards and X-1 ones left in the deck\n\tif X > 0 and not ((N-n)%3 == 0):\n\t\twin_prob += (X/n) * P(n-1,X-1,Y,Z)\n\n\t# Same as before but with the two\n\tif Y > 0 and not ((N-n)%3 == 1):\n\t\twin_prob += (Y/n) * P(n-1,X,Y-1,Z)\n\n\t# Same as before but with the three\n\tif Z > 0 and not ((N-n)%3 == 2):\n\t\twin_prob += (Z/n) * P(n-1,X,Y,Z-1)\n\n\t# Update the value in the matrix\n\tM[n][X][Y][Z] = win_prob\n\treturn win_prob",
"def cpoker(self, mask, target, args):\n global CHATLVL_COMMANDLOCK, MAIN_CHANNEL, POKER_CHANNEL\n \"\"\"\n if (target == MAIN_CHANNEL):\n self.bot.privmsg(mask.nick, \"Poker is heavily limited in {main} atm, due to the spam! ''!join {channel}'' to play with others!\".format(**{\n \"main\" : MAIN_CHANNEL,\n \"channel\": POKER_CHANNEL,\n }))\n return\n \"\"\"\n CHATLVL_COMMANDLOCK.acquire()\n if self.chatroulettethreads.get(target, False):\n CHATLVL_COMMANDLOCK.release()\n return \"Another game is in progress!\"\n self.debugPrint('commandlock acquire chatpoker')\n points = args.get('<points>')\n textcommands = self.__textToPokerCommand(\" \".join(args.get('TEXT')))\n createdGame = False\n if points:\n try:\n points = abs(int(points))\n except Exception:\n CHATLVL_COMMANDLOCK.release()\n self.debugPrint('commandlock release chatpoker 2')\n return\n else:\n points = 50\n if (args.get('reveal') or textcommands.get('reveal')) and self.ChatpokerPrev.get(target, False):\n self.ChatpokerPrev[target].reveal(mask.nick)\n CHATLVL_COMMANDLOCK.release()\n return\n if self.spam_protect('chatgames', mask, target, args, specialSpamProtect='chatgames', updateTimer=False): # TODO check, different timers?\n CHATLVL_COMMANDLOCK.release()\n self.debugPrint('commandlock release chatpoker spam')\n return\n if not self.Chatpoker.get(target, False):\n tourneydata = self.ChatgameTourneys.get(target, False)\n if tourneydata:\n self.Chatpoker[target] = Poker(self.bot, self.on_cpoker_done, self.Chatpoints, self.Chatevents,\n target,\n tourneydata['minpoints'],\n gamecost = 0,\n gamecostreceiver=target,\n chatpointsDefaultKey=tourneydata['pointkey'],\n chatpointsReservedKey=tourneydata['pointreservedkey'],\n chatpointsStatisticsKey=tourneydata['statisticskey'])\n for name in tourneydata['players'].keys():\n self.Chatpoker[target].sponsor(name, tourneydata['ante'] * tourneydata['players'][name])\n self.ChatgameTourneys[target]['minpoints'] = int(self.ChatgameTourneys[target]['minpoints'] * tourneydata['minpincreasemult'] + tourneydata['minpincreaseadd'])\n else:\n points = max([points, 20])\n self.Chatpoker[target] = Poker(self.bot, self.on_cpoker_done, self.Chatpoints, self.Chatevents, target, points)\n createdGame = True\n if args.get('start') or textcommands.get('start'):\n self.Chatpoker[target].beginFirstRound(mask.nick)\n if args.get('call') or textcommands.get('call'):\n self.Chatpoker[target].call(mask.nick)\n if args.get('fold') or textcommands.get('fold'):\n self.Chatpoker[target].fold(mask.nick)\n if args.get('join') or args.get('signup') or textcommands.get('join'):\n worked = self.Chatpoker[target].signup(mask.nick)\n if createdGame and (not worked):\n self.Chatpoker[target] = False\n del self.Chatpoker[target]\n self.bot.privmsg(target, \"Removed poker game again.\")\n if args.get('raise'):\n self.Chatpoker[target].raise_(mask.nick, points)\n CHATLVL_COMMANDLOCK.release()",
"def training_policy(self, state):\n #print(\"state: %s\" % state)\n # TODO: change this to to policy the agent is supposed to use while training\n # At the moment we just return an action uniformly at random.\n\n currState = self.correctStateFormat(state)\n \n best_action_chance = ( 1 - self.epsilon + float(self.epsilon/2) )\n rand_numb = float(random.randint(1, 1000)/1000)\n # exploid\n if(rand_numb <= best_action_chance):\n #print()\n #print(currState)\n #print(self.Qsa(currState, 0), self.Qsa(currState, 1))\n return np.argmax([ self.Qsa(currState, 0), self.Qsa(currState, 1) ])\n\n # explore\n return random.randint(0, 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a copy of the move
|
def copy(self):
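    # Build a fresh Move carrying the same x, y, z coordinates and direction as this one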
return Move(self.x, self.y, self.z, self.dir)
|
[
"def move_copy(self, position):\n return Coord(self.x + position[0], self.y + position[1])",
"def _move(self, at, to):\n copy = self.copy()\n i, j = at\n r, c = to\n copy.board[i][j], copy.board[r][c] = copy.board[r][c], copy.board[i][j]\n return copy",
"def next_move(self) -> Move:",
"def Move(self, *args):\n return _snap.TRnd_Move(self, *args)",
"def apply_move(self, move, spec):\n new_spec = deepcopy(spec)\n return new_spec",
"def copy(self):\n new_piece = Bishop(self.pos, self.team)\n new_piece.moved = self.moved\n return new_piece",
"def copy(self):\n new_matrix = []\n for col in self.matrix:\n new_matrix.append(col[:])\n return Board(new_matrix, self.current_pieces[:], self.move_str[:])",
"def clone(self):\n\t\tst = GameState()\n\t\tst.player_to_move = self.player_to_move\n\t\treturn st",
"def copy_move(self, start_point=None, end_point=None):\r\n if start_point and end_point:\r\n vector = (end_point[0] - start_point[0], end_point[1] - start_point[1], end_point[2] - start_point[2])\r\n else:\r\n vector = (0,0,0)\r\n self.copy_move_by_vec(vector)",
"def move( self, move_vector ):",
"def mov(self, dest: Any, src: Any) -> Any:\n ...",
"def reverse_move(self):\n self.arr = self.arr_old.copy()\n self.position = self.position_old.copy()",
"def moving(self):\n return [m for m in self if m.is_moving()]",
"def resign(cls):\n return Move(is_resign=True)",
"def copy(self):\n return Acquire2(where=self.where, redo=self.redo, steps=self.steps)",
"def Clone(self):\n st = GoState(self.size)\n st.board=copy.deepcopy(self.board [:])\n st.playerJustMoved = self.playerJustMoved\n st.points1=self.points1\n st.points2=self.points2\n st.size=self.size\n st.lastpass=self.lastpass\n st.komove=self.komove\n st.moves1=copy.deepcopy(self.moves1)\n st.moves2=copy.deepcopy(self.moves2)\n st.tocheck=self.tocheck\n return st",
"def make_move(self, move):\n # move = random.choice(self.board.get_legal_moves()) # Randomly choose a move from the legal moves\n return self.board.make_move(move), self.calc_reward(), self.is_game_over(), self.get_legal_moves() # Make the move",
"def test_copy_move(self):\n # XXX: python-easwebday does not support webdav COPY/MOVE operation yet.\n # with tmp_repo() as ra:\n # with tmp_repo() as rb:\n # roota = ra.get_dir('/')\n # rootb = rb.get_dir('/')\n pass",
"def copy_board(board):\n return board.copy()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the flags for the move to start animating
|
def start(self):
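    # Flag the move as animating, clear the finished flag and reset the rotation angle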
self.animating = True
self.finished = False
self.angle = 0
|
[
"def start_move_beam_mark(self):\n QApplication.setOverrideCursor(QCursor(Qt.BusyCursor))\n self.emit(\"infoMsg\", \"Move beam mark\")\n self.in_move_beam_mark_state = True\n self.start_graphics_item(\\\n self.graphics_move_beam_mark_item,\n start_pos = self.graphics_beam_item.start_coord)\n #self.graphics_move_beam_mark_item.set_beam_mark(\\\n # self.beam_info_dict, self.pixels_per_mm) ",
"def startBackgroundMoving(self):\n self.moving = True",
"def set_animate(self, on: bool = True) -> None:\n if on and self.frame == -1:\n self.frame = 0\n elif not on:\n self.frame = -1\n self.sprite = 0",
"def move_to_start(self):\n if self._moving_between_tiles:\n self.__move_between_tiles()\n else:\n self._direction = self.astar.get_direction(self._coord, self.start_coord)\n\n self._moving_between_tiles = True\n self.display_eyes_score()\n self._draw_character(self._coord, self.__image)\n\n if self._coord == self.start_coord:\n self.reset_character()",
"def move(self):\n if not self.pause:\n if self.step_down:\n self.mov_x *= -1\n self.step_down = False\n if self.saucer_count > 10:\n if self.saucer_timer == 0:\n if self.saucer.current_shape == self.saucer.shape_2:\n self.saucer.current_shape = self.saucer.shape_3\n self.saucer_timer = 10\n pass\n elif self.saucer.current_shape == self.saucer.shape_3:\n self.saucer.shape(self.saucer.current_shape)\n self.saucer.current_shape = self.saucer.shape_4\n self.saucer_timer = 10\n pass\n elif self.saucer.current_shape == self.saucer.shape_4:\n self.saucer.hideturtle()\n self.saucer.current_shape = self.saucer.shape_1\n self.saucer.shape(self.saucer.current_shape)\n self.saucer.goto(-380, 325)\n self.saucer.showturtle()\n self.saucer_count = 0\n else:\n new_x = self.saucer.xcor() + 9\n self.saucer.goto(new_x, 325)\n self.saucer_timer = 1\n if new_x > 380:\n self.saucer.hideturtle()\n self.saucer.goto(-380, 325)\n self.saucer.showturtle()\n self.saucer_count = 0\n else:\n self.saucer_timer -= 1\n if self.timer != 0:\n self.timer -= 1\n self.moved = False\n else:\n self.moved = True\n for column in self.invader_list:\n for inv in column:\n if inv.current_shape == inv.shape_3:\n inv.hideturtle()\n else:\n new_x = (inv.xcor() + self.mov_x)\n inv.goto(new_x, inv.ycor())\n inv.change_shape()\n if inv.isvisible() and not self.step_down:\n if -305 > new_x or 305 < new_x:\n self.step_down = True\n for col in self.invader_list:\n for vader in col:\n new_y = (vader.ycor() + self.mov_y)\n vader.goto(vader.xcor(), new_y)\n vader.change_shape()\n if time() - self.start_time > self.interval:\n self.saucer_count += 1\n print(self.saucer_count)\n if self.timer_abs > 1:\n self.timer_abs -= 1\n print(f\"timer is {self.timer_abs}\")\n self.start_time = time()\n\n self.timer = self.timer_abs",
"def start(self, target):\n # TODO: multiple targets\n # TODO: weakref the targets\n if self._state is not ANIMATION_NOT_STARTED:\n raise RuntimeError\n\n self._state = ANIMATION_RUNNING\n self.targets = [(target, dict())]\n for target_, props in self.targets:\n relative = props.get('_relative', False)\n for name, value in self.props.items():\n initial = self._get_value(target_, name)\n is_number(initial)\n is_number(value)\n if relative:\n value += initial\n props[name] = initial, value\n\n self.broadcast('on_start')",
"def setAnimationEnabled(self, newval: 'SbBool') -> \"void\":\n return _coin.SoTrackballDragger_setAnimationEnabled(self, newval)",
"def resume(self):\n self.isStopped = False\n self.animate()",
"def set_mark_moveable(self, move):\n self._set_mark_moveable(move)",
"def update_start(self):\n\n # Add 1 to the index when accessing the widgets\n # due to the column headings\n started = self.start_checkbox.isChecked()\n finished = self.finish_checkbox.isChecked()\n if started and finished:\n self.finish_checkbox.setChecked(False)\n if media_objects.get_movie() is not None:\n media_objects.get_movie().set_started(started)",
"def process_move(self, coords: Coords, toggle_flag: bool):\n\n if not self.initialized:\n # First move\n self.initialise_grid(coords)\n self.reveal_cell(coords)\n elif toggle_flag:\n self.flag_cell(coords)\n else:\n self.reveal_cell(coords)\n\n if self.cells_hidden == self.number_of_mines:\n # Victory!\n self.game_over = True",
"def start(self):\n key_frames = list()\n if self.fade == \"in\":\n key_frames.append(KeyFrame(0.0, alpha=255))\n key_frames.append(KeyFrame(self.fade_duration, alpha=0))\n else:\n key_frames.append(KeyFrame(0.0, alpha=0))\n key_frames.append(KeyFrame(self.fade_duration, alpha=255))\n self.animation = Animation(self, key_frames, should_loop=False, unscaled=\"True\")\n self.animator = Animator(self, animation_list=[self.animation])\n self.animator.play()\n self.creation_time = Time.now()",
"def walk_animation(self):\n\n # # This animation is hardcoded for 4 frames and 16x24 map tiles\n # for frame in range(4):\n # self.image = self.frames[self.direction][frame]\n # yield None\n # self.move(3*DX[self.direction], 2*DY[self.direction])\n # yield None\n # self.move(3*DX[self.direction], 2*DY[self.direction])",
"def update(self):\n self._move()\n self._animate()",
"def move(self):\n for segment in range(len(self.snake)):\n if segment == len(self.snake) - 1:\n self.snake[segment].forward(20)\n else:\n self.snake[segment].goto(self.snake[segment + 1].pos())",
"def process_movement_animation(self, keys: List[bool]) -> None:\n if Player.anim_counter % self.velocity == 0:\n if keys[pygame.K_a]:\n try:\n self.image = next(Player.left_anim_iter)\n except(StopIteration):\n Player.left_anim_iter = iter(left_anim_imgs)\n self.image = next(Player.left_anim_iter)\n elif keys[pygame.K_d]:\n try:\n self.image = next(Player.right_anim_iter)\n except(StopIteration):\n Player.right_anim_iter = iter(right_anim_imgs)\n self.image = next(Player.right_anim_iter)\n else:\n self.image = pygame.image.load(\"img/SonicStatiqueRight.png\")\n Player.anim_counter += 1",
"def start(self):\n self.ani = animation.FuncAnimation(\n self.figure,\n self.update_graph,\n frames=10000,\n interval=200,\n repeat=False)\n\n self.ani._start()\n\n print('started animation')",
"def start_jump(self):\n if not self.jumping:\n self.jumping = True\n self.in_air = True",
"def _started(self):\n\n self.active = True\n self.stopped = False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
return url for the notice detail
|
def notice_detail_url(notice_id):
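    # Reverse the 'information:notice-detail' route for the given notice id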
return reverse('information:notice-detail', args=[notice_id])
|
[
"def get_absolute_url(self):\n return ('view-note', (), {'category': self.category, 'slug': self.slug})",
"def issue(self):\n return self._url('issue')",
"def get_note_url(note):\n client = get_unauthorized_evernote_client()\n return 'https://%s/Home.action?#n=%s' % (client.service_host, note.guid)",
"def NsUrl(self) -> str:",
"def get_absolute_url(self):\n return reverse('sensor-detail', args=[self.id])",
"def alert_url(self, model):\n if model.id and hasattr(model, \"alert_template\"):\n template = model.alert_template\n if template:\n params = {}\n params[model.resource_content_type + \"_id\"] = model.id\n params[\"template_id\"] = template.id\n params[\"committee_ids\"] = model.committee_id\n params[\"prefill\"] = \"1\"\n return url_for(\"alerts.new\", **params)",
"def getSignaletiqueUrl():",
"def get_success_url(self):\n return reverse('user_details', kwargs={'user_uuid': self.user.uuid})",
"def _url(route):\n return \"%s%s\" % (c['base_address'], route)",
"def get_absolute_url(self):\n model = self.model_mapping.get(self.notification_type)\n instance = model.objects.get(id=self.object_id)\n return instance.get_absolute_url()",
"def get_show_url(self, name):",
"def get_success_url(self):\n if self.success_url is None:\n try:\n return self.get_absolute_url(self.object, 'detail')\n except NoReverseMatch:\n pass\n pass\n return super(ModelViewMixin, self).get_success_url()",
"def __str__(self):\n return self.url",
"def get_absolute_url(self):\n\t\treturn reverse('plate-detail', args=[str(self.id)])",
"def detail_url(consumption_record_id):\n return reverse(\n \"consumption:consumption_record-detail\", args=[consumption_record_id]\n )",
"def get_absolute_url(self):\n return reverse('questions:question_paper_details', args=[self.id])",
"def get_absolute_url(self):\n return reverse('household-detail', args=[str(self.id)])",
"def get_edit_url(self):\n return ('edit-note', (), {'category': self.category, 'slug': self.slug})",
"def get_product_detail_url(asin):\n return '{base_url:s}/#/products/{asin:s}'.format(\n base_url=app_settings.PRICE_MONITOR_BASE_URL,\n asin=asin,\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
create and return a sample scope
|
def sample_scope(description='General', **kwargs):
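    # Start from an empty defaults dict and let any keyword arguments override it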
defaults = {}
defaults.update(kwargs)
return models.Scope.objects.create(description=description, **defaults)
|
[
"def training_scope(**kwargs):\n return lib.training_scope(**kwargs)",
"def sub_scope(self, kind, name, node, lineno):\n generator = kind(self.space, name, node, lineno, self.symbols,\n self.compile_info)\n return generator.assemble()",
"def create_one_sample():\n return {\n macrobe: vals['rpkm']\n for macrobe, vals in create_values().items()\n }",
"def _create_samples(session, match):\n all_samples = defaultdict(data_models.Sample)\n project_id = match.get('project_id')\n project_status = match.get('project_status', 'open')\n sample_id = match.get('sample_id')\n sample_time_since = match.get('createddate')\n process_limit_date = match.get('process_limit_date')\n detailed = request.args.get('detailed') in ['true', 'True', True]\n if detailed:\n list_process_complete = None\n list_process_queued = None\n else:\n list_process_complete = list(status_cfg.step_completed_to_status) \\\n + list(status_cfg.additional_step_completed) \\\n + list(status_cfg.library_type_step_completed) \\\n + status_cfg.started_steps\n list_process_queued = status_cfg.step_queued_to_status\n udfs_to_fields = {\n 'Prep Workflow': 'planned_library',\n 'Species': 'species',\n 'Required Yield (Gb)': 'required_yield',\n 'Coverage (X)': 'coverage'\n }\n for result in queries.get_sample_info(session, project_id, sample_id, project_status=project_status,\n time_since=sample_time_since, udfs=list(udfs_to_fields)):\n (pjct_name, sample_name, container, wellx, welly, udf_name, udf_value) = result\n s = all_samples[sanitize_user_id(sample_name)]\n s.sample_name = sanitize_user_id(sample_name)\n s.project_name = pjct_name\n s.plate_name = container\n s.original_name = sample_name\n if udf_name in udfs_to_fields:\n setattr(all_samples[sanitize_user_id(sample_name)], udfs_to_fields[udf_name], udf_value)\n\n for result in queries.get_samples_and_processes(session, project_id, sample_id, project_status=project_status,\n workstatus='COMPLETE', list_process=list_process_complete,\n time_since=sample_time_since, process_limit_date=process_limit_date):\n (pjct_name, sample_name, process_name, process_status, date_run, process_id) = result\n all_samples[sanitize_user_id(sample_name)].add_completed_process(process_name, date_run, process_id)\n\n for result in queries.get_sample_in_queues_or_progress(\n session, project_id, sample_id, list_process=list_process_queued,\n time_since=sample_time_since, project_status=project_status, process_limit_date=process_limit_date):\n pjct_name, sample_name, process_name, queued_date, queue_id, process_id, process_date = result\n if not process_id:\n all_samples[sanitize_user_id(sample_name)].add_queue_location(process_name, queued_date, queue_id)\n else:\n all_samples[sanitize_user_id(sample_name)].add_inprogress(process_name, process_date, process_id)\n\n return all_samples.values()",
"def new_scope(self):\n self.append(Scope(self.peek()))\n return",
"def create_scope(api_url, token, scope):\r\n r = requests.post(api_url + 'preview/secret/scopes/create',\r\n headers={\"Authorization\" : \"Bearer \" + token},\r\n json={\"scope\": scope})\r\n response_body = r.json()\r\n if r.status_code != 200:\r\n raise Exception('Error creating scope: ' + json.dumps(response_body))\r\n return(response_body)",
"def sample(self):\n self.exp_wt_start = np.random.choice(self.ps.int_exp_wt)\n self.lmbda = np.random.choice(self.ps.int_lambda_soft)\n myns = float('inf')\n while myns > self.ps.max_len:\n walk = self.make()\n myns = len(walk)\n return walk",
"def create_samples(self):\n sample_list = []\n temp_list = []\n for index in range(len(self.data['samples'])):\n for key in self.data.keys():\n temp_list.append(self.data[key][index])\n sample_list.append(sample.Sample(temp_list[0], temp_list[2:], temp_list[1]))\n temp_list.clear()\n return sample_list",
"def create_oscilloscope(inst_name,port):\n global inst\n inst_name = str(inst_name)\n inst[inst_name] = hv_os.oscilloscope(port)\n return inst[inst_name]",
"def pre_create_trial(self):",
"def scope(arg):\n core.push_id(arg)\n yield\n core.pop_id()",
"def create_sample(*args: Any, **kwargs: Any) -> SampleType:\n return cast(SampleType, Sample(*args, **kwargs))",
"def _create_sample(self, counter_name, counter_type, counter_unit,\n counter_volume, resource_id, **kwargs):\n kwargs.update({\"counter_name\": counter_name,\n \"counter_type\": counter_type,\n \"counter_unit\": counter_unit,\n \"counter_volume\": counter_volume,\n \"resource_id\": resource_id})\n return self.clients(\"ceilometer\").samples.create(**kwargs)",
"def baseline_sample(self):\n return self.sample_db[self.obj_function.default_params]",
"def _proposal(self, currval, params):\n\t\treturn self._sample_impl(params)",
"def testAddProductScope(self):\n add_product_scope.main(client, self.__class__.campaign_id)",
"def create(self, req, body):\n\n context = req.environ['nova.context']\n authorize(context, action='create')\n\n \ttry:\n params = body['scope']\n name = params['name']\n value = params['value']\n\t print name,value\n\t attribute_list = self.attribute_api.list(context)\n\t attname = [att.name for att in attribute_list]\n except KeyError:\n msg = _(\"Invalid request body\")\n raise webob.exc.HTTPBadRequest(explanation=msg)\n\t\n\tif name in attname:\n\t scope=self.api.create_scope(context,name,value)\n\t #return {'scope',scope}\n\t #print \"yes\"\n else: \n\t msg = _(\"Attribute Not there\")\n raise webob.exc.HTTPBadRequest(explanation=msg)\n #try:\n #attribute=self.api.create_attribute(context, name)\n\n #return {'attribute': attribute}\n\n #except exception.InvalidAttribute as exc:\n #raise webob.exc.HTTPBadRequest(explanation=exc.format_message())\n '''\n try:\n if 'public_key' in params:\n scope = self.api.import_key_pair(context,\n context.user_id, name,\n params['public_key'])\n scope = self._filter_scope(scope, user_id=True)\n else:\n scope, private_key = self.api.create_key_pair(\n context, context.user_id, name)\n scope = self._filter_scope(scope, user_id=True)\n scope['private_key'] = private_key\n\n return {'scope': scope}\n\n except exception.ScopeLimitExceeded:\n msg = _(\"Quota exceeded, too many key pairs.\")\n raise webob.exc.HTTPRequestEntityTooLarge(\n explanation=msg,\n headers={'Retry-After': 0})\n except exception.InvalidScope as exc:\n raise webob.exc.HTTPBadRequest(explanation=exc.format_message())\n except exception.ScopeExists as exc:\n raise webob.exc.HTTPConflict(explanation=exc.format_message())\n '''",
"def build_subscope(rc):\n root_scope, root_scope_vrf_id = defineRoot(rc)\n scopes = GetApplicationScopes(rc)\n root_scope_id = GetAppScopeId(scopes,root_scope)\n sub_scope = input(\"Name of the sub scope under Root Scope \" + root_scope + \" you want to create: \")\n subnet = input(\"Which subnet or IP you want your query is (X.X.X.X/Y): \")\n print(\"Building sub scope: \"+CYELLOW+sub_scope+ \" under Root Scope \" +CYELLOW+root_scope+ CEND)\n \n # Now build the sub scope\n req_payload = {\n \"short_name\": sub_scope,\n \"short_query\": {\n \"type\": \"subnet\",\n \"field\": \"ip\",\n \"value\": subnet\n },\n \"parent_app_scope_id\": root_scope_id\n }\n \n resp = rc.post(\"/app_scopes\", json_body=json.dumps(req_payload))\n parsed_resp = json.loads(resp.content)\n if resp.status_code == 200:\n sub_scope_id = str(parsed_resp[\"id\"])\n print(\"Sub scope: \"+CYELLOW+sub_scope+ \"with scope ID \" +CYELLOW+sub_scope_id +\" has been created\"+ CEND)\n else:\n print(\"Error occured during sub scope creation\")\n print(\"Error code: \"+str(resp.status_code))\n print(\"Content: \")\n print(resp.content)\n sys.exit(3)\n\n return sub_scope, sub_scope_id",
"def create(initSampleCount=..., initSeedCount=..., pointDistribution=...) -> retval:\n ...",
"def test_core_get_stored_values_scope_v1(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
create and return sample notice
|
def sample_notice(source, **kwargs):
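    # Sensible defaults for a Notice; any keyword argument overrides the matching field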
defaults = {
'scope': sample_scope(),
'title': 'Test title',
'message': 'Lorem ipsum dolor sit amet',
}
defaults.update(kwargs)
return models.Notice.objects.create(source=source, **defaults)
|
[
"def send_notice(notice):\n\tlogging.debug(\"called : %s\", __name__)\n\n if notice is None:\n\t\tlogging.error(\"empty notice is recieved\")\n return\n\n\ttime = notice.print_time\n\ttitle = notice.title\n\tbody = view.get_text_notice(notice, True)\n\n\ttprint(\"Sending notice {} dated {}.\".format(title, time))\n\tlogging.info(\"Sending notice %s dated %s.\", title, time)\n\n\treturn push(title, body)",
"def notice(self, client, message):\n if client and message:\n messages = utils.split_message(message, self.config.max_length)\n for msg in messages:\n client.fwrite(':{c.srv} NOTICE {c.nick} :{msg}', msg=msg)",
"def ShortExplanation(self):\n return 'failed: %s' % (self.message,)",
"def _construct_msg(self) -> str:\n return '\\n'.join([\n self._formatted_filename(), self._err_description()])",
"def make_error_msg(msg, sequence_name, img_idx, det_idx):\n return \"{0}, image index {1}, detection index {2} : {3}\".format(sequence_name, img_idx, det_idx, msg)",
"def noise_title(train_sigma, test_sigma):\n title = ''\n if train_sigma != 0:\n title += 'training noise'\n title += r'$\\sim\\operatorname{Normal}(0,\\sigma=%.1f)$' % train_sigma\n title += '\\n'\n else:\n title += 'no training noise\\n'\n if test_sigma != 0:\n title += 'testing noise'\n title += r'$\\sim\\operatorname{Normal}(0,\\sigma=%.1f)$' % test_sigma\n else:\n title += 'no testing noise'\n return title",
"def log_ok(s):\n print(\"[ {0}][+] {1}\".format(get_curr_time_str(), s))",
"def log_notice(msg, exc_info=None):\n global _isDaemon\n if _isDaemon:\n syslog.syslog(syslog.LOG_NOTICE, msg)\n else:\n sys.stdout.write(\"N: %s\\n\" % msg)\n log_tb(exc_info)",
"def test_warning(self, message, title):\n\t\tstandardout.standard_out.warning(message, title=title)\n\t\tself.assertIn(title, self._mock_stdout.getvalue())\n\t\tself.assertIn(message, self._mock_stdout.getvalue())",
"def trial(self, *args, **kwargs):\n self.info(*args, **kwargs)",
"def notice_file(self):\n notice = \"\"\n for lic in self.licenses():\n if not lic.is_internal():\n notice += (\n \"===========================================================\\n\"\n )\n notice += \"Notices for file(s):\\n\"\n notice += \"\\n\".join(sorted(lic.exes))\n notice += \"\\n\"\n notice += \"The notices is included for the library: {}\\n\\n\".format(\n lic.name\n )\n notice += (\n \"===========================================================\\n\"\n )\n notice += lic.read()\n notice += \"\\n\\n\"\n return notice",
"def IOLogGenerated(self, delay, stream_name, data):\n logger.info(\"IOLogGenerated(%r, %r, %r)\", delay, stream_name, data)",
"def logger_warning(self,text):\n logging.warning(self.log_my_name()+' '+text)",
"def get_sample_warnings(self):\r\n\r\n # Loop through samples\r\n for s in self.Samples:\r\n s_id = str(s.sample_id)\r\n plate_id = str(self.barc_id)\r\n\r\n # Check if sample warning exists\r\n if s.warning:\r\n warn_str = 'Sample ' + s_id + \\\r\n ' on Plate ' + plate_id + \\\r\n ' is EMPTY & ' + s.warning\r\n self.warnings.append(warn_str)",
"def create_welcome_message(username):\n general_greetings_list = [\"hello\", \"hi\", \"welcome\"]\n secondary_statement_list = [\"hope you're having a great day!\",\n \"miao miao miao (that's cat for have a good day)!\",\n \"enjoy!\",\n \"good luck!\",\n \"happy writing!\"]\n first = random.choice(general_greetings_list)\n uname = username.capitalize()\n second = random.choice(secondary_statement_list)\n msg = first + \" \" + uname + \"! \" + second\n return msg",
"def output(msg):\n print(f\"{datetime.now()}\\t{msg}\")",
"def notice(self, message):\n return self._send_message(self._client.notice, message)",
"def create_tweet():\n\n try:\n text = \"Die Tagesenergie-Werte vom \" + timetool.get_date()\n text = text + \"\\nMagie-O-Meter: \" + grabber.get_magicvalue()\n text = text + \"\\nEnergie Impulswert: \" + grabber.get_energyimpulsvalue()\n text = text + \"\\nBewusstwerdungsindex: \" + grabber.get_consiousvalue()\n except AttributeError as ae:\n loggingservice.log(repr(ae), logfile_name)\n text = grabber.get_errortext()\n return text",
"def typestr(self):\n print(self.interval[0], self.interval[1])\n tmstr = str(self.interval[0]).zfill(2) + ':' + str(self.interval[1]).zfill(2)\n if self.sampletype == 0:\n return 'Average over ' + tmstr + ' with output every ' + tmstr\n elif self.sampletype == 1:\n return 'Snapshot every ' + tmstr\n elif self.sampletype == 2:\n return 'Maximum every ' + tmstr\n elif self.sampletype < 0:\n returnstr = 'Average over ' + str(abs(self.sampletype)) + ' hours with output every ' + tmstr\n return returnstr"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
test model attributes against a payload, with instance being self in a testcase class
|
def test_all_model_attributes(instance, payload, model, serializer):
    ignored_keys = ['image']
    relevant_keys = sorted(set(payload.keys()).difference(ignored_keys))
    for key in relevant_keys:
        try:
            instance.assertEqual(payload[key], getattr(model, key))
        except (AssertionError, AttributeError):
            # Related or hyperlinked fields only match their serialized representation
            instance.assertEqual(payload[key], serializer.data[key])
|
[
"def test_to_check_instance_variables(self):\n self.assertEquals(self.new_source.id, 'newsbyelkwal')\n self.assertEquals(self.new_source.name, 'My News')\n self.assertEquals(self.new_source.description, 'get the latest updates')\n self.assertEquals(self.new_source.url, 'https://google.com')\n self.assertEquals(self.new_source.category, 'general')\n self.assertEquals(self.new_source.country, 'kenya') (edited)",
"def test_get_custom_attributes(self):\n pass",
"def test_field_content(self):\n data = self.serializer.data\n\n self.assertEqual(data['name'], self.dog_attr['name'])",
"def testattr(self):\n self.assertTrue(hasattr(self.basemodel, \"created_at\"))\n self.assertTrue(hasattr(self.basemodel, \"id\"))\n self.assertFalse(hasattr(self.basemodel, \"updated_at\"))\n self.assertFalse(hasattr(self.basemodel, \"random_attr\"))\n self.assertFalse(hasattr(self.basemodel, \"name\"))\n self.basemodel.name = \"Betty\"\n self.basemodel.age = 89\n self.assertTrue(hasattr(self.basemodel, \"name\"))\n self.assertEqual(self.basemodel.name, \"Betty\")\n self.assertTrue(hasattr(self.basemodel, \"age\"))\n delattr(self.basemodel, \"name\")\n self.assertFalse(hasattr(self.basemodel, \"name\"))\n self.assertEqual(self.basemodel.__class__.__name__, \"BaseModel\")",
"def test_attribute_case():\n first_name = 'TestFirstName'\n last_name = 'TestLastName'\n custom_attr = 'TestCustomAttr'\n config = {'firstName': first_name,\n 'lastName': last_name,\n 'CustomAttr': custom_attr}\n profile = models.UserProfile(config)\n assert profile.first_name == first_name\n assert profile.firstName == first_name\n assert profile.last_name == last_name\n assert profile.lastName == last_name\n assert profile.display_name is None\n assert profile.displayName is None\n assert profile.CustomAttr == custom_attr\n\n display_name = 'TestName'\n profile.displayName = display_name\n assert profile.display_name == display_name\n\n new_display_name = 'NewTestName'\n profile.display_name = new_display_name\n assert profile.displayName == new_display_name\n\n new_custom_attr = 'NewCustomAttr'\n profile.CustomAttr = new_custom_attr\n\n req_format = profile.request_format()\n assert req_format['CustomAttr'] == new_custom_attr\n assert req_format['firstName'] == first_name\n assert req_format['lastName'] == last_name\n assert req_format['displayName'] == new_display_name",
"def test_get(self):\n correct_fields = {\n \"features\": self.features,\n \"num_features\": self.num_features,\n \"target\": self.target,\n \"method\": self.method,\n \"num_examples\": self.num_examples,\n }\n\n print(self.model)\n for field, ans in correct_fields.items():\n self.assertEqual(self.model._get(field), ans, \"{} failed\".format(field))",
"def test_Review_attributes(self):\n rev = Review()\n place_id = getattr(rev, \"place_id\")\n user_id = getattr(rev, \"user_id\")\n text = getattr(rev, \"text\")\n self.assertIsInstance(place_id, str)\n self.assertIsInstance(user_id, str)\n self.assertIsInstance(text, str)",
"def test_init(self):\n payload = payloads.DeriveKeyResponsePayload()\n\n self.assertIsNone(payload.unique_identifier)\n self.assertIsNone(payload.template_attribute)",
"def test_attribute_content(self):\n topic = TopicFactory.create()\n self.assertIsNotNone(topic.topicID)\n self.assertIsNotNone(topic.name)\n self.assertIsNotNone(topic.numberOfPhotos)\n self.assertIsNotNone(topic.authorID)\n self.assertIsNotNone(topic.tags)",
"def test_create_model_must_return_sent_payload(self):\n # Picking a model with complete payload\n model = sample_models[0]\n # Perform POST request with model as payload\n response = self.client.post(url_for('aimodels.create_model'), json=model)\n # Ensure response matches data that has been sent previously\n self.assertEqual(model, response.json)\n self.assertEqual(201, response.status_code)",
"def test_instance(self):\n self.assertIsInstance(self.newtest, Amenity)",
"def test_post_model_returns_string(self):\n instance = Post()\n instance.title = 'title'\n assert str(instance) == 'title'",
"def test_serializer_field_values(self):\n pass",
"def test_set_attribute(self):\n database = Mock()\n database.sessions.find_one.return_value = JOHN\n measurement = database.measurements.find_one.return_value = dict(\n _id=\"id\", metric_uuid=METRIC_ID, status=\"red\",\n sources=[\n dict(\n source_uuid=SOURCE_ID, parse_error=None, connection_error=None, value=\"42\", total=None,\n entities=[dict(key=\"entity_key\", title=\"entity title\")])])\n database.measurements.find.return_value = [measurement]\n\n def insert_one(new_measurement):\n new_measurement[\"_id\"] = \"id\"\n\n database.measurements.insert_one = insert_one\n database.reports = Mock()\n database.reports.find.return_value = [create_report()]\n database.datamodels = Mock()\n database.datamodels.find_one.return_value = dict(\n _id=123, metrics=dict(metric_type=dict(direction=\"<\", scales=[\"count\"])),\n sources=dict(source_type=dict(entities={})))\n with patch(\"bottle.request\", Mock(json=dict(attribute=\"value\"))):\n measurement = set_entity_attribute(METRIC_ID, SOURCE_ID, \"entity_key\", \"attribute\", database)\n entity = measurement[\"sources\"][0][\"entity_user_data\"][\"entity_key\"]\n self.assertEqual(dict(attribute=\"value\"), entity)\n self.assertEqual(\n dict(description=\"John changed the attribute of 'entity title' from '' to 'value'.\", email=JOHN[\"email\"],\n uuids=[REPORT_ID, SUBJECT_ID, METRIC_ID, SOURCE_ID]),\n measurement[\"delta\"])",
"def test_model_prediction(self):\n self.assertTrue(type(self.pred) is dict)",
"def test_init(self):\n payload = payloads.DeriveKeyRequestPayload()\n\n self.assertIsNone(payload.object_type)\n self.assertIsNone(payload.unique_identifiers)\n self.assertIsNone(payload.derivation_method)\n self.assertIsNone(payload.derivation_parameters)\n self.assertIsNone(payload.template_attribute)",
"def test_text_attr(self):\n self.assertTrue(hasattr(self.review, \"text\"))\n self.assertTrue(self.review.text)",
"def test_action_post_bad_json(self):\n self.checkBadJSONPayload(self.postToApi)",
"def test_instance(self):\n ins = Review()\n self.assertIsInstance(ins, Review)",
"def test_is_subclass(self):\n self.assertIsInstance(self.review, BaseModel)\n self.assertTrue(hasattr(self.review, \"id\"))\n self.assertTrue(hasattr(self.review, \"created_at\"))\n self.assertTrue(hasattr(self.review, \"updated_at\"))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
test retrieving a notice's detail
|
def test_retrieve_notice_detail(self):
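    # Create a notice, GET its detail endpoint, and compare the response to the serialized data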
notice = sample_notice(source=self.user)
serializer = serializers.NoticeSerializer(notice, context=serializer_context)
url = notice_detail_url(notice_id=notice.id)
res = self.client.get(url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
|
[
"def test_notification_get(self):\n pass",
"def _ReadLMNoticeContents(self):\n return self.RemoteCommand(f'type {self.temp_dir}\\\\{self._LM_NOTICE_LOG}')[0]",
"def test_validate_notice(session, desc, valid, doc_type, notice, mhr_num, account, message_content):\n # setup\n json_data = get_valid_registration()\n json_data['note']['documentType'] = doc_type\n if notice:\n json_data['note']['givingNoticeParty'] = notice\n else:\n del json_data['note']['givingNoticeParty']\n del json_data['note']['effectiveDateTime']\n registration: MhrRegistration = MhrRegistration.find_by_mhr_number(mhr_num, account)\n error_msg = validator.validate_note(registration, json_data, True, STAFF_ROLE)\n current_app.logger.debug(error_msg)\n if valid:\n assert error_msg == ''\n else:\n assert error_msg != ''\n if message_content:\n assert error_msg.find(message_content) != -1",
"def get_missing_message(self, param):",
"def test_verify_message_format(self):\n\n def message_assert(message):\n fields = [('publisher_id', 'publisher_id'),\n ('event_type', 'event_type'),\n ('priority', 'WARN'),\n ('payload', dict(a=3))]\n for k, v in fields:\n self.assertEqual(message[k], v)\n self.assertTrue(len(message['message_id']) > 0)\n self.assertTrue(len(message['timestamp']) > 0)\n\n self.stubs.Set(nova.notifier.no_op_notifier, 'notify',\n message_assert)\n notify('publisher_id', 'event_type',\n nova.notifier.api.WARN, dict(a=3))",
"def test_warn_severity(check_plugin):\n error = check_plugin.warning('A123', 'No worries, its just a warning')\n assert error.severity == Nit.WARNING",
"def test_warning(self, message, title):\n\t\tstandardout.standard_out.warning(message, title=title)\n\t\tself.assertIn(title, self._mock_stdout.getvalue())\n\t\tself.assertIn(message, self._mock_stdout.getvalue())",
"def test_missing_lookup_name(self):\n self.assertIn(\n (\"WARNING: No field definition for ``lookup_key_name`` found in \"),\n self.warning)",
"def test_message(message):\n print \"Got a status message: \" + message['data']",
"def test_default_severity_level() -> None:\n assert Notification(\"test\").severity == \"information\"",
"def test_info(self, message, title):\n\t\tstandardout.standard_out.info(message, title=title)\n\t\tself.assertIn(title, self._mock_stdout.getvalue())\n\t\tself.assertIn(message, self._mock_stdout.getvalue())",
"def test_known_issue(self):\n self.fail_ki('See BZ123456')",
"def test_warn():\n warn(\"Test\")\n eq_(\"\\nWarning: Test\\n\\n\", sys.stderr.getvalue())",
"def ShortExplanation(self):\n return 'failed: %s' % (self.message,)",
"def test_info(self):\n with self._fms as server:\n server.login()\n foundset = server.find(query=[{u'id': 1}], scripts={u'after': [u'testScript_dataInfo', None]})\n expected_info = json.loads(server.last_script_result[u'after'][1])\n\n self.assertDictEqual(foundset.info, expected_info[u'general'])\n self.assertDictEqual(foundset[0].portal_notes.info, expected_info[u'portal_notes'])",
"def test_getDeprecationWarningString(self):\n version = Version(\"Twisted\", 8, 0, 0)\n self.assertEqual(\n getDeprecationWarningString(self.test_getDeprecationWarningString, version),\n \"%s.DeprecationWarningsTests.test_getDeprecationWarningString \"\n \"was deprecated in Twisted 8.0.0\" % (__name__,),\n )",
"def test_silent_info(capsys):\n output = Silent()\n output.log(OutputMethod.INFO)\n captured = capsys.readouterr()\n assert not captured.out",
"def handle_inform(self, msg):\n print msg",
"def fail_detail(self):\n return self._fail_detail"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
test partially updating a notice's detail using patch
|
def test_partial_update_notice(self):
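    # PATCH with only the changed fields (scope and message); the remaining fields keep their values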
notice = sample_notice(source=self.user)
scope = sample_scope(description='Private', is_general=False)
scope_serializer = serializers.ScopeSerializer(scope, context=serializer_context)
payload = {
'scope': scope_serializer.data['url'],
'message': 'An updated message'
}
url = notice_detail_url(notice.id)
res = self.client.patch(url, payload)
notice.refresh_from_db()
notice_serializer = serializers.NoticeSerializer(notice, context=serializer_context)
self.assertEqual(res.status_code, status.HTTP_200_OK)
test_all_model_attributes(self, payload, notice, notice_serializer)
|
[
"def test_full_update_notice(self):\n notice = sample_notice(source=self.user)\n scope = sample_scope(description='Private test', is_first_year=True)\n scope_serializer = serializers.ScopeSerializer(scope, context=serializer_context)\n payload = {\n 'source': self.user.id,\n 'scope': scope_serializer.data['url'],\n 'title': 'Test title 3',\n 'message': 'An updated message'\n }\n\n url = notice_detail_url(notice.id)\n res = self.client.put(url, payload)\n\n notice.refresh_from_db()\n notice_serializer = serializers.NoticeSerializer(notice, context=serializer_context)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n test_all_model_attributes(self, payload, notice, notice_serializer)",
"def test_update_notification(self):\n pass",
"def test_full_update_notification(self):\n pass",
"def test_update_partial_more_expensive_retirement_missing_info(self):\n self.client.force_authenticate(user=self.admin)\n\n self.retirement2.price = 999\n self.retirement2.save()\n\n data = {\n 'retirement': reverse(\n 'retirement:retirement-detail',\n kwargs={'pk': 2},\n ),\n }\n\n response = self.client.patch(\n reverse(\n 'retirement:reservation-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_400_BAD_REQUEST,\n response.content\n )\n\n response_data = json.loads(response.content)\n\n content = {\n 'non_field_errors': [\n \"The new retirement is more expensive than the current one. \"\n \"Provide a payment_token or single_use_token to charge the \"\n \"balance.\"\n ]\n }\n\n self.assertEqual(response_data, content)\n\n self.retirement2.price = 199\n self.retirement2.save()",
"def test_update(self, record):",
"def test_partial_update_fighter(self):\n test_update = {\n \"stamina\":\"56\"\n }\n prev_stamina = self.fighter.stamina\n res = self.request.patch(\"http://127.0.0.1:8000/fighter/1/\",test_update)\n self.assertFalse(res.data.get(\"stamina\") == prev_stamina)",
"def test_update_escalation(self):\n pass",
"def test_update_partial_ordered_more_than_1(self):\n self.client.force_authenticate(user=self.admin)\n\n self.order_line.quantity = 2\n self.order_line.save()\n\n FIXED_TIME = datetime(2030, 1, 10, tzinfo=LOCAL_TIMEZONE)\n\n data = {\n 'is_present': True,\n 'retirement': reverse(\n 'retirement:retirement-detail',\n kwargs={'pk': 2},\n ),\n }\n\n with mock.patch(\n 'django.utils.timezone.now', return_value=FIXED_TIME):\n response = self.client.patch(\n reverse(\n 'retirement:reservation-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_400_BAD_REQUEST,\n response.content\n )\n\n response_data = json.loads(response.content)\n\n content = {\n 'non_field_errors': [\n \"The order containing this reservation has a quantity \"\n \"bigger than 1. Please contact the support team.\"\n ]\n }\n\n self.assertEqual(response_data, content)",
"def test_simple_update_via_patch(self):\n admin_client = APIClient()\n admin_client.login(username=\"admin\", password=\"admin\")\n # This is the builtin user 'Administrator' with username 'admin'. The pk is valid.\n user_pk = User.objects.get(username=\"admin\").pk\n\n response = admin_client.patch(\n reverse(\"user-detail\", args=[user_pk]),\n {\"last_name\": \"New name tu3ooh5Iez5Aec2laefo\"},\n )\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n user = User.objects.get(pk=user_pk)\n self.assertEqual(user.last_name, \"New name tu3ooh5Iez5Aec2laefo\")\n self.assertEqual(user.username, \"admin\")",
"def test_services_partial_update(self):\n pass",
"def test_update_partial_with_forbidden_fields(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'is_active': False,\n 'is_present': True,\n }\n\n response = self.client.patch(\n reverse(\n 'retirement:reservation-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n response_data = json.loads(response.content)\n\n content = {\n 'non_field_errors': [\n \"Only is_present and retirement can be updated. To change \"\n \"other fields, delete this reservation and create a new one.\"\n ]\n }\n\n self.assertEqual(response_data, content)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_update(self):\n # this is really tested graphically, no unit test here\n pass",
"def test_rirs_partial_update(self):\n pass",
"def test_update_product_live_time_details_internal_error(monkeypatch):\n update_data = {\n 'time_of_day_product': datetime.time(10, 20, 30),\n 'time_zone': 'GMT',\n 'store_id': 1\n }\n\n db.insert_product_live_time_data()\n response = product_live_time.update_product_live_time_details(\n 11, update_data)\n\n assert response.status == http_status.INTERNAL_ERROR",
"def test_patch_incident_comment(self): \n response = base.patch_incident(self.credentials, '1', 'comment')\n self.assertEqual(response.status_code, 200)",
"def test_update_recipe_invalid_field(client):\n resp = client.patch('/recipe/1', json={'random': 'xxxx'})\n assert resp.status_code == server.HTTP_BAD_REQUEST",
"def test_partial_update_outlet(self):\n sample_id = 1\n url = reverse('v1:outlet-detail', kwargs={'outlet_id': sample_id})\n data = {'name': 'NewNews'}\n response = self.client.patch(url, data, format='json')\n result = json.loads(response.content.decode('utf-8'))\n expected = Outlet.objects.get(id=sample_id)\n self.assertEqual(result['name'], expected.name)\n self.assertEqual(result['website'], expected.website)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_patch_investment(self):\n pass",
"def test_update_partial(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n \"max_use\": 1000,\n \"max_use_per_user\": 20,\n \"details\": \"Any package for clients (updated max_use)\",\n }\n\n response = self.client.patch(\n reverse(\n 'coupon-detail',\n kwargs={'pk': self.coupon.id},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_200_OK,\n response.content,\n )\n\n response_data = json.loads(response.content)\n\n content = {\n \"url\": \"http://testserver/coupons/\" + str(self.coupon.id),\n \"id\": self.coupon.id,\n \"applicable_product_types\": [\n \"package\"\n ],\n \"value\": \"13.00\",\n \"percent_off\": None,\n \"code\": response_data['code'],\n \"start_time\": \"2019-01-06T15:11:05-05:00\",\n \"end_time\": \"2020-01-06T15:11:06-05:00\",\n \"max_use\": 1000,\n \"max_use_per_user\": 20,\n \"details\": \"Any package for clients (updated max_use)\",\n \"owner\": \"http://testserver/users/\" + str(self.user.id),\n \"organization\": None,\n \"applicable_retreats\": [],\n \"applicable_retreat_types\": [],\n \"applicable_timeslots\": [],\n \"applicable_packages\": [],\n \"applicable_memberships\": [],\n \"users\": [],\n \"is_applicable_to_physical_retreat\": False,\n \"is_applicable_to_virtual_retreat\": False\n }\n\n self.assertEqual(\n response_data,\n content\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
test updating a notice's detail using put
|
def test_full_update_notice(self):
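    # PUT replaces the whole notice, so the payload carries every writable field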
notice = sample_notice(source=self.user)
scope = sample_scope(description='Private test', is_first_year=True)
scope_serializer = serializers.ScopeSerializer(scope, context=serializer_context)
payload = {
'source': self.user.id,
'scope': scope_serializer.data['url'],
'title': 'Test title 3',
'message': 'An updated message'
}
url = notice_detail_url(notice.id)
res = self.client.put(url, payload)
notice.refresh_from_db()
notice_serializer = serializers.NoticeSerializer(notice, context=serializer_context)
self.assertEqual(res.status_code, status.HTTP_200_OK)
test_all_model_attributes(self, payload, notice, notice_serializer)
|
[
"def test_partial_update_notice(self):\n notice = sample_notice(source=self.user)\n scope = sample_scope(description='Private', is_general=False)\n scope_serializer = serializers.ScopeSerializer(scope, context=serializer_context)\n payload = {\n 'scope': scope_serializer.data['url'],\n 'message': 'An updated message'\n }\n\n url = notice_detail_url(notice.id)\n res = self.client.patch(url, payload)\n\n notice.refresh_from_db()\n notice_serializer = serializers.NoticeSerializer(notice, context=serializer_context)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n test_all_model_attributes(self, payload, notice, notice_serializer)",
"def test_update(self, record):",
"def test_update_valid_info(self):\n self.app.post('/api/tour', json=sample_tour)\n new_values = {'destination_country': 'aaa', 'duration_days': 0}\n response = self.app.put('/api/tour/1', json=new_values)\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n expected_tour = sample_tour.copy()\n expected_tour.update(new_values)\n expected_tour['tour_id'] = 1\n response = self.app.get('/api/tour/1')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertEqual(status.HTTP_200_OK, response.status_code)",
"def test_api_update_question(self):\r\n chg_question = {'question_text': 'Are you hungery?'}\r\n res = self.client.put(\r\n reverse('details', kwargs={'pk': question.id}),\r\n chg_question, format='json'\r\n )\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)",
"def test_should_successfully_update_a_todo(self):\n response = self.client.put('/todo/%s/' % (self.existingTodo.id), self.editTodo, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n assert(response.data['name'], self.editTodo['name'])",
"def test_UPDATE_student(self):\n url = reverse('get_update_delete_student', args=[self.student.id])\n data = {\n 'person': {\n 'first_name': 'First Name TestCase 2',\n 'last_name': 'Last Name TestCase 2',\n 'date_of_birth': '1991-07-13',\n 'gender': 'm',\n 'type': 'voluntary',\n 'cpf': '1234951',\n 'andress': {\n 'type': 'residential',\n 'zip_code': '81230162',\n 'street': 'Street TestCase',\n 'number': '123',\n 'complement': 'TestCase complement',\n 'district': 'TestCase distric',\n 'city': 'TestCase city',\n 'state': 'TestCase state',\n 'country': 'TestCase country'\n },\n 'company': self.company.id\n }\n }\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_update_document_using_put(self):\n pass",
"def test_update_account_using_put(self):\n pass",
"def test_put_incident(self):\n response = base.put_incident(self.credentials, '1')\n self.assertEqual(response.status_code, 200)",
"def test_update_view(self):\n update_data = {'answer': 'updated answer'}\n\n response = self.client.put(self.url, update_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n update_answer = Answer.objects.get(id=self.answer.id)\n self.assertNotEqual(update_answer.answer, self.answer.answer)\n self.assertEqual(update_answer.answer, update_data.get('answer'))\n with self.assertRaises(Answer.DoesNotExist):\n Answer.objects.get(question=self.question, answer=self.answer.answer)\n\n response_json = json.dumps(response.data)\n self.assertIn(str(update_answer.id), response_json)\n self.assertIn(update_answer.answer, response_json)\n self.assertIn(str(update_answer.votes_count), response_json)\n\n response = self.client.put(self.bad_url, update_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_update_notification(self):\n pass",
"def test_update_outlet(self):\n sample_id = 1\n url = reverse('v1:outlet-detail', kwargs={'outlet_id': sample_id})\n data = {'name': 'NewNews', 'website': 'news2.com', 'description': ''}\n response = self.client.put(url, data, format='json')\n result = json.loads(response.content.decode('utf-8'))\n expected = Outlet.objects.get(id=sample_id)\n self.assertEqual(result['name'], expected.name)\n self.assertEqual(result['website'], expected.website)\n self.assertEqual(result['description'], expected.description)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_update_incident(self):\n self.add_incident(\"Corruption Case 7\")\n\n response = self.app.patch(\n # dumps converts data into json\n \"api/v1/incidents/1\",\n data=json.dumps(self.incident1),\n headers={\"Content-Type\": \"application/json\"})\n # content type notifies the data being sent is in json formart\n result = json.loads(response.data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(result[\"data\"][0][\"message\"],\n \"incident record has been updated\")",
"def test_update_news_post(self):\n\n client = self.app.test_client()\n authorization = self.create_user()\n news_post_id = self.create_news_post(authorization)\n response = client.put('/news/' + news_post_id,\n data=dumps({'postBody': 'edited message 2'}), content_type='application/json',\n headers={'Authorization': authorization})\n self.assertEqual(response.status_code, 200)",
"def test_signup_detail_put(self):\n url = \"/api/signupdetail/1/\"\n data = {\"companyName\": \"abcDetail\", \"companyWebsite\": \"http://www.abc.com\",\n \"shortIntro\": \"company test company test\", \"location\": \"vadodara\", \"foundedIn\": \"2010\",\n \"facebookUrl\": \"\", \"googleUrl\": \"\", \"twitterHandler\": \"\"\n }\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_patch_obj_id_put(self):\n pass",
"def test_update(self):\n self.instance.update(permissions=\"admin\")\n\n self.session.patch.assert_called_once_with(\n url_for_inviter(), data='{\"permissions\": \"admin\"}'\n )",
"def test_updating_of_an_order_with_put_fails(self):\n self.client.force_authenticate(user=self.user)\n data = {\n \"item_name\": \"updated item\",\n }\n res = self.client.put(self.order_url, data)\n self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n self.assertEqual(res.data[\"message\"], \"To update order, use PATCH method\")",
"def test_update_escalation(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Main function. Runs simulation based on the given four blocks.
|
def main():
B1 = ['R', 'Y', 'B', 'G', 'G', 'R']
B2 = ['Y', 'Y', 'R', 'B', 'G', 'R']
B3 = ['G', 'B', 'G', 'R', 'Y', 'B']
B4 = ['Y', 'R', 'Y', 'B', 'Y', 'G']
Blocks = [B1, B2, B3, B4]
print("This is the solution for the box:")
print("*********************************")
Soln = simulate(Blocks)
one = Soln[0]
two = Soln[1]
three = Soln[2]
four = Soln[3]
print("B1: ", B1)
print("B2: ", B2)
print("B3: ", B3)
print("B4: ", B4)
print("For B1, do this ring: ", one[0:4])
print("For B2, do this ring: ", two[0:4])
print("For B3, do this ring: ", three[0:4])
print("For B4, do this ring: ", four[0:4])
|
[
"def run_experiment():\n \n print_instructions(instructions)\n print_instructions(instructions2)\n run_blocks(PRACTICE_BLOCKS, f, True) \n print_instructions(instructions3)\n run_blocks(BLOCKS, f)\n print_instructions(exit_message)\n save_and_quit(f)",
"def main():\n # TODO (gina) : command line simulation inputs\n\n fname = \"simulation_greedy_8_may\"\n\n mode = (False, True, False)\n simulators = [(GreedySimulator, []), ]\n #\n # (GreedySimulator, []),\n # (RandomSwapsSimulator, []),\n # (BestCandidatesSimulator, []),\n traffics = [3, 0, 6, 8]\n simulate(fname, simulators, traffics, mode)\n\n summarize_results(fname)",
"def start(registered):\n api.newline()\n api.info(\"Seed: \" + str(api.settings('randomSeed')))\n api.info(\"(you will need this seed to replay the exact same game.)\")\n api.newline()\n\n graph.makeColorMap([main[1] for main in registered])\n\n for main in registered:\n w, h = api.settings('worldSize')\n x, y = api.RNG.randrange(w), api.RNG.randrange(h)\n #x, y = world.generate.nextSpanwPosition()\n loop.world.addNest(Queen(x, y, main[0], main[1]).nest)\n\n loop.counter = 0\n\n loop.mainseq = api.seqstart(\"game\")\n api.info(\"------------------- Start of simulation -------------------\")\n\n graph.start(loop)",
"def simulate(randomGenerators, simTime, initPhaseTime=0, printInfo=False):\n if printInfo:\n print(\"Input parameters before...\")\n for key in randomGenerators.keys():\n print(key+':',randomGenerators[key].lmbda)\n\n #The component instances\n components = {\n 'C1': Component('C1', randomGenerators['servinsp1']),\n 'C2': Component('C2', randomGenerators['servinsp22']),\n 'C3': Component('C3', randomGenerators['servinsp23']),\n }\n\n #The workstation instances\n workstations = [\n Workstation('workstation 1', ('C1',), randomGenerators['ws1']),\n Workstation('workstation 2', ('C1','C2'), randomGenerators['ws2']),\n Workstation('workstation 3', ('C1','C3'), randomGenerators['ws3']),\n ]\n\n #The inspector instances\n inspectors = [\n Inspector('inspector 1', (components['C1'],), workstations),\n Inspector('inspector 2', (components['C2'],components['C3']), workstations ),\n ]\n\n iterables = inspectors + workstations\n\n def passTime(amountTime):\n timePassed = 0\n while timePassed < amountTime:\n #Calculate time to next interesting thing\n timeToPass = float('inf')\n for iterable in iterables:\n if not iterable.blocked and iterable.workTime < timeToPass:\n timeToPass = iterable.workTime\n \n if timePassed + timeToPass >= amountTime:\n timeToPass = amountTime - timePassed\n printHandler(\"\\nT\",timeToPass)\n\n timePassed += timeToPass\n\n #Advance time until next interesting thing\n for iterable in iterables:#make inspectors check for opening\n iterable.advanceTime(timeToPass)\n \n for inspector in inspectors:#make inspectors check for opening\n inspector.advanceTime(0)\n \n\n if initPhaseTime:\n passTime(initPhaseTime)\n for iterable in iterables:\n iterable.timeWaiting = 0\n for workstation in workstations:\n workstation.completionTimes = []\n workstation.timeSinceLastCompletion = None\n printHandler(\"## BEGIN ACTUAL SIMULATION\")\n\n passTime(simTime)\n\n\n\n def completionInfo(workstation):\n amnt = len(workstation.completionTimes)\n if amnt != 0:\n avg = sum(workstation.completionTimes) / amnt\n if amnt != 1:\n var = math.sqrt(sum([ (y - avg) ** 2 for y in workstation.completionTimes ]) / (amnt - 1))\n else:\n var = 0\n else:\n avg = 0\n var = None\n return {'amount':amnt, 'average':avg, 'variance':var}\n \n returnInfo = {\n\n \n 'waitTimes':{\n 'inspector1':inspectors[0].timeWaiting,\n 'inspector2':inspectors[1].timeWaiting,\n 'workstation1':workstations[0].timeWaiting,\n 'workstation2':workstations[1].timeWaiting,\n 'workstation3':workstations[2].timeWaiting,\n },\n\n #redundant info so sensitivity analysis stuff doesn't need to change\n 'completed':{\n 'product1':len(workstations[0].completionTimes),\n 'product2':len(workstations[1].completionTimes),\n 'product3':len(workstations[2].completionTimes),\n },\n\n 'completionInfo':{\n 'product1':completionInfo(workstations[0]),\n 'product2':completionInfo(workstations[1]),\n 'product3':completionInfo(workstations[2]),\n }\n \n }\n\n if printInfo:\n print(\"\\nSimulated\", simTime, \"time...\")\n\n for p in ('product1','product2','product3'):\n print(\"workstation 1 - amnt:\",returnInfo['completionInfo'][p]['amount'],\n 'avg:',returnInfo['completionInfo'][p]['average'],\n 'var',returnInfo['completionInfo'][p]['variance'])\n \n for iterable in iterables:\n print(iterable.name, \"time waiting:\", iterable.timeWaiting, ' time units)')\n\n\n \n print(\"\\nInput parameters after...\")\n for key in randomGenerators.keys():\n print(key+':',randomGenerators[key].lmbda)\n\n return returnInfo",
"def main():\n\n\tname = \"SS_pyNN_closedLoop_webots\"\n\teesAmplitudes = [\"1\",\"240\"]\n\teesFrequency = \"40\"\n\tdelay = \"2\"\n\tweights_1 = np.linspace(0.05,0.1,5)\n\tweights_2 = np.linspace(0.01,0.05,5)\n\tweights_3 = np.linspace(0.01,0.1,10)\n\n\tw4 = -0.00145\n\tw5 = -0.0045\n\n\tsimTime = \"3000\"\n\tnSim = len(weights_1)*len(weights_2)*len(weights_3)*len(eesAmplitudes)\n\tcount=0.\n\tpercLastPrint=0.\n\tprintPeriod = 0.05\n\n\tfor w1 in weights_1:\n\t\tfor w2 in weights_2:\n\t\t\tfor w3 in weights_3:\n\t\t\t\tfor eesAmplitude in eesAmplitudes:\n\t\t\t\t\tresultName = name+\"_eesAmp_%d_w1_%f_w2_%f_w3_%f_w4_%f_w5_%f\" % (int(eesAmplitude),w1,w2,w3,w4,w5)\n\t\t\t\t\tresultFile = gt.find(\"*\"+resultName+\"*.p\",pathToResults)\n\t\t\t\t\tif not resultFile:\n\t\t\t\t\t\tinputFile = \"generatedStructures/ss_cl_w1_%f_w2_%f_w3_%f_w4_%f_w5_%f.txt\" % (w1,w2,w3,w4,w5)\n\t\t\t\t\t\ttls.modify_network_structure(\"templateClosedLoop2Dof.txt\",inputFile,delay,[w1,w2,w3,w4,w5])\n\t\t\t\t\t\tprogram = ['python','./scripts/runClosedLoopSim.py',eesFrequency,eesAmplitude,\"hanging\",\"mouse\",simTime,resultName,inputFile]\n\t\t\t\t\t\tgt.run_subprocess(program)\n\n\t\t\t\t\tcount+=1\n\t\t\t\t\tif count/nSim-percLastPrint>=printPeriod:\n\t\t\t\t\t\tpercLastPrint=count/nSim\n\t\t\t\t\t\tprint str(round(count/nSim*100))+\"% of simulations performed...\"",
"def run_simulation(self):\n\n # create appropriate object\n simulation = self.all_sims[self.testcase](self.testcase, self.params.paramfile, self.root,\n self.plots, self.movies)\n\n simulation.run_simulation()\n self.finishedTestcase()",
"def task_4_3_1():\n sim = Simulation()\n sim.sim_param.SIM_TIME = 10000000\n sim.sim_param.S = 10000\n for rho in [.01, .5, .8, .95]:\n sim.sim_param.RHO = rho\n sim.reset()\n print \"_____________________________________________________\"\n print \"NEW RUN with rho=\" + str(sim.sim_param.RHO)\n print \"_____________________________________________________\\n\"\n sim.do_simulation()\n sim.counter_collection.report()",
"def run(self, steps_per_update=1):\n def loop(sim):\n sim.run(steps_per_update)\n self.loop(loop)",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5, display=True) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified num ber of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n a.sumUp()",
"def runSimulation():\n\tdepartureCount = 0\n\ttimes = []\n\tqueues = []\n\tarrivalCountArray = [0]\n\twhile (True):\t\n\t\tnew_event = heapq.heappop(EVENTHEAP)\n\t\tif (new_event[1] == 'd'):\n\t\t\tdepartureCount += 1\n\t\t\tarrivalCountArray.append(0)\n\t\telif (new_event[1] == 'a'):\n\t\t\tarrivalCountArray.append(1)\n\t\tupdateState(new_event, queues)\n\t\tupdateFeasibleEvents(new_event, times)\n\n\t\tif (LIMIT_SWITCH):\n\t\t\tif (departureCount >= LIMIT_VALUE):\n\t\t\t\tbreak\n\t\telse:\n\t\t\tif (times[-1] >= LIMIT_VALUE):\n\t\t\t\tbreak\n\n\ttarray = np.array(times)\n\tqarray = np.array(queues)\n\tq_substantive = qarray[:-1]\n\tdifft = np.diff(tarray)\n\tu = np.sum(q_substantive*difft)\n\tL = u/tarray[-1]\n\tS = u/len(arrivals)\n\treturn tarray, qarray, arrivalCountArray, L, S",
"def run_simulation(self):\n\n import numpy as np\n\n # set the initial total pair energy between particles in the system\n total_pair_energy = self.calculate_initial_energy\n print(f'total pair initial: {total_pair_energy}')\n # TODO: need to add a function that explicitly calculates the tail correction\n tail_correction = self.calculate_tail_correction\n print(f'tail correction: {tail_correction}')\n\n # set up an array to store energy values\n energy_array = np.zeros(self.arguments.n_steps)\n\n # start the Monte Carlo iterations\n n_trials = 0\n n_accept = 0\n \n # check to make sure we write to an empty file\n if self.arguments.output_traj:\n with open(self.arguments.traj_file,\"w\") as fn:\n pass\n for i_step in range(self.arguments.n_steps):\n #print(f'Step{i_step}:')\n n_trials += 1\n i_particle = np.random.randint(self.num_particles)\n random_displacement = (2.0 * np.random.rand(3) - 1.0) * self.arguments.max_displacement\n #print(f'random displacement: {random_displacement}')\n current_energy = self.energy.calculate_pair_energy(self.coordinates, self.box_length, i_particle)\n #print(f'current energy: {current_energy}')\n proposed_coordinates = self.coordinates.copy()\n proposed_coordinates[i_particle] += random_displacement\n proposed_coordinates -= self.box_length * np.round(proposed_coordinates / self.box_length)\n proposed_energy = self.energy.calculate_pair_energy(proposed_coordinates, self.box_length, i_particle)\n #print(f'i particle: {i_particle}')\n #print(f'proposd energy: {proposed_energy}')\n delta_e = proposed_energy - current_energy\n beta = 1.0 / self.arguments.reduced_temperature\n accept = self.accept_or_reject(delta_e, beta)\n #print(f'accept: {accept}')\n if accept:\n total_pair_energy += delta_e\n n_accept += 1\n self.coordinates[i_particle] += random_displacement\n self.coordinates -= self.box_length * np.round(self.coordinates / self.box_length)\n total_energy = (total_pair_energy + tail_correction) / self.num_particles\n #print(f'total energy: {total_energy}')\n energy_array[i_step] = total_energy\n if np.mod(i_step + 1, self.arguments.freq) == 0:\n print(i_step + 1, energy_array[i_step])\n if self.arguments.plot:\n ax = plt.axes(projection='3d')\n ax.set_xlim([-self.box_length/2, self.box_length/2])\n ax.set_ylim([-self.box_length/2, self.box_length/2])\n ax.set_zlim([-self.box_length/2, self.box_length/2])\n for i in range(self.arguments.num_particles):\n ax.plot3D([self.coordinates[i, 0]], [self.coordinates[i, 1]], [self.coordinates[i, 2]], 'o')\n plt.pause(0.05)\n # if prefer to output trajectories, open the output file\n if self.arguments.output_traj:\n with open(self.arguments.traj_file,\"a+\") as fn:\n # if prefer to output trajectories, open the output file\n if np.mod(i_step + 1, self.arguments.traj_freq) == 0:\n fn.write(f'Step: {i_step + 1} \\n')\n for i_atom in range(len(self.coordinates)):\n fn.write(f'{self.coordinates[i_atom, 0]} {self.coordinates[i_atom, 1]} {self.coordinates[i_atom, 2]} \\n')\n\n if self.arguments.tune_displacement:\n self.arguments.max_displacement, n_accept, n_trials = self.adjust_displacement(self.arguments.max_displacement, n_accept, n_trials)\n\n self.energy_array = energy_array\n\n return",
"def run(self, voxels, entry='all'):\n\n # Get 'global' parameters, that DO NOT change with voxel, from Dataset\n # - these processing/data parameters have to be updated at run time \n self.spectral_dims = self._dataset.spectral_dims\n self.spectral_dim0 = self._dataset.spectral_dims[0]\n self.spectral_hpp = self._dataset.spectral_hpp\n self.zero_fill_multiplier = self._dataset.zero_fill_multiplier\n self.phase_1_pivot = self._dataset.phase_1_pivot\n \n for voxel in voxels:\n # local copy of input data\n self.data = self._dataset.get_source_data('spectral')\n self.data = self.data[voxel[2],voxel[1],voxel[0],:]\n self.data = self.data.copy()\n\n # copy 'global' parameters, that DO change with voxel, from Dataset\n self.frequency_shift = self._dataset.get_frequency_shift(voxel)\n self.phase0 = self._dataset.get_phase_0(voxel)\n self.phase1 = self._dataset.get_phase_1(voxel)\n \n # copy block parameters, that DO change with voxel, from Block\n svd_output = self._block.get_svd_output(voxel)\n\n self.ndp = self._block.get_data_point_count(voxel)\n self.nssv = self._block.get_signal_singular_value_count(voxel)\n self.do_fit = self._block.get_do_fit(voxel)\n self.svd_output = svd_output\n self.voxel = voxel\n\n # select the chain processing functor based on the entry point\n if entry == 'all':\n self.functor_all(self)\n else:\n print('oooops!')\n\n # save data and parameter results into the Block results arrays\n self._block.data[voxel[2],voxel[1],voxel[0],:] = self.freq.copy()\n self._block.set_svd_output(self.svd_output, voxel)\n self._block.set_do_fit(self.do_fit, voxel)\n\n # Return values specific to calling Tab that contains this Block.Chain\n # Used to update its self.view (plot_panel_spectrum object).\n\n plot_results = { 'svd_data' : self.svd_data.copy(),\n 'svd_peaks_checked' : self.svd_peaks_checked.copy(),\n 'svd_peaks_checked_sum' : self.svd_peaks_checked_sum.copy(),\n 'svd_fids_checked_sum' : self.svd_fids_checked.copy(),\n 'freq' : self.freq.copy() }\n \n return plot_results",
"def run_simulation(num_scanLanes,num_min,prob_perSec,prob_preCheck,time_ID,time_scan,time_preCheckScan):\n \n # if not precheck, run simulation without pre check\n if prob_preCheck == None:\n no_preCheck_results = no_preCheck(num_scanLanes,num_min,prob_perSec,time_ID,time_scan)\n \n # **Output**\n print()\n print(\"Number of scanners:\",num_scanLanes)\n print(\"Simulation Length:\",num_min,\"minutes\")\n print(\"Passenger arrival probability:\",prob_perSec)\n print(\"Simulate PreCheck: NO\")\n print()\n print(\"Number of passengers cleared:\",no_preCheck_results[0])\n print(\"Average wait time:\",no_preCheck_results[1],\"minutes\")\n \n scan_linesData = no_preCheck_results[2]\n for key,values in scan_linesData.items():\n print(\"Avg Lane\",key[-1],\"Wait Time:\", values[0],\"minutes\", \"(\"+str(values[1])+\" people)\") \n \n print()\n print(\"Total number of passengers in line at end of simulation:\",no_preCheck_results[3]) \n print()\n \n # run simulation with precheck\n else:\n preCheck_results = preCheck(num_scanLanes,num_min,prob_perSec,prob_preCheck,time_ID,time_scan,time_preCheckScan)\n \n # **Output**\n print()\n print(\"Number of scanners:\",num_scanLanes)\n print(\"Simulation Length:\",num_min,\"minutes\")\n print(\"Passenger arrival probability:\",prob_perSec)\n print(\"Simulate PreCheck: YES\")\n print()\n print(\"Number of passengers cleared:\",preCheck_results[0])\n print(\"Average wait time:\",preCheck_results[1],\"minutes\")\n \n scan_linesData = preCheck_results[2]\n for key,values in scan_linesData.items():\n if key == 'preCheck_scan':\n print(\"Avg PreCheck Scan Wait Time:\",values[0],\"minutes\",\"(\"+str(values[1])+\" people)\")\n else:\n print(\"Avg Lane\",key[-1],\"Wait Time:\", values[0],\"minutes\", \"(\"+str(values[1])+\" people)\") \n \n print()\n print(\"Total number of passengers in line at end of simulation:\",preCheck_results[3]) \n print()",
"def run_simulation():\n # setup the environment\n env = gym.make('CartPole-v0')\n env._max_episode_steps = max_steps * 100\n env.reset()\n\n # initial dataset to train the neural net on\n initial_data = get_random_moves(env)\n\n input_list, output_list = get_inputs(initial_data)\n\n # get the trained instance of the neural network back\n curr_nn = train(input_list, output_list)\n\n # play game using the trained curr_nn\n play_game(env, curr_nn)",
"def run_simulation((env, simulation)):\n\t# Uses tuple arg instead of 2 args to be used with map function\n\n\tdef log(s):\n\t\ttime = datetime.datetime.now().strftime(\"%a %H:%M\")\n\t\tsys.stdout.write(\"Log %d, %s: %s\\n\" % (os.getpid(), time, s))\n\t\tsys.stdout.flush() # hope this cleans the log\n\n\tstart_time = datetime.datetime.now()\n\tlog(\"Setup %s\" % simulation['name'])\n\t#print \"STATUS:%d: Setup %s\" % (os.getpid(), simulation['name'])\n\n\t# create absolute paths\n\tsimulation['requests'] = os.path.expanduser(simulation['requests'])\n\tsimulation['golden-response'] = os.path.expanduser(simulation['golden-response'])\n\tsimulation['backpressure'] = os.path.expanduser(simulation['backpressure'])\n\n\t# creating directory\n\tDST = env['simdir'] + \"/\" + simulation['name']\n\tif os.path.exists(DST):\n\t\tlog(\"WARNING: Replacing %s\" % DST)\n\t\t#print \"STATUS:%d: WARNING: Replacing %s\" % (os.getpid(), DST)\n\t\tshutil.rmtree(DST)\n\tos.mkdir(DST)\n\n\t# setup simulation\n\tos.symlink(env['xlibs'], DST+\"/modelsim.ini\") \t\t\t\t\t# modelsim.ini\n\tshutil.copy(simulation['requests'], DST+\"/pkt.in.txt\") \t\t\t\t\t# Requests\n\tshutil.copy(simulation['backpressure'], DST+\"/bpr.txt\") \t\t\t\t\t# BPR\n\t#print simulation['simbase'] + \"/bramModel_memArray_V_ram.dat\"\n\t#shutil.copy(env['simbase'] + \"/bramModel_memArray_V_ram.dat\", DST+\"/bramModel_memArray_V_ram.dat\") \t# BRAM Init\n\tdofile = open(DST+\"/simrun.do\", \"w\") \t\t\t\t\t# .do file\n\tdofile.write(kvs_run.replace(\"__RUNTIME__\", simulation['runtime']))\n\tdofile.close()\n\n\t# run simulation\n\tlog(\"Running %s (%s)\" % (simulation['name'], simulation['runtime']))\n\t#print \"STATUS:%d: Running %s (%s)\" % (os.getpid(), simulation['name'], simulation['runtime'])\n\tfnull = open(\"/dev/null\", \"w\")\n\trunresult = subprocess.call(\n\t\t[env['vsim'], '-c', '-do', 'simrun.do'],\n\t\tcwd=DST, stdout=fnull)\n\tfnull.close()\n\n\t# verify result\n\tlog(\"Verifying %s\" % simulation['name'])\n\t#print \"STATUS:%d: Verifying %s\" % (os.getpid(), simulation['name'])\n\tif runresult == 0:\n\t\tvfile = open(DST+\"/verification.txt\", \"w\")\n\t\tgold = simulation['golden-response']\n\t\tsilver = DST+\"/pkt.out.txt\"\n\t\tcmp = os.path.abspath(os.path.dirname(__file__))+\"/memtest_compare.py\"\n\t\tif subprocess.call([sys.executable, cmp, gold, silver], stdout=vfile) == 0:\n\t\t\tsimulation['result'] = 0\n\t\telse:\n\t\t\tsimulation['result'] = 1\n\t\tvfile.close()\n\telse:\n\t\tsimulation['result'] = 2\n\n\tend_time = datetime.datetime.now()\n\tsimulation['clocktime'] = end_time - start_time\n\tlog(\"Finished %s. Took %ds. Status: %d.\" % (\n\t\tsimulation['name'],\n\t\tsimulation['clocktime'].seconds,\n\t\tsimulation['result']\n\t))\n\t\"\"\"\n\tprint \"STATUS:%d: Finished %s. Took %ss. Status: %d.\" % (\n\t\tos.getpid(),\n\t\tsimulation['name'],\n\t\tsimulation['clocktime'].seconds,\n\t\tsimulation['result']\n\t)\n\t\"\"\"\n\n\treturn simulation",
"def main():\n import doctest\n options = (doctest.IGNORE_EXCEPTION_DETAIL | doctest.NORMALIZE_WHITESPACE |\n doctest.ELLIPSIS)\n doctest.testmod(optionflags=options)\n\n print \"\\nRunning unit tests...\\n\"\n import nose\n if nose.run(argv=[\"--with-coverage\", \"pyhand.py\"]):\n print \"\\nPassed all unit tests\"\n\n Simulation().run()",
"def main():\n\t\n\t# start running trials\n\t# save outcomes of trials\n\t\n\tsuccesses = 0\n\t\n\tfor trial in range(1000):\n\t\tavailable_seats = list(range(0, 100))\n\t\tsuccesses += simulate(available_seats)\n\t\n\tprint(successes/(1000))",
"def run_N_body_sim(self, display=False, start_frac=0.0, update_point=100):\n # Save the initial semimajor axis distribution\n self.initial_smaxis_asteroids = (self.find_smaxis_asteroids() /\n self.const.smaxis_jup)\n\n # For live visualization\n if display:\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n plt.pause(4) # Delay for a few seconds\n\n # Perform n_iterations-1 steps (intialization counts for the first step)\n for i in range(int(self.n_iterations) - 1):\n if i%(500/self.time_step) == 0:\n print i*self.time_step\n self.update_asteroid()\n self.update_planet()\n\n # Only do a live feed at certain conditions\n if display:\n if i % update_point == 0 and i > start_frac*(int(self.n_iterations)):\n # Remove the last view so we only see the current asteroid positions\n plt.cla()\n ax.scatter(*self.asteroids_pos.T, c=\"#3399FF\", s=1) # Transpose them\n ax.scatter(*self.jup_pos.T, c=\"#660000\", s=115)\n ax.scatter(*self.sun_pos.T, c=\"#FFA31A\", s=250)\n ax.set_xlim3d(-6, 6)\n ax.set_xlabel(\"X (AU)\")\n ax.set_ylim3d(-6, 6)\n ax.set_ylabel(\"Y (AU)\")\n ax.set_zlim3d(-.1, .1)\n ax.set_zlabel(\"Z (AU)\")\n ax.set_title(\"Iteration: {}, Asteroids: {}\".format(i, len(self.asteroids_pos)))\n fig.canvas.draw()\n plt.pause(0.05)\n if display:\n plt.show()",
"def main():\n run_game(even)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Aggregate the peers sampled
|
def aggregate_peer_samples(self):
with open("peer_samples.csv", "w") as out_file:
out_file.write("peer_id\n")
for _, filename, _ in self.yield_files('peer_samples.csv'):
with open(filename) as samples_file:
for peer_sample in samples_file.readlines():
out_file.write(peer_sample)
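One thing worth noting: readlines() copies the per-node files verbatim, so if each per-node peer_samples.csv starts with its own peer_id header, those header rows end up inside the aggregate as data. The variant below is a hedged sketch of a header-aware version; it assumes the per-node files begin with a header line, which the original snippet does not confirm, and it reuses the same self.yield_files helper.

import csv

def aggregate_peer_samples_skipping_headers(self):
    # Sketch of a header-aware variant; assumes each per-node CSV begins with
    # a "peer_id" header row that should appear only once in the output.
    with open("peer_samples.csv", "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["peer_id"])
        for _, filename, _ in self.yield_files("peer_samples.csv"):
            with open(filename, newline="") as samples_file:
                reader = csv.reader(samples_file)
                next(reader, None)  # skip the per-node header, if any
                writer.writerows(reader)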
|
[
"def aggregate_weights(self, clients_params):",
"def get_sender_set_distribution_full(self):\n sender_set = self.get_sender_set()\n distro_set = {}\n total = len(sender_set)\n for node in sender_set:\n distro_set[node] = 1.0 / + total\n\n return distro_set",
"def test_numpeers(self):\n #clear the peers list. \n tracker.info_hash_to_peers.clear()\n #add 49 peers\n for i in range(49):\n params = TEST_DEFAULTS()\n params[\"peer_id\"] += str(i)\n send_test_params(params)\n\n #send without a started event\n params = TEST_DEFAULTS()\n del params[\"event\"]\n result = send_test_params(params)\n\n #check we got 49 peers back\n peers = result[b\"peers\"]\n self.assertTrue(len(peers) == 49)\n\n #add another peer\n params = TEST_DEFAULTS()\n params[\"peer_id\"] += str(50)\n send_test_params(params)\n\n #send without a started event\n params = TEST_DEFAULTS()\n del params[\"event\"]\n result = send_test_params(params)\n\n #check we got 50 peers back\n peers = result[b\"peers\"]\n self.assertTrue(len(peers) == 50)\n\n #set numwant to 25, & check we get 25 peers back\n params = TEST_DEFAULTS()\n params[\"numwant\"] = 25\n del params[\"event\"]\n result = send_test_params(params)\n peers = result[b\"peers\"]\n self.assertTrue(len(peers) == 25)",
"def resample_particles(self):\n n = self.weights.shape[0]\n cyclic_weights = np.zeros(n, dtype=np.float)\n for i in range(n-1):\n cyclic_weights[i+1] = cyclic_weights[i] + self.weights[i]\n\n sample_generators = np.random.uniform(size=self.num_particles) * cyclic_weights.max()\n\n sorted_sample_generators = np.sort(sample_generators)\n\n particle_samples = np.zeros_like(self.particles, dtype=np.float)\n weights = np.zeros_like(self.weights)\n cyclic_weights_index = 0\n for i in range(n):\n sample_generator = sorted_sample_generators[i]\n while sample_generator > cyclic_weights[cyclic_weights_index]:\n cyclic_weights_index += 1\n particle_samples[i] = self.particles[cyclic_weights_index]\n weights[i] = self.weights[cyclic_weights_index]\n\n return particle_samples, weights",
"def inet(n,alpha,tau):\n G= nx.MultiGraph()\n degree = {}\n full_nodes = []\n connected_nodes = []\n unconnected_nodes = range(n)\n sum_deg = 0\n \n for i in range(n):\n G.add_node(i)\n degree[i] = rand_pl(alpha,1)\n sum_deg += degree[i]\n\n \n deg_sort = sorted([(degree[i],i) for i in range(n)],reverse=True)\n top_tau = [deg_sort[i][1] for i in range(tau)]\n\n for i in range(tau):\n connected_nodes.append(top_tau[i])\n unconnected_nodes.remove(top_tau[i])\n degree[top_tau[i]] -= (tau-1)\n for j in range(i+1,tau):\n G.add_edge(top_tau[i],top_tau[j])\n sum_deg -= 2\n\n\n deg_two_nodes = [i for i in range(n) if degree[i] == 2]\n \n for t in top_tau:\n for j in range(int(degree[t]*0.25)):\n try:\n x = random.choice(deg_two_nodes)\n except:\n break\n G.add_edge(t,x)\n deg_two_nodes.remove(x)\n degree[t] -= 1\n degree[x] -= 1\n sum_deg -= 2\n connected_nodes.append(x)\n unconnected_nodes.remove(x)\n \n while not (unconnected_nodes == []):\n u = random.choice(unconnected_nodes)\n v = random.choice(connected_nodes)\n if not (degree[v]==0):\n G.add_edge(u,v)\n connected_nodes.append(u)\n unconnected_nodes.remove(u)\n degree[u] -= 1\n degree[v] -= 1\n sum_deg -= 2\n if degree[u] == 0:\n connected_nodes.remove(u)\n full_nodes.append(u)\n if degree[v] == 0:\n connected_nodes.remove(v)\n full_nodes.append(v)\n\n num_repeats = 0\n while not (connected_nodes == []):\n if len(connected_nodes) % 1 == 0:\n print(len(connected_nodes))\n u = random.choice(connected_nodes)\n v = random.choice(connected_nodes)\n #print(connected_nodes)\n #print(G.edges(connected_nodes))\n if not(u==v) and not G.has_edge(u,v):\n sum_deg -= 2\n G.add_edge(u,v)\n degree[v] -= 1\n degree[u] -= 1\n if degree[u] == 0:\n connected_nodes.remove(u)\n full_nodes.append(u)\n if degree[v] == 0:\n connected_nodes.remove(v)\n full_nodes.append(v)\n elif (u==v) and len(connected_nodes) ==1:\n G.add_edge(u,v)\n degree[u] -= 2\n connected_nodes.remove(u)\n full_nodes.append(u)\n sum_deg -= 2\n elif G.has_edge(u,v) and num_repeats < 10: # This is definitely a hack\n num_repeats += 1\n elif G.has_edge(u,v) and num_repeats >= 10:\n num_repeats = 0\n G.add_edge(u,v)\n degree[v] -= 1\n degree[u] -= 1\n sum_deg -= 2\n if degree[u] == 0:\n connected_nodes.remove(u)\n full_nodes.append(u)\n if degree[v] == 0:\n connected_nodes.remove(v)\n full_nodes.append(v)\n return G",
"def connect_samples(runs, test):\n result = defaultdict(list)\n for run in runs:\n if not test(run):\n continue\n sample = get_sample(run)\n result[sample].append(run)\n return result",
"def sampling_neighbor(batch, full_graph, n_users, num_neighbors=8, num_items=4):\n users_id = np.unique(batch[:, 0:1]).tolist()\n items_id = np.unique(batch[:, 1:2]).tolist()\n users_map = dict()\n items_map = dict()\n G = nx.DiGraph()\n\n # Add user nodes and items which friend bought\n num_node_friends, num_node_items_friend_bought = 0, 0\n\n for user in range(1, len(users_id)+1):\n G.add_node(user, id=user, label='user')\n users_map.update({users_id[user-1] : user})\n cnt_friend = 0\n friends = [n for n in full_graph.neighbors(user) if n <= n_users]\n\n while cnt_friend < len(friends) and cnt_friend < num_neighbors:\n friend_node = user + num_node_friends + len(users_id) + len(items_id)\n G.add_node(friend_node, label='user')\n friend = friends[cnt_friend]\n cnt_friend += 1\n num_node_friends += 1\n items_friend_bought = [n for n in full_graph.neighbors(friend) if n > n_users]\n cnt_item = 0\n while cnt_item < len(items_friend_bought) and cnt_item < num_items:\n item_node = user + 4 * len(users_id) + len(items_id) + num_node_items_friend_bought\n G.add_node(item_node, label='item')\n item = items_friend_bought[cnt_item]\n G.add_edge(friend_node, item_node, rating=full_graph[friend][item])\n cnt_item += 1\n num_node_items_friend_bought += 1\n\n cnt = 0\n for item in range(len(users_id), len(users_id) + len(items_id)):\n G.add_node(item, id=item, label='item')\n items_map.update({items_id[cnt] + len(users_id) : item})\n cnt += 1\n\n train_set = []\n for data in batch:\n u = data[0]\n v = data[1] + len(users_id)\n G.add_edge(u, v, rating=data[3])\n train_set.append([users_map.get(int(u)), items_map.get(int(v)), data[3]])\n\n return G, torch.LongTensor(train_set)",
"def advertisement_simulations(active_users, first_monday, last_monday):\n friend_map_file = open(PREFIX_DAT+\"friend_map.obj\", \"r\")\n friend_map = pickle.load(friend_map_file)\n\n total_users = len(active_users) \n\n # ad params\n\n # do very basic advertising, randomly select store, \n locations = Location.objects.filter( type=Location.EATERY )\n loc_ids = [l.id for l in locations]\n total_locs = locations.count()\n # distribution for locations\n mu = total_locs/2\n sigma = total_locs/4 \n\n # distribution for users\n mu_recv = total_users/2\n sigma_recv = total_users/4\n\n # randomly send to 5, 10, 15, 20 customers\n num_target = 5\n cost_per_ad = 1\n # Generate advertisements, time step through time every 30 minutes\n timestep = 60\n\n d = first_monday+timedelta(days=49)\n first_day = first_monday+timedelta(days=49) \n last_day = first_monday+timedelta(days=56)\n while d < last_day:\n\n # if between 8 AM and 11:00 PM\n if d.hour < 7 and d.hour > 23:\n d += timedelta(minutes=timestep)\n continue\n\n # pick a location index num from a distribution\n num_advertisers = np.random.randint(0,10)\n\n non_overlap_users = list(active_users)\n\n #rand_ind = np.random.normal(mu, sigma, num_advertisers)\n rand_ind = np.random.uniform(0,total_locs, num_advertisers)\n for loc_ind in rand_ind:\n # for set of randomly selected locations\n i = int(loc_ind)\n if i < 0:\n i = 0\n if i > total_locs - 1:\n i = total_locs - 1\n ad_loc = locations[i] \n\n # create offer RANDOM_ALL 50% of users\n o = Offer(location=ad_loc, ad_strategy=Offer.RANDOM_ALL, timestamp=d) \n o.save()\n\n # distribution of ads (strategy ra = 50% of population) \n rand_ind = np.random.normal(mu_recv, sigma_recv, total_users/2)\n rand_ind = np.random.uniform(0,total_users, total_users/2)\n for user_ind in rand_ind:\n j = int(user_ind)\n if j < 0:\n j = 0\n if j > total_users - 1:\n j = total_users - 1\n\n # distribute\n if DEBUG:\n print \"User index: %d\"%j\n print \"User ID: %d\"%active_users[j]\n c = OfferCode(offer=o, user=OTNUser.objects.get(id=active_users[j]), timestamp=d)\n c.save()\n \n # create offer for RANDOM_SPOTTY\n o = Offer(location=ad_loc, ad_strategy=Offer.RANDOM_SPOTTY, timestamp=d) \n o.save()\n\n # distribution of ads (strategy rs = fixed number of population) \n #rand_ind = np.random.normal(mu_recv, sigma_recv/2, num_target)\n rand_ind = np.random.uniform(0,total_users, num_target)\n for user_ind in rand_ind:\n j = int(user_ind)\n if j < 0:\n j = 0\n if j > total_users - 1:\n j = total_users - 1\n\n # distribute\n if DEBUG:\n print \"User index: %d\"%j\n print \"User ID: %d\"%active_users[j]\n c = OfferCode(offer=o, user=OTNUser.objects.get(id=active_users[j]), timestamp=d)\n c.save()\n \n # create offer for RANDOM_NONOVERLAP\n o = Offer(location=ad_loc, ad_strategy=Offer.RANDOM_NONOVERLAP, timestamp=d) \n o.save()\n\n # distribution of ads (strategy rn = fixed number of population non-overlapping) \n num_users = len(non_overlap_users)\n target_users = set() \n #rand_ind = np.random.normal(mu_recv, sigma_recv/2, num_target)\n rand_ind = np.random.uniform(0,num_users, num_target)\n for user_ind in rand_ind:\n j = int(user_ind)\n if j < 0:\n j = 0\n if j > num_users - 1:\n j = num_users - 1\n\n target_users.add( non_overlap_users[j] )\n\n # distribute\n for uid in target_users:\n # remove users so that it does not overlap for next ad\n non_overlap_users.pop(non_overlap_users.index(uid))\n u = OTNUser.objects.get(id=uid)\n c = OfferCode(offer=o, user=u, timestamp=d) \n c.save()\n\n # create offer for 
RANDOM_REFERRAL\n o = Offer(location=ad_loc, ad_strategy=Offer.RANDOM_REFERRAL, timestamp=d) \n o.save()\n\n n_friends = 1\n # distribution of ads via social referral\n for uid in target_users:\n u = OTNUser.objects.get(id=uid)\n c = OfferCode(offer=o, user=u, timestamp=d) \n c.save()\n\n # choose random number of friends with some probability\n forward = random.randint(0,1) \n if forward == 1 and len(friend_map[uid][\"first\"]) > 0:\n fids = random.sample(friend_map[uid][\"first\"], n_friends)\n for f in fids:\n friend = OTNUser.objects.get(id=f)\n c = OfferCode(offer=o, user=friend, timestamp=d)\n c.save()\n\n\n # create offer for TARGET_BEHAVIORAL\n o = Offer(location=ad_loc, ad_strategy=Offer.TARGET_BEHAVIORAL, timestamp=d) \n o.save()\n\n # pick some\n target_users = TechCashTransaction.objects.filter(location=ad_loc, timestamp__range=(d-timedelta(days=14),d)).values_list('user', flat=True).distinct() \n num_users = random.randint(num_target, num_target*2) \n if len(target_users) > num_users:\n target_users = random.sample(target_users, num_users)\n for uid in target_users:\n u = OTNUser.objects.get(id=uid)\n c = OfferCode(offer=o, user=u, timestamp=d)\n c.save()\n \"\"\"\n # additional random users\n rand_users = set(active_users)-set(target_users)\n # num_users determines probability of the location\n rand_users = random.sample(list(rand_users), num_users)\n for uid in rand_users:\n u = OTNUser.objects.get(id=uid)\n c = OfferCode(offer=o, user=u, timestamp=d)\n c.save()\n \"\"\"\n\n # create offer for TARGET_REFERRAL\n o = Offer(location=ad_loc, ad_strategy=Offer.TARGET_REFERRAL, timestamp=d) \n o.save()\n\n # pick some\n n_friends = 1\n for uid in target_users:\n u = OTNUser.objects.get(id=uid)\n c = OfferCode(offer=o, user=u, timestamp=d)\n c.save()\n\n # choose random number of friends with some probability\n forward = random.randint(0,1) \n if forward == 1 and len(friend_map[uid][\"first\"]) > 0:\n fids = random.sample(friend_map[uid][\"first\"], n_friends)\n for f in fids:\n friend = OTNUser.objects.get(id=f)\n c = OfferCode(offer=o, user=friend, timestamp=d)\n c.save()\n\n d += timedelta(minutes=timestep)\n\n # by seeing if any person who received didn't go to store (spam)\n\n # by seeing if any person who received ad went to another store (timely, but miss)\n\n # by seeing if any person who received ad went to the location (redemption) \n\n # Future: by seeing if any person who received didn't go to store opened app (desired spam)\n\n friend_map_file.close()",
"def _sample_proportional(self):\n indices = []\n p_total = self.sum_tree.sum(0, len(self)-1)\n\n segment = p_total / self.batch_size\n\n for i in range(self.batch_size):\n a = segment * i\n b = segment * (i+1)\n upperbound = random.uniform(a, b)\n idx = self.sum_tree.retrieve(upperbound)\n indices.append(idx)\n\n return indices",
"def RefreshPeers(plcs):\n\n for plc in plcs:\n for peer in plcs:\n if peer == plc:\n continue\n\n print plc.config.PLC_NAME, \"refreshing\", peer.config.PLC_NAME\n plc.RefreshPeer(peer.config.PLC_NAME)\n\n peer_id = plc.GetPeers([peer.config.PLC_NAME])[0]['peer_id']\n\n peer_sites = todict(plc.GetSites({'peer_id': peer_id}), 'site_id')\n sites_at_peer = todict(peer.GetSites(), 'site_id')\n\n peer_keys = todict(plc.GetKeys({'peer_id': peer_id}), 'key_id')\n keys_at_peer = todict(peer.GetKeys(), 'key_id')\n\n peer_persons = todict(plc.GetPersons({'peer_id': peer_id}), 'person_id')\n persons_at_peer = todict(peer.GetPersons(), 'person_id')\n\n peer_nodes = todict(plc.GetNodes({'peer_id': peer_id}), 'node_id')\n nodes_at_peer = todict(peer.GetNodes(), 'node_id')\n\n our_nodes = todict(plc.GetNodes({'peer_id': None}), 'node_id')\n our_peer_id_at_peer = peer.GetPeers([plc.config.PLC_NAME])[0]['peer_id']\n our_nodes_at_peer = todict(peer.GetNodes({'peer_id': our_peer_id_at_peer,\n 'peer_node_id': our_nodes.keys()}), 'peer_node_id')\n\n peer_slices = todict(plc.GetSlices({'peer_id': peer_id}), 'peer_slice_id')\n slices_at_peer = todict(peer.GetSlices(), 'slice_id')\n \n for site_id, site in peer_sites.iteritems():\n # Verify that this site exists at the peer\n peer_site_id = site['peer_site_id']\n assert peer_site_id in sites_at_peer\n peer_site = sites_at_peer[peer_site_id]\n\n # And is the same\n for field in ['name', 'abbreviated_name', 'login_base', 'is_public',\n 'latitude', 'longitude', 'url',\n 'max_slices', 'max_slivers',]:\n assert site[field] == peer_site[field]\n\n for key_id, key in peer_keys.iteritems():\n # Verify that this key exists at the peer\n peer_key_id = key['peer_key_id']\n assert peer_key_id in keys_at_peer\n peer_key = keys_at_peer[peer_key_id]\n\n # And is the same\n for field in ['key_type', 'key']:\n assert key[field] == peer_key[field]\n\n for person_id, person in peer_persons.iteritems():\n # Verify that this user exists at the peer\n peer_person_id = person['peer_person_id']\n assert peer_person_id in persons_at_peer\n peer_person = persons_at_peer[peer_person_id]\n\n # And is the same\n for field in ['first_name', 'last_name', 'title', 'email', 'phone',\n 'url', 'bio', 'enabled']:\n assert person[field] == peer_person[field]\n\n for key_id in person['key_ids']:\n # Verify that the user is not associated with any local keys\n assert key_id in peer_keys\n key = peer_keys[key_id]\n peer_key_id = key['peer_key_id']\n\n # Verify that this key exists at the peer\n assert peer_key_id in keys_at_peer\n peer_key = keys_at_peer[peer_key_id]\n\n # And is related to the same user at the peer\n assert peer_key['key_id'] in peer_person['key_ids']\n\n for node_id, node in peer_nodes.iteritems():\n # Verify that this node exists at the peer\n peer_node_id = node['peer_node_id']\n assert peer_node_id in nodes_at_peer\n peer_node = nodes_at_peer[peer_node_id]\n\n # And is the same\n for field in ['boot_state', 'ssh_rsa_key', 'hostname',\n 'version', 'model']:\n assert node[field] == peer_node[field]\n\n # Verify that the node is not associated with any local sites\n assert node['site_id'] in peer_sites\n site = peer_sites[node['site_id']]\n\n # Verify that this site exists at the peer\n peer_site_id = site['peer_site_id']\n assert peer_site_id in sites_at_peer\n peer_site = sites_at_peer[peer_site_id]\n\n # And is related to the same node at the peer\n assert peer_site['site_id'] == peer_node['site_id']\n\n for slice_id, slice in peer_slices.iteritems():\n # Verify that this slice 
exists at the peer\n peer_slice_id = slice['peer_slice_id']\n assert peer_slice_id in slices_at_peer\n peer_slice = slices_at_peer[peer_slice_id]\n\n # And is the same\n for field in ['name', 'instantiation', 'url', 'description',\n 'max_nodes', 'expires']:\n assert slice[field] == peer_slice[field]\n\n for node_id in slice['node_ids']:\n # Verify that the slice is associated only with\n # the peer's own nodes, or with our nodes as\n # last cached by the peer.\n assert node_id in peer_nodes or node_id in our_nodes_at_peer\n if node_id in peer_nodes:\n node = peer_nodes[node_id]\n peer_node_id = node['peer_node_id']\n elif node_id in our_nodes_at_peer:\n peer_node = our_nodes_at_peer[node_id]\n peer_node_id = peer_node['node_id']\n\n # Verify that this node exists at the peer\n assert peer_node_id in nodes_at_peer\n\n # And is related to the same slice at the peer\n assert peer_node_id in peer_slice['node_ids']",
"def genRandomAgg(num_host, fn_out):\n if not fn_out:\n return\n f_out = open(fn_out, \"w\")\n src, dst, flow, oct = 0, 0, 0, 1\n t = ()\n #hidDic = {} #{host: hostid}\n hostPairSet=set()\n num_flow = min([100000, num_host*5])\n max_flow_weight = min([10000,num_flow])\n try:\n for i in xrange(num_flow):\n if (random.randint(1,10)<=9):\n src, dst = random.randint(1,num_host/10),random.randint(1,num_host/10)\n flow = random.randint(max_flow_weight/10,max_flow_weight)\n else:\n src, dst = random.randint(num_host/10,num_host),random.randint(num_host/10,num_host)\n flow = random.randint(1,max_flow_weight/10)\n t = tuple(sorted([src, dst]))\n while src == dst or t in hostPairSet:\n if (random.randint(1,10)<=9):\n src, dst = random.randint(1,num_host/10),random.randint(1,num_host/10)\n flow = random.randint(max_flow_weight/10,max_flow_weight)\n else:\n src, dst = random.randint(num_host/10,num_host),random.randint(num_host/10,num_host)\n flow = random.randint(1,max_flow_weight/10)\n t = tuple(sorted([src, dst]))\n hostPairSet.add(t)\n f_out.write(\"%u %u %u %u\\n\" % (src, dst, flow, oct))\n print \" %d flows are generated, with %d hosts\" %(num_flow,num_host)\n finally:\n f_out.close()",
"def stochasticUniversalSampling(self):\n index = 0\n fitnessArr = {}\n for ind_i in self.population:\n fitnessArr[index] = ind_i.getFitness()\n index+=1\n \n maxFitness = max(fitnessArr.values())\n\n for k, fitness in fitnessArr.items():\n transFitness = (maxFitness - fitness)+1\n fitnessArr[k] = transFitness\n\n totalTransFitness = sum(fitnessArr.values())\n for k, transFitness in fitnessArr.items():\n selectionProbability = round(transFitness / totalTransFitness, 8)\n fitnessArr[k] = selectionProbability\n \n p = 1 / self.popSize\n startPoint = random.uniform(0,p)\n matingPool = []\n for i in range(0,self.popSize):\n rulerPoint = startPoint*(i+1)\n popIndex = 0\n selctProb = 0\n for index, probability in fitnessArr.items():\n selctProb = selctProb + probability\n if rulerPoint < selctProb:\n popIndex = index \n break\n matingPool.append(self.population[popIndex])\n \n self.matingPool = matingPool\n pass",
"def sample(self, base_samples):\n samples = base_samples\n for flow in self.flows:\n samples = flow(samples)\n return samples",
"def testAssignParticipants():\n\tdef createFrequencyMap(participants):\n\t\t\"\"\"\n\t\tCreates a map of participant to how many times each other person received\n\t\ta gift from them.\n\t\t\"\"\"\n\t\tfrequencyMap = {}\n\t\tfor participant in participants:\n\t\t\trecipientList = participants.copy()\n\t\t\trecipientList.remove(participant)\n\n\t\t\tfrequencyMap[participant] = {}\n\t\t\tfor recipient in recipientList:\n\t\t\t\tfrequencyMap[participant][recipient] = 0\n\t\treturn frequencyMap\n\n\tparticipants = ['Albert', 'Becky', 'Chad', 'Diane', 'Eliza']\n\n\tfrequencyMap = createFrequencyMap(participants)\n\n\t# Assign Secret Santas for test list 10000 times and increment each persons'\n\t# recipient frequency map each time\n\tfor x in range(0, 10000):\n\t\tsecretSanta = assignParticipants(participants)\n\t\tfor giver, recipient in secretSanta.items():\n\t\t\tfrequencyMap[giver][recipient] += 1\n\n\t# Print frequency map results\n\t#\n\t# If everything works as intended, there should be a relatively even\n\t# distribution of givers and the number of times they had each other person\n\t# as their recipient.\n\tfor giver, recipientTimes in frequencyMap.items():\n\t\tprint('#######################')\n\n\t\tprint(f'Giver: {giver}')\n\t\tprint()\n\t\tfor recipient, times in recipientTimes.items():\n\t\t\tprint (f'{recipient}: {str(times)} times')",
"def resample(self, weights):\r\n\t\tnew_particles = []\r\n\t\tmax_weight = max(weights)\r\n\t\tindex = random.randint(0, len(self.particles) - 1)\r\n\t\toffset = 0.0\r\n\t\tfor i in range(len(self.particles)):\r\n\t\t\toffset += random.uniform(0, 2.0 * max_weight)\r\n\t\t\twhile offset > weights[index]:\r\n\t\t\t\toffset -= weights[index]\r\n\t\t\t\tindex = (index + 1) % len(weights)\r\n\t\t\t\t\r\n\t\t\tnew_particles.append(copy.deepcopy(self.particles[index]))\r\n\t\treturn new_particles",
"def make_receiver_seeds(self, device):\n shape = self.num_nodes, 1, self.receiver_seed_dim\n base = torch.randn(*shape, device=device)\n base = base.repeat(1, self.receivers_per_node, 1)\n shape = self.num_nodes, self.receivers_per_node, \\\n self.receiver_seed_dim\n noise = torch.randn(*shape, device=device) * self.receiver_seed_noise\n return base + noise",
"def pubMutGenerator(n,size_par,mean_depth,purity):\n prob_par=size_par*1.0/(size_par+mean_depth)\n mean_af = 0.5*purity\n depth_pub = []\n vaf_pub = []\n for k in range(0,n):\n correct = 0\n while correct == 0:\n site_depth = np.random.negative_binomial(size_par,prob_par)\n if site_depth >= 15:\n correct =1\n var_reads = np.random.binomial(site_depth,mean_af)\n site_vaf = var_reads*1.0/site_depth\n depth_pub += [site_depth]\n vaf_pub += [site_vaf]\n return depth_pub,vaf_pub",
"def test_random_partitions_by_sampling(self):\n self.run_test_random_partitions(spn.utils.random_partitions_by_sampling,\n balanced=False)\n self.run_test_random_partitions(spn.utils.random_partitions_by_sampling,\n balanced=True)",
"def _get_sampled_edges(self, attended_nodes, num_neighbors: int = 20,\n query_src_ts_emb=None,\n query_rel_emb=None, tc=None):\n if tc:\n t_start = time.time()\n src_idx_l = attended_nodes[:, 1]\n cut_time_l = attended_nodes[:, 2]\n node_idx_l = attended_nodes[:, 3]\n tuple2index = {(eg, src, ts):idx for eg, src, ts, idx in attended_nodes}\n\n src_ngh_node_batch, src_ngh_eidx_batch, src_ngh_t_batch = self.ngh_finder.get_temporal_neighbor(\n src_idx_l,\n cut_time_l,\n num_neighbors=num_neighbors)\n\n if self.ngh_finder.sampling == -1: # full neighborhood, select neighbors with largest attention score\n assert query_src_ts_emb is not None\n assert query_rel_emb is not None\n selected_src_ngh_node_batch = []\n selected_src_ngh_eidx_batch = []\n selected_src_ngh_t_batch = []\n with torch.no_grad():\n for i in range(len(src_ngh_eidx_batch)):\n src_ngh_nodes = src_ngh_eidx_batch[i]\n if sum(src_ngh_nodes != -1) > num_neighbors:\n\n mask = (src_ngh_nodes != -1)\n src_ngh_nodes = src_ngh_nodes[mask]\n src_ngh_eidx = src_ngh_eidx_batch[i][mask]\n src_ngh_t = src_ngh_t_batch[i][mask]\n src_node_embed = self.get_node_emb(np.array([src_idx_l[i]]*len(src_ngh_nodes)),\n np.array([cut_time_l[i]]*len(src_ngh_nodes)),\n np.array([attended_nodes[i, 0]*len(src_ngh_nodes)]))\n ngh_node_embed = self.get_node_emb(src_ngh_nodes, src_ngh_t, np.array([attended_nodes[i, 0]*len(src_ngh_nodes)]))\n rel_emb = self.get_rel_emb(src_ngh_eidx, self.device)\n query_src_vec, query_rel_vec, query_time_vec = self.att_flow.context_dim_red(query_src_ts_emb,\n query_rel_emb)\n\n att_scores = self.att_flow.cal_attention_score(np.ones(len(src_ngh_nodes))*attended_nodes[i, 0], src_node_embed, ngh_node_embed, rel_emb, query_src_vec, query_rel_vec, query_time_vec)\n _, indices = torch.topk(att_scores, num_neighbors)\n indices = indices.cpu().numpy()\n indices_sorted_by_timestamp = sorted(indices, key=lambda x: (src_ngh_t[x], src_ngh_nodes[x], src_ngh_eidx[x]))\n selected_src_ngh_node_batch.append(src_ngh_nodes[indices_sorted_by_timestamp])\n selected_src_ngh_eidx_batch.append(src_ngh_eidx[indices_sorted_by_timestamp])\n selected_src_ngh_t_batch.append(src_ngh_t[indices_sorted_by_timestamp])\n else:\n selected_src_ngh_node_batch.append(src_ngh_nodes[-num_neighbors:])\n selected_src_ngh_eidx_batch.append(src_ngh_eidx_batch[i][-num_neighbors:])\n selected_src_ngh_t_batch.append(src_ngh_t_batch[i][-num_neighbors:])\n src_ngh_node_batch = np.stack(selected_src_ngh_node_batch)\n src_ngh_eidx_batch = np.stack(selected_src_ngh_eidx_batch)\n src_ngh_t_batch = np.stack(selected_src_ngh_t_batch)\n\n # add selfloop\n src_ngh_node_batch = np.concatenate([src_ngh_node_batch, src_idx_l[:, np.newaxis]], axis=1)\n src_ngh_eidx_batch = np.concatenate(\n [src_ngh_eidx_batch, np.array([[self.selfloop] for _ in range(len(attended_nodes))], dtype=np.int32)],\n axis=1)\n src_ngh_t_batch = np.concatenate([src_ngh_t_batch, cut_time_l[:, np.newaxis]], axis=1)\n # removed padded neighbors, with node idx == rel idx == -1\n src_ngh_node_batch_flatten = src_ngh_node_batch.flatten()\n src_ngh_eidx_batch_flatten = src_ngh_eidx_batch.flatten()\n src_ngh_t_batch_faltten = src_ngh_t_batch.flatten()\n eg_idx = np.repeat(attended_nodes[:, 0], num_neighbors + 1)\n mask = src_ngh_node_batch_flatten != -1\n\n sampled_edges = np.stack([eg_idx,\n np.repeat(src_idx_l, num_neighbors + 1), np.repeat(cut_time_l, num_neighbors + 1), \\\n src_ngh_node_batch_flatten, src_ngh_t_batch_faltten, \\\n src_ngh_eidx_batch_flatten, np.repeat(node_idx_l, num_neighbors + 1)], 
axis=1)[mask]\n\n # index new selected nodes\n target_nodes_index = []\n new_sampled_nodes = []\n for eg, node, edge in sampled_edges[:, [0,3,4]]:\n if (eg, node, edge) in tuple2index:\n target_nodes_index.append(tuple2index[(eg, node, edge)])\n else:\n tuple2index[(eg, node, edge)] = self.num_existing_nodes\n target_nodes_index.append(self.num_existing_nodes)\n new_sampled_nodes.append([eg, node, edge, self.num_existing_nodes])\n self.num_existing_nodes += 1\n\n sampled_edges = np.concatenate([sampled_edges, np.array(target_nodes_index)[:, np.newaxis]], axis=1)\n # new_sampled_nodes = sampled_edges[:, [0, 3, 4, 7]]\n # sampled_nodes = np.array([[*k, v] for k, v in tuple2index.items()])\n # sampled_nodes.view('i8,i8,i8,i8').sort(order=['f3'], axis=0)\n new_sampled_nodes = sorted(new_sampled_nodes, key=lambda x: x[-1])\n new_sampled_nodes = np.array(new_sampled_nodes)\n\n if tc:\n tc['graph']['sample'] += time.time() - t_start\n return sampled_edges, new_sampled_nodes"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Save cpu in the database. This method is called for every item pipeline component.
|
def process_item(self, item, spider):
session = self.Session()
cpu = models.CPU(**item)
try:
session.add(cpu)
session.commit()
except:
session.rollback()
raise
finally:
session.close()
return item
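For context, a Scrapy item pipeline like this only runs if it is enabled in the project settings, and models.CPU is expected to be a SQLAlchemy mapped class whose columns match the item fields. The snippet below is an illustrative sketch, not taken from the source: the module path cpu_scraper.pipelines.CPUPipeline, the priority 300, and the CPU columns are all assumptions.

# settings.py (Scrapy): switch the pipeline on with a priority between 0 and 1000.
ITEM_PIPELINES = {
    "cpu_scraper.pipelines.CPUPipeline": 300,
}

# models.py (SQLAlchemy): a minimal CPU model the pipeline could map items onto.
from sqlalchemy import Column, Float, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class CPU(Base):
    __tablename__ = "cpu"
    id = Column(Integer, primary_key=True)
    name = Column(String)   # e.g. the scraped product name
    price = Column(Float)   # e.g. the scraped price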
|
[
"def cpu(self, cpu):\n if cpu is None:\n raise ValueError(\"Invalid value for `cpu`, must not be `None`\") # noqa: E501\n\n self._cpu = cpu",
"def ex_set_vm_cpu(self, vapp_or_vm_id, vm_cpu):\r\n self._validate_vm_cpu(vm_cpu)\r\n self._change_vm_cpu(vapp_or_vm_id, vm_cpu)",
"def _set_cpu(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_cpu_vnf_bd__scenario_nodes_resources_cpu, is_container='container', yang_name=\"cpu\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"cpu must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_cpu_vnf_bd__scenario_nodes_resources_cpu, is_container='container', yang_name=\"cpu\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__cpu = t\n if hasattr(self, '_set'):\n self._set()",
"def save(self, *args, **kwargs):\n self.lineitem_total = self.comp.price * self.quantity\n super().save(*args, **kwargs)",
"def addHardware( self, hwItem ):\n\n\n\t\tquery = \"\"\"insert into game_hardware values( %s, %s )\"\"\"\n\t\tself.csr.execute( query, ( hwItem['asin'], hwItem['item_name'] ) )\n\n\t\t# removal any hardware that might be on the games table. (done to get around trigger permission problems on prod.)\n\t\tself.removeGame( hwItem['asin'] )",
"def monitor_cpu(self) -> None:\n last_write = 0\n _ = psutil.cpu_percent()\n _ = self.process.cpu_percent()\n system_usage = list()\n process_usage = list()\n\n process_cpu_times = self.process.cpu_times()\n process_times = dict()\n for field in process_cpu_times._fields:\n if not field.startswith(\"children_\"):\n process_times[field] = list()\n\n cpu_infos = {\n \"system_usage\": system_usage,\n \"process_usage\": process_usage,\n \"process_times\": process_times,\n \"time_step\": self.time_step\n }\n\n while not self.stop_event.is_set():\n time.sleep(self.time_step)\n system_usage.append(psutil.cpu_percent())\n process_usage.append(self.process.cpu_percent())\n process_cpu_times = self.process.cpu_times()\n\n for k in process_times.keys():\n process_times[k].append(getattr(process_cpu_times, k))\n\n if time.time() >= last_write + self.write_interval:\n self._log_to_file(cpu_infos)\n last_write = time.time()\n self._log_to_file(cpu_infos)",
"def on_cpu_change(self, cpu, add):\n if not isinstance(cpu, baseinteger):\n raise TypeError(\"cpu can only be an instance of type baseinteger\")\n if not isinstance(add, bool):\n raise TypeError(\"add can only be an instance of type bool\")\n self._call(\"onCPUChange\",\n in_p=[cpu, add])",
"def append_vm_data_remotely(self, db, data):\n db.insert_vm_cpu_mhz(data)",
"def save_sensors(self):\n for sensor in self.sensors:\n sensor.save()",
"def addCPU(self, core, speed, desc):\n self.cpus[core] = {\n \"Speed\" : speed,\n \"Description\" : desc,\n }\n return",
"def set_number_used_cores(job):\n\n pilot_user = os.environ.get('PILOT_USER', 'generic').lower()\n cpu = __import__('pilot.user.%s.cpu' % pilot_user, globals(), locals(), [pilot_user], 0) # Python 2/3\n cpu.set_core_counts(job)",
"def save(self, *args, **kwargs):\n self._update_fields()\n super(Monitor, self).save(*args, **kwargs)",
"def hot_plug_cpu(self, cpu):\n if not isinstance(cpu, baseinteger):\n raise TypeError(\"cpu can only be an instance of type baseinteger\")\n self._call(\"hotPlugCPU\",\n in_p=[cpu])",
"def save(self):\n for workunit in self.workunits.values():\n workunit.save()",
"def set_affine_cpu(self, cpu_dev):\n if self.affine_cpu != cpu_dev:\n assert self.affine_cpu is None, \"set_affine_cpu() called twice with different devices\"\n assert cpu_dev.type == CS_DEVTYPE_CORE\n self.affine_cpu = cpu_dev\n cpu_dev.affine_devices.append(self)\n if self.cpu_number is None:\n self.cpu_number = cpu_dev.cpu_number",
"def save(self, context=None):\n updates = self.obj_get_changes()\n self.dbapi.update_bay(self.uuid, updates)\n\n self.obj_reset_changes()",
"def set_task_cpu(\n self,\n data: Dict[str, Any],\n container_data: List[Dict[str, Any]],\n source: Dict[str, Any] = None\n ) -> None:\n if not source:\n source = self.data\n cpu_required = self._get_container_cpu_usage(container_data)\n if self.is_fargate():\n cpu = self._set_fargate_task_cpu(cpu_required, source=source)\n else:\n cpu = self._set_ec2_task_cpu(source=source)\n if cpu is not None:\n if cpu_required > cpu:\n raise SchemaException(\n f'You set task cpu to {cpu} but your container cpu sums to {cpu_required}.'\n 'Task cpu must be greater than the sum of container cpu.'\n )\n # we calculate cpu as an int, but register_task_definition wants a str\n data['cpu'] = str(cpu)",
"def cpu(self):\n ret = self._get_attr(\"CPU\")\n return ret",
"def save_item(self, item: Any) -> None:\n item_rejected = False\n original_item = item\n for processor in self.config.item_processors:\n item = processor(item)\n if item is None:\n item_rejected = True\n break\n if item_rejected:\n logger.debug('item %s was rejected', original_item)\n return\n\n logger.debug('writing item %s to file %s', item, self.config.backup_filename)\n with self._lock:\n write_mp(self.config.backup_filename, item, mode='a', encoder=self.config.msgpack_encoder)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Returns the final fitted shape.
|
def final_shape(self):
|
[
"def output_shape(self):\n return None",
"def shape(self) -> S:",
"def split_shape(self):\n return self.__split_shape",
"def shape(self):\n return self._input.shape",
"def shape(self):\n image, _ = self.datasets[0][0]\n return image.unsqueeze(dim=0)",
"def shape_a(self):\r\n return self._fixture_a._shape",
"def cache_shape(self):\n return self.param_info.cache_shape",
"def shape_b(self):\r\n return self._fixture_b._shape",
"def this_shape(self):\n _logger.debug('%s', where_am_i())\n return self._metadata['instance']['shape']",
"def get_data_shape(self):\n raise NotImplementedError",
"def build(self,input_shape):\r\n self.input_shape = input_shape\r\n return input_shape",
"def compute_output_shape(self, input_shape):\n return (\n input_shape[0],\n self.paddings[1][0] + input_shape[1] + self.paddings[1][1],\n self.paddings[2][0] + input_shape[2] + self.paddings[2][1],\n input_shape[3]\n )",
"def state_shape(self):\n pass",
"def shape(self):\n # Default behaviour is to try to evaluate the object directly\n # Try with some large y, to avoid having to unpack (slow)\n try:\n y = np.nan * np.ones((1000, 1))\n evaluated_self = self.evaluate(0, y, y, inputs=\"shape test\")\n # If that fails, fall back to calculating how big y should really be\n except ValueError:\n unpacker = pybamm.SymbolUnpacker(pybamm.StateVector)\n state_vectors_in_node = unpacker.unpack_symbol(self)\n min_y_size = max(\n max(len(x._evaluation_array) for x in state_vectors_in_node), 1\n )\n # Pick a y that won't cause RuntimeWarnings\n y = np.nan * np.ones((min_y_size, 1))\n evaluated_self = self.evaluate(0, y, y, inputs=\"shape test\")\n\n # Return shape of evaluated object\n if isinstance(evaluated_self, numbers.Number):\n return ()\n else:\n return evaluated_self.shape",
"def block_shape(self):\n return self._block_shape",
"def get_image_shape(self) -> Tuple[int, int]:",
"def shape(self):\n vars = {self}\n fn = lambda values: values[self].shape\n links = set()\n return PartialLink(vars=vars, fn=fn, links=links)",
"def observation_shape(self):\n return self.data[0].shape",
"def infer_image_input_shape(self) -> Optional[Tuple[int, int]]:\n return self.output_shape",
"def shape(x):\n\treturn tf.shape(x)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Returns the initial shape from which the fitting started.
|
def initial_shape(self):
|
[
"def _get_weights_shape(self, kwargs):\n if kwargs['shape'] is not None:\n return kwargs['shape']\n else:\n if 'initial_value' not in kwargs:\n raise ValueError(\n '`initial_value` is not in kwargs: cannot infer the shape.')\n elif callable(kwargs['initial_value']):\n initial_tensor = kwargs['initial_value']()\n else:\n initial_tensor = kwargs['initial_value']\n # Check whether initial_tensor is None before returning its shape.\n if initial_tensor is None:\n raise ValueError('Cannot get shape information from kwargs: ', kwargs)\n\n return initial_tensor.shape",
"def shape(self):\n return self._input.shape",
"def shape(self):\n image, _ = self.datasets[0][0]\n return image.unsqueeze(dim=0)",
"def shape(self) -> S:",
"def infer_image_input_shape(self) -> Optional[Tuple[int, int]]:\n output_shape = None\n for p in self.processings:\n new_output_shape = p.infer_image_input_shape()\n if new_output_shape is not None:\n output_shape = new_output_shape\n\n return output_shape",
"def infer_image_input_shape(self) -> Optional[Tuple[int, int]]:\n return self.output_shape",
"def shape_p0(self):\n return self.topology.n_elements[0], self.index",
"def observation_shape(self):\n return self.data[0].shape",
"def build(self,input_shape):\r\n self.input_shape = input_shape\r\n return input_shape",
"def shape_a(self):\r\n return self._fixture_a._shape",
"def output_shape(self):\n return None",
"def this_shape(self):\n _logger.debug('%s', where_am_i())\n return self._metadata['instance']['shape']",
"def state_shape(self):\n pass",
"def array_shape(self):\n return None",
"def get_mean_shape(self):\n return self._mean_shape",
"def _choose_initial_point(self) -> np.ndarray:\n if self._warm_start and self._fit_result is not None:\n self._initial_point = self._fit_result.x\n elif self._initial_point is None:\n self._initial_point = algorithm_globals.random.random(self._neural_network.num_weights)\n return self._initial_point",
"def shape(self):\n # Default behaviour is to try to evaluate the object directly\n # Try with some large y, to avoid having to unpack (slow)\n try:\n y = np.nan * np.ones((1000, 1))\n evaluated_self = self.evaluate(0, y, y, inputs=\"shape test\")\n # If that fails, fall back to calculating how big y should really be\n except ValueError:\n unpacker = pybamm.SymbolUnpacker(pybamm.StateVector)\n state_vectors_in_node = unpacker.unpack_symbol(self)\n min_y_size = max(\n max(len(x._evaluation_array) for x in state_vectors_in_node), 1\n )\n # Pick a y that won't cause RuntimeWarnings\n y = np.nan * np.ones((min_y_size, 1))\n evaluated_self = self.evaluate(0, y, y, inputs=\"shape test\")\n\n # Return shape of evaluated object\n if isinstance(evaluated_self, numbers.Number):\n return ()\n else:\n return evaluated_self.shape",
"def image_shape(self):\n return self.mri_imgs[0].shape",
"def shape(self):\n qpi0 = self.get_qpimage_raw(0)\n return len(self), qpi0.shape[0], qpi0.shape[1]",
"def cache_shape(self):\n return self.param_info.cache_shape"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Returns a copy of the fitted image with the following landmark
|
def fitted_image(self):
image = Image(self.image.pixels)
image.landmarks['initial'] = self.initial_shape
image.landmarks['final'] = self.final_shape
if self.gt_shape is not None:
image.landmarks['ground'] = self.gt_shape
return image
|
[
"def crop_landmark2(image, landmarks, part, show_crop=False):\n dims = np.load('landmark_dims.npy')\n\n if (part == \"left eyebrow\" or part == 0):\n rango = range(17, 22)\n w, h = dims[0] // 2\n elif (part == \"right eyebrow\" or part == 1):\n rango = range(22, 27)\n w, h = dims[1] // 2\n elif (part == \"nose\" or part == 2):\n rango = range(27, 36)\n w, h = dims[5] // 2\n elif (part == \"left eye\" or part == 3):\n rango = range(36, 42)\n w, h = dims[2] // 2\n elif (part == \"right eye\" or part == 4):\n rango = range(42, 48)\n w, h = dims[3] // 2\n elif (part == \"mouth\" or part == 5):\n rango = range(48, 68)\n w, h = dims[4] // 2\n\n landmarks = np.array(landmarks)\n rango = np.array(rango)\n x_max = int(landmarks[rango, 0].max())\n x_min = int(landmarks[rango, 0].min())\n y_max = int(landmarks[rango, 1].max())\n y_min = int(landmarks[rango, 1].min())\n\n X = int(np.mean((x_min, x_max)).round(0))\n Y = int(np.mean((y_min, y_max)).round(0))\n\n landmark = _crop_image(image, X, Y, w, h)\n if show_crop:\n cv2.imshow(\"Image\", landmark)\n cv2.waitKey(15000)\n # cv2.waitKey(0)\n return landmark",
"def _get_landmarks(input, show_image=False):\n if type(input) == str:\n im = cv2.imread(input)\n if im.shape[2] == 3:\n image = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n else:\n image = im\n elif isinstance(input, np.ndarray):\n im = input\n if im.shape[2] == 3:\n image = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n else:\n image = im\n shape_predictor = 'shape_predictor_68_face_landmarks.dat'\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor(shape_predictor)\n gray = image\n # detect faces in the grayscale image\n rects = detector(gray, 1)\n if len(rects) == 0:\n # foto entera es el rectangulo\n rectangle = None\n rectangle = dlib.rectangle(0, 0, image.shape[1], image.shape[0])\n elif len(rects) == 1:\n # ok\n rectangle = rects[0]\n else:\n # Ahora se elige el más grande.\n sizes = []\n for r in rects:\n (x, y, w, h) = face_utils.rect_to_bb(r)\n sizes.append(w * h)\n rectangle = rects[np.argmax(sizes)]\n rect = rectangle\n # determine the facial landmarks for the face region, then convert the facial landmark (x, y)-coordinates to a\n # NumPy array\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n # convert dlib's rectangle to a OpenCV-style bounding box\n # [i.e., (x, y, w, h)], then draw the face bounding box\n (x, y, w, h) = face_utils.rect_to_bb(rect)\n if show_image:\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n # show the face number\n cv2.putText(image, \"Face\", (x - 10, y - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n # loop over the (x, y)-coordinates for the facial landmarks\n # and draw them on the image\n for (x, y) in shape:\n cv2.circle(image, (x, y), 1, (0, 0, 255), -1)\n # show the output image with the face detections + facial landmarks\n cv2.imshow(\"Output\", image)\n cv2.waitKey(0)\n return shape, x, y, w, h",
"def get_landmark(filepath, predictor):\n detector = dlib.get_frontal_face_detector()\n\n img = dlib.load_rgb_image(filepath)\n dets = detector(img, 1)\n\n for k, d in enumerate(dets):\n shape = predictor(img, d)\n\n t = list(shape.parts())\n a = []\n for tt in t:\n a.append([tt.x, tt.y])\n lm = np.array(a)\n return lm",
"def get_face_mask(img, landmarks):\n img = numpy.zeros(img.shape[:2], dtype=numpy.float64)\n\n for group in OVERLAY_POINTS:\n draw_convex_hull(img,\n landmarks[group],\n color=1)\n\n img = numpy.array([img, img, img]).transpose((1, 2, 0))\n\n img = (cv2.GaussianBlur(img, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0) > 0) * 1.0\n img = cv2.GaussianBlur(img, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)\n\n return img",
"def extract_hough_landmarks(image):\n origin = numpy.array(image.size, dtype=numpy.int)/2 # Centre of image.\n black_white = occupancy.black_white(image)\n lines = probabilistic_hough_line(black_white, threshold=30, line_length=50, line_gap=50) # Extract lines\n landmarks = []\n for line in lines:\n landmarks.append(Landmark([line[0] - origin[0], line[1] - origin[1]]))\n return limit_landmarks(landmarks)",
"def align_face(img, landmarks, crop_size=112):\n facial5points = [[landmarks[j], landmarks[j + 5]] for j in range(5)]\n warped_face = warp_and_crop_face(np.array(img), facial5points, reference, crop_size=(crop_size, crop_size))\n img_warped = Image.fromarray(warped_face)\n return img_warped",
"def _crop_v0(self, image, landmarks, scale):\n # left eye: landmarks[36]\n # left mouth: landmarks[48]\n # nose: landmarks[29]\n # find the most left point and most right point\n landmarks_x = landmarks[:, 0]\n most_left_x = np.min(landmarks_x)\n most_right_x = np.max(landmarks_x)\n mid_x = (most_left_x + most_right_x) // 2\n # print(most_left_x, most_right_x, mid_x)\n # define new center point use mid_x and y from nose point\n center_point = [mid_x, landmarks[29][1]]\n # compute the distance between left eye(landmarks[36])\n distance = most_right_x - mid_x\n size = distance * scale\n # print(center_point)\n # compute row_start, row_end, col_start, col_end\n row_start = int(center_point[1] - size)\n row_end = int(center_point[1] + size)\n col_start = int(center_point[0] - size)\n col_end = int(center_point[0] + size)\n # print('*' * 10)\n # print(row_start, row_end, col_start, col_end)\n # make range valid and compute padding\n if row_start < 0:\n padding_up = abs(row_start)\n row_start = 0\n else:\n padding_up = 0\n if col_start < 0:\n padding_left = abs(col_start)\n col_start = 0\n else:\n padding_left = 0\n if row_end > (image.shape[0] - 1):\n padding_down = row_end - (image.shape[0] - 1)\n row_end = image.shape[0] - 1\n else:\n padding_down = 0\n if col_end > (image.shape[1] - 1):\n padding_right = col_end - (image.shape[1] - 1)\n col_end = image.shape[1] - 1\n else:\n padding_right = 0\n # print(row_start, row_end, col_start, col_end)\n # print('*' * 10)\n # crop image\n cropped_image = self._crop_helper(image, row_start, row_end, col_start, col_end,\n padding_up, padding_down, padding_left, padding_right)\n return cropped_image",
"def getLandmarkPatchAndBBox(img,landmark,N):\r\n n = N/2\r\n if landmark[1]<=n or landmark[1]>=(1935-n) or landmark[0]<=n or landmark[0]>=(2400-n):\r\n radis = N/4\r\n else:\r\n radis = N/2\r\n #landmark[0]为行数(top,bottom),landmark[1]为列数(left,right)\r\n left = (landmark[1]-radis).astype('int16')\r\n right = (landmark[1]+radis).astype('int16')\r\n top = (landmark[0]-radis).astype('int16') \r\n bottom = (landmark[0]+radis).astype('int16')\r\n patch = img[top:bottom+1,left:right+1]\r\n patch_bbox = BBox([left,right,top,bottom])\r\n return patch,patch_bbox",
"def __landmarks2mask(self, landmarks):\n h, w = self.dim\n k = len(landmarks)\n\n mask = np.zeros((w, h, self.n_landmarks), dtype=np.float32)\n \n for i in range(k):\n p = landmarks[i]\n mask[int(p['y'] * w), int(p['x'] * h), i] = 1.\n mask[:,:,i] = gaussian(image=mask[:,:,i], sigma=self.sigma)\n \n if self.make_2d_masks:\n mask = np.reshape(mask, (self.dim[0] * self.dim[1], k))\n return mask",
"def annotate_landmarks(img, landmarks, font_scale = 0.4):\n img = img.copy()\n for idx, point in enumerate(landmarks):\n pos = (point[0, 0], point[0, 1])\n cv2.putText(img, str(idx), pos,\n fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,\n fontScale=font_scale,\n color=(0, 0, 255))\n cv2.circle(img, pos, 3, color=(0, 255, 255))\n return img",
"def reconstruct_input_image(input_data, predicted_region):\r\n offset = 7\r\n\r\n def normalize(x):\r\n return np.array((x - np.min(x)) / (np.max(x) - np.min(x)))\r\n \r\n predicted_region = normalize(predicted_region)\r\n \r\n h, w, _ = np.shape(predicted_region)\r\n\r\n mask = np.sum(input_data, axis=2) == 0\r\n mask_i = np.sum(mask, axis=0)\r\n mask_j = np.sum(mask, axis=1)\r\n \r\n #print(list(np.where(mask_i == 50)[0])[0])\r\n i = np.where(mask_i == 50)[0][0] - offset\r\n j = np.where(mask_j == 50)[0][0] - offset\r\n\r\n full_image = input_data.copy()\r\n full_image[j+offset:j+h-offset, i+offset:i+w-offset] = predicted_region[offset:-offset,offset:-offset]\r\n\r\n return full_image",
"def zero_slope(data_set):\n images = data_set[\"train\"][\"image\"] + data_set[\"test\"][\"image\"]\n landmarks = data_set[\"train\"][\"landmark\"] + data_set[\"test\"][\"landmark\"]\n train_size = len(data_set[\"train\"][\"image\"])\n\n rotated_images = []\n rotated_landmarks = []\n for i, image in enumerate(images):\n current_slope = (landmarks[i][6]-landmarks[i][5]) / (landmarks[i][1] - landmarks[i][0])\n theta = current_slope * (-1)\n rotated_image = image.rotate(theta)\n rotated_landmark = rotate_landmarks(landmarks[i], theta)\n # Save to the list\n rotated_images.append(rotated_image)\n rotated_landmarks.append(rotated_landmark)\n # plt.imshow(np.asarray(rotated_image), cmap='gray')\n # plt.plot(rotated_landmark[0:5], rotated_landmark[5:10], 'r.')\n # plt.show()\n\n del data_set[\"train\"][\"image\"], data_set[\"test\"][\"image\"], \\\n data_set[\"train\"][\"landmark\"], data_set[\"test\"][\"landmark\"]\n\n data_set[\"train\"][\"image\"] = rotated_images[0:train_size]\n data_set[\"test\"][\"image\"] = rotated_images[train_size:]\n data_set[\"train\"][\"landmark\"] = rotated_landmarks[0:train_size]\n data_set[\"test\"][\"landmark\"] = rotated_landmarks[train_size:]\n\n return data_set",
"def getAsMaskImage(self):\n\t\tif not self.isROI():\n\t\t\treturn None\n\t\tinsideMap = {}\n\t\tinsideMap.update(self.getCoveredPoints())\n\t\tinsMap = {}\n\t\tfor x, y in insideMap.keys():\n\t\t\tinsMap[(x, y)] = 1\n\t\tparent = self.GetCanvas()\n\t\tmx, my, mz = parent.dataUnit.getDimensions()\n\t\treturn lib.ImageOperations.getMaskFromPoints(insMap, mx, my, mz)",
"def showLandmarks(image, landmarks):\n\t\tplt.imshow(image)\n\t\tplt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')\n\t\tplt.pause(0.001)\n\t\tplt.show()",
"def face_allignment(data_set):\n images = data_set[\"train\"][\"image\"] + data_set[\"test\"][\"image\"]\n landmarks = data_set[\"train\"][\"landmark\"] + data_set[\"test\"][\"landmark\"]\n train_size = len(data_set[\"train\"][\"image\"])\n widening_size = 100\n\n alligned_images = []\n alligned_landmarks = []\n\n for i, image in enumerate(images):\n crop_width = landmarks[i][1] - landmarks[i][0]\n crop_height = landmarks[i][8] - landmarks[i][5]\n bbox = (landmarks[i][0]-widening_size/2, landmarks[i][5]-widening_size/2,\n landmarks[i][0] + crop_width + widening_size/2,\n landmarks[i][5] + crop_height + widening_size/2)\n aligned_image = image.crop(bbox)\n alligned_images.append(aligned_image)\n # Align landmarks:\n origin_x = landmarks[i][0]\n origin_y = landmarks[i][5]\n xs = [x_lndmrk-origin_x + widening_size/2 for x_lndmrk in landmarks[i][0:5]]\n ys = [y_lndmrk-origin_y + widening_size/2 for y_lndmrk in landmarks[i][5:10]]\n alligned_landmarks.append(xs+ys)\n # plt.imshow(np.asarray(aligned_image), cmap='gray')\n # plt.plot(xs, ys, 'r.')\n # plt.show()\n\n del data_set[\"train\"][\"image\"], data_set[\"test\"][\"image\"], \\\n data_set[\"train\"][\"landmark\"], data_set[\"test\"][\"landmark\"]\n\n data_set[\"train\"][\"image\"] = alligned_images[0:train_size]\n data_set[\"train\"][\"landmark\"] = alligned_landmarks[0:train_size]\n data_set[\"test\"][\"image\"] = alligned_images[train_size:]\n data_set[\"test\"][\"landmark\"] = alligned_landmarks[train_size:]\n\n return data_set",
"def landmask(self):\n if not hasattr(self, '_landmask'):\n nc = netCDF4.Dataset(self.gridfile)\n \n self._landmask = self.gmt.field(nc.variables[\"u\"][0,0,:,:].mask)\n\n\n \n return self._landmask",
"def get_fitted_roi_img(self):\n if isinstance(self.masker, NiftiMasker):\n return self.masker.mask_img_\n elif isinstance(self.masker, NiftiLabelsMasker):\n return self.masker.labels_img\n elif isinstance(self.masker, NiftiSpheresMasker):\n return _get_spheres_from_masker(self.masker, self.img)\n elif isinstance(self.masker, NiftiMapsMasker):\n return self.masker.maps_img_",
"def show_landmarks(image, landmarks):\r\n plt.imshow(image)\r\n plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')\r\n plt.pause(0.001) # pause a bit so that plots are updated\r",
"def landmarks_match(src_im, src_landmarks, tar_landmarks, border_mode=cv2.BORDER_REPLICATE, border_value=(0,255,0)):\n src_size = src_im.shape\n src_tmp = [(int(xy[1]), int(xy[0])) for xy in src_landmarks]\n dst_tmp = [(int(xy[1]), int(xy[0])) for xy in tar_landmarks]\n if len(src_tmp) >= 68:\n src_tmp = src_tmp[17:]\n if len(dst_tmp) >= 68:\n dst_tmp = dst_tmp[17:]\n M = umeyama(np.array(src_tmp), np.array(dst_tmp), True)[0:2]\n result = cv2.warpAffine(src_im, M, (src_size[1], src_size[0]), borderMode=border_mode, borderValue=border_value)\n return result, M",
"def landmask(self):\n if not hasattr(self, '_landmask'):\n nc = netCDF4.Dataset(self.gridfile)\n self._landmask = self.gmt.field(nc.variables[\"water_u\"][0,0,:,:].mask)\n return self._landmask"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Generates a list containing the transforms obtained at each fitting iteration.
|
def transforms(self):
return [self.fitter.transform.from_vector(p)
for p in self.shape_parameters]
|
[
"def transform(self, X):\n result = []\n for name, transformer in self.transformer_list:\n result.append(transformer.transform(X))\n\n return result",
"def transforms(self) -> TransformationsList:\n transforms = TransformationsList(self)\n depends_on = self.get_field(CommonAttrs.DEPENDS_ON)\n self._get_transform(depends_on, transforms, local_only=True)\n return transforms",
"def transformations(self):\n\t\treturn [ [n.a.t.v,n.a.r.v] for n in self.nodes ]",
"def compute_transforms(images, reference_idx=0, from_reference=False):\n # TODO - add multiprocessing?\n\n p = ProgressBar(len(images))\n warp_list = []\n for i, current_image in enumerate(images):\n p.update(i)\n warp_list.append(affine_registration(images[reference_idx],\n current_image))\n p.update(len(images))\n print(\" Done!\")\n return warp_list",
"def transform(self, data):\n\t\t\n\t\tfor t in self.transformer_list:\n\t\t\tdata = t.transform(data)\n\t\t\t\n\t\treturn data",
"def _set_transforms(self):\n train_transform_list = [\n transforms.ToTensor()\n ]\n if self.color_jitter:\n train_transform_list.append(transforms.ColorJitter(self.color_jitter, self.color_jitter, self.color_jitter))\n train_transform_list.append(transforms.RandomResizedCrop(\n size=[self.dims[2], self.dims[3]],\n scale=(0.8, 1.1)\n ))\n train_transform_list.append(transforms.RandomAffine(\n degrees=self.degrees_affine,\n translate=(self.translate_affine, self.translate_affine),\n scale=(1 - self.scale_margin_affine, 1 + self.scale_margin_affine),\n shear=self.shear_affine\n ))\n if self.horizontal_flip:\n train_transform_list.append(transforms.RandomHorizontalFlip())\n train_transform_list.append(transforms.Normalize(DATASET_MEAN, DATASET_STD))\n if self.random_erasing:\n train_transform_list.append(transforms.RandomErasing())\n self.train_transform = transforms.Compose(train_transform_list)\n self.val_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(DATASET_MEAN, DATASET_STD)\n ])",
"def build_transforms():\n\n normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n\n transform_tr = Compose([\n RandomHorizontalFlip(),\n ToTensor(),\n normalize\n ])\n\n transform_te = Compose([\n ToTensor(),\n normalize,\n ])\n\n return transform_tr, transform_te",
"def all_linkgeom_transforms(self, thetas):\n trans = []\n mtrans = []\n for q in thetas:\n self.robot.setConfig(q)\n tmp = []\n for _, geom_idx, _ in self.link_cache['link']:\n tmp.append(self.robot.link(geom_idx).getTransform())\n trans.append(tmp)\n # for mounted\n tmp = []\n for link_idx, relT, _ in self.link_cache['mount']:\n tmp.append(se3.mul(self.robot.link(link_idx).getTransform(), relT))\n mtrans.append(tmp)\n return trans, mtrans",
"def calc_transform_all(triangles1, triangles2):\n transformations = []\n i = 0\n for tri1, tri2 in zip(triangles1, triangles2):\n# print(i)\n# i += 1\n transformation = calc_affine_transform(tri1, tri2)\n transformations.append(transformation)\n return np.array(transformations)",
"def augmentation_transform(self, data):\n for aug in self.auglist:\n data = [ret for src in data for ret in aug(src)]\n \n return data",
"def get_transform_list(transform_dir, direction='forward'):\n if direction == 'forward':\n transform_list = [os.path.join(transform_dir, 'forward', 'warp.nii.gz'),\n os.path.join(transform_dir, 'forward', 'affine.mat')]\n elif direction == 'inverse':\n transform_list = [os.path.join(transform_dir, 'inverse', 'affine.mat'),\n os.path.join(transform_dir, 'inverse', 'warp.nii.gz')]\n\n return transform_list",
"def list_transforms(self):\n\n Ltransforms = self.builder.get_object(\"TransformType\")\n for entry in self.intermediary.Tlist:\n Ltransforms.insert(tk.END, entry)",
"def updateTransforms(self, node):\n transforms = []\n for _, transform in self.instrument.positioning_stack.model():\n transforms.append(transform)\n\n for detector in self.instrument.detectors.values():\n for _, transform in detector.model():\n transforms.append(transform)\n\n for _, transform in self.instrument.jaws.model():\n transforms.append(transform)\n\n node.per_object_transform[:len(transforms)] = transforms",
"def transforms_full_chain(self) -> TransformationsList:\n transforms = TransformationsList(self)\n depends_on = self.get_field(CommonAttrs.DEPENDS_ON)\n self._get_transform(depends_on, transforms)\n return transforms",
"def combine_transforms(transforms):\n t = transforms[0]\n for i in range(1, len(transforms)):\n t = np.dot(t, transforms[i])\n return t",
"def _transform(self, dataset):\n\n for t in self.transforms:\n method = getattr(dataset, t.name)\n dataset = method(*t.args, **t.kwargs)\n\n return dataset",
"def transform_multiple(x, transf_paths, transf_types, gene_name_path='../network/genes_in_data.csv'):\n out = []\n for i in range(len(transf_paths)):\n path = transf_paths[i]\n transf_type = transf_types[i]\n print(path)\n\n if transf_type == 'filter_and_pca':\n out.append(transform_from_files(x, path[0], transf_type, transf2_path=path[1]))\n else:\n out.append(transform_from_files(x, path, transf_type, gene_name_path))\n\n return out",
"def transform(self, points):\n scaled = [ ]\n for pt in points:\n scaled_pt = self.transform_pt(pt)\n scaled.append(scaled_pt)\n return scaled",
"def transform(self, X):\n Xs = Parallel(n_jobs=self.n_jobs)(\n delayed(_transform_one)(\n **(self.fit_args(_transform_one, locals(), X)[0]),\n **(self.fit_args(_transform_one, locals())[1])\n )\n for name, trans, weight in self._iter())\n if not Xs:\n # All transformers are None\n return np.zeros((X.shape[0], 0))\n Xs = pd.concat(Xs, axis=1)\n return Xs",
"def concurrent_transforms(func, imgs, max_workers=None):\n executor = futures.ThreadPoolExecutor\n if max_workers is None:\n # default to as many workers as available cpus\n max_workers = cpu_count()\n results = []\n with executor(max_workers=max_workers) as execute:\n for result in execute.map(func, imgs):\n results.append(result)\n return results"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Returns the final transform.
|
def final_transform(self):
return self.fitter.transform.from_vector(self.shape_parameters[-1])
|
[
"def transform(self):\n return self.interface.transform",
"def get_transform(self):\n transform = self.InvertedMercatorLatitudeTransform(self.thresh)\n return transform",
"def transform(self):\n ext = os.path.splitext(self.filename)[-1][1:]\n t = self.transformers.get(ext, lambda x: x)\n return t(self._content)",
"def getTransform(self, *args):\n return _coin.SbMatrix_getTransform(self, *args)",
"def getTransform(self, *args) -> \"PyObject *\":\n return _coin.SbMatrix_getTransform(self, *args)",
"def getTransform(self, *args) -> \"void\":\n return _coin.SbDPMatrix_getTransform(self, *args)",
"def transform(self) -> Qt3DCore.QTransform:\n transform_matrix = QMatrix4x4()\n for transform in self.transforms_full_chain:\n transform_matrix *= transform.qmatrix\n transformation = Qt3DCore.QTransform()\n transformation.setMatrix(transform_matrix)\n return transformation",
"def getTransform(self):\n\tbuffer = mc.ls(self.mNode, type = 'transform') or False\n\tif buffer:\n\t return buffer[0]\n\telse:\n\t buffer = mc.listRelatives(self.mNode,parent=True,type='transform') or False\n\tif buffer:\n\t return buffer[0]\n\treturn False",
"def get_transformation(self, **kwargs):\n return self.wrapped_generator.get_transformation(**kwargs)",
"def projective_transform(self):\r\n return transform.ProjectiveTransform(self.persp_matrix)",
"def render_transform(self):\n return self.fb_ndc_transform * self.canvas_fb_transform",
"def __call__(self, video_path: str, output_path: Optional[str] = None) -> None:\n if random.random() > self.p:\n return None\n\n transform = random.choices(self.transforms, self.transform_probs)[0]\n return transform(video_path, output_path, force=True)",
"def popTransform(self):\n assert len(self._transformStack) > 1\n return self._transformStack.pop()",
"def getTransform(self) -> \"SbMatrix const &\":\n return _coin.SbXfBox3f_getTransform(self)",
"def transform(self, verbose=1, **kwargs):\n self.current = self.tokens.copy()\n transformation_selection = self.transformation_selection.copy()\n for kw in kwargs:\n if kw in transformation_selection:\n transformation_selection[kw] = kwargs[kw]\n for i, trans in enumerate(self.transformation_order, 1):\n if verbose:\n print(f\"{i}/{len(self.transformation_order)}. {trans}...\", end='')\n if transformation_selection[trans]:\n self.current = self.current.apply(eval(f\"self.{trans}\", ), \n stopwords=self.sw, \n lemmatizer=self.lm, \n stemmer=self.sm,\n exceptions=self.non_alpha_exceptions,\n replacer=self.replacer)\n if verbose:\n print(\"Completed.\" if transformation_selection[trans] else \"Skipped.\")\n return self.current",
"def transform(self):\n self._prepare()\n self._cluster()\n self._store()\n self.logger.debug('Transformation is done...')",
"def __call__(self, obj):\n return obj._affine(self)",
"def _transform(self, field: FieldBase, t: float) -> FieldBase:\n if self.transformation is None:\n return field\n elif self.transformation.__code__.co_argcount == 1:\n return self.transformation(field) # type: ignore\n else:\n return self.transformation(field, t)",
"def transform(self, tf):\n pass",
"def transformed(self, transformation):\n wrench = self.copy()\n wrench.transform(transformation)\n return wrench"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Returns the initial transform from which the fitting started.
|
def initial_transform(self):
return self.fitter.transform.from_vector(self.shape_parameters[0])
|
[
"def get_transform(self):\n transform = self.InvertedMercatorLatitudeTransform(self.thresh)\n return transform",
"def transform(self):\n return self.interface.transform",
"def start_coordinates(self):\n return self.transformation.from_system",
"def getTransform(self):\n\tbuffer = mc.ls(self.mNode, type = 'transform') or False\n\tif buffer:\n\t return buffer[0]\n\telse:\n\t buffer = mc.listRelatives(self.mNode,parent=True,type='transform') or False\n\tif buffer:\n\t return buffer[0]\n\treturn False",
"def getTransform(self, *args):\n return _coin.SbMatrix_getTransform(self, *args)",
"def getTransform(self, *args) -> \"void\":\n return _coin.SbDPMatrix_getTransform(self, *args)",
"def _getAffine(self):\n with self._getDatasetLock:\n affine = self.dataset.transform\n if len(self.dataset.gcps[0]) != 0 and self.dataset.gcps[1]:\n affine = rio.transform.from_gcps(self.dataset.gcps[0])\n\n return affine",
"def _choose_initial_point(self) -> np.ndarray:\n if self._warm_start and self._fit_result is not None:\n self._initial_point = self._fit_result.x\n elif self._initial_point is None:\n self._initial_point = algorithm_globals.random.random(self._neural_network.num_weights)\n return self._initial_point",
"def get_transformation(self, target_frame, source_frame):\n\n pose = geometry_msgs.msg.Pose()\n try:\n common_time = self.listener.getLatestCommonTime(\n target_frame, source_frame\n )\n\n self.listener.waitForTransform(\n target_frame, source_frame,\n common_time, rospy.Duration(self.wait_for_transform)\n )\n\n (trans,quat) = self.listener.lookupTransform(\n target_frame, source_frame, common_time)\n\n pose.position.x = trans[0]\n pose.position.y = trans[1]\n pose.position.z = trans[2]\n\n pose.orientation.x = quat[0]\n pose.orientation.y = quat[1]\n pose.orientation.z = quat[2]\n pose.orientation.w = quat[3]\n\n return pose\n\n except tf.Exception, error:\n rospy.logwarn(\"Exception occurred: {0}\".format(error))\n return None\n\n return None",
"def fit_transform(self, tau=None):\r\n\r\n # Check if fitted\r\n check_is_fitted(self, ['d_', 'y_']) \r\n \r\n # Perform transformation and store results\r\n unit_simplex, tau = self.transform(self.d_, tau, return_tau=True)\r\n self.tau_ = tau\r\n return unit_simplex",
"def getTransform(self, *args) -> \"PyObject *\":\n return _coin.SbMatrix_getTransform(self, *args)",
"def estimated_pose(self):\n return self.dead_reckoning.pose",
"def _get_transform():\n with open(app.config.get('FGDC_XSL'), 'r') as fp:\n f = StringIO.StringIO(fp.read())\n\n xslt = etree.parse(f)\n xform = etree.XSLT(xslt)\n return xform",
"def getTransform(self) -> \"SbMatrix const &\":\n return _coin.SbXfBox3f_getTransform(self)",
"def default_transform(self, **kwargs) -> (Affine, int, int, CRS):\n with rasterio.open(str(self.get_default_band_path(**kwargs))) as dst:\n return dst.transform, dst.width, dst.height, dst.crs",
"def default_transform(self, **kwargs) -> (affine.Affine, int, int, CRS):\n default_band = self.get_default_band()\n def_path = self.get_band_paths(\n [default_band], pixel_size=self.pixel_size, **kwargs\n )[default_band]\n with rasterio.open(str(def_path)) as dst:\n return dst.transform, dst.width, dst.height, dst.crs",
"def test_get_transform():\n w = wcs.WCS(pipe[:])\n tr_forward = w.get_transform('detector', 'focal')\n tr_back = w.get_transform('icrs', 'detector')\n x, y = 1, 2\n fx, fy = tr_forward(1, 2)\n assert_allclose(w.pipeline[0].transform(x, y), (fx, fy))\n assert_allclose(w.pipeline[0].transform(x, y), (fx, fy))\n assert_allclose((x, y), tr_back(*w(x, y)))\n assert(w.get_transform('detector', 'detector') is None)",
"def get_transformed(self):\n if self.coords is None or self.reference_coords is None:\n raise PDBException(\"No coordinates set.\")\n\n if self.rot is None:\n raise PDBException(\"Nothing is superimposed yet.\")\n\n self.transformed_coords = np.dot(self.coords, self.rot) + self.tran\n return self.transformed_coords",
"def transform(self) -> Qt3DCore.QTransform:\n transform_matrix = QMatrix4x4()\n for transform in self.transforms_full_chain:\n transform_matrix *= transform.qmatrix\n transformation = Qt3DCore.QTransform()\n transformation.setMatrix(transform_matrix)\n return transformation",
"def get_pose(self):\n\t\treturn self.real_pose"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" The list containing the warped images obtained at each fitting iteration.
|
def warped_images(self):
mask = self.fitter.template.mask
transform = self.fitter.transform
interpolator = self.fitter.interpolator
return [self.image.warp_to(mask, transform.from_vector(p),
interpolator=interpolator)
for p in self.shape_parameters]
|
[
"def warped_images(self):\n mask = self.algorithm_results[-1].fitter.template.mask\n transform = self.algorithm_results[-1].fitter.transform\n interpolator = self.algorithm_results[-1].fitter.interpolator\n\n warped_images = []\n for s in self.shapes():\n transform.set_target(s)\n image = self.image.warp_to(mask, transform,\n interpolator=interpolator)\n warped_images.append(image)\n\n return warped_images",
"def unmasked_model_image_of_galaxies(self) -> List[aa.Array2D]:\r\n return self.max_log_likelihood_fit.unmasked_blurred_image_of_galaxies_list",
"def get_images(self):\n return [env.render(mode='rgb_array') for env in self.list_env]",
"def make_image_list(self):\n return [\n tools.get_image(48, 0, 16, 16, self.sprite_sheet),\n tools.get_image(0, 0, 22, 16, setup.GFX['sword2'])\n ]",
"def get_images(self):\n pass",
"def get_images_mp(sample_size_total,SN_limits=[10,100],translation_distance=1,radius_limits=[1.5,3],parameter_function=0):\n\n\n if __name__ == '__main__' or __name__=='mp_images_new':\n import numpy as np\n\n from multiprocessing import Pool,cpu_count\n from functools import partial\n\n nbr_images = []\n nbr_workers = cpu_count()\n sample_size = sample_size_total//nbr_workers\n\n # Avoid sending the extra paramters multiple times by creating partial\n\n tmp = partial(f,SN_limits=SN_limits,translation_distance=translation_distance,radius_limits=radius_limits,parameter_function=parameter_function)\n\n # Do some load balancing\n\n for i in range(nbr_workers):\n nbr_images.append(sample_size)\n if(i<sample_size_total%nbr_workers):\n nbr_images[-1]+=1\n\n # Use a pool to efficiently generate lots of images\n\n with Pool(processes=cpu_count()) as pool:\n tmp = pool.map(tmp,nbr_images)\n pool.close()\n pool.join()\n\n # Reformat the results from the pool map to be the right shape\n\n final_images = np.zeros((sample_size_total,51,51,1))\n final_targets = np.zeros((sample_size_total,3))\n img_idx = 0\n for worker_no in range(nbr_workers):\n for image_no in range(len(tmp[worker_no][0][:])):\n\n final_images[img_idx] = np.reshape(tmp[worker_no][0][image_no],(51,51,1))\n final_targets[img_idx] = tmp[worker_no][1][image_no]\n img_idx+=1\n return final_images,final_targets",
"def getBandGapImages(params,resolution=32,getFlatness=False,band=None,processes=None):\n \n units = params['units']\n enLabel, en = units['energy']\n xLabel, x = units['xaxis']\n yLabel, y = units['yaxis']\n zLabel, z = units['zaxis']\n numImages = len(z)\n\n xyz = np.roll(np.array(list(product(z,x,y))),2,1)\n energy = en*np.ones(len(xyz))\n \n data = np.c_[energy,xyz]\n names = [enLabel,xLabel,yLabel,zLabel]\n \n iterFunc = partial(getBG,params['lattice'],params['cutoff'],names,resolution,getFlatness=getFlatness,band=band)\n \n #iterFunc = partial(getBG,params['lattice'],params['cutoff'],names,resolution,)\n \n pool = mp.Pool(processes)\n \n if getFlatness:\n band_output = pool.map(iterFunc,data)\n band_output = np.array(band_output)\n pool.close()\n \n bandGaps, bandFlats = np.array(band_output[:,0].tolist()), np.array(band_output[:,1].tolist()) #np.array(array.tolist()) fixes the data type to np.float instead of np.object\n \n #take only minimum flatness if more than 1 band\n if bandFlats.ndim > 1:\n bandFlats = np.amin(bandFlats,axis=1)\n \n return bandGaps.reshape(numImages,len(x),len(y)), bandFlats.reshape(numImages,len(x),len(y))\n \n else:\n bandGaps = pool.map(iterFunc,data)\n bandGaps = np.array(bandGaps)\n pool.close()\n return bandGaps.reshape(numImages,len(x),len(y))",
"def get_all_from_original(self, idx):\n return [self.image(i) for i in range(idx * self._count,\n (idx+1) * self._count)]",
"def get_all_posteriors(self):\n return numpy.array(\n [self.posterior_at_index(i) for i in range(self.sketch.m)])",
"def pruneImages(self):\r\n tmp_list = self.img_names\r\n self.img_names = list()\r\n factor = self.num_imgs / self.num_slices\r\n for i in range(self.num_slices):\r\n self.img_names.append(tmp_list[int(i * factor)])\r\n self.num_imgs = len(self.img_names)",
"def get_next_batch(self):\n images = []\n while len(images) < self._batch_size:\n line = self._catalog.readline()\n self._counter += 1\n if self._counter < self._skip:\n continue\n url, time = line.split(\",\")\n time1 = int(time.split('-')[0])\n if time1 < 1650: # only consider paintings after 1650\n continue\n url = \"https://www.wga.hu/art\" + url.split(\"html\")[1] + \"jpg\"\n try:\n img_arr = self._scrape_image(url)\n except:\n continue\n if img_arr.shape[2] != 3: # only consider RGB paintings\n continue\n img_arr = (img_arr - 127.5) / 127.5\n images.append(img_arr)\n\n result = np.stack(images, axis=0)\n assert result.shape == (self._batch_size, self._input_size[0], self._input_size[1], 3)\n return result",
"def make_image_list(image_dir):",
"def images(self) -> List[Image.Image]:\n return [page.image for page in self._pages]",
"def _resize_images(images: List) -> List:\n return list(\n map(\n lambda i: i.resize((64, 64)),\n images\n )\n )",
"def get_image_scale_list(self): \n return self.image_scale_list",
"def get_images(ibs, gid_list):\n gpath_list = ibs.get_image_paths(gid_list)\n image_list = [gtool.imread(gpath) for gpath in gpath_list]\n return image_list",
"def floppy_images(self):\n ret = self._get_attr(\"floppyImages\")\n return [IMedium(a) for a in ret]",
"def _create_images(self, genomes: Genomes) -> Images:\n return [self._image_creator.create_image(g) for g in genomes]",
"def get_bayer_images(\n self, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL\n ) -> typing.List[np.ndarray]:\n return [rbg_to_bayer_bg(img) for img in self.get_images(renderer)]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" The list containing the appearance reconstruction obtained at each fitting iteration.
|
def appearance_reconstructions(self):
if self.appearance_parameters:
return [self.fitter.appearance_model.instance(w)
for w in self.appearance_parameters]
else:
return [self.fitter.template for _ in self.shapes]
|
[
"def appearance_reconstructions(self):\n return flatten_out(\n [f.appearance_reconstructions for f in self.algorithm_results])",
"def getDisplacements(self):\n return np.array(self.disps)",
"def get_representative_fits(self):\n\n\t\treturn self._repfit_list",
"def get_all_posteriors(self):\n return numpy.array(\n [self.posterior_at_index(i) for i in range(self.sketch.m)])",
"def extractParameters(self):\n return(self.matrices)",
"def flag_matriz(self):\n return self._flag_matriz",
"def extract(self):\n imgpaths = self.imgpaths\n object_nm = self.object_nm\n color_histograms = []\n progress = progressbar.ProgressBar(\n widgets=['{o}: '.format(o=object_nm), progressbar.Bar(),\n progressbar.Percentage(), ' ', progressbar.ETA()])\n for imgpath in progress(list(imgpaths)):\n if type(imgpath) is tuple:\n raw_path, mask_path = imgpath\n raw_img = cv2.imread(raw_path)\n mask_img = cv2.imread(mask_path)\n train_img = cv2.add(mask_img, raw_img)\n else:\n raw_path = imgpath\n train_img = cv2.imread(raw_path)\n\n color_hist_sub = rospy.Subscriber('single_channel_histogram_'\n + self.color + '/output', ColorHistogram, self.color_hist_cb)\n bridge = cv_bridge.CvBridge()\n train_imgmsg = bridge.cv2_to_imgmsg(train_img, encoding='bgr8')\n train_imgmsg.header.stamp = rospy.Time.now()\n # wait for histogram extracted from new image\n while not self.stamp or self.stamp < train_imgmsg.header.stamp:\n self.image_pub.publish(train_imgmsg)\n rospy.sleep(0.3)\n color_histograms.append(self.color_hist)\n return np.array(color_histograms)",
"def reveal_fitted_models(self):\n return [str(x) for x in self.fitted_models]",
"def descriptors(self):\n descs = []\n for x in xrange(0, 4):\n desc = self.GetDescriptor(x)\n if desc:\n descs.append(desc)\n return descs",
"def getFCs(self):\n return self.elements",
"def results(self) -> List:\n results = []\n for element in self.parameters.output:\n if \"concentration\" in element:\n results.append(self.concentration_to_latex(element))\n continue\n if element in self.__dict__.keys() or element in dir(self):\n results.append(getattr(self, element))\n else:\n results.append(getattr(self.parameters, element))\n return results",
"def atoms(self):\n if not hasattr(self, '_atoms'):\n self._atoms = [si.SingleImage(im[0], mask=im[1])\n for im in self.imglist]\n elif len(self._atoms) is not len(self.imglist):\n self._atoms = [si.SingleImage(im[0], mask=im[1])\n for im in self.imglist]\n return self._atoms",
"def return_possible_fitting_models():\n model_dictionary_keys = fitting_models.keys()\n for i,model_name in enumerate(model_dictionary_keys):\n print(\"%i: '%s'\" % (i+1, model_name))",
"def get_Rclasses(self):\r\n list_op = list(zip(sorted(self.morphisms.keys()),\r\n [0]*len(self.morphisms.keys())))\r\n R_classes = []\r\n for x,visited in list_op:\r\n if not visited:\r\n R_class = self.element_Rclass(x)\r\n R_classes.append(R_class)\r\n for i,(y,flag) in enumerate(list_op):\r\n if y in R_class:\r\n list_op[i]=(y,1)\r\n return R_classes",
"def _convert_rcfs(self):\n self.rcfs = []\n for i in range(self.get_nof_rcf()):\n edges = self.get_edges_for_rcf(i)\n nodes = self.get_nodes_for_rcf(i)\n weight = self.get_weight_for_rcf(i)\n rcf = RCF(i, nodes, edges, weight)\n self.rcfs.append(rcf)",
"def initPreds(self):\n mean_images = int(self.config['video']['mean_images'])\n preds = np.zeros((mean_images,len(self.contours)),dtype=np.float32)\n self.logger.debug('Mean images of prediction (time filtering): {}'.format(mean_images))\n return preds",
"def list(self):\n\n for arrname in list(self):\n print(arrname + ':', self[arrname].shape)",
"def aic(self):\n aics = []\n aics_bool = []\n for i, chain in enumerate(self.parent.chains):\n p, n_data, n_free = chain.posterior, chain.num_eff_data_points, chain.num_free_params\n if p is None or n_data is None or n_free is None:\n aics_bool.append(False)\n missing = \"\"\n if p is None:\n missing += \"posterior, \"\n if n_data is None:\n missing += \"num_eff_data_points, \"\n if n_free is None:\n missing += \"num_free_params, \"\n\n self._logger.warning(\"You need to set %s for chain %s to get the AIC\" % (missing[:-2], chain.name))\n else:\n aics_bool.append(True)\n c_cor = 1.0 * n_free * (n_free + 1) / (n_data - n_free - 1)\n aics.append(2.0 * (n_free + c_cor - np.max(p)))\n if len(aics) > 0:\n aics -= np.min(aics)\n aics_fin = []\n i = 0\n for b in aics_bool:\n if not b:\n aics_fin.append(None)\n else:\n aics_fin.append(aics[i])\n i += 1\n return aics_fin",
"def getRegistros(self):\n return self.__registros"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" The list containing the error images obtained at each fitting iteration.
|
def error_images(self):
template = self.fitter.template
warped_images = self.warped_images
appearances = self.appearance_reconstructions
error_images = []
for a, i in zip(appearances, warped_images):
error = a.as_vector() - i.as_vector()
error_image = template.from_vector(error)
error_images.append(error_image)
return error_images
|
[
"def error_images(self):\n return flatten_out(\n [f.error_images for f in self.algorithm_results])",
"def errors(self):\n return [thread.err for thread in self._threads]",
"def get_errors(self):\n result = []\n for error in self.errors:\n result.append(os.path.basename(error[0]) +\n ':\\n ' + str(error[1]) + '\\n')\n return result",
"def get_all_errs(self):\n thiserr = self.get_err()\n errors = []\n while thiserr != '+0,\"No error\"':\n thiserr = self.get_err()\n errors.append(thiserr)\n return errors",
"def iter_failed(self):\n for awsnexradfile in self.failed:\n yield awsnexradfile",
"def errors(self):\n return (test for test in self.tests if test.error is not None)",
"def execution_errors(self):\n return (test for test in self.tests if test.execution_error is not None)",
"def errors(self):\n return Sequence.__errors",
"def error_list(self):\n all_errors = []\n for field_name, errors in self.errors.items():\n for error in errors:\n if isinstance(error, dict) and isinstance(self[field_name], FieldList):\n for field in self[field_name].entries:\n all_errors += ['{}: {}'.format(self[field_name].label.text, sub_error)\n for sub_error in field.form.error_list]\n else:\n all_errors.append('{}: {}'.format(self[field_name].label.text, error))\n return all_errors",
"def get_error_vector(self):\n return self.yerr",
"def getFitErr(self):\n return(self.fitSum2Err)",
"def get_images(self):\n return [env.render(mode='rgb_array') for env in self.list_env]",
"def get_errors(self, value):\n return list(self.errors(value))",
"def get_validation_errors(self):\n errors = []\n try:\n self.xsd_validate()\n except ValidationError, ex:\n errors.extend(ex.errors)\n return errors",
"def getImgUrls(self):\r\n return self.ImgUrls",
"def get_all_errors(self):\n if self.state == Check.State.NOT_RUN:\n return []\n dep_errors = [set(dependency.get_all_errors()) for dependency in self._dependencies]\n return list(set.union(set(self._errors), *dep_errors))",
"def calculate_errors(self, setup, errorfunc):\n errors = np.zeros((len(self.t_matrices), 2))\n\n for i, (wall_time, t_matrix) in enumerate(self.t_matrices):\n errors[i, 0] = wall_time\n errors[i, 1] = errorfunc(setup, t_matrix)\n\n self.errors = errors",
"def get_train_error(self):\n\n\t\tavg_err = 0.0\n\n\t\tfor i in range(len(self.X_train)):\n\n\t\t\tx = np.array([self.X_train[i]])\n\t\t\ty = self.Y_train[i]\n\n\t\t\ty_ = self.model.predict(x)\n\n\t\t\terr = loss(self.il_config['loss_type'],y,y_[0])\n\n\t\t\tavg_err += err\n\n\t\treturn avg_err/float(len(self.X_train))",
"def parse_dbl_error_files(self):\r\n error_list={}\r\n file_list=[]\r\n #Get the list of error files in all folders\r\n for dir in self.error_dir_list:\r\n file_list.extend(glob.glob(dir+\"/*_{INSTANCE_ID}_*.log\"\\\r\n .format(INSTANCE_ID=self.INSTANCE_ID)))\r\n #Parse all log files\r\n for filename in file_list:\r\n filename_arr=[set(),[]]\r\n with open(filename,'r') as file:\r\n filedata=file.read().split('\\n')\r\n for line in filedata:\r\n #Table name found\r\n if line.startswith('Table '):\r\n table_name='_'.join(line.split(',')[0].split(' ')[1]\\\r\n .split('.')[1].split('_')[:-1])\r\n if table_name not in error_list:\r\n error_list[table_name]={}\r\n #Error found\r\n elif line.startswith('ORA-'):\r\n #Oracle Error found\r\n filename_arr[0].add(line)\r\n elif line.startswith('Record '):\r\n #Oracle Error found\r\n filename_arr[0].add(line.split(':')[1])\r\n #Statistics found\r\n elif 'Rows' in line:\r\n #Adding the summary of data loaded\r\n filename_arr[1].append(line)\r\n if table_name in error_list:\r\n error_list[table_name][filename]=filename_arr\r\n return error_list",
"def get_hidden_errors(self):\n return self.scores['hidden_errors']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" The number of levels of the fitter object.
|
def n_levels(self):
return self.fitter.n_levels
|
[
"def count_levels(self):\r\n lcount = 0\r\n rcount = 0\r\n if self.left:\r\n lcount = self.left.count_levels()\r\n if self.right:\r\n rcount = self.right.count_levels()\r\n return 1 + max(lcount, rcount)",
"def _get_number_of_alpha_levels(self):\n return self._number_of_alpha_levels",
"def getNumLevels(self) -> \"int\":\n return _coin.SoVRMLLOD_getNumLevels(self)",
"def get_number_of_layers(self) -> int:\n pass",
"def get_level_count(wsi) -> int:\n return wsi.resolutions[\"level_count\"] # type: ignore",
"def depth(self):\n return self._depth * 10",
"def depth(self) -> float:",
"def _windowlevel(self):\n prev = self\n count = 0\n while not prev._istoplevel:\n count = count + 1\n prev = prev._parent\n return count",
"def getNumberOfHeuristics(self) -> None:",
"def level_for_count(question_count):\n return int(math.log(questions_count + 1))",
"def get_number_of_categories(self) -> int:\n # 'unknown' not tracked anymore...\n # return len([c for c in self.node_stats.keys() if c != 'unknown'])\n return len(self.node_stats.keys())",
"def count_levels_nore(self, root):\n d = deque()\n d.append((1,root))\n maxlevels = 1\n while len(d):\n parent = d.pop()\n for child in parent[1].children:\n d.appendleft((parent[0]+1, child))\n maxlevels = max(maxlevels, parent[0])\n return maxlevels",
"def count(self):\n c = self.main_tab_widget.count()\n for child in self.child_splitters:\n c += child.count()\n return c",
"def get_numVictories(self):\r\n return GameSimulator.WON / 2",
"def get_number_of_grains(self):\n return len(self.grains)",
"def _get_knotCount(self) -> \"int\" :\n return _core.NurbsCurve2D__get_knotCount(self)",
"def n_bins(self):\n return self.num",
"def get_depth(self):\r\n check_is_fitted(self)\r\n return self.tree_.max_depth",
"def _get_knotCount(self) -> \"int\" :\n return _core.NurbsCurve3D__get_knotCount(self)",
"def get_num_features(self):\n return(len(self.modelfeatures_codebook))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" The total number of iterations used to fitter the image.
|
def n_iters(self):
n_iters = 0
for f in self.algorithm_results:
n_iters += f.n_iters
return n_iters
|
[
"def getNIterations(self):\n return self.n_iterations",
"def num_passed_iterations(self) -> int:\n\n return self._num_passed_iterations",
"def min_num_iterations():\n err = 1e6\n count = 0\n ERROR_BOUND = 1e-4\n while (err > ERROR_BOUND):\n bkp_utils = utilities.copy()\n update_utils(utilities, map_shape, map_arr, rewards, final_arr, actions, gamma)\n # calc euclidean error norm\n d = bkp_utils.flatten() - utilities.flatten()\n err = np.sqrt(np.dot(d,d)) \n count += 1\n return count",
"def nb_total_steps_per_epoch(self):\n return self.nb_training_steps_per_epoch + self.nb_validation_steps_per_epoch",
"def getNrTimesteps():\n\n timesteps = 25\n return timesteps",
"def _animationLength(self):\n # -1 because last frame is finished animation\n iterations = round(self._fps * self._animTime) - 1\n return self._fps if iterations <= 0 else iterations",
"def _compute_number_of_tiles(tile_extent, image_size, min_overlapping):\n return ceil_int(image_size * 1.0 / (tile_extent - min_overlapping + 1e-10))",
"def steps_per_revolution(self):\n return self.__stepsPerRevolution",
"def nb_training_steps_per_full_epoch(self): # pylint: disable=invalid-name\n return int(math.ceil(self.total_nb_items_training_proto / (self.batch_size * self.num_shards)))",
"def max_iterations(self) -> int:\n return self._max_epochs",
"def getNumFramesSinceUsed(self) -> \"int\":\n return _coin.SoGLImage_getNumFramesSinceUsed(self)",
"def total_number_of_cell(self) ->float:\n return self.parameters.cell_per_well * self.nb_puits",
"def _get_actual_iter_epoch_count(self, iter_count, epoch_count, dataset_size, batch_size):\n if epoch_count is not None:\n iter_count_by_epoch = (dataset_size * epoch_count) // batch_size\n if dataset_size % batch_size != 0:\n iter_count_by_epoch += 1\n if iter_count is not None:\n iter_count = min(iter_count, iter_count_by_epoch)\n else:\n iter_count = iter_count_by_epoch\n else:\n epoch_count = (iter_count * batch_size) // dataset_size\n self._iter_count = iter_count\n self._epoch_count = epoch_count",
"def num_examples_per_epoch_for_eval(self):\n\n pass",
"def __len__(self):\n length = int(np.ceil(len(self.samples) / float(self.batch_size)))\n return length",
"def scan_Ntot(self):\n return self.scan_N*self.repeat_number",
"def num_imputer(self):\n return self._num_imputer",
"def speed_count(self) -> int:\n return ATTR_MAX_FAN_STEPS",
"def iterations(N):\n i = N\n count = 1\n while i > 1:\n # print(\"\\nDrawing a sample from [0, %d]\" %(i-1))\n i = R(i)\n # print(\"Counts: \", count)\n # print(\"i: \", i)\n if i == 0:\n break\n else:\n count += 1\n return count",
"def __len__(self) -> int:\n return self.num_images"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" The final fitted shape.
|
def final_shape(self):
final_shape = self.algorithm_results[-1].final_shape
return self._affine_correction.apply(final_shape)
|
[
"def output_shape(self):\n return None",
"def shape(self) -> S:",
"def split_shape(self):\n return self.__split_shape",
"def shape(self):\n return self._input.shape",
"def build(self,input_shape):\r\n self.input_shape = input_shape\r\n return input_shape",
"def state_shape(self):\n pass",
"def shape_b(self):\r\n return self._fixture_b._shape",
"def shape_a(self):\r\n return self._fixture_a._shape",
"def cache_shape(self):\n return self.param_info.cache_shape",
"def shape(self):\n image, _ = self.datasets[0][0]\n return image.unsqueeze(dim=0)",
"def shape(self):\n vars = {self}\n fn = lambda values: values[self].shape\n links = set()\n return PartialLink(vars=vars, fn=fn, links=links)",
"def get_data_shape(self):\n raise NotImplementedError",
"def output_shape_for(self, input_shape):\n # N1, C1, W1, H1 = input_shape\n # output_shape = (N1, self.n_classes, W1, H1)\n x = input_shape\n\n # Encoder\n x = OutputShapeFor(self.convbnrelu1.cbr_unit)(x)\n x = OutputShapeFor(self.maxpool)(x)\n\n e1 = OutputShapeFor(self.encoder1)(x)\n e2 = OutputShapeFor(self.encoder2)(e1)\n e3 = OutputShapeFor(self.encoder3)(e2)\n e4 = OutputShapeFor(self.encoder4)(e3)\n\n # Decoder with Skip Connections\n d4 = OutputShapeFor(self.decoder4)(e4)\n # d4 += e3\n d3 = OutputShapeFor(self.decoder3)(d4)\n # d3 += e2\n d2 = OutputShapeFor(self.decoder2)(d3)\n # d2 += e1\n d1 = OutputShapeFor(self.decoder1)(d2)\n\n # Final Classification\n f1 = OutputShapeFor(self.finaldeconvbnrelu1)(d1)\n f2 = OutputShapeFor(self.finalconvbnrelu2)(f1)\n f3 = OutputShapeFor(self.finalconv3)(f2)\n return f3",
"def array_shape(self):\n return None",
"def this_shape(self):\n _logger.debug('%s', where_am_i())\n return self._metadata['instance']['shape']",
"def compute_output_shape(self, input_shape):\n return (\n input_shape[0],\n self.paddings[1][0] + input_shape[1] + self.paddings[1][1],\n self.paddings[2][0] + input_shape[2] + self.paddings[2][1],\n input_shape[3]\n )",
"def shape(self, new_shape):\n self.set_shape(new_shape)",
"def __setdimandshape__(self):\n # set ndim and shape\n self.ndim = len(self.fshape)\n self.shape = tuple([self.fshape[i] for i in self.order])",
"def shape(self):\n # Default behaviour is to try to evaluate the object directly\n # Try with some large y, to avoid having to unpack (slow)\n try:\n y = np.nan * np.ones((1000, 1))\n evaluated_self = self.evaluate(0, y, y, inputs=\"shape test\")\n # If that fails, fall back to calculating how big y should really be\n except ValueError:\n unpacker = pybamm.SymbolUnpacker(pybamm.StateVector)\n state_vectors_in_node = unpacker.unpack_symbol(self)\n min_y_size = max(\n max(len(x._evaluation_array) for x in state_vectors_in_node), 1\n )\n # Pick a y that won't cause RuntimeWarnings\n y = np.nan * np.ones((min_y_size, 1))\n evaluated_self = self.evaluate(0, y, y, inputs=\"shape test\")\n\n # Return shape of evaluated object\n if isinstance(evaluated_self, numbers.Number):\n return ()\n else:\n return evaluated_self.shape",
"def plt_shape(self):\n return (self.ny, self.nx)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|