query (string, lengths 9 to 9.05k) | document (string, lengths 10 to 222k) | negatives (sequence, lengths 19 to 20) | metadata (dict)
---|---|---|---|
Helper method that returns the corresponding enum value for the training algorithm in the model_metadata JSON of the selected model. | def get_training_algorithm(model_metatdata_json):
try:
training_algorithm = None
err_msg = ""
if constants.ModelMetadataKeys.TRAINING_ALGORITHM in model_metatdata_json:
training_algorithm_value = model_metatdata_json[constants.ModelMetadataKeys.TRAINING_ALGORITHM]
if constants.TrainingAlgorithms.has_member(training_algorithm_value):
training_algorithm = constants.TrainingAlgorithms[training_algorithm_value]
else:
return 2, "The training algorithm value is incorrect", ""
else:
# To handle DeepRacer models with no training_algorithm key
print("No training algorithm key in model_metadata_file. Defaulting to clipped_ppo.")
training_algorithm = constants.TrainingAlgorithms.CLIPPED_PPO.value
return 0, err_msg, training_algorithm
except Exception as exc:
return 1, f"Error while getting training algorithm model_metadata.json: {exc}", "" | [
"def model_training_mode(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"model_training_mode\")",
"def AlgorithmEnum(self):\n\n return self._GetMessage('StudySpec').AlgorithmValueValuesEnum",
"def get_model_training_status(self, name):\n crds = kubernetes.client.CustomObjectsApi()\n\n model_training = crds.get_namespaced_custom_object(*self._model_training_info, name.lower())\n print(f'Fetched model training: {model_training}')\n\n status = model_training.get('status')\n return status if status else {}",
"def current_operation(self):\n if self.device.mode == 'cool':\n return STATE_COOL\n elif self.device.mode == 'heat':\n return STATE_HEAT\n elif self.device.mode == 'range':\n return STATE_AUTO\n elif self.device.mode == 'off':\n return STATE_OFF\n else:\n return STATE_UNKNOWN",
"def get_best_model(self):\r\n return self.ml_dict.get(self.get_best_model_name()).get('model')",
"def which_model(input_csv_path : str) -> str:\n with open(input_csv_path, \"r\") as csv_file:\n\n params_reader = csv.reader(csv_file, delimiter=\";\")\n\n for key, value in params_reader:\n if key == \"model\":\n return value # string describing model e.g. \"GGNN\"\n\n raise ValueError(\"Model type not specified.\")",
"def decide_model_type(opt, model_dict):\n # decide from user input\n if opt.get('model_type'):\n return opt['model_type']\n\n # fields to check and key words that match to a model type\n check_fields = ('agent', 'title', 'path', 'description')\n key_words = {\n 'ranker': RANKER,\n 'classifier': CLASSIFIER,\n 'generator': GENERATOR,\n 'retrieval': RETRIEVER,\n 'retriever': RETRIEVER,\n }\n\n # decide from model_dict\n for key, model_type in key_words.items():\n for field in check_fields:\n if model_dict.get(field) and key in model_dict.get(field):\n return model_type",
"def get_training_op(self):\n _verif(self._training_op, \"training_op\")\n return self._training_op",
"def _get_dnn_model_framework(model):\n actual_model = model[0] if type(model) is tuple else model\n return DNNFramework.PYTORCH if hasattr(actual_model, \"named_parameters\") else DNNFramework.TENSORFLOW",
"def return_algorithm_category(algorithm_name: str) -> str:\n if algorithm_name in Api.get_signal_information():\n algo_type = PandasEnum.SIGNAL.value\n elif algorithm_name in Api.get_allocation_information():\n algo_type = PandasEnum.ALLOCATION.value\n else:\n raise NameError(\"Algorithm is not supported: \", algorithm_name)\n return algo_type",
"def predict_type(self) -> 'PreElabRDLType':\n raise NotImplementedError",
"def get_engineering_step_stage(datasets: List[str]) -> str:\n if any(_.startswith(\"validation_\") for _ in datasets):\n return \"intra_cv\"\n if any(_.startswith(\"non_train_\") for _ in datasets):\n return \"intra_cv\"\n return \"pre_cv\"",
"def current_operation(self):\n return EVO_STATE_TO_HA.get(self._status['systemModeStatus']['mode'])",
"def get_type(self):\n return FeatureType.VALUE\n # pretty sure its never interpreter type\n # TODO: think about that",
"def model_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"model_type\")",
"def model(self):\n if self._mode == 'train':\n if self.training_models:\n return self.training_models[0]\n else:\n if self.models:\n return self.models[0]",
"def get_alg_name(self, algorithm):\n if algorithm == AlgorithmEnum.BINARY_EXP:\n return \"BinExp\"\n if algorithm == AlgorithmEnum.ALOHA:\n return \"Aloha\"\n if algorithm == AlgorithmEnum.ADAPTIVE_ALOHA:\n return \"AdaptiveAloha\"\n if algorithm == AlgorithmEnum.INTERVAL_BINARY_EXP:\n return \"IntervalBinExp\"",
"def getEnumNameFromValue(value):\n for t, tobj in list(TypeDef.typeDict.items()):\n if tobj.objtype in {s_ENUM, s_KERNEL}:\n n = tobj.getLabel(value)\n if n is not None:\n return t, n\n return None, None",
"def get_tune_mode(self, json_info):\n tune_mode = json_info[\"SocInfo\"][\"autoTilingMode\"]\n if self.offline_tune:\n tune_mode = \"RL\"\n return tune_mode"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Produce the root of the tree in a "dict" state. hashname is one of the names in hashlib.algorithms. sequence_class is the type of PRNG to be returned by .sequence(). The default is treeprng.Hash_PRNG using hashname. | def __init__(self, hashname="sha1", sequence_class=None):
self.hashname = hashname
self.sequence_class = sequence_class
self.__hash = hashlib.new(hashname)
self.is_dict = True # The root is always a dict. | [
"def generate_tree(\n g: \"Generator\",\n assembly: str,\n class_name: str,\n namespace: str,\n unity_version=[2018, 4, 3, 1],\n) -> Dict[str, Dict]:\n # C# System\n from System import Array\n\n unity_version_cs = Array[int](unity_version)\n\n # fetch all type definitions\n def_iter = g.getTypeDefs(assembly, class_name, namespace)\n\n # create the nodes\n trees = {}\n for d in def_iter:\n try:\n nodes = g.convertToTypeTreeNodes(d, unity_version_cs)\n except Exception as e:\n # print(d.Name, e)\n continue\n trees[d.Name] = [\n {\n \"level\" : node.m_Level,\n \"type\" : node.m_Type,\n \"name\" : node.m_Name,\n \"meta_flag\" : node.m_MetaFlag,\n }\n for node in nodes\n ]\n return trees",
"def sequence(self, prng_class=None):\n self.__check_state(\"sequence\")\n seed = long(self.__hash.hexdigest(), 16)\n self.__hash = None # Spent.\n prng_class = prng_class or self.sequence_class\n if prng_class:\n return prng_class(seed)\n else:\n return Hash_PRNG(seed, hashname=self.hashname)",
"def genDict(self):\n outputDict = {}\n for child in self.children:\n if child.children:\n outputDict[child.instruction] = child.genDict()\n else:\n outputDict[child.instruction] = \"leaf\"\n\n return outputDict",
"def generate_random_tree(self):\n # Starting random generators and parameter arrays\n tree_rand = np.random\n tree_rand.seed(self.random_tree_seed)\n nominal_att_candidates = array('i')\n min_numeric_vals = array('d')\n max_numeric_vals = array('d')\n\n for i in range(self.num_numerical_attributes):\n min_numeric_vals.append(0.0)\n max_numeric_vals.append(1.0)\n\n for i in range(self.num_numerical_attributes + self.num_nominal_attributes):\n nominal_att_candidates.append(i)\n\n self.tree_root = self.generate_random_tree_node(0, nominal_att_candidates, min_numeric_vals, max_numeric_vals, tree_rand)",
"def __init__(self, list_of_sequences=[], label=[]):\n self.nodes = set([])\n self.nodes_label = {}\n self.node_children_dict = {}\n self.node_parent_dict = {}\n self.node_sequence_dict = {}\n self.node_visits_dict = {}\n self.node_full_sequence_dict = {}\n self.list_of_sequences = list_of_sequences\n self.number_of_objects = len(list_of_sequences)\n self.number_of_classes = len(set(label))\n self.label = label\n\n nodes = set([])\n structure = {}\n dict_seq = {}\n dict_num = {}\n dict_prev = {}\n dict_all_seq = {}\n\n num_of_sequence = 0\n # len_of_seq = len(self.list_of_sequences)\n free_node = 1\n seq1 = self.list_of_sequences[0]\n nodes.add(0)\n current_node = 0\n dict_all_seq[0] = []\n\n for i in seq1:\n structure[current_node] = [free_node]\n structure[free_node] = []\n dict_prev[free_node] = current_node\n\n str_seq = [elem for elem in i]\n dict_all_seq[free_node] = dict_all_seq[current_node][:]\n dict_all_seq[free_node].append(str_seq)\n\n if type(str_seq) == list:\n dict_seq[free_node] = str_seq\n else:\n dict_seq[free_node] = [str_seq]\n # dict_num[free_node] = 1\n dict_num[free_node] = [0 for i in range(self.number_of_classes)]\n dict_num[free_node][self.label[num_of_sequence]] = 1\n current_node = free_node\n free_node += 1\n\n for seq in self.list_of_sequences[1:]:\n num_of_sequence += 1\n current_node = 0\n for elem in seq:\n str_seq = [i for i in elem]\n if len(structure[current_node]) > 0:\n temp_seq = [dict_seq[son] for son in structure[current_node]]\n flag = str_seq in temp_seq\n if flag:\n number = 0\n while temp_seq[number] != str_seq:\n number += 1\n current_node = structure[current_node][number]\n # dict_num[current_node] += 1\n dict_num[current_node][self.label[num_of_sequence]] += 1\n else:\n structure[current_node].append(free_node)\n dict_prev[free_node] = current_node\n dict_all_seq[free_node] = dict_all_seq[current_node][:]\n dict_all_seq[free_node].append(str_seq)\n current_node = free_node\n structure[current_node] = []\n # dict_num[current_node] = 1\n dict_num[current_node] = [0 for i in range(self.number_of_classes)]\n dict_num[current_node][self.label[num_of_sequence]] = 1\n dict_seq[current_node] = str_seq\n free_node += 1\n else:\n structure[current_node].append(free_node)\n dict_prev[free_node] = current_node\n dict_all_seq[free_node] = dict_all_seq[current_node][:]\n dict_all_seq[free_node].append(str_seq)\n current_node = free_node\n structure[current_node] = []\n # dict_num[current_node] = 1\n dict_num[current_node] = [0 for i in range(self.number_of_classes)]\n dict_num[current_node][self.label[num_of_sequence]] = 1\n dict_seq[current_node] = str_seq\n free_node += 1\n\n self.nodes = nodes\n self.node_children_dict = structure\n self.node_sequence_dict = dict_seq\n self.node_visits_dict = dict_num\n self.node_parent_dict = dict_prev\n self.node_full_sequence_dict = dict_all_seq\n\n dic_all_seq_rev = {str(v): k for k, v in self.node_full_sequence_dict.items()}\n self.node_full_sequence_dict_reversed = dic_all_seq_rev",
"def hash(self):\n assert self.__hash, \\\n \"Tried to use hash() after spent. See:\\n\" \\\n + TREEPRNG_DOC_URL + \"#the-treeprng-life-cycle\"\n hash = self.__hash.copy()\n hash.update(\"h\")\n self.is_dict = True\n return long(hash.hexdigest(), 16)",
"def random_genesis() -> dict:\n\treturn {'alloc': {},\n\t\t\t'mixhash': '0x0000000000000000000000000000000000000000000000000000000000000000',\n\t\t\t'extraData': '0x' + randhex(32),\n\t\t\t'difficulty': '0x1',\n\t\t\t'parentHash': '0x0000000000000000000000000000000000000000000000000000000000000000',\n\t\t\t'coinbase': '0x0000000000000000000000000000000000000000',\n\t\t\t'nonce': '0x' + randhex(8),\n\t\t\t'gasLimit': '0x16e360',\n\t\t\t'timestamp': '0x00'}",
"def __init__(self, root, node_dict):\n self.node_dict = node_dict\n self.root = self.make_tree(root)\n self.nodes = {}\n self.leaves_mapping = {}\n self.fill_parents()\n self.n_leaves = None",
"def input_tree(self):\n\n if self.starttreename:\n if self.starttreename[-3:] == 'xml':\n self.starttree = Phylo.read(self.starttreename, \"phyloxml\")\n elif self.starttreename[-6:] == 'newick':\n self.starttree = Phylo.read(self.starttreename, \"newick\")\n\n print \"Generating phylogenetic tree...\"\n\n if self.treetype[-3:] == 'xml':\n self.tree = Phylo.read(self.treetype, \"phyloxml\")\n elif self.treetype[-3:] == 'nwk':\n self.tree = Phylo.read(self.treetype, \"newick\")\n elif self.treetype == 'pars':\n self.parsimony_tree()\n elif self.treetype == 'PhyML':\n self.phyml_tree()\n else:\n self.raxml_tree()\n\n self.tree.collapse_all(lambda c: c.branch_length <= 0.0)\n self.treeparents = self.all_parents(self.tree)\n for btree in self.btrees:\n btree.collapse_all(lambda c: c.branch_length <= 0.0)\n self.btreeparents.append(self.all_parents(btree))",
"def _generate_tree(self, config, comment):\n # list to store keys in the created tree\n keys = set()\n # add starting value\n keys.add(\"\")\n # create container for the tree\n maintree = self.tree_container(comment=comment)\n tree = maintree\n parent = None\n # parse defaults\n for entry in parse(config):\n if entry.value is IS_BLOCK:\n keys.add(entry.name)\n parent = entry.name\n tree = self._create_block(maintree, entry)\n continue\n tree[entry.name] = self.leaf_from_string(entry, parent=parent)\n return maintree, keys",
"def generate_tree(self):\n tree = bt.Sequence()\n dead_or_alive_fallback = bt.FallBack()\n dead_or_alive_fallback.add_child(self.IsAlive(self))\n dead_or_alive_fallback.add_child(self.Die(self))\n\n flood_fallback = bt.FallBack()\n flood_fallback.add_child(self.IsNotFlooded(self))\n flood_fallback.add_child(self.Flood(self))\n\n reproduce_sequence = bt.Sequence()\n reproduce_sequence.add_child(self.CanReproduce(self))\n reproduce_sequence.add_child(self.Reproduce(self))\n\n tree.add_child(dead_or_alive_fallback)\n tree.add_child(flood_fallback)\n tree.add_child(self.Grow(self))\n tree.add_child(reproduce_sequence)\n tree.add_child(self.MoveWater(self))\n return tree",
"def get_proof_generator(self, tx_id: AnyStr, signature_type: AnyStr, chain_name: AnyStr) -> Dict:\n root = ensure_string(self.tree.get_merkle_root())\n node_count = len(self.tree.leaves)\n for index in range(0, node_count):\n proof = self.tree.get_proof(index)\n proof2 = []\n\n for p in proof:\n dict2 = dict()\n for key, value in p.items():\n dict2[key] = ensure_string(value)\n proof2.append(dict2)\n target_hash = ensure_string(self.tree.get_leaf(index))\n merkle_proof = {\n \"type\": ['MerkleProof2017', 'Extension'],\n \"merkleRoot\": root,\n \"targetHash\": target_hash,\n \"proof\": proof2,\n \"anchors\": [{\n \"sourceId\": tx_id,\n \"type\": signature_type,\n \"chain\": chain_name\n }]}\n yield merkle_proof",
"def __generate_tree(word, data=None, index=0, node=None):\n current_chr = word[index]\n\n current_word = None\n if len(word) == index + 1:\n current_word = word\n\n is_child_node_exists = node.get_child_node(current_chr)\n\n if is_child_node_exists:\n child_node = is_child_node_exists\n\n child_node.add_word(current_word, data)\n else:\n child_node = Node(current_chr, word=current_word, parent=node, data=data)\n node.add_child(child_node)\n\n if len(word) > index + 1:\n DictionaryGenerator.__generate_tree(word, data, index=index + 1, node=child_node)",
"def generate_lexical_tree(word_list):\n lexical_tree = {}\n generate_sub_lexical_tree(lexical_tree, word_list, 0)\n return lexical_tree",
"def _create_tree(self, nodes):\n\n if len(nodes) == 1:\n self.root = nodes[0].data\n return self.root\n\n next_level = len(nodes)\n tree_level = []\n for i in range(0, next_level, 2):\n combined = sha3(nodes[i].data + nodes[i + 1].data)\n next_node = Node(combined, nodes[i], nodes[i + 1])\n tree_level.append(next_node)\n\n self.tree.append(tree_level)\n self._create_tree(tree_level)",
"def make_leaf(self, names_so_far, hashtype):\n\n while True:\n name = self.rng.next_file_name(8)\n if name not in names_so_far:\n names_so_far.add(name)\n break\n nnn = self.rng.some_bytes(8) # 8 quasi-random bytes\n if hashtype == HashTypes.SHA1:\n sha = hashlib.sha1()\n elif hashtype == HashTypes.SHA2:\n sha = hashlib.sha256()\n elif hashtype == HashTypes.SHA3:\n sha = hashlib.sha3_256()\n sha.update(nnn)\n return NLHLeaf(name, sha.digest(), hashtype)",
"def merkle_root(hashes):\n # current level starts as hashes\n current_level = hashes\n # loop until there's exactly 1 element\n while len(current_level) > 1:\n # current level becomes the merkle parent level\n current_level = merkle_parent_level(current_level)\n # return the 1st item of current_level\n return current_level[0]",
"def root_generator(folder, build, variant=\"common\"):\n #Priv specific\n privx = \"bbfoundation/hashfiles_priv/{0}\".format(folder[variant])\n #DTEK50 specific\n dtek50x = \"bbSupport/DTEK50\" if build[:3] == \"AAF\" else \"bbfoundation/hashfiles_priv/dtek50\"\n #DTEK60 specific\n dtek60x = dtek50x # still uses dtek50 folder, for some reason\n #Pack it up\n roots = {\"Priv\": privx, \"DTEK50\": dtek50x, \"DTEK60\": dtek60x}\n return roots",
"def hashable_state(self):\n if self._hashable_state is None:\n state = OrderedDict()\n state['name'] = self.name\n state['edges_hash'] = self.edges_hash\n state['is_log'] = self.is_log\n state['is_lin'] = self.is_lin\n state['bin_names'] = self.bin_names\n self._hashable_state = state\n return self._hashable_state"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
t.__getitem__(key), i.e. t[key]. Given a TreePRNG t, t[key] creates an uncommitted daughter TreePRNG object. This commits t to be a dict, but otherwise doesn't change t. key can be any picklable object, but if you want repeatability across runs of a program, see help(pickle_key). | def __getitem__(self, key):
assert self.__hash, \
"Tried to use as a dict after spent. See:\n" \
+ TREEPRNG_DOC_URL + "#the-treeprng-life-cycle"
self.is_dict = True
child = copy.copy(self)
child.__hash = self.__hash.copy()
child.is_dict = False
child.__hash.update("k" + pickle_key(key))
return child | [
"def __getitem__(self, key):\n if self.vec is None:\n return None\n\n if type(key) == int or type(key) == slice:\n return self.vec[key]\n return P4Node({'node_type': '<vec>'}, [node for node in self.vec if type(node) is P4Node if node.node_type == key])",
"def __getitem__(self, pbft_public_key):\n # Get the PBFT key state from the underlying LMDB. The only catch is\n # that the data was stored using cbor.dumps(). When this happens, it\n # gets stored as a list not a namedtuple. When re-creating the bgt\n # key state we are going to leverage the namedtuple's _make method.\n try:\n pbft_key_state = \\\n PbftKeyState._make(self._store_db[pbft_public_key])\n except TypeError: # handle keys persisted using sawtooth v1.0.1\n try:\n old_key_state = self._store_db[pbft_public_key]\n old_key_state.append('UNKNOWN_NONCE')\n pbft_key_state = PbftKeyState._make(old_key_state)\n except (AttributeError, TypeError) as error:\n raise ValueError('pbft_key_state is invalid: {}'.format(error))\n except (AttributeError, ValueError) as error:\n raise ValueError('pbft_key_state is invalid: {}'.format(error))\n\n PbftKeyStateStore._check_pbft_key_state(pbft_key_state)\n return pbft_key_state",
"def __getitem__(self,key):\n val = self.data[key][1]\n self.data[key] = (time.time(),val)\n return val",
"def __getitem__(self, key):\n self.s = self.s[self.ofs:]\n self.ofs = 0\n return self.s[key]",
"def __getitem__(self, key):\n if key in self._lazyload:\n value = self._lazyload[key]()\n self._dic[key] = value\n del self._lazyload[key]\n return self._dic[key]",
"def test_getitem(self):\n r = self.TreeRoot\n n = self.TreeNode\n assert r[0] is n['b']\n items = n['c'][0:1]\n self.assertEqual(len(items), 1)\n assert items[0] is n['d']\n items = n['c'][0:2]\n self.assertEqual(len(items), 2)\n assert items[0] is n['d']\n assert items[1] is n['e']\n items = n['c'][:]\n self.assertEqual(len(items), 3)\n assert items[0] is n['d']\n assert items[-1] is n['f']",
"def __getitem__(self, key):\n self.open_hdu(checksum=self.checksum)\n return self.hdu[key]",
"def __getitem__(self, key):\n # BEGIN SOLUTION\n # current version:\n cur = self.root_versions[-1]\n\n # find element\n def find(t, x):\n # if None, so not there, return False\n if not t:\n return False\n # if val equals x, then returns true.\n if t.val == x:\n return True\n # if val is grater then key, then get left.\n if t.val > x:\n return find(t.left, x)\n # if val is less then key, then get right.\n if t.val < x:\n return find(t.right, x)\n\n # result of find\n result = find(cur, key)\n\n if result:\n return key\n else:\n raise KeyError\n\n # END SOLUTION",
"def test_valid_getitem_with_keys(self):\n\n proxy = self.test_construct_keys()\n assert proxy['one'] == 1\n assert proxy['two'] == 2",
"def __getitem__( self, key ):\n return self.read( key=key, default=None, raiseOnError=True )",
"def __getitem__(self, key):\n hash_val = self._hash(key)\n if self.table[hash_val] != self.defVal and (isinstance(self.table[hash_val], tuple) and \n self.table[hash_val][0] == key and\n self.table[hash_val][2] == True):\n return self.table[hash_val][1]\n else:\n key_found = False\n iter_count = 0\n while not key_found:\n if hash_val >= self.capacity:\n hash_val = 0\n if self.table[hash_val] == self.defVal:\n \tbreak\n if self.table[hash_val][0] == key:\n if self.table[hash_val][2] == True:\n return self.table[hash_val][1]\n hash_val += 1\n iter_count += 1\n return self.defVal",
"def ImmutableSortedDictionary(*args, **kwargs): # real signature unknown\r\n pass",
"def test_getitem_setitem_not_implemented():",
"def __getitem__(self, *args) -> \"tid_t\":\n return _ida_pro.tid_array___getitem__(self, *args)",
"def __getitem__(self,n):\n return TreeNode(self._value[n],self.tree)",
"def __init__(self):\n self.T = dict()",
"def safe_insert(key, value, my_dict):\r\n return",
"def p_7():\n\n # Add code here\n def dict_copies(my_dict, num_copies):\n \"\"\"\n Given a dictionary my_dict and an integer num_copies,\n returns a list consisting of num_copies copies of my_dict.\n \"\"\"\n list_copies =[]\n for copy in range(num_copies):\n list_copies.append(dict(my_dict))\n\n return list_copies\n\n # Tests\n print(dict_copies({}, 0))\n print(dict_copies({}, 1))\n print(dict_copies({}, 2))\n\n test_dict = dict_copies({'a': 1, 'b': 2}, 2)\n print(test_dict)\n\n # Check for reference problem\n test_dict[1][\"a\"] = 3\n print(test_dict)\n return 0",
"def special_key_assignment(self):\n raise NotImplementedError"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a PRNG seeded from this TreePRNG object. prng_class is an optional random.Random subclass; the default is self.sequence_class (see __init__()). self becomes spent. | def sequence(self, prng_class=None):
self.__check_state("sequence")
seed = long(self.__hash.hexdigest(), 16)
self.__hash = None # Spent.
prng_class = prng_class or self.sequence_class
if prng_class:
return prng_class(seed)
else:
return Hash_PRNG(seed, hashname=self.hashname) | [
"def get_prng(seed=None):\n if seed is not None and not (isinstance(seed, numbers.Integral) and seed >= 0):\n raise ValueError('Seed must be a non-negative integer or omitted, not {}'.format(seed))\n\n prng = np.random.RandomState()\n seed = create_seed(seed)\n seed = _int_list_from_bigint(hash_seed(seed))\n prng.seed(seed)\n return prng",
"def rng(self):\n if self._rng is None:\n # One-time initialization from backend-neutral seed int.\n self._rng = fastmath.random.get_prng(self._rng_seed_int)\n return self._rng",
"def rng(self):\n\n if self._rng is None:\n u = uuid.uuid3(self._simulus.namespace, self.name)\n self._rng = random.Random(int(u.int/2**32))\n return self._rng",
"def _rand_class_num(self):\n res = []\n letters = ''\n num_groups = r.choice(self.class_num_of_letter_groups)\n for i in range(num_groups):\n if i == 0:\n letters += r.choice(self.class_alpha_start)\n else:\n letters += r.choice(self.alpha)\n res.append(letters)\n\n whole_num_size = r.choice(self.class_whole_length)\n num = str(r.randint((whole_num_size - 1) * 10, whole_num_size * 10 - 1))\n res.append(num)\n\n if r.choice([True, False]):\n frac = '%.2f' % r.random() # 0.0 - 1.0\n res.append(frac)\n\n return res",
"def get_rng(self, instance: Instance, seed: Optional[int] = None) -> Random:\n assert instance.id is not None\n # If seed exists, use it as part of the random seed\n return Random(instance.id if seed is None else str(seed) + instance.id)",
"def get_rng(random_state):\n if random_state is None:\n return np.random.mtrand._rand\n elif isinstance(random_state, (numbers.Integral, np.integer)):\n return np.random.RandomState(random_state)\n if isinstance(random_state, np.random.RandomState):\n return random_state\n raise ValueError('Wrong random state. Expecting None, an int or a numpy '\n 'RandomState instance, got a '\n '{}'.format(type(random_state)))",
"def generate_person(self, cls=Person):\n # choose a sex\n sex = self.choose_sex()\n # choose a name\n name = self.choose_name(sex)\n # sample age\n age = int(cls.age_distrib_func(*cls.age_distrib_args))\n # sample height\n height = cls.height_distrib_func(*cls.height_distrib_args)\n \n return Person(name, sex, age, height)",
"def generate_random_tree(self):\n # Starting random generators and parameter arrays\n tree_rand = np.random\n tree_rand.seed(self.random_tree_seed)\n nominal_att_candidates = array('i')\n min_numeric_vals = array('d')\n max_numeric_vals = array('d')\n\n for i in range(self.num_numerical_attributes):\n min_numeric_vals.append(0.0)\n max_numeric_vals.append(1.0)\n\n for i in range(self.num_numerical_attributes + self.num_nominal_attributes):\n nominal_att_candidates.append(i)\n\n self.tree_root = self.generate_random_tree_node(0, nominal_att_candidates, min_numeric_vals, max_numeric_vals, tree_rand)",
"def generate_random_node(self):\n if np.random.random_sample() > self.goal_sample_rate:\n x = np.random.uniform(self.sample_space.x_min, self.sample_space.x_max)\n y = np.random.uniform(self.sample_space.y_min, self.sample_space.y_max)\n z = np.random.uniform(self.sample_space.z_min, self.sample_space.z_max)\n node = Node(np.array((x, y, z)))\n else:\n node = self.goal_node\n\n return node",
"def preferred_rng(self):\n return self._preferred_rng",
"def nativeRNG_pick(n, rng, distribution='uniform', parameters=[0, 1]):\n native_rng = h.Random(0 or rng.seed)\n rarr = [getattr(native_rng, distribution)(*parameters)]\n rarr.extend([native_rng.repick() for j in xrange(n - 1)])\n return numpy.array(rarr)",
"def __get_next_random(self, rand_seq):\n if rand_seq is not None:\n return rand_seq.pop(0)\n else:\n return random.random()",
"def __init__(self):\n self.prob_heads = random()",
"def get_seed(self):\n return self.rseed",
"def random_class_priors(dataset: Dataset, *, rng: np.random.RandomState) -> ClassPriors:\n return dict(zip(dataset.classes, rng.dirichlet([1] * len(dataset.classes))))",
"def get_random(self):\n index = randrange(self.size)\n return self.individuals[index]",
"def get_random_generation_strategy(self) -> GenerationStrategy:\n\n generation_strategy = get_generation_strategy(with_callable_model_kwarg=False)\n gs_name = \"\".join(random.choice(string.ascii_letters) for i in range(8))\n generation_strategy._name = gs_name\n return generation_strategy",
"def generate_single_sample(self):\n self.reset_assignments()\n for node in self.all_nodes:\n node.assign_new_random_value_based_on_parent()\n # for node -ends\n\n return self.current_assignment_comb()",
"def randomize(self):\n self._pk = self._generate_pk(self.alpha, self.k, self.zero)\n self._entropy = None\n return self"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
hash() can be used on an uncommitted or dict TreePRNG. It commits self to be a dict. | def hash(self):
assert self.__hash, \
"Tried to use hash() after spent. See:\n" \
+ TREEPRNG_DOC_URL + "#the-treeprng-life-cycle"
hash = self.__hash.copy()
hash.update("h")
self.is_dict = True
return long(hash.hexdigest(), 16) | [
"def __hash__(self) -> hash:\n if self.empty:\n return hash(())\n else:\n return hash((self.data, self.left, self.right))",
"def __hash__(self):\n # see if there is an available hash value\n # if you are seeing cache bugs this is the thing\n # to try eliminating because it is very likely that\n # someone somewhere is modifying the data without\n # setting `self._hash = None`\n hashed = getattr(self, '_hash', None)\n if hashed is not None:\n return hashed\n\n hashed = hash_fast(\n (''.join(str(hash(k)) + v.get('geometry', '')\n for k, v in self.edge_data.items()) +\n ''.join(str(k) + v.get('geometry', '')\n for k, v in self.node_data.items())).encode('utf-8') +\n b''.join(v['matrix'].tobytes()\n for v in self.edge_data.values()\n if 'matrix' in v))\n self._hash = hashed\n return hashed",
"def get_hash(self, descriptor):",
"def __hash__(self) -> int:\n # Return the Python hash of the cryptographic hash.\n return hash(self.__event_hash__)",
"def __hash__(self):",
"def hash(self, recalc=False):\n if self._hash is None or recalc is True:\n hsh = hashlib.sha256(json.dumps(self.diff_dicts)).hexdigest()\n self._hash = hsh\n if recalc:\n # check matching hash\n assert hsh == self._hash\n return self._hash",
"def __hash__(self):\n return hash(self.__dn__)",
"def __get_current_hash__(self):\n hasher = hashlib.sha256()\n hasher.update(self.previous_hash.encode() + self.data.encode())\n return hasher.hexdigest()",
"def __hash__(self):\n return hash((self.bike.public_key, self.remote))",
"def hash(self):\n if not self.__hash:\n self.__hash = compute_file_hash(str(self.value))\n return self.__hash",
"def contents_hash(self):\n # type: () -> int\n if self._hash is None:\n self._hash = hash(tuple(self.items()))\n return self._hash",
"def _hash(self: Syscall) -> int:\n return hash(self.name)",
"def hash(self):\n return hash(\n (\n self.uid,\n self.status,\n self.runtime_status,\n self.env_status,\n tuple(entry.hash for entry in self.entries),\n tuple(entry[\"uid\"] for entry in self.logs),\n )\n )",
"def hashring(self):\n return",
"def __hash__(self):\n\n return hash(\n (self.__class__, ) + self._defining_values\n )",
"def __hash__(self) -> \"size_t\":\n return _coin.SoBase___hash__(self)",
"def __hash__(self):\n return hash((self.__class__, self.line))",
"def __hash__(self):\r\n # We inherit object.__hash__, so we must deny this explicitly\r\n raise TypeError(\"Cannot hash a Context.\")",
"def hash_field(self):\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If hasattr(seed, "hexdigest"), assume seed is a hashlib object that no one will update(); otherwise, create a hashlib.new(self.hashname) object and update it with the pickled seed. | def seed(self, seed):
if hasattr(seed, "hexdigest"):
self.base_hash = seed
else:
self.base_hash = hashlib.new(self.hashname)
self.base_hash.update("s" + pickle_key(seed))
# Note: this is the digest of the base_hash itself, while later
# chunks of bits (if any) are based on updated copies. That's okay,
# digest(base) doesn't let you predict digest(base + morebytes).
self.bits = long(self.base_hash.hexdigest(), 16)
self.nbits = self.base_hash.digest_size * 8
self.i = 1 | [
"def setseed(self, seed):\n self.hashseed = seed\n if not os.environ.get('PYTHONHASHSEED'):\n os.environ['PYTHONHASHSEED'] = str(seed)\n os.execv(sys.executable, ['python3'] + sys.argv)",
"def __hash__(self):\n # see if there is an available hash value\n # if you are seeing cache bugs this is the thing\n # to try eliminating because it is very likely that\n # someone somewhere is modifying the data without\n # setting `self._hash = None`\n hashed = getattr(self, '_hash', None)\n if hashed is not None:\n return hashed\n\n hashed = hash_fast(\n (''.join(str(hash(k)) + v.get('geometry', '')\n for k, v in self.edge_data.items()) +\n ''.join(str(k) + v.get('geometry', '')\n for k, v in self.node_data.items())).encode('utf-8') +\n b''.join(v['matrix'].tobytes()\n for v in self.edge_data.values()\n if 'matrix' in v))\n self._hash = hashed\n return hashed",
"def __init__(self):\n super(SHA1Hasher, self).__init__()\n self._sha1_context = hashlib.sha1()",
"def __hash_new(name, data=b'', **kwargs):\n try:\n return _hashlib.new(name, data)\n except ValueError:\n # If the _hashlib module (Mbedtls) doesn't support the named\n # hash, try using our builtin implementations.\n # This allows for SHA224/256 and SHA384/512 support even though\n # the Mbedtls library prior to 0.9.8 doesn't provide them.\n return __get_builtin_constructor(name)(data)",
"def update_random_seed(self):\n iseed = self.run_card['iseed']\n if iseed == 0:\n randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit'))\n iseed = int(randinit.read()[2:]) + 1\n randinit.close()\n randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit'), 'w')\n randinit.write('r=%d' % iseed)\n randinit.close()",
"def make_seed(self):\n hash_digest = self.Hash(self.name.encode()).digest()\n hash_int = int.from_bytes(hash_digest, 'big')\n return hash_int % 1e7 + self.n_turns + 1e3 * self.game + self.seed",
"def hash_combine(seed, hashed) -> int:\n seed ^= hashed + 0x9e3779b9 + (seed << 6) + (seed >> 2)\n return seed",
"def __get_current_hash__(self):\n hasher = hashlib.sha256()\n hasher.update(self.previous_hash.encode() + self.data.encode())\n return hasher.hexdigest()",
"def _update_hash_and_blob(self):\n self._blob = self._serialize_data()\n cp = self._blob\n self._key = hashlib.sha512(cp).hexdigest()",
"def worker_init_fn(worker_id):\n np.random.seed(args.seed + worker_id)\n random.seed(args.seed + worker_id)",
"def init(self,):\r\n self.random_seed_ = self.random_state\r\n self.random_state_ = check_random_state(self.random_seed_)\r\n return self",
"def __hash__(self):\n\n return hash(\n (self.__class__, ) + self._defining_values\n )",
"def set_seed(self, random_seed=None):\n if random_seed is None:\n self.cnt[0] += 1\n if self.cnt[1] > 0 and self.cnt[0] % self.cnt[1] == 0:\n if self.cnt[0] <= 0 or self.cnt[1] <= 1:\n if not hasattr(self, \"rng\"):\n self.rng = default_rng(choice(1e18, 1).item())\n self.random_seed[0] = self.rng.integers(1e18)\n if self.cnt[1] > 1:\n self.random_seed[1] = self.rng.integers(1e18)\n else:\n self.random_seed[0] = self.random_seed[1]\n self.random_seed[1] = self.rng.integers(1e18)\n else:\n self.rng = default_rng(random_seed)\n self.random_seed[0] = self.rng.integers(1e18)\n if self.cnt[1] > 1:\n self.random_seed[1] = self.rng.integers(1e18)\n self.cnt[0] = 0",
"def __check_hash__(self) -> None:\n state = self.__dict__.copy()\n event_hash = state.pop(\"__event_hash__\")\n method_name = state.get(\"__event_hash_method_name__\", \"__hash_object_v1__\")\n hash_method = getattr(self, method_name)\n if event_hash != hash_method(state):\n raise EventHashError()",
"def __init__(self, seed=0):\n if isinstance(seed, (str, unicode)):\n seed = int(float(seed))\n self.__c_obj = llbc.inl.NewRandom(seed)",
"def seed(self, a):\n assert(len(a) == 6)\n self._current_seed = a\n super().seed(a)",
"def __hash__(self) -> int:\n # Return the Python hash of the cryptographic hash.\n return hash(self.__event_hash__)",
"def __hash__(self):\n return hash(self.piece_identification)",
"def _hash(data):\r\n hash_algo = hashlib.new('md5')\r\n hash_algo.update(pickle.dumps(data))\r\n # prefix allows possibility of multiple applications\r\n # sharing same keyspace\r\n return 'esi_' + hash_algo.hexdigest()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a shortened UUID or the original name. | def to_humanreadable_name(name: str) -> str:
return name[:8] if (is_valid_uuid(name) is True) else name | [
"def get_uuid(self): # real signature unknown; restored from __doc__\n return \"\"",
"def get_uuid():\n return str(UUID(int=random.randint(0, 2**128 - 1))) # nosec",
"def get_uuid(limit=10):\n uuid_sample = str(uuid.uuid4()).replace('-', '')\n if limit and limit <= len(uuid_sample):\n return (uuid_sample[:limit]).upper()\n return uuid_sample.upper()",
"def getShortName(id):",
"def new_uuid(length=25):\n letters = [random.choice(string.hexdigits) for _ in range(length)]\n return ''.join(letters).lower()",
"def canonicalize_UUID(a_uuid):\n return str(uuid.UUID(a_uuid.replace(':', '')))",
"def shorter_uuid(length=7, starter=None, with_original=False):\n original_id = str(shortuuid.uuid()) if starter is None else starter\n n = len(original_id)\n dx = min(length, len(original_id)) # ID length\n if starter is not None and len(starter) < dx * 2:\n original_id = str(shortuuid.uuid())\n\n start_point = random.randint(0, n - dx)\n shorter_id = original_id[start_point:(start_point + dx)]\n\n return shorter_id if not with_original else [shorter_id, original_id]",
"def _generate_shortname(cls):\n return ''.join([cls.letters[random.randrange(0, cls.num_letters)] for idx in range(0, cls.SHORTNAME_LEN)])",
"def short_name(self):\n return str(self._fund_id)",
"def shortname(self):\n return '%s-%s' % (self.product, self.version)",
"def get_uuid(cls, file_name):\n return ''.join(file_name.split('.')[0].split('-'))",
"def get_uuid5(*names) -> str:\n return str(uuid.uuid5(uuid.NAMESPACE_DNS, ''.join(list(names))))",
"def gen_uuid():\n return str(uuid.uuid1().hex)",
"def short_name(self):\n return self._name",
"def short_name(self):\n ret = self._get_attr(\"shortName\")\n return ret",
"def _gen_uuid(self):\r\n return uuid.uuid4().hex",
"def _build_service_id(name):\n\n md5_hash = hashlib.md5(name.encode('utf-8')) # nosec\n ns = uuid.UUID(md5_hash.hexdigest())\n return str(uuid.uuid5(ns, socket.getfqdn()))",
"def unique_id() -> str:\n return \"unique-id\"",
"def online_uuid_to_name(uuid):\n api_reply = requests.get('https://api.mojang.com/user/profile/%s' % uuid)\n try:\n reply_json = json.loads(api_reply.content)\n except:\n return None\n \n if 'name' in reply_json:\n return reply_json['name']\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse user input. Uses parse_bool() to partially return Boolean and None values; all other types are returned as-is. >>> parse_user_input("YES") True >>> parse_user_input("false") False >>> parse_user_input("notfalse") 'notfalse' >>> parse_user_input(8.4) 8.4 >>> parse_user_input(dict(ioc=123)) | def parse_user_input(
data: typing.Optional[typing.Union[str, bool]]
) -> typing.Optional[typing.Union[str, bool]]:
try:
return parse_bool(data)
except TypeError:
pass
try:
parse_none(data)
return None
except TypeError:
pass
return data | [
"def userinput():\r\n var_dict = {}\r\n print('Input the variable names and their associated truth values \\n'\r\n ' Use the format: Letter:Truth Value \\n'\r\n ' Use T for True and F for False \\n'\r\n ' Example: P:F \\n'\r\n ' After each variable press return \\n'\r\n ' Once finished type \"end\"')\r\n continueLoop = True\r\n while continueLoop == True:\r\n userin = input()\r\n if userin == 'end':\r\n continueLoop = False\r\n else:\r\n splituserin = userin.split(':')\r\n if len(userin) != 3:\r\n print('Incorrect format, try again')\r\n elif splituserin[1] == 'T':\r\n var_dict.update({splituserin[0] : True})\r\n elif splituserin[1] == 'F':\r\n var_dict.update({splituserin[0] : False})\r\n else:\r\n print('Incorrect format, try again')\r\n \r\n print('Input the logic statement using these rules: \\n'\r\n ' and = & \\n'\r\n ' or = v \\n'\r\n ' not = ~ \\n'\r\n ' if/then = -> \\n'\r\n ' if and only if = <-> \\n'\r\n ' Use round brackets to indicate order of operations. \\n'\r\n ' You must use round brackets on negation statements as well. Ex: (~B) \\n'\r\n ' Example: ((AvB)&(~(A&B)))')\r\n sentence = input()\r\n sentence = list(sentence)\r\n return var_dict, sentence",
"def parse_bool(question, default=True):\n choices = 'Y/n' if default else 'y/N'\n default = 'Y' if default else 'N'\n while True:\n answer = raw_input('%s [%s]: ' % (question, choices)).upper() or default\n if answer.startswith('Y'):\n return True\n elif answer.startswith('N'):\n return False\n else:\n print(\"Invalid selection: '%s'. Must be either [y]es or [n]o.\"\n % answer)",
"def parse_bool(value):\n return bool({\n 'True': True,\n 'False': False\n }.get(value, value))",
"def parse_config_value(value):\n if value == '':\n return None\n elif value.lower() == 'true':\n return True\n elif value.lower() == 'false':\n return False\n\n try:\n return int(value)\n except ValueError:\n pass\n\n try:\n return float(value)\n except ValueError:\n pass\n\n return value",
"def parse_bool(s, default=False):\n if s is None:\n return default\n return TRUTH.get(s.lower(), default)",
"def parse_boolean(self, query):\n query['bool'] = self.line.split()\n return query",
"def parse_user_input(user_input: str):\n\n try:\n\n # Split the full command into its parts but ignore space in quotes\n full_command = shlex.split(user_input)\n command, arguments = full_command[0], full_command[1:]\n\n # Attempt to parse the arguments using docopt and a function pointer to their docstring\n return command, docopt(FUNCTION_COMMANDS[command].__doc__, argv=arguments)\n\n except (SystemExit, KeyError) as e:\n\n # If the error was caused by the user issuing a non-valid command then let them know\n if type(e) is KeyError:\n print_formatted_text(ANSI(f'\\x1b[31mCommand \"{command}\" is not recognized as a valid command!'))\n\n # Otherwise check to make sure they user gave invalid arguments and not asking for help\n elif '-h' not in arguments and '--help' not in arguments:\n print_formatted_text(ANSI(f'\\x1b[31mInvalid arguments for command \"{command}\" -> {arguments}'))\n\n # Return an empty dictionary to let execute_shell know we failed\n return command, dict()\n\n except ValueError:\n # If the error was caused by not closing the parenthesis then let the user know\n print_formatted_text(ANSI(f'\\x1b[31mMissing closing quotation, so cannot parse input'))\n return '', dict()",
"def _handle_input_(self, user_input):\n\t\thandled = None\n\n\t\tuser_input = re.split(r' |,', user_input, 1)\n\t\toption = user_input[0]\t# function to call from self.options dict\n\n\t\tkwarg_dict = {}\n\t\targ_list = []\n\t\ttry:\n\t\t\tif len(user_input) == 1:\n\t\t\t\t# call function with no arguments\n\t\t\t\thandled = self.options[option]()\n\t\t\t\treturn handled\n\t\t\telse:\n\t\t\t\t# separate parameters and proceed\n\t\t\t\tparams = user_input[1]\n\n\t\t\t# identify each argument, separate args and kwargs\n\t\t\tfor argument in params.split(','):\n\t\t\t\ttokens = re.split('\\s*=\\s*', argument)\n\t\t\t\t# print(argument, tokens)\n\t\t\t\tif len(tokens) == 1:\n\t\t\t\t\t# argument is positional (arg)\n\t\t\t\t\targ_list.append(eval(tokens[0].strip(' ')))\n\n\t\t\t\telse:\n\t\t\t\t\t# argument is keyword (kwarg)\n\t\t\t\t\tthis_keyword = tokens[0].strip(' ')\n\t\t\t\t\tif this_keyword == '':\n\t\t\t\t\t\traise SyntaxError\n\t\t\t\t\telse:\n\t\t\t\t\t\tkwarg_dict[this_keyword] = eval(tokens[1].strip(' '))\n\n\t\t\t# try calling function with args and/or kwargs\n\t\t\t# raises KeyError if function is not one of the options\n\t\t\thandled = self.options[option](*arg_list, **kwarg_dict)\n\n\t\t# except IndexError:\n\t\t# \t# call function with no params\n\t\texcept SyntaxError:\n\t\t\tprint('Invalid syntax for calling function')\n\t\texcept KeyError:\n\t\t\tif self.default is None:\n\t\t\t\thandled = False\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\thandled = self.default(user_input)\n\t\t\t\texcept TypeError:\n\t\t\t\t\thandled = self.default()\n\n\t\treturn handled",
"def get_bool(item):\r\n\r\n if str(item).lower() in ['true','yes','1','t','y']:\r\n return True\r\n if str(item).lower() in ['false', 'no', '0', 'f', 'n']:\r\n return False\r\n raise ValueError(\"'%s' cannot be parsed into a boolean value\" % item)",
"def postParse(self, _, _2, tokenlist):\n tok = string.lower(tokenlist[0])\n if tok in [\"t\", \"true\", \"1\"]:\n return True\n elif tok in [\"f\", \"false\", \"0\"]:\n return False\n else:\n raise Error(\"token (%s) must be boolean\" % tok)",
"def parse_bool(val) -> str:\n return str(val).lower() if isinstance(val, bool) else val",
"def parse(cls, input):",
"def test_parse_bool_false_for_non_truthy_values():\n assert_false(eg_config._parse_bool_from_raw_egrc_value(''))\n assert_false(eg_config._parse_bool_from_raw_egrc_value(None))\n assert_false(eg_config._parse_bool_from_raw_egrc_value('false'))\n assert_false(eg_config._parse_bool_from_raw_egrc_value('False'))",
"def boolean_from_str(src):\n if src is None:\n return None\n elif src == \"true\":\n return True\n elif src == \"false\":\n return False\n elif src == \"1\":\n return True\n elif src == \"0\":\n return False\n else:\n raise ValueError",
"def str2bool(text: str) -> bool:\n text = text.lower()\n if text == \"true\":\n return True\n elif text == \"false\":\n return False\n else:\n raise ValueError(f\"Cannot parse bool: '{text}'\")",
"def test__parse_flags(input_data):\n output = parse_flags(input_data)\n vampytest.assert_instance(output, UserFlag)\n return output",
"def _get_bool(val) -> bool | None:\n if isinstance(val, bool):\n return val\n elif isinstance(val, str):\n if val.strip().lower() == \"true\":\n return True\n elif val.strip().lower() == \"false\":\n return False\n return None",
"def string_parser(string):\n\n # converts string into a list\n if ', ' in string:\n config = []\n # converts each item in the list into its respective types\n for item in string.split(', '):\n config.append(string_parser(item))\n return config\n # converts string to boolean\n elif string == 'True':\n return True\n elif string == 'False':\n return False\n # converts string to int\n elif string.count('.') == 0:\n try:\n return int(string)\n except ValueError:\n pass\n # converts string to float\n else:\n try:\n return float(string)\n except ValueError:\n pass\n\n # does not convert string if already is a string\n return string",
"def get_correct_data_types(user_input, answer_type, alpha_string):\n if answer_type == int:\n try:\n user_input = int(user_input)\n except ValueError:\n user_input = None\n ui.print_error_message(\"Wrong value provided.\\n\")\n\n elif answer_type == str:\n if alpha_string:\n user_input_copy = user_input.replace(' ', '')\n\n if not user_input_copy.isalpha():\n user_input = None\n ui.print_error_message('It not alpha string.')\n\n return user_input"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a JSON string from the input data. | def to_json(data: typing.Dict[str, typing.Any]) -> str:
output_data = _normalize_data(data)
return str(json.dumps(output_data, sort_keys=True, indent=4)) | [
"def encode_data(self, data):\r\n return json.dumps(data)",
"def write_json_string(data):\r\n raise NotImplementedError()",
"def _encode_data(self, data, **kwargs):\n return json.dumps(data, cls=JSONEncoder, **kwargs)",
"def format_json(data, dense):\n buf = io.StringIO()\n write_json(buf, data, dense)\n return buf.getvalue()",
"def convert_to_json(data_obj):\n\n return json.dumps(data_obj)",
"def make_json(self, raw_string):\n return json.dumps([x.strip() for x in raw_string.split(',')])",
"def template_json(data):\n return JSONRenderer().render(data).decode()",
"def _format_data(data):\n if data is None:\n data = ''\n elif not isinstance(data, string_types):\n data = json_dumps_or_string(data)\n return data",
"def json_dumps(data, *args, **kwargs):\n kwargs.setdefault(\"cls\", JSONEncoder)\n kwargs.setdefault(\"encoding\", None)\n return simplejson.dumps(data, *args, **kwargs)",
"def format_data_to_json_serializable(data: Any):\n if data is None:\n return 'None'\n if type(data) in (str, int, float, bool):\n return data\n if isinstance(data, torch.Tensor):\n if data.shape == () or reduce(operator.mul, data.shape, 1) == 1:\n return format_data_to_json_serializable(data.cpu().item())\n return f'Tensor of shape {str(data.shape)}'\n if isinstance(data, collections.abc.Mapping):\n return {format_data_to_json_serializable(k): format_data_to_json_serializable(v) for k, v in data.items()}\n if isinstance(data, collections.abc.Iterable):\n return [format_data_to_json_serializable(v) for v in data]\n\n # Unknown format catch-all\n return str(data)",
"def render(self, data):\n separators = SHORT_SEPARATORS if self.compact else LONG_SEPARATORS\n\n try:\n render = json.dumps(\n data, ensure_ascii=self.ensure_ascii, separators=separators\n )\n\n # Unicode symbols \\u2028 and \\u2029 are invisible in JSON and\n # make output are invalid. To avoid this situations, necessary\n # replace this symbols.\n # For more information read this article: http://goo.gl/ImC89E\n for wrong_symbol, expected in WRONG_UNICODE_SYMBOLS:\n render = render.replace(wrong_symbol, expected)\n\n render = bytes(render.encode('utf-8'))\n except Exception as exc:\n raise SerializerError(exc)\n return render",
"def jsonify(self,data):\n data = json.dumps(data)\n self.session.add_data(data)\n if self.verbose == True:\n print \"JSON data: %s\" % data",
"def _format_json(data, indent=None, line_prefix=None):\n\n if not data:\n return ''\n\n if line_prefix is None:\n return json.dumps(data, indent=indent)\n\n return '\\n'.join('{}{}'.format(line_prefix, line)\n for line\n in json.dumps(data, indent=indent).splitlines()\n if line)",
"def pretty_print_json(data):\n return json.dumps(data,ensure_ascii=False,sort_keys=True,indent=4)",
"def to_json(self):\n data = {}\n if self.customer_id != '':\n data['customer_id'] = self.customer_id\n if self.contact_persons:\n data['contact_persons'] = self.contact_persons\n if self.reference_number != '':\n data['reference_number'] = self.reference_number\n if self.template_id != '':\n data['template_id'] = self.template_id\n if self.date != '':\n data['date'] = self.date\n if self.exchange_rate > 0:\n data['exchange_rate'] = self.exchange_rate\n if self.line_items:\n data['line_items'] = []\n for value in self.line_items:\n line_item = value.to_json()\n data['line_items'].append(line_item)\n if self.notes != '':\n data['notes'] = self.notes\n if self.terms != '':\n data['terms'] = self.terms\n if self.creditnote_number != '':\n data['creditnote_number'] = self.creditnote_number\n return data",
"def js2json(data):\n ctx = get_ctx()\n fret = ctx.eval(\"\"\"\n function func() {\n var data = \"\"\" + data + \"\"\";\n var json_data = JSON.stringify(data);\n return json_data;\n }\n \"\"\")\n\n jsond = ctx.locals.func()\n return jsond",
"def generateJson(self):\n data = self.__combineResult()\n return json.dumps(data, indent=4, sort_keys=True)",
"def generate_json_data(self, user_name, uuid, email):\n\t\treturn {uuid:{\"name\":name,\"email\":email}}",
"def serialize_json(series: str, data: List[Pair]) -> str:\n obj = [dict(x=r.x, y=r.y) for r in data]\n text = json.dumps(obj, sort_keys=True)\n return text"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Unmount a mountpoint using libc. | def umount(
mountpoint: typing.Optional[typing.Union[
libioc.Types.AbsolutePath,
typing.List[libioc.Types.AbsolutePath],
]]=None,
options: typing.Optional[typing.List[str]]=None,
force: bool=False,
ignore_error: bool=False,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
if isinstance(mountpoint, list) is True:
for entry in typing.cast(
typing.List[libioc.Types.AbsolutePath],
mountpoint
):
try:
umount(
mountpoint=entry,
options=options,
force=force,
ignore_error=ignore_error,
logger=logger
)
except (
libioc.errors.UnmountFailed,
libioc.errors.InvalidMountpoint
):
if force is False:
raise
return
mountpoint_path = libioc.Types.AbsolutePath(mountpoint)
if force is False:
umount_flags = ctypes.c_ulonglong(0)
else:
umount_flags = ctypes.c_ulonglong(0x80000)
if os.path.ismount(str(mountpoint_path)) is False:
raise libioc.errors.InvalidMountpoint(
mountpoint=mountpoint,
logger=logger
)
_mountpoint = str(mountpoint_path).encode("utf-8")
if libjail.dll.unmount(_mountpoint, umount_flags) == 0:
if logger is not None:
logger.debug(
f"Jail mountpoint {mountpoint} umounted"
)
else:
if logger is not None:
logger.spam(
f"Jail mountpoint {mountpoint} not unmounted"
)
if ignore_error is False:
raise libioc.errors.UnmountFailed(
mountpoint=mountpoint,
logger=logger
) | [
"def unmount(path='/sd'):\n import uos\n uos.unmount(path)",
"def unmount(mount_dir: str):\n logging.info(f\"{mount_dir}: unmounting\")\n\n try:\n subprocess.run([\"fusermount\", \"-u\", mount_dir], check=True)\n except Exception as e:\n logging.error(f\"{mount_dir}: {e}\")\n error({\"status\": \"Failure\", \"message\": f\"ratarmount-flexvol: error during unmount of {mount_dir}: {e}\"})\n\n try:\n if Path(mount_dir).exists():\n Path(mount_dir).rmdir()\n except:\n logging.error(f\"{mount_dir}: still exists after unmount\")\n error(\n {\n \"status\": \"Failure\",\n \"message\": f\"ratarmount-flexvol: error during unmount: {mount_dir} did not get unmounted\",\n }\n )\n\n logging.info(f\"{mount_dir}: unmounted\")\n info({\"status\": \"Success\", \"message\": f\"ratarmount-flexvol: unmounted {mount_dir}\"})",
"def do_umount(mountpoint):\n try:\n _logger.info(\"Unmounting %s\", mountpoint)\n subprocess.check_output(['/usr/bin/umount', mountpoint], stderr=subprocess.STDOUT)\n return True\n except subprocess.CalledProcessError as e:\n _logger.error(\"Failed to unmount [%s]: %s\", mountpoint, e.output)\n return False",
"def umount(self, target):\n if self._libc_umount(target) < 0:\n errno = ctypes.get_errno()\n emsg = 'Could not unmount {}: {}'\n raise OSError(errno, emsg.format(target, os.strerror(errno)))",
"def remove_osd_mounts(ctx):\n ctx.cluster.run(\n args=[\n 'grep',\n '/var/lib/ceph/osd/',\n '/etc/mtab',\n run.Raw('|'),\n 'awk', '{print $2}', run.Raw('|'),\n 'xargs', '-r',\n 'sudo', 'umount', run.Raw(';'),\n 'true'\n ],\n )",
"def umount():\n logging.info('Umount rozofs')\n threads = []\n for site in topology:\n for client in site['storaged']:\n threads.append(\n Thread(target=exec_commands, args=(['umount %s' % (config['rozofs']['mount_dir'])]\n , [client], ))\n )\n for t in threads:\n t.start()\n for t in threads:\n t.join()",
"def unmount(self, device=None, card_os=\"raspberry\", host=None):\n\n host = host or get_platform()\n\n print (\"KKKK\", host)\n card = SDCard(card_os=card_os, host=host)\n\n if not self.dryrun:\n self.system('sudo sync') # flush any pending/in-process writes\n\n if host in ['linux', 'raspberry']:\n\n Console.ok(f\"unmounting {card.boot_volume}\")\n os.system(f\"sudo umount {card.boot_volume}\")\n time.sleep(3)\n Console.ok(f\"unmounting {card.root_volume}\")\n os.system(f\"sudo umount {card.root_volume}\")\n\n time.sleep(3)\n\n rm = [f\"sudo rmdir {card.boot_volume}\",\n f\"sudo rmdir {card.root_volume}\"]\n\n for command in rm:\n os.system(command)\n elif host == \"macos\":\n\n Console.ok(f\"unmounting {card.boot_volume}\")\n os.system(f\"diskutil umount {card.boot_volume}\")\n\n else:\n Console.error(\"Not yet implemnted for your OS\")\n return \"\"",
"def unmount(self, path):\n del self._mountpoints[self._join_chunks(self._normalize_path(path))]",
"def umount(ignore_errors):\n\n u_boot_console.log.action('Unmounting UMS device')\n cmd = ('/bin/umount', host_ums_part_node)\n u_boot_utils.run_and_log(u_boot_console, cmd, ignore_errors)",
"def _CleanUmount(self, mount_point):\n self.ExecOnDevice(['sync', '&&', 'sync'])\n self._Remount(mount_point, 'ro')\n\n # If the mount_point is not present, don't bother trying to umount.\n if mount_point not in self.ExecOnDevice(['mount']):\n return True\n\n info = [point for point in self.ExecOnDevice(['mount']).splitlines()\n if mount_point in point.split()]\n umount_attempts = 0\n umounted = False\n while umount_attempts < 5 and not umounted:\n umount_attempts += 1\n self.ExecOnDevice(['umount', mount_point])\n umounted = mount_point not in self.ExecOnDevice(['mount'])\n if not umounted:\n time.sleep(self._connect_poll_interval)\n\n if not umounted:\n err = self.ExecOnDevice(['umount', mount_point])\n logging.warn('%s could not be umounted: %s', mount_point, err)\n logging.warn('Mounts:\\n%s', self.ExecOnDevice(['mount']))\n for f in self.ExecOnDevice(['lsof']).splitlines():\n if mount_point in f:\n logging.warn('Open file: %s', f)\n\n # e2fsck is not present on API < 21\n if self.GetApiVersion() < 21:\n return umounted\n\n clean = False\n if info:\n info = info[0]\n if 'ext4' in info:\n dev = info.split()[0]\n fsck_out = self.ExecOnDevice(['e2fsck', '-v', '-f', '-p', dev])\n if 'UNEXPECTED INCONSISTENCY' in fsck_out:\n logging.error('%s: FS Corruption! %s', mount_point, fsck_out)\n else:\n clean = True\n else:\n clean = True\n else:\n logging.warn('%s: Could not retrieve mount info - cannot fsck.',\n mount_point)\n\n return clean and umounted",
"def unmount(self, path, opt=None):\n\n url = self._paths_url(path, 'unmount')\n self._post(url, opt)",
"def fsdestroy(self):\n q.logger.log(\"Unmounting file system\")",
"def unmount_remote_repo(mount_point: str, mount_root: str):\n\n # TODO: [MV] this is experimental\n # TODO: [MV] decide later what to do with unmounting remote repo\n if not os.path.exists(mount_point):\n return\n\n if not os.path.ismount(mount_point):\n shutil.rmtree(mount_root)\n return\n\n # force unmount the remote path as we are sure we've done everything we need up until to this point\n # and don't want to get stopped by any other processes stopping from unmounting\n umount_executable = shutil.which('umount')\n cmd = [umount_executable, mount_point]\n child = subprocess.Popen(\n cmd,\n )\n child.communicate()\n exit_code = child.wait()\n if exit_code != 0:\n # in case of failure log warning so the user can unmount manually if needed\n logger.warning(f'Could not unmount path: {mount_point}.\\n'\n f'Please unmount manually using command:\\n'\n f'{\" \".join(cmd)}')\n else:\n shutil.rmtree(mount_root)",
"def _unbindProcSysDev(self, mountPoint):\n if self._procInBootMounted:\n self.__debug(\"mount point ≠ / so umount /dev, /proc and /sys in \" + mountPoint)\n slt.execCall('umount {mp}/dev'.format(mp=mountPoint))\n slt.execCall('umount {mp}/proc'.format(mp=mountPoint))\n slt.execCall('umount {mp}/sys'.format(mp=mountPoint))",
"def unshare_directory(source_slice, source_dir):\n\n return stork_proper.unmount(__cpath(source_slice, source_dir))\n #return stork_proper.call([\"unmount\", __cpath(source_slice, source_dir)])",
"def umount_hugepages():\n if not is_hugepage_mounted():\n return\n\n try:\n tasks.run_task(['sudo', 'umount', settings.getValue('HUGEPAGE_DIR')],\n _LOGGER, 'Unmounting hugepages...', True)\n except subprocess.CalledProcessError:\n _LOGGER.error('Unable to umount hugepages.')\n\n if not deallocate_hugepages():\n _LOGGER.error('Unable to deallocate previously allocated hugepages.')",
"def ChrootUnmountPaths(RootPath: str):\n subprocess.run(\"umount -l {0}/dev\".format(RootPath), shell=True, check=False, stdout=subprocess.DEVNULL)\n subprocess.run(\"umount -l {0}/proc\".format(RootPath), shell=True, check=False, stdout=subprocess.DEVNULL)\n subprocess.run(\"umount -l {0}/sys\".format(RootPath), shell=True, check=False, stdout=subprocess.DEVNULL)\n subprocess.run(\"umount -l {0}/tmp\".format(RootPath), shell=True, check=False, stdout=subprocess.DEVNULL)",
"def mount(path='/sd'):\n from machine import SD\n sd = SD()\n os.mount(sd, path)",
"async def test_remove_mount(\n coresys: CoreSys, all_dbus_services: dict[str, DBusServiceMock], mount: Mount\n):\n systemd_service: SystemdService = all_dbus_services[\"systemd\"]\n systemd_service.StopUnit.calls.clear()\n\n # Remove the mount\n assert mount == await coresys.mounts.remove_mount(mount.name)\n\n assert mount.state is None\n assert mount not in coresys.mounts\n\n assert [call[0] for call in systemd_service.StopUnit.calls] == [\n \"mnt-data-supervisor-media-media_test.mount\",\n \"mnt-data-supervisor-mounts-media_test.mount\",\n ]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the list of basedirs according to the host distribution. | def get_basedir_list(distribution_name: str = "FreeBSD") -> typing.List[str]:
basedirs = [
"bin",
"boot",
"lib",
"libexec",
"rescue",
"sbin",
"usr/bin",
"usr/include",
"usr/lib",
"usr/libexec",
"usr/sbin",
"usr/share",
"usr/libdata",
]
if distribution_name == "FreeBSD":
basedirs.append("usr/lib32")
return basedirs | [
"def _getdirs(self):\n dirs = [\"/\"] + self.dirs\n for name in self.zf.namelist():\n dirpath = os.path.dirname(name)\n if dirpath not in dirs:\n dirs.append(dirpath)\n return dirs",
"def auto_detect_base_directories():\n auto_rootdirs = None\n if PLATFORM == 'linux':\n auto_rootdirs = ('/usr', '/usr/local', '/opt')\n elif PLATFORM == 'osx':\n auto_rootdirs = ('/usr/local', '/opt')\n elif PLATFORM == 'mingw64':\n auto_rootdirs = ('/mingw64',)\n elif PLATFORM == 'windows':\n auto_rootdirs = ()\n\n auto_include_directories = []\n auto_library_directories = []\n for j in auto_rootdirs:\n if os.path.isdir(j):\n path_include = os.path.join(j, 'include')\n path_library = os.path.join(j, 'lib')\n if os.path.isdir(path_include):\n auto_include_directories.append(path_include)\n if os.path.isdir(path_library):\n auto_library_directories.append(path_library)\n\n return auto_rootdirs, auto_include_directories, auto_library_directories",
"def list_master_dirs(saltenv=\"base\", prefix=\"\"):\n return __context__[\"fileclient\"].dir_list(saltenv, prefix)",
"def get_eplus_basedirs():\n if platform.system() == \"Windows\":\n eplus_homes = Path(\"C:\\\\\").glob(\"EnergyPlusV*\")\n return eplus_homes\n elif platform.system() == \"Linux\":\n eplus_homes = Path(\"/usr/local/\").glob(\"EnergyPlus-*\")\n return eplus_homes\n elif platform.system() == \"Darwin\":\n eplus_homes = Path(\"/Applications\").glob(\"EnergyPlus-*\")\n return eplus_homes\n else:\n warnings.warn(\n \"trnslator is not compatible with %s. It is only compatible \"\n \"with Windows, Linux or MacOs\" % platform.system()\n )",
"def directories(self):\n ret = self._get_attr(\"directories\")\n return [IGuestDirectory(a) for a in ret]",
"def _get_config_dirs():\n config_dirs = [\n USER_CONFIG_DIR,\n os.path.join(\"/\", \"etc\", \"rapport\"),\n os.path.abspath(os.path.join(\"rapport\", \"config\"))\n ]\n return config_dirs",
"def get_directories_paths(self):\n dirs_paths = [ROOT]\n prefixes = [ROOT]\n while prefixes:\n prefix = prefixes.pop()\n results = self.native_container.get_all_keys(\n prefix=prefix,\n delimiter=SEP)\n for result in results:\n if self.obj_cls.is_prefix(result):\n dirs_paths.append(result.name)\n prefixes.append(result.name)\n\n return sorted(set(dirs_paths))",
"def _get_host_ips_dir(self):\n host_ips_dir = []\n target_dir_path = os.path.join(self._cluster_profiler_dir, 'cluster_profiler')\n target_dir_path = validate_and_normalize_path(\n target_dir_path, raise_key=\"Invalid cluster_profiler dir path.\")\n if not os.path.exists(target_dir_path):\n log.error('Did not find cluster_profiler dir : %s', target_dir_path)\n raise ProfilerDirNotFoundException(msg='Did not find cluster_profiler dir:{}'.format(target_dir_path))\n\n entries = os.scandir(target_dir_path)\n # host_mapping_id_index:1\n host_mapping_ips = [i[1] for i in self._host_ips_mapping_info]\n for entry in entries:\n if entry.is_symlink():\n continue\n if entry.is_dir():\n if entry.name in host_mapping_ips:\n host_ips_dir.append(entry.name)\n return host_ips_dir",
"def get_dirchain(self):\n chain = [self.themedir]\n base = self.base\n while base is not None:\n chain.append(base.themedir)\n base = base.base\n return chain",
"def get_host_list(self):",
"def _candidate_tempdir_list():\r\n\r\n dirlist = []\r\n\r\n # First, try the environment.\r\n for envname in 'TMPDIR', 'TEMP', 'TMP':\r\n dirname = _os.getenv(envname)\r\n if dirname: dirlist.append(dirname)\r\n\r\n # Failing that, try OS-specific locations.\r\n if _os.name == 'mac':\r\n try:\r\n fsr = _Folder.FSFindFolder(_Folders.kOnSystemDisk,\r\n _Folders.kTemporaryFolderType, 1)\r\n dirname = fsr.as_pathname()\r\n dirlist.append(dirname)\r\n except _Folder.error:\r\n pass\r\n elif _os.name == 'riscos':\r\n dirname = _os.getenv('Wimp$ScrapDir')\r\n if dirname: dirlist.append(dirname)\r\n elif _os.name == 'nt':\r\n dirlist.extend([ r'c:\\temp', r'c:\\tmp', r'\\temp', r'\\tmp' ])\r\n else:\r\n dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])\r\n\r\n # As a last resort, the current directory.\r\n try:\r\n dirlist.append(_os.getcwd())\r\n except (AttributeError, _os.error):\r\n dirlist.append(_os.curdir)\r\n\r\n return dirlist",
"def ls_dirs(self, dir):\n if is_abs_bpath(dir):\n fsdir = path.join(self.root, dir[1:])\n listing = os.listdir(fsdir)\n return sorted([x for x in listing\n if path.isdir(path.join(fsdir, x))])\n else:\n raise ValueError('Expected absolute blaze catalog path: %r' % dir)",
"def dirpaths(self):\n parts = self.split()\n result = [DotPath(parts[0] or \"/\")]\n for name in parts[1:]:\n result.append(result[-1] / name)\n return result",
"def _getRepositoryListPaths():\r\n _repositoryListPaths = []\r\n _repositoryListPaths.append(os.path.join(home,\".subuser\",\"repositories.json\"))\r\n _repositoryListPaths.append(\"/etc/subuser/repositories.json\") # TODO how does this work on windows?\r\n _repositoryListPaths.append(os.path.join(_getSubuserDir(),\"repositories.json\"))\r\n repositoryListPaths = []\r\n for path in _repositoryListPaths:\r\n if os.path.exists(path):\r\n repositoryListPaths.append(path)\r\n return repositoryListPaths",
"def listdir(self):\r\n\t\treturn []",
"def getfolders(self):\n\n return []",
"def get_all_paths(buildname, basedir=OUTPUT_DIR):\n buildpath = os.path.join(basedir, buildname)\n assert os.path.exists(\n buildpath), 'path does not exist at {}'.format(buildpath)\n\n # Gets all the children dirs from the build path\n # TODO: Note that the depth is fixed at 4, which == NUM_PARTS\n # We should make this restriction in code and/or add test for this\n glob_dirs = glob.glob(os.path.join(buildpath, '*/*/*/*'))\n # print(glob_dirs)\n child_dirs = filter(lambda f: os.path.isdir(f), glob_dirs)\n return child_dirs",
"def dirs_of(self, directory):\n return self.listings[directory]['dirs']",
"def gethomepaths(self):\n cwd = os.getcwd()\n home_dir = os.path.expanduser('~')\n os.chdir(home_dir)\n fs_dir = os.path.abspath('.')\n\tos.chdir(cwd) # I hope this will always get you back to the original place...\n if home_dir!= fs_dir:\n return [home_dir, fs_dir]\n else:\n return [home_dir]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a generator for all cog files that aren't in do_not_use. | def extensions_generator():
cog_path = "./cogs"
do_not_use = ["__init__.py", "__pycache__", "xp.py"]
for cog in os.listdir(cog_path):
if cog not in do_not_use:
yield f"cogs.{cog[:-3]}" | [
"def extensions_generator():\n cog_path = \"./cogs\"\n do_not_use = ['errors', 'helper', 'managers', '__pycache__']\n for cog in os.listdir(cog_path):\n if cog not in do_not_use:\n yield f\"cogs.{cog[:-3]}\"",
"def iter_dists_excl(dists, exclude_fn):\n for dist in dists:\n fn = dist_naming.filename_dist(dist)\n if fn in exclude_fn:\n continue\n yield dist",
"def _black_list_filter(self, files=(), ext=()):\n \n for f in files:\n if not any(f.endswith(e) for e in ext):\n yield f",
"def test_antiNumuCC_generator():\n antinumucc_outputs = HIGHLAND_LOADER.get_outputs()['RunAntiNumuCCAnalysis']\n for input_file in antinumucc_outputs:\n if 'beam' in input_file or 'rdp' in input_file:\n test_funcs = tests.antiNumuCC_test_funcs_rdp\n else:\n test_funcs = tests.antiNumuCC_test_funcs\n\n for generated_test in analysis_generator(antinumucc_outputs[input_file],\n test_funcs):\n yield generated_test",
"def _FilterOutUnneededSkylabDeps(self, deps):\n file_ignore_list = [\n re.compile(r'.*build/chromeos.*'),\n re.compile(r'.*build/cros_cache.*'),\n # No test target should rely on files in [output_dir]/gen.\n re.compile(r'^gen/.*'),\n ]\n return [f for f in deps if not any(r.match(f) for r in file_ignore_list)]",
"def _file_list(self, directory, excluded=\"\"):\n for dirname, dirnames, filenames in os.walk(directory):\n for filename in filenames:\n if filename not in excluded:\n yield os.path.join(dirname, filename)",
"def _discover_files(self, files_or_modules: Sequence[str]) -> Iterator[str]:\n for something in files_or_modules:\n if os.path.isdir(something) and not os.path.isfile(\n os.path.join(something, \"__init__.py\")\n ):\n skip_subtrees: list[str] = []\n for root, _, files in os.walk(something):\n if any(root.startswith(s) for s in skip_subtrees):\n # Skip subtree of already discovered package.\n continue\n\n if _is_ignored_file(\n root,\n self.config.ignore,\n self.config.ignore_patterns,\n self.config.ignore_paths,\n ):\n skip_subtrees.append(root)\n continue\n\n if \"__init__.py\" in files:\n skip_subtrees.append(root)\n yield root\n else:\n yield from (\n os.path.join(root, file)\n for file in files\n if file.endswith(\".py\")\n )\n else:\n yield something",
"def skip_components_and_ophyd_stuff(app, what, name, obj, skip, options):\n if isinstance(obj, ophyd.Component):\n return True\n\n if name.startswith(\"_\"):\n # It's unclear if I broke this or if it's always been broken,\n # but for our use case we never want to document `_` items with\n # autoclass.\n return True\n\n if name in OPHYD_SKIP:\n return True\n\n return skip",
"def load_generator_coca_corpus(ignore_stopwords, coca_path=\"data/static/COCA/corpus_files/\"):\n eos_regex = re.compile(r\"(!|\\.|\\?)\")\n ignore_regex = re.compile(r\"@\")\n later_re = r\"\\d+(_\\w+)+\\.txt\"\n \n if ignore_stopwords:\n stop_ws = set(stopwords.words('english'))\n ignore = stop_ws.union(set([\"#\"]))\n else:\n ignore = set([])\n\n for filename in os.listdir(coca_path):\n if \".txt\" not in filename:\n continue\n with open(coca_path+filename, 'r', encoding='ascii', errors=\"ignore\") as coca_file:\n sentence = []\n for line in coca_file:\n split_line = line.strip().split('\\t')\n if re.match(later_re, filename):\n if len(split_line) < 5: \n continue\n token = split_line[2].lower()\n pos_tag = split_line[4].strip()\n else:\n if len(split_line) < 3: \n continue\n token = split_line[0].lower()\n pos_tag = split_line[2].strip()\n \n if not re.match(ignore_regex, token) and token not in ignore: \n sentence.append((token, pos_tag))\n if re.match(eos_regex, token):\n sentence = LineWithPos(sentence)\n yield sentence \n sentence = []",
"def iter_cached_obo() -> List[Tuple[str, str]]:\n for prefix in os.listdir(PYOBO_HOME):\n if prefix in GLOBAL_SKIP or prefix in NOT_AVAILABLE_AS_OBO or prefix in OBSOLETE:\n continue\n d = os.path.join(PYOBO_HOME, prefix)\n if not os.path.isdir(d):\n continue\n for x in os.listdir(d):\n if x.endswith('.obo'):\n p = os.path.join(d, x)\n yield prefix, p",
"def get_cogs() -> list:\r\n cogs: List[str] = []\r\n\r\n for path, dirs, files in walk(join(\"src\")):\r\n for f in files:\r\n file_path: str = join(path, f)\r\n cog_split: list = file_path.split(\"_\")\r\n if len(cog_split) > 1 and cog_split[1] == \"cog.py\":\r\n sep_split: list = file_path.split(sep)\r\n sep_split[len(sep_split) - 1] = splitext(sep_split[len(sep_split) - 1])[0]\r\n cogs.append(\".\".join(sep_split))\r\n\r\n return cogs",
"def iter_unresolved(self) -> Generator[MbedLibReference, None, None]:\n for lib in self.iter_all():\n if not lib.is_resolved():\n yield lib",
"def remove_missing_files(cls, files: List[CGTag]) -> List[CGTag]:\n filtered_files: List[CGTag] = files.copy()\n for file in files:\n if file.mandatory and not Path(file.path).exists():\n raise CgDataError(f\"Mandatory file cannot be found at {file.path}\")\n if not Path(file.path).exists():\n LOG.info(f\"Optional file {file.path} not found, removing from bundle.\")\n filtered_files.remove(file)\n return filtered_files",
"def source_excl(self):\n return self._cache_get(\"source_excl\", [])",
"def iter_all(self) -> Generator[MbedLibReference, None, None]:\n for lib in self.root.rglob(\"*.lib\"):\n if not self._in_ignore_path(lib):\n yield MbedLibReference(lib, lib.with_suffix(\"\"))",
"def maya_file_generator(root_path, skip='None', descend='True'):\n\n pass",
"async def get_cogs(self, *, all_cogs: bool=False) -> list:\r\n return sorted([\r\n cog for cog in self.bot.cogs.values()\r\n if cog.DISABLE_HELP in [False, all_cogs] # [False, True] or [False, False]\r\n and cog.cog_name != \"BotSetup\"\r\n ],\r\n key=lambda c: c.cog_name)",
"def get_initial_default_excludes():\n return [ Pattern(exclude) for exclude in\n'''**/__pycache__/**\n**/*~\n**/#*#\n**/.#*\n**/%*%\n**/._*\n**/CVS\n**/CVS/**\n**/.cvsignore\n**/SCCS\n**/SCCS/**\n**/vssver.scc\n**/.svn\n**/.svn/**\n**/.DS_Store\n**/.git\n**/.git/**\n**/.gitattributes\n**/.gitignore\n**/.gitmodules\n**/.hg\n**/.hg/**\n**/.hgignore\n**/.hgsub\n**/.hgsubstate\n**/.hgtags\n**/.bzr\n**/.bzr/**\n**/.bzrignore\n'''.splitlines() ]",
"def missingoutputfiles(self):\n return self.getmissingoutputfiles(self.SlideID, **self.workflowkwargs)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a generator for all submodule addons. | def submodules_generator():
sub_path = "./subs"
do_not_use = ["solver"]
for item in os.listdir(sub_path):
path = os.path.join(sub_path, item)
if item not in do_not_use:
for sub in os.listdir(path):
if sub == f"{item}.py" and sub not in do_not_use:
yield f"subs.{item}.{sub[:-3]}" | [
"def extensions_generator():\n cog_path = \"./cogs\"\n do_not_use = ['errors', 'helper', 'managers', '__pycache__']\n for cog in os.listdir(cog_path):\n if cog not in do_not_use:\n yield f\"cogs.{cog[:-3]}\"",
"def submodules(self) -> Iterable[str]:\n exit_code, stdout, _ = self.run(\n \"git\",\n \"config\",\n \"--null\",\n \"--file\",\n \".gitmodules\",\n \"--get-regexp\",\n # Get only the path key of each submodule.\n r\"^submodule\\..*\\.path$\",\n record=False,\n )\n if exit_code != 0:\n # The command fails if the project doesn't have submodules (the .gitmodules file doesn't exist).\n return []\n\n keys_and_values = stdout.split(\"\\0\")\n for key_and_value in keys_and_values:\n try:\n key, value = key_and_value.split(\"\\n\", maxsplit=1)\n except ValueError:\n # This should never happen, but we log a warning just in case\n # Git doesn't return the expected format.\n log.warning(\"Wrong key and value format.\", key_and_value=key_and_value)\n continue\n\n if key.endswith(\".path\"):\n yield value\n else:\n # This should never happen, but we log a warning just in case the regex is wrong.\n log.warning(\"Unexpected key extracted fom .gitmodules.\", key=key)",
"def extensions_generator():\n cog_path = \"./cogs\"\n do_not_use = [\"__init__.py\", \"__pycache__\", \"xp.py\"]\n for cog in os.listdir(cog_path):\n if cog not in do_not_use:\n yield f\"cogs.{cog[:-3]}\"",
"def get_modules(module):\n file_dir = abspath(join(PROJ_DIR, module))\n for root, _, files in walk(file_dir): # pylint: disable=E1133\n mod_path = '{}{}'.format(\n APP_MODULE,\n root.split(PROJ_DIR)[1]).replace('/', '.')\n for filename in files:\n if (filename.endswith('.py') and not\n filename.startswith('__init__')):\n yield '.'.join([mod_path, filename[0:-3]])",
"def module_generator(self):\n assert self.cayley_graph().is_strongly_connected()\n return self.first()",
"def generate_modules_tree(\n self,\n base_path,\n name,\n srcs,\n deps,\n visibility):\n\n cmds = []\n\n for dep in deps:\n cmds.append('rsync -a $(location {})/ \"$OUT\"'.format(dep))\n\n # Copy files from sources and make their dirs.\n files = collections.OrderedDict()\n dirs = collections.OrderedDict()\n for dst, raw_src in srcs.items():\n src = self.get_source_name(raw_src)\n dst = os.path.join('\"$OUT\"', dst)\n dirs[os.path.dirname(dst)] = None\n files[dst] = src\n cmds.append('mkdir -p ' + ' '.join(dirs))\n for dst, src in files.items():\n cmds.append('cp {} {}'.format(src, dst))\n\n attrs = collections.OrderedDict()\n attrs['name'] = name\n if visibility is not None:\n attrs['visibility'] = visibility\n attrs['out'] = os.curdir\n attrs['srcs'] = srcs.values()\n attrs['cmd'] = ' && '.join(cmds)\n return Rule('genrule', attrs)",
"def _iter_module_files():\n for module in list(sys.modules.values()):\n filename = getattr(module, '__file__', None)\n if filename:\n if filename[-4:] in ('.pyo', '.pyc'):\n filename = filename[:-1]\n yield filename",
"def make_module(self, gen):\r\n body = list(gen)\r\n return self.config.make_module(self.template_name, self.exports,\r\n body)",
"def discover_examples():\r\n root = './examples'\r\n for filename in os.listdir(root):\r\n if os.path.splitext(filename)[1] == '.py':\r\n yield os.path.join(root, filename)",
"def get_submodule_names():\n return _SUBMODULE_NAMES",
"def package_generator():\n # the inner for-loop generates a finite sequence of all valid\n # ecosystem+package combinations, but we need infinite sequence.\n # Thence we use outer infinite loop here\n while True:\n for ecosystem, packages in GremlinPackageGenerator.PACKAGES.items():\n yield from GremlinPackageGenerator.generate_ecosystem_package(ecosystem, packages)",
"def register_submodule_factory(module_name, submodule_names):\n\n module = None\n submodules = []\n\n def register():\n nonlocal module\n module = __import__(name=module_name, fromlist=submodule_names)\n submodules[:] = [getattr(module, name) for name in submodule_names]\n for mod in submodules:\n mod.register()\n\n def unregister():\n from sys import modules\n for mod in reversed(submodules):\n mod.unregister()\n name = mod.__name__\n delattr(module, name.partition(\".\")[2])\n del modules[name]\n submodules.clear()\n\n return register, unregister",
"def iter_all(self) -> Generator[MbedLibReference, None, None]:\n for lib in self.root.rglob(\"*.lib\"):\n if not self._in_ignore_path(lib):\n yield MbedLibReference(lib, lib.with_suffix(\"\"))",
"def discover_modules(search_dir: Path) -> Generator[Path, None, None]:\n return (\n f\n for f in search_dir.iterdir()\n if (f.is_file() and f.suffix == \".py\")\n or (f.is_dir() and \"__init__.py\" in f.iterdir())\n )",
"def package_generator_for_ecosystem(ecosystem='pypi'):\n packages = GremlinPackageGenerator.PACKAGES[ecosystem]\n # the inner for-loop generates a finite sequence of all valid\n # ecosystem+package combinations, but we need infinite sequence.\n # Thence we use outer infinite loop here\n while True:\n yield from GremlinPackageGenerator.generate_ecosystem_package(ecosystem, packages)",
"def _generate_modules_xml(self, iml_path_list=None):\n module_path = self.project_info.project_absolute_path\n\n # b/121256503: Prevent duplicated iml names from breaking IDEA.\n module_name = iml.IMLGenerator.get_unique_iml_name(module_path)\n\n if iml_path_list is not None:\n module_list = [\n _MODULE_SECTION % (module_name, module_name),\n _MODULE_SECTION % (constant.KEY_DEPENDENCIES,\n constant.KEY_DEPENDENCIES)\n ]\n for iml_path in iml_path_list:\n module_list.append(_SUB_MODULES_SECTION.format(IML=iml_path))\n else:\n module_list = [\n _MODULE_SECTION % (module_name, module_name)\n ]\n module = '\\n'.join(module_list)\n content = self._remove_debugger_token(templates.XML_MODULES)\n content = content.replace(_MODULE_TOKEN, module)\n target_path = os.path.join(module_path, _IDEA_FOLDER, _MODULES_XML)\n common_util.file_generate(target_path, content)",
"def get_all_subpackages(package_name):\n\tif package_name is None:\n\t\treturn\n\tsubpackage = ''\n\tfor segment in package_name.split('.'):\n\t\tif subpackage != '':\n\t\t\tsubpackage += '.'\n\t\tsubpackage += segment\n\t\tyield subpackage",
"def GypGenerator(self):\n return self.GYP_GENERATOR",
"def get_modules_with_coveragerc(root_module):\n root_dir = os.path.join(INFRA_ROOT, root_module.replace('/', os.sep))\n if not os.path.isdir(root_dir):\n return []\n return [\n '%s/%s' % (root_module, d)\n for d in os.listdir(root_dir)\n if os.path.isfile(os.path.join(root_dir, d, '.coveragerc'))\n ]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads a specified extension into the bot. | async def load(self, ctx, extension):
try:
self.load_extension(extension)
await ctx.send(f"Successfully loaded extension {extension}.")
except Exception:
await ctx.send(f'Failed to load extension {extension}.')
logging.exception(f'Failed to load extension {extension}.') | [
"async def load(self, ctx, ext):\n ext_folder = \"extensions\"\n ext_dir = os.path.join(os.path.dirname(__file__), \"..\", ext_folder)\n ext_files = [name for _, name, _ in pkgutil.iter_modules([ext_dir])]\n if ext not in ext_files:\n await ctx.error(f\"{ext} extension not found.\")\n return\n\n ext_name = f\"firetail.extensions.{ext}\"\n was_loaded = ext_name in ctx.bot.extensions\n\n try:\n if was_loaded:\n ctx.bot.reload_extension(ext_name)\n await ctx.success(f'{ext} extension reloaded.')\n else:\n ctx.bot.load_extension(ext_name)\n await ctx.success(f'{ext} extension loaded.')\n except commands.ExtensionFailed as e:\n original_traceback = \"\\n\".join(traceback.format_tb(e.original.__traceback__))\n await ctx.codeblock(original_traceback, title=f\"Exception on loading {ext}\")",
"async def load(ctx, extension):\r\n reaction = client.get_cog('Reaction')\r\n client.load_extension(f'cog.{extension}')\r\n await ctx.send(f\"{reaction.success} *`cog.{extension}`* has been loaded!\")",
"async def load(self):\n await self._create()\n self.loaded_extensions = self.extension_level[self.level]\n for extension in self.loaded_extensions:\n self.bot.load_extension(self.extensions_dict[extension])\n print(\"Finished loading: {}\".format(extension))",
"async def reload(self, ctx, *, extension: str):\n self.bot.reload_extension(f\"extensions.{extension}\")\n await ctx.send(f\"**{extension}** reloaded successfully.\")",
"async def reload(self, ctx, extension_name : str):\r\n\t\tself.bot.unload_extension(extension_name)\r\n\t\ttry:\r\n\t\t\tself.bot.load_extension(extension_name)\r\n\t\texcept (AttributeError, ImportError) as e:\r\n\t\t\tawait self.bot.say(\"```py\\n{}: {}\\n```\".format(type(e).__name__, str(e)))\r\n\t\t\treturn\r\n\t\tawait self.bot.say(\"{} has been reloaded.\".format(extension_name))",
"def load_extension(self, name):\n fullname = 'extensions.%s' % name\n try:\n if HAS_IMPORTLIB:\n mod = importlib.import_module('.' + fullname, package=__package__)\n else:\n mod = __import__(fullname, globals(), locals(), [''], 1)\n except Exception as err:\n import traceback\n traceback.print_exc()\n mod = None\n return mod",
"async def load(ctx, *, cog):\n try:\n bot.load_extension('cogs.' + cog)\n await ctx.send(f\"Loaded cog '{cog}'.\")\n except commands.ExtensionError as e:\n if \"already\" in str(e):\n await restart(ctx, cog=cog)\n elif 'raised an error' in str(e):\n await ctx.message.add_reaction('\\U0001F916');\n await ctx.send(f\"Couldn't load '{cog}' due to \"\n f\"{repr(e.__cause__)}.\")\n else:\n await ctx.message.add_reaction('\\U0001F916');\n await ctx.send(\"Unrecognized cog.\")",
"async def reload(ctx, extension):\r\n if f'cog.{extension}' in client.extensions:\r\n client.unload_extension(f'cog.{extension}')\r\n\r\n client.load_extension(f'cog.{extension}')\r\n reaction = client.get_cog('Reaction')\r\n await ctx.send(f\"{reaction.success} *`cog.{extension}`* has been reloaded!\")",
"def given_extension_has_loaded(context):\n uitests.vscode.extension.activate_python_extension(context)",
"def load_extension_object(name):\n module = None\n if \".\" not in name:\n try:\n module = import_module(f\"marko.ext.{name}\")\n except ImportError:\n pass\n if module is None:\n try:\n module = import_module(name)\n except ImportError:\n raise ImportError(\n f\"Extension {name} cannot be found. Please check the name.\"\n )\n\n try:\n maker = getattr(module, \"make_extension\")\n except AttributeError:\n raise AttributeError(\n f\"Module {name} does not have 'make_extension' attributte.\"\n )\n return maker",
"def when_extension_has_loaded(context):\n uitests.vscode.extension.activate_python_extension(context)",
"def load_extensions(self, cogs=None, path='cogs.'):\n for extension in cogs or self._extensions:\n try:\n self.load_extension(f'{path}{extension}')\n print(f'Loaded extension: {extension}')\n except Exception as e:\n traceback.print_exc()",
"def add_encoded_extension(self, extension):\n ...",
"def load_extension(self, check=True, **kwds):\n self.unload_extension()\n if check:\n config = kwds.get(\"PASSLIB_CONFIG\") or kwds.get(\"PASSLIB_CONTEXT\")\n for key in self._config_keys:\n kwds.setdefault(key, UNSET)\n update_settings(**kwds)\n import passlib.ext.django.models\n if check:\n self.assert_patched(context=config)",
"def ext(self, name):\n if name in self.__require:\n return self.__extensions[name]\n\n raise extensions.ExtensionNotRequired(name)",
"def load_extension(import_path: str) -> type[jinja2.ext.Extension]:\n with reraise(TemplateExtensionNotFoundError(import_path)):\n extension = import_object(import_path)\n\n if not (\n isinstance(extension, type) and issubclass(extension, jinja2.ext.Extension)\n ):\n raise TemplateExtensionTypeError(import_path, str(type(extension)))\n\n return extension",
"def get_extension(self, name):\n extensions = self._extensions\n if name in extensions:\n return extensions[name]\n else:\n raise ValueError('extension %s not found' % name)",
"def load(self, entry: mitmproxy.addonmanager.Loader):\n logging.info( '1: load' )",
"def plugin_loaded():\n global settings\n update_settings()\n settings.add_on_change('extensions_path', update_settings)",
"def load(self):\n self.player = self.reader.load_script(0)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read the original dataset with the BeautifulSoup XML extractor | def read_data(self):
with open(self.data_path, encoding="utf8",
errors="ignore") as fl:
fle = fl.read()
bs_data = BeautifulSoup(fle, "xml")
return bs_data | [
"def readXMLData(xmldoc: 'cc_xml_doc *') -> \"ScXMLDocument *\":\n return _coin.ScXMLDocument_readXMLData(xmldoc)",
"def ScXMLDocument_readXMLData(xmldoc: 'cc_xml_doc *') -> \"ScXMLDocument *\":\n return _coin.ScXMLDocument_readXMLData(xmldoc)",
"def get_description_data(xml_file):\n soup = bs4.BeautifulSoup(xml_file, 'lxml')\n descs = soup.find_all('description')\n for desc in descs:\n desc_data = str(desc.string)\n # if '.com' in desc_data:\n desc_arr = \"\"\n desc_arr.append(desc_data)",
"def _extract_data(self):\n print('Extracting dataset...')\n shutil.unpack_archive(os.path.join(self.raw_path, 'ECGDataDenoised.zip'), self.raw_path)",
"def xml_to_soup(self, xml_loc):\n x = open('/tmp/todd.xml', 'r').read()\n return BeautifulSoup(x, 'xml')",
"def make_soup(self):\n self.soup = BeautifulSoup(self.xml_fp, 'lxml-xml')\n self.xml_fp.close()",
"def xml2df(xml_data):\r\n \r\n root = bs.BeautifulSoup(xml_data,\"lxml\")\r\n all_records = []\r\n row_number = 0\r\n rows = root.find_all(\"r\")\r\n \r\n for row in rows:\r\n if row_number >= len(all_records):\r\n all_records.append([])\r\n \r\n for cell in row.find_all(\"c\"):\r\n if 'v' in cell.attrs:\r\n try:\r\n all_records[row_number].append(float(cell.attrs[\"v\"].replace(',','')))\r\n except ValueError:\r\n all_records[row_number].append(cell.attrs[\"v\"])\r\n else:\r\n if 'r' not in cell.attrs:\r\n all_records[row_number].append(cell.attrs[\"l\"])\r\n else:\r\n \r\n for row_index in range(int(cell.attrs[\"r\"])):\r\n if (row_number + row_index) >= len(all_records):\r\n all_records.append([])\r\n all_records[row_number + row_index].append(cell.attrs[\"l\"])\r\n else:\r\n all_records[row_number + row_index].append(cell.attrs[\"l\"])\r\n \r\n row_number += 1\r\n return all_records",
"def data_from_html():\n Session = sql.prepare_database(blogabet.configuration.database_url, model.Base)\n data = Session.query(model.Bettor).all()\n for dat in data:\n tree = lxml.html.document_fromstring(dat.HTML)\n matches = []\n i = 0\n pom = tree.cssselect('div.media-body')\n for element in tree.cssselect('div.media-body'):\n reduced = []\n asstr = str(element.text_content()).splitlines()\n for s in asstr:\n tmp = s.strip()\n if tmp != \"\":\n reduced.append(tmp)\n match = parse_different(reduced, dat.name)\n if match is not None:\n matches.append(match)\n print(\"iteration {i} out of {j}\".format(i=i, j=len(pom)))\n i += 1\n for match in matches:\n sql.update(Session, match)",
"def extract_data(self):\r\n self.extract_student_demographics_data()\r\n self.extract_staff_demographics_data()\r\n self.extract_school_geography_data()",
"def extract(self, soup, kernel):\n raise NotImplementedError",
"def get_dataset(ds,dataDir,removecompressed=1):\n #Convert input ds to string incase it is put in via function\n ds = str(ds)\n #The final character of the dataset can be a letter\n lettersuffix=''\n if re.search('[A-Za-z]$',ds):\n lettersuffix = ds[-1]\n ds = ds[:-1]\n openfMRI_dataset_string = '{0:06d}'.format(int(ds)) + lettersuffix\n #Some datasets include\n try:\n os.mkdir(dataDir)\n except:\n pass\n\n datasetDir = os.path.join(dataDir, 'openfmri/')\n\n try:\n os.mkdir(datasetDir)\n except:\n pass\n\n openfMRI_url = 'https://openfmri.org/dataset/ds' + openfMRI_dataset_string + '/'\n r = urlopen(openfMRI_url).read()\n soup = BeautifulSoup(r,'lxml')\n\n #Isolate only the links from the latest revision. The text \"data associated with revision\". If the website changes its static text, this needs to be changed\n unformatted_soup=soup.prettify()\n firstOccurance=unformatted_soup.find('Data Associated with Revision')\n secondOccurancce=unformatted_soup[firstOccurance+1:].find('Data Associated with Revision')\n #If there is only one \"Data Associated...\" (i.e. only one revision) this returns -1. This should be kept. Otherwise add on the firstOccurance index\n if secondOccurancce != -1:\n secondOccurancce+=firstOccurance\n #The latest links are confined within this part of the text\n soup_latestversion = BeautifulSoup(unformatted_soup[firstOccurance:secondOccurancce],'lxml')\n\n # Loop through all links and dowload files\n filelist = []\n for a in soup_latestversion.find_all('a', href=True):\n #This assumes that all files include ds....\n if re.search('ds[A-Za-z_0-9.-]*$',a['href']):\n filename_start=re.search('ds[A-Za-z_0-9.-]*$',a['href']).start()\n filelist.append(a['href'][filename_start:])\n print('Downloading: ' + a['href'][filename_start:])\n urlretrieve(a['href'],datasetDir + a['href'][filename_start:])\n print('--- Download complete ---')\n for f in filelist:\n untar_or_unzip(datasetDir,f)\n print('--- Uncompressing complete ---')\n if removecompressed==1:\n for f in filelist:\n print('Clean up. Deleting: ' + f)\n os.remove(datasetDir+f)\n print('--- Clean up complete ---')\n print('NOTE: It is best to verify manually that all the correct data has been downloaded and uncompressed correctly. \\n If data is used in any publication, see openfmri.org about how to appropriately cite/credit the data.')\n print('--- Script complete ---')",
"def load_data(es: Elasticsearch) -> None:\n\n # \n tf = tarfile.open(\"wiki-small.tar.gz\")\n count = 1\n for tarinfo in tf.getmembers():\n if(\".html\" in os.path.split(tarinfo.name)[1]):\n file = tf.extractfile(tarinfo)\n contents = file.read()\n tup = parse_html(contents)\n title = tup[0]\n body = tup[1]\n d = {\n \"title\": title,\n \"body\": body\n }\n es.index(index=\"wikipedia\", id=count,body=d)\n count += 1\n tf.close()",
"def convert_xml(sumo_detector_file=SUMO_DETECTOR_FILE, detect_csv_name=DETECT_CSV_NAME,\n tls_file_name=TLS_FILE_NAME, tls_csv_name=TLS_CSV_NAME):\n\n if sumo_detector_file is not None:\n xml_parse.parse_detector_xml(sumo_detector_file, detect_csv_name)\n # xml2csv.main([sumo_detector_file, '-o', detect_csv_name, '-s', ','])\n if tls_file_name is not None:\n xml_parse.parse_tl_xml(tls_file_name, tls_csv_name)\n # xml2csv.main([tls_file_name, '-o', tls_csv_name, '-s', ','])",
"def load_dataset(self):",
"def read_xml_file(self, xml_fn):\n pass",
"def __parse_akhbarelyomgate(self, html_):\n page_ = lxml.html.fromstring(html_)\n elements_ = page_.find_class('articleTitle')\n data_ = []\n for e in elements_:\n title_ = e.xpath('parent::node()/descendant::text()')\n for text_ in title_:\n text_ = text_.encode('utf-8').strip()\n if text_: data_.append(text_)\n break;\n\n elements_ = page_.find_class('articleBody')\n for e in elements_:\n body_ = e.xpath('parent::node()/descendant::text()')\n for text_ in body_:\n text_ = text_.encode('utf-8').strip()\n if text_: data_.append(text_)\n break;\n \n return '\\n'.join(data_)",
"def test_read_xml_string_for_staff(self):\n element = None\n for element, tag in read_xml_string(self.xml_string, records_tag=['staff']):\n if tag == 'staff':\n break\n expected_element = self.expected_xml_output.findall('.//staff')[0]\n self.assertIsInstance(element, Element)\n self.assertEqual(ElementTree.tostring(element), ElementTree.tostring(expected_element))",
"def parse_datasets(self , selector, response):\n datasets = []\n for row in selector.xpath('//table[@class=\"Tabular\"]//tr[td]'):\n base_title = row.xpath(\"td[1]//text()\").extract()[0].strip()\n for link in row.xpath(\"td[2]//a\"):\n dataset = DatasetItem()\n dataset.set_default('dataset/base_title', base_title)\n item = DistributionItem()\n dataset.add_distribution(item)\n dataset[\"documentation_title\"] = documentation_title(response)\n dataset[\"documentation_url\"] = documentation_url(response)\n\n date_arr = link.xpath(\".//text()\").extract()\n date_long = \"\".join( date_arr )\n date = re.sub( ' +', '', date_long )\n\n item['description'] = \" \".join([base_title , date])\n item['access_url'] = urlparse.urljoin(\"http://www.eba.europa.eu\", link.xpath(\"@href\").extract()[0])\n item['distribution_type'] = \"dcat:Download\"\n item['distribution_format'] = \"XLS\"\n\n dataset['title'] = item['description']\n# dataset['description'] = item['description']\n dataset['description'] = \"Aggregated statistical data on a key aspect of the implementation of prudential framework in each Member State.\"\n dataset['keyword_eng'] = \"Credit, Credit and financial institutions, European banking, Financial market, Market risk, Market supervision, Monetary and financial indicators, Regulation and policy, Risk analysis, Supervisory convergence\"\n dataset['issued'] = date\n dataset['uri'] = item['access_url']\n\n datasets.append(dataset)\n return datasets",
"def load_xmls(path, h5_dirname, rgb_tile_dir, rgb_res):\n #Load xml annotations and find the directory of .tif files\n if os.path.isdir(path):\n xmls = glob.glob(os.path.join(path,\"*.tif\"))\n \n #set xml dir, assume its in annotation folder\n annotation_dir = os.path.join(os.path.dirname(os.path.dirname(path)),\"annotations\")\n annotation_xmls = [os.path.splitext(os.path.basename(x))[0] + \".xml\" for x in xmls]\n full_xml_path = [os.path.join(annotation_dir, x ) for x in annotation_xmls]\n \n xml_data = []\n for x in full_xml_path:\n xml_data.append(load_xml(x, dirname=rgb_tile_dir, res=rgb_res)) \n data = pd.concat(xml_data)\n else:\n \n data = load_xml(path, dirname=rgb_tile_dir, res=rgb_res)\n \n return data"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filter for the paper-defined time interval (1994-2010). Separate the title and the document description. | def filter_data(self):
dataset = self.data_read.find_all(True)
filtered_docs = {}
for tag in dataset:
try:
# Filter the years
date = int(tag.find('year').text)
if 1994 < date < 2010:
doc_text = tag.find('docText').text
doc_splitted = doc_text.split('\n')
                    # Filter out empty lines when multiple line breaks separate the title and the text
doc_splitted = [d for d in doc_splitted if len(d) > 0]
# Extract the title
title = doc_splitted[0]
# Assign the text to the title in the dictionary
filtered_docs[title] = doc_splitted[1]
except:
pass
return filtered_docs | [
"def is_time_sensitive(title):\n count = 0\n number = 0\n timestop = ['weekly digest', 'webinar', 'conference', 'summit', 'upcoming']\n years = ['2010', '2011', '2012', '2013']\n months = ['january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', \\\n 'september', 'october', 'november', 'december']\n timestop = timestop + months + years\n while number == 0 and count < len(timestop):\n if re.search(timestop[count], title.lower()) != None:\n number = 1\n count += 1\n return number",
"def test_filter_time_period(client, awards_data):\n\n # narrow filter, gets none\n result = award_filter({\n 'time_period': [{\n 'start_date': '2016-01-01',\n 'end_date': '2016-02-01'\n }, ]\n })\n assert result.count() == 0\n\n # broad filter, gets all\n result = award_filter({\n 'time_period': [{\n 'start_date': '2010-01-01',\n 'end_date': '2020-02-01'\n }, ]\n })\n assert result.count() == 4\n\n # two windows, one unbounded\n result = award_filter({\n 'time_period': [{\n 'start_date': '2015-01-01',\n 'end_date': '2016-01-01'\n }, {\n 'start_date': '2017-01-01'\n }]\n })\n assert result.count() == 2",
"def moviesFromTo(start, end):\n data = movies.find({\"year\": {\"$gte\": start, \"$lte\": end}})\n for movie in data:\n for key, value in movie.items():\n if key == \"title\":\n print(\"{title: %s}\" % value)",
"def search_date(self):",
"def filterFormat(self):\n \n pass",
"def filter_study(title, condition, ec):\n lines = [title + '.']\n for l in condition.split('\\n'):\n lines.append(l + '.')\n segments = re.split(\n r'\\n+|(?:[A-Za-z0-9\\(\\)]{2,}\\. +)|(?:[0-9]+\\. +)|(?:[A-Z][A-Za-z]+ )+?[A-Z][A-Za-z]+: +|; +| (?=[A-Z][a-z])',\n ec, flags=re.MULTILINE)\n for i, l in enumerate(segments):\n l = l.strip()\n if l:\n if l:\n if ' ' in l and l[-1] not in string.punctuation:\n l += '.'\n lines.append(l)\n text = '\\n'.join(lines)\n cp = subprocess.run(['iconv', '-t', 'ascii//TRANSLIT'], input=text, stdout=subprocess.PIPE, universal_newlines=True)\n return cp.stdout",
"def ISO_date_filter(metadata_with_variants):\n print(\"Filtering metadata with variants for entries with a collection day specified.\")\n pattern=\"\\d{4}-\\d{2}-\\d{2}\"\n #Re.search() returns None if there is no match, and an re.match object if there is a match.\n #pd.notna() will return True for the rows that match the expression. \n standard_date=metadata_with_variants[metadata_with_variants.date.apply(lambda x: pd.notna(re.search(pattern,x)))]\n #Report the number of non-standard dates removed\n n_removed=len(metadata_with_variants)-len(standard_date)\n print(\"Excluded {} entries with no defined collection day. Entries remaining: {}\".format(n_removed,len(standard_date)))\n return standard_date",
"def createTimeTitle( options, timeSlice, rcmedData, modelData ):\n if(options['timeRegrid'] == 'daily'):\n timeTitle = timeSlice.strftime(\"%b %d, %Y\")\n if options['seasonalCycle'] == True:\n timeTitle = timeSlice.strftime(\"%b %d (all years)\")\n\n if(options['timeRegrid'] == 'monthly'):\n timeTitle = timeSlice.strftime(\"%b %Y\")\n if options['seasonalCycle'] == True:\n timeTitle = timeSlice.strftime(\"%b (all years)\")\n\n if(options['timeRegrid'] == 'annual'):\n timeTitle = timeSlice.strftime(\"%Y\")\n \n if(options['timeRegrid'] == 'full'):\n minTime = min(min(rcmedData['times']), min(modelData['times']))\n maxTime = max(max(rcmedData['times']), max(modelData['times']))\n timeTitle = minTime.strftime(\"%b %d, %Y\")+' to '+maxTime.strftime(\"%b %d, %Y\")\n \n return timeTitle",
"def count_verbatim_definiton(index):\n given_b4_2010 = 0\n given_after_2010 = 0\n not_given_b4_2010 = 0\n not_given_after_2010 = 0\n for paper, rows in index.items():\n for row in rows:\n year = row[\"pub_year\"]\n definition = row[\"criterion_definition_verbatim\"]\n if definition != \"\" and definition != \"not given\" and definition != \"blank\" and definition != \"unclear\" and definition != \"none given\":\n if year < 2010:\n given_b4_2010 += 1\n else:\n given_after_2010 += 1\n else:\n if year < 2010:\n not_given_b4_2010 += 1\n else:\n not_given_after_2010 += 1\n\n print(\"Num of times Verbatim Definition for a criteria is given (pre 2010): {}\".format(given_b4_2010))\n print(\"Num of times Verbatim Definition for a criteria is given (post 2010): {}\".format(given_after_2010))\n print(\n \"Num of times Verbatim Definition for a criteria is given (Total): {}\".format(given_after_2010 + given_b4_2010))\n print(\"------------------------------ \\n\")\n print(\"Num of times Verbatim Definition for a criteria is not given (pre 2010): {}\".format(not_given_b4_2010))\n print(\"Num of times Verbatim Definition for a criteria is not given (post 2010): {}\".format(not_given_after_2010))\n print(\"Num of times Verbatim Definition for a criteria is not given (Total): {}\".format(\n not_given_b4_2010 + not_given_after_2010))",
"def filter_by_date(self, cr, uid, lines, date_start=None,\\\n date_end=None, context={}):\n result = []\n \n for l in lines:\n if (date_start == None or l.period_id.date_start >= date_start)\\\n and (date_end == None or l.period_id.date_stop <= date_end):\n result.append(l) \n \n return result",
"def filter_date_range():\n start_date = custom_select(\"specify start date\\0:\", get_date)[1]\n if start_date == '-': # no start date provided\n start_date = ''\n end_date = custom_select(\"specify end date\\0:\", get_date)[1]\n if end_date == '-': # no end date provided\n end_date = ''\n return start_date, end_date",
"def give_books_by_date(book_obj, start_time: str, stop_time: str) -> list:\n date_books = book_obj.query.filter(book_obj.publishedDate <= stop_time).filter(book_obj.publishedDate >= start_time)\n return [i.to_dictionary() for i in date_books]",
"def dateFilterMyDataFrame(focus_df = focusDataframer(), bring_all_records_for = '2016'):\n pass\n print()\n print('> > > dateFilterMyDataFrame() filtering all records by year : ' + bring_all_records_for)\n# focus_df = focusDataframer()\n focus_df['dat3'] = [\n date[-4:] for date in focus_df['DATE']\n ]\n filtered_by_date_df = focus_df.loc[focus_df['dat3'] == bring_all_records_for]\n return filtered_by_date_df.drop(columns = 'DATE')",
"def test_export_with_intersecting_filter(self):\n self.t(\"track Tag1 2021-02-01T00:00:00 - 2021-03-01T00:00:00\")\n self.t(\"track Tag2 2021-03-01T00:00:00 - 2021-04-01T00:00:00\")\n\n # Pass a filter to export that is contained within the above intervals\n # and check that it picks up the containing interval\n j = self.t.export(\"2021-02-02 - 2021-02-03\")\n\n self.assertEqual(len(j), 1)\n\n self.assertClosedInterval(j[0],\n expectedId=2,\n expectedTags=[\"Tag1\"])",
"def filter_mb_df_dates(mb_df):\n \n mb_df = mb_df[mb_df['release_date'].str[-4:].map(lambda x: int(x)) >= 2010]\n mb_df.drop('release_date', axis = 1, inplace = True)\n \n return mb_df",
"def test_daily_get_date_filters(self):\n default_daily_time = dubwebdb.CTimes(d_format=\"%Y-%m-%d\",\n start_time=None,\n end_time=None)\n date_filters = dubwebdb.get_date_filters(default_daily_time)\n jan_first = datetime.datetime(2014, 1, 1, 0, 0, 0)\n self.assertGreater(date_filters.start,\n calendar.timegm(jan_first.timetuple()))\n self.assertGreater(date_filters.end,\n date_filters.start + (29*24*60*60))",
"def test_timeformat_filter():\n with systems.app.app_context():\n assert systems.timeformat_filter(50) == \"50 seconds\"\n assert systems.timeformat_filter(2110) == \"35 minutes\"\n assert systems.timeformat_filter(18030) == \"5 hours\"\n assert systems.timeformat_filter(2592000) == \"30 days\"",
"def preprocessing(df, start_date, end_date): \n\n #Filter dataframe by dates \n df = df[(df['Just Date'] >= start_date) & (df['Just Date'] <= end_date)]\n df = lc.CleanedFrame(df)\n\n return df",
"def filter_years():\n years = sys.argv[1:]\n for year in years:\n infile = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME1)\n outfile1 = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME2)\n outfile2 = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME3)\n print year\n filter_terms(infile, outfile1, outfile2)\n print"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build the bag-of-words model from the cleaned data and the dictionary of the unique words. | def build_bag_of_words_model(self):
lda_dictionary = Dictionary(self.cleaned_data.values())
lda_bag_of_words = [lda_dictionary.doc2bow(c, allow_update=True) for c in self.cleaned_data.values()]
return lda_dictionary, lda_bag_of_words | [
"def build_bag(data):\n\tbag = []\n\tfor sample in data:\n\n\t\tbag += [word.lower() for word in sample[0] if word not in bag and len(word) > 0]\n\n\t# Set the list to insure all dupes are removed\n\tbag = list(set(bag))\n\tbag.sort()\n\treturn bag",
"def bag_of_words(self):\t\n\t\tprint 'Building bag of words model...'\n\t\tstart = time.time()\n\n\t\tfinal_df = []\n\t\tfor file in self.filenames:\n\t\t\timg = cv2.imread(file)\t\n\t\t\tkeypoints = self.get_imagekeypoints(img)\t\n\t\t\t#predict the keypoints \n\t\t\tpredict_labels = self.kmeans_model.predict(keypoints)\n\t\t\t#make a bow vector \n\t\t\tbow_vector = self.make_vector(predict_labels, self.num_classes)\n\t\t\tfinal_df.append(bow_vector)\n\n\t\tend = time.time()\t\n\t\tprint 'Successfully built the BoW model.\\n\\nTime taken - %f seconds\\n' % (end - start)\n\n\t\treturn pd.DataFrame(final_df)",
"def build_vocab(self):\n copy_dict = self.unigram_dic_.copy()\n for word in self.bigram_dic_:\n # First feed the vocabulary with bigrams :\n if word in self.phrasewords_:\n try:\n i, j = (word.replace(self.parsing_char_, \" \", 1)).split()\n # delete unigrams if unigrams only appear in a given bigram\n if self.unigram_dic_[i] == self.phrasewords_[word]:\n try:\n # Delete element from copy_dict and not\n # unigram_dic_\n del copy_dict[i]\n except:\n pass\n if self.unigram_dic_[j] == self.phrasewords_[word]:\n try:\n del copy_dict[j]\n except:\n pass\n self.vocabulary_[\n word.replace(self.parsing_char_, \"_\")\n ] = self.phrasewords_[word]\n except:\n pass\n self.vocabulary_ = melt_vocab_dic(copy_dict, self.vocabulary_)",
"def bag_of_words(data):\n\tprint(\"Creating the bag of words...\\n\")\n\timport numpy as np\n\tfrom sklearn.feature_extraction.text import CountVectorizer\n\n\t# Initialize the \"CountVectorizer\" object, which is scikit-learn's\n\t# bag of words tool. \n\tvectorizer = CountVectorizer(analyzer = \"word\", tokenizer = None,\n\t\tpreprocessor = None, stop_words = None,max_features = 5000) \n\n\t# fit_transform() does two functions: First, it fits the model\n\t# and learns the vocabulary; second, it transforms our training data\n\t# into feature vectors. The input to fit_transform should be a list of \n\t# strings.\n\ttrain_data_features = vectorizer.fit_transform(data)\n\n\t# Numpy arrays are easy to work with, so convert the result to an \n\t# array\n\ttrain_data_features = train_data_features.toarray()\n\t# Take a look at the words in the vocabulary\n\tvocab = vectorizer.get_feature_names()\n\tprint('Nb of documents: '+str(train_data_features.shape[0])+', '+\n\t\t'Nb of features: '+str(train_data_features.shape[1]))\n\tprint('Computing the most frequent words.')\n\t# Sum up the counts of each vocabulary word\n\tdist = np.sum(train_data_features, axis=0)\n\t# create a dataframe with the most frequent words and their occurence\n\tvocabdf = pd.DataFrame()\n\tvocabdf['words'] = vocab\n\tvocabdf['count'] = dist\n\tvocabdf = vocabdf.sort_values('count',ascending=False)\n\tvocabdf = vocabdf.reset_index(drop=True)\n\treturn vocabdf",
"def buildDict(self, words):\n for w in words:\n self.add(w)",
"def _clean_and_save(self, data):\n\n stop_words = self._load_stopwords()\n tokenizer = RegexpTokenizer(r'\\w+')\n lemmatizer = WordNetLemmatizer()\n\n processed_data = [[lemmatizer.lemmatize(word, self._get_wordnet_pos(word))\n for word in tokenizer.tokenize(d.lower())\n if word not in stop_words]\n for d in data]\n\n dictionary = corpora.Dictionary(processed_data)\n corpus = [dictionary.doc2bow(d) for d in processed_data]\n\n dictionary.save(config.DICTIONARY_PATH)\n corpora.MmCorpus.serialize(config.CORPUS_PATH, corpus)\n\n return processed_data",
"def gen_wordbag(self, file_path, data_type, word_dict=\"../dict/word_dict.txt\"):\n\n\t\t#read word_dict.txt\n\t\tdict_list = []\n\t\twith open(word_dict) as d:\n\t\t\tfor line in d:\n\t\t\t\tdict_list.append(line.strip(\"\\n\"))\n\t\t\n\t\t# remove tmp file if exists\n\t\tif os.path.exists(file_path+\".tmp\"):\n\t\t\tos.remove(file_path+\".tmp\")\n\t\tif os.path.exists(data_type+\"_labels.txt\"):\n\t\t\tos.remove(data_type+\"_labels.txt\")\n\t\n\t\tclass_ids = []\n\t\t#gen vector fomate of data_set, overwrite origin {file_path}\n\t\twith nested(open(file_path), open(file_path+\".tmp\", \"a+\")) as (f1, f2):\n\t\t\tfor line in f1:\n\t\t\t\t# tmp vector of one text\n\t\t\t\tword_vector = []\n\t\t\t\tfor i in range(0, len(dict_list)):\n\t\t\t\t\tword_vector.append(0)\n\t\t\t\twords = line.split()\n\t\t\t\t#words[0] is {class_id}_type_id\n\t\t\t\tclass_id = words[0].split(\"_\")[0]\n\t\t\t\tclass_ids.append(class_id)\n\n\t\t\t\tfor w in words[1:]:\n\t\t\t\t\tif w in dict_list:\n\t\t\t\t\t\tword_vector[dict_list.index(w)] += 1\n\t\t\t\t\n\t\t\t\tf2.write(\" \".join(map(str, word_vector)) + \"\\n\")\n\t\t\n\t\tprint len(class_ids)\n\t\twith open(data_type+\"_labels.txt\", \"a+\") as l:\n\t\t\tl.write(\"\\n\".join(class_ids))\n\n\t\tshutil.move(file_path+\".tmp\", file_path)\n\t\tprint \"gen word bag over of %s.\" % file_path\n\t\treturn",
"def build_vocab(self):\n # Create a dictionary that maps words to their count\n self.word_count = self.word2count()\n\n # Trim the vocabulary\n # Get rid of out-of-vocabulary words from the dataset\n if self.min_word_count or self.max_vocab_size:\n self.trimVocab()\n self.trimDatasetVocab()\n\n # Trim sequences in terms of length\n if self.max_seq_len:\n if self.x_lengths:\n self.trimSeqLen()\n\n else:\n # Calculate sequences lengths\n self.x_lengths = [len(seq.split()) for seq in self.dataset[:, 0]]\n \n if self.target_col:\n self.y_lengths = [len(seq.split()) for seq in self.dataset[:, self.target_col]]\n \n self.trimSeqLen() \n\n \n # Map each tokens to index\n if not self.word2idx_mapping:\n self.mapWord2index()\n \n # Crate index2word mapping\n self.index2word = {index: word for word, index in self.word2index.items()}\n \n # Map dataset tokens to indices\n self.mapWords2indices()\n \n # Create weights matrix based on Glove vectors\n if self.use_pretrained_vectors:\n self.glove_vectors()",
"def bag_of_words(tokenized_sentence, all_words):\n\ttokenized_sentence = [stem(w) for w in tokenized_sentence]\n\n\tbag = np.zeros(len(all_words), dtype=np.float32)\n\tfor idx, w in enumerate(all_words):\n\t\tif w in tokenized_sentence:\n\t\t\tbag[idx] = 1.0\n\n\treturn bag",
"def preprocess_data(reviews: list):\n\n for review in reviews:\n ## Lower all review\n review[\"Review\"] = review[\"Review\"].lower()\n\n ## Remove special character from string\n regex1 = r\"[^\\w+\\d+\\']+\"\n review[\"Review\"] = re.sub(regex1, ' ', review[\"Review\"]).strip()\n\n ## Get unique word list\n review_list = [review[\"Review\"].split(\" \") for review in reviews]\n unique_words = set().union(*review_list)\n\n ## Remove stop words\n stop_words = set(stopwords.words('english'))\n unique_words = unique_words.difference(stop_words)\n\n return unique_words, reviews",
"def clean(self):\n self.suggest = {\n 'input': [' '.join(p) for p in permutations(self.name.split())],\n 'weight': self.popularity\n }",
"def build_dictionary_ngrams(training_datasets): \n word_counter_unigrams = collections.Counter()\n word_counter_bigrams = collections.Counter()\n word_counter_trigrams = collections.Counter()\n for i, dataset in enumerate(training_datasets):\n for example in dataset:\n sent1_tokenized = tokenize(example['sentence1_binary_parse'])\n sent2_tokenized = tokenize(example['sentence2_binary_parse'])\n bigrams1 = nltk.bigrams(sent1_tokenized)\n bigrams2 = nltk.bigrams(sent2_tokenized)\n trigrams1 = nltk.trigrams(sent1_tokenized)\n trigrams2 = nltk.trigrams(sent2_tokenized)\n word_counter_bigrams.update(bigrams1)\n word_counter_bigrams.update(bigrams2)\n word_counter_trigrams.update(trigrams1)\n word_counter_trigrams.update(trigrams2)\n word_counter_unigrams.update(sent1_tokenized)\n word_counter_unigrams.update(sent2_tokenized)\n \n vocabulary_uni = set([word for word in word_counter_unigrams])\n vocabulary_uni = list(vocabulary_uni)\n vocabulary_uni = [PADDING, UNKNOWN] + vocabulary_uni \n word_indices_uni = dict(zip(vocabulary_uni, range(len(vocabulary_uni))))\n \n vocabulary_bi = set([word for word in word_counter_bigrams])\n vocabulary_bi = list(vocabulary_bi)\n vocabulary_bi = [PADDING, UNKNOWN] + vocabulary_bi \n word_indices_bi = dict(zip(vocabulary_bi, range(len(vocabulary_bi))))\n \n vocabulary_tri = set([word for word in word_counter_trigrams])\n vocabulary_tri = list(vocabulary_tri)\n vocabulary_tri = [PADDING, UNKNOWN] + vocabulary_tri \n word_indices_tri = dict(zip(vocabulary_tri, range(len(vocabulary_tri))))\n\n return word_indices_uni, word_indices_bi, word_indices_tri",
"def build_word_dict(args, examples, fields, dict_size=None):\r\n word_dict = Vocabulary()\r\n for w in load_words(args, examples, fields, dict_size):\r\n word_dict.add(w)\r\n return word_dict",
"def init_dic(self):\n self.word_dic = {}\n self.bigram = 0 # count counts the number of bigrams for Laplace smoothing\n for i in range(len(self.corpus)):\n ch = self.corpus[i]\n if ch not in self.word_dic:\n self.word_dic[ch] = {}\n # The number of times the word appears independently\n self.word_dic[ch][ch] = 1 + self.word_dic[ch].get(ch, 0)\n if i != len(self.corpus) - 1:\n ch_next = self.corpus[i + 1]\n # Count the frequency of occurrence of the word and the following word\n self.word_dic[ch][ch_next] = 1 + self.word_dic[ch].get(ch_next, 0)\n\n for key in self.word_dic.keys():\n self.bigram += len(self.word_dic[key].keys()) - 1 # Count the total number of all bigrams",
"def _bagofwords(self, df, colname, idcol, min_df=3):\n vectorizer = CountVectorizer(min_df=min_df, ngram_range=(1, 2))\n X = vectorizer.fit_transform(df[colname])\n\n bow = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names()).T\n bow['ngram'] = [len(i.split()) for i in bow.index]\n bow = bow.sort_values(by='ngram', ascending=False)\n bow = bow.drop(columns='ngram').drop_duplicates().T\n bow.columns = [f\"{colname}_{i}\" for i in bow.columns]\n\n df = pd.concat([df[idcol], bow], axis=1)\n\n return df",
"def build_model():\n\n #building the pipeline. Firstly using Count vect and Tfidf to transform the words data into numbers. and then using a Adaboost model.\n model = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)),('tfidf', TfidfTransformer()),('clf', MultiOutputClassifier(AdaBoostClassifier()))]) #RandomForestClassifier(n_jobs=-1)\n return model",
"def _collect_words(self, data, init_words=None):\n logging.info('Building word list...')\n words = init_words if init_words is not None else {}\n for sample in tqdm(data['data']):\n for paragraph in sample['paragraphs']:\n # collect words in context\n for word in paragraph['context']:\n if word.text not in words:\n words[word.text] = 0\n else:\n words[word.text] += 1\n\n # collect words in question\n for qa in paragraph['qas']:\n for word in qa['question']:\n if word.text not in words:\n words[word.text] = 0\n else:\n words[word.text] += 1\n\n return words",
"def get_bag_words(tokens, doc_dict):\n bag_words = dict.fromkeys(doc_dict, 0)\n _tokens = [word for word in tokens if len(word) > 3] \n for token in _tokens: \n bag_words[token] += 1\n return bag_words",
"def bag_of_words(sentence, all_words):\n bag = np.zeros(len(all_words), dtype=np.float32)\n for (index, word) in enumerate(sentence):\n if word in all_words:\n bag[all_words.index(word)] = 1\n return bag"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
extract all the word probabilities from the lda model for each cluster | def get_word_probabilities(self):
word_probs = {}
for i in range(self.num_of_clusters):
topic_id = i
word_probs[topic_id] = []
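            # get_topic_terms returns (word_id, probability) pairs; lda_dict maps each id back to its word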
for l in self.lda_model.get_topic_terms(i, topn=len(self.lda_dict)):
word_probs[topic_id].append((self.lda_dict[l[0]], l[1]))
return word_probs | [
"def perplexity(ldamodel, testset, dictionary, size_dictionary, num_topics):\r\n # dictionary : {7822:'deferment', 1841:'circuitry',19202:'fabianism'...]\r\n # print ('the info of this ldamodel: \\n')\r\n # print ('num of testset: %s; size_dictionary: %s; num of topics: %s'%(len(testset), size_dictionary, num_topics))\r\n prep = 0.0\r\n prob_doc_sum = 0.0\r\n topic_word_list = [] # store the probablity of topic-word:[(u'business', 0.010020942661849608),(u'family', 0.0088027946271537413)...]\r\n for topic_id in range(num_topics):\r\n topic_word = ldamodel.show_topic(topic_id, size_dictionary)\r\n dic = {}\r\n for word, probability in topic_word:\r\n dic[word] = probability\r\n topic_word_list.append(dic)\r\n doc_topics_ist = [] #store the doc-topic tuples:[(0, 0.0006211180124223594),(1, 0.0006211180124223594),...]\r\n for doc in testset:\r\n doc_topics_ist.append(ldamodel.get_document_topics(doc, minimum_probability=0))\r\n testset_word_num = 0\r\n for i in range(len(testset)):\r\n prob_doc = 0.0 # the probablity of the doc\r\n doc = testset[i]\r\n doc_word_num = 0 # the num of words in the doc\r\n for word_id, num in doc:\r\n prob_word = 0.0 # the probablity of the word\r\n doc_word_num += num\r\n word = dictionary[word_id]\r\n for topic_id in range(num_topics):\r\n # cal p(w) : p(w) = sumz(p(z)*p(w|z))\r\n prob_topic = doc_topics_ist[i][topic_id][1]\r\n prob_topic_word = topic_word_list[topic_id][word]\r\n prob_word += prob_topic*prob_topic_word\r\n prob_doc += math.log(prob_word) # p(d) = sum(log(p(w)))\r\n prob_doc_sum += prob_doc\r\n testset_word_num += doc_word_num\r\n prep = math.exp(-prob_doc_sum/testset_word_num) # perplexity = exp(-sum(p(d)/sum(Nd))\r\n # print (\"the perplexity of this ldamodel is : %s\"%prep)\r\n return prep",
"def predict_topics(df):\n stopwords = (nltk.corpus.stopwords.words('english')\n + ['book', 'read'])\n dictionary = gensim.corpora.Dictionary.load('dictionary.gensim')\n lda_model = gensim.models.LdaMulticore.load('good_lda_model.gensim')\n\n sid = SentimentIntensityAnalyzer()\n lemmatizer = WordNetLemmatizer()\n\n def make_nvec(v, n=10):\n if len(v) == n:\n return [vv[1] for vv in v]\n result = [0.] * n\n for vv in v:\n result[vv[0]] = vv[1]\n return result\n\n sentiments = []\n nsentiments = []\n vectors = []\n for review in df['reviews']:\n sentences = []\n for sentence in review.split('.'):\n sentences.extend(sentence.split('||'))\n\n clean_sentences = [remove_proper_nouns(sentence)[0] for sentence in sentences]\n clean_sentences = [[lemmatizer.lemmatize(t) for t in nltk.word_tokenize(sentence)]\n for sentence in clean_sentences]\n clean_sentences = [[word for word in sentence if word not in stopwords and len(word) > 3]\n for sentence in clean_sentences]\n clean_sentences = [dictionary.doc2bow(sentence) for sentence in clean_sentences]\n sentence_vectors = [make_nvec(lda_model[sentence]) for sentence in clean_sentences]\n vectors.append(np.mean(sentence_vectors, axis=0))\n\n sentiment = np.zeros(10)\n nsentiment = np.zeros(10)\n for sentence, vector in zip(sentences, sentence_vectors):\n score = sid.polarity_scores(sentence)\n sentiment[np.argmax(vector)] += score['compound']\n nsentiment[np.argmax(vector)] += 1\n sentiments.append(sentiment / nsentiment)\n nsentiments.append(nsentiment)\n sentiments = np.array(sentiments)\n sentiments[np.isnan(sentiments)] = 0 # division by zero\n for i in range(10):\n df['review_embed{}'.format(i)] = sentiments[:, i]\n return df",
"def test():\n vocabulary = [\n \"bass\", \"pike\", \"deep\", \"tuba\", \"horn\", \"catapult\",\n ]\n beta = np.array([\n [0.4, 0.4, 0.2, 0.0, 0.0, 0.0],\n [0.0, 0.3, 0.1, 0.0, 0.3, 0.3],\n [0.3, 0.0, 0.2, 0.3, 0.2, 0.0]\n ])\n alpha = np.array([0.2, 0.2, 0.2])\n xi = 50\n # np.random.seed(1)\n\n documents = [\n lda_gen(vocabulary, alpha, beta, xi)\n for _ in range(100)\n ]\n\n # Create a corpus from a list of texts\n dictionary = Dictionary(documents)\n corpus = [dictionary.doc2bow(text) for text in documents]\n model = LdaModel(\n corpus,\n id2word=dictionary,\n num_topics=3,\n )\n print(model.alpha)\n print(model.show_topics())",
"def predict_topic(self, document):\n if self.lda is None:\n print(\"ERROR in lda_topic_model.predict_topic(): Need to create_lda() before predicting topics.\")\n dict_lda = getattr(self.lda, 'id2word')\n lda_vector = self.lda[dict_lda.doc2bow(self.to_bow(document))]\n return self.topic_names[max(lda_vector, key=lambda item: item[1])[0]]\n #print(max(lda_vector, key=lambda item: item[1])[0])\n #print(lda.print_topic(max(lda_vector, key=lambda item: item[1])[0])) # prints the most prominent LDA topic",
"def predict(self, text):\n bow_transformed = self.dataset.transform([text])[0]\n topic_predictions = self.model.get_document_topics(bow_transformed)\n sorted_predictions = sorted(topic_predictions, key=lambda x: x[1],\n reverse=True)\n sorted_predictions = [(self.topic_names[topic_idx], prob)\n for (topic_idx, prob) in sorted_predictions]\n return sorted_predictions",
"def predict(self, text):\n\n if not models:\n self.__init__(self.filename, force_load=True)\n vec = self.tokenize(text)\n print(\"BoW:\")\n print(vec)\n topics = np.array(self.model[vec], dtype=[('topic_id', int), ('confidence', float)])\n topics[::-1].sort(order=\"confidence\")\n # This may seem super weird, but it works and it is actually more efficient\n # see https://stackoverflow.com/questions/26984414/efficiently-sorting-a-numpy-array-in-descending-order\n print(topics)\n return topics",
"def prob_classify(self, document):\n features = document.get_features()\n probs = self.classifier.prob_classify(features)\n return probs",
"def predict_proba(self, text):\n probabilities = detect_langs(text)\n converted = []\n for el in probabilities:\n converted.append({'lang': self.map2wili(el.lang),\n 'prob': el.prob})\n return converted",
"def word_probabilities(word_counts, total_pos, total_neg, k=0.5):\n triplet_list = []\n for w, (pos, neg) in word_counts.items():\n p_pos = (pos + k) / (total_pos + 2 * k)\n p_neg = (neg + k) / (total_neg + 2 * k)\n triplet_list.append((w, p_pos, p_neg))\n return triplet_list",
"def recognize(models: dict, test_set: SinglesData):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n probabilities = []\n guesses = []\n\n sequences = test_set.get_all_sequences()\n XLenghts = test_set.get_all_Xlengths()\n\n for s in sequences:\n X, length = XLenghts[s]\n p = {}\n guess = \"\"\n for word, model in models.items():\n try:\n p[word] = model.score(X, length)\n except:\n p[word] = float('-inf')\n probabilities.append(p)\n values = list(p.values())\n keys = list(p.keys())\n guesses.append(keys[values.index(max(values))])\n\n return probabilities, guesses\n\n \"\"\"\n valid_models = {word: model for word,model in models.items() if model is not None}\n probabilities = [word_probabilities(valid_models, *test_set.get_item_Xlengths(i))\n for i,_ in enumerate(test_set.wordlist)]\n guesses = [best_guess(word_probs) for word_probs in probabilities]\n return probabilities, guesses\n\ndef word_probabilities(models, X, lengths):\n word_probs = {}\n\n for word,model in models.items():\n try:\n word_probs[word] = model.score(X, lengths)\n except ValueError: # The hmmlearn library may not be able to train or score all models.\n word_probs[word] = float('-inf')\n\n return word_probs\n\ndef best_guess(word_probs):\n return max(word_probs.keys(), key=lambda word: word_probs[word])\n \"\"\"",
"def calculate_probabilities(self):\n\n # o numero total de palavras de cada categoria\n total_positive = sum(self.pos_freq.values())\n total_negative = sum(self.neg_freq.values())\n\n vocab_size = len(self.counts.keys())\n\n # TODO: calculate priors for classes\n\n # Para cada palavra no dicionario de frequencia ...\n for word, freq in self.counts.iteritems():\n\n # ajustando entrada para o dicionario de probabilidades\n self.probs.setdefault(word, {}) # adiciona palavra em probs\n self.probs[word].setdefault(\"pos\", 0)\n self.probs[word].setdefault(\"neg\", 0)\n\n # ocorrencias da palavra nas classes (+1 for Laplace smoothing)\n positive_count = freq.get(\"pos\", 0) + 1\n negative_count = freq.get(\"neg\", 0) + 1\n\n pw_given_pos = (float(positive_count) /\n (total_positive + vocab_size + 1))\n pw_given_neg = (float(negative_count) /\n (total_negative + vocab_size + 1))\n\n self.probs[word][\"pos\"] = pw_given_pos\n self.probs[word][\"neg\"] = pw_given_neg\n\n # probabilities for the unknown word\n self.probs[self.unk_word] = {}\n self.probs[self.unk_word]['pos'] = 1.0 / (total_positive + vocab_size + 1)\n self.probs[self.unk_word]['neg'] = 1.0 / (total_negative + vocab_size + 1)",
"def theorize_text(s, classifier, data, dict_result = True):\n\n\tpredictions = classifier.decision_function([s]) #we want to know probabilities! this returns a list of lists of values\n\tguess_values = defaultdict()\n\t\n\t#populate dictionary with decisiion function per author\n\tfor index1, prediction in enumerate(predictions): #loop through predictions (f there are multiple )\n\t\tfor index2, value in enumerate(prediction): #loop through each guess and the probability\n\t\t\tguess_values[data.target_names[index2]] = value #save prediction to dictionary, getting name of author corresponding to index in prediction \n\tif dict_result == True:\n\t\treturn guess_values #return dictionary of guesses for the given string\n\telse:\n\t\toutput = \"\"\n\t\tfor author, value in guess_values.items():\n\t\t\toutput += author + \": \" + str(value)+\"\\n\\n\"\n\treturn output",
"def perform_lda(id2word,\n doc2bow,\n corpus,\n *n_topics):\n\n summary_all = []\n sub_topics_all = []\n health_topics_all = []\n\n print(n_topics)\n\n for n in n_topics:\n print('\\nLDA with {} topics...'.format(n))\n model = lda_gensim(id2word, doc2bow, n)\n\n # Gets coherence scores for trained LDA model\n # coherence_umass = None\n # coherence_umass = coh_model_umass.get_coherence()\n # print('Coherence score (u_mass): {}'.format(coherence_umass))\n # coherence_cv = coh_model_cv.get_coherence()\n # print('Coherence score (c_v): {}'.format(coherence_cv))\n\n # Gets document topics and key terms and health topics\n top_topics_df, terms_df = get_sub_topics(model, doc2bow, corpus, n)\n\n sub_topics = top_topics_df[['subreddit', 'health', 'topic_number']]\n sub_topics.set_index(['subreddit',\n 'health',\n ], inplace=True,\n )\n sub_topics.columns = pd.MultiIndex.from_product([sub_topics.columns,\n [n],\n ])\n sub_topics_all.append(sub_topics)\n\n health_sub_topics = sub_topics.loc[sub_topics.index.get_level_values(level=1) == 1].transpose()\n health_sub_topics = health_sub_topics.droplevel(level=0,\n )\n health_sub_topics = health_sub_topics.droplevel('health',\n axis=1,\n )\n health_sub_topics.index.name = 'topic_label'\n health_topics_all.append(health_sub_topics)\n\n health_topics_top = top_topics_df.loc[top_topics_df['health'] == 1, 'topic_number'].value_counts().rename(\n 'subreddit_count')\n\n # top_health = health_topics.idxmax()\n # possible_health_subs = top_topictops_df.loc[top_topics_df['topic_number'] == top_health]\n\n # Saves results to dictionary\n summary = {\n 'n_topics': n,\n # 'coherence_score_umass': coherence_umass,\n # 'coherence_score_cv': coherence_cv,\n 'top_health_topics': health_topics_top.to_dict(),\n 'topic_terms': terms_df['terms_list'].apply(lambda x: ', '.join(x)).to_dict()\n }\n summary_all.append(summary)\n # sub_topics_file = pd.read_pickle('all_lda_topics.pkl')\n # sub_topics_all.append(sub_topics_file)\n sub_topics_all = pd.concat(sub_topics_all,\n axis=1,\n )\n sub_topics_all.to_pickle('all_lda_topics.pkl')\n health_topics_all = pd.concat(health_topics_all)\n health_topics_all.to_pickle('health_lda_topics.pkl')\n\n return summary_all, health_topics_all",
"def word2vect_clustering(nlp, docs, metric='euclidean',\n cluster_prefix='cls', eps=3.0, min_samples=2):\n X = None\n for d in docs:\n if X is None:\n X = np.array([nlp(unicode(d)).vector.tolist()])\n else:\n X = np.concatenate((X, np.array([nlp(d).vector.tolist()])), axis=0)\n\n model = cluster.DBSCAN(eps=eps, min_samples=min_samples, metric=metric)\n labels = model.fit_predict(X)\n print labels\n cls2docs = {}\n for idx in xrange(len(labels)):\n if labels[idx] == -1:\n cls2docs[docs[idx]] = [docs[idx]]\n else:\n cls = cluster_prefix + str(labels[idx])\n arr = []\n if cls in cls2docs:\n arr = cls2docs[cls]\n else:\n cls2docs[cls] = arr\n arr.append(docs[idx])\n print cls2docs\n return cls2docs",
"def get_cluster_label(text_list, product_type):\r\n \r\n df = pd.DataFrame(list(zip(text_list, product_type)), columns = ['PAR', 'Product'])\r\n # df.dropna(subset = ['PAR'], inplace = True)\r\n\r\n # set default cluster label to -2\r\n df['cluster_label'] = -2\r\n \r\n # get list of products\r\n products = df.Product.value_counts().index\r\n \r\n # model for embedding\r\n print(\"creating embedding\")\r\n model = SentenceTransformer('paraphrase-distilroberta-base-v1')\r\n\r\n print(\"enumerating to cluster data\")\r\n for i, product in enumerate(products):\r\n print(\"starting to cluster product: \" + str(product))\r\n filtered_df = df[(df.Product == product) & (df[\"PAR\"].notnull())]\r\n \r\n \r\n # encode text with embedding\r\n filtered_embedding = model.encode(filtered_df.PAR.tolist())\r\n \r\n # dimenstinality reduction\r\n print(\"reduce dimension\")\r\n reduce_embedding = filtered_embedding\r\n \r\n try:\r\n # dimenstinality reduction\r\n reduce_embedding = umap.UMAP(n_components = 64).fit_transform(filtered_embedding)\r\n # clustering\r\n clusterer = hdbscan.HDBSCAN(min_cluster_size = 40, min_samples = 1)\r\n cluster_labels = clusterer.fit_predict(reduce_embedding)\r\n # update dataframe\r\n df.loc[(df.Product == product) & (df[\"PAR\"].notnull()), 'cluster_label'] = cluster_labels\r\n except:\r\n print(\"Exception occured when clustering\")\r\n \r\n return df['cluster_label'].to_list()",
"def predict(self, x):\n predictionList=[]\n if self._model.loaded:\n for xValue in x:\n systemLabel=self._model.infer_topic(xValue)\n result=self._model.topicLabelling[systemLabel]\n predictionList.append(int(result))\n else:\n self._util.logError('TopicClusteringPredictionModel','Model needs to be loaded before prediction')\n\n return predictionList",
"def model_logprobs(model, collator, input_texts, output_texts, length_penalty=0, batchsize=128):\n total_logprobs = []\n for batch in splitevery(zip(input_texts, output_texts), batchsize):\n input_batch, output_batch = zip(*batch)\n encoded_inputs = collator.encode_inputs(input_batch)\n encoded_outputs = collator.encode_outputs(output_batch)\n with torch.no_grad():\n output = model(**encoded_inputs, **encoded_outputs)\n # Normalize probabilities\n normalized = output[\"logits\"].log_softmax(dim=2)\n for k in range(len(input_batch)):\n total_logprob = sum([\n normalized[k, i, encoded_outputs[\"labels\"][k][i]]\n for i in range(1, len(encoded_outputs[\"labels\"][k]) - 1)\n ]).cpu().numpy().item()\n # Normalize by length: https://www.aclweb.org/anthology/W18-6322.pdf\n num_output_tokens = len(encoded_outputs[\"labels\"][k]) - 2 # Ignore tokens for text start/end\n total_logprob /= (5+num_output_tokens)**length_penalty / (5+1)**length_penalty\n total_logprobs.append(total_logprob) \n return total_logprobs",
"def get_informative_words(nb_model):\n words = nb_model.decades[1930].keys()\n freq_not_zero = np.zeros((len(DECADES), len(words)))\n for i, dec in enumerate(DECADES):\n for j, word in enumerate(words):\n freq_not_zero[i,j] = 1.0 - nb_model.decades[dec][word][0]\n scores = np.where(freq_not_zero!=0, freq_not_zero, nb_model.dirichlet)\n scores /= np.min(scores, axis = 0)\n best_words = {}\n for i, dec in enumerate(DECADES):\n indices = np.argsort(scores[i,:])[-100:]\n best_words[dec] = [words[index] for index in list(indices)]\n return best_words",
"def important_words(model, n):\n theta = np.array(model.theta.toArray())\n dictionary = np.array(CountVectorizerModel.load('./cv_model').vocabulary)\n ind = np.argpartition(theta, -n)[-n:]\n return dictionary[ind]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build pandas dataframe for the parallel coordinates view | def get_parall_coord_df(self):
parall_coords = {}
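        # lda_model[bow] yields each document's topic distribution as (topic_id, probability) pairs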
for i, b in zip(self.cleaned_data.keys(), self.lda_bag_of_words):
parall_coords[i] = self.lda_model[b]
parall_coord_df = pd.DataFrame.from_dict({k: dict(v) for k, v in parall_coords.items()})
parall_coord_df = parall_coord_df.replace(np.nan, 0)
parall_coord_df = parall_coord_df.transpose()
parall_coord_df = parall_coord_df.reindex(sorted(parall_coord_df.columns), axis=1)
parall_coord_df['Dominant_Topic'] = parall_coord_df.idxmax(axis="columns")
return parall_coord_df | [
"def create_dataframe(self):\n self.df = pd.DataFrame.from_records(self.all_residues)\n # add code to give meaningful columns names, including the one base on win_size, here\n # TODO\n window_size = self.half_window_size\n new_columns = [\"center\"]\n # For negative values\n neg_val = -1*window_size\n for i in range(neg_val,0):\n new_columns.append(str(i))\n\n # For positive values\n for i in range(1,window_size+1):\n new_columns.append(str(i))\n\n new_columns.append(\"state\")\n self.df.columns = new_columns",
"def columns_to_index(self) -> pd.DataFrame:\n if self._obj.index.name == \"locus\":\n return self._obj\n obj = self._obj.copy()\n obj.loc[:, \"locus\"] = pd.IntervalIndex.from_arrays(\n obj.loc[:, \"start_locus\"],\n obj.loc[:, \"end_locus\"],\n closed=\"left\",\n name=\"locus\",\n )\n obj = obj.set_index(\"locus\").drop([\"start_locus\", \"end_locus\"], axis=1)\n return obj",
"def index_to_columns(self) -> pd.DataFrame:\n if \"start_locus\" in self._obj.columns and \"end_locus\" in self._obj.columns:\n return self._obj\n obj = self._obj.copy()\n obj.loc[:, \"start_locus\"] = obj.index.left\n obj.loc[:, \"end_locus\"] = obj.index.right\n obj = obj.reset_index().drop(\"locus\", axis=1)[[\"chr\", \"start_locus\", \"end_locus\", \"intensity\"]]\n return obj",
"def data_frame(self):\n \n def column_from_range(named_range):\n sh, rlo, rhi, clo, chi = named_range.area2d()\n return [ sh.cell(r, clo).value \\\n for r in range(rlo, rhi) ]\n \n ranges = self.named_ranges\n return pandas.DataFrame(\n { name : column_from_range(ranges[name]) \\\n for name in ranges })",
"def create_dataframe():\n # Import Libraries\n import pandas as pd\n # Function\n df_cols = [\n 'sequence', # STR\n 'on_site_score' # FLOAT\n ]\n df = pd.DataFrame(columns=df_cols)\n \"\"\"\n implement memory optimization by assigning appropriate dtype\n \"\"\"\n return df",
"def as_geodataframe(self):\n return gpd.GeoDataFrame(geometry=list(self.geometries),crs=self.crs)",
"def get_hipparcos_data():\n print \"Building Hipparcos dataframe.\"\n\n coords = SkyCoord(ra='3h47m24s', dec='+24d7m0s',\n unit=(u.deg, u.deg), frame='icrs')\n r = 2 * u.degree\n hip_results_raw = Vizier.query_region(coords, radius=r,\n catalog='I/239/hip_main')[0]\n hip_results = hip_results_raw.to_pandas()\n\n hip_df = pd.DataFrame()\n hip_df['Proper Motion (RA)'] = hip_results['pmRA']\n hip_df['Proper Motion (Dec)'] = hip_results['pmDE']\n hip_df['Distance'] = (hip_results['Plx'] * 1e-3)**(-1)\n hip_df['Parallax'] = hip_results['Plx']\n hip_df['Plx. Error'] = hip_results['e_Plx']\n hip_df['mag'] = hip_results['Vmag']\n hip_df['Color'] = hip_results['B-V']\n hip_df['Absolute Magnitude'] = hip_df['mag'] - \\\n 5 * (np.log10(hip_df['Distance']) - 1)\n hip_df['T Effective'] = [0] * len(hip_df['Color'])\n hip_df['Parallax'] = hip_results['Plx']\n hip_df['Plx. Error'] = hip_results['e_Plx']\n hip_df['Confidence'] = 1 - hip_results['e_Plx']/max(hip_results['e_Plx'])\n\n # Subset the data based on proper motion and Parallax cuts\n ra_cond1 = hip_results['pmRA'] > pmra_lims[0]\n ra_cond2 = hip_results['pmRA'] < pmra_lims[1]\n dec_cond1 = hip_results['pmDE'] > pmdec_lims[0]\n dec_cond2 = hip_results['pmDE'] < pmdec_lims[1]\n plx_cond1 = hip_results['Plx'] > plx_lims[0]\n plx_cond2 = hip_results['Plx'] < plx_lims[1]\n\n hip_df = hip_df[ra_cond1 & ra_cond2 & dec_cond1 &\n dec_cond2 & plx_cond1 & plx_cond2]\n # hip_df.sort_values('Distance')\n pleiades_hip = {'Survey': 'Hipparcos',\n 'Mean Distance': round(np.mean(hip_df['Distance']), 1),\n 'Number of Stars': len(hip_df['Distance']),\n 'Data': hip_df}\n return pleiades_hip",
"def _create_dataframe(self):\n\n data_dict={}\n\n # create a Boolean array of only those positions where sequences have been identified\n positive_elements=self.arrays[\"number_genomes\"]>0\n\n for key in ['amino_acid_position','original_triplet','new_triplet','number_nucleotide_changes','mutation','number_genomes','original_amino_acid','new_amino_acid','synonymous','non_synonymous']:\n data_dict[key]=(self.arrays[key][positive_elements]).tolist()\n\n self.df=pandas.DataFrame(data=data_dict)\n\n self.df[\"number_nucleotide_changes\"]=self.df[\"number_nucleotide_changes\"].astype(\"int8\")",
"def createDataFrame():\n\n df = pd.DataFrame(data = {\"Calories\": None, \"Water / g\":None, \"Fat / g\": None, \"Protein / g\": None, \"Cholesterol / mg\":None}, index = DFmanager.getTimeIndex(), dtype = \"float64\")\n df.dropna(inplace = True)\n return df",
"def to_dataframe(directory,im,frame,field):\n #making the dataframe in tidy format\n\n sx, dx = Faster.faster(im)\n dx[\"side\"] = \"dx\"\n sx[\"side\"] = \"sx\"\n df = pd.concat([dx,sx])\n df[\"frame\"] = frame\n df[\"field\"] = field\n df[\"experiment\"] = directory\n df.to_csv(\"coordinates.txt\",index = True,header = None, sep = \" \", mode = \"a\")",
"def nuclear_locations_df() -> pd.DataFrame:\r\n emissions_data = data_processing.read_nuclear_powerplant_co2('global_power_plant_database.csv',\r\n 'owid-co2-data.csv', 'countries of the world.csv')\r\n position_data = data_processing.get_longtitude_latitude('global_power_plant_database.csv',\r\n 'owid-co2-data.csv', 'countries of the world.csv')\r\n\r\n return pd.DataFrame({\r\n 'Countries': position_data[0],\r\n 'Power Plant': position_data[1],\r\n 'Latitudes': position_data[2],\r\n 'Longitudes': position_data[3],\r\n 'Emissions': duplicate_emissions(emissions_data, position_data[0])\r\n })",
"def produce_df(data, rows, columns, row_names=None, column_names=None):\r\n row_index = pd.MultiIndex.from_product(rows, names=row_names)\r\n col_index = [i for i in range(1,len(columns[0])+1)]\r\n return pd.DataFrame(data, index=row_index, columns=col_index)",
"def net_parameters_to_dataframe(self, stringify_index=False):\n interactions, values = self.free_parameters, self.parameters.get_value()\n # now put everything in dataframe\n return pd.DataFrame({\n 'interaction': interactions,\n 'value': values\n }).set_index('interaction')",
"def to_data_frame(self, num_records: int = 0) -> PandasDataFrame:",
"def to_decoded_dataframe(self) -> DecodedSpots:\n df = self.to_features_dataframe()\n pixel_coordinates = pd.Index([Axes.X, Axes.Y, Axes.ZPLANE])\n df = df.drop(pixel_coordinates.intersection(df.columns), axis=1).drop(Features.AXIS, axis=1)\n return DecodedSpots(df)",
"def generate_pandas_data(fit_results):\n data = {}\n data[\"q\"] = fit_results.q\n for par in fit_results.parameter:\n data[str(par.values)] = fit_results.parameters.loc[par].values\n pd_data_frame = pd.DataFrame(data = data)\n return pd_data_frame",
"def features_pairplot_df(self):\n df = self.features.data()[self.features.features()]\n return df",
"def full_frame(self, noise=0):\n df = pd.DataFrame(self.results)\n self.noise = noise\n hoods = self.neighborhoods\n\n # add columns obtained from the hoods\n df['v_index'] = [h.v_index for h in hoods]\n df['m_center'] = [h.moment['center'] for h in hoods]\n df['m_width'] = [h.moment['std_err'] for h in hoods]\n df['m_bgnd'] = [h.moment['background'] for h in hoods]\n df['g_center'] = [h.gaussian.center for h in hoods]\n df['g_width'] = [h.gaussian.width for h in hoods]\n df['g_bgnd'] = [h.gaussian.background for h in hoods]\n df['g_int'] = [h.gaussian.amplitude for h in hoods]\n df['g_chisq'] = [h.gaussian.chisq for h in hoods]\n df['g_prob'] = [h.gaussian.prob_greater for h in hoods]\n # return the frame with the index set to time in microseconds\n df.index = df['time'] * 1e6\n # Now we need to set the formatting with\n # df.style.format(dictionary of callables)\n return df",
"def dataframe(self) -> pd.DataFrame:\n data = []\n columns = [\"lection\", 'season', 'week', 'day']\n for lection_membership in self.lections_in_system():\n if type(lection_membership.day) != MovableDay:\n raise NotImplementedError(f\"Cannot yet export for days of type {type(lection_membership.day)}.\")\n data.append(\n [\n lection_membership.lection.description, \n lection_membership.day.get_season_display(), \n lection_membership.day.week, \n lection_membership.day.get_day_of_week_display(), \n ]\n )\n df = pd.DataFrame(data, columns=columns)\n return df"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assign color to topics with opacity | def color_assign_to_topic_with_opacity(self, x):
color_with_opacity = list(mcolors.to_rgba(self.colors[x]))
color_with_opacity[3] = 0.3
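        # to_rgba returns channels in the 0-1 range, so scale RGB to 0-255 for the CSS-style rgba() string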
rgba = f'rgba({color_with_opacity[0] * 255}, {color_with_opacity[1] * 255}, {color_with_opacity[2] * 255}, {color_with_opacity[3]})'
return rgba | [
"def add_color(tweets):\n colors = list(Color(\"red\").range_to(Color(\"green\"), 100))\n for t in tweets:\n print t\n score = t['score']\n colorscore = (score + 1) / 2 * 100\n color = colors[int(colorscore)]\n t['color'] = color\n\n return tweets",
"def tubeColor(self):\n\t\treturn",
"def updateColor(self):\n\t\tpass",
"def set_text_color(self, foreground_color, background_color):\n\n raise NotImplementedError()",
"def highlight_edges(edges, image):\n image[edges>0.01] = [255, 0, 0]",
"def colorfran(self,):\r\n self.objeto_varios.color(self.random,self.color)",
"def poster(fg_color: str = \"#404040\", accent: str = \"#01BCA3\",\n dark: bool = False) -> Tuple[str]:\n matplotlib.font_manager.findSystemFonts()\n if dark:\n fg_color = \"#ffffff\"\n\n style = {\"axes.edgecolor\": fg_color,\n \"axes.labelcolor\": fg_color,\n \"text.color\": fg_color,\n \"xtick.color\": fg_color,\n \"ytick.color\": fg_color,\n \"font.family\": \"sans-serif\",\n \"font.sans-serif\": [\"Roboto\"]}\n\n palette = sns.color_palette([accent, fg_color])\n\n plt.rcParams[\"legend.frameon\"] = False\n sns.set_context(\"poster\", rc={\"lines.linewidth\": 4})\n sns.set_palette(palette)\n sns.set_style(\"ticks\", style)\n return palette",
"def route_style(color):\n return lambda feature: dict(color=color, opacity=0.9, weight=4)",
"def set_tint_opacity(node_tree, opacity):\n\n node_tree.nodes[Glass.TINT_OPACITY_NODE].outputs[0].default_value = opacity",
"def cluster_colour(self, node):\n\n # define max & min age\n max_age = max(self.age_dict.values())\n min_age = min(self.age_dict.values())\n # each cluster has an age range given by:\n cluster_age_range = (max_age - min_age) * self.age_fraction\n\n # make sure that in the beginning the number of groups is min 1\n if cluster_age_range == 0:\n cluster_age_range = 1\n\n self.lattice.nodes[node]['colour'] = math.ceil(self.lattice.nodes[node]['age'] / cluster_age_range)",
"def _opacity_background_colors(self) -> tuple[Color, Color]:\n base_background = background = BLACK\n opacity = 1.0\n for node in reversed(self.ancestors_with_self):\n styles = node.styles\n base_background = background\n opacity *= styles.opacity\n background += styles.background.multiply_alpha(opacity)\n return (base_background, background)",
"def set_bgcolor(self, color):\n self.bgcolor = color\n self.textsurf = self.generate_surface()",
"def createFadeToWhite(elements, color):\n colors = []\n for i, element in enumerate(elements):\n colors.append(get_color(color, 0 + i * 1/len(elements)))\n\n return colors",
"def fadeToColor(self, led, millis, r, g, b):\n self.logger.info(\"fade to color %03d|%03d|%03d in %.3f seconds\"%(r,g,b,millis/1000.0))\n self._connManager.send(COMMANDS['fadeToColor'] + struct.pack('B', led) + struct.pack('>H', millis) + struct.pack('B', r) + struct.pack('B', g) + struct.pack('B', b))",
"def text_foreground_color(self, color): # Sub-section .6\n command = 'FFE7{0}'.format(self._to_16_bit_rgb(color))\n reply = self._send_command(command, 2)\n return self._from_16_bit_rgb(reply)",
"def yellow():\n\n return color2float(Uint8Tensor([[255, 242, 0]]))",
"def set_fade(self, color_and_fade_callback: Callable[[int], Tuple[float, int, bool]]):\n raise NotImplementedError",
"def color(dist):\n if dist < 20:\n return 'indigo'\n elif dist < 40:\n return 'blue'\n elif dist < 60:\n return 'yellow'\n elif dist < 80:\n return 'orange'\n else:\n return 'red'",
"def set_led_colour(self, index, colour):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
build dictionary for the topics which will be input for the cytoscape node, generate a random position as well | def get_topic_nodes(self):
topic_dict = {}
for k, v in self.lda_most_rel_topics.items():
#random position
pos = np.random.randint(900, size=2)
topic_id = f'Cluster {k}'
# key: topic id, value: [top terms for the topic with linebreak, color assigned to topic, position of the
# topic node]
topic_dict[topic_id] = (' '.join(v).replace(' ', '\n'), self.colors[k], pos)
return topic_dict | [
"def get_network(cso, found_topics):\n\n if type(found_topics) is dict:\n list_of_topics = []\n for key, value in found_topics.items():\n list_of_topics += value\n\n list_of_topics = list(set(list_of_topics))\n elif type(found_topics) is list:\n list_of_topics = found_topics\n\n from collections import deque\n topics = []\n for topic in list_of_topics:\n if topic in cso[\"topics\"]:\n topics.append(topic)\n else:\n print(\"Asked to process '\", topic, \"', but I couldn't find it in the current version of the Ontology\")\n\n nodes = []\n edges = []\n\n nodes.append({\"id\": \"paper\", \"label\": \"paper\"})\n t_id = 0\n pos = {}\n for topic in topics:\n pos[topic] = t_id\n pos[t_id] = topic\n temp = {\"id\": \"topic\" + str(t_id), \"label\": topic}\n nodes.append(temp)\n t_id += 1\n\n matrix = np.ones((len(topics), len(topics)), dtype=int) * 999\n queue = deque()\n for topic in topics:\n queue.append({\"t\": topic, \"d\": 1})\n while len(queue) > 0:\n dequeued = queue.popleft()\n if dequeued[\"t\"] in cso[\"broaders\"]:\n broaders = cso[\"broaders\"][dequeued[\"t\"]]\n for broader in broaders:\n if broader in pos:\n matrix[pos[topic]][pos[broader]] = dequeued[\"d\"]\n queue.append({\"t\": broader, \"d\": dequeued[\"d\"] + 1})\n\n for topic in topics:\n nearest_min = matrix[pos[topic]].min()\n nearest_pos = np.where(matrix[pos[topic]] == nearest_min)[0]\n\n if (nearest_min == 1):\n for near in nearest_pos:\n edge = {\"id\": \"edge\", \"source\": topic, \"target\": pos[near], \"kind\": \"hard\"}\n edges.append(edge)\n elif (nearest_min > 1 and nearest_min < 999):\n for near in nearest_pos:\n edge = {\"id\": \"edge\", \"source\": topic, \"target\": pos[near], \"kind\": \"soft\"}\n edges.append(edge)\n else:\n edge = {\"id\": \"edge\", \"source\": topic, \"target\": \"paper\", \"kind\": \"conn\"}\n edges.append(edge)\n\n network = {\"nodes\": nodes, \"edges\": edges}\n return network",
"def buildCoauthorNodesEdges(authorColabDict, majorKeywords = ['bioinformatics',\n 'network', 'cancer', 'aging', 'circadian', 'omics', 'neuro', 'computer',\n 'genetics', 'microbiome', 'computational', 'cardio', 'social', 'epidemic',\n 'sleep', 'pharamcology', 'mitochondria', 'metabolism', 'inflammation',\n 'systems', 'chrono', 'diabetes', 'immunology', 'diet', 'bacteria', 'math', \n 'physics', 'graph', 'complex', 'machine learning', 'artificial intelligence']):\n nodeDF = []\n for author in authorColabDict:\n totalPapers = len(authorColabDict[author]['Papers'])\n fAuthors = authorColabDict[author]['First author']\n lAuthors = authorColabDict[author]['Last author']\n \n citationList = []\n keyWordList = []\n for paper in authorColabDict[author]['Papers']:\n keywords = authorColabDict[author]['Papers'][paper]['Keywords']\n if 'Citations' in authorColabDict[author]['Papers'][paper]:\n citationList.append(authorColabDict[author]['Papers'][paper]['Citations'])\n if len(keywords) > 0:\n keywords = [x.lower() for x in keywords]\n keyWordList += keywords\n citations = sum([x for x in citationList if x != 'n/a'])\n\n keyWordList = [x for x in keyWordList if x != '[]']\n if len(keyWordList) > 0:\n sortKeywordsDict = [dict({i : len(list(c))}) for i,c in groupby(sorted(keyWordList))]\n keyWordList = [re.sub(r'\\ .*$', '', i).lower() for i,c in groupby(sorted(keyWordList))]\n majorWord = ''\n for word in keyWordList:\n for ref in majorKeywords:\n if re.findall('{0}'.format(ref), word): #Pulling the first most prevalent word in the majorKeywords\n majorWord = ref \n break\n maxCount = max([list(x.values())[0] for x in sortKeywordsDict])\n maxKey = [list(x.keys())[0] for x in sortKeywordsDict if list(x.values())[0] == maxCount][0]\n else:\n maxKey = ''\n\n authorSur = re.findall(r'^(.*)\\,', author)[0]\n authorFore = re.findall(r'\\, (.*)', author)[0]\n fullName = authorFore + ' ' + authorSur \n nodeDF.append([fullName, totalPapers, fAuthors, lAuthors, citations, maxKey, majorWord])\n \n nodeDF = pd.DataFrame(nodeDF)\n nodeDF.columns = ['Author', 'Total papers', 'First authors', 'Last authors', \n 'Citations', 'Max keyword', 'Major keyword']\n nodeDF['id'] = list(range(len(nodeDF)))\n\n authorIDs = dict(zip(nodeDF['Author'], nodeDF['id']))\n edgeDF = []\n colabsSet = set()\n for author1 in authorColabDict:\n for author2 in authorColabDict[author1]['Coauthors']:\n if author2 in authorColabDict:\n colabStr = ''.join(sorted([author1, author2]))\n if colabStr not in colabsSet:\n nPapers = len(authorColabDict[author1]['Coauthors'][author2])\n coCitations = []\n for paper in authorColabDict[author1]['Coauthors'][author2]:\n if 'Citations' in authorColabDict[author1]['Papers'][paper]:\n coCitations.append(authorColabDict[author1]['Papers'][paper]['Citations'])\n nCitations = sum([x for x in coCitations if x != 'n/a'])\n\n authorSur1 = re.findall(r'^(.*)\\,', author1)[0]\n authorFore1 = re.findall(r'\\, (.*)', author1)[0]\n fullName1 = authorFore1 + ' ' + authorSur1\n authorSur2 = re.findall(r'^(.*)\\,', author2)[0]\n authorFore2 = re.findall(r'\\, (.*)', author2)[0]\n fullName2 = authorFore2 + ' ' + authorSur2\n \n edgeDF.append([fullName1, fullName2, nPapers, nCitations])\n colabsSet.add(colabStr)\n \n edgeDF = pd.DataFrame(edgeDF)\n edgeDF.columns = ['Author1', 'Author2', 'nPapers', 'nCitations']\n edgeDF['id1'] = [authorIDs[x] for x in edgeDF['Author1']]\n edgeDF['id2'] = [authorIDs[x] for x in edgeDF['Author2']]\n \n return nodeDF, edgeDF",
"def generate_docs(num_topics, num_docs, words_per_doc=50, vocab_size=30,\n alpha=None, beta=None, noise=-1, plsi=False, ctm=False, \n pareto=False):\n #@TODO: integrate ctm parameters (ie mu and sigma) into alpha and beta\n mu = np.zeros(num_topics)\n sigma = np.ones((num_topics, num_topics))\n \n if plsi and ctm:\n print \"plsi and ctm flags cannot both be active (returning None)\"\n return None\n \n if not plsi and not ctm:\n if pareto:\n alpha = [alpha / i for i in range(1, num_topics + 1)]\n beta = [np.sqrt(beta / i) for i in range(1, vocab_size + 1)]\n #beta = [beta / i for i in range(1, vocab_size + 1)]\n else:\n alpha = [alpha] * num_topics\n beta = [beta] * vocab_size\n\n if plsi or ctm:\n sig_words = [rsample(range(vocab_size), util.poisson(beta, vocab_size))\\\n for t in range(num_topics)]\n word_dist = [np.zeros(vocab_size) for t in range(num_topics)]\n for i in range(num_topics):\n word_dist[i][sig_words[i]] = 1.0 / len(sig_words[i])\n else:\n word_dist = [dirichlet(beta) for i in range(num_topics)]\n word_cdfs = []\n for topic in word_dist:\n word_cdfs.append(get_cdf(topic))\n \n topic_cdfs = []\n docs = []\n topics = []\n topic_dists = []\n doc_index = 0\n for i in range(num_docs):\n if doc_index % 100 == 0:\n print \"reached document\", doc_index\n if plsi:\n sig_topics = rsample(range(num_topics), \n util.poisson(alpha, num_topics))\n topic_dist = np.zeros(num_topics)\n topic_dist[sig_topics] = 1.0 / len(sig_topics)\n elif ctm:\n eta = N(mu, sigma)\n topic_dist = np.exp(eta) / np.sum(np.exp(eta))\n else:\n topic_dist = dirichlet(alpha)\n num_words = util.poisson(words_per_doc)\n doc = []\n topic_dists.append(topic_dist)\n topic_cdf = get_cdf(topic_dist)\n topic_cdfs.append(topic_cdf)\n doc_topics = []\n for word in range(num_words):\n if rand() < noise:\n doc.append(rsample(range(vocab_size), 1))\n doc_topics.append(-1)\n else:\n topic = sample(topic_cdf)\n doc.append(sample(word_cdfs[topic]))\n doc_topics.append(topic)\n docs.append(doc)\n topics.append(doc_topics)\n doc_index += 1\n return docs, topics, word_dist, topic_dists",
"def rand_TE(num_topics=5, k=20):\n W = np.random.uniform(-0.25,0.25,(num_topics,k)) \n return W",
"def generate_topics_network(self, gcube_token: str, contact_type: Union[Hashable, Tuple[Hashable]] = \"__all__\",\n cluster_topics: bool = False, num_threads: int = 500, rho_thr: float = 0.1,\n min_edge_weight: float = 0):\n contact_type = standardize_contact_type(contact_type)\n\n def fetch_topics(text: str, gcube_token: str, topic_egonet: ContactEgoNetwork, topics_list: deque):\n topics = get_topics_from_text(text=text, gcube_token=gcube_token, rho_th=rho_thr).items()\n for topic_id, topic_count in topics:\n topic_egonet.add_contact(timestamp=contact.timestamp, alter_id=\"__info_topic_\" + str(topic_id),\n contact_type=\"info_topic\",\n num_contacted_alters=len(topics))\n if cluster_topics:\n topics_list.append(topic_id)\n\n topic_egonet = ContactEgoNetwork(ego_id=self.ego_id, last_time=self.last_time)\n topics_list = deque()\n cluster_map = {}\n modularity = 0\n logging.debug(\"extracting topics from contacts\")\n try:\n with ThreadPoolExecutor(max_workers=num_threads) as executor:\n contacts = [contact for contact in self.get_all_contacts(contact_type=contact_type)]\n if \"__all__\" in contact_type:\n contacts.extend([no_contact for no_contact in self.get_all_contacts(contact_type=\"__no_contact__\")])\n for contact in contacts:\n executor.submit(fetch_topics, contact.text, gcube_token, topic_egonet, topics_list)\n except Exception as e:\n print(str(e))\n\n logging.debug(\"topic network has \" + str(topic_egonet.get_egonet_size()) + \" alters\")\n topic_egonet_clustered = None\n if cluster_topics:\n logging.debug(\"calculating topics relatedness\")\n topics_set = set(topics_list)\n topics_edgelist = get_topics_rel_network(topics=list(topics_set), gcube_token=gcube_token,\n num_threads=num_threads)\n # remap nodes ids for igraph\n vertex_map = dict([(str(topic), idx) for idx, topic in enumerate(topics_set)])\n # create a graph with the same number of vertices of the topics\n graph = igraph.Graph()\n logging.debug(\"creating topics relatedness graph adding vertices first\")\n graph.add_vertices(len(topics_set))\n # add edges between topics if they are above the chosen threshold and if they are not already there\n logging.debug(\"adding links to the graph\")\n es = []\n es_set = set()\n weights = []\n for topic_pair in topics_edgelist:\n if float(topic_pair[2]) > min_edge_weight and (vertex_map[topic_pair[0]], vertex_map[topic_pair[1]]) \\\n not in es_set and (vertex_map[topic_pair[1]], vertex_map[topic_pair[0]]) not in es_set:\n es.append((vertex_map[topic_pair[0]], vertex_map[topic_pair[1]]))\n es_set.add((vertex_map[topic_pair[0]], vertex_map[topic_pair[1]]))\n weights.append(float(topic_pair[2]))\n graph.add_edges(es)\n graph.es[\"weight\"] = weights\n topic_egonet_clustered = ContactEgoNetwork(ego_id=self.ego_id, last_time=self.last_time)\n if graph.ecount() > 0:\n # there is at least one edge, so we apply community detection\n logging.debug(\"applying community detection on relatedness network\")\n dendogram = graph.community_fastgreedy(weights=\"weight\")\n clusters = dendogram.as_clustering()\n membership = clusters.membership\n modularity = clusters.modularity\n for contact in topic_egonet.get_all_contacts(contact_type=\"info_topic\"):\n topic_egonet_clustered.add_contact(timestamp=contact.timestamp,\n alter_id=\"__clustered_topic__\" +\n str(membership[vertex_map[\n str(contact.alter_id.split(\"_\")[-1])]]),\n contact_type=\"info_clustered_topic\",\n num_contacted_alters=contact.num_contacted_alters)\n # store the membership values in the map\n 
cluster_map[str(contact.alter_id)] = membership[vertex_map[str(contact.alter_id)]]\n logging.debug(\"clustered network has \" + str(topic_egonet_clustered.get_egonet_size()) + \" alters\")\n else:\n logging.debug(\"since there are no edges in the relatedness network, I skip community detection\")\n # change contact_type to __clustered_topic__\n for contact in topic_egonet.get_all_contacts(contact_type=\"info_topic\"):\n topic_egonet_clustered.add_contact(timestamp=contact.timestamp,\n alter_id=contact.alter_id,\n contact_type=\"info_clustered_topic\",\n num_contacted_alters=contact.num_contacted_alters)\n return topic_egonet, topic_egonet_clustered, cluster_map, modularity",
"def get_nodes_from_dict(parent, dictionary, prefix):\n for topic in dictionary:\n if isinstance(topic, str):\n topic_source_id = \"{}-{}\".format(prefix, topic.strip().replace(\" \", \"_\"))\n topic_node = nodes.TopicNode(source_id=topic_source_id, title=topic.capitalize())\n LOGGER.info(\"\\tCreating a math topic node - {}\".format(topic))\n get_nodes_from_dict(topic_node, dictionary[topic], topic_source_id)\n parent.add_child(topic_node)\n else:\n for node in topic:\n if node and not isinstance(node, str):\n LOGGER.info(\"\\tAdding a child node - {}\".format(node))\n parent.add_child(node)",
"def get_document_nodes(self):\n doc_nodes = {}\n if self.filtered_topic_df is None:\n iterable_df = self.topic_df.copy()\n else:\n iterable_df = self.filtered_topic_df.copy()\n\n for idx, d in iterable_df.iterrows():\n cluster_id = 'Cluster ' + str(d['Dominant_Topic'])\n doc_nodes[str(d['Document_No'])] = (d['Title'], d['color'], cluster_id)\n\n return doc_nodes",
"def __init__(self, topics: List[int]):\n base_topics = np.array(sorted(set(topics)))\n topics = base_topics.copy().reshape(-1, 1)\n self.mappings_ = np.hstack([topics.copy(), topics.copy()]).tolist()",
"def create_questions(self):\n prev_node = self.root_node\n node_iter = PreOrderIter(prev_node)\n question_list = []\n\n terms = []\n definitions = []\n\n sentence_list = []\n question_starter_list = []\n\n for node in node_iter:\n # check if question is empty in node\n\n if node == self.root_node:\n continue\n\n question_starter = ''\n\n # checks if tree has valid document structure\n num_first_layer = len(self.root_node.children)\n num_below_first_layer = len(self.root_node.descendants) - len(self.root_node.children)\n if (len(self.root_node.descendants) > len(self.annotation_list) and num_below_first_layer > num_first_layer):\n question_starter = self.get_question_starter(node=node)\n # checks if prev node is not a sibling \n # Creates a property question based on subtopic here\n if len(node.ancestors) > 1 and prev_node is node.parent:\n node_layer = [sibling for sibling in node.siblings]\n node_layer.append(node)\n\n # if there is only one property, skip question\n if len(node_layer) <= 1:\n continue\n\n temp_term = \"%sWhat are the %s properties?\" % (question_starter, len(node_layer))\n temp_definition = '\\n'.join([ \"%s. \" % (i+1) + sibling.text for i, sibling in enumerate(node_layer) ])\n terms.append(temp_term)\n definitions.append(temp_definition)\n\n prev_node = node\n \n # Appends \n sentence_list.extend(node.sentences)\n question_starter_list.extend([question_starter] * len(node.sentences))\n\n for annotation in self.annotation_list:\n sentence_list.extend(annotation['sentences'])\n question_starter_list.extend([''] * len(annotation['sentences']))\n\n questions, question_starters = self.questions_from_sentlist(sentence_list=sentence_list, question_starter_list=question_starter_list)\n if not questions:\n print('Failed to score sentences')\n return terms, definitions\n\n temp_terms = [ q_starter+question.sentence.return_string() for question, q_starter in zip(questions, question_starters)]\n temp_definitions = [str(question.answer.content) for question in questions]\n\n # extend the question list\n temp_terms.extend(terms)\n temp_definitions.extend(definitions)\n\n return temp_terms, temp_definitions",
"def get_topic_distributions(model_info, corpus_name, subreddit_list):\r\n\t\r\n # initialize where topic counts will be stored for each model indicated in model_info\r\n model_dict = initialize_model_counters(model_info, subreddit_list)\r\n print()\r\n\r\n # iterate through each subreddit, each of its documents, and each word type in its documents to get counts.\r\n for subreddit in subreddit_list:\r\n\r\n current_time = datetime.datetime.now()\r\n print(str(current_time) + ' : starting ' + subreddit)\r\n print('--------------------')\r\n\r\n corpus_fpath = os.path.join(cons.corpora_dir, subreddit, corpus_name, corpus_name)\r\n corpus_metadata_fpath = os.path.join(cons.corpora_dir, subreddit, corpus_name, corpus_name + '_metadata.csv')\r\n corpus = gensim.corpora.MmCorpus(corpus_fpath)\r\n\r\n for doc in corpus:\r\n if len(doc) < 25:\r\n continue\r\n\r\n # For each model, get theta for the document.\r\n model_theta_dict = {}\r\n for model_key in model_dict.keys():\r\n doc_dist_gensim = model_dict[model_key]['model'][doc]\r\n k = model_key[3]\r\n doc_dist_numpy = np.zeros(k, dtype='float64')\r\n for (topic, val) in doc_dist_gensim:\r\n doc_dist_numpy[topic] = val\r\n\r\n # now that we have this document's theta, add it to the sum.\r\n model_dict[model_key]['doc_topic_sums'][subreddit] += doc_dist_numpy\r\n\r\n # From Thompson & Blei (2018):\r\n nz_theta_d = doc_dist_numpy > 0\r\n log_theta_d = xlogy(nz_theta_d, doc_dist_numpy)\r\n\r\n model_theta_dict[model_key] = {'nz_theta_d': nz_theta_d, 'log_theta_d': log_theta_d}\r\n\r\n # For each word type that occurs in doc, iterate through each model to make topic assignments.\r\n model_doc_token_topics = {model_key: np.zeros(model_key[3]) for model_key in model_dict}\r\n for (word_id, word_count) in doc:\r\n\r\n # Estimate topics for each model.\r\n for model_key in model_dict:\r\n k = model_key[3]\r\n #topic_assingments = assign_type_to_topic()\r\n\r\n # From Thompson & Blei (2018). Basically for the current word, get its\r\n # probability in each topic (nz_phis.T[word_id]). Multiply each element in this k-dimensional\r\n # vector by the corresponding elements in the document's nonzero theta vector. For each element\r\n # that is nonzero, return exponent(log phi values of the word in each topic + log theta values\r\n # of the document. Otherwise, return 0. Not sure why the .ravel() at the end--it seems that\r\n # this will return a k-dimensional vector with or without it. 
The resulting distribution\r\n # provides the distribution p(topic | word) from which we can make an assignment of the token\r\n # to a topic.\r\n topic_dist = np.where(model_dict[model_key]['nz_phis'].T[word_id] * model_theta_dict[model_key]['nz_theta_d'] != 0,\r\n np.exp(model_dict[model_key]['log_phis'].T[word_id] + model_theta_dict[model_key]['log_theta_d']),\r\n 0.0).ravel()\r\n\r\n # Normalize distribution p(topic | word, phi, theta):\r\n topic_dist = topic_dist / topic_dist.sum()\r\n\r\n # Draw a topic from topic_dist for however many times the word occurs in the document.\r\n topics = np.random.choice(k, size=int(word_count), p=topic_dist)\r\n\r\n for topic_i in topics:\r\n model_doc_token_topics[model_key][topic_i] += 1\r\n\r\n # now we have token-topic assingment counts for each word type present in the current document.\r\n # START HERE -->\r\n # update token-topic assignment counts\r\n for model_key in model_dict:\r\n model_doc_topic_counts = model_doc_token_topics[model_key]\r\n\r\n model_dict[model_key]['token_topic_counts'][subreddit] += model_doc_topic_counts\r\n\r\n # also make the token-topic distribution and add it to ongoing count\r\n model_doc_token_dist = model_doc_topic_counts / model_doc_topic_counts.sum()\r\n model_dict[model_key]['doc_topic_tokens_sums'][subreddit] += model_doc_token_dist\r\n\r\n model_dict[model_key]['doc_counts'][subreddit] += 1\r\n\r\n # Now we are done with all documents in a subreddit. Summary stats for the subreddit can now be calculated\r\n # including the average theta distribution, the distribution of token-topic assignments, & the average\r\n # token-topic document distribution.\r\n for model_key in model_dict.keys():\r\n\r\n # All token-topic assignments have been counted for this subreddit, so store those counts in\r\n # token_assignment_counts for later use and write them to file.\r\n token_topic_freqs_fpath = os.path.join(model_dict[model_key]['tdists_dir'],\r\n 'token_topic_freqs_' + subreddit + '.csv')\r\n write_token_topic_freqs(token_topic_freqs_fpath,\r\n model_dict[model_key]['token_topic_counts'][subreddit])\r\n\r\n # Find average theta distribution by dividing the summed thetas by the number of documents.\r\n avg_doc_topic_fpath = os.path.join(model_dict[model_key]['tdists_dir'],\r\n 'avg_doc_topic_' + subreddit + '.txt')\r\n avg_doc_topic = model_dict[model_key]['doc_topic_sums'][subreddit] / float(model_dict[model_key]['doc_counts'][subreddit])\r\n np.savetxt(avg_doc_topic_fpath, avg_doc_topic)\r\n\r\n # Find the average topic distribution of each document from token-topic assignments by dividing the sum of the\r\n # document distributions by the number of documents.\r\n avg_doc_topic_tokens_fpath = os.path.join(model_dict[model_key]['tdists_dir'],\r\n 'avg_doc_topic_from_tokens_' + subreddit + '.txt')\r\n avg_doc_topic_from_tokens = model_dict[model_key]['doc_topic_tokens_sums'][subreddit] / float(model_dict[model_key]['doc_counts'][subreddit])\r\n np.savetxt(avg_doc_topic_tokens_fpath, avg_doc_topic_from_tokens)\r\n\r\n # topic model summary files can now be written\r\n # Topic summary file. Possible things to include:\r\n # - entropy of the topic's word distribution (what does this really tell us that is useful?)\r\n # - entropy of topic over subreddits\r\n # - top N words & probabilities OR top words & probabilities up to some cumulative probability (eg, the\r\n # topic words needed to account for at least 50% of the topic's word distribution.\r\n # - number of tokens assigned to each subreddit. 
Can also do as a proportion of a subreddit's tokens\r\n # assigned to each topic.\r\n for model_key in model_dict:\r\n subreddit_entropy_counts, subreddit_entropy_props = get_subreddit_entropy(model_dict[model_key]['token_topic_counts'])\r\n\r\n phis = model_dict[model_key]['model'].get_topics()\r\n k = model_key[3]\r\n topic_entropies = [measures.entropy(phis[topic_i]) for topic_i in range(k)]\r\n\r\n max_subreddit_count, max_subreddit_prop = get_subreddits_w_max_topics(model_dict[model_key]['token_topic_counts'])\r\n\r\n # model_key = (training_corpus_type, sample_name, corpus_name, k)\r\n topic_summary_fpath = os.path.join(cons.lda_dir,\r\n model_key[0],\r\n model_key[1],\r\n model_key[1] + '-' + str(k),\r\n 'topics_summary.csv')\r\n write_topics_summary_file(topic_summary_fpath,\r\n subreddit_entropy_counts, subreddit_entropy_props,\r\n topic_entropies,\r\n max_subreddit_count, max_subreddit_prop,\r\n model_dict[model_key]['model'],\r\n k)",
"def construct_channel(self, *args, **kwargs):\n channel = self.get_channel(*args, **kwargs)\n scrape_spreadsheet()\n for grade in GRADE_DICT:\n source_id = grade.strip().replace(\" \", \"_\")\n LOGGER.info(\"\\tCreating a topic node - {}\".format(grade))\n topic = nodes.TopicNode(source_id=source_id, title=grade.capitalize())\n get_nodes_from_dict(topic, GRADE_DICT[grade], grade)\n channel.add_child(topic)\n raise_for_invalid_channel(channel) # Check for errors in channel construction\n return channel",
"def update_params(self):\n # todo: sample theta and phi\n\n #sample theta from dirichlet (A_{d,k}+alpha), since dim(theta)=ndoc * ntopic , we need to update for each d, so for each row\n for d in range(self.n_docs):\n self.theta[d,:] = np.random.dirichlet(self.A_dk[d,:] + self.alpha)\n\n #sample phi from dirichlet (B_{k,w}+beta), dim(phi) = ntopics * nwords\n for k in range(self.n_topics):\n self.phi[k,:] = np.random.dirichlet(self.B_kw[k,:] + self.beta)\n\n\n self.update_topic_doc_words()\n #print('thishif',self.topic_doc_words_distr[0,0,:])\n self.sample_counts() #update A and B",
"def test_detect_topics_dict(self):\n\n consumer = ELDConsumer(Queue(), 30, min_burst=0)\n with open(os.path.join(os.path.dirname(__file__), 'corpus.json'), 'r') as f:\n line = f.readline()\n tweet = json.loads(line)\n documents = consumer._to_documents([ tweet ])\n timestamp = twitter.extract_timestamp(tweet)\n consumer.buffer.enqueue(*documents)\n consumer._create_checkpoint(timestamp)\n self.assertEqual([ timestamp ], list(consumer.store.all().keys()))\n self.assertEqual(documents[0].dimensions.keys(), consumer.store.get(timestamp).keys())\n\n \"\"\"\n Create a new cluster with a sligtly different tweet.\n The function should return some of the different dimensions as breaking terms.\n \"\"\"\n document = documents[0].copy()\n document.text = document.text + ' pipe'\n cluster = Cluster(document)\n terms = consumer._detect_topics(cluster, timestamp + 60)\n self.assertEqual(dict, type(terms))",
"def generate_pdf_nodes(data, topic, source=\"\"):\n\n # Iterate through chapter data\n for chapter in data:\n # Create topics if we're dealing with a section\n if chapter.get('header'):\n source_id = \"{}-{}\".format(source, chapter['header'])\n subtopic = nodes.TopicNode(title=chapter['header'], source_id=source_id)\n topic.add_child(subtopic)\n generate_pdf_nodes(chapter['chapters'], subtopic, source=source_id)\n\n # Create a document node and its related exercise nodes if it's a document\n elif chapter.get(\"chapter\"):\n # Create doucment node\n source_id = \"{}-{}\".format(source, chapter['chapter'])\n topic.add_child(nodes.DocumentNode(\n title=chapter['chapter'],\n source_id=source_id,\n copyright_holder=COPYRIGHT_HOLDER,\n license=LICENSE,\n files=[files.DocumentFile(chapter['path'])]\n ))\n\n # Create exercise nodes\n for index, exercise in enumerate(chapter.get(\"exercises\") or []):\n exercise_id = \"{} Exercise {}\".format(source_id, index)\n exercise_node = nodes.ExerciseNode(\n title=chapter['chapter'],\n source_id=exercise_id,\n description=exercise.get('description'),\n copyright_holder=COPYRIGHT_HOLDER,\n license=LICENSE,\n )\n topic.add_child(exercise_node)\n create_exercise_questions(exercise_node, exercise.get('questions') or [])",
"def insert_synapses(params_dict, syn_params_dict, sections_dict, logger, syn_loc_seed):\n \n section_list = sections_dict['section_list']\n \n # defining a dictionary for the inserted synapse. \n syndict={ \n 'x': [], 'y': [], 'weight': [], 'BC_syn': [], 'dist': [] \n }\n \n count = 0 # counts total number of introduced synapses\n tot_segs = 0 # counts total number of segments in all sections\n syn_loc_rnd = np.random.RandomState(syn_loc_seed)\n for k, sec in enumerate(section_list):\n \n n3d = int(NEURON.n3d(sec = sec)) - 1\n tot_segs += sec.nseg\n\n # Interpolating the arc length position of the i'th point in the 3d list to the i'th 3d point\n f2 = interp1d([NEURON.arc3d(i, sec = sec) for i in range(n3d + 1)], np.array(range(n3d + 1)))\n\n for l in range(int(sec.L)):\n \n frac = (l + 0.5) / sec.L\n dist = path_distance(frac, sec, sections_dict)\n \n ################## This is a manually defined density function ##################\n # In order to use the XML-defined density function use the line: ##\n # --> synape = (np.random.rand() < syn_params_dict['density_function'](dist)) ##\n #################################################################################\n \n def synapse_sigmoid_distribution_function(x, scale_factor, offset, transition_placement):\n return 1 - (scale_factor * (0.5 * (1 + math.tanh(x - transition_placement))) + offset)\n \n # (distance, scale_factor, offset, transition_placement)\n scale_factor = params_dict['scale_factor'] \n offset = params_dict['offset'] \n transition_placement = params_dict['transition_placement']\n synape = (syn_loc_rnd.rand() < synapse_sigmoid_distribution_function(dist, scale_factor, \n offset, transition_placement))\n \n #################################################################################\n \n # transforming loaction on the segment to the segment number\n seg_no = int(f2(l + 0.5))\n \n x = (NEURON.x3d(seg_no, sec=sec) + NEURON.x3d(seg_no + 1, sec = sec)) / 2\n y = (NEURON.y3d(seg_no, sec=sec) + NEURON.y3d(seg_no + 1, sec = sec)) / 2\n \n if synape:\n count += 1\n \n BC_syn = NEURON.Exp2Syn(sec(frac))\n BC_syn.tau1 = 0.89\n BC_syn.tau2 = 1.84\n BC_syn.e = 0\n \n syndict['weight'].append(params_dict['syn_weight'])\n syndict['x'].append(x)\n syndict['y'].append(y)\n syndict['BC_syn'].append(BC_syn)\n syndict['dist'].append(dist)\n \n logger.info('{} synapses were defined in {} segments'.format(count, tot_segs)) \n return syndict",
"def _get_annotations(topic_model,\n hierarchical_topics: pd.DataFrame,\n embeddings: csr_matrix,\n linkage_function: Callable[[csr_matrix], np.ndarray],\n distance_function: Callable[[csr_matrix], csr_matrix],\n orientation: str,\n custom_labels: bool = False) -> List[List[str]]:\n df = hierarchical_topics.loc[hierarchical_topics.Parent_Name != \"Top\", :]\n\n # Calculate distance\n X = distance_function(embeddings)\n X = validate_distance_matrix(X, embeddings.shape[0])\n\n # Calculate linkage and generate dendrogram\n Z = linkage_function(X)\n P = sch.dendrogram(Z, orientation=orientation, no_plot=True)\n\n # store topic no.(leaves) corresponding to the x-ticks in dendrogram\n x_ticks = np.arange(5, len(P['leaves']) * 10 + 5, 10)\n x_topic = dict(zip(P['leaves'], x_ticks))\n\n topic_vals = dict()\n for key, val in x_topic.items():\n topic_vals[val] = [key]\n\n parent_topic = dict(zip(df.Parent_ID, df.Topics))\n\n # loop through every trace (scatter plot) in dendrogram\n text_annotations = []\n for index, trace in enumerate(P['icoord']):\n fst_topic = topic_vals[trace[0]]\n scnd_topic = topic_vals[trace[2]]\n\n if len(fst_topic) == 1:\n if isinstance(custom_labels, str):\n fst_name = f\"{fst_topic[0]}_\" + \"_\".join(list(zip(*topic_model.topic_aspects_[custom_labels][fst_topic[0]]))[0][:3])\n elif topic_model.custom_labels_ is not None and custom_labels:\n fst_name = topic_model.custom_labels_[fst_topic[0] + topic_model._outliers]\n else:\n fst_name = \"_\".join([word for word, _ in topic_model.get_topic(fst_topic[0])][:5])\n else:\n for key, value in parent_topic.items():\n if set(value) == set(fst_topic):\n fst_name = df.loc[df.Parent_ID == key, \"Parent_Name\"].values[0]\n\n if len(scnd_topic) == 1:\n if isinstance(custom_labels, str):\n scnd_name = f\"{scnd_topic[0]}_\" + \"_\".join(list(zip(*topic_model.topic_aspects_[custom_labels][scnd_topic[0]]))[0][:3])\n elif topic_model.custom_labels_ is not None and custom_labels:\n scnd_name = topic_model.custom_labels_[scnd_topic[0] + topic_model._outliers]\n else:\n scnd_name = \"_\".join([word for word, _ in topic_model.get_topic(scnd_topic[0])][:5])\n else:\n for key, value in parent_topic.items():\n if set(value) == set(scnd_topic):\n scnd_name = df.loc[df.Parent_ID == key, \"Parent_Name\"].values[0]\n\n text_annotations.append([fst_name, \"\", \"\", scnd_name])\n\n center = (trace[0] + trace[2]) / 2\n topic_vals[center] = fst_topic + scnd_topic\n\n return text_annotations",
"def generate_topics(documents, store_path, nbr_topics=TOPIC_NBR, tfidf_on=False):\n logging.info(\"Start generating topics\")\n dictionary = corpora.Dictionary(documents)\n corpus = [dictionary.doc2bow(document) for document in documents]\n\n # Generate a tf idf model\n if tfidf_on:\n tfidf = models.TfidfModel(corpus)\n corpus = tfidf[corpus]\n topic_model = models.LdaModel(corpus, id2word=dictionary, num_topics=nbr_topics)\n\n dictionary.save(os.path.join(store_path, \"dictionary.dict\"))\n topic_model.save(os.path.join(store_path, \"model.lda\"))\n logging.info(\"Done generating topics\")",
"def generate_topic_vectors(ctx):\n asyncio.run(generate_topic_vectors_impl(ctx.obj[\"config\"]))",
"def __init__(self, nodes=None, replicas=3, distribution_points=3):\n self.distribution_points = distribution_points\n self.replicas = replicas\n\n self.ring = dict()\n self._sorted_keys = []\n\n if nodes:\n for node in nodes:\n self.add_node(node)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
build a dictionary of document nodes for the Cytoscape network visualization | def get_document_nodes(self):
doc_nodes = {}
if self.filtered_topic_df is None:
iterable_df = self.topic_df.copy()
else:
iterable_df = self.filtered_topic_df.copy()
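        # key: document id -> (title, cluster colour, dominant-topic cluster id)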
for idx, d in iterable_df.iterrows():
cluster_id = 'Cluster ' + str(d['Dominant_Topic'])
doc_nodes[str(d['Document_No'])] = (d['Title'], d['color'], cluster_id)
return doc_nodes | [
"def buildCoauthorNodesEdges(authorColabDict, majorKeywords = ['bioinformatics',\n 'network', 'cancer', 'aging', 'circadian', 'omics', 'neuro', 'computer',\n 'genetics', 'microbiome', 'computational', 'cardio', 'social', 'epidemic',\n 'sleep', 'pharamcology', 'mitochondria', 'metabolism', 'inflammation',\n 'systems', 'chrono', 'diabetes', 'immunology', 'diet', 'bacteria', 'math', \n 'physics', 'graph', 'complex', 'machine learning', 'artificial intelligence']):\n nodeDF = []\n for author in authorColabDict:\n totalPapers = len(authorColabDict[author]['Papers'])\n fAuthors = authorColabDict[author]['First author']\n lAuthors = authorColabDict[author]['Last author']\n \n citationList = []\n keyWordList = []\n for paper in authorColabDict[author]['Papers']:\n keywords = authorColabDict[author]['Papers'][paper]['Keywords']\n if 'Citations' in authorColabDict[author]['Papers'][paper]:\n citationList.append(authorColabDict[author]['Papers'][paper]['Citations'])\n if len(keywords) > 0:\n keywords = [x.lower() for x in keywords]\n keyWordList += keywords\n citations = sum([x for x in citationList if x != 'n/a'])\n\n keyWordList = [x for x in keyWordList if x != '[]']\n if len(keyWordList) > 0:\n sortKeywordsDict = [dict({i : len(list(c))}) for i,c in groupby(sorted(keyWordList))]\n keyWordList = [re.sub(r'\\ .*$', '', i).lower() for i,c in groupby(sorted(keyWordList))]\n majorWord = ''\n for word in keyWordList:\n for ref in majorKeywords:\n if re.findall('{0}'.format(ref), word): #Pulling the first most prevalent word in the majorKeywords\n majorWord = ref \n break\n maxCount = max([list(x.values())[0] for x in sortKeywordsDict])\n maxKey = [list(x.keys())[0] for x in sortKeywordsDict if list(x.values())[0] == maxCount][0]\n else:\n maxKey = ''\n\n authorSur = re.findall(r'^(.*)\\,', author)[0]\n authorFore = re.findall(r'\\, (.*)', author)[0]\n fullName = authorFore + ' ' + authorSur \n nodeDF.append([fullName, totalPapers, fAuthors, lAuthors, citations, maxKey, majorWord])\n \n nodeDF = pd.DataFrame(nodeDF)\n nodeDF.columns = ['Author', 'Total papers', 'First authors', 'Last authors', \n 'Citations', 'Max keyword', 'Major keyword']\n nodeDF['id'] = list(range(len(nodeDF)))\n\n authorIDs = dict(zip(nodeDF['Author'], nodeDF['id']))\n edgeDF = []\n colabsSet = set()\n for author1 in authorColabDict:\n for author2 in authorColabDict[author1]['Coauthors']:\n if author2 in authorColabDict:\n colabStr = ''.join(sorted([author1, author2]))\n if colabStr not in colabsSet:\n nPapers = len(authorColabDict[author1]['Coauthors'][author2])\n coCitations = []\n for paper in authorColabDict[author1]['Coauthors'][author2]:\n if 'Citations' in authorColabDict[author1]['Papers'][paper]:\n coCitations.append(authorColabDict[author1]['Papers'][paper]['Citations'])\n nCitations = sum([x for x in coCitations if x != 'n/a'])\n\n authorSur1 = re.findall(r'^(.*)\\,', author1)[0]\n authorFore1 = re.findall(r'\\, (.*)', author1)[0]\n fullName1 = authorFore1 + ' ' + authorSur1\n authorSur2 = re.findall(r'^(.*)\\,', author2)[0]\n authorFore2 = re.findall(r'\\, (.*)', author2)[0]\n fullName2 = authorFore2 + ' ' + authorSur2\n \n edgeDF.append([fullName1, fullName2, nPapers, nCitations])\n colabsSet.add(colabStr)\n \n edgeDF = pd.DataFrame(edgeDF)\n edgeDF.columns = ['Author1', 'Author2', 'nPapers', 'nCitations']\n edgeDF['id1'] = [authorIDs[x] for x in edgeDF['Author1']]\n edgeDF['id2'] = [authorIDs[x] for x in edgeDF['Author2']]\n \n return nodeDF, edgeDF",
"def get_topic_nodes(self):\n topic_dict = {}\n for k, v in self.lda_most_rel_topics.items():\n #random position\n pos = np.random.randint(900, size=2)\n topic_id = f'Cluster {k}'\n # key: topic id, value: [top terms for the topic with linebreak, color assigned to topic, position of the\n # topic node]\n topic_dict[topic_id] = (' '.join(v).replace(' ', '\\n'), self.colors[k], pos)\n\n return topic_dict",
"def get_nodes(self):\n return {node_id: Node(node_id, self.dates[node_id], self.communities[node_id]) for node_id in self.dates}",
"def create_network(edges, nodes, log):\n log.info('Creating the graph with attributes...')\n edges = edges.drop_duplicates(subset = ['xs', 'ys'])\n edges_tuples = [(edges.iloc[i]['xs'], edges.iloc[i]['ys']) for i in range(len(edges))]\n edges['edges_couple'] = edges_tuples #this will be useful for successive sorting after the graph is created on bokeh\n\n # build the nx graph\n log.info('Creating nx graph...')\n G=nx.Graph()\n G.add_edges_from(edges_tuples)\n nodes_list = list(G.nodes)\n\n idxs = []\n for i in nodes_list:\n idxs.append(nodes[nodes['Company_Name']==i].index[0])\n\n #sorting with same graph order\n nodes = nodes.iloc[idxs]\n\n #nodes analysis to define their centrality\n log.info('Calculating centralities...')\n centrality = nx.degree_centrality(G) #centrality dictionary\n nodes['centrality'] = [centrality[n] for n in list(nodes['Company_Name'])]\n log.info(\"Nodes df updated with the new column 'centrality'...\")\n\n #coordinates\n log.info('Adding coordinates for circular layout...')\n pos = init_layout(G, nodes)\n coordinates = [np.array(pos[j]) for j in nodes['Company_Name']]\n nodes['coords'] = coordinates\n log.info(\"Nodes df updated with the new column 'coords'...\")\n\n return G, edges, nodes",
"def __init__(self):\n\n self.nodes = {}\n self.rendered = False",
"def render_nodelist(nodes):\n all_nodes = []\n\n for node in nodes.values():\n all_nodes.append(node.nodelist())\n\n return {\n \"version\": \"1.0.0\",\n \"updated_at\": NOW_TIMESTAMP.isoformat(),\n 'nodes': all_nodes,\n }",
"def _load_nodes(self) -> NoReturn:\n total = self.project_size[1]\n self._nodes = {\n self.object_name(shared_enum.ElementType.NODE, index): index\n for index in range(total)\n }",
"def _generate_scene_dict(self):\n\n self._scene_info = {}\n self._scene_info[\"source\"] = \"PyDy\"\n self._scene_info[\"name\"] = self.name\n self._scene_info[\"newtonian_frame\"] = str(self.reference_frame)\n # TODO : This should be accomodated in scene instead of width/height\n # of scene.\n self._scene_info[\"workspaceSize\"] = 0.2\n\n self._scene_info[\"objects\"] = {}\n self._scene_info[\"cameras\"] = {}\n self._scene_info[\"lights\"] = {}\n\n for frame in self.visualization_frames:\n if self.system is None:\n constants = self.constants\n else:\n constants = self.system.constants\n object_info = frame.generate_scene_dict(constant_map=constants)\n self._scene_info[\"objects\"].update(object_info)\n\n for camera in self.cameras:\n object_info = camera.generate_scene_dict()\n self._scene_info[\"cameras\"].update(object_info)\n\n for light in self.lights:\n object_info = light.generate_scene_dict()\n self._scene_info[\"lights\"].update(object_info)",
"def node_to_dict(node):\n return {'x': node.x, 'y': node.y, 'text': node.text, 'id': get_node_id(node)}",
"def __make_nodes_elements_mapping(data):\n\n nodes = {}\n for e in data[\"elements\"]:\n for node in e[\"nodes\"]:\n if node in nodes:\n nodes[node].append(e[\"id\"])\n else:\n nodes[node] = [e[\"id\"]]\n\n return nodes",
"def nodecmtys(self):\n if hasattr(self, '_ncmtys'):\n return self._ncmtys\n nodecmtys = { }\n for c, nodes in self.iteritems():\n for n in nodes:\n nodecmtys.setdefault(n, set())\n nodecmtys[n].add(c)\n self._ncmtys = nodecmtys\n return nodecmtys",
"def nx_node_coordinates_dictionary(node_id_list, edge_list):\n\n json_graph = nx.Graph()\n add_node_values_to_nxgraph(json_graph, node_id_list)\n json_graph.add_edges_from(edge_list)\n node_coor_dictionary = nx.spring_layout(json_graph)\n return node_coor_dictionary",
"def connected_nodes():\n nodes = GridNodes.query.all()\n nodes_dict = {}\n for node in nodes:\n nodes_dict[node.id] = node.address\n return nodes_dict",
"def _create_sections(self):\n\t\t# NOTE: cell=self is required to tell NEURON of this object.\n\t\tself.node = [h.Section(name='node',cell=self) for x in range(self.nNodes)]\n\t\tself.paranode = [h.Section(name='paranode',cell=self) for x in range(self.nNodes)]",
"def __init__(self):\n self.G = nx.Graph()\n self.node_attr_dfs = dict()\n self.unique_relations = set()\n self.node_types = dict()\n self.normalized_node_id_map = dict()\n self.train_edges = list()\n self.valid_edges = list()\n self.test_edges = list()\n self.relation_to_id = dict()\n self.id_to_relation = dict()\n self.nodeid2rowid = dict()\n self.rowid2nodeid = dict()\n self.rowid2vocabid = dict()",
"def _to_graph_spec(self) -> Dict[str, Any]:\n return {\n \"name\": self.name,\n \"predecessors\": [s.name for s in self.predecessors],\n }",
"def get_node_names_with_pos(self):\n node_dict = {}\n for node in self.nodes:\n node_dict[node.name] = (node.x, node.y)\n\n return node_dict",
"def _create_dxa_node_dict(self):\n\n # Extract wse and width values to calculate d_x_area and store in a dictionary\n dxa_dict = {}\n for key, value in self.width.width_node.items():\n wse = self.wse.wse_node[key]\n dxa_dict[key] = _calculate_dxa(wse, value)\n \n return dxa_dict",
"def output_graph(self):\n for n in self._nodes.values():\n print(str(n.get_name()) + \": \" + n.get_type())\n print(n.get_prior())\n print(n.get_neighbors())"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the cosine similarity between the documents with the sklearn implementation. Also remove the duplicates (taking only the upper triangle elements from the result) in order to avoid duplicate edges on the graph | def calculate_cosine_similarity(self):
data = []
#prepare input for the sklearn cosine similarity function
for k in sorted(self.node_dict.keys()):
data.append(" ".join(self.cleaned_data[self.node_dict[k]]))
        vec = TfidfVectorizer()
        x = vec.fit_transform(data)
        # Calculate the pairwise cosine similarities (this can take a while for a large amount of data)
matrix_similarity = cosine_similarity(x)
        # Remove duplicates + diagonal: cosine_similarity returns a symmetric matrix, so the diagonal and one
        # of the two triangles (here the lower one) are redundant
tril_ind = np.tril_indices(matrix_similarity.shape[0])
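        # mask the lower triangle (including the diagonal) with -1 so these duplicate pairs are ignored downstream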
mat_sim_upper = matrix_similarity.copy()
mat_sim_upper[tril_ind] = -1
return mat_sim_upper | [
"def compute_similarity():\n movie_data = pd.read_csv(\"movie_recsys/datasets/movie_data.csv\")\n\n # Compute TF-IDF representation.\n tfidf = TfidfVectorizer(stop_words=\"english\")\n tfidf_matrix = tfidf.fit_transform(movie_data[\"story\"])\n\n # Compute Cosine Similarity.\n cosine_sim_scores = linear_kernel(tfidf_matrix, tfidf_matrix)\n\n # Saving.\n file_path = Path.cwd() / \"movie_recsys/datasets/cosine_sim_scores.csv\"\n savetxt(file_path, cosine_sim_scores)\n return",
"def _weighted_cosine_similarity(spacy_textA, spacy_textB):\n\n textA = _remove_stops(spacy_textA)\n textB = _remove_stops(spacy_textB)\n\n setsA = _get_consectuive_word_sets(textA)\n setsB = _get_consectuive_word_sets(textB)\n\n maxL = min(len(setsA[-1]), len(setsB[-1]))\n\n for L in range(1, maxL+1):\n pass",
"def document_similarity(self, doc1, doc2):\n\n synsets1 = self.doc_to_synsets(doc1)\n #print(synsets1)\n synsets2 = self.doc_to_synsets(doc2)\n #print(synsets2)\n\n return (self.similarity_score(synsets1, synsets2) + self.similarity_score(synsets2, synsets1)) / 2",
"def cosine_score(self):\n for i in self.all_results: \n length = 0\n for j in self.all_results[i]:\n\n length += self.all_results[i][j] ** 2\n length = math.sqrt(length)\n \n for j in self.all_results[i]:\n self.all_results[i][j] = self.all_results[i][j]/length\n \n for doc in self.all_results:\n score = 0\n for query_word in self.query_score:\n if query_word in self.all_results[doc]:\n score += self.all_results[doc][query_word] * self.query_score[query_word]\n self.doc_score[doc] = score",
"def similarity(self, word1, word2):\r\n aa = self.dot(self.distributional_sims.get(word1, {}), self.distributional_sims.get(word1, {}))\r\n ab = self.dot(self.distributional_sims.get(word1, {}), self.distributional_sims.get(word2, {}))\r\n bb = self.dot(self.distributional_sims.get(word2, {}), self.distributional_sims.get(word2, {}))\r\n if (aa == 0 or ab == 0 or bb == 0): # if word had no features (one word sentence) then similarity must be 0\r\n return 0\r\n cos_similarity = ab / (math.sqrt(aa * bb)) # cosine similarity\r\n return cos_similarity",
"def __cosine_sim(data, col1, col2, colname):\n data[colname + '.sim'] = [len(set(a.split()).intersection(b.split())) / (len(a.split()) * len(b.split())) for\n a, b in zip(data[col1], data[col2])]\n return data",
"def cosine_similarity(self, query, indices=None):\n\n pass",
"def _cosine_similarity(spacy_textA, spacy_textB):\n\n wordsA = ' '.join([token.lemma_ for token in spacy_textA])\n wordsB = ' '.join([token.lemma_ for token in spacy_textB])\n\n A = set(wordsA.split())\n B = set(wordsB.split())\n\n similarity = len(A & B) / (np.sqrt(len(A)) * np.sqrt(len(B)))\n\n return similarity",
"def cosine_similarity(d1, d2):\n return dot_product(d1, d2) / (norm(d1) * norm(d2))",
"def cos_dist(self,doc,tweet,col):\n MAX_TAKE = 10\n v1 = [(x,self.__tf__(x,doc)*self.__idf__(x,col)) for x in set(doc.split())]\n v2 = [(x,self.__tf__(x,tweet)*self.__idf__(x,col)) for x in set(tweet.split())]\n v2.sort(key=lambda x:x[1],reverse=True)\n # determine how many words to compare max is 10\n take = min(MAX_TAKE,min(len(v2),len(v1)))\n v2 = v2[:take]\n vd = dict(v1)\n v1 = [vd[v[0]] if v[0] in vd else 0.0 for v in v2 ]\n v2 = [v[1] for v in v2]\n return np.dot(v1, v2) / (np.sqrt(np.dot(v1, v1)) * np.sqrt(np.dot(v2, v2)))",
"def calculate_document_similarity(tfidf):\n return numpy.dot(tfidf, tfidf.T)",
"def cosine_similarity(cls, text1, text2):\n\n # count frequency of characters\n counter1 = defaultdict(lambda: 0)\n counter2 = defaultdict(lambda: 0)\n for char in text1:\n counter1[char] += 1\n for char in text2:\n counter2[char] += 1\n\n # vectorize and dot\n all_char = set(list(counter1.keys()) + list(counter2.keys()))\n len1_sqr = 0\n len2_sqr = 0\n dot = 0 # dot result of two vectors\n for char in all_char:\n dot += counter1[char] * counter2[char]\n len1_sqr += counter1[char] * counter1[char]\n len2_sqr += counter2[char] * counter2[char]\n\n # cosine similarity\n return dot / sqrt(len1_sqr * len2_sqr)",
"def cosine_similarity(self, sentence_embedding_a, sentence_embedding_b):\n dot_product = (sentence_embedding_a * sentence_embedding_b).sum(1)\n norm_a = sentence_embedding_a.norm(p=2, dim=1)\n norm_b = sentence_embedding_b.norm(p=2, dim=1)\n cosine_sim = dot_product / (norm_a * norm_b)\n return cosine_sim",
"def cal_cosine_similarity(vec_a, vec_b):\n cosine_similarity = np.divide(np.dot(vec_a, vec_b), (np.linalg.norm(vec_a) * np.linalg.norm(vec_b)))\n cosine_similarity = cosine_similarity * 0.5 + 0.5\n return cosine_similarity",
"def text_cluster_users_similarity(self):\n\n self._processor = ClusterUsers(store_docs=True)\n\n # for each user, we want to have a set of features representing it\n features = []\n for name, docs in self.processor.iterate():\n features = self.processor.get_features(docs)\n # there is only one tuple (name, docs) so we return here\n return euclidean_distances(features, features)",
"def cosine_similarity(self,user,friend):\n #counting the intersection of friends\n intersection = 0\n #counting the neighbors of the user\n user_neighbors = 0\n #counting the neighbors of the friend\n friend_neighbors = 0\n for neighbor_user in self.mention_network.neighbors_iter(user):\n user_neighbors += 1.0\n for neighbor_friend in self.mention_network.neighbors_iter(friend):\n friend_neighbors += 1.0\n if neighbor_friend == neighbor_user: intersection += 1.0\n\n #avoiding division by zero error\n if user_neighbors == 0 or friend_neighbors == 0:\n return 0\n\n cos_similarity = intersection / np.sqrt(user_neighbors * friend_neighbors)\n\n return cos_similarity",
"def compute_cosine_similarity(document_vector1, document_vector2):\n\tdocument_vector1_len = np.sqrt( document_vector1.dot(document_vector1) )\n\tdocument_vector2_len = np.sqrt( document_vector2.dot(document_vector2) )\n\tcosine_of_angle_theta = np.dot(document_vector1,document_vector2) / (document_vector1_len * document_vector2_len)\n\treturn cosine_of_angle_theta",
"def text_users_similarity(self):\n\n processor = self._processor = TextProcessor()\n features = []\n for user, docs in processor.iterate():\n features.append(processor.get_features(docs, user))\n \n # draw the matrix for alexis\n draw_matrix(euclidean_distances(features[0], features[0]), \n \"text_alexis\", OUTPUT_PATH)",
"def cosine_similarity(a, b):\n dot_product = np.dot(a, b)\n norm_a = np.linalg.norm(a)\n norm_b = np.linalg.norm(b)\n res = dot_product / (norm_a * norm_b)\n if np.isnan(res):\n # If one of the vectors have zero length,\n # we can not score the similarity between the two vectors, so we assume the worst\n return -1\n\n return res"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the visible edges (edges between document nodes over the cosine similarity threshold) | def get_filtered_edges(self):
#filter edges based on the given threshold
filtered_edges = np.argwhere(self.cos_sim >= self.cosine_sim_threshold)
        # get the document ids -> since documents can also be removed, simply returning the indices of the matching
        # points is not enough: the graph might otherwise attempt to create an edge to a non-existing document,
        # which throws an error
doc_ids = sorted(self.node_dict.keys())
cos_sim_edges = []
if self.filtered_topic_df is None:
available_docs = set(self.topic_df['Document_No'].tolist())
else:
available_docs = set(self.filtered_topic_df['Document_No'].tolist())
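        # keep only edges whose endpoints both still exist in the (possibly filtered) document set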
for f in filtered_edges:
idx_0 = doc_ids[f[0]]
idx_1 = doc_ids[f[1]]
if idx_0 in available_docs and idx_1 in available_docs:
cos_sim_edges.append((idx_0, idx_1, self.cos_sim[f[0], f[1]]))
return cos_sim_edges | [
"def determine_special_faces(graph, dist):\n return [node for node in graph.nodes() if graph.nodes[node]['distance'] >= dist]",
"def edges(gray):\n return cv2.Canny(gray, 50, 150)",
"def get_edge_nodes(self, graph):\n return eccentricity(graph)",
"def out_edge_count(self):",
"def edge_features(self):",
"def get_edges_from_centres(centres):\n bin_width = centres[1] - centres[0]\n edges = centres - bin_width / 2\n edges = np.append(edges, centres[-1] + bin_width / 2)\n \n return edges",
"def getConnectedEdges(*args, **kwargs):\n \n pass",
"def edges(self):\n return self.generateEdges()",
"def reveal(self, cell):\n\n discovered = []\n self.graph.set_fog(cell, False)\n neighbours = self.graph.neighbours(cell, False)\n for n in neighbours:\n if self.graph.get_fog(n):\n self.graph.set_fog(n, False)\n discovered.append(n)\n\n return discovered",
"def findEdges(self):\n for nc in self.nodes:\n x = nc[0]\n y = nc[1]\n nc_neighbours = self.nodes.get(nc).neighbours\n # Check for adjacent nodes in all directions\n if (x - self.x_div_len, y) in self.nodes:\n nc_neighbours.append(self.nodes.get((x - self.x_div_len, y)))\n if (x + self.x_div_len, y) in self.nodes:\n nc_neighbours.append(self.nodes.get((x + self.x_div_len, y)))\n if (x, y - self.y_div_len) in self.nodes:\n nc_neighbours.append(self.nodes.get((x, y - self.y_div_len)))\n if (x, y + self.y_div_len) in self.nodes:\n nc_neighbours.append(self.nodes.get((x, y + self.y_div_len)))",
"def enumerate_edges(self):\n q = queue.Queue()\n seen = [False for _ in range(self.max_vertex+1)]\n list_of_edges = []\n q.put(0)\n while not q.empty():\n v = q.get()\n if seen[v]:\n continue\n seen[v]=True\n for u in self.geodesics_continuations(v, self.max_vertex):\n q.put(u)\n list_of_edges.append((v,u))\n return list_of_edges",
"def node_link_incidence(self):\r\n\r\n # Rows = Nodes and Columns = Links\r\n network = []\r\n for node in self.N:\r\n node_list = []\r\n\r\n for link in self.A:\r\n if node == link[0]:\r\n node_list.append(1)\r\n elif node == link[1]:\r\n node_list.append(-1)\r\n else:\r\n node_list.append(0)\r\n\r\n network.append(node_list)\r\n \r\n return network",
"def detect_edges(img, kernel, norm=True):\n # TODO: detect edges using convolve2d and normalize the image containing detected edges using normalize.\n # raise NotImplementedError\n\n img_edges = convolve2d(img,kernel)\n\n if norm == True:\n img_edges = normalize(img_edges)\n\n\n return img_edges",
"def getNeighbors(self):\n neighbors = []\n for edge in self.edges:\n neighbors.append(edge.toUnit)\n return neighbors",
"def grab_edges(self):\n self._find_connects() # Do DFS\n self.connected_edge_list = [] # Init list of points on edge\n for i in range(self.n_rows):\n for j in range(self.n_cols):\n if (self.img[i][j] == 255):\n # If on edge, append important info about point to list\n self.connected_edge_list.append(self._get_info_ro(i,j))\n\n # Sort list first based on which edge group it belongs to\n # Then sort based on where it is radially\n self.connected_edge_list = np.array(self.connected_edge_list)\n self.connected_edge_list.view('i8,i8,i8,i8,i8').sort(order=['f4','f3'], axis=0)\n return self.connected_edge_list[:,[0,1,4]]",
"def get_edges_that_points_to_community(algo_coms):\n communities = algo_coms[\"communities\"]\n communities_edges_outside = []\n\n G = nx.DiGraph()\n data = pd.read_csv(\"filtered_edge_list.csv\", delimiter=\"\\t\", usecols=[\"Source\", \"Target\"])\n # iterate ove dataframe's rows\n for row in data.itertuples():\n u = int(row.Source)\n v = int(row.Target)\n G.add_edge(u, v)\n\n for c_list in tqdm.tqdm(communities):\n b_c = 0\n for i in range(0, len(c_list)):\n c_list[i] = int(c_list[i])\n\n for node in c_list:\n # get nodes's neighbours\n neighbors = [n for n in G[node]]\n\n for friend in neighbors:\n if int(friend) not in c_list:\n # check edge direction\n if G.has_edge(int(friend), int(node)):\n b_c += 1\n\n communities_edges_outside.append(b_c)\n\n a = np.array(communities_edges_outside)\n min = np.min(a)\n max = np.max(a)\n score = np.mean(a)\n std = np.std(a)\n return min, max, score, std, communities_edges_outside",
"def get_community_internal_edge_density(algo_coms):\n communities = algo_coms[\"communities\"]\n communities_internal_edge_density = []\n\n # I have to count the number of edges between the internal nodes inside the community c_list:\n # a) I load the graph\n # b) I check if exist a node between i, j in C, i != j\n G = nx.DiGraph()\n data = pd.read_csv(\"filtered_edge_list.csv\", delimiter=\"\\t\", usecols=[\"Source\", \"Target\"])\n # iterate ove dataframe's rows\n for row in data.itertuples():\n u = int(row.Source)\n v = int(row.Target)\n G.add_edge(u, v)\n\n for c_list in tqdm.tqdm(communities):\n n_c = len(c_list)\n denominator = n_c * (n_c - 1) / 2\n e_c = 0\n\n for i in range(0, len(c_list)):\n c_list[i] = int(c_list[i])\n H = G.subgraph(c_list)\n internal_edge_density = nx.density(H)\n communities_internal_edge_density.append(internal_edge_density)\n\n\n \"\"\"e_c = H.number_of_edges()\n try:\n internal_edge_density = e_c / denominator\n except ZeroDivisionError:\n internal_edge_density = 0.0\n communities_internal_edge_density.append(internal_edge_density)\"\"\"\n\n a = np.array(communities_internal_edge_density)\n min = np.min(a)\n max = np.max(a)\n score = np.mean(a)\n std = np.std(a)\n return min, max, score, std, communities_internal_edge_density",
"def detectEdges(self, src) -> None:\n ...",
"def getVerticesOfSelectedEdges(graph: edu.uci.ics.jung.graph.Graph) -> java.util.Collection:\n ..."
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reset the view and the LDA class itself with the original cluster number | def reset_settings(self):
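        # re-run __init__ with the original cluster number to restore the initial state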
self.__init__(self.orig_clust_num, self.data_path) | [
"def test_fully_reset_cluster(self):\n self.reset_cluster()",
"def reset(self):\n self.ensemble = []\n self.i = -1\n self.X_batch = None\n self.y_batch = None\n return self",
"def cluster_reinitialize (self, i_fold): \n if i_fold == self.cv_folds -1: \n if 'multi' in self.compute_type:\n print('\\n! closing cluster\\n')\n self.close_cluster()\n elif i_fold < self.cv_folds - 1: \n if 'multi' in self.compute_type:\n print('\\n! reinitializing cluster\\n')\n self.close_cluster()\n self.cluster, self.client = self.cluster_initialize()",
"def resetNode(self):\n\n self.nwkAdr = None\n self.macAdr = None\n self.panAdr = None\n self.packet_total = 0\n self.curNeighbors = []\n self.npPreNeighbors = [] \n self.pPreNeighbors = []\n self.isResetedNode = True",
"def reset(self):\n self.confusion_matrix.reset()",
"def set_original_last_cluster(self, cluster_id: int) -> None:\r\n self.original_last_cluster = cluster_id",
"def reset(self):\n self.current_shard = None\n self.current_shard_n = None\n self.current_offset = None",
"def _reset_pca(self):\n \n del self.training_data\n del self._scaler\n del self.model\n del self.projected_training_data\n self.n_components = self._init_n_components",
"def _clear_clusters(self):\n for point in self._points:\n point.cluster = None\n self._clusters.clear()",
"def reset(self):\n # self.__regions will not be changed\n self.__labels = [True] * self.__dimension.get_dim_size()",
"def reset(self):\r\n self.counter = 0\r\n self.distdict.clear()",
"def reset(self):\n self.csr.data[:] = 0",
"def change_cluster_unit(self, new_cluster_column_name):\n\n # 1. Check new cluster information exists in anndata.\n if new_cluster_column_name in self.adata.obs.columns:\n if not f\"{new_cluster_column_name}_colors\" in self.adata.uns.keys():\n sc.pl.scatter(self.adata,\n color=new_cluster_column_name,\n basis=self.embedding_name[2:])\n else:\n raise ValueError(f\"{new_cluster_column_name} was not found in anndata\")\n\n\n # 2. Reset previous GRN data and simoulation data\n attributes_remained = ['TFdict', 'adata', 'cv_mean_selected_genes',\n 'embedding_name', 'high_var_genes', 'knn',\n 'knn_smoothing_w', 'pca', 'cv_mean_score',\n 'cv_mean_selected', 'pcs', #'GRN_unit',\n 'active_regulatory_genes']\n\n attributes = list(self.__dict__.keys())\n for i in attributes:\n if i not in attributes_remained:\n delattr(self, i)\n\n # 4. Update cluster info\n self.cluster_column_name = new_cluster_column_name\n\n # 3. Update color information\n col_dict = _get_clustercolor_from_anndata(adata=self.adata,\n cluster_name=new_cluster_column_name,\n return_as=\"dict\")\n self.colorandum = np.array([col_dict[i] for i in self.adata.obs[new_cluster_column_name]])",
"def reset(self):\n self.matrix.fill(True)\n for cat in range(len(self.categories)):\n s = self.cat_slice(cat)\n self.matrix[s, s] = False\n np.fill_diagonal(self.matrix, True)\n self.assertions.clear()\n self._log(f'Reset matrix with {self.edges} edges\\n')",
"def reset(self):\n self.featureset = features_to_featureset(self._features, new=True)",
"def generate_clusters(self):\n\n self.cluster_labels = None",
"def reset_ptr(self):\n self.train_pointer = 0",
"def reset(self):\n if not self.samples:\n print('Error: Data has not been loaded yet!')\n else:\n self.groups = set([sample['Group'] for sample in self.samples])\n self.sessions = set([sample['Session'] for sample in self.samples])\n self.runs = set([sample['Run'] for sample in self.samples])\n self.locations = set([sample['Location'] for sample in self.samples])\n temp_var = [self.samples[i][\"FlatULM\"] for i in range(len(self.samples))]\n self.covariance_matrix = np.cov(np.asarray(temp_var).T)\n self.projectors, self.cov_eigenvalues, _ = np.linalg.svd(self.covariance_matrix)",
"def reset(cls):\n cls.instances = []\n cls.next_id = 0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
filter the parallel coordinates based on the input value (rows with contribution >= value are kept), also filtering the document-topic df so the same filter applies in Cytoscape | def filter_parall_coords_topic_contribution(self, value):
self.filtered_paarcord_topics_df = self.get_topics_df().copy()
#get_indexed_topic_node_df
self.filtered_paarcord_topics_df = self.filtered_paarcord_topics_df[self.filtered_paarcord_topics_df['Topic_Perc_Contrib'] >= value]
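        # propagate the same filter to the document-topic df that drives the Cytoscape view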
remained_docs = self.filtered_paarcord_topics_df['Document_No'].tolist()
self.filtered_topic_df = self.topic_df[self.topic_df['Document_No'].isin(remained_docs)] | [
"def filter(self, df):\n pass",
"def filter_coords(df):\n lon_l, lon_r = -74.1, -73.7\n lat_l, lat_r = 40.65, 40.85\n\n for c in filter(lambda c: c.endswith('_Lon'), df.columns):\n df = df[(df[c] <= lon_r) & (df[c] >= lon_l)]\n\n for c in filter(lambda c: c.endswith('_Lat'), df.columns):\n df = df[(df[c] <= lat_r) & (df[c] >= lat_l)]\n\n return df",
"def dataWNfilter(df):\n\t\n\t#WIDE\n\tdf = wide_filter(df,'combinado')\n\n\t#NARROW\n\tdf = narrow_filter(df,\"mb_total_qt\")\n\tdf = narrow_filter(df,\"arpu_negocio_promedio\")\n\n\treturn df",
"def filter(self, label=None, **kwargs):",
"def filterOnScores(dicoParam, dfWindows):\n\tdfWindows = dfWindows[ dfWindows.cGcC >= dicoParam[\"cGcC\"] ].dropna()\n\tdfWindows = dfWindows[ dfWindows.G4H >= dicoParam[\"G4H\"] ].dropna()\n\tdfWindows = dfWindows[ dfWindows.G4NN >= dicoParam[\"G4NN\"] ].dropna()\n\treturn dfWindows",
"def cluster_dots(qvals_mtx,mtx,min_cluster_size=2,q_value_treshold=0.01,filter_by_coverage=True,min_coverage=0,filter_dist=1):\n #significant dots coordinates\n dots=np.where((qvals_mtx<=np.log10(q_value_treshold)) & (qvals_mtx!=-np.inf) & (qvals_mtx!=np.isnan(qvals_mtx)))\n #intensity values\n I=[]\n for x,y in zip(dots[0],dots[1]):\n I.append(mtx[x,y])\n I=np.array(I)\n data=np.concatenate((np.vstack(dots[0]),np.vstack(dots[1]),np.vstack(I)),axis=1)\n #cluster adjuscent dots\n clustering = DBSCAN(eps=1.0, min_samples=min_cluster_size).fit(data[:,0:2])\n data=np.concatenate((data,np.vstack(clustering.labels_)),axis=1)\n #filter noise dots\n data=data[data[:,3]!=-1]\n #reorder first two values in row such that 0<1\n for el,i in zip(data,[x for x in range(len(data))]):\n if el[0]>el[1]:\n data[i]=el[[1,0,2,3]]\n \n #create temporary pandas dataframe\n tmp_df=pd.DataFrame(data)\n #group by coordinates and sum intensity\n tmp_df=tmp_df.groupby([0,1],as_index=False).sum()\n #filter by coverage\n if filter_by_coverage==True:\n #filter dots near low coverage bins\n coordinates=np.where(np.sum(mtx,axis=1)<=min_coverage)[0]\n out=np.array([])\n for i in range(1,filter_dist+1):\n plus=coordinates+i\n minus=coordinates-i\n new_coordinates=np.concatenate((plus,minus))\n out=np.concatenate((out,new_coordinates))\n coordinates=np.concatenate((coordinates,out))\n \n tmp_df=tmp_df[(~tmp_df[0].isin(coordinates))]\n tmp_df=tmp_df[(~tmp_df[1].isin(coordinates))]\n tmp_df=tmp_df.groupby(3).filter(lambda x: len(x) >= min_cluster_size)\n \n #reclaster data\n clustering = DBSCAN(eps=1.0, min_samples=min_cluster_size).fit(tmp_df[[0,1]])\n #set new label\n tmp_df[3]=clustering.labels_\n #filter noise\n tmp_df=tmp_df[tmp_df[3]!=-1]\n return(tmp_df)",
"def filter_genes(self, exprs, y, number_genes):\r\n print('Filtering top ' + str(number_genes) + ' genes.')\r\n filter = SelectKBest(score_func=f_classif, k=number_genes)\r\n rnaseq_filtered = filter.fit(exprs, y).transform(exprs)\r\n mask = filter.get_support()\r\n new_features = exprs.columns[mask]\r\n rnaseq_filtered_df = pd.DataFrame(rnaseq_filtered, columns=new_features, index=exprs.index)\r\n return rnaseq_filtered_df",
"def filt_pointcloud(pc):\n\n # remove radius outliers\n pc_filtered, _ = radius_outlier_removal(pc,\n nb_points=3, radius=0.05)\n\n return pc_filtered",
"def filter(\n self,\n point_ids: Optional[Union[List[int], np.ndarray, None, Undefined]] = UNDEF\n ) -> Union[Scatter, np.ndarray, None]:\n if point_ids is not UNDEF:\n try:\n if self._data is not None and self._data_use_index:\n row_idxs = self._data.index.get_indexer(point_ids)\n self._filtered_points_idxs = np.asarray(\n row_idxs[row_idxs >= 0],\n dtype=SELECTION_DTYPE\n )\n self._filtered_points_ids = self._data.iloc[self._filtered_points_idxs].index\n else:\n self._filtered_points_idxs = np.asarray(point_ids, dtype=SELECTION_DTYPE)\n self._filtered_points_ids = self._filtered_points_idxs\n except:\n if point_ids is None:\n self._filtered_points_idxs = None\n self._filtered_points_ids = None\n pass\n\n self.update_widget('filter', self._filtered_points_idxs)\n\n return self\n\n if self._widget is not None:\n row_idxs = self._widget.filter.astype(SELECTION_DTYPE)\n if self._data is not None and self._data_use_index:\n return self._data.iloc[row_idxs].index\n return row_idxs\n\n return self._filtered_points_ids",
"def obtain_rules_discard(df_anomalies_no_sub, df_anomalies_yes_sub, X_train, sc,\n n_vertex_numerical, numerical_cols, categorical_cols,\n clustering_algorithm, use_inverse):\n\n def hyper_limits(vectors_bound_all, df_anomalies_yes_sub, numerical_cols):\n limits = obtain_limits(vectors_bound_all)\n df_anomalies_yes_sub[\"outside_hcube\"] = df_anomalies_yes_sub.apply(\n lambda x: function_check(x, limits, numerical_cols), axis=1)\n return df_anomalies_yes_sub, limits\n\n if clustering_algorithm == \"kprototypes\":\n feature_cols = numerical_cols + categorical_cols\n else:\n feature_cols = numerical_cols\n\n # Tolerance param\n max_iters = MAX_ITERS\n\n # Obtain vertices\n n = 0\n check = True\n\n # Drop duplicates\n df_anomalies_no_sub.drop_duplicates(inplace=True)\n df_anomalies_yes_sub.drop_duplicates(inplace=True)\n df_final = []\n \n # Ñapa: duplicate datapoints if below 2\n if len(df_anomalies_no_sub)<2:\n df_anomalies_no_sub = df_anomalies_no_sub.append(df_anomalies_no_sub)\n df_anomalies_no_sub = df_anomalies_no_sub.reset_index(drop=True)\n \n if len(df_anomalies_yes_sub)<2:\n df_anomalies_yes_sub = df_anomalies_yes_sub.append(df_anomalies_no_sub)\n df_anomalies_yes_sub = df_anomalies_yes_sub.reset_index(drop=True)\n \n # Data used -- start using all and 1 cluster\n dct_subdata = {\"data\": df_anomalies_no_sub, \"n_clusters\": 1}\n list_subdata = [dct_subdata]\n\n # Check until all non anomalous data is used for rule inferring\n j = 0\n while check:\n # When there is no data to infer rules, finish\n if len(list_subdata) == 0:\n break\n list_original = list_subdata.copy()\n list_subdata = [] # Reset list\n # For each subdata space, use two clusters to try and infer rules\n for dct_subdata in list_original:\n # Load data\n df_anomaly_no = dct_subdata['data']\n n = dct_subdata['n_clusters']\n j += 1\n\n # Check tolerance\n if j >= max_iters:\n check=False\n break\n # If there is only one point left, skip it\n elif n > len(df_anomaly_no):\n continue\n\n # Rules\n print(\"Iteration {0} | nº clusters used {1}\".format(j, n))\n # Returns n_vertex_numerical datapoints\n # if n_vertex_numerical > len(df_anomalies_no) for each cluster;\n # else returns df_anomalies_no\n dict_vectors_bound_all = obtain_vertices(\n df_anomaly_no,\n X_train,\n sc,\n n_vertex_numerical,\n numerical_cols,\n categorical_cols,\n clustering_algorithm,\n n_clusters=n)\n\n # For each cluster in that subdata\n for key, value in dict_vectors_bound_all.items():\n vectors_bound_all = value[0].copy()\n df_anomalies_yes_sub, limits = hyper_limits(\n vectors_bound_all, df_anomalies_yes_sub, feature_cols)\n list_check = list(\n df_anomalies_yes_sub[\"outside_hcube\"].unique())\n\n # Recover original indexes\n df_anomaly_iter = value[2]\n df_aux = df_anomaly_no.copy().reset_index()\n cols_merge = [\n column for column in list(df_anomaly_iter.columns)\n if column != \"distances\"\n ]\n df_anomaly_iter = df_anomaly_iter[cols_merge]\n df_anomaly_iter = df_anomaly_iter.merge(\n df_aux,\n how=\"left\",\n left_on=cols_merge,\n right_on=cols_merge)\n df_anomaly_iter.index = df_anomaly_iter['index']\n del df_anomaly_iter['index']\n\n # If there are points that belong to the other class,\n # retrain with one more cluster\n if False in list_check:\n dct_subdata = {'data': df_anomaly_iter, 'n_clusters': 2}\n list_subdata.append(dct_subdata)\n # When there are no points from the other class,\n # turn into rules (and do not use those points again)\n elif len(df_anomaly_no)==1.:\n df_final.append(limits)\n else:\n df_final.append(limits)\n\n 
return df_final",
"def coord_filter(self, chrom, start, end):\r\n try:\r\n bytes = self.coord_to_index[(chrom,str(start),str(end))] #see if coordinate exists\r\n except KeyError: \r\n print \"Warning: coordinate \" + str(chrom) + \", start: \" + str(start) + \", end: \" + str(end) + \\\r\n \", does not exist here\"\r\n return\r\n for byte in sorted(bytes): #go to line where coordinate exists\r\n self.file.seek(byte)\r\n line = self.file.readline()\r\n if not line:\r\n raise IndexError\r\n else:\r\n yield parse_gff_line(line, format=self.format)",
"def samples_filtered(self, fixed_params, enforce_bounds=False):\n for x, y, s in self:\n # For each sample, check if it's usable:\n usable = True\n for p_name, p_value in x.items():\n if p_name in fixed_params: # For all fixed_params, check if the value is correct\n if not self.default_params [p_name] == p_value:\n usable = False\n break\n else: # For all non-fixed-params: Check if the value is in the optimization bounds\n if p_value < self.design_space[p_name][0] or p_value > self.design_space[p_name][1]:\n usable = False\n break\n if usable:\n yield x, y, s",
"def gdf_clip(gdf,clip_geom):\n return gdf.loc[gdf['geometry'].apply(lambda x: x.within(clip_geom))].reset_index(drop=True)",
"def filter_data(self):\n\t\tself.genes_lowcoverage = self.indexed.groupby(['Sample Name', 'Gene Symbol' , 'Accession', 'Gene Size']).apply(lambda x: x[x['% Coverage at 30x'] < 100])\n\t\tself.gene_summary()\n\t\tself.test_values()",
"def filter(self, filter_params):\n pass",
"def filter_keypoints(points, shape):\n \"\"\"\n points:\n numpy (N, (x,y))\n shape:\n (y, x)\n \"\"\"\n mask = (points[:, 0] >= 0) & (points[:, 0] < shape[1]) &\\\n (points[:, 1] >= 0) & (points[:, 1] < shape[0])\n return points[mask, :]",
"def CellFilter(sc, min_genes, max_genes, expr_thres = 0):\n\n print(\"Applying Cell Filter . . .\")\n\n X = sc.getCounts()\n\n expr_count = np.sum(X , axis = 0, keepdims = False)\n cell_filter = (expr_count <= max_genes) & (expr_count >= min_genes)\n\n print(\"Number of samples remaining after cell filtering: \", np.sum(cell_filter))\n\n # If cell filtering removes all samples:\n if (np.sum(cell_filter) != 0):\n # Save the cell filter mask\n # sc_obj.addCellDataColumn(col_data = cell_filter, col_name = \"cell_filter\")\n sc = sc[:, cell_filter]\n else:\n print (\"Cell filtering removed all the samples!\")\n print (\"Set different cell filter parameters.\")\n print (\"Skipping Cell Filtering step...\")\n\n return sc",
"def _geo_query(self):\n feature_collection = self.value\n features = feature_collection['features']\n if len(features) < 2:\n return self._polygon_filter(features[0])\n else:\n poly_filters = []\n for feature in features:\n poly_filter = self._polygon_filter(feature)\n poly_filters.append(poly_filter)\n return bool_query(should=poly_filters)",
"def _filter_view(self, map_body):\n if map_body is True:\n map_body = \"function(doc) { emit(doc._id, null); };\"\n assert isinstance(map_body, str), \"View map must be a string\"\n view_doc = \"%s_viewdoc\" % self.prefix\n view_name = \"%s_viewname\" % self.prefix\n ddoc = {\"_id\": \"_design/%s\" % view_doc, \"views\": {view_name: {\"map\": map_body}}}\n rep_params = {\n \"filter\": \"_view\",\n \"query_params\": {\"view\": \"%s/%s\" % (view_doc, view_name)},\n }\n return (ddoc, rep_params)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find the time tag's position in the given variable name. | def findTimeTagPos(self, varName):
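        # walk backwards from the end of the name and return the offset of the last '_' (start of the time tag)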
for i in xrange(1, len(varName)):
if varName[-i] == "_":
return i | [
"def __locate_time_params(self):\n\n time_id, time_lsw, time_msw = 0, 0, 0\n long_names = self.__get_ch_attr('long')\n for name, long_name in zip(self.names, long_names):\n name_id = self.id_map.get(name)\n if long_name:\n keywords = (name.casefold(), long_name.casefold())\n else:\n keywords = (name.casefold(),)\n\n in_keyword_name = lambda wrd: any(wrd in kw for kw in keywords)\n if in_keyword_name('time'):\n if in_keyword_name('lsw'):\n time_lsw = name_id\n elif in_keyword_name('msw'):\n time_msw = name_id\n else:\n time_id = name_id\n\n return time_lsw, time_msw, time_id",
"def time_index(self):\n for column in self.columns.values():\n if 'time_index' in column.semantic_tags:\n return column.name\n return None",
"def find_interruption_pos_index(time, interrupt_start):\n\n pos = 0\n for i in range(0, len(time)):\n if time[i] == interrupt_start:\n pos = i\n break\n\n return pos",
"def linear_search_value(self, key_time):\n\n for i in range(len(self._time)):\n curr = self._time[i]\n if key_time == curr:\n return i\n\n # print('invalid time')\n return -1",
"def extract_time(self, tagged_text):\n time_found = []\n\n #trouver tout les tag TIMEX dans tagge_text\n timex_regex = re.compile(r'<TIMEX>.*?</TIMEX>', re.DOTALL)\n timex_found = timex_regex.findall(tagged_text)\n \n timex_found = map(lambda timex:re.sub(r'</?TIMEX.*?>', '', timex), timex_found)\n \n\n if len(timex_found) == 1:\n if reg9.match(timex_found[0]):\n token = self._split_inter(timex_found[0])\n t1 = self.to_datetime(token[0])[0]\n t2 = self.to_datetime(token[1])[0]\n t1[0] = t1[0].replace(month=t2[0].month)\n if re.search(' ' + r'et le|et' + ' ', timex_found[0]):\n time_found.append(t1)\n time_found.append(t2)\n else:\n time_found = self._make_list(t1, t2)\n return time_found\n else:\n return self.to_datetime(timex_found[0])\n\n elif len(timex_found) >= 2:\n if reg9.match(timex_found[1]):\n s_tag = '<TIMEX>' + timex_found[1] + '</TIMEX>'\n time_found = self.extract_time(s_tag)\n t_time = self.to_datetime(timex_found[0])[0]\n hour = t_time[0].hour\n minute = t_time[0].minute\n return self._apply_hour(hour, minute, time_found, t_time[1])\n\n elif (reg8.match(timex_found[1]) or reg1.match(timex_found[1]) or reg6.match(timex_found[1])) and reg9.match(timex_found[0]):\n s_tag = '<TIMEX>' + timex_found[1] + '</TIMEX> ' + '<TIMEX>' + timex_found[0] + '</TIMEX>'\n return self.extract_time(s_tag)\n\n else:\n d = datetime.datetime(2000, 1, 1).now()\n error = 10\n for i in range(0, len(timex_found)):\n tmp_d = self.to_datetime(timex_found[i], d)[0]\n if tmp_d[1] <= error or tmp_d[1] == 10:\n d = tmp_d[0]\n if tmp_d[1] != 10:\n error = tmp_d[1]\n\n time_found.append([d, error])\n return time_found\n else:\n return []",
"def find_var_pos(self, variable):\n try:\n index = self._variables.index(variable)\n return index\n except ValueError:\n return None",
"def variable_idx(self, name, search_range):\n cnt = 0\n idx = len(self._varTable)\n if idx == 0:\n return -1\n while idx > 0:\n idx -= 1\n # if self._varTable[idx][0] == name \\\n # and (self._curLayerNum - self._varTable[idx][1] <= search_range or self._varTable[idx][1] == 0):\n # return cnt\n if self._varTable[idx][0] == name:\n if self._curLayerNum - self._varTable[idx][1] <= search_range:\n return cnt\n elif self._varTable[idx][1] == 0:\n return -1 - self._varTable[idx][2]\n cnt += 1\n return -1",
"def _get_spos(pvname):\n # pos\n try:\n r = re.match(r'.*D([0-9]{4}).*', pvname)\n assert r is not None\n except AssertionError:\n pos = -1\n else:\n pos = float(r.group(1)) / 10.0 # m\n return pos",
"def find(self, value: 'SbTime', addifnotfound: 'SbBool'=0) -> \"int\":\n return _coin.SoMFTime_find(self, value, addifnotfound)",
"def find_bomb_time_at_location(self, loc):\n arr = self.world.bombs.values()\n lis = list(arr)\n if len(lis) > 0:\n for i in range(0, len(lis)):\n if loc[0] == lis[i].x and loc[1] == lis[i].y:\n return lis[i].timer",
"def getTime(sortedtimes,searchval):\n tindex = sortedtimes.index(searchval) \n if tindex != 0:\n return sortedtimes[tindex-1] + 1\n return 0",
"def pos(self, name):\n return self.ev.position[name]",
"def get_time_elem(self, name):\n text = self.get_child_text(name)\n try:\n return datetime.strptime(\n text, RFC3339NANO,\n ).replace(tzinfo=timezone.utc)\n except ValueError:\n return datetime.strptime(\n text, RFC3339,\n ).replace(tzinfo=timezone.utc)",
"def read(self, varName, time=None, start=None, count=None, stride=None):\n nc = self.__getNetcdfObj()\n assert( varName in nc.variables )\n v = nc.variables[varName]\n\n #FIXME: this is TIEGCM-specific!\n if time:\n # Get the requested day of year, hour and minute\n (doy, hour, minute) = [int(i) for i in time.strftime('%j %H %M').split() ]\n # Find the time index this maps to\n mtimes = self.read('mtime')\n\n for idx,value in enumerate(mtimes.data):\n if ((value[0] == doy) &\n (value[1] == hour) &\n (value[2] == minute)):\n timeIdx = idx\n break \n\n return (v, timeIdx) \n else:\n return v",
"def _FindLastEventWithTimeElseMostRecentEventLessthanTime_ORI(self, timeToFind):\n pos = 0\n lastoccurenceoftime = -1\n\n for event in self.mediator.story:\n eventtime = eventStrToList(event)[0]\n eventtime = eval(eventtime)\n\n if self.turnMgr.TimeToTimePosition(eventtime) <= self.turnMgr.TimeToTimePosition(\n timeToFind\n ):\n # if eventtime <= timeToFind:\n lastoccurenceoftime = pos\n pos += 1\n else:\n break\n # pos should now be positioned at the insertion point\n\n if lastoccurenceoftime != -1:\n return lastoccurenceoftime\n maxpos = len(self.mediator.story) - 1\n return min(maxpos, pos)",
"def lookup(self, time, time_cutoff=None):\n\t\t#do a binary search over the point set, comparing times\n\t\tpos = bisect(self.times, time)\n\t\tposition = None\n\t\tif pos==self.point_count:\n\t\t\tposition = self.points[pos-1]\n\t\telif pos>0 and (time - self.times[pos-1]) < (self.times[pos]-time):\n\t\t\t#check which of the two adjacent times is closer to time\n\t\t\tposition = self.points[pos-1]\n\t\telse:\n\t\t\tposition = self.points[pos]\n\n\t\tif time_cutoff is None or abs(position.time - time) <= time_cutoff:\n\t\t\treturn position\n\t\treturn None",
"def _get_placemark_time(self, placemark, ns):\n text = placemark.find(\"{%s}TimeStamp\" % ns).find(\"{%s}when\" % ns).text\n return datetime.datetime.strptime(text, TIME_IN_FMT)",
"def mysortbytime(elem):\r\n#----------------------------------------------------------------------\r\n (string,time,ampm) = elem.split()\r\n civilian_time = time + \" \" + ampm\r\n (hour,min) = to_military_time(civilian_time).split(':')\r\n return int(hour)",
"def get_time_span(time_spans, i):\n return time_spans[1][i]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
add time tag to the end of the variable name in the given tuple | def addTimeTag(self, tup, time):
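        # e.g. ('PositionRow', '5') with time=1 -> ('PositionRow_T1', '5')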
return (tup[0] + "_T" + str(time), tup[1]) | [
"def add_timestamp(name):\n return '{0}_{1}'.format(name, time.strftime(\"%Y%m%d-%H%M%S\"))",
"def addAutoTimeMarker(time, name):",
"def _add_time_variable(root, time, **kwds):\n units = kwds.get(\"units\", \"days\")\n reference = kwds.get(\"reference\", \"00:00:00 UTC\")\n\n netcdf_vars = root.variables\n\n try:\n time_var = netcdf_vars[\"t\"]\n except KeyError:\n time_var = root.createVariable(\"t\", \"f8\", (\"nt\",))\n time_var.units = \" \".join([units, \"since\", reference])\n time_var.long_name = \"time\"\n\n try:\n n_times = len(time_var)\n except TypeError:\n n_times = len(time_var[:])\n if time is not None:\n time_var[n_times] = time\n else:\n time_var[n_times] = n_times",
"def format_4(tup):\n \n string = \"Date/Time: {:02d} {:d} {:d} {:02d} {:d}\"\n f4_string = string.format(tup[3], tup[4], tup[2], tup[0], tup[1])\n return f4_string",
"def _handle_time(time_label, time_unit, times):\n _validate_type(time_label, (None, str, \"callable\"), \"time_label\")\n if time_label == \"auto\":\n if times is not None and len(times) > 1:\n if time_unit == \"s\":\n time_label = \"time=%0.3fs\"\n elif time_unit == \"ms\":\n time_label = \"time=%0.1fms\"\n else:\n time_label = None\n # convert to callable\n if isinstance(time_label, str):\n time_label_fmt = time_label\n\n def time_label(x):\n try:\n return time_label_fmt % x\n except Exception:\n return time_label # in case it's static\n\n assert time_label is None or callable(time_label)\n if times is not None:\n _, times = _check_time_unit(time_unit, times)\n return time_label, times",
"def add_time_variable(self):\n self.add_species(name='TIME', init_conc=0)\n self.dcdt['TIME'] = '1'",
"def separate_time_and_data(variables_dict):\n time_dict = {}\n var_dict = {}\n time_identifiers = ['Time','time','begin_end_size']\n #time_identifier = 'Time'\n #time_identifier2 = 'time'\n matches_identifier = lambda i: i in name,\n for name, var in variables_dict.iteritems():\n for identifier in time_identifiers:\n if identifier in name:\n time_dict[name] = var\n break\n #if time_identifier in name or time_identifier2 in name:\n # time_dict[name] = var\n else:\n var_dict[name] = var\n\n return (time_dict, var_dict)",
"def add_stage_time(self, name: str) -> None:\n self._stage_time.append((time.time(), name))",
"def time_text(expTime):\n return '' if not expTime else 'time=%g' % expTime",
"def at_time_display(time):\n return timeFormat(time, prefix=gettext(\"at \"))",
"def __value_time(self, value, time, just_value):\n if just_value:\n return value\n else:\n return value, time",
"def add_tagalong_time(session, tree):\n read_parameter_change_from_text_file(session, tree, \"./tagalong.txt\", \"tagalong\")",
"def time(self, value):",
"def format_str_for_namedtuple(stack):\n result = []\n for item in stack:\n tempt = item.replace(\" \", \"_\")\n result.append(tempt)\n return result",
"def _control_time_units(tunits):\n units = tunits.split()\n units[0] = unicode('days')\n if len(units[2].split('-')) == 1:\n units[2] = units[2] + '-{0}-{1}'.format('01','01')\n elif len(units[2].split('-')) == 2:\n units[2] = units[2] + '-{0}'.format('01')\n return ' '.join(units)",
"def format_split_times(split_times: List) -> str:\n return ', '.join([to_s(st['control_code']) + ': ' + to_s(st['time']) for st in split_times])",
"def fix_time_metadata(cube, coord=None):\n if coord is None:\n coord = cube.coord('time')\n elif isinstance(coord, str):\n coord = cube.coord(coord)\n coord.var_name = 'time'\n coord.standard_name = 'time'\n coord.long_name = 'time'\n return coord",
"def insert_partial(self, name, timestamp, newvalue):",
"def add_placeholder_duration(todo): \n if todo.strip()[-1] == ',':\n todo += ' 30 m'\n else:\n todo += ', 30 m'\n \n return todo\n # last_comma_idx = todo[::-1].find(',')\n # if last_comma_idx == -1:\n # todo += ', 30 m'\n # else:\n # # if there's nothing after the comma \n # # else, do the followin\n # import ipdb; ipdb.set_trace()\n\n # todo = todo[: -(last_comma_idx + 1)] + ' 30 m' + todo[-(last_comma_idx + 1):] \n # return todo "
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
find the action, PositionRow, and PositionCol from an example | def getCoordAction(self, data):
row, col, action = None, None, None
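        # data[0] is the example id; the remaining elements are (name, value) tuples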
for d in data[1:]:
if 'Row' in d[0]:
row = d
if 'Col' in d[0]:
col = d
if 'Action' in d[0]:
action = d
return row, col, action | [
"def get_position(event):\n\tline, column = text.index('insert').split('.')\n\ts = \"line=%s column=%s\" % (line, column)\n\tprint \"Karthik\",\n\tprint s",
"def where_is(piece, state):\n for row_index, row in enumerate(state):\n for col_index, current_piece in enumerate(row):\n if current_piece == piece:\n return row_index, col_index",
"def findDefinitionByCoordinates(self,filename_path,line,col):",
"def location(s, (x,y)):\n\t\treturn s.matrix[x][y]",
"def identifyPosn(self, widget):\n\n for row, cntrls in enumerate(self.controls):\n for col, cntrl in enumerate(cntrls):\n if cntrl == widget:\n return (row, col)\n return (None, None)",
"def detect_target_clicked(self):\n for clicked_x, clicked_y in self.clicked_locations:\n for i in range(BOARD_HEIGHT):\n for j in range(BOARD_WIDTH):\n # params of the (i, j) circle in the board\n (circle_x, circle_y), circle_radius = self.get_board_circle_params(i, j)\n\n # if clicked location is in the (i, j) circle and the circle is yellow\n if math.hypot(circle_x - clicked_x, circle_y - clicked_y) < circle_radius:\n if self.state.board_str[BOARD_HEIGHT * i + j] == \"y\":\n return (i, j)\n return None",
"def get_pos(self, piece):\r\n if piece == \"K\":\r\n return (WhiteKing.row, WhiteKing.col)\r\n for i in range(8):\r\n if piece == \"P\" + str(i):\r\n return (WhitePawn.row[i], WhitePawn.col[i])",
"def clicked_point(self):\n if self._pseudo_square.data_source.selected is not None:\n if len(self._pseudo_square.data_source.selected.indices) > 0:\n id = self._pseudo_square.data_source.selected.indices[0]\n x_coor = self._pseudo_square.data_source.data['x'][id]\n y_coor = self._pseudo_square.data_source.data['y'][id]\n return x_coor, y_coor\n else:\n return None, 0",
"def _determine_tri_idxs(trigger):\n if len(trigger) == 12:\n # Aurora version 2021.9.6 or greater\n trigger_frame_idx = 7\n desc_idx = 10\n elif len(trigger) == 9:\n # Aurora version 2021.9.5 or earlier\n trigger_frame_idx = 7\n desc_idx = 8\n else:\n raise RuntimeError(\"Unable to read trigger file.\")\n\n return trigger_frame_idx, desc_idx",
"def get_position(char, table):\n for row in xrange(5):\n for column in xrange(5):\n if table[row][column]==char:\n return [row, column]",
"def locations_for_target(self):\n target_row = self.landmarks[0][0]\n target_col = self.landmarks[0][1] - self.target_distance_from_beacon\n return [(target_row, target_col)]",
"def __translate_coords(self, row, col):\r\n assert(row >= 0 and row < len(self.rows) )\r\n assert(col >= 0 and col < 4)\r\n\r\n event = self.rows[row]\r\n event_row = row - event['start_row']\r\n assert(event_row >= 0)\r\n \r\n return (event, event_row)",
"def test_actions():\n board = Board(size=7)\n actions = board.actions()\n assert len(actions) == 5*5*4 + 5*4*3 + 4*2\n assert actions[0].x == 0\n assert actions[0].y == 0\n assert actions[0].direction == 1",
"def get_row_col(self, pos):\n\t\tif pos == '0':\n\t\t\trow=0\n\t\t\tcol=0\t \n\t\telif pos == '1':\n\t\t\trow=0\n\t\t\tcol=1\t \n\t\telif pos == '2':\n\t\t\trow=0\n\t\t\tcol=2\t \n\t\telif pos == '3':\n\t\t\trow=1\n\t\t\tcol=0\t \n\t\telif pos == '4':\n\t\t\trow=1\n\t\t\tcol=1\t \n\t\telif pos == '5':\n\t\t\trow=1\n\t\t\tcol=2\t \n\t\telif pos == '6':\n\t\t\trow=2\n\t\t\tcol=0\t \n\t\telif pos == '7':\n\t\t\trow=2\n\t\t\tcol=1\n\t\telse:\n\t\t\trow=2\n\t\t\tcol=2\t\t \n\t\treturn row, col",
"def _parse_sgf_move(node_value):\n\tif node_value == '' or node_value == 'tt':\n\t\treturn go.PASS_MOVE\n\telse:\n\t\trow = string.letters.index(node_value[1])\n\t\tcol = string.letters.index(node_value[0])\n\t\t# GameState expects (x, y) where x is column and y is row\n\t\treturn (col, row)",
"def _get_pathinfo_at_event(self, event):\n pthinfo = self.get_path_at_pos(event.x, event.y)\n if pthinfo is None:\n return None, None\n path, column, cellx, celly = pthinfo\n return path, column",
"def get_line_and_column(location):\n\n START_PATTERN = r'(start=)(?P<line>\\d+)(,(?P<column>\\d+))?'\n\n search_result = re.search(START_PATTERN, location or '')\n\n line = column = '0'\n if search_result:\n line = search_result.group('line')\n column = search_result.group('column') or '0'\n\n return line, column",
"def read_position(ws, row, fields, position):\n\tlogger.debug('read_position(): at row {0}'.format(row))\n\tinitialize_position(position)\n\n\ti = 0\n\tcell_value = ws.cell_value(row, 0)\n\trecord_type = cell_value.strip()\n\n\twhile row < ws.nrows:\n\t\tif record_type == 'Holding Details':\n\t\t\tread_position_holding_detail(ws, row, fields, position)\n\t\telif record_type == 'Sub-Total' or record_type == 'Sub-Total Per Instrument of Custody A/C':\n\t\t\tread_position_sub_total(ws, row, fields, position)\n\t\telif record_type == 'Available Balance':\n\t\t\tread_position_available_balance(ws, row, fields, position)\n\t\t\tbreak\n\n\t\trow = row + 1\n\t\tcell_value = ws.cell_value(row, 0)\n\t\trecord_type = cell_value.strip()\n\t# end of while loop\n\n\treturn row+1",
"def CurrentLineAndColumn():\n # See the comment in CurrentColumn about the calculation for the line and\n # column number\n line, column = vim.current.window.cursor\n line -= 1\n return line, column"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
convert data to tuple, excluding the time tag e.g. 1 PositionRow_1=5 PositionCol_1=6 Action_1=MoveEast => (1, ("PositionRow", "5"), ("PositionCol", "6"), ("Action", "MoveEast")) | def convertExampleToTuple(self, ex):
splitEX = map(lambda e: e.split("="), ex)
output = [(e[0][:-self.findTimeTagPos(e[0])], e[1]) for e in splitEX[1:]]
output.insert(0, int(splitEX[0][0]))
return tuple(output) | [
"def str_to_tuple(tuple_str):\r\n return tuple(tuple_str.strip('() ').split(','))",
"def _key_to_tuple(cls, data, section):\n if section not in data:\n return\n temp = {}\n for key in data[section]:\n item_list = key.split(\"-\")\n if len(item_list) != 2:\n raise BadConfiguration(\n \"ThermoConfig._key_to_tuple\",\n data,\n missing=None,\n why=\"\\n\" + section + \" tuple key must be only 2 items\\n\",\n )\n temp[tuple(item_list)] = data[section][key]\n data[section] = temp",
"def to_tuple(self: Row):\n data = self.asDict()\n return tuple(data.values())",
"def _key_to_tuple(cls, data, section):\n if section not in data:\n return\n temp = {}\n for key in data[section]:\n item_list = key.split(\"-\")\n if len(item_list) != 2:\n raise BadConfiguration(\n \"BaseConfig._key_to_tuple\",\n data,\n missing=None,\n why=\"\\n\" + section + \" tuple key must be only 2 items\\n\",\n )\n temp[tuple(item_list)] = data[section][key]\n data[section] = temp",
"def _tuple_to_filter(t: Tuple[str, str, Any]) -> Dict:\n return {\"field\": t[0], \"operator\": t[1], \"value\": t[2]}",
"def make_tuple_fromstr(s, value_type):\n # remove tuple braces and strip commands and space from all values in the tuple string\n values = []\n for x in s.strip(\"(), \").split(\",\"):\n x = x.strip(\"' \")\n if x:\n values.append(x)\n return tuple(value_type(i) for i in values)",
"def _field_to_tuple(field):\r\n if isinstance(field, (tuple, list)):\r\n return (field[0], field[1])\r\n return (field, None)",
"def optomux_data_to_tuple(self,data,bits=1):\n b = ''\n # for each hex digit in string\n for i in range(len(data)):\n # get the next nibble\n c = data[i]\n # if a valid hex digit\n if c in '0123456789abcdefABCDEF':\n # conver to int\n n = int(data[i],16)\n # for each bit in the nibble starting at msb\n for j in reversed(range(4)):\n # append a binary digit\n b += chr(ord('0') + ((n >> j) & 1))\n # tried to read an output counter\n elif c == '?':\n # 4 binary '?' s\n for i in range(4):\n b += '?'\n # create a tuple of ints using substrings of binary width bits\n # and expand optomux '????' as if the '?'s were binary digits\n # of all fields will be the same width\n lv = []\n for i in reversed(range(0,len(b),bits)):\n # read bits worth of binary digits\n v = b[i:i+bits]\n # try to convert to an int using base 2\n try:\n n = int(v,2)\n # poke a '?' placeholder so caller knows not to use\n # the value\n except:\n n = '?'\n # append the value to the list\n finally:\n lv.append(n)\n return tuple(lv)",
"def to_tuple(self) -> tuple:\n return (self.node, self.time_slice)",
"def file_line_into_tuple(line):\n formatted_data = [\"\", []]\n line = line.split(\" \")\n\n for x in range(len(line)):\n if(x == 0):\n formatted_data[0] = line[x]\n else:\n try:\n formatted_data[1].append(int(line[x]))\n except:\n pass\n\n return tuple(formatted_data)",
"def map_listcs(item):\n fields = item.split()\n\n fields = [x.split(\"=\", 1)[-1] for x in fields]\n\n return tuple( fields )",
"def parse_tuple(string):\n\n items = split_on_commas(TUPLE.match(string)[1])\n values = [parse_value(val) for val in items]\n\n return tuple(values)",
"def _list_to_tuple(cls, data, section):\n if section not in data:\n return\n for key in data[section]:\n if isinstance(data[section][key], list):\n data[section][key] = tuple(data[section][key])",
"def parse_pair(s):\n return tuple(int(x) for x in s.split(','))",
"def _convert_to_list_of_tuples(input_):\n # make it al into a list of tuple(s)\n if input_ is None:\n return None\n\n if isinstance(input_, list):\n for item in range(len(input_)):\n if isinstance(input_[item], str):\n input_[item] = (input_[item],)\n\n if isinstance(input_, tuple):\n input_ = [input_]\n\n if isinstance(input_, str):\n input_ = [(input_,),]\n\n return input_.copy()",
"def to_tuple(list_):\n i = 0\n while i < len(list_):\n list_[i] = tuple(list_[i])\n i += 1\n return list_",
"def split_affil_tuple(affil_tuple_input):\n affil_tuple = []\n for (tag, affil_text) in affil_tuple_input:\n if ',' in affil_text:\n for affil_split in affil_text.split(','):\n affil_tuple.append((tag, affil_split.strip()))\n else:\n affil_tuple.append((tag, affil_text))\n return affil_tuple",
"def _unpack_data(data: Union[Any, Tuple[Any, Any]]) -> Any:\n if isinstance(data, tuple) or isinstance(data, list):\n data = data[0]\n return data",
"def extract_values(values):\n packet = PacketBE()\n ordered_values = [values[x] for x in packet.keys()]\n return tuple(ordered_values)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Join API endpoints from two other modules. These will be at ``/part1`` and ``/part2``, the paths being automatically generated from function names. | def with_other_apis():
return [part_1, part_2] | [
"def combine(self, part1, part2):\n part1 = part1.rstrip('/')\n part2 = part2.lstrip('/')\n return part1 + '/' + part2",
"def join_paths(*parts: str) -> str:\n return \".\".join(str(p).strip(\".\") for p in parts if p)",
"def join(base, *parts):\n path = base\n if not parts:\n path[:0] + SEP\n for part in parts:\n if part.startswith(SEP):\n path = part\n elif not path or path.endswith(SEP):\n path += part\n else:\n path += SEP + part\n return path",
"def joinPath(*paths):",
"def attach(restful, module, models):\n\n for resource in resources(module) + ensure(module, models):\n if resource.__name__.lower() not in restful.endpoints:\n restful.add_resource(resource, *resource.thy().endpoints())",
"def simple_urljoin(base, other):\n return '/'.join([base.rstrip('/'), other.lstrip('/')])",
"def pathjoin(p1, p2):\n if p2.startswith(\"/\"):\n return os.path.join(p1, p2[1:])\n else:\n return os.path.join(p1, p2)",
"def get_two_paths(host_1, host_2, base_url='http://localhost:8181/onos/v1'):\r\n\r\n host1_sw, host1_inport, host1_mac = get_host_info(host_1)\r\n host2_sw, host2_inport, host2_mac = get_host_info(host_2)\r\n\r\n url = base_url + '/paths/' + host1_sw + '/' + host2_sw\r\n print(url)\r\n res = requests.get(url, auth=('onos', 'rocks'))\r\n paths = res.json()\r\n\r\n # get the reverse path\r\n paths2 = requests.get(base_url + '/paths/' + host2_sw +\r\n '/' + host1_sw, auth=('onos', 'rocks'))\r\n\r\n return paths, paths2.json()",
"def path_join(*components):\n\n return '/'.join(filter(None, components))",
"def concat_url(endpoint, url):\n return \"%s/%s\" % (endpoint.rstrip(\"/\"), url.strip(\"/\"))",
"def join_path(base_path, child_path):\n # TODO: make this more general, allowing __up__ anywhere within child?\n while child_path.startswith(\".__up__\"):\n child_path = child_path[7:]\n base_path = base_path[:base_path.rindex(\".\")]\n return base_path + child_path",
"def join_paths(prefix: str, suffix: str) -> str:\n return os.path.join(prefix, suffix)",
"def join(self, path0, path1):\n if len(path0) == 0:\n return path1\n elif len(path1) == 0:\n return path0\n elif path0[-1] == path1[0]:\n return path0 + path1[1:]\n else:\n raise CheckersError(f\"path0 {path0[-1]} != path1 {path1[0]}\")",
"def join_paths(*paths):\n return os.path.join(*paths)",
"def includeme(config):\n config.add_route('info', '/api/v1/')\n config.add_route('register', '/api/v1/accounts')\n config.add_route('profile_detail', '/api/v1/accounts/{username}')\n config.add_route('login', '/api/v1/accounts/login')\n config.add_route('logout', '/api/v1/accounts/logout')\n config.add_route('tasks', '/api/v1/accounts/{username}/tasks')\n config.add_route('task_detail', '/api/v1/accounts/{username}/tasks/{id}')",
"def pathjoin(path1, path2):\n u = urllib.parse.urlparse(path1)\n if u.scheme != \"\":\n return urllib.parse.urljoin(path1, path2)\n\n return _os_path_join(path1, path2)",
"def join_path(path1, path2):\n if path1[-1] == path2[0]:\n return path1 + path2[1:]\n elif path2[-1] == path1[0]:\n return path2 + path1[1:]\n elif path1[-1] == path2[-1]:\n return path1 + path2[1::-1]\n elif path1[0] == path2[0]:\n return path2[:0:-1] + path1\n\n raise ValueError('Paths cannot be joined as they do not share any ends')",
"def _map_api_command(self, method, path_parts,\n query_data, post_data, fmt='html', async=False):\n output = self._choose_output(path_parts, fmt=fmt)\n for bp in reversed(range(1, len(path_parts) + 1)):\n try:\n return [\n output,\n self._command('/'.join(path_parts[:bp]),\n args=path_parts[bp:],\n query_data=query_data,\n post_data=post_data,\n method=method,\n async=async)\n ]\n except UsageError:\n pass\n except BadMethodError:\n break\n raise UsageError('Not available for %s: %s' % (method,\n '/'.join(path_parts)))",
"def join(self,other):\r\n # TODO: should we take a list of other, as os.path does\r\n if not isinstance(other,ufsi.PathInterface):\r\n other=ufsi.Path(other)\r\n\r\n if other.isAbsolute():\r\n return other\r\n\r\n # Sort out separators\r\n selfSep=self.getSeparator()\r\n otherStr=str(other).replace(other.getSeparator(),selfSep)\r\n selfStr=self._path\r\n if not selfStr.endswith(selfSep) and selfStr!='':\r\n selfStr=selfStr+selfSep\r\n if otherStr.startswith(selfSep):\r\n otherStr=otherStr[len(selfSep):]\r\n\r\n return self.__class__(selfStr+otherStr)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a target for a set of parameters. fp_mk file object of the Makefile. index numeric index of the simulation run. (Used to generate the output filename of the simulation run.) kv set of parameter values for this target. conf_in_dir location of the conf.in directory to deduce configuration from. target_dir the target directory for all the simulation outputs. | def gen_mk_target_and_conf(fp_mk, index, kv, conf_in_dir, target_dir):
# We wrap the name with commas on both ends to make it easier to extract
# keys and values from the name with regular expressions. For
# example, the regular expression ,example_([^,]*), will then match
# the parameter/variable example, and the parenthesis subexpression
# its value.
outdir = "run%sparams_%05d" % (os.sep, index)
# Create the directories
os.makedirs(os.sep.join([target_dir, outdir, 'conf']), exist_ok=True)
conf_dir = os.sep.join([outdir, 'conf'])
# Create the configuration
genconf.apply_template(conf_in_dir,
os.sep.join([target_dir, conf_dir]),
kv)
# Create params file in outdir
fp = open(os.sep.join([target_dir, outdir, "params.txt"]), 'w')
for (k, v) in sorted(kv.items()):
fp.write("%-15s %s\n" % (k, v))
fp.close()
# Create makefile rules
fp_mk.write("%s/done_sim:\n" % (outdir,))
fp_mk.write(("\t/usr/bin/time -v ${MESH_SIM} `cat \"%s/cmdline_args.txt\"` "
+ "\"%s\" \"%s\" > \"%s/stdout.txt\" 2> \"%s/stderr.txt\"\n")
% (conf_dir, conf_dir, outdir, outdir, outdir))
fp_mk.write(("\tif find \"%s\"/*.pcap -maxdepth 0 >/dev/null 2>&1 ; then "
+ "gzip \"%s\"/*.pcap ; fi\n") % (outdir, outdir,))
fp_mk.write("\tdate > \"%s/done_sim\"\n" % (outdir,))
fp_mk.write("all: %s/done_sim\n" % (outdir,))
fp_mk.write(".PHONY: clean_%s\n" % (outdir,))
fp_mk.write("clean_%s:\n" % (outdir,))
fp_mk.write("\trm -f \"%s/done_sim\"\n" % (outdir,))
fp_mk.write("\trm -f \"%s\"/*.pcap.gz\n" % (outdir,))
fp_mk.write("clean: clean_%s\n" % (outdir,))
fp_mk.write("\n")
return True | [
"def generate(self, target_dir: Optional[str]):\n for config_file in self.config_files:\n config_file.write(target_dir)",
"def write_make_config(model_name, annotated_sequence, pairs_info, \n out_name='make_config.py'):\n data = make_config_template.format(model_name=model_name, \n annotated_sequence=annotated_sequence,\n pairs_info=pairs_info)\n \n if out_name is None:\n return data\n else:\n with open(out_name, \"w\") as text_file:\n text_file.write(data)",
"def create_new_config_file(self, output_dir: str):\n output_file_name = \"{outdir}data_miner_config_{method}_{date}.yaml\".format(\n outdir=output_dir, method=self.logparser, date=datetime.now().strftime(\"%m-%d-%Y_%Hh%Mm%Ss\")\n )\n config = dict()\n config[\"log_format\"] = self.log_format\n config[\"preprocess\"] = self.preprocess_regex\n lp = dict()\n lp[\"method\"] = self.logparser\n lp[\"parameters\"] = self.optimal_parameters\n config[\"logparser\"] = lp\n\n # the parameters are numpy objects, just extract the value here\n # otherwise yaml.dump will dump the object to the .yaml file\n for key, value in lp[\"parameters\"].items():\n lp[\"parameters\"][key] = float(value)\n\n with open(output_file_name, \"w\") as f:\n _ = yaml.dump(config, f)\n\n print(\"\\nNew template config file created: {config_temp}\".format(config_temp=output_file_name))",
"def make_config(configpath, geneID, ligand_inchikey, hm_name, pose_ID):\n # input the homology model to prediction\n tgt_location = wd + geneID + '/PDBQT/' + hm_name + '.pdbqt'\n # calculate the center of search space used during docking -- center of active site\n center = get_center_coords(geneID, hm_name, ligand_inchikey)\n # same search space\n size = 50\n \n with open(configpath, 'w+') as f:\n f.write('receptor='+str(tgt_location)+'\\n')\n f.write(' '+'\\n')\n f.write('center_x='+str(center[0])+'\\n')\n f.write('center_y='+str(center[1])+'\\n')\n f.write('center_z='+str(center[2])+'\\n')\n f.write(' '+'\\n')\n f.write('size_x='+str(size)+'\\n')\n f.write('size_y='+str(size)+'\\n')\n f.write('size_z='+str(size)+'\\n')\n f.write(' '+'\\n')\n # same parameters as docking\n f.write('exhaustiveness=8'+'\\n')\n f.write('num_modes=100'+'\\n')\n f.write('energy_range=3'+'\\n')\n f.close()",
"def write_temp_mkdocs_config(inconf):\n ignored_keys = ('gens_dir', 'pages', 'headers', 'generate', 'loader',\n 'preprocessor', 'additional_search_paths')\n\n config = {key: value for key, value in inconf.items() if key not in ignored_keys}\n config['docs_dir'] = inconf['gens_dir']\n if 'pages' in inconf:\n config['nav'] = inconf['pages']\n\n with open('mkdocs.yml', 'w') as fp:\n yaml.dump(config, fp)\n\n atexit.register(lambda: os.remove('mkdocs.yml'))",
"def build_paths(self) -> None:\n self.filepath = self.config['input_file']\n self.config['sub_dir'] = os.path.basename(self.filepath).split('.')[0]\n path = os.path.normpath(os.path.join(\n self.config['output_dir'],\n self.config['sub_dir'],\n ))\n self.config['path'] = path\n for file_type in ['train', 'test', 'val']:\n self.config[f'{file_type}_file'] = os.path.join(path, f'{file_type}.csv')",
"def generate(env):\n Builder = SCons.Builder.Builder\n\n def builder_print_action():\n # Suppress action command line printing... each action has its own\n # pretty message.\n pass\n\n def adj_ext_for_msg(target, new_ext):\n fname = os.path.splitext(target)\n return fname[0]+new_ext\n\n env['__kicadprj_adjext'] = adj_ext_for_msg\n\n # A multi stage builder that,\n # - generate .pro file\n # - generate .sch file\n # - generate fp-lib-table file\n first_action = SCons.Action.Action(kicad_project_builder,\n 'Generating: \"$TARGET.name\" (Kicad project file)',\n show=builder_print_action)\n second_action = SCons.Action.Action(kicad_schematic_builder,\n 'Generating: \"${__kicadprj_adjext(TARGET.name,\".sch\")}\" (Schematic file for project)',\n show=builder_print_action())\n third_action = SCons.Action.Action(kicad_libtable_builder,\n 'Generating: \"fp-lib-table and sym-lib-table\" (Footprint & symbol table for the project)',\n show=builder_print_action)\n \n kicad_prj_builder_action = [first_action, second_action, third_action]\n kicad_prj_builder = Builder (action = kicad_prj_builder_action,\n emitter = kicadprj_Emitter,\n suffix='.pro')\n\n env.Append(BUILDERS = {'KiCadProject' : kicad_prj_builder})",
"def generate_model_configuration(args):\n\n model_config = {\n\n \"dataset_path\": args.dataset_config.output_folder, # Input dataset folder path.\n \"reaction_classes\": args.dataset_config.final_classes, # Final list of reaction classes.\n \"input_configs\": args.descriptor_config.model_training, # List of input configurations to train the model on.\n\n \"logs_folder\": args.model_config.logs_folder, # Path to the designated log folder.\n \"use_oversampling\": eval(args.model_config.use_oversampling), # Use SMOTE oversampling.\n \"random_seed\": args.model_config.random_seed, # Random seed used for reproducibility purposes.\n \"learning_rate\": args.model_config.learning_rate, # ADAM optimizer learning rate.\n \"max_epochs\": args.model_config.max_epochs, # Maximum number of epochs.\n \"batch_size\": args.model_config.batch_size, # Batch size.\n \"early_stopping\": args.model_config.early_stopping, # Number of epochs for early stopping detection.\n\n \"input_size\": args.model_config.input_layer[\"size\"], # Input layer size.\n \"output_size\": args.model_config.output_layer[\"size\"], # Output layer size.\n \"output_act_fcn\": args.model_config.output_layer[\"activation_fcn\"], # Output layer activation.\n\n \"hidden_types\": args.model_config.hidden_layers[args.model_config.fixed_model][\"types\"], # Hidden layer types.\n \"hidden_sizes\": args.model_config.hidden_layers[args.model_config.fixed_model][\"sizes\"], # Hidden layer sizes.\n # Hidden layer activation functions.\n \"hidden_act_fcns\": args.model_config.hidden_layers[args.model_config.fixed_model][\"activation_fcns\"],\n # Hidden layer dropout values.\n \"hidden_dropouts\": args.model_config.hidden_layers[args.model_config.fixed_model][\"dropouts\"]\n }\n\n return model_config",
"def buildConfigFile_makeTauIdEffQCDtemplate(jobId, directory, inputFileName, tauIds, fitVariables, sysUncertainties, outputFilePath,\n regionQCDtemplateFromData_passed, regionQCDtemplateFromData_failed, regionQCDtemplateFromData_D, \n regionWplusJetsSideband_passed, regionWplusJetsSideband_failed, regionWplusJetsSideband_D, \n histQCDtemplateFromData_passed, histQCDtemplateFromData_failed, histQCDtemplateFromData_D):\n\n fitVariables_makeTauIdEffQCDtemplate = []\n fitVariables_makeTauIdEffQCDtemplate.extend(fitVariables)\n fitVariables_always = [\n 'diTauVisMass',\n 'diTauMt'\n ]\n for fitVariable_always in fitVariables_always:\n if fitVariables_makeTauIdEffQCDtemplate.count(fitVariable_always) == 0:\n fitVariables_makeTauIdEffQCDtemplate.append(fitVariable_always)\n\n if directory != \"\":\n outputFileName = 'makeTauIdEffQCDtemplate_%s_%s.root' % (jobId, directory)\n else:\n outputFileName = 'makeTauIdEffQCDtemplate_%s.root' % jobId\n outputFileName = outputFileName.replace('__', '_')\n outputFileName_full = os.path.join(outputFilePath, outputFileName)\n\n sysUncertainties_string = make_inputFileNames_vstring(sysUncertainties)\n\n config = \\\n\"\"\"\nimport FWCore.ParameterSet.Config as cms\n\nprocess = cms.PSet()\n\nprocess.fwliteInput = cms.PSet(\n fileNames = cms.vstring('%s')\n)\n \nprocess.fwliteOutput = cms.PSet(\n fileName = cms.string('%s')\n)\n\nprocess.makeTauIdEffQCDtemplate = cms.PSet(\n\n # CV: set to '' if determining tau id. efficiency for whole Tag & Probe sample,\n # set to name of one individual bin in case you want to measure the tau id. efficiency as function of tauPt, tauEta,...\n # (needs as many 'makeTauIdEffQCDtemplate' jobs to be run in parallel as there are bins)\n directory = cms.string('%s'),\n\n # regions (in Data) from which templates for QCD background are taken\n regionTakeQCDtemplateFromData_passed = cms.string('%s'),\n regionTakeQCDtemplateFromData_failed = cms.string('%s'),\n regionTakeQCDtemplateFromData_D = cms.string('%s'),\n\n # regions (in Data) from which W + jets background contribution to QCD control region is estimated\n regionWplusJetsSideband_passed = cms.string('%s'),\n regionWplusJetsSideband_failed = cms.string('%s'),\n regionWplusJetsSideband_D = cms.string('%s'),\n\n # define \"all\", \"passed\" and \"failed\" regions\n regionStoreQCDtemplate_passed = cms.string('%s'),\n regionStoreQCDtemplate_failed = cms.string('%s'),\n regionStoreQCDtemplate_D = cms.string('%s'),\n \n tauIds = cms.vstring(\n%s\n ),\n\n fitVariables = cms.vstring(\n%s\n ),\n \n sysUncertainties = cms.vstring(\n%s\n )\n)\n\"\"\" % (inputFileName, outputFileName_full,\n directory,\n regionQCDtemplateFromData_passed, regionQCDtemplateFromData_failed, regionQCDtemplateFromData_D,\n regionWplusJetsSideband_passed, regionWplusJetsSideband_failed, regionWplusJetsSideband_D, \n histQCDtemplateFromData_passed, histQCDtemplateFromData_failed, histQCDtemplateFromData_D,\n tauIds, fitVariables_makeTauIdEffQCDtemplate, sysUncertainties_string)\n \n configFileName = outputFileName.replace('.root', '_cfg.py')\n configFileName_full = os.path.join(outputFilePath, configFileName) \n configFile = open(configFileName_full, \"w\")\n configFile.write(config)\n configFile.close()\n\n logFileName = configFileName.replace('_cfg.py', '.log')\n logFileName_full = os.path.join(outputFilePath, logFileName) \n\n retVal = {}\n retVal['configFileName'] = configFileName_full\n retVal['outputFileName'] = outputFileName_full\n retVal['logFileName'] = logFileName_full\n\n return retVal",
"def _paramsFileHead():\n\n str = \\\n\"\"\"\n# ----------------------------------------------------------------------\n# Numenta Platform for Intelligent Computing (NuPIC)\n# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from\n# Numenta, Inc. a separate commercial license for this software code, the\n# following terms and conditions apply:\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n# See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see http://www.gnu.org/licenses.\n#\n# http://numenta.org/licenses/\n# ----------------------------------------------------------------------\n\n## This file defines parameters for a prediction experiment.\n\n###############################################################################\n# IMPORTANT!!!\n# This params file is dynamically generated by the RunExperimentPermutations\n# script. Any changes made manually will be over-written the next time\n# RunExperimentPermutations is run!!!\n###############################################################################\n\n\nfrom nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription\n\n# the sub-experiment configuration\nconfig ={\n\"\"\"\n\n return str",
"def Create_makefile(dir,out_ext = \"exe\",in_ext = \"cpp\",recursive=False,delcommand = \"del\",compiler = \"g++\",scriptPath=os.path.abspath(__file__)):\n \n if os.path.isdir(dir):\n\n out_ext = out_ext.lstrip(\".\")\n \n if recursive:\n return CreateRecusive(dir,out_ext,in_ext,recursive,delcommand,compiler,scriptPath)\n else:\n #get all the files from the dir\n dir_ls = os.listdir(dir)\n\n dir_ls = [os.path.abspath(i) for i in dir_ls]\n\n filesTobeProcessed= list(filter(lambda file: os.path.isfile(os.path.join(dir,file)),dir_ls))\n\n # filesTobeProcessed = [os.path.basename()]\n #get all the files that have the ext\n filesTobeProcessed = list_files(dir,in_ext)\n\n #Make sure that the files list is not empty\n\n assert(len(filesTobeProcessed)>0)\n\n\n print(\"Selected Following files\\n\\t\"+\"\\n\\t\".join(str(i[0])+\". \"+i[1] for i in enumerate(filesTobeProcessed,1)))\n \n #appending (\"\")\n filesWithExt = list(map(lambda x: re.sub(in_ext,\"$(ext)\",x),filesTobeProcessed))\n\n print(\"Creating make file\")\n\n # print(filesWithExt)\n#-----------------------------------creating the make file---------------------------------------- \n #creating the make file \n with open(os.path.join(dir,\"makefile\"),\"w\") as mkfile:\n\n mkfile.write(\"#This make file was created by making.py from https://raw.githubusercontent.com/Satyamkumarai/Satyamkumarai.github.io/master/downloads/create_makefile.py\\n\")\n\n #writing the main target `all` that has all the output files as the dependencies\n mkfile.writelines(f\"ext = {out_ext}\\n\")\n \n mkfile.writelines(\"all: \"+\" \".join(filesWithExt)+\"\\n\")\n\n # creating the output for each file..\n for inp,outp in zip(filesTobeProcessed,filesWithExt):\n\n mkfile.writelines(f\"{outp}: {inp}\\n\") \n\n mkfile.writelines(f\"\\t{compiler} {inp} -o {outp}\\n\")\n \n #make update \n mkfile.write(f\"update:\\n\\tpy -c \\\"from requests import get;file =open(f'{scriptPath}','w');file.write( get('https://satyamkumarai.github.io/downloads/create_makefile.py').text);file.close()\\\"\\n\\tpy {scriptPath} {dir} -e $(ext)\\n\")\n \n #make clean\n mkfile.write(f\"clean:\\n\\t{delcommand} \\\"./*.exe\\\"\")\n \n print(\"Created in \"+os.path.join(dir,\"makefile\"))",
"def build(dir):\n\n try:\n graph = open(dir + \"/model_opt.json\").read()\n lib = tvm.runtime.load_module(dir + \"/model_opt.so\")\n params = bytearray(open(dir + \"/model_opt.params\", \"rb\").read())\n except FileNotFoundError:\n print(\"model_file_dir has does not contain correct files\")\n sys.exit(1)\n\n return graph, lib, params",
"def generate_files():\n \n # Generate a default job_file if necessary\n genfilename = \"job_file\"\n if os.path.exists(genfilename):\n logging.info(\"'job_file' already exists.\")\n else:\n genfile = open(genfilename, \"w\")\n genfile.write( \\\n \"username = '<user>'\\n\" \\\n \"nodes = [1]\\n\" \\\n \"foldername = 'experiment'\\n\" \\\n \"numreps = 1\\n\" \\\n \"exename = '<exe>'\\n\" \\\n \"exeinput = '%s'\\n\" \\\n \"exeseeds = 'integer'\\n\" \\\n )\n genfile.close()\n \n # Generate the config directory if necessary\n gendirname = \"config\"\n if os.path.exists(gendirname):\n logging.info(\"'config' directory already exists\")\n else:\n os.makedirs(gendirname)",
"def build_param_file( self, param_dict, directory=None ):\n if self.command and \"$param_file\" in self.command:\n fd, param_filename = tempfile.mkstemp( dir=directory )\n os.close( fd )\n f = open( param_filename, \"wt\" )\n for key, value in param_dict.items():\n # parameters can be strings or lists of strings, coerce to list\n if type(value) != type([]):\n value = [ value ]\n for elem in value:\n f.write( '%s=%s\\n' % (key, elem) ) \n f.close()\n param_dict['param_file'] = param_filename\n return param_filename\n else:\n return None",
"def gen_config_file_and_params(prefix, cfg_in, cfg_out, cfg_path):\n TEST_REPO_SECTION = 'internal_repo'\n command = ['repoclosure', '-t', '--config={}'.format(cfg_path)]\n internal_repo_ip = prefix.virt_env.get_net().gw()\n internal_repo_port = constants.REPO_SERVER_PORT\n internal_repo_url = 'http://{ip}:{port}/el7/'.format(\n ip=internal_repo_ip, port=internal_repo_port\n )\n\n config = ConfigParser.ConfigParser()\n config.readfp(cfg_in)\n for section in config.sections():\n if section == \"main\":\n continue\n command.append('--lookaside={}'.format(section))\n if config.has_option(section, 'exclude'):\n config.remove_option(section, 'exclude')\n if config.has_option(section, 'includepkgs'):\n config.remove_option(section, 'includepkgs')\n\n config.add_section(TEST_REPO_SECTION)\n config.set(TEST_REPO_SECTION, 'name', 'Local repo')\n config.set(TEST_REPO_SECTION, 'baseurl', internal_repo_url)\n config.set(TEST_REPO_SECTION, 'enabled', 1)\n config.set(TEST_REPO_SECTION, 'ip_resolve', 4)\n config.set(TEST_REPO_SECTION, 'gpgcheck', 0)\n config.set(TEST_REPO_SECTION, 'proxy', '_none_')\n command.append('--repoid={}'.format(TEST_REPO_SECTION))\n config.write(cfg_out)\n return command",
"def create_config_file(self):\n\t with open(self.userPath + '/config.py', 'w') as config_file:\n\t lines = ['#!/bin/env python \\n', 'import BlackDynamite as BD \\n','import re,os\\n',\n\t 'myrun, myjob = BD.getRunFromScript() \\n']\n\n\t for key, value in self.parametric_space.items():\n\t\t\t\tprint(\"key = \" +str(key))\n\t\t\t\tif 'Material' in key:\n\t\t\t\t\t# find the material name\n\t\t\t\t\tm = re.search(\n\t\t\t\t\t r\"Material_(?P<name>\\w+)_(?P<param>\\w+)\", key)\n\t\t\t\t\tmaterial_name = m.group('name')\n\t\t\t\t\t# find the parameter concerned\n\t\t\t\t\tparameter = m.group('param')\n\t\t\t\t\t# config opens material file and reads its content\n\t\t\t\t\tlines.append('file = open(\\\"material.dat\\\", \\\"r\\\")\\n')\n\t\t\t\t\tlines.append('lines = file.readlines()\\n')\n\t\t\t\t\t# then config close it to open it later in write mode\n\t\t\t\t\tlines.append('file.close()\\n')\n\t\t\t\t\t# config initializes a variable to know if it is in the right material\n\t\t\t\t\t# and to find the indexes where the change will be made\n\t\t\t\t\tlines.append('in_material=False\\n')\n\t\t\t\t\tlines.append('ind=0\\n')\n\t\t\t\t\tlines.append('for line in lines:\\n')\n\t\t\t\t\t# config file need to search the parameter\n\t\t\t\t\tlines.append(self.tab+'m=re.search(\\'{}\\',line)\\n'.format(\n\t\t\t\t\t material_name.lower()))\n\t\t\t\t\t# if it finds it, it needs to search the parameter\n\t\t\t\t\tlines.append(self.tab+'if m is not None:\\n')\n\t\t\t\t\tlines.append(self.tab+self.tab+'in_material=True\\n')\n\t\t\t\t\tlines.append(self.tab+'m=re.search(\\'{}=\\',line)\\n'.format(\n\t\t\t\t\t parameter))\n\t\t\t\t\tlines.append(self.tab+'if m is not None and in_material == True:\\n')\n\t\t\t\t\tlines.append(self.tab+self.tab+'lines[ind]=\\'{}=\\' + str(myjob.{}_{}_{}) + \\'\\\\n\\'\\n'.format(parameter,'material',material_name.lower(),parameter.lower()))\n\t\t\t\t\t# config has found the vaiable so we are moving to another parameter\n\t\t\t\t\tlines.append(self.tab+self.tab+'in_material=False \\n')\n\t\t\t\t\t# write the line\n\t\t\t\t\tlines.append(self.tab+'ind = ind+1\\n')\n\t\t\t\t\t# write the lines\n\t\t\t\t\tlines.append('file = open(\\\"material.dat\\\", \\\"w+\\\")\\n')\n\t\t\t\t\tlines.append('file.writelines(lines)\\n')\n\t\t\t\t\tlines.append('file.close()\\n')\n\n\t lines.append('myrun.start()\\n')\n\t # TODO push values\n\t lines.append('myrun.finish()\\n')\n\n\t # write file\n\t config_file.writelines(lines)",
"def createNewInput(self,currentInputFiles,oriInputFiles,samplerType,**Kwargs): \n import DecayParser\n import FissionYieldParser\n import QValuesParser\n import MaterialParser\n import PathParser\n \n keyWordDict = {}\n \n directoryFiles = ['path','library_fiss','input_dpl']\n #print (currentInputFiles)\n driverXML = 'test_phisics_code_interface.xml'\n keyWordDict = self.mapFile(driverXML)\n #print (keyWordDict)\n tarName = self.tarFiles(directoryFiles)\n runInfoList = self.getDirInfo(driverXML)\n #print (int(runInfoList[1]))\n N = int(runInfoList[1])\n \n \n #print (Kwargs)\n #print (\"\\n\\n\\n\\n\\n\\n\")\n perturbedVars = Kwargs['SampledVars']\n distributedPerturbedVars = self.distributeVariablesToParsers(perturbedVars)\n #print (distributedPerturbedVars)\n #perturbedVars = {'DECAY|BETA|U235':1.0778}\n #perturbedVars = {'FUEL1|DENSITY|U234':1.2, 'FUEL1|DENSITY|U235':1.08E+02}\n #perturbedVars = {'FY|FAST|PU241|SE78':1.2, 'FY|THERMAL|U238|ZN68':1.08E+02, 'FY|THERMAL|U235|ZN66':5.777}\n #perturbedVars = {'QVALUES|U235':4.5963, 'QVALUES|U238':1.08E+02, 'QVALUES|CF252':7.846}\n #perturbedVars = {'BETADECAY|U235':4.5963, 'BETADECAY|U238':1.08E+02, 'BETADECAY|CF252':7.846}\n \n # NOTE: IF YOU DON'T LIKE OR CAN'T GET THE THE KEYWORDS WIT THE DICTIONARY KEYWORDdICT, I CAN USE GETBASE TO \n # OBRAIN THE KEYWORD CORRESPONDING TO THE PARSER OF INTEREST. EXAMPLE: AAA = currentInputFiles[0].getBase()print (AAA)\n for i in distributedPerturbedVars.iterkeys():\n if i == 'DECAY' : decayParser = DecayParser.DecayParser(currentInputFiles[keyWordDict['decay']].getAbsFile(), **distributedPerturbedVars[i])\n if i == 'DENSITY' : materialParser = MaterialParser.MaterialParser(currentInputFiles[keyWordDict['material']].getAbsFile(), **distributedPerturbedVars[i])\n if i == 'FY' : FissionYieldParser = FissionYieldParser.FissionYieldParser(currentInputFiles[keyWordDict['fissionyield']].getAbsFile(), **distributedPerturbedVars[i])\n if i == 'QVALUES' : QValuesParser = QValuesParser.QValuesParser(currentInputFiles[keyWordDict['fissqvalue']].getAbsFile(), **distributedPerturbedVars[i])\n if i == 'BETADECAY': BetaDecayParser = PathParser.PathParser(currentInputFiles[keyWordDict['betadecay']].getAbsFile(), **distributedPerturbedVars[i])\n \n tarFiles = currentInputFiles[keyWordDict['dirfiles']].getAbsFile()\n workDir = currentInputFiles[0].getPath()\n #print (workDir)\n self.untarFolders(tarFiles, workDir)\n self.copyIntoFolders(workDir)\n \n return currentInputFiles",
"def _initial_target_setup(self):\n # Targets\n self.target = []\n n_targets = self.config['simulation']['n_targets']\n for target in self.config['simulation']['target_building_id']:\n info = {}\n info['target_id'] = target\n info['probability_goals'] = 1 / n_targets\n info['progress_goals'] = 0\n info['probability_goals_indoor'] = 1 / n_targets\n info['progress_goals_indoor'] = 0\n info['defence_perimeter'] = 0\n\n building_info = self.building_info(target)\n info['position'] = building_info['position']\n info['perimeter'] = building_info['perimeter']\n info['area'] = building_info['area']\n info['n_floors'] = building_info['n_floors']\n info['n_defence_perimeter'] = building_info['perimeter'] / (\n self.config['ugv']['defense_radius'] * 2)\n\n self.target.append(info)",
"def generate_config_file( defaults_file, template_file, extra_config=None ):\n \n # read the default configuration file and add in the extras\n conf_vars = {}\n\n try:\n conf_vars = read_config( defaults_file )\n except:\n pass\n \n if extra_config and isinstance( extra_config, dict):\n conf_vars.update( extra_config )\n\n stdout = StringIO.StringIO()\n stderr = StringIO.StringIO()\n \n # rendering function\n def OUT( s ):\n stdout.write( s )\n stdout.write(\"\\n\")\n\n def ERR( s ):\n stderr.write( s )\n stderr.write(\"\\n\")\n\n # evaluate our config-generating script\n try:\n template_fd = open(template_file, \"r\")\n template_code = template_fd.read()\n template_fd.close()\n\n conf_vars['OUT'] = OUT\n conf_vars['ERR'] = ERR\n \n exec template_code in conf_vars\n\n config_data = stdout.getvalue()\n config_err = stderr.getvalue()\n \n except Exception, e:\n raise MDMethodFailed( \"generate_config_file\", e )\n\n return (config_data, config_err)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the reverse lookup fails on nonexistent energy group bounds. | def test_invalidGroupStructureType(self):
modifier = 1e-5
for groupStructureType in units.GROUP_STRUCTURE.keys():
energyBounds = units.getGroupStructure(groupStructureType)
energyBounds[0] = energyBounds[0] * modifier
with self.assertRaises(ValueError):
units.getGroupStructureType(energyBounds) | [
"def test_unused_locality_near_stops_has_nptg_entries():\n assert unused()",
"def test_get_fail_invalid_range(self):\n namespace = 'kytos.kronos.telemetry.switches.1.interfaces.232.bytes_in'\n start = None\n end = None\n\n influx.validate_timestamp.return_value = False\n self.backend._namespace_exists = mock.MagicMock()\n self.backend._namespace_exists.return_value = True\n with self.assertRaises(ValueError):\n self.backend.get(namespace, start, end)",
"def test_error_missing_time_bounds(blended_probability_below_cube, interpreter):\n blended_probability_below_cube.coord(\"time\").bounds = None\n with pytest.raises(ValueError, match=\"has no time bounds\"):\n interpreter.run(blended_probability_below_cube)",
"def test_xy_out_of_osgb_bounds(self):\n\n self.assertRaises(GridRefException, xy_to_osgb, -10, 0)",
"def test_get_bad_raster_index():\n res_fp = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2012.h5')\n\n # use a custom meta df because NSRDB/WTK resource test files are too small\n fp = os.path.join(TESTDATADIR, 'wtk/hawaii_grid.csv')\n meta = pd.read_csv(fp)\n\n target = (-90, -162)\n shape = (10, 5)\n with pytest.raises(RuntimeError):\n with NSRDBX(res_fp) as ext:\n ext.get_raster_index(target, shape, meta=meta)\n\n target = (16, 0)\n shape = (10, 5)\n with pytest.raises(RuntimeError):\n with NSRDBX(res_fp) as ext:\n ext.get_raster_index(target, shape, meta=meta)",
"def test_uncertainty(self):\n x = geo_uri(\"geo:40.685922,-111.853206,1321;crs=wgs84;u=0\")\n self.assertAlmostEqual(40.685922, x.lattitude, places=6)\n self.assertAlmostEqual(-111.853206, x.longitude, places=6)\n self.assertAlmostEqual(1321, x.altitude, places=3)\n\n xr = x.lattitude_range\n self.assertAlmostEqual(40.685922, xr[0], places=6)\n self.assertAlmostEqual(40.685922, xr[1], places=6)\n \n xr = x.longitude_range\n self.assertAlmostEqual(-111.853206, xr[0], places=6)\n self.assertAlmostEqual(-111.853206, xr[1], places=6)\n \n xr = x.altitude_range\n self.assertAlmostEqual(1321, xr[0], places=3)\n self.assertAlmostEqual(1321, xr[1], places=3)\n \n y = geo_uri(\"geo:40.685922,-111.853206,1321;crs=wgs84;u=30\")\n self.assertAlmostEqual(40.685922, y.lattitude, places=6)\n self.assertAlmostEqual(-111.853206, y.longitude, places=6)\n self.assertAlmostEqual(1321, y.altitude, places=3)\n \n yr = y.lattitude_range\n self.assertAlmostEqual(40.685652, yr[0], places=6)\n self.assertAlmostEqual(40.686192, yr[1], places=6)\n \n yr = y.longitude_range\n # TODO: This range assumes a sphere of radius 6378137 m, whereas\n # the earth is an elipsoid with that radius as the semi-major\n # axis and 6356752.3142 m as the radius of the semi-minor axis\n # at the poles.\n self.assertAlmostEqual(-111.853561, yr[0], places=6)\n self.assertAlmostEqual(-111.852851, yr[1], places=6)\n \n yr = y.altitude_range\n self.assertAlmostEqual(1291, yr[0], places=3)\n self.assertAlmostEqual(1351, yr[1], places=3)\n \n z = geo_uri(\"geo:40.685922,-111.853206,1321;crs=wgs84\")\n self.assertIsNone(z.lattitude_range)\n self.assertIsNone(z.longitude_range)\n self.assertIsNone(z.altitude_range)",
"def test_get_group_not_found(shared_zone_test_context):\n client = shared_zone_test_context.ok_vinyldns_client\n client.get_group(\"doesntexist\", status=404)",
"def test_notLookupMissingByValue(self):\n self.assertRaises(ValueError, self.FXF.lookupByValue, 0x10)",
"def test_invalid_range():\n with pytest.raises(ValueError):\n # Test with too-low value\n assert calculate_E_min(B_degrees=-10)\n with pytest.raises(ValueError):\n # Test with too-high value\n assert calculate_E_min(B_degrees=1_000)",
"def test_earth_relief_invalid_data_source():\n with pytest.raises(GMTInvalidInput):\n load_earth_relief(\n resolution=\"01d\", registration=\"gridline\", data_source=\"invalid_source\"\n )",
"def test_inexistent_tile(self):\n self.assertRaises(urllib.error.HTTPError, get_map, 2016, range(75000, 75001), range(74956, 74957), \"test\")",
"def test_earth_relief_fails(data_source):\n resolutions = \"1m 1d bla 60d 001m 03\".split()\n resolutions.append(60)\n for resolution in resolutions:\n with pytest.raises(GMTInvalidInput):\n load_earth_relief(resolution=resolution, data_source=data_source)",
"def test_faulty(self):\n self.assertRaises(ValueError, geo_uri, \"xxx:40.685922,-111.853206,1321;crs=wgs84;u=1.2\")\n self.assertRaises(ValueError, geo_uri, \"geo:40.685922,-111.853206,1321;u=1.2;crs=wgs84\")\n self.assertRaises(ValueError, geo_uri, \"geo:40.685922,-111.853206,1321;crs=wgs84;spam=1;u=1.2\")",
"def test_adjust_range(self):\n assert (\n AddressFilter.Range._adjust_range(GroupAddress.MAX_FREE + 1)\n == GroupAddress.MAX_FREE\n )\n assert AddressFilter.Range._adjust_range(-1) == 0",
"def test_elevation_getter_single_point_out_of_bounds(self):\n\n poly = get_raster_bbox_as_polygon(test_dem)\n bounds = poly.bounds\n x = bounds[0] - 1.0 # outside left bound\n y = bounds[1] - 1.0 # outside bottom bound\n\n self.assertRaises(IndexError, get_elevation, *[test_dem, x, y])",
"def test_preference_invalid_fail(lfric_sst):\n bbox = panel(\"africa\")\n emsg = \"Expected a preference of 'cell' or 'center' or 'point'\"\n with pytest.raises(ValueError, match=emsg):\n _ = bbox.enclosed(lfric_sst, preference=\"invalid\")",
"def test_error_wrong_group(self):\n # Pokémon get registered (unsurprisingly) under the ``pokemon`` group,\n # not ``random``.\n registry = EntryPointClassRegistry('random')\n\n with self.assertRaises(RegistryKeyError):\n registry.get('fire')",
"def test_lookup(self):\n\n # TEST 1: test with abbrevation and use_cache True\n self.assertEqual(states.lookup(val='KA', field='abbr'), states.KA)\n\n # TEST 2: test with full name and use_cache = True\n self.assertEqual(states.lookup(val='manipur', field='name'), states.MN)\n\n # TEST 3: test with abbrevation without using cache\n self.assertEqual(states.lookup(val='HR', field='abbr', use_cache=False), states.HR)\n\n # TEST 4: test with name without using cache\n self.assertEqual(states.lookup(val='delhi', field='name', use_cache=False), states.DL)\n\n # TEST 5: test with faulty name so that matching fail\n with self.assertRaises(ValueError):\n states.lookup(val='XY', field='abbr')",
"def test_erefused(self):\n self.assertIs(self.exceptionForCode(EREFUSED), DNSQueryRefusedError)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the reverse lookup of the energy group structures works. Notes: Several group structures point to the same energy group structure, so the reverse lookup will fail to get the correct group structure type. | def test_consistenciesBetweenGroupStructureAndGroupStructureType(self):
for groupStructureType in units.GROUP_STRUCTURE.keys():
self.assertEqual(
groupStructureType,
units.getGroupStructureType(
units.getGroupStructure(groupStructureType)
),
) | [
"def test_invalidGroupStructureType(self):\n modifier = 1e-5\n for groupStructureType in units.GROUP_STRUCTURE.keys():\n energyBounds = units.getGroupStructure(groupStructureType)\n energyBounds[0] = energyBounds[0] * modifier\n with self.assertRaises(ValueError):\n units.getGroupStructureType(energyBounds)",
"def test_nested_families(self):\n conf = config.Config(\"test_data/nested_families.yml\")\n desired_zones = conf.get_desired_dns()\n self.assertNotEqual(None, desired_zones)\n self.assertEqual(1, len(desired_zones.keys()))\n self.check_zone_soa_email(desired_zones, 'domain.com')\n zone_to_test = desired_zones['domain.com']\n self.assertEqual(7, len(zone_to_test.records.keys()))\n self.check_record(zone_to_test, 'A', 'root', '1.1.1.1', None, 10)\n self.check_record(zone_to_test, 'A', 'shadow1', '1.1.1.2', None, 11)\n self.check_record(zone_to_test, 'A', 'shadow1', '1.1.1.4', None, 11)\n self.check_record(zone_to_test, 'A', 'shadow1', '1.1.1.5', None, 31)\n self.check_record(zone_to_test, 'A', 'shadow2', '1.1.1.3', None, 12)\n self.check_record(zone_to_test, 'A', 'family1', '2.1.1.1', None, 20)\n self.check_record(zone_to_test, 'A', 'family2', '3.1.1.1', None, 21)",
"def test_e_regularization_structure(self):\n self.family.clean_tree()\n self.family.generate_tree(thermo_database=self.thermoDatabase, rxns=self.treerxns)\n self.family.check_tree()\n self.family.regularize(thermo_database=self.thermoDatabase, rxns=self.treerxns)\n self.family.check_tree()",
"def test_internal_data_groups(self, ltm_service_manager):\n # Should create one Data Group\n objs = self.get_created_ltm_objects(ltm_service_manager, InternalDataGroup)\n assert 1 == len(objs)\n assert objs[0].name == 'test-dgs'\n\n # Should update one Data Group\n self.ltm_service['internalDataGroups'][0]['name'] = 'test-dg'\n objs = self.get_updated_ltm_objects(ltm_service_manager, InternalDataGroup)\n assert 1 == len(objs)\n assert objs[0].name == 'test-dg'\n\n # Should delete one Data Group\n self.ltm_service['internalDataGroups'] = []\n objs = self.get_deleted_ltm_objects(ltm_service_manager, InternalDataGroup)\n assert 1 == len(objs)\n assert 'test-dg' == objs[0].name",
"def test_ethnicgroups_get(self):\n pass",
"def test_dials_symmetry_decide_pointgroup(\n reflection_spacegroup,\n experiments_spacegroup,\n expected_lattices,\n required_spacegroup_order,\n other_spacegroups,\n helper_directory,\n):\n helper, tmpdir = helper_directory\n refl_path = (tmpdir / \"test.refl\").strpath\n exp_path = (tmpdir / \"test.expt\").strpath\n generated_exp(space_group=experiments_spacegroup).as_file(exp_path)\n generate_reflections_in_sg(reflection_spacegroup).as_file(refl_path)\n\n symmetry_analyser = helper.dials_symmetry_decide_pointgroup([exp_path], [refl_path])\n\n # Note : instabilities have been observed in the order of the end of the\n # spacegroup list - this is likely due to the use of unseeded random number\n # generation in dials.symmetry symmetry element scoring, but this only seems\n # to affect the order of groups with a score near zero. Hence only assert the\n # order of the spacegroups that must be in order, near the start of the list.\n assert symmetry_analyser.get_possible_lattices() == expected_lattices\n spacegroups = symmetry_analyser.get_likely_spacegroups()\n assert spacegroups[: len(required_spacegroup_order)] == required_spacegroup_order\n assert set(spacegroups[len(required_spacegroup_order) :]) == set(other_spacegroups)",
"def test_division_logistics_item_groups_get(self):\n pass",
"def is_group_ref_group(group, key):\n # FIXME this is hardcoded for PIAEL order\n north_index = key[0].find(group[0])\n east_column = ''.join([key[i][25] for i in range(26)])\n east_index = east_column.find(group[1])\n\n if key[east_index][north_index] != group[2] or key[25][north_index] != group[3] \\\n or key[east_index][0] != group[4]:\n return False\n\n return True",
"def test_get_device_group(self):\n pass",
"def test_check_ncs_group_list(self):\n phil_groups = ncs_group_master_phil.fetch(\n iotbx.phil.parse(phil_str)).extract()\n pdb_inp = iotbx.pdb.input(source_info=None, lines=test_pdb_str_2)\n ncs_obj_phil = ncs.input(\n hierarchy=pdb_inp.construct_hierarchy(),\n ncs_phil_groups=phil_groups.ncs_group)\n nrgl = ncs_obj_phil.get_ncs_restraints_group_list()\n pdb_inp = iotbx.pdb.input(lines=test_pdb_str_2,source_info=None)\n ph = pdb_inp.construct_hierarchy()\n # passing test\n self.assertTrue(nu.check_ncs_group_list(nrgl,ph,chain_max_rmsd=1))\n # make sure test fails when it suppose to\n nrgl[0].copies[1].t = matrix.col([100, -89.7668, 5.8996])\n self.assertFalse(nu.check_ncs_group_list(nrgl,ph,chain_max_rmsd=1))",
"def test_inverse_hierarchy(self):\r\n inverse_hierarchy = analyse.create_inverse_hierarchy(self.dom)\r\n self.assertEquals(len(inverse_hierarchy), 4)",
"def test_retrieve_services_network_group(self):\n pass",
"def test_get_additional_groups_caching(self):\n awstags = self.get_obj()\n awstags._ec2.get_all_instances = \\\n Mock(side_effect=get_all_instances)\n\n for hostname, expected in groups.items():\n metadata = Mock()\n metadata.hostname = hostname\n actual = awstags.get_additional_groups(metadata)\n msg = \"\"\"%s has incorrect groups:\nactual: %s\nexpected: %s\"\"\" % (hostname, actual, expected)\n self.assertItemsEqual(actual, expected, msg)",
"def test_expand_groups_unknown() -> None:\n with pytest.raises(KeyError):\n Environment._expand_groups([\"$list\", \"$UNKNOWN\", \"$str\", \"end\"], _GROUPS)",
"async def test_reproduce_group(opp):\n context = Context()\n\n def clone_state(state, entity_id):\n \"\"\"Return a cloned state with different entity_id.\"\"\"\n return State(\n entity_id,\n state.state,\n state.attributes,\n last_changed=state.last_changed,\n last_updated=state.last_updated,\n context=state.context,\n )\n\n with patch(\n \"openpeerpower.components.group.reproduce_state.async_reproduce_state\"\n ) as fun:\n fun.return_value = Future()\n fun.return_value.set_result(None)\n\n opp.states.async_set(\n \"group.test\",\n \"off\",\n {\"entity_id\": [\"light.test1\", \"light.test2\", \"switch.test1\"]},\n )\n opp.states.async_set(\"light.test1\", \"off\")\n opp.states.async_set(\"light.test2\", \"off\")\n opp.states.async_set(\"switch.test1\", \"off\")\n\n state = State(\"group.test\", \"on\")\n\n await async_reproduce_states(opp, [state], context=context)\n\n fun.assert_called_once_with(\n opp,\n [\n clone_state(state, \"light.test1\"),\n clone_state(state, \"light.test2\"),\n clone_state(state, \"switch.test1\"),\n ],\n context=context,\n reproduce_options=None,\n )",
"def test_rxn_family(self):\n self.rxn1.determine_family(rmg_database=self.rmgdb)\n self.assertEqual(self.rxn1.family.label, 'H_Abstraction')\n self.assertTrue(self.rxn1.family_own_reverse)\n self.rxn2.determine_family(rmg_database=self.rmgdb)\n self.assertEqual(self.rxn2.family.label, 'Disproportionation')\n self.assertFalse(self.rxn2.family_own_reverse)\n self.rxn3.determine_family(rmg_database=self.rmgdb)\n self.assertEqual(self.rxn3.family.label, 'intra_H_migration')\n self.assertTrue(self.rxn3.family_own_reverse)\n self.rxn4.determine_family(rmg_database=self.rmgdb)\n self.assertEqual(self.rxn4.family.label, 'H_Abstraction')\n self.rxn5.rmg_reaction_from_arc_species()\n self.rxn5.check_attributes()\n self.rxn5.determine_family(rmg_database=self.rmgdb)\n self.assertEqual(self.rxn5.family.label, 'H_Abstraction')\n self.rxn9.rmg_reaction_from_arc_species()\n self.rxn9.check_attributes()\n self.rxn9.determine_family(rmg_database=self.rmgdb)\n self.assertEqual(self.rxn9.family.label, 'HO2_Elimination_from_PeroxyRadical')\n rxn_9_flipped = self.rxn9.flip_reaction()\n rxn_9_flipped.rmg_reaction_from_arc_species()\n rxn_9_flipped.check_attributes()\n rxn_9_flipped.determine_family(rmg_database=self.rmgdb)\n self.assertEqual(rxn_9_flipped.family.label, 'HO2_Elimination_from_PeroxyRadical')\n self.rxn10.determine_family(rmg_database=self.rmgdb)\n self.assertEqual(self.rxn10.family.label, 'H_Abstraction')\n rxn_1 = ARCReaction(r_species=[ARCSpecies(label='C2H6', smiles='CC'),\n ARCSpecies(label='CCOOj', smiles='CCO[O]')],\n p_species=[ARCSpecies(label='CCOOH', smiles='CCOO'),\n ARCSpecies(label='C2H5', smiles='C[CH2]')])\n rxn_1.check_attributes()\n rxn_1.determine_family(rmg_database=self.rmgdb)\n self.assertEqual(rxn_1.family.label, 'H_Abstraction')\n\n # Test identifying the reaction family for with zwitterions read from Arkane YAML files\n base_path = os.path.join(ARC_PATH, 'arc', 'testing', 'yml_testing', 'HNNO+NH3O=H2NO+NH2NO')\n rxn2 = ARCReaction(r_species=[ARCSpecies(label='HNNO', smiles='[O-][N+]=N', yml_path=os.path.join(base_path, 'HNNO.yml')),\n ARCSpecies(label='NH3O', smiles='[O-][NH3+]', yml_path=os.path.join(base_path, 'NH3O.yml'))],\n p_species=[ARCSpecies(label='H2NO', smiles='N[O]', yml_path=os.path.join(base_path, 'H2NO.yml')),\n ARCSpecies(label='NH2NO', smiles='NN=O', yml_path=os.path.join(base_path, 'NH2NO.yml'))])\n rxn2.determine_family(rmg_database=self.rmgdb)\n self.assertEqual(rxn2.family.label, 'H_Abstraction')",
"def test_get_end_roots(self):\n ends = self.family.get_end_roots()\n self.assertEquals(len(ends), 2)\n self.assertIn(self.family.groups.entries[\"Y_rad_out\"], ends)\n self.assertIn(self.family.groups.entries[\"XH_out\"], ends)",
"def test_subgroups(clean_raw_data):\n subgroup_names = subgroups(clean_raw_data)\n assert subgroup_names == ['spectrum1', 'spectrum2', 'spectrum3']",
"def test_retrieve_services_device_groups_device_group_device_group(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Obtain filename of the composition based on known program number and slug | def get_comp_filename(program_no, slug, lang):
curr_path= os.path.dirname(os.path.realpath(__file__)) # Save current path
result= {'comp_'+lang: '', 'mix_'+lang: '', 'seq_'+lang: ''}
# Try to get program number.
try:
no= "%05d" % int(program_no)
except:
return result # If program number can't be obtained, the rest doesn't make sense
# Try to comp filename.
os.chdir(conf.COMP_OUT)
comp_lang_file= glob.glob("%s*%s*%s.nk" % (no, slug, lang))
if comp_lang_file:
result.update({'comp_'+lang: comp_lang_file[0]})
# Try to get file of sound mix.
mix_lang_file= None
try:
os.chdir("%s%s_%s/sound" % (conf.ROOT_OUT, no, lang))
mix_lang_file= glob.glob("%s*%s*%s_mix.wav" % (no, slug, lang))
except:
pass
if mix_lang_file:
result.update({'mix_'+lang: mix_lang_file[0]})
# Try to get number of files in seq folder.
seq_lang= None
try:
path, dirs, files= os.walk('%s%s_%s/seq/' % (
conf.ROOT_OUT, no, lang)).next()
seq_lang= len(files)
except Exception as e:
print e
if seq_lang:
result.update({'seq_'+lang: seq_lang})
os.chdir(curr_path) # Restore current path
return result | [
"def program_name():\n return os.path.basename(sys.argv[0])",
"def get_filename(self):\n\n return \"-\".join([\n str(self.paper.module.code),\n str(self.paper.year_start),\n str(self.paper.year_stop),\n str(self.paper.sitting),\n PaperPDF.period_map[self.paper.period]]\n ) + \".pdf\"",
"def outputfilename(component):\n f = '%s-%s.out' % (component.__class__.__name__, component.componentname)\n return f",
"def psp_name(atomicnumber, exchange, kind):\n atom_symbol = str(atomic_symbol(atomicnumber))\n if kind == 'FHI' and exchange == 'LDA':\n filename = str(atomicnumber).zfill(2) + '-' + atom_symbol + '.LDA.fhi'\n elif kind == 'FHI' and exchange == 'GGA':\n filename = str(atomicnumber).zfill(2) + '-' + atom_symbol + '.GGA.fhi'\n elif kind == 'CORE' and exchange == 'LDA':\n filename = str(atomicnumber) + atom_symbol.lower() + '.1s_psp.mod'\n elif kind == 'GTH' and exchange == 'LDA':\n filename = str(atomicnumber).zfill(2) + atom_symbol.lower() + '.pspgth'\n elif kind == 'TM' and exchange == 'LDA':\n filename = str(atomicnumber) + atom_symbol.lower() + '.pspnc'\n elif kind == 'AE' and exchange == 'DEN':\n filename = '0.' + str(atomicnumber).zfill(2) + '-' + atom_symbol + '.8.density.AE'\n elif kind == 'FC' and exchange == 'DEN':\n filename = str(atomicnumber).zfill(2) + '-' + atom_symbol + '.8.fc'\n elif kind == 'PAW' and exchange == 'GGA':\n filename = 'JTH-PBE-atomicdata-0.2/' + atom_symbol + '.GGA_PBE-JTH.xml'\n elif kind == 'PAW' and exchange == 'LDA':\n filename = 'JTH-LDA-atomicdata-0.2/' + atom_symbol + '.LDA_PW-JTH.xml'\n elif kind == 'HGH' and exchange == 'GGA':\n filename = str(atomicnumber).zfill(2) + atom_symbol.lower() + '.pbe_hgh'\n elif kind == 'ONC' and exchange == 'PBE':\n filename = 'pbe_s_sr' + os.sep + atom_symbol + '.psp8'\n else:\n print('The combination of exchange=%s and kind=%s is not known' % (exchange, kind))\n filename = ''\n return filename",
"def get_output_name_primer_pair(primer_pair,\n output_dir):\n \n if not output_dir.endswith('/'):\n output_dir += '/'\n \n forward_name = basename(primer_pair[0]).split('_')[0] + \"_\"\n reverse_name = basename(primer_pair[1]).split('_')[0] + \"_\"\n \n amplicons_fp = output_dir + forward_name + reverse_name + \"amplicons.fasta\"\n \n return amplicons_fp",
"def get_output_basename(self):\n cumf_base_name = self.options[\"full_task_name\"]\n cumf_base_name = re.sub(r\"[() ]\", r\"_\", cumf_base_name)\n if cumf_base_name.endswith(\"_\"):\n cumf_base_name = cumf_base_name[:-1]\n return \"ana.\" + cumf_base_name",
"def getPaletteFileName(fn):\n dirname = os.path.dirname(fn)\n title = parseFilename(fn)[0]\n return os.path.join(dirname,title+\".pal\")",
"def file_path() -> str:\n\n try:\n file_name = sys.argv[1]\n except IndexError:\n file_name = SCORES_PATH\n return file_name",
"def _file2name(self, filename):\n rel_filename = re.sub('^{0}/'.format(self._content_root()),\n '', filename)\n fullname = os.path.splitext(rel_filename)[0]\n return fullname",
"def fname_halpha(galname):\r\n out_dir = '/Users/ryan/venus/shared_data/califa/DR3-Niu/%s' % (galname, )\r\n if len(glob.glob(out_dir)) == 0:\r\n out_dir = '/Users/ryan/venus/shared_data/califa/DR3-V500-Niu/%s' % (\r\n galname, )\r\n\r\n out_name = out_dir + '/halpha_reproj.pk'\r\n return out_name",
"def reaction_url_to_filename( url ):\n\n s = url.split( '/' )\n\n post_id = s[ 4 ]\n page = s[ -1 ].split( '=' )[ -1 ]\n\n filename = f'{post_id}_page-{page}.html'\n\n return filename",
"def filename(self) -> str:\n return os.path.splitext(\n os.path.basename(\n unquote(\n urlparse(\n self.original_url\n ).path\n )\n )\n )[0] + \".png\"",
"def filename_core (apath):\n if (apath is None): # sanity check\n return ''\n return os.path.basename(os.path.splitext(apath)[0])",
"def _query_to_filename(self, endpoint, query, page=1):\n slug = '{}-{}-{}-{}'.format(self.service, self.id, endpoint,\n query).lower()\n slug = re.sub('[^a-zA-Z0-9\\-_\\.]', '-', slug)\n slug = re.sub('[^a-zA-Z0-9]+$', '', slug)\n slug = slug + '.json'\n return slug",
"def __createFileName(self, application): #pylint: disable=too-many-branches\n # TODO: Make sure basename is correct. Maybe allow for setting basename prefix\n # Final name being e.g. NAME_rec.slcio, need to define NAME, maybe based on meta data (include\n # EvtClass automatically)\n if not self.basename:\n if 'ILDConfigVersion' in self.prodparameters:\n if application.appname in ( 'mokka', 'ddsim' ): # sim\n self.basename = 's' + self.prodparameters['ILDConfigVersion']\n elif application.appname == 'marlin': # reco\n self.basename = 'r' + self.prodparameters['ILDConfigVersion']\n self.basename += '.s' + self.compatmeta['ILDConfig']\n # we dont need this tag in stdhep's: metadata search will fail\n # if not present\n elif application.appname == 'stdhepsplit':\n self.compatmeta.pop( 'SoftwareTag', None )\n self._reportError( \"Drop 'SoftwareTag' from metadata: not needed for stdhepsplit app\" )\n # need extension if planning to use additional modules (LCIOSplit)\n else:\n if application.datatype not in ( 'gen', 'gensplit'): # for stdhepsplit we dont need to return\n self._reportError(\" Printing metadata before exit:\")\n pprint.pprint( self.compatmeta )\n pprint.pprint( self.prodparameters )\n return self._reportError( \"'ILDConfigVersion' should be defined to build the path\")\n\n if 'DetectorModel' in self.compatmeta:\n self.basename += '.m' + self.compatmeta['DetectorModel']\n elif self.detector:\n self.basename += '.m' + self.detector\n\n if self.energy:\n self.basename += '.' if self.basename else ''\n self.basename += 'E' + str( self.energy )\n\n if 'MachineParams' in self.compatmeta:\n self.basename += '-' + self.compatmeta['MachineParams']\n\n if 'GenProcessID' in self.compatmeta:\n self.basename += '.I' + str( self.compatmeta['GenProcessID'] )\n elif 'ProcessID' in self.compatmeta:\n self.basename += '.I' + str( self.compatmeta['ProcessID'] )\n\n if 'GenProcessName' in self.compatmeta:\n self.basename += '.P' + self.compatmeta['GenProcessName']\n elif self.genprocname:\n self.basename += '.P' + self.genprocname\n else:\n return self._reportError( \"GenProcessName is missing! It should appear in the basename\")\n\n ##always use e and p for beam polarisation fields\n self.basename += '.e%s' % self.compatmeta.get( 'PolarizationB1', '' )\n self.basename += '.p%s' % self.compatmeta.get( 'PolarizationB2', '' )\n\n return S_OK()",
"def create_filename(self, title):\n slug = slugify(title)\n if slug in self.slugs:\n slug = slug + '_'\n if len(slug) > 100:\n slug = slug[0:100]\n self.slugs.append(slug)\n return slug + '.html'",
"def cipFileName(self):\n p = os.path.splitext(self.cctFileName().strip())[0] + '.cip'\n return p",
"def get_filename(self, migration):\n return os.path.join(self.directory, '{}{}'.format(migration, self.ext))",
"def getFilename(self, frameNum):\n\t\treturn self.format % (self.dirname, self.frameName, frameNum)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fill language specific data for a Celebrity | def fill_lang_data(slug, lang):
celeb_lang_uri= '%s%s/%s/?all=1' % (conf.CELEBRITY_URI_TEMPL, slug, lang)
try:
celeb_lang_request= HttpObject().get_data_from_http_response('GET',
conf.HOST, celeb_lang_uri)
except Exception as e:
print "WARNING! Cannot get script data from http://%s%s\nThe error is: %s" % (
conf.HOST, celeb_lang_uri, e)
return None
celeb_lang_data= celeb_lang_request['celebrity'][lang]
script= celeb_lang_data.pop('script')
# Rename keys.
celeb_lang_data['name_'+lang]= celeb_lang_data.pop('name')
celeb_lang_data['duration_'+lang]= celeb_lang_data.pop('total_dur')
celeb_lang_data['scenes_'+lang]= celeb_lang_data.pop('total_scenes')
celeb_lang_data['user_'+lang]= celeb_lang_data.pop('user')
return celeb_lang_data | [
"def fill_language_data(lang, fields):\r\n lang.code_aliases = fields['code_aliases']\r\n lang.name = fields['name']\r\n lang.description = fields['description']\r\n lang.specialchars = fields['specialchars']\r\n lang.nplurals = fields['nplurals']\r\n lang.pluralequation = fields['pluralequation']\r\n lang.rule_zero = fields['rule_zero']\r\n lang.rule_one = fields['rule_one']\r\n lang.rule_two = fields['rule_two']\r\n lang.rule_few = fields['rule_few']\r\n lang.rule_many = fields['rule_many']\r\n lang.rule_other = fields['rule_other']\r\n lang.save()",
"def fill_celebrity_list(celebrity_list):\n for celebrity in celebrity_list:\n print '...processing %s' % celebrity['slug']\n celeb_langs_uri= '%s%s/?all=1' % (conf.CELEBRITY_URI_TEMPL, celebrity['slug'])\n\n # Data obtained from the db\n try:\n celeb_langs_request= HttpObject().get_data_from_http_response('GET',\n conf.HOST, celeb_langs_uri)\n except Exception as e:\n print \"WARNING! Cannot get the list of languages from http://%s%s\\nThe error is: %s\" % (\n conf.HOST, celeb_langs_uri, e)\n return None\n\n # Try to obtain data about program and number in it\n prog_data= fill_prog_data(celebrity['slug'])\n celebrity.update(prog_data)\n \n if celeb_langs_request['status'] == 'OK':\n langs= celeb_langs_request['celebrity']['language']\n slug= celebrity['slug']\n for lang in langs:\n celebrity.update({'completed_'+lang['title']: lang['completed']})\n celebrity_lang_details= fill_lang_data(slug, lang['title'])\n if celebrity_lang_details:\n celebrity.update(celebrity_lang_details)\n\n # Data obtained from filesystem\n\n celebrity_voice_data= fill_voice_data(slug, lang['title'])\n if celebrity_voice_data:\n celebrity.update(celebrity_voice_data)\n\n # Try to get filename of the composition\n comp_filename= get_comp_filename(\n prog_data['program_no'], celebrity['slug'], lang['title'])\n celebrity.update(comp_filename)\n\n else:\n print \"ERROR! The response from http://%s%s is %s\" % (\n conf.HOST, celeb_lang_uri, celeb_request['celebrity'])\n return None\n\n return celebrity_list",
"def update(self,language):\n\n for key, value in language.items():\n self.language[key] = value",
"def language_register(df):\n df['training language'] = ['None' if e == 'None' else corpora.language(e)\n for e in df['training set']]\n df['test language'] = ['None' if e == 'None' else corpora.language(e)\n for e in df['test set']]\n df['training register'] = ['None' if e == 'None' else corpora.register(e)\n for e in df['training set']]\n df['test register'] = ['None' if e == 'None' else corpora.register(e)\n for e in df['test set']]\n return df",
"def populate_languages(db: SQLAlchemy):\n resp = translator.get_languages()\n\n for short in resp:\n lang = Language()\n lang.microsoft_name = short\n try:\n lang.name = MICROSOFT_SHORT_TO_FULL_LANGUAGE_STRING[short]\n except KeyError:\n # Maybe microsoft added a new language. Like Fijian.\n continue\n\n # That's pbbly not 100% accurate and some short names contain hyphens\n # This wont work with android resource naming but eh.\n lang.icon_name = short\n db.session.add(lang)\n\n db.session.commit()",
"def test_load_local_data__languages(self):\n services = {'Abbr1': 'Service 1', 'Abbr2': 'Service 2'}\n languages = {'Lang1': 'Language 1', 'Lang2': 'Language 2', 'Lang3': 'Language 3'}\n self.cmd.languages = languages\n self.cmd.services = services\n self.cmd.save_data()\n self.cmd.services = None\n self.cmd.languages['Lang4'] = 'Language 4'\n self.cmd.load_local_data()\n self.assertTrue(len(self.cmd.languages) == 4)\n self.assertTrue('Lang4' in self.cmd.languages)\n self.assertTrue(len(self.cmd.services) == 2)",
"def load_initial_values(self):\n languages = self.storage.get_languages()\n for lang, folder in languages:\n self.langs[lang] = folder\n self.languagesBox.addItem(lang)",
"def set_language_properties(self,iSurveyID,aSurveyLocaleData,sLanguage=None):",
"def country(self, name):\n self.data, self.flag = self.get_content(name)",
"def __init__(self, language, symbols):\r\n self.language = LanguageWrapper(language)\r\n self.symbolData = symbols\r\n for symbolData in self.symbolData:\r\n symbolData.setLanguage(self.language)",
"def convert(language='c'):",
"def language_changed(self):\n self.languagesBox.clear()\n self.langs = {}\n self.load_initial_values()",
"def load(self,language):\n\n self.language = language",
"def add_language(self,iSurveyID,sLanguage):",
"def __init__(self,collection):\n\n self.language = {}\n self.collection = collection",
"def augment_csdata(self) -> None:",
"def test_language_translation_translate_deu_to_eng(self):\n pass",
"def setup_data(self, domain=None):\n\t\tself.data = frappe.get_domain_data(self.name)",
"def load_wiktionary(configuration, verbose=0):\n\n df = pandas.read_csv(configuration['wiktionary_translations_path'],\n sep='\\t', usecols=['ID', 'Concept_ID', 'Concept', 'Languoid', 'Language_name', 'Form'])\n\n\n if verbose:\n print()\n print('number of available languages', len(set(df.Language_name)))\n print('language that have Dutch in the name')\n for language in set(df.Language_name):\n if 'Dutch' in language:\n print(language)\n print('we use: Dutch; Flemish')\n\n df = df[df.Language_name == 'Dutch; Flemish']\n\n english_lemmas = []\n english_definitions = []\n\n for index, row in df.iterrows():\n concept = row['Concept']\n lemma, *definitions = concept.split('/')\n english_lemmas.append(lemma)\n english_definitions.append('/'.join(definitions))\n\n df['English_lemma'] = english_lemmas\n\n dutch2english = defaultdict(set)\n english2dutch = defaultdict(set)\n\n for index, row in df.iterrows():\n english_lemma = row['English_lemma']\n dutch_lemma = row['Form']\n dutch2english[dutch_lemma].add(english_lemma)\n english2dutch[english_lemma].add(dutch_lemma)\n\n if verbose:\n print(f'Dutch lemmas with English translations: {len(dutch2english)}')\n print(f'English lemmas with Dutch translations: {len(english2dutch)}')\n\n return dutch2english, english2dutch"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fill the list of Celebrities with language specific data | def fill_celebrity_list(celebrity_list):
for celebrity in celebrity_list:
print '...processing %s' % celebrity['slug']
celeb_langs_uri= '%s%s/?all=1' % (conf.CELEBRITY_URI_TEMPL, celebrity['slug'])
# Data obtained from the db
try:
celeb_langs_request= HttpObject().get_data_from_http_response('GET',
conf.HOST, celeb_langs_uri)
except Exception as e:
print "WARNING! Cannot get the list of languages from http://%s%s\nThe error is: %s" % (
conf.HOST, celeb_langs_uri, e)
return None
# Try to obtain data about program and number in it
prog_data= fill_prog_data(celebrity['slug'])
celebrity.update(prog_data)
if celeb_langs_request['status'] == 'OK':
langs= celeb_langs_request['celebrity']['language']
slug= celebrity['slug']
for lang in langs:
celebrity.update({'completed_'+lang['title']: lang['completed']})
celebrity_lang_details= fill_lang_data(slug, lang['title'])
if celebrity_lang_details:
celebrity.update(celebrity_lang_details)
# Data obtained from filesystem
celebrity_voice_data= fill_voice_data(slug, lang['title'])
if celebrity_voice_data:
celebrity.update(celebrity_voice_data)
# Try to get filename of the composition
comp_filename= get_comp_filename(
prog_data['program_no'], celebrity['slug'], lang['title'])
celebrity.update(comp_filename)
else:
print "ERROR! The response from http://%s%s is %s" % (
conf.HOST, celeb_lang_uri, celeb_request['celebrity'])
return None
return celebrity_list | [
"def allCountries():",
"def populate_languages(db: SQLAlchemy):\n resp = translator.get_languages()\n\n for short in resp:\n lang = Language()\n lang.microsoft_name = short\n try:\n lang.name = MICROSOFT_SHORT_TO_FULL_LANGUAGE_STRING[short]\n except KeyError:\n # Maybe microsoft added a new language. Like Fijian.\n continue\n\n # That's pbbly not 100% accurate and some short names contain hyphens\n # This wont work with android resource naming but eh.\n lang.icon_name = short\n db.session.add(lang)\n\n db.session.commit()",
"def load_initial_values(self):\n languages = self.storage.get_languages()\n for lang, folder in languages:\n self.langs[lang] = folder\n self.languagesBox.addItem(lang)",
"def fill_lang_data(slug, lang):\n celeb_lang_uri= '%s%s/%s/?all=1' % (conf.CELEBRITY_URI_TEMPL, slug, lang)\n try:\n celeb_lang_request= HttpObject().get_data_from_http_response('GET',\n conf.HOST, celeb_lang_uri)\n except Exception as e:\n print \"WARNING! Cannot get script data from http://%s%s\\nThe error is: %s\" % (\n conf.HOST, celeb_lang_uri, e)\n return None\n celeb_lang_data= celeb_lang_request['celebrity'][lang]\n script= celeb_lang_data.pop('script')\n\n # Rename keys.\n celeb_lang_data['name_'+lang]= celeb_lang_data.pop('name')\n celeb_lang_data['duration_'+lang]= celeb_lang_data.pop('total_dur')\n celeb_lang_data['scenes_'+lang]= celeb_lang_data.pop('total_scenes')\n celeb_lang_data['user_'+lang]= celeb_lang_data.pop('user')\n\n return celeb_lang_data",
"def world_cups():\n return [(\"Germany\", 2006, \"Italy\"), (\"South-Africa\", 2010, \"Spain\"), (\"Brazil\", 2014, \"Germany\")]",
"def language_register(df):\n df['training language'] = ['None' if e == 'None' else corpora.language(e)\n for e in df['training set']]\n df['test language'] = ['None' if e == 'None' else corpora.language(e)\n for e in df['test set']]\n df['training register'] = ['None' if e == 'None' else corpora.register(e)\n for e in df['training set']]\n df['test register'] = ['None' if e == 'None' else corpora.register(e)\n for e in df['test set']]\n return df",
"def getCityNameList(self):\n pass",
"def fill_language_data(lang, fields):\r\n lang.code_aliases = fields['code_aliases']\r\n lang.name = fields['name']\r\n lang.description = fields['description']\r\n lang.specialchars = fields['specialchars']\r\n lang.nplurals = fields['nplurals']\r\n lang.pluralequation = fields['pluralequation']\r\n lang.rule_zero = fields['rule_zero']\r\n lang.rule_one = fields['rule_one']\r\n lang.rule_two = fields['rule_two']\r\n lang.rule_few = fields['rule_few']\r\n lang.rule_many = fields['rule_many']\r\n lang.rule_other = fields['rule_other']\r\n lang.save()",
"def test_load_local_data__languages(self):\n services = {'Abbr1': 'Service 1', 'Abbr2': 'Service 2'}\n languages = {'Lang1': 'Language 1', 'Lang2': 'Language 2', 'Lang3': 'Language 3'}\n self.cmd.languages = languages\n self.cmd.services = services\n self.cmd.save_data()\n self.cmd.services = None\n self.cmd.languages['Lang4'] = 'Language 4'\n self.cmd.load_local_data()\n self.assertTrue(len(self.cmd.languages) == 4)\n self.assertTrue('Lang4' in self.cmd.languages)\n self.assertTrue(len(self.cmd.services) == 2)",
"def get_published_languages(self):",
"def set_municipalities(self):\n municipalities_raw = self.raw_data[\"KOMMUN\"].split(\",\")\n for municipality in municipalities_raw:\n municipality = municipality.strip()\n if municipality == \"Malung\": # Changed in 2007.\n municipality = \"Malung-Sälen\"\n elif municipality == \"Göteborg\":\n municipality = \"Gothenburg\"\n\n municipality_long = municipality.lower() + \" municipality\"\n m_item = [x[\"item\"] for\n x in self.municipalities\n if x[\"en\"].lower() == municipality_long]\n self.add_statement(\"located_adm\", m_item[0])",
"def setUp(self):\n if not self.all_countries:\n print(\"Loading all countries...\")\n country_names = CountryInfo().all()\n for name in country_names:\n country = CountryInfo(name)\n self.all_countries[name] = country",
"def update_languages(self) -> None:\n logger.info(\"Updating language list\")\n\n for lang in import_json(LANGUAGE_JSON_PATH):\n language_obj = Language.objects.filter(name=lang[\"name\"]).first()\n if language_obj is not None:\n logger.info(\"Updating language: %s\", lang[\"name\"])\n self.increment_updated(\"Language\")\n else:\n logger.info(\"Creating new language: %s\", lang[\"name\"])\n language_obj = Language(name=lang[\"name\"])\n self.increment_created(\"Language\")\n\n language_obj.code = lang[\"code\"]\n language_obj.full_clean()\n language_obj.save()\n\n logger.info(\"Language update complete\")",
"def cultures(inSelf):\n # la liste des cultures n'est pas un attribut il faut donc la créer d'abord\n # on aurait pu définir un attribut _lstCulture et créer une procédure pour remplir la liste\n lstCulture = []\n for parcelle in inSelf._lstParcelles:\n culture = parcelle.culture()\n existe = False \n\n # plusieurs parcelles peuvent avoir la même culture il faut vérifier que la culture traitée n'est pas déjà présente\n for cultureClasse in lstCulture: \n if culture == cultureClasse:\n existe = True\n \n if existe == False:\n lstCulture.append(culture.nom())\n\n return lstCulture",
"def test_countries_list(self):\n pass",
"def languages(dic, languageList):\n (name, id) = getRand(languageList)\n # Finds if the skill is a duplicate and\n # adds the element if it is not a duplicate\n if(not ignoreDuplicate(dic, id)):\n nameIns = {\"name\" : name}\n newIn = {\"id\": id, \"language\": nameIns}\n addToDic(dic, newIn)",
"def _load_languages():\n choices = [(k, v) for k, v in sorted(LANGUAGES.items()) if v in AVAILABLE_MODELS]\n print(f\"Loading languages: {', '.join([i[0] for i in choices])}...\")\n for longname, short in choices:\n try:\n Language(name=longname, short=short).save()\n except IntegrityError:\n pass",
"def prepare_languages(self, instance):\n if instance.languages.count():\n return [lang.name for lang in instance.languages.all()]\n return [\"[no language]\"]",
"def __init__(self,collection):\n\n self.language = {}\n self.collection = collection"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
V.SetPoint(float, float, float) > int | def SetPoint(self, p_float, p_float_1, p_float_2):
... | [
"def setPoint(self, *args) -> \"void\":\n return _coin.SoPrimitiveVertex_setPoint(self, *args)",
"def SetVectorVariableValue(self, string, p_float, p_float_1, p_float_2):\n ...",
"def SetPoint(self, *args):\n return _itkPointSetPython.itkPointSetD2S_SetPoint(self, *args)",
"def SetPoint(self, *args):\n return _itkPointSetPython.itkPointSetD3S_SetPoint(self, *args)",
"def pointAtParm(*args, **kwargs):\n \n pass",
"def SetPoint(self, *args):\n return _itkPointSetPython.itkPointSetD2Q_SetPoint(self, *args)",
"def set(self, x, y, v):\n self.data[(y * self.sx) + x] = v",
"def getSetPoint(self):\n return self.setPoint",
"def SetWorldPoint(self, p_float, p_float_1, p_float_2, p_float_3):\n ...",
"def SetPoint(self, *args):\n return _itkPointSetPython.itkPointSetD3Q_SetPoint(self, *args)",
"def SetPoint(self, *args):\n return _itkPointSetPython.itkPointSetD2D_SetPoint(self, *args)",
"def SetPoint(self, *args):\n return _itkPointSetPython.itkPointSetD3D_SetPoint(self, *args)",
"def setVX(self, val):\n assert isinstance(val, int) or isinstance(val,float), 'val is not a number'\n \n self._vx = val",
"def value(self, v):\n if not self.write_access:\n _LOGGER.warning(\"{} [{}] is read only.\".format(self.ename, self.name))\n return\n # wrap phase value to -180, 180.\n if self.name in ('PHA', 'PHA1', 'PHA2', 'PHA3', 'PHASE', 'PHASE1', 'PHASE2', 'PHASE3'):\n v = wrap_phase(v)\n #\n self.write_policy(self.setpoint_pv, v, timeout=self.timeout,\n wait=self.wait)",
"def setValue(self, p0: 'SbVec3d', p1: 'SbVec3d') -> \"void\":\n return _coin.SbDPLine_setValue(self, p0, p1)",
"def test_set_final_point_fails_if_scalar():\n point = 42\n v.final_point = point",
"def updatePoints(self, x, y):",
"def set_point(agent, point_name, value):\n return agent.vip.rpc.call('test_IEEE2030_5agent', 'set_point', DEVICE_ID, point_name, value).get(timeout=10)",
"def op_point(self, v: dict) -> dict:\n\n vds = self.polarity * (v['d'] - v['s'])\n vds = min(vds, 1.0)\n vgs = self.polarity * (v['g'] - v['s'])\n vgs = min(vgs, 1.0)\n vov = vgs - self.vth\n\n reversed = bool(vds < 0)\n if reversed: vds = -1 * vds\n\n if vov <= 0: # Cutoff\n mode = 'CUTOFF'\n ids = 0\n gm = 0\n gds = 0\n elif vds >= vov: # Saturation\n mode = 'SAT'\n ids = self.beta / 2 * (vov ** 2) * (1 + self.lam * vds)\n gm = self.beta * vov * (1 + self.lam * vds)\n gds = self.lam * self.beta / 2 * (vov ** 2)\n else: # Triode\n mode = 'TRIODE'\n ids = self.beta * ((vov * vds) - (vds ** 2) / 2) * (1 + self.lam * vds)\n gm = self.beta * vds * (1 + self.lam * vds)\n gds = self.beta * ((vov - vds) * (1 + self.lam * vds) + self.lam * ((vov * vds) - (vds ** 2) / 2))\n\n rds = np.NaN if gds == 0 else 1 / gds\n d_ = {\"ids\": ids, \"gds\": gds, \"gm\": gm, \"rds\": rds, \"mode\": mode, 'rev': reversed}\n # print(f'Op Point: {d_}')\n return d_"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests if adding an argument that already exists to a parser raises an AlreadyAddedArgumentException | def test_add_argument_to_cli_parser_that_already_exist_raise_an_exception():
parser_manager = RootConfigParsingManager()
parser_manager.add_argument('a')
with pytest.raises(AlreadyAddedArgumentException):
parser_manager.add_argument('a')
with pytest.raises(AlreadyAddedArgumentException):
parser_manager.add_argument('help')
assert len(parser_manager.cli_parser.arguments) == 3 # help argument + a argument | [
"def is_argparse_add_argument(node):\n return (\n isinstance(node, Expr)\n and isinstance(node.value, Call)\n and isinstance(node.value.func, Attribute)\n and node.value.func.attr == \"add_argument\"\n and isinstance(node.value.func.value, Name)\n and node.value.func.value.id == \"argument_parser\"\n )",
"def add_argument(self, *args, **kwargs):\n if self.__parser is None:\n raise AttributeError(\"Already parsed\")\n else:\n self.__parser.add_argument(*args, **kwargs)",
"def add_argument(self, *args, **kwargs):\n if not args or len(args) == 1 and args[0][0] not in self.prefix_chars:\n raise ValueError(\"Positional arguments are not allowed! Defining them could mess up grid running!\")\n return super(AthenaArgumentParser, self).add_argument(*args, **kwargs)",
"def test_add_arguments_with_two_long_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('coco', 'dodo')\n\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('ddddd', 'plplp')\n\n assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # -a, --help, -h and sub\n\n assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']\n assert root_config_parsing_manager.cli_parser.short_arg == 'ha'",
"def test_add_arguments_with_two_short_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('c', 'd')\n\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('t', 's')\n\n assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # --help, -h and sub\n\n assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']\n assert root_config_parsing_manager.cli_parser.short_arg == 'ha'",
"def test_add_subgroup_parser_that_already_exists_raises_an_exception_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='toto')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('toto', subparser)\n\n repeated_subparser = SubgroupConfigParsingManager('titi')\n repeated_subparser.add_argument('n', 'name')\n\n with pytest.raises(AlreadyAddedSubparserException):\n parser_manager.add_subgroup_parser('toto', repeated_subparser)",
"def ensure_arg(args, arg, param=None):\n for idx, found_arg in enumerate(args):\n if found_arg == arg:\n if param is not None:\n args[idx + 1] = param\n return args\n\n args.append(arg)\n if param is not None:\n args.append(param)\n return args",
"def test_add_to_phaselist_raises(self):\n pl = PhaseList(names=[\"a\"])\n with pytest.raises(ValueError, match=\"'a' is already in the phase list\"):\n pl.add(Phase(\"a\"))",
"def existing(self, name):\n return name in self._arg_tree",
"def add_arg(self, name, alias=None, **attributes):\n if name in self._positional_args or name in self._optional_args:\n raise DuplicateArgError(name)\n\n new_arg = _Argument(alias, **attributes)\n\n if name[0] is '-':\n self._optional_args.append(name)\n else:\n self._positional_args.append(name)\n if new_arg.required is True:\n self._required_args.append(name)\n self._args[name] = new_arg",
"def add_arg(self, *args, **kwargs) -> argparse.Action:\n return self.parser.add_argument(*args, **kwargs)",
"def _check_arg_consistency(datasets):\n specified = False\n assigned_args = []\n for dataset in datasets:\n if dataset.get('arg'):\n if dataset['arg'] in assigned_args:\n raise ArgError(f'arg {dataset[\"arg\"]} already assigned.')\n assigned_args.append(dataset['arg'])\n specified = True\n if specified and not dataset.get('arg'):\n raise ArgError",
"def add_argument_cmd(self, *args, **kwargs):\n pass",
"def test_add_subgroup_parser_without_name_argument_raise_an_exception_in_root_parsing_manager():\n parser = RootConfigParsingManager()\n subparser = SubgroupConfigParsingManager('titi')\n\n with pytest.raises(SubgroupParserWithoutNameArgumentException):\n parser.add_subgroup_parser('toto', subparser)",
"def test_version_add_error_already_exists(self):\n rv, output = self.execute(\n 'version add 1.0 \"%s\"' % self._test_date)\n self.assertEqual(2, rv, output)\n self.assertExpectedResult(output)",
"def test_add_duplicate_alias(self):\n cmd = BaseCommand()\n cmd.name = 'cmd'\n cmd.aliases = ['l', 't']\n self.table.add_command(cmd)\n\n cmd2 = BaseCommand()\n cmd2.name = 'cmd2'\n cmd2.aliases = ['g', 't']\n # This is a duplicate, should raise exception.\n self.assertRaises(DuplicateCommandException, self.table.add_command, cmd2)",
"def test_missing_arg(self):\n parser, config_dict = set_args()\n with self.assertRaises(SystemExit):\n args = parser.parse_args(self.cmd_args[9])",
"def test_check_shared_args(self):\n parser = argparse.ArgumentParser()\n add_shared_args(parser)\n parser.parse_args(['--version', '-v'])\n check_shared_args(parser.parse_args(['-v']))\n self.assertRaises(SystemExit, check_shared_args, parser.parse_args(['--version']))",
"def add_argument(self, argument):\n self.arguments.insert_argument(argument)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that adding flag arguments with a short name to the cli parser modifies the short_arg string | def test_add_flag_arguments_with_short_name_to_cli_parser():
parser_manager = RootConfigParsingManager()
assert parser_manager.cli_parser.short_arg == 'h'
parser_manager.add_argument('a', is_flag=True)
parser_manager.add_argument('x', is_flag=True)
assert parser_manager.cli_parser.short_arg == 'hax' | [
"def test_add_flag_arguments_and_no_flag_arguments_with_short_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.short_arg == 'h'\n parser_manager.add_argument('a', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'ha'\n parser_manager.add_argument('b')\n assert parser_manager.cli_parser.short_arg == 'hab:'\n parser_manager.add_argument('c', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'hab:c'\n\n assert len(parser_manager.cli_parser.long_arg) == 1 # Only help is arg argument\n\n assert parser_manager.cli_parser.long_arg == [\"help\"] # Only help is arg argument",
"def test_add_flag_arguments_with_long_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.long_arg == ['help']\n parser_manager.add_argument('aaa', is_flag=True)\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa']\n parser_manager.add_argument('tttt', is_flag=True)\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa', 'tttt']\n\n assert parser_manager.cli_parser.short_arg == 'h'",
"def test_add_arguments_with_long_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.long_arg == ['help']\n parser_manager.add_argument('aaa')\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa=']\n parser_manager.add_argument('xx')\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa=', 'xx=']\n\n assert parser_manager.cli_parser.short_arg == 'h'",
"def test_arguments_string_parsing_with_long_and_short_names_in_root_parsing_manager(root_config_parsing_manager):\n root_config_parsing_manager.add_argument('c', 'coco')\n root_config_parsing_manager.add_argument('d', 'xx', argument_type=int)\n\n check_parse_cli_result(root_config_parsing_manager, '-c 1', {'coco': '1'})\n\n check_parse_cli_result(root_config_parsing_manager, '-d 555', {'xx': 555})",
"def test_arg() -> None:\n parser = arg_parser()\n parsed = parser.parse_args(\n [\"--test\", \"test_name\", \"-n\", \"52\", \"--tool\", \"cwltool\", \"-j\", \"4\"]\n )\n assert parsed.test == \"test_name\"\n assert parsed.n == \"52\"\n assert parsed.tool == \"cwltool\"\n assert parsed.j == 4",
"def parse_flags_simple(argv):\n parser = argparse_flags.ArgumentParser(\n description='A simple example of argparse_flags.')\n parser.add_argument(\n '--argparse_echo', help='The echo message from argparse_flags')\n return parser.parse_args(argv[1:])",
"def test_parsing_of_arguments_string_with_subgroup_parser_with_long_and_short_arguments_names_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='sub')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('a', 'aaa', is_flag=True, action=store_true, default_value=False)\n subparser.add_argument('c', 'ttt', is_flag=False, action=store_val, argument_type=int)\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('sub', subparser)\n check_parse_cli_result(parser_manager, '--sub titi -a --name tutu -c 15',\n {'sub': {'tutu': {'aaa': True, 'type': 'titi', 'ttt': 15}}})",
"def test_add_arguments_with_two_short_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('c', 'd')\n\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('t', 's')\n\n assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # --help, -h and sub\n\n assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']\n assert root_config_parsing_manager.cli_parser.short_arg == 'ha'",
"def test_string_argument_parsing():\n arguments = [\n {\n \"name\": \"firstname\",\n \"type\": \"str\",\n \"default\": \"Allysa P. Hacker\",\n },\n ]\n parser = reading.build_template_argparser(arguments)\n values = parser.parse_args([\"--firstname\", \"john\"])\n assert values.firstname == \"john\"",
"def add_simple_args(self):\n self.ctrl_parser.add_argument(\"-V\", \"--version\", action=\"version\", version='0.1.0',\n help='Provides the version of the tool')\n self.ctrl_parser.add_argument(\"-v\", \"--verbosity\", action=\"count\", help=\"increase output verbosity\")\n self.ctrl_parser.add_argument(\"-i\", action=InteractiveCli, nargs=0, help=\"Start in interactive mode\")\n self.ctrl_parser.add_argument(\"-t\", \"--timeout\", type=float,\n help=\"Provides a timeout for the command\")",
"def test_add_argument_show(self):\n self.subcommand_class = self.commands.get('classificationobject-show')\n parser = argparse.ArgumentParser(description='parser for arguments')\n parser = self.subcommand_class().add_known_arguments(parser)\n args = parser.parse_known_args()\n arg1 = parser.parse_args()\n self.assertEqual(arg1.id, None)",
"def test_cli_argparse_works(self):\n with tests.working_directory(self.builtdir):\n # move package module out of src to top-level to prevent path error\n # otherwise, test would need to install Pipenv for template\n shutil.move(os.path.join('src', package_name), '.')\n # run default unit tests in built template and check results\n cli_entry_point = command_line_interface_bin_name\n cli_arg = 'arg'\n result = subprocess.check_output(\n shlex.split(\n 'python -m {}.{} {} -h'.format(\n package_name, script_name, cli_entry_point\n )\n )\n )\n self.assertTrue(cli_arg in str(result))",
"def setShort(*args, **kwargs):\n \n pass",
"def test_short_arguments():\n class MyForm(Form):\n short_arguments = dict(a='alpha', b='bravo', c='echo')\n\n @staticmethod\n def get_short_arguments():\n return dict(c='charlie', d='delta')\n\n alpha = StringField()\n bravo = StringField()\n charlie = StringField()\n delta = StringField()\n\n harness = Harness(MyForm)\n for name in ('alpha', 'bravo', 'charlie', 'delta'):\n assert name in harness\n harness[name].assert_short_name(name[0])\n\n expected = dict(alpha=None, bravo=None, charlie=None, delta=None)\n assert harness.result_for() == expected\n\n args = ('-aecho', '-bgolf', '-ckilo', '-dlima')\n expected = dict(alpha='echo', bravo='golf', charlie='kilo', delta='lima')\n assert harness.result_for(*args) == expected",
"def test_add_arguments_with_two_long_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('coco', 'dodo')\n\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('ddddd', 'plplp')\n\n assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # -a, --help, -h and sub\n\n assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']\n assert root_config_parsing_manager.cli_parser.short_arg == 'ha'",
"def test_hensonclidirective_sets_parser(test_directive):\n test_directive.prepare_autoprogram()\n assert test_directive.arguments == ('henson.cli:parser',)",
"def expand_flag(flag: str) -> str:\n flag = flag.replace(\"-\", \"_\")\n if flag not in FLAG_SHORT_TO_LONG.values():\n try:\n longflag = FLAG_SHORT_TO_LONG[flag]\n except KeyError:\n raise HERAError(\"Unrecognized flag: `{}`.\".format(flag))\n else:\n return \"flag_\" + longflag\n else:\n return \"flag_\" + flag",
"def test_add_shared_args(self):\n parser = argparse.ArgumentParser()\n add_shared_args(parser)\n args = parser.parse_args(['--version', '-v'])\n self.assertTrue(args.version)\n self.assertTrue(args.verbose)",
"def test_add_Argument_with_valid_parser(self):\n# pdb.set_trace()\n self.subcommand_class = self.commands.get('classificationobject-create')\n parser = argparse.ArgumentParser(description='parser for classificationobject arguments')\n parser = self.subcommand_class().add_known_arguments(parser)\n args = parser.parse_known_args()\n arg1 = parser.parse_args()\n print(\"arg1 in test_add_Argument_with_valid_parser:\", arg1)\n self.assertEqual(arg1.name,'test')\n self.assertEqual(arg1.src_ip,None)\n self.assertEqual(arg1.src_mac,None) \n self.assertEqual(arg1.src_ip_subnet,None)\n self.assertEqual(arg1.minimum_src_port,None)\n self.assertEqual(arg1.maximum_src_port,None)\n self.assertEqual(arg1.dst_ip,None)\n self.assertEqual(arg1.dst_mac,None)\n self.assertEqual(arg1.dst_ip_subnet,None)\n self.assertEqual(arg1.minimum_dst_port,None)\n self.assertEqual(arg1.maximum_dst_port,None)\n self.assertEqual(arg1.protocol,None)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test if adding arguments (flag and no flag) to the parser modifies the short_arg string. The long_arg list is not changed | def test_add_flag_arguments_and_no_flag_arguments_with_short_name_to_cli_parser():
parser_manager = RootConfigParsingManager()
assert parser_manager.cli_parser.short_arg == 'h'
parser_manager.add_argument('a', is_flag=True)
assert parser_manager.cli_parser.short_arg == 'ha'
parser_manager.add_argument('b')
assert parser_manager.cli_parser.short_arg == 'hab:'
parser_manager.add_argument('c', is_flag=True)
assert parser_manager.cli_parser.short_arg == 'hab:c'
assert len(parser_manager.cli_parser.long_arg) == 1 # Only help is arg argument
assert parser_manager.cli_parser.long_arg == ["help"] # Only help is arg argument | [
"def test_add_flag_arguments_with_long_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.long_arg == ['help']\n parser_manager.add_argument('aaa', is_flag=True)\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa']\n parser_manager.add_argument('tttt', is_flag=True)\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa', 'tttt']\n\n assert parser_manager.cli_parser.short_arg == 'h'",
"def test_add_flag_arguments_with_short_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.short_arg == 'h'\n parser_manager.add_argument('a', is_flag=True)\n parser_manager.add_argument('x', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'hax'",
"def remove_long_internal():\n try:\n index = args.index(long_option_name)\n # Handle the exact match case.\n if takes_arg:\n removal_count = 2\n else:\n removal_count = 1\n del args[index:index+removal_count]\n return True\n except ValueError:\n # Thanks to argparse not handling options with known arguments\n # like other options parsing libraries (see\n # https://bugs.python.org/issue9334), we need to support the\n # --results-formatter-options={second-level-arguments} (note\n # the equal sign to fool the first-level arguments parser into\n # not treating the second-level arguments as first-level\n # options). We're certainly at risk of getting this wrong\n # since now we're forced into the business of trying to figure\n # out what is an argument (although I think this\n # implementation will suffice).\n for index in range(len(args)):\n match = long_regex.search(args[index])\n if match:\n del args[index]\n return True\n return False",
"def test_add_arguments_with_long_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.long_arg == ['help']\n parser_manager.add_argument('aaa')\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa=']\n parser_manager.add_argument('xx')\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa=', 'xx=']\n\n assert parser_manager.cli_parser.short_arg == 'h'",
"def _remove_option(\n args, long_option_name, short_option_name, takes_arg):\n if long_option_name is not None:\n regex_string = \"^\" + long_option_name + \"=\"\n long_regex = re.compile(regex_string)\n if short_option_name is not None:\n # Short options we only match the -X and assume\n # any arg is one command line argument jammed together.\n # i.e. -O--abc=1 is a single argument in the args list.\n # We don't handle -O --abc=1, as argparse doesn't handle\n # it, either.\n regex_string = \"^\" + short_option_name\n short_regex = re.compile(regex_string)\n\n def remove_long_internal():\n \"\"\"Removes one matching long option from args.\n @returns True if one was found and removed; False otherwise.\n \"\"\"\n try:\n index = args.index(long_option_name)\n # Handle the exact match case.\n if takes_arg:\n removal_count = 2\n else:\n removal_count = 1\n del args[index:index+removal_count]\n return True\n except ValueError:\n # Thanks to argparse not handling options with known arguments\n # like other options parsing libraries (see\n # https://bugs.python.org/issue9334), we need to support the\n # --results-formatter-options={second-level-arguments} (note\n # the equal sign to fool the first-level arguments parser into\n # not treating the second-level arguments as first-level\n # options). We're certainly at risk of getting this wrong\n # since now we're forced into the business of trying to figure\n # out what is an argument (although I think this\n # implementation will suffice).\n for index in range(len(args)):\n match = long_regex.search(args[index])\n if match:\n del args[index]\n return True\n return False\n\n def remove_short_internal():\n \"\"\"Removes one matching short option from args.\n @returns True if one was found and removed; False otherwise.\n \"\"\"\n for index in range(len(args)):\n match = short_regex.search(args[index])\n if match:\n del args[index]\n return True\n return False\n\n removal_count = 0\n while long_option_name is not None and remove_long_internal():\n removal_count += 1\n while short_option_name is not None and remove_short_internal():\n removal_count += 1\n if removal_count == 0:\n raise Exception(\n \"failed to find at least one of '{}', '{}' in options\".format(\n long_option_name, short_option_name))",
"def remove_short_internal():\n for index in range(len(args)):\n match = short_regex.search(args[index])\n if match:\n del args[index]\n return True\n return False",
"def test_arguments_string_parsing_with_long_and_short_names_in_root_parsing_manager(root_config_parsing_manager):\n root_config_parsing_manager.add_argument('c', 'coco')\n root_config_parsing_manager.add_argument('d', 'xx', argument_type=int)\n\n check_parse_cli_result(root_config_parsing_manager, '-c 1', {'coco': '1'})\n\n check_parse_cli_result(root_config_parsing_manager, '-d 555', {'xx': 555})",
"def redispatch_short_arg(self, rest, ba, i):\n if not rest:\n return\n try:\n nparam = ba.sig.aliases['-' + rest[0]]\n except KeyError as e:\n raise errors.UnknownOption(e.args[0])\n orig_args = ba.in_args\n ba.in_args = ba.in_args[:i] + ('-' + rest,) + ba.in_args[i + 1:]\n try:\n nparam.read_argument(ba, i)\n finally:\n ba.in_args = orig_args\n ba.unsatisfied.discard(nparam)\n ba.not_provided.discard(nparam)",
"def test_parsing_of_arguments_string_with_subgroup_parser_with_long_and_short_arguments_names_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='sub')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('a', 'aaa', is_flag=True, action=store_true, default_value=False)\n subparser.add_argument('c', 'ttt', is_flag=False, action=store_val, argument_type=int)\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('sub', subparser)\n check_parse_cli_result(parser_manager, '--sub titi -a --name tutu -c 15',\n {'sub': {'tutu': {'aaa': True, 'type': 'titi', 'ttt': 15}}})",
"def check_arguments():\n global nargs, progname\n nargs = len(sys.argv) - 1\n progname = os.path.basename(sys.argv[0])\n flag = True\n if nargs != 0 and N_ARGUMENTS[-1] == '*':\n flag = False\n else:\n for i in N_ARGUMENTS:\n if nargs == i:\n flag = False\n if flag:\n usage()",
"def find_full_argument(all_possibles, short_argument):\n possibles = [x for x in all_possibles if x.startswith(short_argument)]\n if not possibles:\n raise BadArgumentsError('Unknown argument \"%s\"' % short_argument)\n if len(possibles) > 1:\n raise BadArgumentsError('Ambiguous argument \"%s\" (possible: %s)' % (short_argument, ', '.join(possibles)))\n return possibles[0]",
"def parse_flags_simple(argv):\n parser = argparse_flags.ArgumentParser(\n description='A simple example of argparse_flags.')\n parser.add_argument(\n '--argparse_echo', help='The echo message from argparse_flags')\n return parser.parse_args(argv[1:])",
"def boolean_flag(\n parser, name: str, default: bool = False, help: str = None\n) -> None:\n dest = name.replace(\"-\", \"_\")\n parser.add_argument(\n \"--\" + name,\n action=\"store_true\",\n default=default,\n dest=dest,\n help=help\n )\n parser.add_argument(\"--no-\" + name, action=\"store_false\", dest=dest)",
"def expand_flag(flag: str) -> str:\n flag = flag.replace(\"-\", \"_\")\n if flag not in FLAG_SHORT_TO_LONG.values():\n try:\n longflag = FLAG_SHORT_TO_LONG[flag]\n except KeyError:\n raise HERAError(\"Unrecognized flag: `{}`.\".format(flag))\n else:\n return \"flag_\" + longflag\n else:\n return \"flag_\" + flag",
"def test_add_arguments_with_two_long_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('coco', 'dodo')\n\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('ddddd', 'plplp')\n\n assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # -a, --help, -h and sub\n\n assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']\n assert root_config_parsing_manager.cli_parser.short_arg == 'ha'",
"def usesAllFlags(self, argv):\n\n i = 0\n ret = True\n while i < len(argv):\n arg = argv[i]\n thisCount = self.usesFlag(arg)\n\n if thisCount == 0: # parsing this arg failed\n thisCount = 1\n ret = False\n\n # advance past the flag and any of its args\n i = i + thisCount\n\n return ret",
"def arg_filter(arg: str, keyword: str) -> bool:\n arg = arg.strip()\n return (\n arg.startswith(f\"--{keyword}=\")\n or arg.startswith(f\"-{keyword}=\")\n or arg == f\"--{keyword}\"\n or arg == f\"-{keyword}\"\n )",
"def test_check_if_help_or_version_in_arguments(self):\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss', \"--help\"]\n with patch.object(sys, 'argv', test_args):\n self.assertEqual(argparse_handler.check_if_help_or_version_in_arguments(), 'help')\n\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss', \"-h\"]\n with patch.object(sys, 'argv', test_args):\n self.assertEqual(argparse_handler.check_if_help_or_version_in_arguments(), 'help')\n\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss', \"--version\"]\n with patch.object(sys, 'argv', test_args):\n self.assertEqual(argparse_handler.check_if_help_or_version_in_arguments(), 'version')\n\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss']\n with patch.object(sys, 'argv', test_args):\n self.assertIsNone(argparse_handler.check_if_help_or_version_in_arguments())",
"def _has_arg(opcode):\n return opcode == 'ildc' or opcode == 'jz' or opcode == 'jnz' or opcode == 'jmp'"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test if adding arguments with a long name to the cli parser modifies the long_arg list. The short_arg string is not changed | def test_add_arguments_with_long_name_to_cli_parser():
parser_manager = RootConfigParsingManager()
assert parser_manager.cli_parser.long_arg == ['help']
parser_manager.add_argument('aaa')
assert parser_manager.cli_parser.long_arg == ['help', 'aaa=']
parser_manager.add_argument('xx')
assert parser_manager.cli_parser.long_arg == ['help', 'aaa=', 'xx=']
assert parser_manager.cli_parser.short_arg == 'h' | [
"def test_add_flag_arguments_with_long_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.long_arg == ['help']\n parser_manager.add_argument('aaa', is_flag=True)\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa']\n parser_manager.add_argument('tttt', is_flag=True)\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa', 'tttt']\n\n assert parser_manager.cli_parser.short_arg == 'h'",
"def test_add_flag_arguments_and_no_flag_arguments_with_short_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.short_arg == 'h'\n parser_manager.add_argument('a', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'ha'\n parser_manager.add_argument('b')\n assert parser_manager.cli_parser.short_arg == 'hab:'\n parser_manager.add_argument('c', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'hab:c'\n\n assert len(parser_manager.cli_parser.long_arg) == 1 # Only help is arg argument\n\n assert parser_manager.cli_parser.long_arg == [\"help\"] # Only help is arg argument",
"def test_add_flag_arguments_with_short_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.short_arg == 'h'\n parser_manager.add_argument('a', is_flag=True)\n parser_manager.add_argument('x', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'hax'",
"def remove_long_internal():\n try:\n index = args.index(long_option_name)\n # Handle the exact match case.\n if takes_arg:\n removal_count = 2\n else:\n removal_count = 1\n del args[index:index+removal_count]\n return True\n except ValueError:\n # Thanks to argparse not handling options with known arguments\n # like other options parsing libraries (see\n # https://bugs.python.org/issue9334), we need to support the\n # --results-formatter-options={second-level-arguments} (note\n # the equal sign to fool the first-level arguments parser into\n # not treating the second-level arguments as first-level\n # options). We're certainly at risk of getting this wrong\n # since now we're forced into the business of trying to figure\n # out what is an argument (although I think this\n # implementation will suffice).\n for index in range(len(args)):\n match = long_regex.search(args[index])\n if match:\n del args[index]\n return True\n return False",
"def test_arguments_string_parsing_with_long_and_short_names_in_root_parsing_manager(root_config_parsing_manager):\n root_config_parsing_manager.add_argument('c', 'coco')\n root_config_parsing_manager.add_argument('d', 'xx', argument_type=int)\n\n check_parse_cli_result(root_config_parsing_manager, '-c 1', {'coco': '1'})\n\n check_parse_cli_result(root_config_parsing_manager, '-d 555', {'xx': 555})",
"def _remove_option(\n args, long_option_name, short_option_name, takes_arg):\n if long_option_name is not None:\n regex_string = \"^\" + long_option_name + \"=\"\n long_regex = re.compile(regex_string)\n if short_option_name is not None:\n # Short options we only match the -X and assume\n # any arg is one command line argument jammed together.\n # i.e. -O--abc=1 is a single argument in the args list.\n # We don't handle -O --abc=1, as argparse doesn't handle\n # it, either.\n regex_string = \"^\" + short_option_name\n short_regex = re.compile(regex_string)\n\n def remove_long_internal():\n \"\"\"Removes one matching long option from args.\n @returns True if one was found and removed; False otherwise.\n \"\"\"\n try:\n index = args.index(long_option_name)\n # Handle the exact match case.\n if takes_arg:\n removal_count = 2\n else:\n removal_count = 1\n del args[index:index+removal_count]\n return True\n except ValueError:\n # Thanks to argparse not handling options with known arguments\n # like other options parsing libraries (see\n # https://bugs.python.org/issue9334), we need to support the\n # --results-formatter-options={second-level-arguments} (note\n # the equal sign to fool the first-level arguments parser into\n # not treating the second-level arguments as first-level\n # options). We're certainly at risk of getting this wrong\n # since now we're forced into the business of trying to figure\n # out what is an argument (although I think this\n # implementation will suffice).\n for index in range(len(args)):\n match = long_regex.search(args[index])\n if match:\n del args[index]\n return True\n return False\n\n def remove_short_internal():\n \"\"\"Removes one matching short option from args.\n @returns True if one was found and removed; False otherwise.\n \"\"\"\n for index in range(len(args)):\n match = short_regex.search(args[index])\n if match:\n del args[index]\n return True\n return False\n\n removal_count = 0\n while long_option_name is not None and remove_long_internal():\n removal_count += 1\n while short_option_name is not None and remove_short_internal():\n removal_count += 1\n if removal_count == 0:\n raise Exception(\n \"failed to find at least one of '{}', '{}' in options\".format(\n long_option_name, short_option_name))",
"def test_add_arguments_with_two_long_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('coco', 'dodo')\n\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('ddddd', 'plplp')\n\n assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # -a, --help, -h and sub\n\n assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']\n assert root_config_parsing_manager.cli_parser.short_arg == 'ha'",
"def remove_short_internal():\n for index in range(len(args)):\n match = short_regex.search(args[index])\n if match:\n del args[index]\n return True\n return False",
"def test_add_arguments_with_two_short_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('c', 'd')\n\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('t', 's')\n\n assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # --help, -h and sub\n\n assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']\n assert root_config_parsing_manager.cli_parser.short_arg == 'ha'",
"def test_parsing_of_arguments_string_with_subgroup_parser_with_long_and_short_arguments_names_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='sub')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('a', 'aaa', is_flag=True, action=store_true, default_value=False)\n subparser.add_argument('c', 'ttt', is_flag=False, action=store_val, argument_type=int)\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('sub', subparser)\n check_parse_cli_result(parser_manager, '--sub titi -a --name tutu -c 15',\n {'sub': {'tutu': {'aaa': True, 'type': 'titi', 'ttt': 15}}})",
"def redispatch_short_arg(self, rest, ba, i):\n if not rest:\n return\n try:\n nparam = ba.sig.aliases['-' + rest[0]]\n except KeyError as e:\n raise errors.UnknownOption(e.args[0])\n orig_args = ba.in_args\n ba.in_args = ba.in_args[:i] + ('-' + rest,) + ba.in_args[i + 1:]\n try:\n nparam.read_argument(ba, i)\n finally:\n ba.in_args = orig_args\n ba.unsatisfied.discard(nparam)\n ba.not_provided.discard(nparam)",
"def test_add_argument_list(self):\n self.subcommand_class = self.commands.get('classificationobject-list')\n parser = argparse.ArgumentParser(description='parser for arguments')\n parser = self.subcommand_class().add_known_arguments(parser)\n args = parser.parse_known_args()\n arg1 = parser.parse_args()\n self.assertEqual(arg1.show_details,None)\n self.assertEqual(arg1.fields,None)\n self.assertEqual(arg1.sort_key,None)\n self.assertEqual(arg1.sort_direction,None)",
"def test_check_if_help_or_version_in_arguments(self):\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss', \"--help\"]\n with patch.object(sys, 'argv', test_args):\n self.assertEqual(argparse_handler.check_if_help_or_version_in_arguments(), 'help')\n\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss', \"-h\"]\n with patch.object(sys, 'argv', test_args):\n self.assertEqual(argparse_handler.check_if_help_or_version_in_arguments(), 'help')\n\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss', \"--version\"]\n with patch.object(sys, 'argv', test_args):\n self.assertEqual(argparse_handler.check_if_help_or_version_in_arguments(), 'version')\n\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss']\n with patch.object(sys, 'argv', test_args):\n self.assertIsNone(argparse_handler.check_if_help_or_version_in_arguments())",
"def has_option (self, long_option):\r\n return self.option_index.has_key(long_option)",
"def test_arg() -> None:\n parser = arg_parser()\n parsed = parser.parse_args(\n [\"--test\", \"test_name\", \"-n\", \"52\", \"--tool\", \"cwltool\", \"-j\", \"4\"]\n )\n assert parsed.test == \"test_name\"\n assert parsed.n == \"52\"\n assert parsed.tool == \"cwltool\"\n assert parsed.j == 4",
"def setShort(*args, **kwargs):\n \n pass",
"def find_full_argument(all_possibles, short_argument):\n possibles = [x for x in all_possibles if x.startswith(short_argument)]\n if not possibles:\n raise BadArgumentsError('Unknown argument \"%s\"' % short_argument)\n if len(possibles) > 1:\n raise BadArgumentsError('Ambiguous argument \"%s\" (possible: %s)' % (short_argument, ', '.join(possibles)))\n return possibles[0]",
"def test_add_argument_show(self):\n self.subcommand_class = self.commands.get('classificationobject-show')\n parser = argparse.ArgumentParser(description='parser for arguments')\n parser = self.subcommand_class().add_known_arguments(parser)\n args = parser.parse_known_args()\n arg1 = parser.parse_args()\n self.assertEqual(arg1.id, None)",
"def boolean_flag(\n parser, name: str, default: bool = False, help: str = None\n) -> None:\n dest = name.replace(\"-\", \"_\")\n parser.add_argument(\n \"--\" + name,\n action=\"store_true\",\n default=default,\n dest=dest,\n help=help\n )\n parser.add_argument(\"--no-\" + name, action=\"store_false\", dest=dest)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
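The rows above and below share the same layout: a natural-language query, the positive document (the test function it describes), a list of hard negatives, and a metadata block whose objective.triplet entry marks the row as a (query, document, negatives) triplet. As a minimal, illustrative sketch only, the snippet below shows one way such rows could be consumed for retrieval training; the file name corpus.jsonl and the JSON-lines layout are assumptions, not part of this corpus.

import json

def iter_triplets(path):
    # Yield (query, positive, negatives) tuples from an assumed JSON-lines export,
    # where each line holds one row with the query/document/negatives columns.
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            row = json.loads(line)
            # objective.triplet marks the document field as the positive passage.
            yield row["query"], row["document"], row["negatives"]

# Hypothetical usage:
# for query, positive, negatives in iter_triplets("corpus.jsonl"):
#     ...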
Test if adding flag arguments with a long name to the parser modifies the long_arg list. The short_arg string is not changed | def test_add_flag_arguments_with_long_name_to_cli_parser():
parser_manager = RootConfigParsingManager()
assert parser_manager.cli_parser.long_arg == ['help']
parser_manager.add_argument('aaa', is_flag=True)
assert parser_manager.cli_parser.long_arg == ['help', 'aaa']
parser_manager.add_argument('tttt', is_flag=True)
assert parser_manager.cli_parser.long_arg == ['help', 'aaa', 'tttt']
assert parser_manager.cli_parser.short_arg == 'h' | [
"def remove_long_internal():\n try:\n index = args.index(long_option_name)\n # Handle the exact match case.\n if takes_arg:\n removal_count = 2\n else:\n removal_count = 1\n del args[index:index+removal_count]\n return True\n except ValueError:\n # Thanks to argparse not handling options with known arguments\n # like other options parsing libraries (see\n # https://bugs.python.org/issue9334), we need to support the\n # --results-formatter-options={second-level-arguments} (note\n # the equal sign to fool the first-level arguments parser into\n # not treating the second-level arguments as first-level\n # options). We're certainly at risk of getting this wrong\n # since now we're forced into the business of trying to figure\n # out what is an argument (although I think this\n # implementation will suffice).\n for index in range(len(args)):\n match = long_regex.search(args[index])\n if match:\n del args[index]\n return True\n return False",
"def test_add_arguments_with_long_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.long_arg == ['help']\n parser_manager.add_argument('aaa')\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa=']\n parser_manager.add_argument('xx')\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa=', 'xx=']\n\n assert parser_manager.cli_parser.short_arg == 'h'",
"def test_add_flag_arguments_and_no_flag_arguments_with_short_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.short_arg == 'h'\n parser_manager.add_argument('a', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'ha'\n parser_manager.add_argument('b')\n assert parser_manager.cli_parser.short_arg == 'hab:'\n parser_manager.add_argument('c', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'hab:c'\n\n assert len(parser_manager.cli_parser.long_arg) == 1 # Only help is arg argument\n\n assert parser_manager.cli_parser.long_arg == [\"help\"] # Only help is arg argument",
"def test_add_flag_arguments_with_short_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.short_arg == 'h'\n parser_manager.add_argument('a', is_flag=True)\n parser_manager.add_argument('x', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'hax'",
"def _remove_option(\n args, long_option_name, short_option_name, takes_arg):\n if long_option_name is not None:\n regex_string = \"^\" + long_option_name + \"=\"\n long_regex = re.compile(regex_string)\n if short_option_name is not None:\n # Short options we only match the -X and assume\n # any arg is one command line argument jammed together.\n # i.e. -O--abc=1 is a single argument in the args list.\n # We don't handle -O --abc=1, as argparse doesn't handle\n # it, either.\n regex_string = \"^\" + short_option_name\n short_regex = re.compile(regex_string)\n\n def remove_long_internal():\n \"\"\"Removes one matching long option from args.\n @returns True if one was found and removed; False otherwise.\n \"\"\"\n try:\n index = args.index(long_option_name)\n # Handle the exact match case.\n if takes_arg:\n removal_count = 2\n else:\n removal_count = 1\n del args[index:index+removal_count]\n return True\n except ValueError:\n # Thanks to argparse not handling options with known arguments\n # like other options parsing libraries (see\n # https://bugs.python.org/issue9334), we need to support the\n # --results-formatter-options={second-level-arguments} (note\n # the equal sign to fool the first-level arguments parser into\n # not treating the second-level arguments as first-level\n # options). We're certainly at risk of getting this wrong\n # since now we're forced into the business of trying to figure\n # out what is an argument (although I think this\n # implementation will suffice).\n for index in range(len(args)):\n match = long_regex.search(args[index])\n if match:\n del args[index]\n return True\n return False\n\n def remove_short_internal():\n \"\"\"Removes one matching short option from args.\n @returns True if one was found and removed; False otherwise.\n \"\"\"\n for index in range(len(args)):\n match = short_regex.search(args[index])\n if match:\n del args[index]\n return True\n return False\n\n removal_count = 0\n while long_option_name is not None and remove_long_internal():\n removal_count += 1\n while short_option_name is not None and remove_short_internal():\n removal_count += 1\n if removal_count == 0:\n raise Exception(\n \"failed to find at least one of '{}', '{}' in options\".format(\n long_option_name, short_option_name))",
"def test_arguments_string_parsing_with_long_and_short_names_in_root_parsing_manager(root_config_parsing_manager):\n root_config_parsing_manager.add_argument('c', 'coco')\n root_config_parsing_manager.add_argument('d', 'xx', argument_type=int)\n\n check_parse_cli_result(root_config_parsing_manager, '-c 1', {'coco': '1'})\n\n check_parse_cli_result(root_config_parsing_manager, '-d 555', {'xx': 555})",
"def remove_short_internal():\n for index in range(len(args)):\n match = short_regex.search(args[index])\n if match:\n del args[index]\n return True\n return False",
"def set_long(self, new_long: bool):\n self.is_long = new_long",
"def expand_flag(flag: str) -> str:\n flag = flag.replace(\"-\", \"_\")\n if flag not in FLAG_SHORT_TO_LONG.values():\n try:\n longflag = FLAG_SHORT_TO_LONG[flag]\n except KeyError:\n raise HERAError(\"Unrecognized flag: `{}`.\".format(flag))\n else:\n return \"flag_\" + longflag\n else:\n return \"flag_\" + flag",
"def redispatch_short_arg(self, rest, ba, i):\n if not rest:\n return\n try:\n nparam = ba.sig.aliases['-' + rest[0]]\n except KeyError as e:\n raise errors.UnknownOption(e.args[0])\n orig_args = ba.in_args\n ba.in_args = ba.in_args[:i] + ('-' + rest,) + ba.in_args[i + 1:]\n try:\n nparam.read_argument(ba, i)\n finally:\n ba.in_args = orig_args\n ba.unsatisfied.discard(nparam)\n ba.not_provided.discard(nparam)",
"def test_add_arguments_with_two_long_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('coco', 'dodo')\n\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('ddddd', 'plplp')\n\n assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # -a, --help, -h and sub\n\n assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']\n assert root_config_parsing_manager.cli_parser.short_arg == 'ha'",
"def test_parsing_of_arguments_string_with_subgroup_parser_with_long_and_short_arguments_names_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='sub')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('a', 'aaa', is_flag=True, action=store_true, default_value=False)\n subparser.add_argument('c', 'ttt', is_flag=False, action=store_val, argument_type=int)\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('sub', subparser)\n check_parse_cli_result(parser_manager, '--sub titi -a --name tutu -c 15',\n {'sub': {'tutu': {'aaa': True, 'type': 'titi', 'ttt': 15}}})",
"def setShort(*args, **kwargs):\n \n pass",
"def has_option (self, long_option):\r\n return self.option_index.has_key(long_option)",
"def set2Short(*args, **kwargs):\n \n pass",
"def flag_match(self, flag):\n return flag == self.short or flag == self.long",
"def addLong(self, ln, dv = 0):\n \n cmds.addAttr( ln = ln, at = 'long', dv = dv)",
"def AddJobFlag(parser):\n parser.add_argument(\n '--job', required=False, help='Limit matched resources to the given job.'\n )",
"def test_add_arguments_with_two_short_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('c', 'd')\n\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('t', 's')\n\n assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # --help, -h and sub\n\n assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']\n assert root_config_parsing_manager.cli_parser.short_arg == 'ha'"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test parsing of argument strings with a parsing manager containing a subgroup parser. It must produce the parsing results and exceptions checked below | def test_arguments_string_parsing_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):
subparser = SubgroupConfigParsingManager('toto')
subparser.add_argument('b', is_flag=True, action=store_true)
subparser.add_argument('n', 'name')
root_config_parsing_manager.add_subgroup_parser('sub', subparser)
check_parse_cli_result(root_config_parsing_manager, "", {})
with pytest.raises(UnknownArgException):
check_parse_cli_result(root_config_parsing_manager, "-z", {})
check_parse_cli_result(root_config_parsing_manager, '-a', {'a': True})
with pytest.raises(NoNameSpecifiedForSubgroupException):
check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b', {})
check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b --name titi',
{'a': True, 'sub': {'titi': {'type': 'toto', 'b': True}}})
with pytest.raises(BadContextException):
check_parse_cli_result(root_config_parsing_manager, "-b", {}) | [
"def test_parsing_of_arguments_string_with_subgroup_parser_with_long_and_short_arguments_names_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='sub')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('a', 'aaa', is_flag=True, action=store_true, default_value=False)\n subparser.add_argument('c', 'ttt', is_flag=False, action=store_val, argument_type=int)\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('sub', subparser)\n check_parse_cli_result(parser_manager, '--sub titi -a --name tutu -c 15',\n {'sub': {'tutu': {'aaa': True, 'type': 'titi', 'ttt': 15}}})",
"def subgroup_parser():\n parser = SubgroupConfigParser('test')\n parser.add_argument('a', is_flag=True)\n return parser",
"def test_arguments_dict_validation_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):\n subparser = SubgroupConfigParsingManager('toto')\n subparser.add_argument('b', is_flag=True, action=store_true)\n subparser.add_argument('type', is_flag=True, action=store_true)\n subparser.add_argument('n', 'name')\n root_config_parsing_manager.add_subgroup_parser('sub', subparser)\n\n dic_a = {'a': True}\n\n dic_z = {\n \"z\": True\n }\n\n dic_b = {\n 'b': \"type\"\n }\n\n dic_a_sub = {\n 'a': True,\n 'sub': {\n 'titi':\n {\n 'type': 'toto',\n 'b': \"type\"\n }\n }\n }\n\n with pytest.raises(UnknownArgException):\n root_config_parsing_manager.validate(dic_z)\n\n with pytest.raises(UnknownArgException):\n root_config_parsing_manager.validate(dic_b)\n\n assert root_config_parsing_manager.validate(dic_a) == dic_a\n\n assert root_config_parsing_manager.validate(dic_a_sub) == dic_a_sub\n\n assert root_config_parsing_manager.validate({}) == {}",
"def test_add_subgroup_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n\n assert len(parser_manager.cli_parser.subgroup_parsers) == 0\n\n parser_manager.add_subgroup(name='sub')\n\n assert len(parser_manager.cli_parser.subgroup_parsers) == 1\n\n parser_manager.add_subgroup(name='sub1')\n\n assert len(parser_manager.cli_parser.subgroup_parsers) == 2\n\n parser_manager.add_subgroup(name='sub3')\n\n assert len(parser_manager.cli_parser.subgroup_parsers) == 3",
"def root_config_parser_with_subgroups(root_config_parser_with_mandatory_and_optional_arguments):\n\n root_config_parser_with_mandatory_and_optional_arguments.add_argument_prefix(argument_prefix='TEST_')\n\n root_config_parser_with_mandatory_and_optional_arguments.add_subgroup(subgroup_type='g1', prefix='TEST_G1_')\n\n root_config_parser_with_mandatory_and_optional_arguments.add_subgroup(subgroup_type='g2', prefix='TEST_G2_')\n\n subgroup_parser_g1 = SubgroupConfigParser(name='type1')\n subgroup_parser_g1.add_argument('1', 'a1', argument_type=str, is_mandatory=True)\n subgroup_parser_g1.add_argument('2', 'a2', argument_type=bool, default_value=True)\n subgroup_parser_g1.add_argument('3', 'a3', argument_type=str, default_value=69)\n subgroup_parser_g1.add_argument('n', 'name', argument_type=str)\n root_config_parser_with_mandatory_and_optional_arguments.add_subgroup_parser(subgroup_type='g1',\n subgroup_parser=subgroup_parser_g1)\n\n subgroup_parser_g2 = SubgroupConfigParser(name='type2')\n subgroup_parser_g2.add_argument('1', 'a1', argument_type=float, is_mandatory=False)\n subgroup_parser_g2.add_argument('2', 'a2', argument_type=str)\n subgroup_parser_g2.add_argument('3', 'a3', argument_type=str)\n subgroup_parser_g2.add_argument('4', 'a4', argument_type=str)\n subgroup_parser_g2.add_argument('n', 'name', argument_type=str)\n root_config_parser_with_mandatory_and_optional_arguments.add_subgroup_parser(subgroup_type='g2',\n subgroup_parser=subgroup_parser_g2)\n\n return root_config_parser_with_mandatory_and_optional_arguments",
"def test_get_argument_groups(self) -> None:\n MockCustomizableClass.add_args_to_group(self.test_group)\n group_dict = get_argument_groups(self.argparser)\n self.assertEqual(len(group_dict), 3, f\"Expected 3 groups, found {group_dict}\")\n self.assertEqual(\n len(group_dict[\"__NO_TITLE__\"]), # type: ignore\n 0,\n f\"Expected no unnamed groups, found {group_dict['__NO_TITLE__']}.\",\n )\n self.assertIn(\"test_group\", group_dict.keys(), \"test group not found\")\n self.assertIn(\"test_subgroup\", group_dict.keys(), \"test subgroup not found\")\n self.assertIn(self.test_group, group_dict.values(), \"test group doesn't match\")",
"def test_add_subgroup_parser_without_name_argument_raise_an_exception_in_root_parsing_manager():\n parser = RootConfigParsingManager()\n subparser = SubgroupConfigParsingManager('titi')\n\n with pytest.raises(SubgroupParserWithoutNameArgumentException):\n parser.add_subgroup_parser('toto', subparser)",
"def root_config_parsing_manager_with_mandatory_and_optional_arguments():\n parser_manager = RootConfigParsingManager()\n\n parser_manager.add_argument_prefix(argument_prefix='TEST_')\n\n parser_manager.add_subgroup(name='input', prefix='TEST_INPUT_')\n\n parser_manager.add_subgroup(name='output', prefix='TEST_OUTPUT_')\n\n parser_manager.add_argument('a', argument_type=bool, is_flag=True, action=store_true)\n\n parser_manager.add_argument('1', 'argument1', default_value=3, argument_type=int, is_mandatory=False)\n\n parser_manager.add_argument('argumento2', '2', argument_type=str, is_mandatory=True)\n\n parser_manager.add_argument('arg3', 'argument3', argument_type=bool, is_mandatory=False)\n\n parser_manager.add_argument('d', 'arg4', argument_type=float, is_mandatory=True)\n\n parser_manager.add_argument('arg5', '5', default_value='default value', argument_type=str,\n help_text='help 5')\n\n i1_type_subgroup_parser_manager = SubgroupConfigParsingManager(name=\"i1_type\")\n i1_type_subgroup_parser_manager.add_argument('model', 'm', argument_type=str, is_mandatory=True)\n i1_type_subgroup_parser_manager.add_argument('db', 'd', argument_type=str, is_mandatory=False)\n i1_type_subgroup_parser_manager.add_argument('port', 'p', argument_type=int, is_mandatory=False)\n i1_type_subgroup_parser_manager.add_argument('name', 'n', argument_type=str, is_mandatory=False,\n default_value='my_i1_instance')\n\n parser_manager.add_subgroup_parser(subgroup_name=\"input\", subgroup_parser=i1_type_subgroup_parser_manager)\n\n o1_type_subgroup_parser_manager = SubgroupConfigParsingManager(name=\"o1_type\")\n o1_type_subgroup_parser_manager.add_argument('model', 'm', argument_type=str, is_mandatory=True)\n o1_type_subgroup_parser_manager.add_argument('db', 'd', argument_type=str, is_mandatory=False)\n o1_type_subgroup_parser_manager.add_argument('name', 'n', argument_type=str, is_mandatory=False,\n default_value='my_o1_instance')\n o1_type_subgroup_parser_manager.add_argument('collection', 'c', argument_type=str)\n\n parser_manager.add_subgroup_parser(subgroup_name=\"output\", subgroup_parser=o1_type_subgroup_parser_manager)\n\n o2_type_subgroup_parser_manager = SubgroupConfigParsingManager(name=\"o2_type\")\n o2_type_subgroup_parser_manager.add_argument('model', 'm', argument_type=str, is_mandatory=True)\n o2_type_subgroup_parser_manager.add_argument('db', 'd', argument_type=str, is_mandatory=False)\n o2_type_subgroup_parser_manager.add_argument('name', 'n', argument_type=str, is_mandatory=False,\n default_value='my_o2_instance')\n o2_type_subgroup_parser_manager.add_argument('collection', 'c', argument_type=str)\n\n parser_manager.add_subgroup_parser(subgroup_name=\"output\", subgroup_parser=o2_type_subgroup_parser_manager)\n\n return parser_manager",
"def test_arguments_string_parsing_with_long_and_short_names_in_root_parsing_manager(root_config_parsing_manager):\n root_config_parsing_manager.add_argument('c', 'coco')\n root_config_parsing_manager.add_argument('d', 'xx', argument_type=int)\n\n check_parse_cli_result(root_config_parsing_manager, '-c 1', {'coco': '1'})\n\n check_parse_cli_result(root_config_parsing_manager, '-d 555', {'xx': 555})",
"def test_add_subgroup_parser_that_already_exists_raises_an_exception_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='toto')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('toto', subparser)\n\n repeated_subparser = SubgroupConfigParsingManager('titi')\n repeated_subparser.add_argument('n', 'name')\n\n with pytest.raises(AlreadyAddedSubparserException):\n parser_manager.add_subgroup_parser('toto', repeated_subparser)",
"def test_add_common_args_with_group(self):\n # Add a requirement group to a different parser that we pass in to make sure we will use\n # the required group passed in.\n temp_subparser_2 = self.subparsers.add_parser('eat2', help='eat some food again.')\n required_group = temp_subparser_2.add_argument_group(\"required arguments\")\n driver._add_common_args(parser=self.temp_subparser, required_group=required_group)\n actions = self.temp_subparser.__dict__['_option_string_actions']\n temp_subparser_2_actions = temp_subparser_2.__dict__['_option_string_actions']\n self.assertIn('--database', temp_subparser_2_actions.keys())\n self.assertIn('-d', temp_subparser_2_actions.keys())\n\n self.assertIn('--database-type', actions.keys())\n self.assertIn('--keyspace', actions.keys())\n self.assertTrue(len(actions), 2)",
"def test_get_argument_groups_empty(self) -> None:\n MockUncustomizableClass.add_args_to_group(self.test_group)\n group_dict = get_argument_groups(self.argparser)\n self.assertEqual(len(group_dict), 1, f\"Expected 1 group, found {group_dict}\")\n self.assertEqual(\n len(group_dict[\"__NO_TITLE__\"]), # type: ignore\n 0,\n f\"Expected no unnamed groups, found {group_dict['__NO_TITLE__']}.\",\n )",
"def root_config_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_argument('a', argument_type=bool, is_flag=True, action=store_true)\n parser_manager.add_subgroup(name='sub')\n\n return parser_manager",
"def test_parsing_environment_variables_with_subgroups_and_long_and_short_names_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_long_and_short_names.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n expected_dict = load_configuration_from_json_file('root_manager_configuration_with_subgroups.json')\n expected_dict['input']['in1']['name'] = 'i1_name'\n expected_dict['output']['o1']['model'] = 'o1_model_x'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def _general_argument_parser(self, args_group):\n parser_handler = {UIConsts.MANDATORY: self._parse_mandatory_arguments,\n UIConsts.BASIC_MODE: self._parse_basic_mode_arguments,\n UIConsts.REGEX_MODE: self._parse_regex_mode_arguments,\n UIConsts.CUSTOM_MODE: self._parse_custom_mode_arguments}\n while self.num_of_attempts > 0:\n is_valid = parser_handler[args_group]()\n if not is_valid:\n continue\n else:\n self.num_of_attempts = 3\n return True\n return False",
"def test_config_priority_between_environ_variables_and_configuration_file_with_subgroups_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments,\n test_files_path):\n\n config_file_environment_variables = 'root_manager_configuration_with_subgroups_and_no_argument_default_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file_environment_variables,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n sys.argv.append('--config-file')\n\n sys.argv.append(test_files_path + '/root_manager_configuration_with_subgroups_and_long_and_short_names.json')\n\n expected_dict = load_configuration_from_json_file(config_file_environment_variables)\n expected_dict['input']['in1']['name'] = 'i1_name'\n expected_dict['output']['o1']['name'] = 'o1_name'\n expected_dict['output']['o2']['name'] = 'o2_name'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n sys.argv = []\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_add_Argument_with_valid_parser(self):\n# pdb.set_trace()\n self.subcommand_class = self.commands.get('classificationobject-create')\n parser = argparse.ArgumentParser(description='parser for classificationobject arguments')\n parser = self.subcommand_class().add_known_arguments(parser)\n args = parser.parse_known_args()\n arg1 = parser.parse_args()\n print(\"arg1 in test_add_Argument_with_valid_parser:\", arg1)\n self.assertEqual(arg1.name,'test')\n self.assertEqual(arg1.src_ip,None)\n self.assertEqual(arg1.src_mac,None) \n self.assertEqual(arg1.src_ip_subnet,None)\n self.assertEqual(arg1.minimum_src_port,None)\n self.assertEqual(arg1.maximum_src_port,None)\n self.assertEqual(arg1.dst_ip,None)\n self.assertEqual(arg1.dst_mac,None)\n self.assertEqual(arg1.dst_ip_subnet,None)\n self.assertEqual(arg1.minimum_dst_port,None)\n self.assertEqual(arg1.maximum_dst_port,None)\n self.assertEqual(arg1.protocol,None)",
"def test_parsing_configuration_file_with_long_and_short_names_for_arguments_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_basic_configuration_with_long_and_short_names.json'\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['argumento2'] = expected_dict.pop('2')\n expected_dict['arg5'] = expected_dict.pop('5')\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse(args=('--config-file ' +\n test_files_path +\n '/' + config_file).split())\n assert result == expected_dict",
"def run_parser(self, parser: ArgumentParser):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test validation of argument dictionaries with a parsing manager containing a subgroup parser. It must produce the validation results and exceptions checked below | def test_arguments_dict_validation_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):
subparser = SubgroupConfigParsingManager('toto')
subparser.add_argument('b', is_flag=True, action=store_true)
subparser.add_argument('type', is_flag=True, action=store_true)
subparser.add_argument('n', 'name')
root_config_parsing_manager.add_subgroup_parser('sub', subparser)
dic_a = {'a': True}
dic_z = {
"z": True
}
dic_b = {
'b': "type"
}
dic_a_sub = {
'a': True,
'sub': {
'titi':
{
'type': 'toto',
'b': "type"
}
}
}
with pytest.raises(UnknownArgException):
root_config_parsing_manager.validate(dic_z)
with pytest.raises(UnknownArgException):
root_config_parsing_manager.validate(dic_b)
assert root_config_parsing_manager.validate(dic_a) == dic_a
assert root_config_parsing_manager.validate(dic_a_sub) == dic_a_sub
assert root_config_parsing_manager.validate({}) == {} | [
"def test_arguments_string_parsing_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):\n\n subparser = SubgroupConfigParsingManager('toto')\n subparser.add_argument('b', is_flag=True, action=store_true)\n subparser.add_argument('n', 'name')\n root_config_parsing_manager.add_subgroup_parser('sub', subparser)\n\n check_parse_cli_result(root_config_parsing_manager, \"\", {})\n\n with pytest.raises(UnknownArgException):\n check_parse_cli_result(root_config_parsing_manager, \"-z\", {})\n\n check_parse_cli_result(root_config_parsing_manager, '-a', {'a': True})\n\n with pytest.raises(NoNameSpecifiedForSubgroupException):\n check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b', {})\n\n check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b --name titi',\n {'a': True, 'sub': {'titi': {'type': 'toto', 'b': True}}})\n\n with pytest.raises(BadContextException):\n check_parse_cli_result(root_config_parsing_manager, \"-b\", {})",
"def test_parsing_of_arguments_string_with_subgroup_parser_with_long_and_short_arguments_names_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='sub')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('a', 'aaa', is_flag=True, action=store_true, default_value=False)\n subparser.add_argument('c', 'ttt', is_flag=False, action=store_val, argument_type=int)\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('sub', subparser)\n check_parse_cli_result(parser_manager, '--sub titi -a --name tutu -c 15',\n {'sub': {'tutu': {'aaa': True, 'type': 'titi', 'ttt': 15}}})",
"def subgroup_parser():\n parser = SubgroupConfigParser('test')\n parser.add_argument('a', is_flag=True)\n return parser",
"def test_add_subgroup_parser_without_name_argument_raise_an_exception_in_root_parsing_manager():\n parser = RootConfigParsingManager()\n subparser = SubgroupConfigParsingManager('titi')\n\n with pytest.raises(SubgroupParserWithoutNameArgumentException):\n parser.add_subgroup_parser('toto', subparser)",
"def test_get_argument_groups(self) -> None:\n MockCustomizableClass.add_args_to_group(self.test_group)\n group_dict = get_argument_groups(self.argparser)\n self.assertEqual(len(group_dict), 3, f\"Expected 3 groups, found {group_dict}\")\n self.assertEqual(\n len(group_dict[\"__NO_TITLE__\"]), # type: ignore\n 0,\n f\"Expected no unnamed groups, found {group_dict['__NO_TITLE__']}.\",\n )\n self.assertIn(\"test_group\", group_dict.keys(), \"test group not found\")\n self.assertIn(\"test_subgroup\", group_dict.keys(), \"test subgroup not found\")\n self.assertIn(self.test_group, group_dict.values(), \"test group doesn't match\")",
"def test_add_subgroup_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n\n assert len(parser_manager.cli_parser.subgroup_parsers) == 0\n\n parser_manager.add_subgroup(name='sub')\n\n assert len(parser_manager.cli_parser.subgroup_parsers) == 1\n\n parser_manager.add_subgroup(name='sub1')\n\n assert len(parser_manager.cli_parser.subgroup_parsers) == 2\n\n parser_manager.add_subgroup(name='sub3')\n\n assert len(parser_manager.cli_parser.subgroup_parsers) == 3",
"def root_config_parser_with_subgroups(root_config_parser_with_mandatory_and_optional_arguments):\n\n root_config_parser_with_mandatory_and_optional_arguments.add_argument_prefix(argument_prefix='TEST_')\n\n root_config_parser_with_mandatory_and_optional_arguments.add_subgroup(subgroup_type='g1', prefix='TEST_G1_')\n\n root_config_parser_with_mandatory_and_optional_arguments.add_subgroup(subgroup_type='g2', prefix='TEST_G2_')\n\n subgroup_parser_g1 = SubgroupConfigParser(name='type1')\n subgroup_parser_g1.add_argument('1', 'a1', argument_type=str, is_mandatory=True)\n subgroup_parser_g1.add_argument('2', 'a2', argument_type=bool, default_value=True)\n subgroup_parser_g1.add_argument('3', 'a3', argument_type=str, default_value=69)\n subgroup_parser_g1.add_argument('n', 'name', argument_type=str)\n root_config_parser_with_mandatory_and_optional_arguments.add_subgroup_parser(subgroup_type='g1',\n subgroup_parser=subgroup_parser_g1)\n\n subgroup_parser_g2 = SubgroupConfigParser(name='type2')\n subgroup_parser_g2.add_argument('1', 'a1', argument_type=float, is_mandatory=False)\n subgroup_parser_g2.add_argument('2', 'a2', argument_type=str)\n subgroup_parser_g2.add_argument('3', 'a3', argument_type=str)\n subgroup_parser_g2.add_argument('4', 'a4', argument_type=str)\n subgroup_parser_g2.add_argument('n', 'name', argument_type=str)\n root_config_parser_with_mandatory_and_optional_arguments.add_subgroup_parser(subgroup_type='g2',\n subgroup_parser=subgroup_parser_g2)\n\n return root_config_parser_with_mandatory_and_optional_arguments",
"def root_config_parsing_manager_with_mandatory_and_optional_arguments():\n parser_manager = RootConfigParsingManager()\n\n parser_manager.add_argument_prefix(argument_prefix='TEST_')\n\n parser_manager.add_subgroup(name='input', prefix='TEST_INPUT_')\n\n parser_manager.add_subgroup(name='output', prefix='TEST_OUTPUT_')\n\n parser_manager.add_argument('a', argument_type=bool, is_flag=True, action=store_true)\n\n parser_manager.add_argument('1', 'argument1', default_value=3, argument_type=int, is_mandatory=False)\n\n parser_manager.add_argument('argumento2', '2', argument_type=str, is_mandatory=True)\n\n parser_manager.add_argument('arg3', 'argument3', argument_type=bool, is_mandatory=False)\n\n parser_manager.add_argument('d', 'arg4', argument_type=float, is_mandatory=True)\n\n parser_manager.add_argument('arg5', '5', default_value='default value', argument_type=str,\n help_text='help 5')\n\n i1_type_subgroup_parser_manager = SubgroupConfigParsingManager(name=\"i1_type\")\n i1_type_subgroup_parser_manager.add_argument('model', 'm', argument_type=str, is_mandatory=True)\n i1_type_subgroup_parser_manager.add_argument('db', 'd', argument_type=str, is_mandatory=False)\n i1_type_subgroup_parser_manager.add_argument('port', 'p', argument_type=int, is_mandatory=False)\n i1_type_subgroup_parser_manager.add_argument('name', 'n', argument_type=str, is_mandatory=False,\n default_value='my_i1_instance')\n\n parser_manager.add_subgroup_parser(subgroup_name=\"input\", subgroup_parser=i1_type_subgroup_parser_manager)\n\n o1_type_subgroup_parser_manager = SubgroupConfigParsingManager(name=\"o1_type\")\n o1_type_subgroup_parser_manager.add_argument('model', 'm', argument_type=str, is_mandatory=True)\n o1_type_subgroup_parser_manager.add_argument('db', 'd', argument_type=str, is_mandatory=False)\n o1_type_subgroup_parser_manager.add_argument('name', 'n', argument_type=str, is_mandatory=False,\n default_value='my_o1_instance')\n o1_type_subgroup_parser_manager.add_argument('collection', 'c', argument_type=str)\n\n parser_manager.add_subgroup_parser(subgroup_name=\"output\", subgroup_parser=o1_type_subgroup_parser_manager)\n\n o2_type_subgroup_parser_manager = SubgroupConfigParsingManager(name=\"o2_type\")\n o2_type_subgroup_parser_manager.add_argument('model', 'm', argument_type=str, is_mandatory=True)\n o2_type_subgroup_parser_manager.add_argument('db', 'd', argument_type=str, is_mandatory=False)\n o2_type_subgroup_parser_manager.add_argument('name', 'n', argument_type=str, is_mandatory=False,\n default_value='my_o2_instance')\n o2_type_subgroup_parser_manager.add_argument('collection', 'c', argument_type=str)\n\n parser_manager.add_subgroup_parser(subgroup_name=\"output\", subgroup_parser=o2_type_subgroup_parser_manager)\n\n return parser_manager",
"def test_add_Argument_with_valid_parser(self):\n# pdb.set_trace()\n self.subcommand_class = self.commands.get('classificationobject-create')\n parser = argparse.ArgumentParser(description='parser for classificationobject arguments')\n parser = self.subcommand_class().add_known_arguments(parser)\n args = parser.parse_known_args()\n arg1 = parser.parse_args()\n print(\"arg1 in test_add_Argument_with_valid_parser:\", arg1)\n self.assertEqual(arg1.name,'test')\n self.assertEqual(arg1.src_ip,None)\n self.assertEqual(arg1.src_mac,None) \n self.assertEqual(arg1.src_ip_subnet,None)\n self.assertEqual(arg1.minimum_src_port,None)\n self.assertEqual(arg1.maximum_src_port,None)\n self.assertEqual(arg1.dst_ip,None)\n self.assertEqual(arg1.dst_mac,None)\n self.assertEqual(arg1.dst_ip_subnet,None)\n self.assertEqual(arg1.minimum_dst_port,None)\n self.assertEqual(arg1.maximum_dst_port,None)\n self.assertEqual(arg1.protocol,None)",
"def test_add_subgroup_parser_that_already_exists_raises_an_exception_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='toto')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('toto', subparser)\n\n repeated_subparser = SubgroupConfigParsingManager('titi')\n repeated_subparser.add_argument('n', 'name')\n\n with pytest.raises(AlreadyAddedSubparserException):\n parser_manager.add_subgroup_parser('toto', repeated_subparser)",
"def test_get_argument_groups_empty(self) -> None:\n MockUncustomizableClass.add_args_to_group(self.test_group)\n group_dict = get_argument_groups(self.argparser)\n self.assertEqual(len(group_dict), 1, f\"Expected 1 group, found {group_dict}\")\n self.assertEqual(\n len(group_dict[\"__NO_TITLE__\"]), # type: ignore\n 0,\n f\"Expected no unnamed groups, found {group_dict['__NO_TITLE__']}.\",\n )",
"def test_add_common_args_with_group(self):\n # Add a requirement group to a different parser that we pass in to make sure we will use\n # the required group passed in.\n temp_subparser_2 = self.subparsers.add_parser('eat2', help='eat some food again.')\n required_group = temp_subparser_2.add_argument_group(\"required arguments\")\n driver._add_common_args(parser=self.temp_subparser, required_group=required_group)\n actions = self.temp_subparser.__dict__['_option_string_actions']\n temp_subparser_2_actions = temp_subparser_2.__dict__['_option_string_actions']\n self.assertIn('--database', temp_subparser_2_actions.keys())\n self.assertIn('-d', temp_subparser_2_actions.keys())\n\n self.assertIn('--database-type', actions.keys())\n self.assertIn('--keyspace', actions.keys())\n self.assertTrue(len(actions), 2)",
"def test_arguments_string_parsing_with_long_and_short_names_in_root_parsing_manager(root_config_parsing_manager):\n root_config_parsing_manager.add_argument('c', 'coco')\n root_config_parsing_manager.add_argument('d', 'xx', argument_type=int)\n\n check_parse_cli_result(root_config_parsing_manager, '-c 1', {'coco': '1'})\n\n check_parse_cli_result(root_config_parsing_manager, '-d 555', {'xx': 555})",
"def test_parsing_environment_variables_with_subgroups_and_long_and_short_names_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_long_and_short_names.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n expected_dict = load_configuration_from_json_file('root_manager_configuration_with_subgroups.json')\n expected_dict['input']['in1']['name'] = 'i1_name'\n expected_dict['output']['o1']['model'] = 'o1_model_x'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def _validate_inner(config: ParameterGroup):\n attr.validate(config)\n if config.groups:\n for group_name in config.groups:\n group = getattr(config, group_name)\n _validate_inner(group)",
"def _general_argument_parser(self, args_group):\n parser_handler = {UIConsts.MANDATORY: self._parse_mandatory_arguments,\n UIConsts.BASIC_MODE: self._parse_basic_mode_arguments,\n UIConsts.REGEX_MODE: self._parse_regex_mode_arguments,\n UIConsts.CUSTOM_MODE: self._parse_custom_mode_arguments}\n while self.num_of_attempts > 0:\n is_valid = parser_handler[args_group]()\n if not is_valid:\n continue\n else:\n self.num_of_attempts = 3\n return True\n return False",
"def test_config_priority_between_environ_variables_and_configuration_file_with_subgroups_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments,\n test_files_path):\n\n config_file_environment_variables = 'root_manager_configuration_with_subgroups_and_no_argument_default_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file_environment_variables,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n sys.argv.append('--config-file')\n\n sys.argv.append(test_files_path + '/root_manager_configuration_with_subgroups_and_long_and_short_names.json')\n\n expected_dict = load_configuration_from_json_file(config_file_environment_variables)\n expected_dict['input']['in1']['name'] = 'i1_name'\n expected_dict['output']['o1']['name'] = 'o1_name'\n expected_dict['output']['o2']['name'] = 'o2_name'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n sys.argv = []\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_parsing_environment_variables_with_subgroups_and_wrong_type_terminate_execution_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_wrong_argument_type_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n result = None\n with pytest.raises(SystemExit) as result:\n _ = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result.type == SystemExit\n assert result.value.code == -1\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_parsing_configuration_file_with_long_and_short_names_for_arguments_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_basic_configuration_with_long_and_short_names.json'\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['argumento2'] = expected_dict.pop('2')\n expected_dict['arg5'] = expected_dict.pop('5')\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse(args=('--config-file ' +\n test_files_path +\n '/' + config_file).split())\n assert result == expected_dict"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the type of argument is correctly checked by the parsing manager when a string is used as input | def test_arguments_string_parsing_type_checking_in_root_parsing_manager(root_config_parsing_manager):
root_config_parsing_manager.add_argument('c', argument_type=int)
with pytest.raises(BadTypeException):
check_parse_cli_result(root_config_parsing_manager, '-c string', {'c': 'string'})
check_parse_cli_result(root_config_parsing_manager, '-c 1', {'c': 1}) | [
"def input_type_check(data: object) -> None:\n if not isinstance(data, str):\n raise TypeError(\"Input data must be a 'str' object.\")",
"def __expectString(val):\n if type(val) != str:\n raise Exception('Expected string, received {}'.format(type(val)))",
"def test_parsing_of_arguments_string_with_wrong_type_raise_an_exception_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_argument('a', argument_type=int)\n\n with pytest.raises(BadTypeException):\n parser_manager._parse_cli('-a a'.split())",
"def parse_arg_type(arg):\n if type(arg) != str:\n return arg\n else:\n # check int\n try:\n return int(arg)\n except ValueError:\n pass\n # check float\n try:\n return float(arg)\n except ValueError:\n pass\n # check bool\n if arg.lower() == \"true\":\n return True\n elif arg.lower() == \"false\":\n return False\n # return any other string\n return arg",
"def check_type_arg_validity(arg):\n\n return arg.lower() in (\"o\", \"output\", \"i\", \"input\")",
"def test_any_type(self):\n\n @typecheck(int, None)\n def to_string(x, y):\n x = y\n return str(x)\n\n try:\n to_string(1, 9)\n except InvalidArgumentType:\n self.fail(\"Failed typecheck while it shouldn't have, given the first argument has the correct type and no type check should be performed on the second argument.\")",
"def _ValidateString(arg_internal_name, arg_value):\n if isinstance(arg_value, basestring):\n return arg_value\n if isinstance(arg_value, int): # convert int->str if str is really expected\n return str(arg_value)\n raise InvalidArgException(arg_internal_name, arg_value)",
"def _assert_type_string(self, name, val):\n self._assert_type(name, val, basestring)",
"def check_string(seq):\n if not isinstance(seq, str):\n assert False, \"Input is not a string.\"\n else:\n pass\n return None",
"def _assert_type_str_or_list(cls, variable, name_arg):\n assert isinstance(variable, (str, list)), \\\n \"Error: %s argument must be a string or a list.\" % name_arg",
"def test_listr_string_error(self):\n with pytest.raises(TypeError, match=\"Strings cannot be passed\"):\n _listr(\"abc\")",
"def test_string_argument_parsing():\n arguments = [\n {\n \"name\": \"firstname\",\n \"type\": \"str\",\n \"default\": \"Allysa P. Hacker\",\n },\n ]\n parser = reading.build_template_argparser(arguments)\n values = parser.parse_args([\"--firstname\", \"john\"])\n assert values.firstname == \"john\"",
"def test_valid_arg_coding(self):\n # Args should be unicode; if they are not unicode, they should be utf-8\n # decoded.\n args = ['ls', '-p', 'abc:def', 'gs://bucket']\n HandleArgCoding(args)\n for a in args:\n self.assertTrue(isinstance(a, six.text_type))",
"def test_judge_type_normal_case(self):\n # Try to judge with int string\n assert utils.judge_type(\"123\") == int\n # Try to judge with float string\n assert utils.judge_type(\"123.456\") == float\n # Try to judge with str string\n assert utils.judge_type(\"test string\") == str",
"def check_argument_type (arg_name, parsed_arg, argument_types):\n if argument_types and not isinstance(parsed_arg, argument_types):\n err_msg = (\"The '%s' argument should be %s:\\n%s\" %\n (arg_name, # argument name\n \" or \".join(\"%s\" % tt.__name__ for tt in argument_types), # type names\n get_parsed_arguments_str_for_errors(task_description, # bad arg in context of parsed\n \"%s = %r\" % (arg_name, parsed_arg),\n unnamed_result_strs, named_result_strs)))\n #print (err_msg, file=sys.stderr)\n raise TypeError(err_msg)\n\n return parsed_arg",
"def verify(typestring, typeTable):\n\tif typestring == inspect.Parameter.empty:\n\t\treturn True\n\n\tif isinstance(typestring, str):\n\t\tpass\n\telif isinstance(typestring, tuple) and len(typestring) == 2 and isinstance(typestring[0], str) and callable(typestring[1]):\n\t\ttypestring = typestring[0]\n\telse:\n\t\traise ValueError(\"Invalid typestring `%s': not a string or string/predicate\")\n\n\tif typestring.strip() == '':\n\t\treturn True\n\tdescribeTypestring(typestring, typeTable) # Will throw ValueError if bad\n\treturn True",
"def test_typecheck_raises_on_failed_check(self):\n \n @typecheck(int, int, prompt=str)\n def sum_string(x, y, prompt='The sum of {} and {} is {}.'):\n return prompt.format(str(x), str(y), str(x+y))\n\n try:\n sum_string(1, 2, prompt='{} + {} = {}')\n except InvalidArgumentType:\n self.fail(\"Failed typecheck while it shouldn't have.\")\n with self.assertRaises(InvalidArgumentType):\n sum_string('hello', 'world')",
"def validate_basestring(option, value):\n if isinstance(value, basestring):\n return value\n raise TypeError(\"Wrong type for %s, value must be an \"\n \"instance of basestring\" % (option,))",
"def test_string_unpacking(self):\n for source in [\"conf file\", \"keyval list\"]:\n self.assertEqual(self.setting.detect_type(u\"3.14\", source), \"float\")\n self.assertEqual(self.setting.detect_type(u\"-3.14\", source), \"float\")\n self.assertEqual(self.setting.detect_type(u'\"3.14\"', source), \"unicode\")\n self.assertEqual(self.setting.detect_type(u\"yes\", source), \"bool\")\n self.assertEqual(self.setting.detect_type(u\"True\", source), \"unicode\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the argument type is correctly validated by the parser when a dict is used as input | def test_validation_of_arguments_dict_type_checking_in_root_parsing_manager(root_config_parsing_manager):
root_config_parsing_manager.add_argument('c', argument_type=int)
str_dic = {'c': 'string'}
int_dic = {'c': 42}
with pytest.raises(BadTypeException):
root_config_parsing_manager.validate(str_dic)
assert root_config_parsing_manager.validate(int_dic) == int_dic | [
"def test_valchk_dict_value_type():\n\n allowed = {\"test\": str, \"test2\": int, \"test3\": bool}\n passed = badparams(allowed)\n ep = Endpoint()\n\n assert ep.__valchk__(passed, allowed) is False",
"def test_transformer_input_dict_error():\n msg = \"`user_item_dict` must be a dict\"\n with pytest.raises(TypeError, match=msg):\n Transformer(user_item_dict=\"dict\")",
"def test_dict_parameterized_serializer_type(self):\n type_data = self.dict_field_data.copy()\n type_data[\"types\"] = [self.dict_field_data[\"types\"]]\n wrong_type = ExampleDictFieldSerializer(data=type_data)\n with self.assertRaises(exceptions.ValidationError) as cm:\n wrong_type.is_valid(raise_exception=True)\n self.assertIn(\n 'expected a dictionary of items',\n cm.exception.detail[\"types\"][0].lower(),\n 'Wrong dict type validation error')",
"def validate_data_arg_type(data, mode):\n\n if not isinstance(data, dict):\n if not isinstance(data, (list, tuple)):\n raise WrongDataArgumentType(\n f\"\\nYou passed a {type(data)} the \\\"data\\\" argument.\\n\"\n + bad_data_error)\n\n else: # dictionaries are only for insertion.\n if mode not in [\"w\", \"wr\"]:\n raise WrongDataArgumentType(\n f\"\\nYou passed a {type(data)} the \\\"data\\\" argument while in\\n\"\n f\"{mode} mode. You can only pass dictionaries when inserting.\"\n + bad_data_error)",
"def validate_args(kwdict, section, key, dtype, default=None):\n\n # The input has already been parsed from the config file using\n # ast.literal_eval, so the dictionary values should have recognisable\n # python types.\n\n if default is not None:\n val = kwdict[section].pop(key, default)\n else:\n val = kwdict[section][key]\n\n if dtype is str:\n try:\n val = str(val).rstrip('/ ')\n except UnicodeError as err: # Pretty much the only error str() can raise\n raise\n elif dtype is int:\n try:\n val = int(val)\n except ValueError as err:\n raise\n elif dtype is float:\n try:\n val = float(val)\n except ValueError as err:\n raise\n elif dtype is bool:\n try:\n val = bool(val)\n except ValueError as err:\n raise\n else:\n raise NotImplementedError('Only str, int, bool, and float are valid types.')\n\n return val",
"def test_dict2Args(dictionary):\r\n result = dict2Args(dictionary)\r\n if result and isinstance(result, list):\r\n logger.debug(\"dict2Args converted Arguments are: {}\".format(result))\r\n else:\r\n pytest.fail(\"Failed to complete this test!\")",
"def __validateDictionary(dictionary: Dict[str, Any], *, keyType: type = str, dictionaryName: str = \"argument\") -> None:\n if dictionary is None:\n return\n if not type(dictionary) is dict:\n raise TypeError(f\"Provided {dictionaryName} '{dictionary}' is of type {type(dictionary).__name__}, it needs to be of type dict\")\n for key in dictionary.keys():\n if not type(key) is keyType:\n raise TypeError(f\"Key '{key}' in dictionary '{dictionaryName}' is of type {type(key).__name__}, it needs to be of type {keyType.__name__}\")",
"def test_item_not_a_dict(self):\n item_args = [\"this is not a dict\"]\n operation = \"dummy\"\n with pytest.raises(MWSError):\n parse_item_args(item_args, operation)",
"def verify_initdict(initdict: InitDict) -> None:\n if (\n not isinstance(initdict, dict)\n or ARGS_LABEL not in initdict\n or KWARGS_LABEL not in initdict\n ):\n raise ValueError(\"Not an InitDict dictionary\")",
"def test_dict_optional_args_with_autocast(self, request_args):\n request_args.data = {\n 'string': 'string',\n 'int': 3,\n 'none': None,\n 'datetime': datetime(2000, 1, 2, 3, 4, 5),\n 'date': date(2000, 1, 2),\n 'list': ['a', 'b', 'c'],\n 'dict': {\n 'a': 1,\n },\n }\n args = request_args.dict_optional_args(\n autocast_arguments_to_string=True,\n )\n assert args['data'] == {\n 'string': 'string',\n 'int': 3,\n 'none': None,\n 'datetime': '2000-01-02 03:04:05',\n 'date': '2000-01-02 00:00:00',\n 'list': '[\"a\", \"b\", \"c\"]',\n 'dict': '{\"a\": 1}',\n }",
"def test_dict_optional_args(self, request_args):\n args = request_args.dict_optional_args(\n autocast_arguments_to_string=False,\n )\n assert args['data'] == {'d1': 1, 'd2': 2}\n assert 'method' not in args\n assert 'url' not in args\n assert 'full_url' not in args",
"def test_setitem_check_new_valid_type(dictionary):\n\n val = list(dictionary.values())[0]\n matching = BaseMatching(dictionary)\n assert matching._check_new_valid_type(val, str) is None\n\n with pytest.raises(ValueError):\n matching._check_new_valid_type(val, float)",
"def test_dict(artifacts, expected_type):\n returned_type = artifacts_type.typed_dict(artifacts=artifacts)\n\n assert returned_type == expected_type",
"def test_preprocess_input_dict() -> None:\n input = json.dumps({\"inputs\": [\"test\"]})\n with pytest.raises(AssertionError):\n main.__process_input(input)",
"def test_PluggableTransport_parseArgumentsIntoDict_valid_list(self):\n pt = bridges.PluggableTransport()\n args = pt._parseArgumentsIntoDict([\"sharedsecret=foobar\",\n \"publickey=1234\"])\n self.assertIsInstance(args, dict)\n self.assertItemsEqual(args, {\"sharedsecret\": \"foobar\",\n \"publickey\": \"1234\"})",
"def test_arguments_dict_validation_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):\n subparser = SubgroupConfigParsingManager('toto')\n subparser.add_argument('b', is_flag=True, action=store_true)\n subparser.add_argument('type', is_flag=True, action=store_true)\n subparser.add_argument('n', 'name')\n root_config_parsing_manager.add_subgroup_parser('sub', subparser)\n\n dic_a = {'a': True}\n\n dic_z = {\n \"z\": True\n }\n\n dic_b = {\n 'b': \"type\"\n }\n\n dic_a_sub = {\n 'a': True,\n 'sub': {\n 'titi':\n {\n 'type': 'toto',\n 'b': \"type\"\n }\n }\n }\n\n with pytest.raises(UnknownArgException):\n root_config_parsing_manager.validate(dic_z)\n\n with pytest.raises(UnknownArgException):\n root_config_parsing_manager.validate(dic_b)\n\n assert root_config_parsing_manager.validate(dic_a) == dic_a\n\n assert root_config_parsing_manager.validate(dic_a_sub) == dic_a_sub\n\n assert root_config_parsing_manager.validate({}) == {}",
"def test_parser_dict(fresh_aiida_env, incar_dict_example):\n\n parser = IncarParser(data=get_data_node('dict', dict=incar_dict_example))\n assert isinstance(parser.incar, get_data_class('dict'))",
"def check_type_arg_validity(arg):\n\n return arg.lower() in (\"o\", \"output\", \"i\", \"input\")",
"def _type_check(self, key):\n if self._type == \"I\" and isinstance(key,str):\n raise TypeError(\"STDict keys is set as type int()\")\n\n elif self._type == \"S\" and isinstance(key,int):\n raise TypeError(\"STDict keys is set as type str()\")\n else:\n return"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that argument parsing binds the parsing result only to the long name for arguments that have both long and short names | def test_arguments_string_parsing_with_long_and_short_names_in_root_parsing_manager(root_config_parsing_manager):
root_config_parsing_manager.add_argument('c', 'coco')
root_config_parsing_manager.add_argument('d', 'xx', argument_type=int)
check_parse_cli_result(root_config_parsing_manager, '-c 1', {'coco': '1'})
check_parse_cli_result(root_config_parsing_manager, '-d 555', {'xx': 555}) | [
"def test_add_arguments_with_long_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.long_arg == ['help']\n parser_manager.add_argument('aaa')\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa=']\n parser_manager.add_argument('xx')\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa=', 'xx=']\n\n assert parser_manager.cli_parser.short_arg == 'h'",
"def test_parsing_of_arguments_string_with_subgroup_parser_with_long_and_short_arguments_names_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='sub')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('a', 'aaa', is_flag=True, action=store_true, default_value=False)\n subparser.add_argument('c', 'ttt', is_flag=False, action=store_val, argument_type=int)\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('sub', subparser)\n check_parse_cli_result(parser_manager, '--sub titi -a --name tutu -c 15',\n {'sub': {'tutu': {'aaa': True, 'type': 'titi', 'ttt': 15}}})",
"def test_add_arguments_with_two_long_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('coco', 'dodo')\n\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('ddddd', 'plplp')\n\n assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # -a, --help, -h and sub\n\n assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']\n assert root_config_parsing_manager.cli_parser.short_arg == 'ha'",
"def test_add_flag_arguments_with_long_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.long_arg == ['help']\n parser_manager.add_argument('aaa', is_flag=True)\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa']\n parser_manager.add_argument('tttt', is_flag=True)\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa', 'tttt']\n\n assert parser_manager.cli_parser.short_arg == 'h'",
"def test_add_arguments_with_two_short_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('c', 'd')\n\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('t', 's')\n\n assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # --help, -h and sub\n\n assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']\n assert root_config_parsing_manager.cli_parser.short_arg == 'ha'",
"def test_add_flag_arguments_and_no_flag_arguments_with_short_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.short_arg == 'h'\n parser_manager.add_argument('a', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'ha'\n parser_manager.add_argument('b')\n assert parser_manager.cli_parser.short_arg == 'hab:'\n parser_manager.add_argument('c', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'hab:c'\n\n assert len(parser_manager.cli_parser.long_arg) == 1 # Only help is arg argument\n\n assert parser_manager.cli_parser.long_arg == [\"help\"] # Only help is arg argument",
"def test_arg() -> None:\n parser = arg_parser()\n parsed = parser.parse_args(\n [\"--test\", \"test_name\", \"-n\", \"52\", \"--tool\", \"cwltool\", \"-j\", \"4\"]\n )\n assert parsed.test == \"test_name\"\n assert parsed.n == \"52\"\n assert parsed.tool == \"cwltool\"\n assert parsed.j == 4",
"def testCantUseBothLongAndShortName(self):\n params = dict(size='long', chs='short')\n self.assertRaises(KeyError, util.ShortenParameterNames, params)",
"def test_add_flag_arguments_with_short_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.short_arg == 'h'\n parser_manager.add_argument('a', is_flag=True)\n parser_manager.add_argument('x', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'hax'",
"def remove_long_internal():\n try:\n index = args.index(long_option_name)\n # Handle the exact match case.\n if takes_arg:\n removal_count = 2\n else:\n removal_count = 1\n del args[index:index+removal_count]\n return True\n except ValueError:\n # Thanks to argparse not handling options with known arguments\n # like other options parsing libraries (see\n # https://bugs.python.org/issue9334), we need to support the\n # --results-formatter-options={second-level-arguments} (note\n # the equal sign to fool the first-level arguments parser into\n # not treating the second-level arguments as first-level\n # options). We're certainly at risk of getting this wrong\n # since now we're forced into the business of trying to figure\n # out what is an argument (although I think this\n # implementation will suffice).\n for index in range(len(args)):\n match = long_regex.search(args[index])\n if match:\n del args[index]\n return True\n return False",
"def test_short_arguments():\n class MyForm(Form):\n short_arguments = dict(a='alpha', b='bravo', c='echo')\n\n @staticmethod\n def get_short_arguments():\n return dict(c='charlie', d='delta')\n\n alpha = StringField()\n bravo = StringField()\n charlie = StringField()\n delta = StringField()\n\n harness = Harness(MyForm)\n for name in ('alpha', 'bravo', 'charlie', 'delta'):\n assert name in harness\n harness[name].assert_short_name(name[0])\n\n expected = dict(alpha=None, bravo=None, charlie=None, delta=None)\n assert harness.result_for() == expected\n\n args = ('-aecho', '-bgolf', '-ckilo', '-dlima')\n expected = dict(alpha='echo', bravo='golf', charlie='kilo', delta='lima')\n assert harness.result_for(*args) == expected",
"def test_parsing_configuration_file_with_long_and_short_names_for_arguments_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_basic_configuration_with_long_and_short_names.json'\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['argumento2'] = expected_dict.pop('2')\n expected_dict['arg5'] = expected_dict.pop('5')\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse(args=('--config-file ' +\n test_files_path +\n '/' + config_file).split())\n assert result == expected_dict",
"def test_check_if_help_or_version_in_arguments(self):\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss', \"--help\"]\n with patch.object(sys, 'argv', test_args):\n self.assertEqual(argparse_handler.check_if_help_or_version_in_arguments(), 'help')\n\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss', \"-h\"]\n with patch.object(sys, 'argv', test_args):\n self.assertEqual(argparse_handler.check_if_help_or_version_in_arguments(), 'help')\n\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss', \"--version\"]\n with patch.object(sys, 'argv', test_args):\n self.assertEqual(argparse_handler.check_if_help_or_version_in_arguments(), 'version')\n\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss']\n with patch.object(sys, 'argv', test_args):\n self.assertIsNone(argparse_handler.check_if_help_or_version_in_arguments())",
"def test_string_argument_parsing():\n arguments = [\n {\n \"name\": \"firstname\",\n \"type\": \"str\",\n \"default\": \"Allysa P. Hacker\",\n },\n ]\n parser = reading.build_template_argparser(arguments)\n values = parser.parse_args([\"--firstname\", \"john\"])\n assert values.firstname == \"john\"",
"def test_list_argument_parsing():\n arguments = [\n {\n \"name\": \"places\",\n \"type\": \"list\",\n \"default\": None\n }\n ]\n parser = reading.build_template_argparser(arguments)\n values = parser.parse_args([\"--places\", \"hawaii\", \"california\", \"oregon\"])\n assert values.places == [\"hawaii\", \"california\", \"oregon\"]\n\n values_with_spaces = parser.parse_args(['--places', \"california\",\n \"new mexico\", \"washington\"])\n assert values_with_spaces.places == [\"california\",\n \"new mexico\",\n \"washington\"]",
"def test_add_argument_show(self):\n self.subcommand_class = self.commands.get('classificationobject-show')\n parser = argparse.ArgumentParser(description='parser for arguments')\n parser = self.subcommand_class().add_known_arguments(parser)\n args = parser.parse_known_args()\n arg1 = parser.parse_args()\n self.assertEqual(arg1.id, None)",
"def test_prefix_and_suffix_options_valid():\n res = cli.parse_args([\n \"-i\", os.path.join(IMAGES, \"jpg\"),\n \"--prefix\", \"no_exif\",\n \"--suffix\", \"no_exif\",\n ])\n assert res.suffix == \"no_exif\"\n assert res.prefix == \"no_exif\"",
"def test_add_argument_list(self):\n self.subcommand_class = self.commands.get('classificationobject-list')\n parser = argparse.ArgumentParser(description='parser for arguments')\n parser = self.subcommand_class().add_known_arguments(parser)\n args = parser.parse_known_args()\n arg1 = parser.parse_args()\n self.assertEqual(arg1.show_details,None)\n self.assertEqual(arg1.fields,None)\n self.assertEqual(arg1.sort_key,None)\n self.assertEqual(arg1.sort_direction,None)",
"def test_maya_name_correct_long_name_MB():\r\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that adding arguments with two short names to a parser raises a SameLengthArgumentNamesException. The arguments are not added | def test_add_arguments_with_two_short_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):
with pytest.raises(SameLengthArgumentNamesException):
root_config_parsing_manager.add_argument('c', 'd')
with pytest.raises(SameLengthArgumentNamesException):
root_config_parsing_manager.add_argument('t', 's')
assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # --help, -h and sub
assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']
assert root_config_parsing_manager.cli_parser.short_arg == 'ha' | [
"def test_add_arguments_with_two_long_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('coco', 'dodo')\n\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('ddddd', 'plplp')\n\n assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # -a, --help, -h and sub\n\n assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']\n assert root_config_parsing_manager.cli_parser.short_arg == 'ha'",
"def test_add_arguments_with_long_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.long_arg == ['help']\n parser_manager.add_argument('aaa')\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa=']\n parser_manager.add_argument('xx')\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa=', 'xx=']\n\n assert parser_manager.cli_parser.short_arg == 'h'",
"def test_add_flag_arguments_and_no_flag_arguments_with_short_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.short_arg == 'h'\n parser_manager.add_argument('a', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'ha'\n parser_manager.add_argument('b')\n assert parser_manager.cli_parser.short_arg == 'hab:'\n parser_manager.add_argument('c', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'hab:c'\n\n assert len(parser_manager.cli_parser.long_arg) == 1 # Only help is arg argument\n\n assert parser_manager.cli_parser.long_arg == [\"help\"] # Only help is arg argument",
"def test_arguments_string_parsing_with_long_and_short_names_in_root_parsing_manager(root_config_parsing_manager):\n root_config_parsing_manager.add_argument('c', 'coco')\n root_config_parsing_manager.add_argument('d', 'xx', argument_type=int)\n\n check_parse_cli_result(root_config_parsing_manager, '-c 1', {'coco': '1'})\n\n check_parse_cli_result(root_config_parsing_manager, '-d 555', {'xx': 555})",
"def test_add_flag_arguments_with_short_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.short_arg == 'h'\n parser_manager.add_argument('a', is_flag=True)\n parser_manager.add_argument('x', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'hax'",
"def test_add_flag_arguments_with_long_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.long_arg == ['help']\n parser_manager.add_argument('aaa', is_flag=True)\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa']\n parser_manager.add_argument('tttt', is_flag=True)\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa', 'tttt']\n\n assert parser_manager.cli_parser.short_arg == 'h'",
"def add_argument(self, *args, **kwargs):\n if not args or len(args) == 1 and args[0][0] not in self.prefix_chars:\n raise ValueError(\"Positional arguments are not allowed! Defining them could mess up grid running!\")\n return super(AthenaArgumentParser, self).add_argument(*args, **kwargs)",
"def test_add_argument_to_cli_parser_that_already_exist_raise_an_exception():\n\n parser_manager = RootConfigParsingManager()\n parser_manager.add_argument('a')\n\n with pytest.raises(AlreadyAddedArgumentException):\n parser_manager.add_argument('a')\n\n with pytest.raises(AlreadyAddedArgumentException):\n parser_manager.add_argument('help')\n\n assert len(parser_manager.cli_parser.arguments) == 3 # help argument + a argument",
"def check_arguments():\n global nargs, progname\n nargs = len(sys.argv) - 1\n progname = os.path.basename(sys.argv[0])\n flag = True\n if nargs != 0 and N_ARGUMENTS[-1] == '*':\n flag = False\n else:\n for i in N_ARGUMENTS:\n if nargs == i:\n flag = False\n if flag:\n usage()",
"def validate_args(argv):\n\tif len(argv) < 2:\n\t\tprint \"Insufficient command line arguments\"\n\t\tusage()\n\t\tsys.exit(-1)\n\tif len(argv) > 2:\n\t\tprint \"Too many command line arguments, extra arguments ignored\"",
"def remove_long_internal():\n try:\n index = args.index(long_option_name)\n # Handle the exact match case.\n if takes_arg:\n removal_count = 2\n else:\n removal_count = 1\n del args[index:index+removal_count]\n return True\n except ValueError:\n # Thanks to argparse not handling options with known arguments\n # like other options parsing libraries (see\n # https://bugs.python.org/issue9334), we need to support the\n # --results-formatter-options={second-level-arguments} (note\n # the equal sign to fool the first-level arguments parser into\n # not treating the second-level arguments as first-level\n # options). We're certainly at risk of getting this wrong\n # since now we're forced into the business of trying to figure\n # out what is an argument (although I think this\n # implementation will suffice).\n for index in range(len(args)):\n match = long_regex.search(args[index])\n if match:\n del args[index]\n return True\n return False",
"def test_argparse_version(self) -> None:\n SUPPORTED_ARGPARSERS = [\"1.1\"]\n self.assertIn(argparse.__version__, SUPPORTED_ARGPARSERS) # type: ignore",
"def require_args(args, min, msg):\n if len(args) < min:\n raise optparse.OptParseError(msg)",
"def check_conflicting(self, arguments):\n for lists in self.mutually_exclusive_options:\n commands_taken_from_list = []\n for command in lists[0]:\n if command in arguments:\n commands_taken_from_list.append(command)\n if len(commands_taken_from_list) > 1:\n raise ValidationError('The commands ' + ' '.join(str(conflicting_command)\n\t\t\t\tfor conflicting_command in commands_taken_from_list) + ' cannot be used together')\n if lists[1] is True:\n if len(commands_taken_from_list) < 1:\n raise ValidationError('Atleast one from the mutually exclusive options '+\n\t\t\t\t ' '.join(str(command) for command in lists[0])+ ' required')",
"def test_add_Argument_with_valid_parser(self):\n# pdb.set_trace()\n self.subcommand_class = self.commands.get('classificationobject-create')\n parser = argparse.ArgumentParser(description='parser for classificationobject arguments')\n parser = self.subcommand_class().add_known_arguments(parser)\n args = parser.parse_known_args()\n arg1 = parser.parse_args()\n print(\"arg1 in test_add_Argument_with_valid_parser:\", arg1)\n self.assertEqual(arg1.name,'test')\n self.assertEqual(arg1.src_ip,None)\n self.assertEqual(arg1.src_mac,None) \n self.assertEqual(arg1.src_ip_subnet,None)\n self.assertEqual(arg1.minimum_src_port,None)\n self.assertEqual(arg1.maximum_src_port,None)\n self.assertEqual(arg1.dst_ip,None)\n self.assertEqual(arg1.dst_mac,None)\n self.assertEqual(arg1.dst_ip_subnet,None)\n self.assertEqual(arg1.minimum_dst_port,None)\n self.assertEqual(arg1.maximum_dst_port,None)\n self.assertEqual(arg1.protocol,None)",
"def testCantUseBothLongAndShortName(self):\n params = dict(size='long', chs='short')\n self.assertRaises(KeyError, util.ShortenParameterNames, params)",
"def is_argparse_add_argument(node):\n return (\n isinstance(node, Expr)\n and isinstance(node.value, Call)\n and isinstance(node.value.func, Attribute)\n and node.value.func.attr == \"add_argument\"\n and isinstance(node.value.func.value, Name)\n and node.value.func.value.id == \"argument_parser\"\n )",
"def test_parsing_of_arguments_string_with_subgroup_parser_with_long_and_short_arguments_names_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='sub')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('a', 'aaa', is_flag=True, action=store_true, default_value=False)\n subparser.add_argument('c', 'ttt', is_flag=False, action=store_val, argument_type=int)\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('sub', subparser)\n check_parse_cli_result(parser_manager, '--sub titi -a --name tutu -c 15',\n {'sub': {'tutu': {'aaa': True, 'type': 'titi', 'ttt': 15}}})",
"def test_add_shared_args(self):\n parser = argparse.ArgumentParser()\n add_shared_args(parser)\n args = parser.parse_args(['--version', '-v'])\n self.assertTrue(args.version)\n self.assertTrue(args.verbose)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that adding arguments with two long names to a parser raises a SameLengthArgumentNamesException. The arguments are not added | def test_add_arguments_with_two_long_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):
with pytest.raises(SameLengthArgumentNamesException):
root_config_parsing_manager.add_argument('coco', 'dodo')
with pytest.raises(SameLengthArgumentNamesException):
root_config_parsing_manager.add_argument('ddddd', 'plplp')
assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # -a, --help, -h and sub
assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']
assert root_config_parsing_manager.cli_parser.short_arg == 'ha' | [
"def test_add_arguments_with_two_short_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('c', 'd')\n\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('t', 's')\n\n assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # --help, -h and sub\n\n assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']\n assert root_config_parsing_manager.cli_parser.short_arg == 'ha'",
"def test_add_arguments_with_long_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.long_arg == ['help']\n parser_manager.add_argument('aaa')\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa=']\n parser_manager.add_argument('xx')\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa=', 'xx=']\n\n assert parser_manager.cli_parser.short_arg == 'h'",
"def remove_long_internal():\n try:\n index = args.index(long_option_name)\n # Handle the exact match case.\n if takes_arg:\n removal_count = 2\n else:\n removal_count = 1\n del args[index:index+removal_count]\n return True\n except ValueError:\n # Thanks to argparse not handling options with known arguments\n # like other options parsing libraries (see\n # https://bugs.python.org/issue9334), we need to support the\n # --results-formatter-options={second-level-arguments} (note\n # the equal sign to fool the first-level arguments parser into\n # not treating the second-level arguments as first-level\n # options). We're certainly at risk of getting this wrong\n # since now we're forced into the business of trying to figure\n # out what is an argument (although I think this\n # implementation will suffice).\n for index in range(len(args)):\n match = long_regex.search(args[index])\n if match:\n del args[index]\n return True\n return False",
"def test_add_flag_arguments_with_long_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.long_arg == ['help']\n parser_manager.add_argument('aaa', is_flag=True)\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa']\n parser_manager.add_argument('tttt', is_flag=True)\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa', 'tttt']\n\n assert parser_manager.cli_parser.short_arg == 'h'",
"def test_arguments_string_parsing_with_long_and_short_names_in_root_parsing_manager(root_config_parsing_manager):\n root_config_parsing_manager.add_argument('c', 'coco')\n root_config_parsing_manager.add_argument('d', 'xx', argument_type=int)\n\n check_parse_cli_result(root_config_parsing_manager, '-c 1', {'coco': '1'})\n\n check_parse_cli_result(root_config_parsing_manager, '-d 555', {'xx': 555})",
"def test_add_argument_to_cli_parser_that_already_exist_raise_an_exception():\n\n parser_manager = RootConfigParsingManager()\n parser_manager.add_argument('a')\n\n with pytest.raises(AlreadyAddedArgumentException):\n parser_manager.add_argument('a')\n\n with pytest.raises(AlreadyAddedArgumentException):\n parser_manager.add_argument('help')\n\n assert len(parser_manager.cli_parser.arguments) == 3 # help argument + a argument",
"def add_argument(self, *args, **kwargs):\n if not args or len(args) == 1 and args[0][0] not in self.prefix_chars:\n raise ValueError(\"Positional arguments are not allowed! Defining them could mess up grid running!\")\n return super(AthenaArgumentParser, self).add_argument(*args, **kwargs)",
"def check_arguments():\n global nargs, progname\n nargs = len(sys.argv) - 1\n progname = os.path.basename(sys.argv[0])\n flag = True\n if nargs != 0 and N_ARGUMENTS[-1] == '*':\n flag = False\n else:\n for i in N_ARGUMENTS:\n if nargs == i:\n flag = False\n if flag:\n usage()",
"def test_add_flag_arguments_and_no_flag_arguments_with_short_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.short_arg == 'h'\n parser_manager.add_argument('a', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'ha'\n parser_manager.add_argument('b')\n assert parser_manager.cli_parser.short_arg == 'hab:'\n parser_manager.add_argument('c', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'hab:c'\n\n assert len(parser_manager.cli_parser.long_arg) == 1 # Only help is arg argument\n\n assert parser_manager.cli_parser.long_arg == [\"help\"] # Only help is arg argument",
"def validate_args(argv):\n\tif len(argv) < 2:\n\t\tprint \"Insufficient command line arguments\"\n\t\tusage()\n\t\tsys.exit(-1)\n\tif len(argv) > 2:\n\t\tprint \"Too many command line arguments, extra arguments ignored\"",
"def test_argparse_version(self) -> None:\n SUPPORTED_ARGPARSERS = [\"1.1\"]\n self.assertIn(argparse.__version__, SUPPORTED_ARGPARSERS) # type: ignore",
"def _check_arg_consistency(datasets):\n specified = False\n assigned_args = []\n for dataset in datasets:\n if dataset.get('arg'):\n if dataset['arg'] in assigned_args:\n raise ArgError(f'arg {dataset[\"arg\"]} already assigned.')\n assigned_args.append(dataset['arg'])\n specified = True\n if specified and not dataset.get('arg'):\n raise ArgError",
"def check_conflicting(self, arguments):\n for lists in self.mutually_exclusive_options:\n commands_taken_from_list = []\n for command in lists[0]:\n if command in arguments:\n commands_taken_from_list.append(command)\n if len(commands_taken_from_list) > 1:\n raise ValidationError('The commands ' + ' '.join(str(conflicting_command)\n\t\t\t\tfor conflicting_command in commands_taken_from_list) + ' cannot be used together')\n if lists[1] is True:\n if len(commands_taken_from_list) < 1:\n raise ValidationError('Atleast one from the mutually exclusive options '+\n\t\t\t\t ' '.join(str(command) for command in lists[0])+ ' required')",
"def require_args(args, min, msg):\n if len(args) < min:\n raise optparse.OptParseError(msg)",
"def testTooFewArgumentsFails(self):\n # No arguments for set, but valid subcommand.\n stderr = self.RunGsUtil(self._set_rp_cmd,\n return_stderr=True,\n expected_status=1)\n self.assertIn('command requires at least', stderr)\n\n # No arguments for get, but valid subcommand.\n stderr = self.RunGsUtil(self._get_rp_cmd,\n return_stderr=True,\n expected_status=1)\n self.assertIn('command requires at least', stderr)\n\n # Neither arguments nor subcommand.\n stderr = self.RunGsUtil(['requesterpays'],\n return_stderr=True,\n expected_status=1)\n self.assertIn('command requires at least', stderr)",
"def test_add_flag_arguments_with_short_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.short_arg == 'h'\n parser_manager.add_argument('a', is_flag=True)\n parser_manager.add_argument('x', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'hax'",
"def check_nb_args(cmd, maxi=None, mini=1):\n if len(cmd) < mini:\n print(\"Error: wrong number of arguments\")\n return False\n if maxi != None:\n if len(cmd) > maxi:\n print(\"Error: wrong number of arguments\")\n return False\n return True",
"def _test_parse_args_fails(self, args: str) -> None:\n with self.assertRaises(OatmealParseError):\n OatmealMsg._parse_args(args.encode('ascii'))",
"def test_check_shared_args(self):\n parser = argparse.ArgumentParser()\n add_shared_args(parser)\n parser.parse_args(['--version', '-v'])\n check_shared_args(parser.parse_args(['-v']))\n self.assertRaises(SystemExit, check_shared_args, parser.parse_args(['--version']))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that parsing arguments with a wrong value type raises a BadTypeException | def test_parsing_of_arguments_string_with_wrong_type_raise_an_exception_in_root_parsing_manager():
parser_manager = RootConfigParsingManager()
parser_manager.add_argument('a', argument_type=int)
with pytest.raises(BadTypeException):
parser_manager._parse_cli('-a a'.split()) | [
"def test_arguments_string_parsing_type_checking_in_root_parsing_manager(root_config_parsing_manager):\n root_config_parsing_manager.add_argument('c', argument_type=int)\n\n with pytest.raises(BadTypeException):\n check_parse_cli_result(root_config_parsing_manager, '-c string', {'c': 'string'})\n\n check_parse_cli_result(root_config_parsing_manager, '-c 1', {'c': 1})",
"def test_validation_of_arguments_dict_type_checking_in_root_parsing_manager(root_config_parsing_manager):\n root_config_parsing_manager.add_argument('c', argument_type=int)\n\n str_dic = {'c': 'string'}\n int_dic = {'c': 42}\n\n with pytest.raises(BadTypeException):\n root_config_parsing_manager.validate(str_dic)\n\n assert root_config_parsing_manager.validate(int_dic) == int_dic",
"def test_params_value_type_check(test_endpoint):\n\n with pytest.raises(ValueError):\n test_endpoint.params = badparams(test_endpoint._Endpoint__allowed_params)",
"def test_invalid_type():\n with pytest.raises(TypeError):\n # Test with string value\n assert calculate_E_min(B_degrees=\"blah\")\n with pytest.raises(ValueError):\n # Test with NaN value\n assert calculate_E_min(B_degrees=nan)\n with pytest.raises(ValueError):\n # Test with infinite value\n assert calculate_E_min(B_degrees=inf)",
"def test_bad_message_no_type():\n with pytest.raises(ValueError):\n Message.parse_obj({\"test\": \"test\"})",
"def test_argument_raises(self):\n with self.assertRaises(TypeError):\n # These functions should raise TypeErrors\n type_hint_test(1, 1, 1)\n type_hint_test('a', 'a', 'a')",
"def test_typecheck_raises_on_failed_check(self):\n \n @typecheck(int, int, prompt=str)\n def sum_string(x, y, prompt='The sum of {} and {} is {}.'):\n return prompt.format(str(x), str(y), str(x+y))\n\n try:\n sum_string(1, 2, prompt='{} + {} = {}')\n except InvalidArgumentType:\n self.fail(\"Failed typecheck while it shouldn't have.\")\n with self.assertRaises(InvalidArgumentType):\n sum_string('hello', 'world')",
"def test_should_raise_error_if_type_is_invalid(self):\n with self.assertRaises(ValueError):\n self.spec_parser.parse_statement({'type': 'sugar'})",
"def test_bad_message_type(type_str):\n with pytest.raises(InvalidType):\n Message({'@type': type_str})",
"def test_bad_message_type(type_str):\n with pytest.raises(InvalidType):\n MsgType(type_str)",
"def _test_parse_args_fails(self, args: str) -> None:\n with self.assertRaises(OatmealParseError):\n OatmealMsg._parse_args(args.encode('ascii'))",
"def test_validator_invalid_values_for_stdtype_should_raise_typeerror(self):\n with self.assertRaises(TypeError):\n self.dummy.stdtype_bytearray = 'invalid_type'\n with self.assertRaises(TypeError):\n self.dummy.stdtype_bytes = 'invalid_type'\n with self.assertRaises(TypeError):\n self.dummy.stdtype_complex = 'invalid_type'\n with self.assertRaises(TypeError):\n self.dummy.stdtype_dict = 'invalid_type'\n with self.assertRaises(TypeError):\n self.dummy.stdtype_float = 'invalid_type'\n with self.assertRaises(TypeError):\n self.dummy.stdtype_frozenset = 'invalid_type'\n with self.assertRaises(TypeError):\n self.dummy.stdtype_int = 'invalid_type'\n with self.assertRaises(TypeError):\n self.dummy.stdtype_list = 'invalid_type'\n with self.assertRaises(TypeError):\n self.dummy.stdtype_memoryview = 'invalid_type'\n with self.assertRaises(TypeError):\n self.dummy.stdtype_range = 'invalid_type'\n with self.assertRaises(TypeError):\n self.dummy.stdtype_set = 'invalid_type'\n with self.assertRaises(TypeError):\n self.dummy.stdtype_str = ['invalid_type']\n with self.assertRaises(TypeError):\n self.dummy.stdtype_tuple = 'invalid_type'",
"def test_invalidArguments(self):\n class Unexpected(object):\n def __str__(self):\n return \"wrong\"\n def __repr__(self):\n return \"<unexpected>\"\n defaultExpectation = \"unicode\" if bytes is str else \"str\"\n def assertRaised(raised, expectation, name):\n self.assertEqual(str(raised.exception),\n \"expected {} for {}, got {}\".format(\n expectation,\n name, \"<unexpected>\"))\n\n def check(param, expectation=defaultExpectation):\n with self.assertRaises(TypeError) as raised:\n URL(**{param: Unexpected()})\n assertRaised(raised, expectation, param)\n check(\"scheme\")\n check(\"host\")\n check(\"fragment\")\n check(\"rooted\", \"bool\")\n check(\"userinfo\")\n check(\"port\", \"int or NoneType\")\n\n with self.assertRaises(TypeError) as raised:\n URL(path=[Unexpected(),])\n assertRaised(raised, defaultExpectation, \"path segment\")\n with self.assertRaises(TypeError) as raised:\n URL(query=[(u\"name\", Unexpected()),])\n assertRaised(raised, defaultExpectation + \" or NoneType\",\n \"query parameter value\")\n with self.assertRaises(TypeError) as raised:\n URL(query=[(Unexpected(), u\"value\"),])\n assertRaised(raised, defaultExpectation, \"query parameter name\")\n # No custom error message for this one, just want to make sure\n # non-2-tuples don't get through.\n with self.assertRaises(TypeError):\n URL(query=[Unexpected()])\n with self.assertRaises(ValueError):\n URL(query=[(u'k', u'v', u'vv')])\n with self.assertRaises(ValueError):\n URL(query=[(u'k',)])\n\n url = URL.fromText(\"https://valid.example.com/\")\n with self.assertRaises(TypeError) as raised:\n url.child(Unexpected())\n assertRaised(raised, defaultExpectation, \"path segment\")\n with self.assertRaises(TypeError) as raised:\n url.sibling(Unexpected())\n assertRaised(raised, defaultExpectation, \"path segment\")\n with self.assertRaises(TypeError) as raised:\n url.click(Unexpected())\n assertRaised(raised, defaultExpectation, \"relative URL\")",
"def test_instantiation_with_wrong_data_type_fails(\n self, param: str, value_wrong_type: Any, config: Dict[str, Any]\n ):\n\n config[param] = value_wrong_type\n with pytest.raises(ValueError):\n BinnedUniformSampler(**config)",
"def test_bad_values(self):\n self.assertOK(['upgrade'])\n self.assertOK(['foo'])\n self.assertRaisesInternalError([1])\n self.assertRaisesInternalError([''])\n self.assertRaisesInternalError([';'])\n self.assertRaisesInternalError(['('])\n self.assertRaisesInternalError([None])",
"def test_parse_int_invalid_input(test_input):\n with pytest.raises(ValueError):\n parse_int(test_input)",
"def test_invalid_file_like_types(file):\n with pytest.raises(ValueError) as e:\n WeldxFile(file)\n assert \"path\" in e.value.args[0]",
"def test_schema_invalid_type(self):\n bad_schema = -77\n with self.assertRaisesRegexp(Exception, \"Unsupported type <type \\'int\\'> for schema parameter\"):\n self.context.frame.import_csv(self.dataset, schema=bad_schema)",
"def test_bad_message_no_type():\n with pytest.raises(InvalidMessage):\n Message({'test': 'test'})"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that adding a subgroup parser that already exists raises an AlreadyAddedSubparserException | def test_add_subgroup_parser_that_already_exists_raises_an_exception_in_root_parsing_manager():
parser_manager = RootConfigParsingManager()
parser_manager.add_subgroup(name='toto')
subparser = SubgroupConfigParsingManager('titi')
subparser.add_argument('n', 'name')
parser_manager.add_subgroup_parser('toto', subparser)
repeated_subparser = SubgroupConfigParsingManager('titi')
repeated_subparser.add_argument('n', 'name')
with pytest.raises(AlreadyAddedSubparserException):
parser_manager.add_subgroup_parser('toto', repeated_subparser) | [
"def test_add_subgroup_parser_without_name_argument_raise_an_exception_in_root_parsing_manager():\n parser = RootConfigParsingManager()\n subparser = SubgroupConfigParsingManager('titi')\n\n with pytest.raises(SubgroupParserWithoutNameArgumentException):\n parser.add_subgroup_parser('toto', subparser)",
"def test_add_subgroup_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n\n assert len(parser_manager.cli_parser.subgroup_parsers) == 0\n\n parser_manager.add_subgroup(name='sub')\n\n assert len(parser_manager.cli_parser.subgroup_parsers) == 1\n\n parser_manager.add_subgroup(name='sub1')\n\n assert len(parser_manager.cli_parser.subgroup_parsers) == 2\n\n parser_manager.add_subgroup(name='sub3')\n\n assert len(parser_manager.cli_parser.subgroup_parsers) == 3",
"def test_arguments_string_parsing_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):\n\n subparser = SubgroupConfigParsingManager('toto')\n subparser.add_argument('b', is_flag=True, action=store_true)\n subparser.add_argument('n', 'name')\n root_config_parsing_manager.add_subgroup_parser('sub', subparser)\n\n check_parse_cli_result(root_config_parsing_manager, \"\", {})\n\n with pytest.raises(UnknownArgException):\n check_parse_cli_result(root_config_parsing_manager, \"-z\", {})\n\n check_parse_cli_result(root_config_parsing_manager, '-a', {'a': True})\n\n with pytest.raises(NoNameSpecifiedForSubgroupException):\n check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b', {})\n\n check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b --name titi',\n {'a': True, 'sub': {'titi': {'type': 'toto', 'b': True}}})\n\n with pytest.raises(BadContextException):\n check_parse_cli_result(root_config_parsing_manager, \"-b\", {})",
"def test_add_argument_to_cli_parser_that_already_exist_raise_an_exception():\n\n parser_manager = RootConfigParsingManager()\n parser_manager.add_argument('a')\n\n with pytest.raises(AlreadyAddedArgumentException):\n parser_manager.add_argument('a')\n\n with pytest.raises(AlreadyAddedArgumentException):\n parser_manager.add_argument('help')\n\n assert len(parser_manager.cli_parser.arguments) == 3 # help argument + a argument",
"def subgroup_parser():\n parser = SubgroupConfigParser('test')\n parser.add_argument('a', is_flag=True)\n return parser",
"def test_severity_add_error_already_exists(self):\n self.execute('severity add blocker')\n rv, output = self.execute('severity add blocker')\n self.assertEqual(2, rv, output)\n self.assertExpectedResult(output)",
"def test_arguments_dict_validation_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):\n subparser = SubgroupConfigParsingManager('toto')\n subparser.add_argument('b', is_flag=True, action=store_true)\n subparser.add_argument('type', is_flag=True, action=store_true)\n subparser.add_argument('n', 'name')\n root_config_parsing_manager.add_subgroup_parser('sub', subparser)\n\n dic_a = {'a': True}\n\n dic_z = {\n \"z\": True\n }\n\n dic_b = {\n 'b': \"type\"\n }\n\n dic_a_sub = {\n 'a': True,\n 'sub': {\n 'titi':\n {\n 'type': 'toto',\n 'b': \"type\"\n }\n }\n }\n\n with pytest.raises(UnknownArgException):\n root_config_parsing_manager.validate(dic_z)\n\n with pytest.raises(UnknownArgException):\n root_config_parsing_manager.validate(dic_b)\n\n assert root_config_parsing_manager.validate(dic_a) == dic_a\n\n assert root_config_parsing_manager.validate(dic_a_sub) == dic_a_sub\n\n assert root_config_parsing_manager.validate({}) == {}",
"def test_priority_add_error_already_exists(self):\n rv, output = self.execute('priority add blocker')\n self.assertEqual(2, rv, output)\n self.assertExpectedResult(output)",
"def test_repeat_operation_group_definition(self):\n\n class A(FlowProject):\n pass\n\n foo_group = A.make_group(\"foo\")\n\n with pytest.raises(FlowProjectDefinitionError):\n\n @foo_group\n @foo_group\n @A.operation\n def foo_operation(job):\n pass",
"def add_to_parser(cls, subparsers):\n gene_assignment_group = subparsers.add_parser(\"gene_assignment\")\n gene_assignment_group.add_argument(\"--coordinates-geojson\", type=FsExistsType(), required=True)\n gene_assignment_group.add_argument(\"--spots-json\", type=FsExistsType(), required=True)\n gene_assignment_group.add_argument(\"-o\", \"--output\", required=True)\n gene_assignment_group.set_defaults(starfish_command=GeneAssignment._cli)\n gene_assignment_subparsers = gene_assignment_group.add_subparsers(dest=\"gene_assignment_algorithm_class\")\n\n for algorithm_cls in cls.algorithm_to_class_map().values():\n group_parser = gene_assignment_subparsers.add_parser(algorithm_cls.get_algorithm_name())\n group_parser.set_defaults(gene_assignment_algorithm_class=algorithm_cls)\n algorithm_cls.add_arguments(group_parser)\n\n cls.gene_assignment_group = gene_assignment_group",
"def test_parsing_of_arguments_string_with_subgroup_parser_with_long_and_short_arguments_names_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='sub')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('a', 'aaa', is_flag=True, action=store_true, default_value=False)\n subparser.add_argument('c', 'ttt', is_flag=False, action=store_val, argument_type=int)\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('sub', subparser)\n check_parse_cli_result(parser_manager, '--sub titi -a --name tutu -c 15',\n {'sub': {'tutu': {'aaa': True, 'type': 'titi', 'ttt': 15}}})",
"def test_add_common_args_with_group(self):\n # Add a requirement group to a different parser that we pass in to make sure we will use\n # the required group passed in.\n temp_subparser_2 = self.subparsers.add_parser('eat2', help='eat some food again.')\n required_group = temp_subparser_2.add_argument_group(\"required arguments\")\n driver._add_common_args(parser=self.temp_subparser, required_group=required_group)\n actions = self.temp_subparser.__dict__['_option_string_actions']\n temp_subparser_2_actions = temp_subparser_2.__dict__['_option_string_actions']\n self.assertIn('--database', temp_subparser_2_actions.keys())\n self.assertIn('-d', temp_subparser_2_actions.keys())\n\n self.assertIn('--database-type', actions.keys())\n self.assertIn('--keyspace', actions.keys())\n self.assertTrue(len(actions), 2)",
"def test_create_sec_grp_no_name(self):\n with self.assertRaises(Exception):\n sec_grp_settings = SecurityGroupConfig()\n self.security_groups.append(\n neutron_utils.create_security_group(\n self.neutron, self.keystone, sec_grp_settings))",
"def make_subparser(command):\n try:\n subparser = subparsers.add_parser(command, aliases=aliases[command])\n except TypeError, ex:\n # Workaround for http://bugs.python.org/issue13879\n if ex.args != (\"__init__() got an unexpected keyword argument 'aliases'\",):\n raise\n subparser = subparsers.add_parser(command)\n return subparser",
"def test_component_add_error_already_exists(self):\n rv, output = self.execute('component add component1 new_user')\n self.assertEqual(2, rv, output)\n self.assertExpectedResult(output)",
"def _add_subparsers(self):\n runner = self.subparsers.add_parser(\"run\", help=\"Run a Test\")\n runner.add_argument(\"glob\", help=\"A file glob to match config files (default='%(default)s').\",\n metavar=\"<config-file glob>\",\n default=\"*.ini\",\n nargs=\"?\")\n runner.set_defaults(function=self.strategerizer.run)\n\n fetcher = self.subparsers.add_parser(\"fetch\", help=\"Fetch a sample config file.\")\n fetcher.set_defaults(function=self.strategerizer.fetch)\n\n tester = self.subparsers.add_parser('test', help='Test your setup.')\n tester.add_argument(\"glob\", help=\"A file glob to match config files (e.g. *.ini - default='%(default)s').\",\n metavar=\"<config-file glob>\",\n default=\"*.ini\",\n nargs=\"?\")\n\n tester.set_defaults(function=self.strategerizer.test)\n\n #helper = self.subparsers.add_parser(\"help\", help=\"Show more help\")\n #helper.add_argument('topic', help=\"A specific subject to inquire about.\", nargs=\"?\")\n #helper.set_defaults(function=self.strategerizer.handle_help)\n return",
"def add_to_subparser_object(subparserObject, parentParser):\n testParser = subparserObject.add_parser(SUBPARSER_KEYWORD, parents=[parentParser])\n testParser.add_argument('writers', nargs='*')\n testParser.add_argument('--showpass', action='store_true')\n testParser.add_argument('--diff', action='store_true')\n testParser.set_defaults(func=operate)",
"def append_subparsers(parser):\n\n subcommands = parser.add_subparsers(help='sub-commands')\n\n for group in SubparserGroupsRegistry.registry:\n group.parser = subcommands.add_parser(help=group.name, name=group.name)\n\n for group in SubparserGroupsRegistry.registry:\n\n name = group.name\n subparsers = group.registry\n\n if subparsers:\n _subparser_group = group.parser.add_subparsers(\n title=name,\n description=name + ' arguments: ' + ', '.join(subparsers.keys())\n )\n\n for subparser in subparsers.values():\n subparser.add_to_argparse(_subparser_group)",
"def add_singlesample_parser(subparsers):\n\n argparser_gsea = subparsers.add_parser(\"ssgsea\", help=\"Run Single Sample GSEA.\")\n\n # group for input files\n group_input = argparser_gsea.add_argument_group(\"Input files arguments\")\n group_input.add_argument(\n \"-d\",\n \"--data\",\n dest=\"data\",\n action=\"store\",\n type=str,\n required=True,\n help=\"Input gene expression dataset file in txt format. Same with GSEA.\",\n )\n group_input.add_argument(\n \"-g\",\n \"--gmt\",\n dest=\"gmt\",\n action=\"store\",\n type=str,\n required=True,\n help=\"Gene set database in GMT format. Same with GSEA.\",\n )\n # group for output files\n group_output = argparser_gsea.add_argument_group(\"Output arguments\")\n add_output_option(group_output)\n\n # group for General options.\n group_opt = argparser_gsea.add_argument_group(\n \"Single Sample GSEA advanced arguments\"\n )\n group_opt.add_argument(\n \"--sn\",\n \"--sample-norm\",\n dest=\"norm\",\n action=\"store\",\n type=str,\n default=\"rank\",\n metavar=\"normalize\",\n choices=(\"rank\", \"log\", \"log_rank\", \"custom\"),\n help=\"Sample normalization method. Choose from {'rank', 'log', 'log_rank','custom'}. Default: rank\",\n )\n\n group_opt.add_argument(\n \"-c\",\n \"--correl-type\",\n dest=\"correl\",\n action=\"store\",\n type=str,\n default=\"rank\",\n metavar=\"transform\",\n choices=(\"rank\", \"symrank\", \"zscore\"),\n help=\"Input data transformation after sample normalization. Choose from {'rank','symrank', 'zscore'}. Default: rank\",\n )\n group_opt.add_argument(\n \"--ns\",\n \"--no-scale\",\n action=\"store_false\",\n dest=\"scale\",\n default=True,\n help=\"If the flag was set, don't normalize the enrichment scores by number of genes.\",\n )\n group_opt.add_argument(\n \"-n\",\n \"--permu-num\",\n dest=\"n\",\n action=\"store\",\n type=int,\n default=0,\n metavar=\"nperm\",\n help=\"Number of random permutations. For calculating esnulls. Default: 0\",\n )\n group_opt.add_argument(\n \"--min-size\",\n dest=\"mins\",\n action=\"store\",\n type=int,\n default=15,\n metavar=\"int\",\n help=\"Min size of input genes presented in Gene Sets. Default: 15\",\n )\n group_opt.add_argument(\n \"--max-size\",\n dest=\"maxs\",\n action=\"store\",\n type=int,\n default=2000,\n metavar=\"int\",\n help=\"Max size of input genes presented in Gene Sets. Default: 2000\",\n )\n group_opt.add_argument(\n \"-w\",\n \"--weight\",\n action=\"store\",\n dest=\"weight\",\n default=0.25,\n type=float,\n metavar=\"weight\",\n help=\"Weighted_score of rank_metrics. For weighting input genes. Default: 0.25\",\n )\n group_opt.add_argument(\n \"-a\",\n \"--ascending\",\n action=\"store_true\",\n dest=\"ascending\",\n default=False,\n help=\"Rank metric sorting order. If the -a flag was chosen, then ascending equals to True. Default: False.\",\n )\n group_opt.add_argument(\n \"-s\",\n \"--seed\",\n dest=\"seed\",\n action=\"store\",\n type=int,\n default=123,\n metavar=\"\",\n help=\"Number of random seed. Default: 123\",\n )\n group_opt.add_argument(\n \"-p\",\n \"--threads\",\n dest=\"threads\",\n action=\"store\",\n type=int,\n default=4,\n metavar=\"procs\",\n help=\"Number of Processes you are going to use. Default: 4\",\n )\n\n return"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that parsing arguments of a subgroup parser whose arguments have both long and short names binds the parser results only to the long name | def test_parsing_of_arguments_string_with_subgroup_parser_with_long_and_short_arguments_names_in_root_parsing_manager():
parser_manager = RootConfigParsingManager()
parser_manager.add_subgroup(name='sub')
subparser = SubgroupConfigParsingManager('titi')
subparser.add_argument('a', 'aaa', is_flag=True, action=store_true, default_value=False)
subparser.add_argument('c', 'ttt', is_flag=False, action=store_val, argument_type=int)
subparser.add_argument('n', 'name')
parser_manager.add_subgroup_parser('sub', subparser)
check_parse_cli_result(parser_manager, '--sub titi -a --name tutu -c 15',
{'sub': {'tutu': {'aaa': True, 'type': 'titi', 'ttt': 15}}}) | [
"def test_arguments_string_parsing_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):\n\n subparser = SubgroupConfigParsingManager('toto')\n subparser.add_argument('b', is_flag=True, action=store_true)\n subparser.add_argument('n', 'name')\n root_config_parsing_manager.add_subgroup_parser('sub', subparser)\n\n check_parse_cli_result(root_config_parsing_manager, \"\", {})\n\n with pytest.raises(UnknownArgException):\n check_parse_cli_result(root_config_parsing_manager, \"-z\", {})\n\n check_parse_cli_result(root_config_parsing_manager, '-a', {'a': True})\n\n with pytest.raises(NoNameSpecifiedForSubgroupException):\n check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b', {})\n\n check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b --name titi',\n {'a': True, 'sub': {'titi': {'type': 'toto', 'b': True}}})\n\n with pytest.raises(BadContextException):\n check_parse_cli_result(root_config_parsing_manager, \"-b\", {})",
"def test_arguments_string_parsing_with_long_and_short_names_in_root_parsing_manager(root_config_parsing_manager):\n root_config_parsing_manager.add_argument('c', 'coco')\n root_config_parsing_manager.add_argument('d', 'xx', argument_type=int)\n\n check_parse_cli_result(root_config_parsing_manager, '-c 1', {'coco': '1'})\n\n check_parse_cli_result(root_config_parsing_manager, '-d 555', {'xx': 555})",
"def test_add_arguments_with_long_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.long_arg == ['help']\n parser_manager.add_argument('aaa')\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa=']\n parser_manager.add_argument('xx')\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa=', 'xx=']\n\n assert parser_manager.cli_parser.short_arg == 'h'",
"def test_add_arguments_with_two_long_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('coco', 'dodo')\n\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('ddddd', 'plplp')\n\n assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # -a, --help, -h and sub\n\n assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']\n assert root_config_parsing_manager.cli_parser.short_arg == 'ha'",
"def test_add_arguments_with_two_short_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('c', 'd')\n\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('t', 's')\n\n assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # --help, -h and sub\n\n assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']\n assert root_config_parsing_manager.cli_parser.short_arg == 'ha'",
"def test_add_common_args_with_group(self):\n # Add a requirement group to a different parser that we pass in to make sure we will use\n # the required group passed in.\n temp_subparser_2 = self.subparsers.add_parser('eat2', help='eat some food again.')\n required_group = temp_subparser_2.add_argument_group(\"required arguments\")\n driver._add_common_args(parser=self.temp_subparser, required_group=required_group)\n actions = self.temp_subparser.__dict__['_option_string_actions']\n temp_subparser_2_actions = temp_subparser_2.__dict__['_option_string_actions']\n self.assertIn('--database', temp_subparser_2_actions.keys())\n self.assertIn('-d', temp_subparser_2_actions.keys())\n\n self.assertIn('--database-type', actions.keys())\n self.assertIn('--keyspace', actions.keys())\n self.assertTrue(len(actions), 2)",
"def subgroup_parser():\n parser = SubgroupConfigParser('test')\n parser.add_argument('a', is_flag=True)\n return parser",
"def test_arguments_dict_validation_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):\n subparser = SubgroupConfigParsingManager('toto')\n subparser.add_argument('b', is_flag=True, action=store_true)\n subparser.add_argument('type', is_flag=True, action=store_true)\n subparser.add_argument('n', 'name')\n root_config_parsing_manager.add_subgroup_parser('sub', subparser)\n\n dic_a = {'a': True}\n\n dic_z = {\n \"z\": True\n }\n\n dic_b = {\n 'b': \"type\"\n }\n\n dic_a_sub = {\n 'a': True,\n 'sub': {\n 'titi':\n {\n 'type': 'toto',\n 'b': \"type\"\n }\n }\n }\n\n with pytest.raises(UnknownArgException):\n root_config_parsing_manager.validate(dic_z)\n\n with pytest.raises(UnknownArgException):\n root_config_parsing_manager.validate(dic_b)\n\n assert root_config_parsing_manager.validate(dic_a) == dic_a\n\n assert root_config_parsing_manager.validate(dic_a_sub) == dic_a_sub\n\n assert root_config_parsing_manager.validate({}) == {}",
"def test_add_flag_arguments_and_no_flag_arguments_with_short_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.short_arg == 'h'\n parser_manager.add_argument('a', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'ha'\n parser_manager.add_argument('b')\n assert parser_manager.cli_parser.short_arg == 'hab:'\n parser_manager.add_argument('c', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'hab:c'\n\n assert len(parser_manager.cli_parser.long_arg) == 1 # Only help is arg argument\n\n assert parser_manager.cli_parser.long_arg == [\"help\"] # Only help is arg argument",
"def test_get_argument_groups(self) -> None:\n MockCustomizableClass.add_args_to_group(self.test_group)\n group_dict = get_argument_groups(self.argparser)\n self.assertEqual(len(group_dict), 3, f\"Expected 3 groups, found {group_dict}\")\n self.assertEqual(\n len(group_dict[\"__NO_TITLE__\"]), # type: ignore\n 0,\n f\"Expected no unnamed groups, found {group_dict['__NO_TITLE__']}.\",\n )\n self.assertIn(\"test_group\", group_dict.keys(), \"test group not found\")\n self.assertIn(\"test_subgroup\", group_dict.keys(), \"test subgroup not found\")\n self.assertIn(self.test_group, group_dict.values(), \"test group doesn't match\")",
"def test_add_flag_arguments_with_long_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.long_arg == ['help']\n parser_manager.add_argument('aaa', is_flag=True)\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa']\n parser_manager.add_argument('tttt', is_flag=True)\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa', 'tttt']\n\n assert parser_manager.cli_parser.short_arg == 'h'",
"def test_add_flag_arguments_with_short_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.short_arg == 'h'\n parser_manager.add_argument('a', is_flag=True)\n parser_manager.add_argument('x', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'hax'",
"def root_config_parser_with_subgroups(root_config_parser_with_mandatory_and_optional_arguments):\n\n root_config_parser_with_mandatory_and_optional_arguments.add_argument_prefix(argument_prefix='TEST_')\n\n root_config_parser_with_mandatory_and_optional_arguments.add_subgroup(subgroup_type='g1', prefix='TEST_G1_')\n\n root_config_parser_with_mandatory_and_optional_arguments.add_subgroup(subgroup_type='g2', prefix='TEST_G2_')\n\n subgroup_parser_g1 = SubgroupConfigParser(name='type1')\n subgroup_parser_g1.add_argument('1', 'a1', argument_type=str, is_mandatory=True)\n subgroup_parser_g1.add_argument('2', 'a2', argument_type=bool, default_value=True)\n subgroup_parser_g1.add_argument('3', 'a3', argument_type=str, default_value=69)\n subgroup_parser_g1.add_argument('n', 'name', argument_type=str)\n root_config_parser_with_mandatory_and_optional_arguments.add_subgroup_parser(subgroup_type='g1',\n subgroup_parser=subgroup_parser_g1)\n\n subgroup_parser_g2 = SubgroupConfigParser(name='type2')\n subgroup_parser_g2.add_argument('1', 'a1', argument_type=float, is_mandatory=False)\n subgroup_parser_g2.add_argument('2', 'a2', argument_type=str)\n subgroup_parser_g2.add_argument('3', 'a3', argument_type=str)\n subgroup_parser_g2.add_argument('4', 'a4', argument_type=str)\n subgroup_parser_g2.add_argument('n', 'name', argument_type=str)\n root_config_parser_with_mandatory_and_optional_arguments.add_subgroup_parser(subgroup_type='g2',\n subgroup_parser=subgroup_parser_g2)\n\n return root_config_parser_with_mandatory_and_optional_arguments",
"def test_add_subgroup_parser_without_name_argument_raise_an_exception_in_root_parsing_manager():\n parser = RootConfigParsingManager()\n subparser = SubgroupConfigParsingManager('titi')\n\n with pytest.raises(SubgroupParserWithoutNameArgumentException):\n parser.add_subgroup_parser('toto', subparser)",
"def test_get_argument_groups_empty(self) -> None:\n MockUncustomizableClass.add_args_to_group(self.test_group)\n group_dict = get_argument_groups(self.argparser)\n self.assertEqual(len(group_dict), 1, f\"Expected 1 group, found {group_dict}\")\n self.assertEqual(\n len(group_dict[\"__NO_TITLE__\"]), # type: ignore\n 0,\n f\"Expected no unnamed groups, found {group_dict['__NO_TITLE__']}.\",\n )",
"def test_build_group_args(self):\n mydir = os.path.dirname(__file__)\n path = os.path.join(mydir, './build_group_args.yml')\n build.build_package(None, 'groups', 'pkg', [], path)\n\n from quilt.data.groups import pkg\n\n assert isinstance(pkg.group_a.csv(), DataFrame), \\\n 'Expected parent `transform: csv` to affect group_a.csv()'\n assert isinstance(pkg.group_a.tsv(), DataFrame), \\\n 'Expected local `transform: tsv` to affect group_a.tsv()'\n # TODO these tests should really test the node type and verify it as a file node\n # but currently both raw files and DFs are DataNode instances\n assert isinstance(pkg.group_b.txt(), string_types), \\\n 'Expected `transform: id` to be inferred from file extension'\n assert isinstance(pkg.group_b.subgroup.txt(), string_types), \\\n 'Expected `transform: id` to be inferred from file extension'\n # ENDTODO\n assert isinstance(pkg.group_b.tsv(), DataFrame), \\\n 'Expected `transform: tsv` to be inferred from file extension'\n assert pkg.group_b.tsv()['Date0'].dtype == np.dtype('<M8[ns]'), \\\n 'Expected Date0 column to parse as date'\n assert pkg.group_b.subgroup.tsv().shape == (1, 3), \\\n 'Expected `transform: tsv` and one skipped row from group args'\n assert pkg.group_b.subgroup.csv().shape == (0, 2), \\\n 'Expected local `transform: csv` and one skipped row from group args'\n assert pkg.group_b.subgroup.many_tsv.one().shape == (1, 3), \\\n 'Expected local `transform: csv` and one skipped row from group args'\n assert isinstance(pkg.group_b.subgroup.many_tsv.two(), DataFrame), \\\n 'Expected `transform: tsv` from ancestor'\n assert isinstance(pkg.group_b.subgroup.many_tsv.three(), DataFrame), \\\n 'Expected `transform: tsv` from ancestor'\n assert not pkg.group_empty._keys(), 'Expected group_empty to be empty'\n assert not pkg.group_x.empty_child._keys(), 'Expected group_x.emptychild to be empty'",
"def test_add_Argument_with_valid_parser(self):\n# pdb.set_trace()\n self.subcommand_class = self.commands.get('classificationobject-create')\n parser = argparse.ArgumentParser(description='parser for classificationobject arguments')\n parser = self.subcommand_class().add_known_arguments(parser)\n args = parser.parse_known_args()\n arg1 = parser.parse_args()\n print(\"arg1 in test_add_Argument_with_valid_parser:\", arg1)\n self.assertEqual(arg1.name,'test')\n self.assertEqual(arg1.src_ip,None)\n self.assertEqual(arg1.src_mac,None) \n self.assertEqual(arg1.src_ip_subnet,None)\n self.assertEqual(arg1.minimum_src_port,None)\n self.assertEqual(arg1.maximum_src_port,None)\n self.assertEqual(arg1.dst_ip,None)\n self.assertEqual(arg1.dst_mac,None)\n self.assertEqual(arg1.dst_ip_subnet,None)\n self.assertEqual(arg1.minimum_dst_port,None)\n self.assertEqual(arg1.maximum_dst_port,None)\n self.assertEqual(arg1.protocol,None)",
"def test_parsing_configuration_file_with_long_and_short_names_for_arguments_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_basic_configuration_with_long_and_short_names.json'\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['argumento2'] = expected_dict.pop('2')\n expected_dict['arg5'] = expected_dict.pop('5')\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse(args=('--config-file ' +\n test_files_path +\n '/' + config_file).split())\n assert result == expected_dict",
"def test_parsing_environment_variables_with_subgroups_and_long_and_short_names_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_long_and_short_names.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n expected_dict = load_configuration_from_json_file('root_manager_configuration_with_subgroups.json')\n expected_dict['input']['in1']['name'] = 'i1_name'\n expected_dict['output']['o1']['model'] = 'o1_model_x'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that adding a subgroup parser with no argument 'name' raises a SubgroupParserWithoutNameArgumentException | def test_add_subgroup_parser_without_name_argument_raise_an_exception_in_root_parsing_manager():
    parser = RootConfigParsingManager()
    subparser = SubgroupConfigParsingManager('titi')

    with pytest.raises(SubgroupParserWithoutNameArgumentException):
        parser.add_subgroup_parser('toto', subparser) | [
"def test_add_subgroup_parser_that_already_exists_raises_an_exception_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='toto')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('toto', subparser)\n\n repeated_subparser = SubgroupConfigParsingManager('titi')\n repeated_subparser.add_argument('n', 'name')\n\n with pytest.raises(AlreadyAddedSubparserException):\n parser_manager.add_subgroup_parser('toto', repeated_subparser)",
"def test_arguments_string_parsing_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):\n\n subparser = SubgroupConfigParsingManager('toto')\n subparser.add_argument('b', is_flag=True, action=store_true)\n subparser.add_argument('n', 'name')\n root_config_parsing_manager.add_subgroup_parser('sub', subparser)\n\n check_parse_cli_result(root_config_parsing_manager, \"\", {})\n\n with pytest.raises(UnknownArgException):\n check_parse_cli_result(root_config_parsing_manager, \"-z\", {})\n\n check_parse_cli_result(root_config_parsing_manager, '-a', {'a': True})\n\n with pytest.raises(NoNameSpecifiedForSubgroupException):\n check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b', {})\n\n check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b --name titi',\n {'a': True, 'sub': {'titi': {'type': 'toto', 'b': True}}})\n\n with pytest.raises(BadContextException):\n check_parse_cli_result(root_config_parsing_manager, \"-b\", {})",
"def test_add_subgroup_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n\n assert len(parser_manager.cli_parser.subgroup_parsers) == 0\n\n parser_manager.add_subgroup(name='sub')\n\n assert len(parser_manager.cli_parser.subgroup_parsers) == 1\n\n parser_manager.add_subgroup(name='sub1')\n\n assert len(parser_manager.cli_parser.subgroup_parsers) == 2\n\n parser_manager.add_subgroup(name='sub3')\n\n assert len(parser_manager.cli_parser.subgroup_parsers) == 3",
"def subgroup_parser():\n parser = SubgroupConfigParser('test')\n parser.add_argument('a', is_flag=True)\n return parser",
"def test_get_argument_groups_empty(self) -> None:\n MockUncustomizableClass.add_args_to_group(self.test_group)\n group_dict = get_argument_groups(self.argparser)\n self.assertEqual(len(group_dict), 1, f\"Expected 1 group, found {group_dict}\")\n self.assertEqual(\n len(group_dict[\"__NO_TITLE__\"]), # type: ignore\n 0,\n f\"Expected no unnamed groups, found {group_dict['__NO_TITLE__']}.\",\n )",
"def test_parsing_of_arguments_string_with_subgroup_parser_with_long_and_short_arguments_names_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='sub')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('a', 'aaa', is_flag=True, action=store_true, default_value=False)\n subparser.add_argument('c', 'ttt', is_flag=False, action=store_val, argument_type=int)\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('sub', subparser)\n check_parse_cli_result(parser_manager, '--sub titi -a --name tutu -c 15',\n {'sub': {'tutu': {'aaa': True, 'type': 'titi', 'ttt': 15}}})",
"def test_add_group_zero_members2() -> None:\n g = Group([])\n v = Grouping()\n v.add_group(g)\n assert v._groups == []",
"def test_arguments_dict_validation_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):\n subparser = SubgroupConfigParsingManager('toto')\n subparser.add_argument('b', is_flag=True, action=store_true)\n subparser.add_argument('type', is_flag=True, action=store_true)\n subparser.add_argument('n', 'name')\n root_config_parsing_manager.add_subgroup_parser('sub', subparser)\n\n dic_a = {'a': True}\n\n dic_z = {\n \"z\": True\n }\n\n dic_b = {\n 'b': \"type\"\n }\n\n dic_a_sub = {\n 'a': True,\n 'sub': {\n 'titi':\n {\n 'type': 'toto',\n 'b': \"type\"\n }\n }\n }\n\n with pytest.raises(UnknownArgException):\n root_config_parsing_manager.validate(dic_z)\n\n with pytest.raises(UnknownArgException):\n root_config_parsing_manager.validate(dic_b)\n\n assert root_config_parsing_manager.validate(dic_a) == dic_a\n\n assert root_config_parsing_manager.validate(dic_a_sub) == dic_a_sub\n\n assert root_config_parsing_manager.validate({}) == {}",
"def test_group_operation_without_operation_definition_anonymous(self):\n\n class A(FlowProject):\n pass\n\n group = A.make_group(\"foo\")\n\n with pytest.raises(FlowProjectDefinitionError):\n group(lambda job: print(job))",
"def test_create_sec_grp_no_name(self):\n with self.assertRaises(Exception):\n sec_grp_settings = SecurityGroupConfig()\n self.security_groups.append(\n neutron_utils.create_security_group(\n self.neutron, self.keystone, sec_grp_settings))",
"def test_expand_groups_unknown() -> None:\n with pytest.raises(KeyError):\n Environment._expand_groups([\"$list\", \"$UNKNOWN\", \"$str\", \"end\"], _GROUPS)",
"def test_get_argument_groups(self) -> None:\n MockCustomizableClass.add_args_to_group(self.test_group)\n group_dict = get_argument_groups(self.argparser)\n self.assertEqual(len(group_dict), 3, f\"Expected 3 groups, found {group_dict}\")\n self.assertEqual(\n len(group_dict[\"__NO_TITLE__\"]), # type: ignore\n 0,\n f\"Expected no unnamed groups, found {group_dict['__NO_TITLE__']}.\",\n )\n self.assertIn(\"test_group\", group_dict.keys(), \"test group not found\")\n self.assertIn(\"test_subgroup\", group_dict.keys(), \"test subgroup not found\")\n self.assertIn(self.test_group, group_dict.values(), \"test group doesn't match\")",
"def test_constructor_without_defined_groups(self):\n Bullet.groups = None\n self.assertRaises(TypeError, Bullet, RED, 5)",
"def test_service_groups_missing_group(self):\n self.assertNotIn(\"not_a_service_group\", EFConfig.SERVICE_GROUPS)",
"def test_subgroups(clean_raw_data):\n subgroup_names = subgroups(clean_raw_data)\n assert subgroup_names == ['spectrum1', 'spectrum2', 'spectrum3']",
"def test_nested_exclusive_option_groups(self):\n self.assertRaises(SystemExit,\n self._test_options, [\"--test1\", \"--test2\"])",
"def test_add_arguments_with_two_short_names_raise_an_exception_in_root_parsing_manager(root_config_parsing_manager):\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('c', 'd')\n\n with pytest.raises(SameLengthArgumentNamesException):\n root_config_parsing_manager.add_argument('t', 's')\n\n assert len(root_config_parsing_manager.cli_parser.arguments) == 4 # --help, -h and sub\n\n assert root_config_parsing_manager.cli_parser.long_arg == ['help', 'sub=']\n assert root_config_parsing_manager.cli_parser.short_arg == 'ha'",
"def test_repeat_operation_group_definition(self):\n\n class A(FlowProject):\n pass\n\n foo_group = A.make_group(\"foo\")\n\n with pytest.raises(FlowProjectDefinitionError):\n\n @foo_group\n @foo_group\n @A.operation\n def foo_operation(job):\n pass",
"def root_config_parser_with_subgroups(root_config_parser_with_mandatory_and_optional_arguments):\n\n root_config_parser_with_mandatory_and_optional_arguments.add_argument_prefix(argument_prefix='TEST_')\n\n root_config_parser_with_mandatory_and_optional_arguments.add_subgroup(subgroup_type='g1', prefix='TEST_G1_')\n\n root_config_parser_with_mandatory_and_optional_arguments.add_subgroup(subgroup_type='g2', prefix='TEST_G2_')\n\n subgroup_parser_g1 = SubgroupConfigParser(name='type1')\n subgroup_parser_g1.add_argument('1', 'a1', argument_type=str, is_mandatory=True)\n subgroup_parser_g1.add_argument('2', 'a2', argument_type=bool, default_value=True)\n subgroup_parser_g1.add_argument('3', 'a3', argument_type=str, default_value=69)\n subgroup_parser_g1.add_argument('n', 'name', argument_type=str)\n root_config_parser_with_mandatory_and_optional_arguments.add_subgroup_parser(subgroup_type='g1',\n subgroup_parser=subgroup_parser_g1)\n\n subgroup_parser_g2 = SubgroupConfigParser(name='type2')\n subgroup_parser_g2.add_argument('1', 'a1', argument_type=float, is_mandatory=False)\n subgroup_parser_g2.add_argument('2', 'a2', argument_type=str)\n subgroup_parser_g2.add_argument('3', 'a3', argument_type=str)\n subgroup_parser_g2.add_argument('4', 'a4', argument_type=str)\n subgroup_parser_g2.add_argument('n', 'name', argument_type=str)\n root_config_parser_with_mandatory_and_optional_arguments.add_subgroup_parser(subgroup_type='g2',\n subgroup_parser=subgroup_parser_g2)\n\n return root_config_parser_with_mandatory_and_optional_arguments"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the result of parsing an empty dict is a dict of arguments with their default value | def test_validate_empty_dict_return_default_values_of_arguments_in_root_parsing_manager():
    parser_manager = RootConfigParsingManager()
    parser_manager.add_argument('c', argument_type=int, default_value=1)
    parser_manager.add_argument('hello', argument_type=str, default_value="world")

    default_dic = {}
    expected_dic = {'c': 1, 'hello': 'world'}

    assert parser_manager.validate(default_dic) == expected_dic | [
"def test_falsy_default_argument_values():\n arguments = [\n {\n \"name\": \"nonrequired\",\n \"type\": \"str\",\n \"default\": None\n },\n ]\n parser = reading.build_template_argparser(arguments)\n values = parser.parse_args([])\n assert values.nonrequired == ''",
"def test_get_default_arg_dict(self) -> None:\n default_dict = get_default_arg_dict(MockCustomizableClass)\n self.assertEqual(len(default_dict), 1, \"Only args with defaults should show\")\n default_arg = default_dict[MockCustomizableClass.DEFAULT_NAME]\n self.assertEqual(default_arg, MockCustomizableClass.DEFAULT_VALUE)",
"def test_dict_optional_args(self, request_args):\n args = request_args.dict_optional_args(\n autocast_arguments_to_string=False,\n )\n assert args['data'] == {'d1': 1, 'd2': 2}\n assert 'method' not in args\n assert 'url' not in args\n assert 'full_url' not in args",
"def test_interaction_dict_none():\n res = dict_utils.interaction_dict(None)\n assert res is None",
"def test_get_argument_group_dict_empty(self) -> None:\n MockUncustomizableClass.add_args_to_group(self.test_group)\n group_as_dict = get_argument_group_dict(self.test_group)\n self.assertIsNone(group_as_dict)",
"def test_passed_noDefaultValues(self):\n\n def func(a, b, c=1, d=2, e=3):\n pass\n\n self.assertEqual(self.checkPassed(func, 1, 2, e=7), dict(a=1, b=2, e=7))",
"def test_dict_optional_args_with_autocast(self, request_args):\n request_args.data = {\n 'string': 'string',\n 'int': 3,\n 'none': None,\n 'datetime': datetime(2000, 1, 2, 3, 4, 5),\n 'date': date(2000, 1, 2),\n 'list': ['a', 'b', 'c'],\n 'dict': {\n 'a': 1,\n },\n }\n args = request_args.dict_optional_args(\n autocast_arguments_to_string=True,\n )\n assert args['data'] == {\n 'string': 'string',\n 'int': 3,\n 'none': None,\n 'datetime': '2000-01-02 03:04:05',\n 'date': '2000-01-02 00:00:00',\n 'list': '[\"a\", \"b\", \"c\"]',\n 'dict': '{\"a\": 1}',\n }",
"def test_parsing_configuration_file_with_no_argument_with_default_value_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_basic_configuration_with_no_argument_with_default_value.json'\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['arg5'] = 'default value'\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse(args=('--config-file ' +\n test_files_path + '/' +\n config_file).split())\n\n assert result == expected_dict",
"def test_process_json_dict_no_apply(self):\r\n result = json_processor.process_json_dict(self.test_dict_no_apply)\r\n self.assertEqual(None, result)",
"def test_default_required_output_for_dict(self):\n class ExampleSerializer(serializers.Serializer):\n omitted = serializers.CharField(default='abc')\n included = serializers.CharField()\n\n serializer = ExampleSerializer({'included': 'abc'})\n with pytest.raises(KeyError):\n serializer.data",
"def default_dict(obj):\n return obj or {}",
"def test_check_default_values():\n iniconf.check_default_values(spec, 'key1')\n iniconf.check_default_values(spec, 'key2')\n try:\n iniconf.check_default_values(spec, 'key3')\n except ConfigError:\n spec['key3'] = 'integer(default=1)'\n else:\n raise AssertionError(\"Checking for a default value should have failed with: %s\" % spec['key3'])",
"def _none_to_empty_dict(*args):\n out_arg_lst = []\n for arg in args:\n if arg is None:\n out_arg_lst.append({})\n else:\n out_arg_lst.append(arg)\n return out_arg_lst",
"def test_parsing_environment_variables_with_no_argument_with_default_value_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments):\n config_file = 'root_manager_basic_configuration_with_no_argument_with_default_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['arg5'] = 'default value'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_empty_str_to_dict(self):\n d = msgmap.str_to_dict('')\n self.assertEqual(len(d), 0)",
"def _nonnull_dict(**kwargs):\n return {key: value\n for key, value in kwargs.items()\n if value is not None}",
"def test_get_any():\n expected = {\n \"true\": True,\n \"false\": False,\n \"positive\": 123,\n \"negative\": -456,\n \"zero\": 0,\n \"float\": \"1.1\",\n \"expression\": \"123-456\",\n \"none\": \"None\",\n \"string\": \"a b c d e f g\",\n \"list_of_int\": \"[3,4,5]\",\n \"list_of_str\": '[\"x\", \"y\", \\'z\\']',\n }\n for key, value in expected.items():\n assert envs.get_any(key, \"default\") == value\n assert envs.get_any(\"missing\", \"default\") == \"default\"",
"def test_emptydict_json(self):\n dic = Base.to_json_string([{}])\n self.assertEqual(dic, \"[{}]\")",
"def test_kwargs_none(self):\n n = {None: None}\n with self.assertRaises(TypeError):\n new = self.value(**n)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a json file containing a configuration with long and short names for arguments is correctly parsed | def test_parsing_configuration_file_with_long_and_short_names_for_arguments_in_root_parsing_manager(
        root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):
    config_file = 'root_manager_basic_configuration_with_long_and_short_names.json'
    expected_dict = load_configuration_from_json_file(config_file)
    expected_dict['argumento2'] = expected_dict.pop('2')
    expected_dict['arg5'] = expected_dict.pop('5')

    result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse(args=('--config-file ' +
                                                                                           test_files_path +
                                                                                           '/' + config_file).split())
    assert result == expected_dict | [
"def test_arguments_string_parsing_with_long_and_short_names_in_root_parsing_manager(root_config_parsing_manager):\n root_config_parsing_manager.add_argument('c', 'coco')\n root_config_parsing_manager.add_argument('d', 'xx', argument_type=int)\n\n check_parse_cli_result(root_config_parsing_manager, '-c 1', {'coco': '1'})\n\n check_parse_cli_result(root_config_parsing_manager, '-d 555', {'xx': 555})",
"def test_string_argument_parsing():\n arguments = [\n {\n \"name\": \"firstname\",\n \"type\": \"str\",\n \"default\": \"Allysa P. Hacker\",\n },\n ]\n parser = reading.build_template_argparser(arguments)\n values = parser.parse_args([\"--firstname\", \"john\"])\n assert values.firstname == \"john\"",
"def test_parse_config(self):\n with open(self.config_file_json, 'w+',\n encoding=\"utf-8\", errors=\"ignore\") as config_f:\n json.dump({\n 'analyzer': ['--analyzers', 'clangsa'],\n 'parse': ['--trim-path-prefix', '/workspace']},\n config_f)\n\n out, returncode = self.__run_analyze(self.config_file_json)\n\n self.assertEqual(returncode, 0)\n self.assertIn(\"clangsa analyzed simple.cpp\", out)\n self.assertNotIn(\"clang-tidy analyzed simple.cpp\", out)\n\n out, returncode = self.__run_parse(self.config_file_json)\n print(out)\n self.assertEqual(returncode, 2)",
"def test_all_command_line():\n assert read_settings('abc 123 -p testpre'.split()) == \\\n {'oauth_token': 'abc',\n 'oauth_secret': '123',\n 'app_key': 'RWmvpkGK4m9tavh4bCfdzsYjH',\n 'app_secret': 'uCShewTskeuBvt9haLi8LFARSJXkxJsCPNZ3dGwpYz4vuc5Mo9',\n 'config': 'stwark.cfg',\n 'prefix': 'testpre'}",
"def test_json_configuration(request):\n ProjectMock(request).style(\n \"\"\"\n [\"your.json\".has]\n an_extra = \"key\"\n\n [\"their.json\"]\n x = 1\n \"\"\"\n ).flake8().assert_errors_contain(\n \"\"\"\n NIP001 File nitpick-style.toml has an incorrect style. Invalid config:\\x1b[32m\n \"their.json\".x: Unknown configuration. See https://nitpick.rtfd.io/en/latest/nitpick_section.html.\n \"your.json\".has: Unknown configuration. See https://nitpick.rtfd.io/en/latest/nitpick_section.html.\\x1b[0m\n \"\"\",\n 1,\n )",
"def test_build_from_good_json(self):",
"def test_arg() -> None:\n parser = arg_parser()\n parsed = parser.parse_args(\n [\"--test\", \"test_name\", \"-n\", \"52\", \"--tool\", \"cwltool\", \"-j\", \"4\"]\n )\n assert parsed.test == \"test_name\"\n assert parsed.n == \"52\"\n assert parsed.tool == \"cwltool\"\n assert parsed.j == 4",
"def test_create_argument_list():\n raw_config = \"\"\"[firstname]\ntype=str\ndefault=rayman\"\"\"\n config = ConfigParser()\n config.read_string(raw_config)\n results = reading.get_template_arguments(config)\n assert results[0]['name'] == 'firstname'",
"def test_oauth_command_line():\n assert read_settings('abc 123'.split()) == \\\n {'oauth_token': 'abc',\n 'oauth_secret': '123',\n 'app_key': 'RWmvpkGK4m9tavh4bCfdzsYjH',\n 'app_secret': 'uCShewTskeuBvt9haLi8LFARSJXkxJsCPNZ3dGwpYz4vuc5Mo9',\n 'config': 'stwark.cfg',\n 'prefix': 'data'}",
"def test_parsing_of_arguments_string_with_subgroup_parser_with_long_and_short_arguments_names_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='sub')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('a', 'aaa', is_flag=True, action=store_true, default_value=False)\n subparser.add_argument('c', 'ttt', is_flag=False, action=store_val, argument_type=int)\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('sub', subparser)\n check_parse_cli_result(parser_manager, '--sub titi -a --name tutu -c 15',\n {'sub': {'tutu': {'aaa': True, 'type': 'titi', 'ttt': 15}}})",
"def test_add_arguments_with_long_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.long_arg == ['help']\n parser_manager.add_argument('aaa')\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa=']\n parser_manager.add_argument('xx')\n assert parser_manager.cli_parser.long_arg == ['help', 'aaa=', 'xx=']\n\n assert parser_manager.cli_parser.short_arg == 'h'",
"def test_manifest_parses(self):\n self.assertIsInstance(self.json, dict)",
"def test_add_flag_arguments_and_no_flag_arguments_with_short_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.short_arg == 'h'\n parser_manager.add_argument('a', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'ha'\n parser_manager.add_argument('b')\n assert parser_manager.cli_parser.short_arg == 'hab:'\n parser_manager.add_argument('c', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'hab:c'\n\n assert len(parser_manager.cli_parser.long_arg) == 1 # Only help is arg argument\n\n assert parser_manager.cli_parser.long_arg == [\"help\"] # Only help is arg argument",
"def test_required_args(self):\n parser, config_dict = set_args()\n args = parser.parse_args(self.cmd_args[5])\n config_dict, arg_dict = parse_args(config_dict, args)\n self.assertTrue(arg_dict.get(\"generate_config\") is None)",
"def test_ios_error_config_with_ios_json(self):\n files = {\n '/fake_src/ios/build/bots/fake_builder_group/fake_ios_error.json':\n ('{\"gn_args\": [\"is_debug=true\"]}\\n')\n }\n mbw = self.fake_mbw(files)\n self.check(['lookup', '-m', 'fake_builder_group', '-b', 'fake_ios_error'],\n mbw=mbw,\n ret=0,\n out=('\\n'\n 'Writing \"\"\"\\\\\\n'\n 'is_debug = true\\n'\n '\"\"\" to _path_/args.gn.\\n\\n'\n '/fake_src/buildtools/linux64/gn gen _path_\\n'))",
"def test_add_flag_arguments_with_short_name_to_cli_parser():\n parser_manager = RootConfigParsingManager()\n assert parser_manager.cli_parser.short_arg == 'h'\n parser_manager.add_argument('a', is_flag=True)\n parser_manager.add_argument('x', is_flag=True)\n assert parser_manager.cli_parser.short_arg == 'hax'",
"def test_configuration_endpoint():\n parser = create_parser()\n parsed_arguments = parser.parse_args([\"--configuration-endpoint\", \"/test\"])\n assert parsed_arguments.configuration_endpoint == \"/test\", \"Wrong endpoint\"",
"def _parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"experiment_config\",\n type=str,\n help='experiment json file (\\'{\"dataset\":\"EmnistDataset\"}\\'',\n )\n args = parser.parse_args()\n return args",
"def test_parsing_configuration_file_with_no_argument_with_default_value_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_basic_configuration_with_no_argument_with_default_value.json'\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['arg5'] = 'default value'\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse(args=('--config-file ' +\n test_files_path + '/' +\n config_file).split())\n\n assert result == expected_dict"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a json file containing a configuration with no values for arguments with default values is correctly parsed | def test_parsing_configuration_file_with_no_argument_with_default_value_in_root_parsing_manager(
        root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):
    config_file = 'root_manager_basic_configuration_with_no_argument_with_default_value.json'
    expected_dict = load_configuration_from_json_file(config_file)
    expected_dict['arg5'] = 'default value'
    result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse(args=('--config-file ' +
                                                                                           test_files_path + '/' +
                                                                                           config_file).split())

    assert result == expected_dict | [
"def test_falsy_default_argument_values():\n arguments = [\n {\n \"name\": \"nonrequired\",\n \"type\": \"str\",\n \"default\": None\n },\n ]\n parser = reading.build_template_argparser(arguments)\n values = parser.parse_args([])\n assert values.nonrequired == ''",
"def test_validate_empty_dict_return_default_values_of_arguments_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_argument('c', argument_type=int, default_value=1)\n parser_manager.add_argument('hello', argument_type=str, default_value=\"world\")\n\n default_dic = {}\n expected_dic = {'c': 1, 'hello': 'world'}\n\n assert parser_manager.validate(default_dic) == expected_dic",
"def test_default_values(self):\n class MySchema(Schema):\n foo = BoolOption(default=True)\n\n class bar(Section):\n baz = IntOption()\n bla = StringOption(default='hello')\n\n schema = MySchema()\n config = StringIO(\"[bar]\\nbaz=123\")\n expected_values = {'__main__': {'foo': True},\n 'bar': {'baz': 123, 'bla': 'hello'}}\n parser = SchemaConfigParser(schema)\n parser.readfp(config)\n self.assertEquals(expected_values, parser.values())\n\n config = StringIO(\"[bar]\\nbla=123\")\n expected = {\n '__main__': {'foo': True},\n 'bar': {'baz': 0, 'bla': '123'}}\n parser = SchemaConfigParser(schema)\n parser.readfp(config)\n self.assertEquals(expected, parser.values())",
"def test_generate_config_default(self):\n parser, config_dict = set_args()\n args = parser.parse_args(self.cmd_args[0])\n config_dict, arg_dict = parse_args(config_dict, args)\n self.assertTrue(config_dict.get(\"dataset\") == \"wn18\")\n self.assertTrue(config_dict.get(\"device\") == \"GPU\")\n self.assertTrue(arg_dict.get(\"num_partitions\") == 5)",
"def test_json_configuration(request):\n ProjectMock(request).style(\n \"\"\"\n [\"your.json\".has]\n an_extra = \"key\"\n\n [\"their.json\"]\n x = 1\n \"\"\"\n ).flake8().assert_errors_contain(\n \"\"\"\n NIP001 File nitpick-style.toml has an incorrect style. Invalid config:\\x1b[32m\n \"their.json\".x: Unknown configuration. See https://nitpick.rtfd.io/en/latest/nitpick_section.html.\n \"your.json\".has: Unknown configuration. See https://nitpick.rtfd.io/en/latest/nitpick_section.html.\\x1b[0m\n \"\"\",\n 1,\n )",
"def test_required_args(self):\n parser, config_dict = set_args()\n args = parser.parse_args(self.cmd_args[5])\n config_dict, arg_dict = parse_args(config_dict, args)\n self.assertTrue(arg_dict.get(\"generate_config\") is None)",
"def test_parsing_environment_variables_with_no_argument_with_default_value_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments):\n config_file = 'root_manager_basic_configuration_with_no_argument_with_default_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['arg5'] = 'default value'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_getConfig_with_missing_keys_in_config_file(self):\n #Write the bad config file\n if not os.path.isdir(config_address + \"/..\"):\n os.mkdir(config_address + \"/..\")\n parser = configparser.ConfigParser()\n bad_defaults = copy.deepcopy(defaults)\n bad_defaults.pop('size')\n parser['general'] = bad_defaults\n with open(config_address, 'w') as configfile:\n parser.write(configfile)\n\n #Read the bad config file\n config = getConfig()['general']\n self.assertEqual(config, defaults)",
"def test_empty_config(self):\n with open(self.config_file_json, 'w+',\n encoding=\"utf-8\", errors=\"ignore\") as config_f:\n config_f.write(\"\")\n\n out, returncode = self.__run_analyze(self.config_file_json)\n\n self.assertEqual(returncode, 0)\n self.assertIn(\"clangsa analyzed simple.cpp\", out)\n self.assertIn(\"clang-tidy analyzed simple.cpp\", out)",
"def test_read_json_missing_parameters(self):\n self.assertRaises(\n ValidationError,\n read_json,\n \"test/invalid_data_missing_parameters.json\",\n \"test/valid_schema.json\"\n )",
"def test_non_existent_JSON_file(self):\n self.assertEqual(Base.load_from_file(), [])",
"def test_check_default_values():\n iniconf.check_default_values(spec, 'key1')\n iniconf.check_default_values(spec, 'key2')\n try:\n iniconf.check_default_values(spec, 'key3')\n except ConfigError:\n spec['key3'] = 'integer(default=1)'\n else:\n raise AssertionError(\"Checking for a default value should have failed with: %s\" % spec['key3'])",
"def test_configuration_are_none_by_default():\n\n config = aai.TranscriptionConfig()\n fields = config.raw.__fields_set__ - {\"language_code\"}\n\n for name, value in inspect.getmembers(config):\n if name in fields and value is not None:\n pytest.fail(\n f\"Configuration field {name} is {value} and not None by default.\"\n )",
"def test_getConfig_when_there_is_a_config_file(self):\n generateConfig()\n self.assertEqual(getConfig()['general'], defaults)",
"def test_extra_sections_with_missing_section(self):\n class MySchema(Schema):\n foo = DictOption()\n\n config = StringIO(textwrap.dedent(\"\"\"\n [__main__]\n foo = dict1\n \"\"\"))\n parser = SchemaConfigParser(MySchema())\n parser.readfp(config)\n parser.parse_all()\n\n self.assertRaises(NoSectionError, parser.values)\n # config is not valid\n self.assertFalse(parser.is_valid())",
"def test_factory_empty_settings_2(self):\n config = ConfigParser()\n config.read(\"{}\")\n self.assertRaises(ValueError, check_settings_sections, config, EXCEPTED_SECTIONS)",
"def test_some_parser_defaults(self):\n assert self.args.rate == 250.0\n assert self.args.gain == 1",
"def test_parsing_configuration_file_with_long_and_short_names_for_arguments_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_basic_configuration_with_long_and_short_names.json'\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['argumento2'] = expected_dict.pop('2')\n expected_dict['arg5'] = expected_dict.pop('5')\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse(args=('--config-file ' +\n test_files_path +\n '/' + config_file).split())\n assert result == expected_dict",
"def test_load_yaml_config_converts_empty_files_to_dict() -> None:\n create_file(YAML_PATH)\n\n assert isinstance(config_util.load_yaml_config_file(YAML_PATH), dict)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the parsing of a configuration defined via environment variables missing arguments with default values results in a dict with the default values for those arguments | def test_parsing_environment_variables_with_no_argument_with_default_value_in_root_parsing_manager(
        root_config_parsing_manager_with_mandatory_and_optional_arguments):
    config_file = 'root_manager_basic_configuration_with_no_argument_with_default_value.json'
    created_environment_variables = define_environment_variables_configuration_from_json_file(
        file_name=config_file,
        simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.
        arguments_prefix[0],
        group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.
        get_groups_prefixes())

    expected_dict = load_configuration_from_json_file(config_file)
    expected_dict['arg5'] = 'default value'

    result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()

    assert result == expected_dict

    remove_environment_variables_configuration(variables_names=created_environment_variables) | [
"def test_validate_empty_dict_return_default_values_of_arguments_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_argument('c', argument_type=int, default_value=1)\n parser_manager.add_argument('hello', argument_type=str, default_value=\"world\")\n\n default_dic = {}\n expected_dic = {'c': 1, 'hello': 'world'}\n\n assert parser_manager.validate(default_dic) == expected_dic",
"def test_runtime_envs_set_default(self):\n pass",
"def env(*args, **kwargs):\n for argument in args:\n value = os.environ.get(argument)\n if value:\n return value\n return kwargs.get('default', '')",
"def test_generate_config_default(self):\n parser, config_dict = set_args()\n args = parser.parse_args(self.cmd_args[0])\n config_dict, arg_dict = parse_args(config_dict, args)\n self.assertTrue(config_dict.get(\"dataset\") == \"wn18\")\n self.assertTrue(config_dict.get(\"device\") == \"GPU\")\n self.assertTrue(arg_dict.get(\"num_partitions\") == 5)",
"def test_parsing_configuration_file_with_no_argument_with_default_value_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_basic_configuration_with_no_argument_with_default_value.json'\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['arg5'] = 'default value'\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse(args=('--config-file ' +\n test_files_path + '/' +\n config_file).split())\n\n assert result == expected_dict",
"def _get_args_defaults(env, args):\n defaults = {'height': _get_config_variable(env, 'height', 500),\n 'column_width': _get_config_variable(env, 'column_width', 40),\n 'res_days': _get_config_variable(env, 'res_days', 7),\n 'title': _get_config_variable(env, 'default_title',\n 'Tickets statistics'),\n 'daterange': _get_config_variable(env, 'default_daterange',\n '3m;'),\n }\n # Elegant :)\n defaults.update(args)\n return defaults",
"def test_falsy_default_argument_values():\n arguments = [\n {\n \"name\": \"nonrequired\",\n \"type\": \"str\",\n \"default\": None\n },\n ]\n parser = reading.build_template_argparser(arguments)\n values = parser.parse_args([])\n assert values.nonrequired == ''",
"def complete_configuration(env, args):\n cfg = dict()\n\n # combining previously defined environment and arguments from command line\n # to prepare process configuration\n for key in env:\n if key.lower() in args and args[key.lower()]:\n cfg[key.lower()] = args[key.lower()]\n else:\n cfg[key.lower()] = env[key]\n for key in args:\n if key not in cfg:\n cfg[key] = args[key]\n\n return cfg",
"def test_config_as_dict(self):\n with mock.patch.dict(\"os.environ\"):\n os.environ[\"AIRFLOW__VAR__broken\"] = \"not_ok\"\n asdict = conf.as_dict(raw=True, display_sensitive=True)\n assert asdict.get(\"VAR\") is None\n assert asdict[\"testsection\"][\"testkey\"] == \"testvalue\"",
"def env(*vars, **kwargs):\n import os\n\n for v in vars:\n value = os.environ.get(v)\n if value:\n return value\n return kwargs.get('default', '')",
"def test_use_factory_defaults(self):\n for k in neurotic.global_config['defaults']:\n neurotic.global_config['defaults'][k] = 'bad value'\n argv = ['neurotic', '--use-factory-defaults']\n args = neurotic.parse_args(argv)\n for k, v in neurotic._global_config_factory_defaults['defaults'].items():\n self.assertEqual(getattr(args, k), v,\n f'args.{k} was not reset to factory default')",
"def test_with_overrides_value(self):\n\n expected = {\n \"AWS_SAM_LOCAL\": \"true\",\n \"AWS_LAMBDA_FUNCTION_MEMORY_SIZE\": \"1024\",\n \"AWS_LAMBDA_FUNCTION_TIMEOUT\": \"123\",\n \"AWS_LAMBDA_FUNCTION_HANDLER\": \"handler\",\n \"AWS_LAMBDA_FUNCTION_NAME\": self.name,\n \"AWS_LAMBDA_FUNCTION_VERSION\": \"$LATEST\",\n \"AWS_LAMBDA_LOG_GROUP_NAME\": f\"aws/lambda/{self.name}\",\n \"AWS_LAMBDA_LOG_STREAM_NAME\": \"$LATEST\",\n \"AWS_ACCOUNT_ID\": \"123456789012\",\n \"AWS_REGION\": \"us-east-1\",\n \"AWS_ACCESS_KEY_ID\": \"defaultkey\",\n \"AWS_SECRET_ACCESS_KEY\": \"defaultsecret\",\n # This value is coming from user passed environment variable\n \"AWS_DEFAULT_REGION\": \"user-specified-region\",\n \"variable2\": \"mystring\",\n # Value coming from the overrides\n \"variable1\": \"variable1 value from overrides\",\n \"list_var\": \"list value coming from overrides\",\n \"dict_var\": \"\",\n \"none_var\": \"\",\n \"true_var\": \"true\",\n \"false_var\": \"false\",\n }\n\n environ = EnvironmentVariables(\n self.name,\n self.memory,\n self.timeout,\n self.handler,\n variables=self.variables,\n shell_env_values=self.shell_env,\n override_values=self.override,\n )\n\n self.assertEqual(environ.resolve(), expected)",
"def test_check_default_values():\n iniconf.check_default_values(spec, 'key1')\n iniconf.check_default_values(spec, 'key2')\n try:\n iniconf.check_default_values(spec, 'key3')\n except ConfigError:\n spec['key3'] = 'integer(default=1)'\n else:\n raise AssertionError(\"Checking for a default value should have failed with: %s\" % spec['key3'])",
"def test_required_args(self):\n parser, config_dict = set_args()\n args = parser.parse_args(self.cmd_args[5])\n config_dict, arg_dict = parse_args(config_dict, args)\n self.assertTrue(arg_dict.get(\"generate_config\") is None)",
"def _argparse_check_env(cls, env_prefix, prefix, k, args):\n if env_prefix is None:\n return\n\n argname = f'{prefix}{k}'\n name = f'{env_prefix}_{prefix}{k}'.upper().replace('-', '_')\n v = os.environ.get(name)\n if v is not None:\n args.setdefault(argname, v)",
"def test_parsing_environment_variables_with_subgroups_and_no_arguments_with_default_value_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_no_argument_default_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['input']['in1']['name'] = 'my_i1_instance'\n expected_dict['output']['o1']['name'] = 'my_o1_instance'\n expected_dict['output']['o2']['name'] = 'my_o2_instance'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def config(default=None, environment=environment(), **values):\n if environment in values:\n return values[environment]\n else:\n return default",
"def test_load_config_from_dotenv_non_empty_file_all_keys_accepted(tmp_path):\n env_file = tmp_path / \".env\"\n env_file.write_text('VALID = true\\ntruly_invalid = true\\nInVaLiD = true')\n\n config = ConfigLoader.load_config_from_dotenv(str(env_file), accepted_keys=None)\n assert len(config) == 1, 'Resulting `Config`-instance should contain only one key-value pair'\n assert 'VALID' in config, '`VALID` key should be in resulting config'\n assert 'InVaLiD' not in config and 'truly_invalid' not in config, 'Other invalid keys should not be in resulting config'",
"def _environment_variables() -> Dict[str, str]:\n return {key: value for key, value in os.environ.items() if _is_encodable(value)}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a configuration defined via environment variables with subgroups is correctly parsed | def test_parsing_environment_variables_with_subgroups_and_long_and_short_names_in_root_parsing_manager(
        root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):
    config_file = 'root_manager_configuration_with_subgroups_and_long_and_short_names.json'
    created_environment_variables = define_environment_variables_configuration_from_json_file(
        file_name=config_file,
        simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.
        arguments_prefix[0],
        group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.
        get_groups_prefixes())

    expected_dict = load_configuration_from_json_file('root_manager_configuration_with_subgroups.json')
    expected_dict['input']['in1']['name'] = 'i1_name'
    expected_dict['output']['o1']['model'] = 'o1_model_x'

    result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()

    assert result == expected_dict

    remove_environment_variables_configuration(variables_names=created_environment_variables) | [
"def test_nested(self):\n env = {\"APP_X\": \"nope\", \"XYZ_X\": \"foo\", \"XYZ_SUB_Y\": \"bar\"}\n cfg = environ.to_config(Nested, environ=env)\n\n assert Nested(x=\"foo\", sub=Nested.Sub(y=\"bar\")) == cfg",
"def test_config_priority_between_environ_variables_and_configuration_file_with_subgroups_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments,\n test_files_path):\n\n config_file_environment_variables = 'root_manager_configuration_with_subgroups_and_no_argument_default_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file_environment_variables,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n sys.argv.append('--config-file')\n\n sys.argv.append(test_files_path + '/root_manager_configuration_with_subgroups_and_long_and_short_names.json')\n\n expected_dict = load_configuration_from_json_file(config_file_environment_variables)\n expected_dict['input']['in1']['name'] = 'i1_name'\n expected_dict['output']['o1']['name'] = 'o1_name'\n expected_dict['output']['o2']['name'] = 'o2_name'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n sys.argv = []\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_parsing_environment_variables_with_subgroups_and_no_arguments_with_default_value_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_no_argument_default_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['input']['in1']['name'] = 'my_i1_instance'\n expected_dict['output']['o1']['name'] = 'my_o1_instance'\n expected_dict['output']['o2']['name'] = 'my_o2_instance'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_hosts_stripped_env_hosts():\n def command():\n pass\n myenv = {'hosts': [' foo ', 'bar '], 'roles': [], 'exclude_hosts': []}\n eq_hosts(command, ['foo', 'bar'], env=myenv)",
"def test_service_groups(self):\n self.assertIn(\"application_services\", EFConfig.SERVICE_GROUPS)",
"def test_env_list_includes_non_ephemeral(self):\n self.assertIn(\"test\", EFConfig.ENV_LIST)",
"def test__test_environment():\n environment = os.getenv('ENV_FOR_DYNACONF')\n\n assert environment == 'test'",
"def test_env_list_includes_ephemeral(self):\n self.assertIn(\"alpha0\", EFConfig.ENV_LIST)\n self.assertIn(\"alpha1\", EFConfig.ENV_LIST)\n self.assertIn(\"alpha2\", EFConfig.ENV_LIST)\n self.assertIn(\"alpha3\", EFConfig.ENV_LIST)\n self.assertNotIn(\"alpha4\", EFConfig.ENV_LIST)",
"def test_parsing_environment_variables_with_subgroups_and_unknown_arguments_terminate_execution_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_unknown_arguments.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n result = None\n with pytest.raises(SystemExit) as result:\n _ = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result.type == SystemExit\n assert result.value.code == -1\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_parsing_environment_variables_with_subgroups_and_wrong_type_terminate_execution_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_wrong_argument_type_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n result = None\n with pytest.raises(SystemExit) as result:\n _ = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result.type == SystemExit\n assert result.value.code == -1\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_dash_vars_with_env(self):\n test_settings = yamldict.YAMLDict({'foo-bar': 'baz'})\n assert test_settings['foo-bar'] == 'baz'\n update_from_env(test_settings)\n assert test_settings['foo-bar'] == 'new-baz'",
"def test_new_runtime_env_overrides_config_with_env_vars_in_play_and_subclasses():\n with mock.patch.dict(\n os.environ,\n {\n ENV_CODE_VAR: _UnitTestSubConfig.ENV_CODE,\n \"COMPONENT_NAME\": _ENV_VAL,\n \"UNITTEST_CFG_A\": \"ENVVAR_UNITTEST_CFG_A\",\n \"UNITTEST_CFG_D\": \"ENVVAR_UNITTEST_CFG_D\",\n \"UNITTEST_CFG_G\": \"ENVVAR_UNITTEST_CFG_G\",\n \"SUB_UNITTEST_3\": \"ENVVAR_SUB_UNITTEST_3\",\n \"SUB_UNITTEST_4\": \"ENVVAR_SUB_UNITTEST_4\",\n },\n ):\n _load_config.cache_clear() # wipes the @lru_cache for fresh run on next call\n cfg = _load_config()\n\n # 1. Env var takes priority over Config vars like COMPONENT_NAME still override even if originally defined at the\n # grandparent config level\n assert cfg.COMPONENT_NAME == _ENV_VAL\n # 2. Env var still takes priority when Same-named config vars in the subclass replace the parent value\n assert cfg.UNITTEST_CFG_A == \"ENVVAR_UNITTEST_CFG_A\"\n # 3. Child prop composing 2 values from parent config is late-evaluated (at call time), not evaluated as the\n # class is read-in from a module import, or construction ... AND when doing the late-eval, it composes the\n # values from the environment vars that replaced the original values for the fields it is composing\n assert cfg.SUB_UNITTEST_1 == \"ENVVAR_UNITTEST_CFG_A:ENVVAR_UNITTEST_CFG_D\"\n # 4. Child prop composing 2 values from parent, one of which is overridden in child, AND then further\n # overriden by the environment var, does get the overridden part FROM the env var\n assert cfg.SUB_UNITTEST_2 == \"ENVVAR_UNITTEST_CFG_A:UNITTEST_CFG_B\"\n # 5. If child and parent are BOTH properties, AND the field's name is overridden by an ENVVAR, the child\n # WILL NOT pick up the env var value. It sticks to its value.\n # WHY?? See extensive note in previous test\n assert cfg.UNITTEST_CFG_G == \"SUB_UNITTEST_CFG_G\"\n # 7. Simple composition of values in the same class works\n assert cfg.SUB_UNITTEST_5 == \"ENVVAR_SUB_UNITTEST_3:ENVVAR_SUB_UNITTEST_4\"",
"def test_service_groups_has_fixtures(self):\n self.assertIn(\"fixtures\", EFConfig.SERVICE_GROUPS)",
"def test_arguments_string_parsing_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):\n\n subparser = SubgroupConfigParsingManager('toto')\n subparser.add_argument('b', is_flag=True, action=store_true)\n subparser.add_argument('n', 'name')\n root_config_parsing_manager.add_subgroup_parser('sub', subparser)\n\n check_parse_cli_result(root_config_parsing_manager, \"\", {})\n\n with pytest.raises(UnknownArgException):\n check_parse_cli_result(root_config_parsing_manager, \"-z\", {})\n\n check_parse_cli_result(root_config_parsing_manager, '-a', {'a': True})\n\n with pytest.raises(NoNameSpecifiedForSubgroupException):\n check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b', {})\n\n check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b --name titi',\n {'a': True, 'sub': {'titi': {'type': 'toto', 'b': True}}})\n\n with pytest.raises(BadContextException):\n check_parse_cli_result(root_config_parsing_manager, \"-b\", {})",
"def test_runtime_envs_get(self):\n pass",
"def test_local_env_vars_shell(tunable_groups: TunableGroups) -> None:\n _run_local_env(\n tunable_groups,\n shell_subcmd=\"$const_arg,$other_arg,$unknown_arg,$kernel_sched_latency_ns\",\n expected={\n \"const_arg\": 111, # From \"const_args\"\n \"other_arg\": float(\"NaN\"), # Not included in \"shell_env_params\"\n \"unknown_arg\": float(\"NaN\"), # Unknown/undefined variable\n \"kernel_sched_latency_ns\": 2000000, # From \"tunable_params\"\n }\n )",
"def test_get_hostgroups(self):\n pass",
"def test_config_normalization():\n assert Container._normalize({\n 'grp': {\n '__default__': {\n 'dep1': 'x',\n 'dep2': 'z',\n '$arg': 100,\n 'other': ['a', 'b'],\n '$dic': {'a': 1}\n },\n 'ent': {\n 'dep1:dep1': 'y',\n 'other:other': ['c'],\n '$dic': {'b': 2}\n },\n 'ent2': {\n '$dic': {'c': 3}\n }\n }\n }) == {\n 'grp': {\n 'ent': {\n 'dep1': ('dep1', 'y'),\n 'dep2': ('dep2', 'z'),\n '$arg': 100,\n 'other': (None, (('other', 'c'),)),\n '$dic': {'b': 2},\n },\n 'ent2': {\n 'dep1': ('dep1', 'x'),\n 'dep2': ('dep2', 'z'),\n '$arg': 100,\n 'other': (None, (('other', 'a'), ('other', 'b'))),\n '$dic': {'c': 3}\n }\n }\n }",
"def test_runtime_envs_list(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a configuration defined via environment variables with subgroups and unknown arguments terminates the execution | def test_parsing_environment_variables_with_subgroups_and_unknown_arguments_terminate_execution_in_root_parsing_manager(
        root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):
    config_file = 'root_manager_configuration_with_subgroups_and_unknown_arguments.json'
    created_environment_variables = define_environment_variables_configuration_from_json_file(
        file_name=config_file,
        simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.
        arguments_prefix[0],
        group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.
        get_groups_prefixes())

    result = None
    with pytest.raises(SystemExit) as result:
        _ = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()

    assert result.type == SystemExit
    assert result.value.code == -1

    remove_environment_variables_configuration(variables_names=created_environment_variables) | [
"def test_parsing_environment_variables_with_subgroups_and_wrong_type_terminate_execution_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_wrong_argument_type_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n result = None\n with pytest.raises(SystemExit) as result:\n _ = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result.type == SystemExit\n assert result.value.code == -1\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_config_priority_between_environ_variables_and_configuration_file_with_subgroups_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments,\n test_files_path):\n\n config_file_environment_variables = 'root_manager_configuration_with_subgroups_and_no_argument_default_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file_environment_variables,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n sys.argv.append('--config-file')\n\n sys.argv.append(test_files_path + '/root_manager_configuration_with_subgroups_and_long_and_short_names.json')\n\n expected_dict = load_configuration_from_json_file(config_file_environment_variables)\n expected_dict['input']['in1']['name'] = 'i1_name'\n expected_dict['output']['o1']['name'] = 'o1_name'\n expected_dict['output']['o2']['name'] = 'o2_name'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n sys.argv = []\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_parsing_environment_variables_with_subgroups_and_no_arguments_with_default_value_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_no_argument_default_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['input']['in1']['name'] = 'my_i1_instance'\n expected_dict['output']['o1']['name'] = 'my_o1_instance'\n expected_dict['output']['o2']['name'] = 'my_o2_instance'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_nested_exclusive_option_groups(self):\n self.assertRaises(SystemExit,\n self._test_options, [\"--test1\", \"--test2\"])",
"def test_local_env_vars_shell(tunable_groups: TunableGroups) -> None:\n _run_local_env(\n tunable_groups,\n shell_subcmd=\"$const_arg,$other_arg,$unknown_arg,$kernel_sched_latency_ns\",\n expected={\n \"const_arg\": 111, # From \"const_args\"\n \"other_arg\": float(\"NaN\"), # Not included in \"shell_env_params\"\n \"unknown_arg\": float(\"NaN\"), # Unknown/undefined variable\n \"kernel_sched_latency_ns\": 2000000, # From \"tunable_params\"\n }\n )",
"def test_parsing_environment_variables_with_subgroups_and_long_and_short_names_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_long_and_short_names.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n expected_dict = load_configuration_from_json_file('root_manager_configuration_with_subgroups.json')\n expected_dict['input']['in1']['name'] = 'i1_name'\n expected_dict['output']['o1']['model'] = 'o1_model_x'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_arguments_dict_validation_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):\n subparser = SubgroupConfigParsingManager('toto')\n subparser.add_argument('b', is_flag=True, action=store_true)\n subparser.add_argument('type', is_flag=True, action=store_true)\n subparser.add_argument('n', 'name')\n root_config_parsing_manager.add_subgroup_parser('sub', subparser)\n\n dic_a = {'a': True}\n\n dic_z = {\n \"z\": True\n }\n\n dic_b = {\n 'b': \"type\"\n }\n\n dic_a_sub = {\n 'a': True,\n 'sub': {\n 'titi':\n {\n 'type': 'toto',\n 'b': \"type\"\n }\n }\n }\n\n with pytest.raises(UnknownArgException):\n root_config_parsing_manager.validate(dic_z)\n\n with pytest.raises(UnknownArgException):\n root_config_parsing_manager.validate(dic_b)\n\n assert root_config_parsing_manager.validate(dic_a) == dic_a\n\n assert root_config_parsing_manager.validate(dic_a_sub) == dic_a_sub\n\n assert root_config_parsing_manager.validate({}) == {}",
"def check_env():\n\n # add argv[4] for design\n if(len(sys.argv[1]) < 1 and len(sys.argv[2] < 1 and len(sys.argv[3] < 1))):\n printError()\n exit()",
"def test_local_env_vars_windows(tunable_groups: TunableGroups) -> None:\n _run_local_env(\n tunable_groups,\n shell_subcmd=r\"%const_arg%,%other_arg%,%unknown_arg%,%kernel_sched_latency_ns%\",\n expected={\n \"const_arg\": 111, # From \"const_args\"\n \"other_arg\": r\"%other_arg%\", # Not included in \"shell_env_params\"\n \"unknown_arg\": r\"%unknown_arg%\", # Unknown/undefined variable\n \"kernel_sched_latency_ns\": 2000000, # From \"tunable_params\"\n }\n )",
"def test_prepare_environment(self):\n pass",
"def test_arguments_string_parsing_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):\n\n subparser = SubgroupConfigParsingManager('toto')\n subparser.add_argument('b', is_flag=True, action=store_true)\n subparser.add_argument('n', 'name')\n root_config_parsing_manager.add_subgroup_parser('sub', subparser)\n\n check_parse_cli_result(root_config_parsing_manager, \"\", {})\n\n with pytest.raises(UnknownArgException):\n check_parse_cli_result(root_config_parsing_manager, \"-z\", {})\n\n check_parse_cli_result(root_config_parsing_manager, '-a', {'a': True})\n\n with pytest.raises(NoNameSpecifiedForSubgroupException):\n check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b', {})\n\n check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b --name titi',\n {'a': True, 'sub': {'titi': {'type': 'toto', 'b': True}}})\n\n with pytest.raises(BadContextException):\n check_parse_cli_result(root_config_parsing_manager, \"-b\", {})",
"def test_env_list_includes_ephemeral(self):\n self.assertIn(\"alpha0\", EFConfig.ENV_LIST)\n self.assertIn(\"alpha1\", EFConfig.ENV_LIST)\n self.assertIn(\"alpha2\", EFConfig.ENV_LIST)\n self.assertIn(\"alpha3\", EFConfig.ENV_LIST)\n self.assertNotIn(\"alpha4\", EFConfig.ENV_LIST)",
"def test_all_command_line():\n assert read_settings('abc 123 -p testpre'.split()) == \\\n {'oauth_token': 'abc',\n 'oauth_secret': '123',\n 'app_key': 'RWmvpkGK4m9tavh4bCfdzsYjH',\n 'app_secret': 'uCShewTskeuBvt9haLi8LFARSJXkxJsCPNZ3dGwpYz4vuc5Mo9',\n 'config': 'stwark.cfg',\n 'prefix': 'testpre'}",
"def test_environment(self):\n pass",
"def test_parsing_environment_variables_with_no_argument_with_default_value_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments):\n config_file = 'root_manager_basic_configuration_with_no_argument_with_default_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['arg5'] = 'default value'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_expand_groups_empty_list() -> None:\n assert not Environment._expand_groups([\"$empty\"], _GROUPS)",
"def test_runtime_envs_get(self):\n pass",
"def test_expand_groups_unknown() -> None:\n with pytest.raises(KeyError):\n Environment._expand_groups([\"$list\", \"$UNKNOWN\", \"$str\", \"end\"], _GROUPS)",
"def _argparse_check_env(cls, env_prefix, prefix, k, args):\n if env_prefix is None:\n return\n\n argname = f'{prefix}{k}'\n name = f'{env_prefix}_{prefix}{k}'.upper().replace('-', '_')\n v = os.environ.get(name)\n if v is not None:\n args.setdefault(argname, v)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a configuration defined via environment variables with subgroups, omitting variables that have default values, is correctly parsed | def test_parsing_environment_variables_with_subgroups_and_no_arguments_with_default_value_in_root_parsing_manager(
root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):
config_file = 'root_manager_configuration_with_subgroups_and_no_argument_default_value.json'
created_environment_variables = define_environment_variables_configuration_from_json_file(
file_name=config_file,
simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.
arguments_prefix[0],
group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.
get_groups_prefixes())
expected_dict = load_configuration_from_json_file(config_file)
expected_dict['input']['in1']['name'] = 'my_i1_instance'
expected_dict['output']['o1']['name'] = 'my_o1_instance'
expected_dict['output']['o2']['name'] = 'my_o2_instance'
result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()
assert result == expected_dict
remove_environment_variables_configuration(variables_names=created_environment_variables) | [
"def test_parsing_environment_variables_with_no_argument_with_default_value_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments):\n config_file = 'root_manager_basic_configuration_with_no_argument_with_default_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['arg5'] = 'default value'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_parsing_environment_variables_with_subgroups_and_long_and_short_names_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_long_and_short_names.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n expected_dict = load_configuration_from_json_file('root_manager_configuration_with_subgroups.json')\n expected_dict['input']['in1']['name'] = 'i1_name'\n expected_dict['output']['o1']['model'] = 'o1_model_x'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_config_priority_between_environ_variables_and_configuration_file_with_subgroups_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments,\n test_files_path):\n\n config_file_environment_variables = 'root_manager_configuration_with_subgroups_and_no_argument_default_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file_environment_variables,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n sys.argv.append('--config-file')\n\n sys.argv.append(test_files_path + '/root_manager_configuration_with_subgroups_and_long_and_short_names.json')\n\n expected_dict = load_configuration_from_json_file(config_file_environment_variables)\n expected_dict['input']['in1']['name'] = 'i1_name'\n expected_dict['output']['o1']['name'] = 'o1_name'\n expected_dict['output']['o2']['name'] = 'o2_name'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n sys.argv = []\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_runtime_envs_set_default(self):\n pass",
"def test_hosts_stripped_env_hosts():\n def command():\n pass\n myenv = {'hosts': [' foo ', 'bar '], 'roles': [], 'exclude_hosts': []}\n eq_hosts(command, ['foo', 'bar'], env=myenv)",
"def test_load_config_from_dotenv_non_empty_file_all_keys_accepted(tmp_path):\n env_file = tmp_path / \".env\"\n env_file.write_text('VALID = true\\ntruly_invalid = true\\nInVaLiD = true')\n\n config = ConfigLoader.load_config_from_dotenv(str(env_file), accepted_keys=None)\n assert len(config) == 1, 'Resulting `Config`-instance should contain only one key-value pair'\n assert 'VALID' in config, '`VALID` key should be in resulting config'\n assert 'InVaLiD' not in config and 'truly_invalid' not in config, 'Other invalid keys should not be in resulting config'",
"def test_env_list_includes_non_ephemeral(self):\n self.assertIn(\"test\", EFConfig.ENV_LIST)",
"def test_dash_vars_with_env(self):\n test_settings = yamldict.YAMLDict({'foo-bar': 'baz'})\n assert test_settings['foo-bar'] == 'baz'\n update_from_env(test_settings)\n assert test_settings['foo-bar'] == 'new-baz'",
"def test_parse_hsmconfig_unknown_variable_value(self):\n with self.assertRaises(RuntimeError):\n hsm.parse_hsmconfig([\"foo=$TEST\"], \"test data\", {})",
"def test_parsing_environment_variables_with_subgroups_and_unknown_arguments_terminate_execution_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_unknown_arguments.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n result = None\n with pytest.raises(SystemExit) as result:\n _ = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result.type == SystemExit\n assert result.value.code == -1\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_override_dotenv_file_with_env_var(tmpdir):\n # Verify default if nothing overriding\n cfg = LocalConfig()\n assert cfg.COMPONENT_NAME == \"USAspending API\"\n dotenv_val = \"a_test_verifying_dotenv_overrides_runtime_env_default_config\"\n\n # Now the .env file takes precedence\n tmp_config_dir = tmpdir.mkdir(\"config_dir\")\n dotenv_file = tmp_config_dir.join(\".env\")\n # Must use some of the default overrides from .env, like USASPENDING_DB_*. Fallback to .env.template if not existing\n shutil.copy(str(_PROJECT_ROOT_DIR / \".env.template\"), dotenv_file)\n if Path(_PROJECT_ROOT_DIR / \".env\").exists():\n shutil.copy(str(_PROJECT_ROOT_DIR / \".env\"), dotenv_file)\n with open(dotenv_file, \"a\"):\n dotenv_file.write(f\"COMPONENT_NAME={dotenv_val}\", \"a\")\n dotenv_path = os.path.join(dotenv_file.dirname, dotenv_file.basename)\n cfg = LocalConfig(_env_file=dotenv_path)\n assert cfg.COMPONENT_NAME == dotenv_val\n\n # Now the env var takes ultimate precedence\n with mock.patch.dict(os.environ, {\"COMPONENT_NAME\": _ENV_VAL}):\n cfg = LocalConfig()\n assert cfg.COMPONENT_NAME == _ENV_VAL",
"def test_parsing_environment_variables_with_subgroups_and_wrong_type_terminate_execution_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_wrong_argument_type_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n result = None\n with pytest.raises(SystemExit) as result:\n _ = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result.type == SystemExit\n assert result.value.code == -1\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_nested(self):\n env = {\"APP_X\": \"nope\", \"XYZ_X\": \"foo\", \"XYZ_SUB_Y\": \"bar\"}\n cfg = environ.to_config(Nested, environ=env)\n\n assert Nested(x=\"foo\", sub=Nested.Sub(y=\"bar\")) == cfg",
"def test_with_overrides_value(self):\n\n expected = {\n \"AWS_SAM_LOCAL\": \"true\",\n \"AWS_LAMBDA_FUNCTION_MEMORY_SIZE\": \"1024\",\n \"AWS_LAMBDA_FUNCTION_TIMEOUT\": \"123\",\n \"AWS_LAMBDA_FUNCTION_HANDLER\": \"handler\",\n \"AWS_LAMBDA_FUNCTION_NAME\": self.name,\n \"AWS_LAMBDA_FUNCTION_VERSION\": \"$LATEST\",\n \"AWS_LAMBDA_LOG_GROUP_NAME\": f\"aws/lambda/{self.name}\",\n \"AWS_LAMBDA_LOG_STREAM_NAME\": \"$LATEST\",\n \"AWS_ACCOUNT_ID\": \"123456789012\",\n \"AWS_REGION\": \"us-east-1\",\n \"AWS_ACCESS_KEY_ID\": \"defaultkey\",\n \"AWS_SECRET_ACCESS_KEY\": \"defaultsecret\",\n # This value is coming from user passed environment variable\n \"AWS_DEFAULT_REGION\": \"user-specified-region\",\n \"variable2\": \"mystring\",\n # Value coming from the overrides\n \"variable1\": \"variable1 value from overrides\",\n \"list_var\": \"list value coming from overrides\",\n \"dict_var\": \"\",\n \"none_var\": \"\",\n \"true_var\": \"true\",\n \"false_var\": \"false\",\n }\n\n environ = EnvironmentVariables(\n self.name,\n self.memory,\n self.timeout,\n self.handler,\n variables=self.variables,\n shell_env_values=self.shell_env,\n override_values=self.override,\n )\n\n self.assertEqual(environ.resolve(), expected)",
"def test_env_list_includes_ephemeral(self):\n self.assertIn(\"alpha0\", EFConfig.ENV_LIST)\n self.assertIn(\"alpha1\", EFConfig.ENV_LIST)\n self.assertIn(\"alpha2\", EFConfig.ENV_LIST)\n self.assertIn(\"alpha3\", EFConfig.ENV_LIST)\n self.assertNotIn(\"alpha4\", EFConfig.ENV_LIST)",
"def test_config_as_dict(self):\n with mock.patch.dict(\"os.environ\"):\n os.environ[\"AIRFLOW__VAR__broken\"] = \"not_ok\"\n asdict = conf.as_dict(raw=True, display_sensitive=True)\n assert asdict.get(\"VAR\") is None\n assert asdict[\"testsection\"][\"testkey\"] == \"testvalue\"",
"def test_config_normalization():\n assert Container._normalize({\n 'grp': {\n '__default__': {\n 'dep1': 'x',\n 'dep2': 'z',\n '$arg': 100,\n 'other': ['a', 'b'],\n '$dic': {'a': 1}\n },\n 'ent': {\n 'dep1:dep1': 'y',\n 'other:other': ['c'],\n '$dic': {'b': 2}\n },\n 'ent2': {\n '$dic': {'c': 3}\n }\n }\n }) == {\n 'grp': {\n 'ent': {\n 'dep1': ('dep1', 'y'),\n 'dep2': ('dep2', 'z'),\n '$arg': 100,\n 'other': (None, (('other', 'c'),)),\n '$dic': {'b': 2},\n },\n 'ent2': {\n 'dep1': ('dep1', 'x'),\n 'dep2': ('dep2', 'z'),\n '$arg': 100,\n 'other': (None, (('other', 'a'), ('other', 'b'))),\n '$dic': {'c': 3}\n }\n }\n }",
"def test_precedence_order(tmpdir):\n # Verify default if nothing overriding\n cfg = LocalConfig()\n assert cfg.COMPONENT_NAME == \"USAspending API\"\n dotenv_val = \"a_test_verifying_dotenv_overrides_runtime_env_default_config\"\n\n # Now the .env file takes precedence\n tmp_config_dir = tmpdir.mkdir(\"config_dir\")\n dotenv_file = tmp_config_dir.join(\".env\")\n # Must use some of the default overrides from .env, like USASPENDING_DB_*. Fallback to .env.template if not existing\n shutil.copy(str(_PROJECT_ROOT_DIR / \".env.template\"), dotenv_file)\n if Path(_PROJECT_ROOT_DIR / \".env\").exists():\n shutil.copy(str(_PROJECT_ROOT_DIR / \".env\"), dotenv_file)\n with open(dotenv_file, \"a\"):\n dotenv_file.write(f\"COMPONENT_NAME={dotenv_val}\", \"a\")\n dotenv_path = os.path.join(dotenv_file.dirname, dotenv_file.basename)\n cfg = LocalConfig(_env_file=dotenv_path)\n assert cfg.COMPONENT_NAME == dotenv_val\n\n # Now the env var, when present, takes precedence\n with mock.patch.dict(os.environ, {\"COMPONENT_NAME\": _ENV_VAL}):\n cfg = LocalConfig()\n assert cfg.COMPONENT_NAME == _ENV_VAL\n\n # Now the keyword arg takes precedence\n with mock.patch.dict(os.environ, {\"COMPONENT_NAME\": _ENV_VAL}):\n kwarg_val = \"component_name_set_as_a_kwarg\"\n cfg = LocalConfig(COMPONENT_NAME=kwarg_val)\n assert cfg.COMPONENT_NAME == kwarg_val\n\n # Or if overriding via CLI, Now the CLI arg takes precedence\n cli_val = \"test_override_with_command_line_args\"\n test_args = [\"dummy_program\", \"--config\", f\"COMPONENT_NAME={cli_val}\"]\n with mock.patch.dict(os.environ, {\"COMPONENT_NAME\": _ENV_VAL}):\n with patch.object(sys, \"argv\", test_args):\n _load_config.cache_clear() # wipes the @lru_cache for fresh run on next call\n app_cfg_copy = _load_config()\n assert app_cfg_copy.COMPONENT_NAME == cli_val",
"def test_new_runtime_env_overrides_config_with_env_vars_in_play_and_subclasses():\n with mock.patch.dict(\n os.environ,\n {\n ENV_CODE_VAR: _UnitTestSubConfig.ENV_CODE,\n \"COMPONENT_NAME\": _ENV_VAL,\n \"UNITTEST_CFG_A\": \"ENVVAR_UNITTEST_CFG_A\",\n \"UNITTEST_CFG_D\": \"ENVVAR_UNITTEST_CFG_D\",\n \"UNITTEST_CFG_G\": \"ENVVAR_UNITTEST_CFG_G\",\n \"SUB_UNITTEST_3\": \"ENVVAR_SUB_UNITTEST_3\",\n \"SUB_UNITTEST_4\": \"ENVVAR_SUB_UNITTEST_4\",\n },\n ):\n _load_config.cache_clear() # wipes the @lru_cache for fresh run on next call\n cfg = _load_config()\n\n # 1. Env var takes priority over Config vars like COMPONENT_NAME still override even if originally defined at the\n # grandparent config level\n assert cfg.COMPONENT_NAME == _ENV_VAL\n # 2. Env var still takes priority when Same-named config vars in the subclass replace the parent value\n assert cfg.UNITTEST_CFG_A == \"ENVVAR_UNITTEST_CFG_A\"\n # 3. Child prop composing 2 values from parent config is late-evaluated (at call time), not evaluated as the\n # class is read-in from a module import, or construction ... AND when doing the late-eval, it composes the\n # values from the environment vars that replaced the original values for the fields it is composing\n assert cfg.SUB_UNITTEST_1 == \"ENVVAR_UNITTEST_CFG_A:ENVVAR_UNITTEST_CFG_D\"\n # 4. Child prop composing 2 values from parent, one of which is overridden in child, AND then further\n # overriden by the environment var, does get the overridden part FROM the env var\n assert cfg.SUB_UNITTEST_2 == \"ENVVAR_UNITTEST_CFG_A:UNITTEST_CFG_B\"\n # 5. If child and parent are BOTH properties, AND the field's name is overridden by an ENVVAR, the child\n # WILL NOT pick up the env var value. It sticks to its value.\n # WHY?? See extensive note in previous test\n assert cfg.UNITTEST_CFG_G == \"SUB_UNITTEST_CFG_G\"\n # 7. Simple composition of values in the same class works\n assert cfg.SUB_UNITTEST_5 == \"ENVVAR_SUB_UNITTEST_3:ENVVAR_SUB_UNITTEST_4\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a configuration defined via environment variables with subgroups and wrong argument type terminates the execution | def test_parsing_environment_variables_with_subgroups_and_wrong_type_terminate_execution_in_root_parsing_manager(
root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):
config_file = 'root_manager_configuration_with_subgroups_and_wrong_argument_type_value.json'
created_environment_variables = define_environment_variables_configuration_from_json_file(
file_name=config_file,
simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.
arguments_prefix[0],
group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.
get_groups_prefixes())
result = None
with pytest.raises(SystemExit) as result:
_ = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()
assert result.type == SystemExit
assert result.value.code == -1
remove_environment_variables_configuration(variables_names=created_environment_variables) | [
"def test_arguments_dict_validation_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):\n subparser = SubgroupConfigParsingManager('toto')\n subparser.add_argument('b', is_flag=True, action=store_true)\n subparser.add_argument('type', is_flag=True, action=store_true)\n subparser.add_argument('n', 'name')\n root_config_parsing_manager.add_subgroup_parser('sub', subparser)\n\n dic_a = {'a': True}\n\n dic_z = {\n \"z\": True\n }\n\n dic_b = {\n 'b': \"type\"\n }\n\n dic_a_sub = {\n 'a': True,\n 'sub': {\n 'titi':\n {\n 'type': 'toto',\n 'b': \"type\"\n }\n }\n }\n\n with pytest.raises(UnknownArgException):\n root_config_parsing_manager.validate(dic_z)\n\n with pytest.raises(UnknownArgException):\n root_config_parsing_manager.validate(dic_b)\n\n assert root_config_parsing_manager.validate(dic_a) == dic_a\n\n assert root_config_parsing_manager.validate(dic_a_sub) == dic_a_sub\n\n assert root_config_parsing_manager.validate({}) == {}",
"def test_parsing_environment_variables_with_subgroups_and_unknown_arguments_terminate_execution_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_unknown_arguments.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n result = None\n with pytest.raises(SystemExit) as result:\n _ = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result.type == SystemExit\n assert result.value.code == -1\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_nested_exclusive_option_groups(self):\n self.assertRaises(SystemExit,\n self._test_options, [\"--test1\", \"--test2\"])",
"def test_config_priority_between_environ_variables_and_configuration_file_with_subgroups_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments,\n test_files_path):\n\n config_file_environment_variables = 'root_manager_configuration_with_subgroups_and_no_argument_default_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file_environment_variables,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n sys.argv.append('--config-file')\n\n sys.argv.append(test_files_path + '/root_manager_configuration_with_subgroups_and_long_and_short_names.json')\n\n expected_dict = load_configuration_from_json_file(config_file_environment_variables)\n expected_dict['input']['in1']['name'] = 'i1_name'\n expected_dict['output']['o1']['name'] = 'o1_name'\n expected_dict['output']['o2']['name'] = 'o2_name'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n sys.argv = []\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_arguments_string_parsing_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):\n\n subparser = SubgroupConfigParsingManager('toto')\n subparser.add_argument('b', is_flag=True, action=store_true)\n subparser.add_argument('n', 'name')\n root_config_parsing_manager.add_subgroup_parser('sub', subparser)\n\n check_parse_cli_result(root_config_parsing_manager, \"\", {})\n\n with pytest.raises(UnknownArgException):\n check_parse_cli_result(root_config_parsing_manager, \"-z\", {})\n\n check_parse_cli_result(root_config_parsing_manager, '-a', {'a': True})\n\n with pytest.raises(NoNameSpecifiedForSubgroupException):\n check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b', {})\n\n check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b --name titi',\n {'a': True, 'sub': {'titi': {'type': 'toto', 'b': True}}})\n\n with pytest.raises(BadContextException):\n check_parse_cli_result(root_config_parsing_manager, \"-b\", {})",
"def test_parsing_environment_variables_with_subgroups_and_no_arguments_with_default_value_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_no_argument_default_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['input']['in1']['name'] = 'my_i1_instance'\n expected_dict['output']['o1']['name'] = 'my_o1_instance'\n expected_dict['output']['o2']['name'] = 'my_o2_instance'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def check_env():\n\n # add argv[4] for design\n if(len(sys.argv[1]) < 1 and len(sys.argv[2] < 1 and len(sys.argv[3] < 1))):\n printError()\n exit()",
"def test_env_list_includes_ephemeral(self):\n self.assertIn(\"alpha0\", EFConfig.ENV_LIST)\n self.assertIn(\"alpha1\", EFConfig.ENV_LIST)\n self.assertIn(\"alpha2\", EFConfig.ENV_LIST)\n self.assertIn(\"alpha3\", EFConfig.ENV_LIST)\n self.assertNotIn(\"alpha4\", EFConfig.ENV_LIST)",
"def test_parsing_environment_variables_with_subgroups_and_long_and_short_names_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_long_and_short_names.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n expected_dict = load_configuration_from_json_file('root_manager_configuration_with_subgroups.json')\n expected_dict['input']['in1']['name'] = 'i1_name'\n expected_dict['output']['o1']['model'] = 'o1_model_x'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_new_runtime_env_overrides_config_errors_subclass_only_validated_fields():\n\n with mock.patch.dict(\n os.environ,\n {\n ENV_CODE_VAR: _UnitTestSubConfigFailFindingSubclassFieldsInValidator1.ENV_CODE,\n },\n ):\n with pytest.raises(KeyError) as exc_info:\n _load_config.cache_clear() # wipes the @lru_cache for fresh run on next call\n _load_config()\n\n assert \"SUB_UNITTEST_6\" in str(exc_info.value)",
"def test_local_env_vars_shell(tunable_groups: TunableGroups) -> None:\n _run_local_env(\n tunable_groups,\n shell_subcmd=\"$const_arg,$other_arg,$unknown_arg,$kernel_sched_latency_ns\",\n expected={\n \"const_arg\": 111, # From \"const_args\"\n \"other_arg\": float(\"NaN\"), # Not included in \"shell_env_params\"\n \"unknown_arg\": float(\"NaN\"), # Unknown/undefined variable\n \"kernel_sched_latency_ns\": 2000000, # From \"tunable_params\"\n }\n )",
"def test_runtime_envs_get(self):\n pass",
"def test_invalid_setting_key(self):\n ...",
"def test_parse_hsmconfig_unknown_variable_value(self):\n with self.assertRaises(RuntimeError):\n hsm.parse_hsmconfig([\"foo=$TEST\"], \"test data\", {})",
"def test_prepare_environment(self):\n pass",
"def test_runtime_envs_list(self):\n pass",
"def test_environment(self):\n pass",
"def test_env_list_includes_non_ephemeral(self):\n self.assertIn(\"test\", EFConfig.ENV_LIST)",
"def test_build_group_args(self):\n mydir = os.path.dirname(__file__)\n path = os.path.join(mydir, './build_group_args.yml')\n build.build_package(None, 'groups', 'pkg', [], path)\n\n from quilt.data.groups import pkg\n\n assert isinstance(pkg.group_a.csv(), DataFrame), \\\n 'Expected parent `transform: csv` to affect group_a.csv()'\n assert isinstance(pkg.group_a.tsv(), DataFrame), \\\n 'Expected local `transform: tsv` to affect group_a.tsv()'\n # TODO these tests should really test the node type and verify it as a file node\n # but currently both raw files and DFs are DataNode instances\n assert isinstance(pkg.group_b.txt(), string_types), \\\n 'Expected `transform: id` to be inferred from file extension'\n assert isinstance(pkg.group_b.subgroup.txt(), string_types), \\\n 'Expected `transform: id` to be inferred from file extension'\n # ENDTODO\n assert isinstance(pkg.group_b.tsv(), DataFrame), \\\n 'Expected `transform: tsv` to be inferred from file extension'\n assert pkg.group_b.tsv()['Date0'].dtype == np.dtype('<M8[ns]'), \\\n 'Expected Date0 column to parse as date'\n assert pkg.group_b.subgroup.tsv().shape == (1, 3), \\\n 'Expected `transform: tsv` and one skipped row from group args'\n assert pkg.group_b.subgroup.csv().shape == (0, 2), \\\n 'Expected local `transform: csv` and one skipped row from group args'\n assert pkg.group_b.subgroup.many_tsv.one().shape == (1, 3), \\\n 'Expected local `transform: csv` and one skipped row from group args'\n assert isinstance(pkg.group_b.subgroup.many_tsv.two(), DataFrame), \\\n 'Expected `transform: tsv` from ancestor'\n assert isinstance(pkg.group_b.subgroup.many_tsv.three(), DataFrame), \\\n 'Expected `transform: tsv` from ancestor'\n assert not pkg.group_empty._keys(), 'Expected group_empty to be empty'\n assert not pkg.group_x.empty_child._keys(), 'Expected group_x.emptychild to be empty'"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that argument values defined via environment variables take precedence over values defined via a config file when subgroups are present in the configuration | def test_config_priority_between_environ_variables_and_configuration_file_with_subgroups_in_root_parsing_manager(
root_config_parsing_manager_with_mandatory_and_optional_arguments,
test_files_path):
config_file_environment_variables = 'root_manager_configuration_with_subgroups_and_no_argument_default_value.json'
created_environment_variables = define_environment_variables_configuration_from_json_file(
file_name=config_file_environment_variables,
simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.
arguments_prefix[0],
group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.
get_groups_prefixes())
sys.argv.append('--config-file')
sys.argv.append(test_files_path + '/root_manager_configuration_with_subgroups_and_long_and_short_names.json')
expected_dict = load_configuration_from_json_file(config_file_environment_variables)
expected_dict['input']['in1']['name'] = 'i1_name'
expected_dict['output']['o1']['name'] = 'o1_name'
expected_dict['output']['o2']['name'] = 'o2_name'
result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()
assert result == expected_dict
sys.argv = []
remove_environment_variables_configuration(variables_names=created_environment_variables) | [
"def test_parsing_environment_variables_with_subgroups_and_no_arguments_with_default_value_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_no_argument_default_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n expected_dict = load_configuration_from_json_file(config_file)\n expected_dict['input']['in1']['name'] = 'my_i1_instance'\n expected_dict['output']['o1']['name'] = 'my_o1_instance'\n expected_dict['output']['o2']['name'] = 'my_o2_instance'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_dash_vars_with_env(self):\n test_settings = yamldict.YAMLDict({'foo-bar': 'baz'})\n assert test_settings['foo-bar'] == 'baz'\n update_from_env(test_settings)\n assert test_settings['foo-bar'] == 'new-baz'",
"def test_all_command_line():\n assert read_settings('abc 123 -p testpre'.split()) == \\\n {'oauth_token': 'abc',\n 'oauth_secret': '123',\n 'app_key': 'RWmvpkGK4m9tavh4bCfdzsYjH',\n 'app_secret': 'uCShewTskeuBvt9haLi8LFARSJXkxJsCPNZ3dGwpYz4vuc5Mo9',\n 'config': 'stwark.cfg',\n 'prefix': 'testpre'}",
"def test_inject_env_arg_precedence(self) -> None:\n client = assemble(ClientWithValuePrecedence)\n val_attrib1 = client.get_value_attrib1()\n val_attrib2 = client.get_value_attrib2()\n # attrib1 is overwritten from env vars:\n self.assertEqual(val_attrib1, 11)\n # attrib2 is overwritten from args:\n self.assertEqual(val_attrib2, 13)",
"def test_parsing_environment_variables_with_subgroups_and_long_and_short_names_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_long_and_short_names.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n expected_dict = load_configuration_from_json_file('root_manager_configuration_with_subgroups.json')\n expected_dict['input']['in1']['name'] = 'i1_name'\n expected_dict['output']['o1']['model'] = 'o1_model_x'\n\n result = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result == expected_dict\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_hosts_stripped_env_hosts():\n def command():\n pass\n myenv = {'hosts': [' foo ', 'bar '], 'roles': [], 'exclude_hosts': []}\n eq_hosts(command, ['foo', 'bar'], env=myenv)",
"def complete_configuration(env, args):\n cfg = dict()\n\n # combining previously defined environment and arguments from command line\n # to prepare process configuration\n for key in env:\n if key.lower() in args and args[key.lower()]:\n cfg[key.lower()] = args[key.lower()]\n else:\n cfg[key.lower()] = env[key]\n for key in args:\n if key not in cfg:\n cfg[key] = args[key]\n\n return cfg",
"def test_precedence_order(tmpdir):\n # Verify default if nothing overriding\n cfg = LocalConfig()\n assert cfg.COMPONENT_NAME == \"USAspending API\"\n dotenv_val = \"a_test_verifying_dotenv_overrides_runtime_env_default_config\"\n\n # Now the .env file takes precedence\n tmp_config_dir = tmpdir.mkdir(\"config_dir\")\n dotenv_file = tmp_config_dir.join(\".env\")\n # Must use some of the default overrides from .env, like USASPENDING_DB_*. Fallback to .env.template if not existing\n shutil.copy(str(_PROJECT_ROOT_DIR / \".env.template\"), dotenv_file)\n if Path(_PROJECT_ROOT_DIR / \".env\").exists():\n shutil.copy(str(_PROJECT_ROOT_DIR / \".env\"), dotenv_file)\n with open(dotenv_file, \"a\"):\n dotenv_file.write(f\"COMPONENT_NAME={dotenv_val}\", \"a\")\n dotenv_path = os.path.join(dotenv_file.dirname, dotenv_file.basename)\n cfg = LocalConfig(_env_file=dotenv_path)\n assert cfg.COMPONENT_NAME == dotenv_val\n\n # Now the env var, when present, takes precedence\n with mock.patch.dict(os.environ, {\"COMPONENT_NAME\": _ENV_VAL}):\n cfg = LocalConfig()\n assert cfg.COMPONENT_NAME == _ENV_VAL\n\n # Now the keyword arg takes precedence\n with mock.patch.dict(os.environ, {\"COMPONENT_NAME\": _ENV_VAL}):\n kwarg_val = \"component_name_set_as_a_kwarg\"\n cfg = LocalConfig(COMPONENT_NAME=kwarg_val)\n assert cfg.COMPONENT_NAME == kwarg_val\n\n # Or if overriding via CLI, Now the CLI arg takes precedence\n cli_val = \"test_override_with_command_line_args\"\n test_args = [\"dummy_program\", \"--config\", f\"COMPONENT_NAME={cli_val}\"]\n with mock.patch.dict(os.environ, {\"COMPONENT_NAME\": _ENV_VAL}):\n with patch.object(sys, \"argv\", test_args):\n _load_config.cache_clear() # wipes the @lru_cache for fresh run on next call\n app_cfg_copy = _load_config()\n assert app_cfg_copy.COMPONENT_NAME == cli_val",
"def _argparse_check_env(cls, env_prefix, prefix, k, args):\n if env_prefix is None:\n return\n\n argname = f'{prefix}{k}'\n name = f'{env_prefix}_{prefix}{k}'.upper().replace('-', '_')\n v = os.environ.get(name)\n if v is not None:\n args.setdefault(argname, v)",
"def test_parsing_environment_variables_with_subgroups_and_unknown_arguments_terminate_execution_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_unknown_arguments.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n result = None\n with pytest.raises(SystemExit) as result:\n _ = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result.type == SystemExit\n assert result.value.code == -1\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_local_env_vars_shell(tunable_groups: TunableGroups) -> None:\n _run_local_env(\n tunable_groups,\n shell_subcmd=\"$const_arg,$other_arg,$unknown_arg,$kernel_sched_latency_ns\",\n expected={\n \"const_arg\": 111, # From \"const_args\"\n \"other_arg\": float(\"NaN\"), # Not included in \"shell_env_params\"\n \"unknown_arg\": float(\"NaN\"), # Unknown/undefined variable\n \"kernel_sched_latency_ns\": 2000000, # From \"tunable_params\"\n }\n )",
"def test_with_overrides_value(self):\n\n expected = {\n \"AWS_SAM_LOCAL\": \"true\",\n \"AWS_LAMBDA_FUNCTION_MEMORY_SIZE\": \"1024\",\n \"AWS_LAMBDA_FUNCTION_TIMEOUT\": \"123\",\n \"AWS_LAMBDA_FUNCTION_HANDLER\": \"handler\",\n \"AWS_LAMBDA_FUNCTION_NAME\": self.name,\n \"AWS_LAMBDA_FUNCTION_VERSION\": \"$LATEST\",\n \"AWS_LAMBDA_LOG_GROUP_NAME\": f\"aws/lambda/{self.name}\",\n \"AWS_LAMBDA_LOG_STREAM_NAME\": \"$LATEST\",\n \"AWS_ACCOUNT_ID\": \"123456789012\",\n \"AWS_REGION\": \"us-east-1\",\n \"AWS_ACCESS_KEY_ID\": \"defaultkey\",\n \"AWS_SECRET_ACCESS_KEY\": \"defaultsecret\",\n # This value is coming from user passed environment variable\n \"AWS_DEFAULT_REGION\": \"user-specified-region\",\n \"variable2\": \"mystring\",\n # Value coming from the overrides\n \"variable1\": \"variable1 value from overrides\",\n \"list_var\": \"list value coming from overrides\",\n \"dict_var\": \"\",\n \"none_var\": \"\",\n \"true_var\": \"true\",\n \"false_var\": \"false\",\n }\n\n environ = EnvironmentVariables(\n self.name,\n self.memory,\n self.timeout,\n self.handler,\n variables=self.variables,\n shell_env_values=self.shell_env,\n override_values=self.override,\n )\n\n self.assertEqual(environ.resolve(), expected)",
"def test_parsing_environment_variables_with_subgroups_and_wrong_type_terminate_execution_in_root_parsing_manager(\n root_config_parsing_manager_with_mandatory_and_optional_arguments, test_files_path):\n config_file = 'root_manager_configuration_with_subgroups_and_wrong_argument_type_value.json'\n created_environment_variables = define_environment_variables_configuration_from_json_file(\n file_name=config_file,\n simple_argument_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n arguments_prefix[0],\n group_arguments_prefix=root_config_parsing_manager_with_mandatory_and_optional_arguments.cli_parser.\n get_groups_prefixes())\n\n result = None\n with pytest.raises(SystemExit) as result:\n _ = root_config_parsing_manager_with_mandatory_and_optional_arguments.parse()\n\n assert result.type == SystemExit\n assert result.value.code == -1\n\n remove_environment_variables_configuration(variables_names=created_environment_variables)",
"def test_env_list_includes_ephemeral(self):\n self.assertIn(\"alpha0\", EFConfig.ENV_LIST)\n self.assertIn(\"alpha1\", EFConfig.ENV_LIST)\n self.assertIn(\"alpha2\", EFConfig.ENV_LIST)\n self.assertIn(\"alpha3\", EFConfig.ENV_LIST)\n self.assertNotIn(\"alpha4\", EFConfig.ENV_LIST)",
"def test_with_shell_env_value(self):\n\n expected = {\n \"AWS_SAM_LOCAL\": \"true\",\n \"AWS_LAMBDA_FUNCTION_MEMORY_SIZE\": \"1024\",\n \"AWS_LAMBDA_FUNCTION_TIMEOUT\": \"123\",\n \"AWS_LAMBDA_FUNCTION_HANDLER\": \"handler\",\n \"AWS_LAMBDA_FUNCTION_NAME\": self.name,\n \"AWS_LAMBDA_FUNCTION_VERSION\": \"$LATEST\",\n \"AWS_LAMBDA_LOG_GROUP_NAME\": f\"aws/lambda/{self.name}\",\n \"AWS_LAMBDA_LOG_STREAM_NAME\": \"$LATEST\",\n \"AWS_ACCOUNT_ID\": \"123456789012\",\n \"AWS_REGION\": \"us-east-1\",\n \"AWS_ACCESS_KEY_ID\": \"defaultkey\",\n \"AWS_SECRET_ACCESS_KEY\": \"defaultsecret\",\n # This value is coming from user passed environment variable\n \"AWS_DEFAULT_REGION\": \"user-specified-region\",\n # Value coming from the shell\n \"variable1\": \"variable1 value from shell_env\",\n \"variable2\": \"mystring\",\n \"list_var\": \"\",\n \"dict_var\": \"\",\n \"none_var\": \"\",\n \"true_var\": \"true\",\n \"false_var\": \"false\",\n }\n\n environ = EnvironmentVariables(\n self.name,\n self.memory,\n self.timeout,\n self.handler,\n variables=self.variables,\n shell_env_values=self.shell_env,\n )\n\n self.assertEqual(environ.resolve(), expected)",
"def test_prepare_environment(self):\n pass",
"def test_runner_args_only_when_set(runner):\n\n conf = Config(test_runner=runner)\n assert \"runner_args\" not in conf._metadata\n\n conf2 = Config(test_runner=runner, runner_args=[\"-vv\", \"--pdb\"])\n assert conf2._metadata.get(\"runner_args\") == [\"-vv\", \"--pdb\"]",
"def test_new_runtime_env_overrides_config_with_env_vars_in_play_and_subclasses():\n with mock.patch.dict(\n os.environ,\n {\n ENV_CODE_VAR: _UnitTestSubConfig.ENV_CODE,\n \"COMPONENT_NAME\": _ENV_VAL,\n \"UNITTEST_CFG_A\": \"ENVVAR_UNITTEST_CFG_A\",\n \"UNITTEST_CFG_D\": \"ENVVAR_UNITTEST_CFG_D\",\n \"UNITTEST_CFG_G\": \"ENVVAR_UNITTEST_CFG_G\",\n \"SUB_UNITTEST_3\": \"ENVVAR_SUB_UNITTEST_3\",\n \"SUB_UNITTEST_4\": \"ENVVAR_SUB_UNITTEST_4\",\n },\n ):\n _load_config.cache_clear() # wipes the @lru_cache for fresh run on next call\n cfg = _load_config()\n\n # 1. Env var takes priority over Config vars like COMPONENT_NAME still override even if originally defined at the\n # grandparent config level\n assert cfg.COMPONENT_NAME == _ENV_VAL\n # 2. Env var still takes priority when Same-named config vars in the subclass replace the parent value\n assert cfg.UNITTEST_CFG_A == \"ENVVAR_UNITTEST_CFG_A\"\n # 3. Child prop composing 2 values from parent config is late-evaluated (at call time), not evaluated as the\n # class is read-in from a module import, or construction ... AND when doing the late-eval, it composes the\n # values from the environment vars that replaced the original values for the fields it is composing\n assert cfg.SUB_UNITTEST_1 == \"ENVVAR_UNITTEST_CFG_A:ENVVAR_UNITTEST_CFG_D\"\n # 4. Child prop composing 2 values from parent, one of which is overridden in child, AND then further\n # overriden by the environment var, does get the overridden part FROM the env var\n assert cfg.SUB_UNITTEST_2 == \"ENVVAR_UNITTEST_CFG_A:UNITTEST_CFG_B\"\n # 5. If child and parent are BOTH properties, AND the field's name is overridden by an ENVVAR, the child\n # WILL NOT pick up the env var value. It sticks to its value.\n # WHY?? See extensive note in previous test\n assert cfg.UNITTEST_CFG_G == \"SUB_UNITTEST_CFG_G\"\n # 7. Simple composition of values in the same class works\n assert cfg.SUB_UNITTEST_5 == \"ENVVAR_SUB_UNITTEST_3:ENVVAR_SUB_UNITTEST_4\"",
"def test_check_recipe_package_env_vars():\n\n pytest_enable_socket()\n\n ## Test that an env_var is created for a single installed file and the dir\n recipe = CreateRecipe(\n \"\"\"\n one_file_v1:\n meta.yaml: |\n build:\n binary_relocation: false\n detect_binary_files_with_prefix: false\n noarch: generic\n number: 0\n extra:\n authors: mjc \n extra-files: \n - one_file_v1.bw\n package:\n name: one_file_v1\n version: '1' \n requirements:\n build:\n - gsort\n - htslib\n - zlib\n run:\n - gsort\n - htslib\n - zlib\n source:\n path: .\n about:\n identifiers:\n genome-build: hg19\n species: Homo_sapiens\n keywords:\n - gaps\n - region\n summary: testing env_var for recipe with one file\n tags:\n genomic-coordinate-base: 0-based-inclusive\n data-version: 11-Mar-2019\n file-type: \n - bw\n final-files: \n - one_file_v1.bw\n final-file-sizes:\n one_file_v1.bw: 10.1K\n data-provider: UCSC\n ggd-channel: genomics\n \n recipe.sh: |\n #!/bin/sh\n set -eo pipefail -o nounset\n wget --quiet --no-check-certificate --output-document hg19phastcons.bw http://hgdownload.cse.ucsc.edu/goldenpath/hg19/phastCons100way/hg19.100way.phastCons.bw\n\n post-link.sh: |\n set -eo pipefail -o nounset\n\n if [[ -z $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/one_file_v1/1\n elif [[ $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") == \"base\" ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/one_file_v1/1\n else\n env_dir=$(conda info --envs | grep \"*\" | grep -o \"\\/.*\")\n export CONDA_ROOT=$env_dir\n export RECIPE_DIR=$env_dir/share/ggd/Homo_sapiens/hg19/one_file_v1/1\n fi\n\n PKG_DIR=`find \"$CONDA_SOURCE_PREFIX/pkgs/\" -name \"$PKG_NAME-$PKG_VERSION*\" | grep -v \".tar.bz2\" | grep \"$PKG_VERSION.*$PKG_BUILDNUM$\"`\n\n if [ -d $RECIPE_DIR ]; then\n rm -r $RECIPE_DIR\n fi\n\n mkdir -p $RECIPE_DIR\n\n (cd $RECIPE_DIR && bash $PKG_DIR/info/recipe/recipe.sh)\n\n cd $RECIPE_DIR\n\n ## Iterate over new files and replace file name with data package name and data version \n for f in *; do\n ext=\"${f#*.}\"\n filename=\"{f%%.*}\"\n (mv $f \"one_file_v1.$ext\")\n done\n\n ## Add environment variables \n #### File\n if [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 1 ]] ## If only one file\n then\n recipe_env_file_name=\"ggd_one_file_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(find $RECIPE_DIR -type f -maxdepth 1)\"\n\n elif [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 2 ]] ## If two files\n then\n indexed_file=`find $RECIPE_DIR -type f \\( -name \"*.tbi\" -or -name \"*.fai\" -or -name \"*.bai\" -or -name \"*.crai\" -or -name \"*.gzi\" \\) -maxdepth 1`\n if [[ ! 
-z \"$indexed_file\" ]] ## If index file exists\n then\n recipe_env_file_name=\"ggd_one_file_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(echo $indexed_file | sed 's/\\.[^.]*$//')\" ## remove index extension\n fi \n fi \n\n #### Dir\n recipe_env_dir_name=\"ggd_one_file_v1_dir\"\n recipe_env_dir_name=\"$(echo \"$recipe_env_dir_name\" | sed 's/-/_/g')\"\n\n activate_dir=\"$env_dir/etc/conda/activate.d\"\n deactivate_dir=\"$env_dir/etc/conda/deactivate.d\"\n\n mkdir -p $activate_dir\n mkdir -p $deactivate_dir\n\n echo \"export $recipe_env_dir_name=$RECIPE_DIR\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_dir_name\">> $deactivate_dir/env_vars.sh\n\n #### File\n if [[ ! -z \"${recipe_env_file_name:-}\" ]] ## If the file env variable exists, set the env file var\n then\n echo \"export $recipe_env_file_name=$file_path\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_file_name\">> $deactivate_dir/env_vars.sh\n fi\n\n echo 'Recipe successfully built!'\n\n checksums_file.txt: |\n \n \"\"\", from_string=True)\n\n recipe.write_recipes()\n recipe_dir_path = recipe.recipe_dirs[\"one_file_v1\"] \n args = Namespace(command='check-recipe', debug=False, recipe_path=recipe_dir_path, dont_uninstall=True, dont_add_md5sum_for_checksum=False, id=None)\n assert check_recipe.check_recipe((),args) == True\n ## Test dir and file env_var\n conda_root = utils.conda_root()\n with open(os.path.join(conda_root,\"etc/conda/activate.d/env_vars.sh\")) as env_file:\n env_vars = [x for x in env_file if \"ggd_one_file_v1_dir\" in x or \"ggd_one_file_v1_file\" in x]\n first = False\n second = False\n for x in env_vars:\n if \"ggd_one_file_v1_dir\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/one_file_v1/1\") in x\n first = True\n elif \"ggd_one_file_v1_file\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/one_file_v1/1/one_file_v1.bw\")\n second = True\n else:\n assert False\n assert first == True\n assert second == True\n\n\n args = Namespace(command=\"show-env\", pattern=None)\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.show_env((),args)\n output = temp_stdout.getvalue().strip()\n assert \"$ggd_one_file_v1_file\" in output\n assert \"$ggd_one_file_v1_dir\" in output\n\n ## Test that an env_var is created for the non indexed file when two files are installed with an index present, and the dir\n recipe = CreateRecipe(\n \"\"\"\n two_files_v1:\n meta.yaml: |\n build:\n binary_relocation: false\n detect_binary_files_with_prefix: false\n noarch: generic\n number: 0\n extra:\n authors: mjc \n extra-files: []\n package:\n name: two_files_v1\n version: '1' \n requirements:\n build:\n - gsort\n - htslib\n - zlib\n run:\n - gsort\n - htslib\n - zlib\n source:\n path: .\n about:\n identifiers:\n genome-build: hg19\n species: Homo_sapiens\n keywords:\n - gaps\n - region\n summary: testing env_var for recipe with two files and an index present\n tags:\n genomic-coordinate-base: 0-based-inclusive\n data-version: Today\n data-provider: UCSC\n file-type: \n - bed\n final-files: \n - two_files_v1.bed.gz\n - two_files_v1.bed.gz.tbi\n final-file-sizes:\n two_files_v1.bed.gz: 24.02K\n two_files_v1.bed.gz.tbi: 10.24K\n ggd-channel: genomics\n\n \n recipe.sh: |\n #!/bin/sh\n set -eo pipefail -o nounset\n genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome\n wget --quiet $genome\n\n ## get the file, \n ## unzip it, \n ## remove any lines 
that do not have a scaffolding in the hg19.genom file. (If scaffolding in hg19.genome, grep exists with 0)\n ## add header to the file, and remove the bin column\n ## sort it based on the genome file\n ## bgzip it\n wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\\n | gzip -dc \\\n | awk '{ if (system(\"grep -Fq \" $2 \" hg19.genome\") == 0) print $0}' \\\n | awk -v OFS=\"\\t\" 'BEGIN {print \"#chrom\\tstart\\tend\\tsize\\ttype\\tstrand\"} {print $2,$3,$4,$7,$8,\"+\"}' \\\n | gsort /dev/stdin $genome \\\n | bgzip -c > gaps.bed.gz\n\n tabix gaps.bed.gz\n\n rm hg19.genome\n\n post-link.sh: |\n set -eo pipefail -o nounset\n\n if [[ -z $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/two_files_v1/1\n elif [[ $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") == \"base\" ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/two_files_v1/1\n else\n env_dir=$(conda info --envs | grep \"*\" | grep -o \"\\/.*\")\n export CONDA_ROOT=$env_dir\n export RECIPE_DIR=$env_dir/share/ggd/Homo_sapiens/hg19/two_files_v1/1\n fi\n\n PKG_DIR=`find \"$CONDA_SOURCE_PREFIX/pkgs/\" -name \"$PKG_NAME-$PKG_VERSION*\" | grep -v \".tar.bz2\" | grep \"$PKG_VERSION.*$PKG_BUILDNUM$\"`\n\n if [ -d $RECIPE_DIR ]; then\n rm -r $RECIPE_DIR\n fi\n\n mkdir -p $RECIPE_DIR\n\n (cd $RECIPE_DIR && bash $PKG_DIR/info/recipe/recipe.sh)\n\n cd $RECIPE_DIR\n\n ## Iterate over new files and replace file name with data package name and data version \n for f in *; do\n ext=\"${f#*.}\"\n filename=\"{f%%.*}\"\n (mv $f \"two_files_v1.$ext\")\n done\n\n ## Add environment variables \n #### File\n if [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 1 ]] ## If only one file\n then\n recipe_env_file_name=\"ggd_two_files_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(find $RECIPE_DIR -type f -maxdepth 1)\"\n\n elif [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 2 ]] ## If two files\n then\n indexed_file=`find $RECIPE_DIR -type f \\( -name \"*.tbi\" -or -name \"*.fai\" -or -name \"*.bai\" -or -name \"*.crai\" -or -name \"*.gzi\" \\) -maxdepth 1`\n if [[ ! -z \"$indexed_file\" ]] ## If index file exists\n then\n recipe_env_file_name=\"ggd_two_files_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(echo $indexed_file | sed 's/\\.[^.]*$//')\" ## remove index extension\n fi \n fi \n\n #### Dir\n recipe_env_dir_name=\"ggd_two_files_v1_dir\"\n recipe_env_dir_name=\"$(echo \"$recipe_env_dir_name\" | sed 's/-/_/g')\"\n\n activate_dir=\"$env_dir/etc/conda/activate.d\"\n deactivate_dir=\"$env_dir/etc/conda/deactivate.d\"\n\n mkdir -p $activate_dir\n mkdir -p $deactivate_dir\n\n echo \"export $recipe_env_dir_name=$RECIPE_DIR\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_dir_name\">> $deactivate_dir/env_vars.sh\n\n #### File\n if [[ ! 
-z \"${recipe_env_file_name:-}\" ]] ## If the file env variable exists, set the env file var\n then\n echo \"export $recipe_env_file_name=$file_path\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_file_name\">> $deactivate_dir/env_vars.sh\n fi\n\n echo 'Recipe successfully built!'\n\n checksums_file.txt: |\n \n \"\"\", from_string=True)\n\n recipe.write_recipes()\n recipe_dir_path = recipe.recipe_dirs[\"two_files_v1\"] \n args = Namespace(command='check-recipe', debug=False, recipe_path=recipe_dir_path, dont_uninstall=True, dont_add_md5sum_for_checksum=False, id=None)\n assert check_recipe.check_recipe((),args) == True\n ## Test dir and file env_var\n conda_root = utils.conda_root()\n with open(os.path.join(conda_root,\"etc/conda/activate.d/env_vars.sh\")) as env_file:\n env_vars = [x for x in env_file if \"ggd_two_files_v1_dir\" in x or \"ggd_two_files_v1_file\" in x]\n first = False\n second = False\n for x in env_vars:\n if \"ggd_two_files_v1_dir\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/two_files_v1/1\") in x\n first = True\n elif \"ggd_two_files_v1_file\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/two_files_v1/1/two_files_v1.bed.gz\")\n second = True\n else:\n assert False\n assert first == True\n assert second == True\n\n args = Namespace(command=\"show-env\", pattern=None)\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.show_env((),args)\n output = temp_stdout.getvalue().strip()\n assert \"$ggd_two_files_v1_file\" in output\n assert \"$ggd_two_files_v1_dir\" in output \n\n ## Test that NO env_var is created when two files are installed with no index present, and the dir\n recipe = CreateRecipe(\n \"\"\"\n two_files_noindex_v1:\n meta.yaml: |\n build:\n binary_relocation: false\n detect_binary_files_with_prefix: false\n noarch: generic\n number: 0\n extra:\n authors: mjc \n extra-files: \n - two_files_noindex_v1.genome\n - two_files_noindex_v1.txt.gz\n package:\n name: two_files_noindex_v1\n version: '1' \n requirements:\n build:\n - gsort\n - htslib\n - zlib\n run:\n - gsort\n - htslib\n - zlib\n source:\n path: .\n about:\n identifiers:\n genome-build: hg19\n species: Homo_sapiens\n keywords:\n - gaps\n - region\n summary: testing NO file env_var for recipe with two files and no index\n tags:\n genomic-coordinate-base: 0-based-inclusive\n data-version: Today\n data-provider: UCSC\n file-type: \n - genome\n - txt\n final-files: \n - two_files_noindex_v1.genome\n - two_files_noindex_v1.txt.gz\n final-file-sizes: \n two_files_noindex_v1.genome: 10.01K\n two_files_noindex_v1.txt.gz: 12.41K\n ggd-channel: genomics\n \n recipe.sh: |\n #!/bin/sh\n set -eo pipefail -o nounset\n wget --quiet https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome\n wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\\n | gzip -dc \\\n | awk -v OFS=\"\\t\" 'BEGIN {print \"#chrom\\tstart\\tend\\tsize\\ttype\\tstrand\"} {print $2,$3,$4,$7,$8,\"+\"}' \\\n | bgzip -c > two_files_noindex_v1.txt.gz\n\n post-link.sh: |\n set -eo pipefail -o nounset\n\n if [[ -z $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/two_files_noindex_v1/1\n elif [[ $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") == \"base\" ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export 
RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/two_files_noindex_v1/1\n else\n env_dir=$(conda info --envs | grep \"*\" | grep -o \"\\/.*\")\n export CONDA_ROOT=$env_dir\n export RECIPE_DIR=$env_dir/share/ggd/Homo_sapiens/hg19/two_files_noindex_v1/1\n fi\n\n PKG_DIR=`find \"$CONDA_SOURCE_PREFIX/pkgs/\" -name \"$PKG_NAME-$PKG_VERSION*\" | grep -v \".tar.bz2\" | grep \"$PKG_VERSION.*$PKG_BUILDNUM$\"`\n\n if [ -d $RECIPE_DIR ]; then\n rm -r $RECIPE_DIR\n fi\n\n mkdir -p $RECIPE_DIR\n\n (cd $RECIPE_DIR && bash $PKG_DIR/info/recipe/recipe.sh)\n\n cd $RECIPE_DIR\n\n ## Iterate over new files and replace file name with data package name and data version \n for f in *; do\n ext=\"${f#*.}\"\n filename=\"{f%%.*}\"\n if [[ ! -f \"two_files_noindex_v1.$ext\" ]] \n then\n (mv $f \"two_files_noindex_v1.$ext\")\n fi \n done\n\n ## Add environment variables \n #### File\n if [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 1 ]] ## If only one file\n then\n recipe_env_file_name=\"ggd_two_files_noindex_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(find $RECIPE_DIR -type f -maxdepth 1)\"\n\n elif [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 2 ]] ## If two files\n then\n indexed_file=`find $RECIPE_DIR -type f \\( -name \"*.tbi\" -or -name \"*.fai\" -or -name \"*.bai\" -or -name \"*.crai\" -or -name \"*.gzi\" \\) -maxdepth 1`\n if [[ ! -z \"$indexed_file\" ]] ## If index file exists\n then\n recipe_env_file_name=\"ggd_two_files_noindex_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(echo $indexed_file | sed 's/\\.[^.]*$//')\" ## remove index extension\n fi \n fi \n\n #### Dir\n recipe_env_dir_name=\"ggd_two_files_noindex_v1_dir\"\n recipe_env_dir_name=\"$(echo \"$recipe_env_dir_name\" | sed 's/-/_/g')\"\n\n activate_dir=\"$env_dir/etc/conda/activate.d\"\n deactivate_dir=\"$env_dir/etc/conda/deactivate.d\"\n\n mkdir -p $activate_dir\n mkdir -p $deactivate_dir\n\n echo \"export $recipe_env_dir_name=$RECIPE_DIR\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_dir_name\">> $deactivate_dir/env_vars.sh\n\n #### File\n if [[ ! 
-z \"${recipe_env_file_name:-}\" ]] ## If the file env variable exists, set the env file var\n then\n echo \"export $recipe_env_file_name=$file_path\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_file_name\">> $deactivate_dir/env_vars.sh\n fi\n\n echo 'Recipe successfully built!'\n\n checksums_file.txt: |\n \n \"\"\", from_string=True)\n\n recipe.write_recipes()\n recipe_dir_path = recipe.recipe_dirs[\"two_files_noindex_v1\"] \n args = Namespace(command='check-recipe', debug=False, recipe_path=recipe_dir_path, dont_uninstall=True, dont_add_md5sum_for_checksum=False, id=None)\n assert check_recipe.check_recipe((),args) == True\n ## Test dir and file env_var\n conda_root = utils.conda_root()\n with open(os.path.join(conda_root,\"etc/conda/activate.d/env_vars.sh\")) as env_file:\n env_vars = [x for x in env_file if \"ggd_two_files_noindex_v1_dir\" in x or \"ggd_two_files_noindex_v1_file\" in x]\n first = False\n for x in env_vars:\n if \"ggd_two_files_noindex_v1_dir\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/two_files_noindex_v1/1\") in x\n first = True\n elif \"ggd_two_files_noindex_v1_file\" in x:\n assert False ## There should not be a file env_var made for this package\n else:\n assert False\n assert first == True\n\n args = Namespace(command=\"show-env\", pattern=None)\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.show_env((),args)\n output = temp_stdout.getvalue().strip()\n assert \"$ggd_two_files_noindex_v1_file\" not in output\n assert \"$ggd_two_files_noindex_v1_dir\" in output\n\n ## Test that NO env_var is created when thre+ files are installed, and the dir\n recipe = CreateRecipe(\n \"\"\"\n three_files_v1:\n meta.yaml: |\n build:\n binary_relocation: false\n detect_binary_files_with_prefix: false\n noarch: generic\n number: 0\n extra:\n authors: mjc \n extra-files: \n - three_files_v1.genome\n - three_files_v1.1.txt.gz\n - three_files_v1.2.txt.gz\n package:\n name: three_files_v1\n version: '1' \n requirements:\n build:\n - gsort\n - htslib\n - zlib\n run:\n - gsort\n - htslib\n - zlib\n source:\n path: .\n about:\n identifiers:\n genome-build: hg19\n species: Homo_sapiens\n keywords:\n - gaps\n - region\n summary: testing NO file env_var for recipe with three+ files\n tags:\n genomic-coordinate-base: 0-based-inclusive\n data-version: Today\n data-provider: UCSC\n file-type: \n - txt\n - genome\n final-files: \n - three_files_v1.1.txt.gz\n - three_files_v1.2.txt.gz\n - three_files_v1.genome\n final-file-sizes: \n three_files_v1.1.txt.gz: 24.04K\n three_files_v1.2.txt.gz: 24.04K\n three_files_v1.genome: 10.01K\n ggd-channel: genomics\n \n recipe.sh: |\n #!/bin/sh\n set -eo pipefail -o nounset\n wget --quiet https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome\n wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\\n | gzip -dc \\\n | awk -v OFS=\"\\t\" 'BEGIN {print \"#chrom\\tstart\\tend\\tsize\\ttype\\tstrand\"} {print $2,$3,$4,$7,$8,\"+\"}' \\\n | bgzip -c > gap.txt.gz\n cp gap.txt.gz gaps.1.txt.gz\n mv gap.txt.gz gaps.2.txt.gz\n\n post-link.sh: |\n set -eo pipefail -o nounset\n\n if [[ -z $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/three_files_v1/1\n elif [[ $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") == \"base\" ]]; then\n export CONDA_ROOT=$(conda info --root)\n 
env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/three_files_v1/1\n else\n env_dir=$(conda info --envs | grep \"*\" | grep -o \"\\/.*\")\n export CONDA_ROOT=$env_dir\n export RECIPE_DIR=$env_dir/share/ggd/Homo_sapiens/hg19/three_files_v1/1\n fi\n\n PKG_DIR=`find \"$CONDA_SOURCE_PREFIX/pkgs/\" -name \"$PKG_NAME-$PKG_VERSION*\" | grep -v \".tar.bz2\" | grep \"$PKG_VERSION.*$PKG_BUILDNUM$\"`\n\n if [ -d $RECIPE_DIR ]; then\n rm -r $RECIPE_DIR\n fi\n\n mkdir -p $RECIPE_DIR\n\n (cd $RECIPE_DIR && bash $PKG_DIR/info/recipe/recipe.sh)\n\n cd $RECIPE_DIR\n\n ## Iterate over new files and replace file name with data package name and data version \n for f in *; do\n ext=\"${f#*.}\"\n filename=\"{f%%.*}\"\n if [[ ! -f \"three_files_v1.$ext\" ]] \n then\n (mv $f \"three_files_v1.$ext\")\n fi \n done\n\n ## Add environment variables \n #### File\n if [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 1 ]] ## If only one file\n then\n recipe_env_file_name=\"ggd_three_files_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(find $RECIPE_DIR -type f -maxdepth 1)\"\n\n elif [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 2 ]] ## If two files\n then\n indexed_file=`find $RECIPE_DIR -type f \\( -name \"*.tbi\" -or -name \"*.fai\" -or -name \"*.bai\" -or -name \"*.crai\" -or -name \"*.gzi\" \\) -maxdepth 1`\n if [[ ! -z \"$indexed_file\" ]] ## If index file exists\n then\n recipe_env_file_name=\"ggd_three_files_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(echo $indexed_file | sed 's/\\.[^.]*$//')\" ## remove index extension\n fi \n fi \n\n #### Dir\n recipe_env_dir_name=\"ggd_three_files_v1_dir\"\n recipe_env_dir_name=\"$(echo \"$recipe_env_dir_name\" | sed 's/-/_/g')\"\n\n activate_dir=\"$env_dir/etc/conda/activate.d\"\n deactivate_dir=\"$env_dir/etc/conda/deactivate.d\"\n\n mkdir -p $activate_dir\n mkdir -p $deactivate_dir\n\n echo \"export $recipe_env_dir_name=$RECIPE_DIR\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_dir_name\">> $deactivate_dir/env_vars.sh\n\n #### File\n if [[ ! 
-z \"${recipe_env_file_name:-}\" ]] ## If the file env variable exists, set the env file var\n then\n echo \"export $recipe_env_file_name=$file_path\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_file_name\">> $deactivate_dir/env_vars.sh\n fi\n\n echo 'Recipe successfully built!'\n\n checksums_file.txt: |\n \n \"\"\", from_string=True)\n\n recipe.write_recipes()\n recipe_dir_path = recipe.recipe_dirs[\"three_files_v1\"] \n args = Namespace(command='check-recipe', debug=False, recipe_path=recipe_dir_path, dont_uninstall=True, dont_add_md5sum_for_checksum=False, id=None)\n assert check_recipe.check_recipe((),args) == True\n ## Test dir and file env_var\n conda_root = utils.conda_root()\n with open(os.path.join(conda_root,\"etc/conda/activate.d/env_vars.sh\")) as env_file:\n env_vars = [x for x in env_file if \"ggd_three_files_v1_dir\" in x or \"ggd_three_files_v1_file\" in x]\n first = False\n for x in env_vars:\n if \"ggd_three_files_v1_dir\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/three_files_v1/1\") in x\n first = True\n elif \"ggd_three_files_v1_file\" in x:\n assert False ## There should not be a file env_var made for this package\n else:\n assert False\n assert first == True\n\n args = Namespace(command=\"show-env\", pattern=None)\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.show_env((),args)\n output = temp_stdout.getvalue().strip()\n assert \"$ggd_three_files_v1_file\" not in output\n assert \"$ggd_three_files_v1_dir\" in output"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a subgroup is correctly added to a parsing manager | def test_add_subgroup_in_root_parsing_manager():
parser_manager = RootConfigParsingManager()
assert len(parser_manager.cli_parser.subgroup_parsers) == 0
parser_manager.add_subgroup(name='sub')
assert len(parser_manager.cli_parser.subgroup_parsers) == 1
parser_manager.add_subgroup(name='sub1')
assert len(parser_manager.cli_parser.subgroup_parsers) == 2
parser_manager.add_subgroup(name='sub3')
assert len(parser_manager.cli_parser.subgroup_parsers) == 3 | [
"def test_add_subgroup_parser_that_already_exists_raises_an_exception_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='toto')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('toto', subparser)\n\n repeated_subparser = SubgroupConfigParsingManager('titi')\n repeated_subparser.add_argument('n', 'name')\n\n with pytest.raises(AlreadyAddedSubparserException):\n parser_manager.add_subgroup_parser('toto', repeated_subparser)",
"def test_add_subgroup_parser_without_name_argument_raise_an_exception_in_root_parsing_manager():\n parser = RootConfigParsingManager()\n subparser = SubgroupConfigParsingManager('titi')\n\n with pytest.raises(SubgroupParserWithoutNameArgumentException):\n parser.add_subgroup_parser('toto', subparser)",
"def test_arguments_string_parsing_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):\n\n subparser = SubgroupConfigParsingManager('toto')\n subparser.add_argument('b', is_flag=True, action=store_true)\n subparser.add_argument('n', 'name')\n root_config_parsing_manager.add_subgroup_parser('sub', subparser)\n\n check_parse_cli_result(root_config_parsing_manager, \"\", {})\n\n with pytest.raises(UnknownArgException):\n check_parse_cli_result(root_config_parsing_manager, \"-z\", {})\n\n check_parse_cli_result(root_config_parsing_manager, '-a', {'a': True})\n\n with pytest.raises(NoNameSpecifiedForSubgroupException):\n check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b', {})\n\n check_parse_cli_result(root_config_parsing_manager, '-a --sub toto -b --name titi',\n {'a': True, 'sub': {'titi': {'type': 'toto', 'b': True}}})\n\n with pytest.raises(BadContextException):\n check_parse_cli_result(root_config_parsing_manager, \"-b\", {})",
"def test_subgroups(clean_raw_data):\n subgroup_names = subgroups(clean_raw_data)\n assert subgroup_names == ['spectrum1', 'spectrum2', 'spectrum3']",
"def test_arguments_dict_validation_with_subgroup_parser_in_subgroup_parsing_manager(root_config_parsing_manager):\n subparser = SubgroupConfigParsingManager('toto')\n subparser.add_argument('b', is_flag=True, action=store_true)\n subparser.add_argument('type', is_flag=True, action=store_true)\n subparser.add_argument('n', 'name')\n root_config_parsing_manager.add_subgroup_parser('sub', subparser)\n\n dic_a = {'a': True}\n\n dic_z = {\n \"z\": True\n }\n\n dic_b = {\n 'b': \"type\"\n }\n\n dic_a_sub = {\n 'a': True,\n 'sub': {\n 'titi':\n {\n 'type': 'toto',\n 'b': \"type\"\n }\n }\n }\n\n with pytest.raises(UnknownArgException):\n root_config_parsing_manager.validate(dic_z)\n\n with pytest.raises(UnknownArgException):\n root_config_parsing_manager.validate(dic_b)\n\n assert root_config_parsing_manager.validate(dic_a) == dic_a\n\n assert root_config_parsing_manager.validate(dic_a_sub) == dic_a_sub\n\n assert root_config_parsing_manager.validate({}) == {}",
"def test_parsing_of_arguments_string_with_subgroup_parser_with_long_and_short_arguments_names_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='sub')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('a', 'aaa', is_flag=True, action=store_true, default_value=False)\n subparser.add_argument('c', 'ttt', is_flag=False, action=store_val, argument_type=int)\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('sub', subparser)\n check_parse_cli_result(parser_manager, '--sub titi -a --name tutu -c 15',\n {'sub': {'tutu': {'aaa': True, 'type': 'titi', 'ttt': 15}}})",
"def subgroup_parser():\n parser = SubgroupConfigParser('test')\n parser.add_argument('a', is_flag=True)\n return parser",
"def test_post_groups(self):\n pass",
"def test_subcommand_group():\n output = subprocess.check_output(['textx', 'testgroup'],\n stderr=subprocess.STDOUT)\n assert b'groupcommand1' in output\n assert b'groupcommand2' in output",
"def test_get_groups_list(self):\n pass",
"def test_add_group_zero_members2() -> None:\n g = Group([])\n v = Grouping()\n v.add_group(g)\n assert v._groups == []",
"def test_edit_group(app):\n\n app.group.validation_of_group_exist()\n app.group.edit_group(Group(group_name=Profinity.long_word_20, group_header=Profinity.long_word_20,\n group_footer=Profinity.long_word_20))\n app.group.delete_first_group()",
"def test_nested_groups(self):\n self.handler = HighfiveHandlerMock(\n Payload({}), repo_config=self.fakes['config']['nested_groups']\n ).handler\n (chosen_reviewers, mentions) = self.choose_reviewers(\n self.fakes['diff']['normal'], \"nikomatsakis\"\n )\n assert set([\"pnkfelix\", \"nrc\"]) == chosen_reviewers",
"def test_add_mft_user_group(self):\n pass",
"def test_get_argument_groups(self) -> None:\n MockCustomizableClass.add_args_to_group(self.test_group)\n group_dict = get_argument_groups(self.argparser)\n self.assertEqual(len(group_dict), 3, f\"Expected 3 groups, found {group_dict}\")\n self.assertEqual(\n len(group_dict[\"__NO_TITLE__\"]), # type: ignore\n 0,\n f\"Expected no unnamed groups, found {group_dict['__NO_TITLE__']}.\",\n )\n self.assertIn(\"test_group\", group_dict.keys(), \"test group not found\")\n self.assertIn(\"test_subgroup\", group_dict.keys(), \"test subgroup not found\")\n self.assertIn(self.test_group, group_dict.values(), \"test group doesn't match\")",
"def test_grouping_attribute() -> None:\n g = Grouping()\n assert g._groups == []",
"def test_api_v3_groups_post(self):\n pass",
"def test_constructor_with_defined_groups(self):\n a = pygame.sprite.Group()\n b = pygame.sprite.Group()\n c = pygame.sprite.Group()\n\n Bullet.groups = (a, b, c)\n bullet = Bullet(RED, 5)\n self.assertEqual(len(bullet.groups), len(Bullet.groups))\n for i in Bullet.groups:\n with self.subTest(group=i):\n self.assertIn(i, bullet.groups)",
"def test_create_services_network_group_by_network_group_name(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
wrapper around getIbvCounters() to expand arguments so that we can use it with multiprocessing.map() | def getIbvCountersWrapper(args):
return getIbvCounters(*args) | [
"def _cx_counters_psutil(self):\n for iface, counters in psutil.net_io_counters(pernic=True).iteritems():\n metrics = {\n 'bytes_rcvd': counters.bytes_recv,\n 'bytes_sent': counters.bytes_sent,\n 'packets_in.count': counters.packets_recv,\n 'packets_in.error': counters.errin,\n 'packets_out.count': counters.packets_sent,\n 'packets_out.error': counters.errout,\n }\n self._submit_devicemetrics(iface, metrics)",
"def compute_flags(self, *args):",
"def _build_counters(counters):\n return [\n {'name': name, 'delta': value}\n for name, value in counters.items()\n ]",
"def get_memory_counters(args={}):\n res = {}\n\n if args.get('virtual_memory', False):\n res['virtual_memory'] = to_dict(psutil.virtual_memory())\n \n if args.get('swap_memory', False):\n res['swap_memory'] = to_dict(psutil.swap_memory())\n \n return res",
"def counters(cli_opts, json): # noqa: B902\n\n return_code = fib.FibCountersCmd(cli_opts).run(json)\n sys.exit(return_code)",
"def get_cpu_counters(args={}):\n res = {}\n\n interval = args.get('cpu_interval', CPU_INTERVAL_S)\n percpu = args.get('percpu', False)\n \n res['cpu_percent'] = psutil.cpu_percent(interval=interval, percpu=percpu)\n\n # Convert Unix style CPU utilization to task manager style\n if percpu is False:\n num_cpus = psutil.cpu_count(logical=False)\n res['cpu_percent'] /= num_cpus\n\n if args.get('cpu_times', False):\n res['cpu_times'] = to_dict(psutil.cpu_times(percpu=percpu))\n\n if args.get('cpu_stats', False):\n res['cpu_stats'] = to_dict(psutil.cpu_stats())\n \n return res",
"def optVarEquMapCount(*args):\n return _optcc.optVarEquMapCount(*args)",
"def get_network_counters(args={}):\n res = {}\n\n if args.get('net_io_counters', False):\n res['net_io_counters'] = to_dict(psutil.net_io_counters(pernic=pernic))\n\n return res",
"def add_cache_increment_parameter(tasks):\n denom = len(tasks) or 1\n increment = 1.0 / denom * 100\n # This is kind of terrible. Once we know how much progress each task\n # yeilds, we must pass that value into the Signature for the sub tassks.\n for _task in tasks:\n _task.args = _task.args + (increment,)\n\n return tasks",
"def counter_specific(fn, dict2update):\n cnt = 0\n\n def inner(*args, **kwargs):\n \"\"\"This is a counter function which updates specific dictionaries\n\n Returns:\n dict: dict we want to update\n \"\"\"\n nonlocal cnt\n cnt += 1\n print(\"{} has been called {} times.\".format(fn.__name__, cnt))\n dict2update[fn.__name__] = cnt\n return dict2update\n return inner",
"def _operand_count_sweep():\n for input_count in range(2, 16, 1):\n yield input_count",
"def merge_counters(counter1, counter2) -> Counter:\n return Counter(**{**counter1, **counter2})",
"def idxs_map(idxs, cmd, *args, **kwargs):\r\n # XXX: consider insisting on sorted idxs\r\n # XXX: use np.searchsorted instead of dct\r\n\r\n if 0: # these should all be true, but evaluating them is slow\r\n for ii, (idxs_ii, vals_ii) in enumerate(args):\r\n for jj in idxs: assert jj in idxs_ii\r\n for kw, (idxs_kw, vals_kw) in kwargs.items():\r\n for jj in idxs: assert jj in idxs_kw\r\n\r\n args_imap = []\r\n for idxs_j, vals_j in args:\r\n if len(idxs_j):\r\n args_imap.append(dict(zip(idxs_j, vals_j)))\r\n else:\r\n args_imap.append({})\r\n\r\n kwargs_imap = {}\r\n for kw, (idxs_j, vals_j) in kwargs.items():\r\n if len(idxs_j):\r\n kwargs_imap[kw] = dict(zip(idxs_j, vals_j))\r\n else:\r\n kwargs_imap[kw] = {}\r\n\r\n f = scope._impls[cmd]\r\n rval = []\r\n for ii in idxs:\r\n try:\r\n args_nn = [arg_imap[ii] for arg_imap in args_imap]\r\n except:\r\n ERR('args_nn %s' % cmd)\r\n ERR('ii %s' % ii)\r\n ERR('arg_imap %s' % str(arg_imap))\r\n ERR('args_imap %s' % str(args_imap))\r\n raise\r\n try:\r\n kwargs_nn = dict([(kw, arg_imap[ii])\r\n for kw, arg_imap in kwargs_imap.items()])\r\n except:\r\n ERR('args_nn %s' % cmd)\r\n ERR('ii %s' % ii)\r\n ERR('kw %s' % kw)\r\n ERR('arg_imap %s' % str(arg_imap))\r\n raise\r\n try:\r\n rval_nn = f(*args_nn, **kwargs_nn)\r\n except:\r\n ERR('error calling impl of %s' % cmd)\r\n raise\r\n rval.append(rval_nn)\r\n return rval",
"def instruction(num_args):\n def decorator_instruction(func):\n @functools.wraps(func)\n def execute_instruction(state):\n state.status = STATE_RUNNING\n modes = modes_list(state.intcode[state.ic] // 100)[:num_args]\n params = get_param_indices(state, modes, state.ic)\n if params and len(state.intcode) <= max(params):\n state.intcode.extend([0]*(max(params) - len(state.intcode) + 1))\n new_ic = func(state, *params)\n # Logger.log(f'{func.__name__}: {params} {state}')\n if state.status != STATE_WAIT_FOR_INPUT:\n state.ic = ((state.ic + num_args + 1)\n if new_ic is None else new_ic)\n return execute_instruction\n return decorator_instruction",
"def _set_counters(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_counters_openconfig_spanning_tree__stp_mstp_mst_instances_mst_instance_interfaces_interface_state_counters, is_container='container', yang_name=\"counters\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"counters must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_counters_openconfig_spanning_tree__stp_mstp_mst_instances_mst_instance_interfaces_interface_state_counters, is_container='container', yang_name=\"counters\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=False)\"\"\",\n })\n\n self.__counters = t\n if hasattr(self, '_set'):\n self._set()",
"def _loadCounters(self):\n for counter in self.counterDict.keys():\n self.allCounters[counter] = self.counterDict[counter]",
"def numba_histogram(v, b):\n return np.histogram(v, b)",
"def _set_counters(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_counters_openconfig_spanning_tree__stp_rstp_interfaces_interface_state_counters, is_container='container', yang_name=\"counters\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"counters must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_counters_openconfig_spanning_tree__stp_rstp_interfaces_interface_state_counters, is_container='container', yang_name=\"counters\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=False)\"\"\",\n })\n\n self.__counters = t\n if hasattr(self, '_set'):\n self._set()",
"def FormCounts(bitVects,actVals,whichBit,nPossibleActs,nPossibleBitVals=2):\n if len(bitVects) != len(actVals): raise ValueError('var and activity lists should be the same length')\n res = numpy.zeros((nPossibleBitVals,nPossibleActs),numpy.integer)\n for i in range(len(bitVects)):\n res[bitVects[i][whichBit],actVals[i]] += 1\n return res"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
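The document above illustrates a common idiom: multiprocessing.Pool.map() passes exactly one argument to the worker function, so multi-argument functions are driven through a one-argument shim that unpacks a tuple. A minimal, self-contained sketch of that idiom follows; read_counters and its host/port arguments are hypothetical stand-ins, since getIbvCounters itself is not shown in the dataset.

from multiprocessing import Pool


def read_counters(host, port):
    # Hypothetical multi-argument worker standing in for getIbvCounters().
    return f"{host}:{port}"


def read_counters_wrapper(args):
    # Unpack the tuple so Pool.map() can call a multi-argument function.
    return read_counters(*args)


if __name__ == "__main__":
    jobs = [("node1", 1), ("node2", 2), ("node3", 3)]
    with Pool(processes=3) as pool:
        results = pool.map(read_counters_wrapper, jobs)
    print(results)  # ['node1:1', 'node2:2', 'node3:3']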
Sets the progress bar character | def set_bar_character(self, char):
self.bar_char = char | [
"def set_bar_char(self, char):\n \n self._bar_char = char",
"def set_empty_bar_character(self, char):\n self.empty_bar_char = char",
"def bar_done(self):\n\t\tself.cursor += self.width",
"def set_char(self, coord, char):\n\t\tassert coord.x >= 0 and coord.x < self.width, \"X Coordinate out of range\"\n\t\tassert coord.y >= 0 and coord.y < self.height, \"Y Coordinate out of range\"\n\t\tself.content[self.y_max - coord.y][coord.x] = char",
"def setChar(*args, **kwargs):\n \n pass",
"def update_progress(self, progress):\n ## Modify this to change the length of the progress bar\n barLength = 10\n status = \"\"\n if isinstance(progress, int):\n progress = float(progress)\n if not isinstance(progress, float):\n progress = 0\n status = \"error: progress var must be float\\r\\n\"\n if progress < 0:\n progress = 0\n status = \"Halt...\\r\\n\"\n if progress >= 1:\n progress = 1\n status = \"Done...\\r\\n\"\n block = int(round(barLength*progress))\n text = \"\\rPercent: [{0}] {1}% {2}\".format( \"#\"*block + \"-\"*(barLength-block), progress*100, status)\n sys.stdout.write(text)\n sys.stdout.flush()",
"def progress_bar(percentage: float, size: int = 10) -> str:\n fill = '░'\n chars = round(percentage / 100.0 * size)\n remainder = size - chars\n return ('█'* chars) + (remainder * fill)",
"def updateStatus(self, text):\r\n self.style.configure('text.Horizontal.TProgressbar', text=' ' + text)\r\n self.progressTextFormat = ' ' + text + ' {0}%'\r\n print(text)",
"def SetSpecialChar ( self, num, buf):\n\t\tif num < 0 | num > 7: return False # Verify you are setting a valid character\n\t\tif len (buf) != 8: return False # Must be 8 characters long\n\t\toutput = \"\" # Set the output to null\n\t\tfor row in buf:\n\t\t\toutput = output + chr(row)\n\t\toutput = \"%c%s\" % ( chr(num), output )\n\t\tself.SendCommand ( 9, output)",
"def char(self, c):\n n = ord(c)\n ch = font[n * 8:n * 8 + 8]\n self.image(ch)",
"def SetSpecialChar ( self, num, buf):\n\t\tif num < 0 | num > 7: return False\n\t\tif len (buf) != 8: return False \n\t\toutput = \"\"\n\t\tfor row in buf:\n\t\t\toutput = output + chr(row)\n\t\toutput = \"%c%s\" % ( chr(num), output )\n\t\tself.SendCommand ( 9, output)",
"def PrintProgress(self):\n ratio = 100*self.progressBar['value'] / self.progressBar['maximum']\n s = '\\033[1K\\r['\n n = math.floor(ratio)\n s += '=' * n\n if n < 100:\n s += '>' + '.'*(100-n-1)\n s += '] {:6.2f} %'.format(ratio)\n print(s, end='')\n sys.stdout.flush()",
"def set_char(self, char, x=None, y=None):\n if x is None:\n x = self.term_cursor[0]\n if y is None:\n y = self.term_cursor[1]\n\n x, y = self.constrain_coords(x, y)\n self.term[y][x] = (self.attrspec, self.charset.current, char)",
"def set_redraw_frequency(self, freq):\n self.bar_char = freq",
"def SetProgress(self, percent):\r\n self.gauge.SetValue(percent)",
"def update_RunProgressBar(self,run):\n self.progBarRun.setValue(run)",
"def change_char_p(self, x, y):\n \n self.game_grid[y][x] = 'P'",
"def advance_char(self):\n self.pos.advance(self.current_char)\n self.current_char = self.text[self.pos.index] if self.pos.index < len(self.text) else None",
"def set_character(self, new_character):\n self.character = new_character"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the empty bar character | def set_empty_bar_character(self, char):
self.empty_bar_char = char | [
"def set_bar_character(self, char):\n self.bar_char = char",
"def set_bar_char(self, char):\n \n self._bar_char = char",
"def clear(self):\n return \"\\x1b[0m\"",
"def render_blank_note_in_ascii(self):\n return \" \" * 4",
"def blank(self):\n self.lines.append('')",
"def blank_line(self) -> None:\n self.add_output(\"\")",
"def clear(self, console):\n console.put_char(self.x, self.y, ' ')",
"def get_empty_cell(self):\n return ' ' * self.width",
"def blank_line(self, row):\n self.term[row] = self.empty_line()",
"def _ch_backspace(self):\n pass",
"def clear_text(self):\n self.set_text('')",
"def hbar(length=80):\n return '='*length",
"def prefill_with_character(value, column_length=4, fill_char=HTML_SPACE):\r\n pass",
"def clear(self):\n self.value = u\"\"",
"def promptClear(self):\n\n self.string = \"\"\n self.position = 0\n self.view = 0",
"def print_empty_line():\n print(\"\")",
"def empty_notice(s):\n return s.replace('\\0', ' (press <RETURN> to leave empty)')",
"def _line_clear(self):\n self.state = list(filter(lambda row: row.count(' ') != 0, self.state))\n while len(self.state) < Field.HEIGHT:\n self.state.insert(0, [' ' for col in range(Field.WIDTH)])",
"def promptBackspace(self):\n\n self.hdirty = True\n if self.position != 0:\n self.string = self.string[:self.position - 1] + \\\n self.string[self.position:]\n self.position -= 1\n if self.promptValidate():\n self.promptFromScratch()\n else:\n self.stdscr.delch(self.height - 1, len(self.prompt) +\n 1 + self.position - self.view)\n self.stdscr.move(self.height - 1, len(self.prompt) +\n 1 + self.position - self.view)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the progress character | def set_progress_character(self, char):
self.progress_char = char | [
"def set_bar_character(self, char):\n self.bar_char = char",
"def set_bar_char(self, char):\n \n self._bar_char = char",
"def setChar(*args, **kwargs):\n \n pass",
"def set_empty_bar_character(self, char):\n self.empty_bar_char = char",
"def SetSpecialChar ( self, num, buf):\n\t\tif num < 0 | num > 7: return False # Verify you are setting a valid character\n\t\tif len (buf) != 8: return False # Must be 8 characters long\n\t\toutput = \"\" # Set the output to null\n\t\tfor row in buf:\n\t\t\toutput = output + chr(row)\n\t\toutput = \"%c%s\" % ( chr(num), output )\n\t\tself.SendCommand ( 9, output)",
"def set_char(self, coord, char):\n\t\tassert coord.x >= 0 and coord.x < self.width, \"X Coordinate out of range\"\n\t\tassert coord.y >= 0 and coord.y < self.height, \"Y Coordinate out of range\"\n\t\tself.content[self.y_max - coord.y][coord.x] = char",
"def SetSpecialChar ( self, num, buf):\n\t\tif num < 0 | num > 7: return False\n\t\tif len (buf) != 8: return False \n\t\toutput = \"\"\n\t\tfor row in buf:\n\t\t\toutput = output + chr(row)\n\t\toutput = \"%c%s\" % ( chr(num), output )\n\t\tself.SendCommand ( 9, output)",
"def update_progress(self, progress):\n ## Modify this to change the length of the progress bar\n barLength = 10\n status = \"\"\n if isinstance(progress, int):\n progress = float(progress)\n if not isinstance(progress, float):\n progress = 0\n status = \"error: progress var must be float\\r\\n\"\n if progress < 0:\n progress = 0\n status = \"Halt...\\r\\n\"\n if progress >= 1:\n progress = 1\n status = \"Done...\\r\\n\"\n block = int(round(barLength*progress))\n text = \"\\rPercent: [{0}] {1}% {2}\".format( \"#\"*block + \"-\"*(barLength-block), progress*100, status)\n sys.stdout.write(text)\n sys.stdout.flush()",
"def advance_char(self):\n self.pos.advance(self.current_char)\n self.current_char = self.text[self.pos.index] if self.pos.index < len(self.text) else None",
"def SetProgress(self, percent):\r\n self.gauge.SetValue(percent)",
"def char(self, c):\n n = ord(c)\n ch = font[n * 8:n * 8 + 8]\n self.image(ch)",
"def set_character(self, new_character):\n self.character = new_character",
"def updateStatus(self, text):\r\n self.style.configure('text.Horizontal.TProgressbar', text=' ' + text)\r\n self.progressTextFormat = ' ' + text + ' {0}%'\r\n print(text)",
"def set_char(self, char, x=None, y=None):\n if x is None:\n x = self.term_cursor[0]\n if y is None:\n y = self.term_cursor[1]\n\n x, y = self.constrain_coords(x, y)\n self.term[y][x] = (self.attrspec, self.charset.current, char)",
"def set_progress(self, value):\n\n if self.active_socket is not None:\n msg = 'PROGRESS %f\\n' % float(value)\n try:\n self.active_socket.send(msg)\n except socket.error:\n pass",
"def setProgress(self, n, m):\n pass",
"def setpos(self, new_pos):\n self.pos = new_pos - 1\n self.next_char()",
"def PrintProgress(self):\n ratio = 100*self.progressBar['value'] / self.progressBar['maximum']\n s = '\\033[1K\\r['\n n = math.floor(ratio)\n s += '=' * n\n if n < 100:\n s += '>' + '.'*(100-n-1)\n s += '] {:6.2f} %'.format(ratio)\n print(s, end='')\n sys.stdout.flush()",
"def set_percentage_text(self, text: str) -> None:\n self.progress_pct_text.set(text)\n self.frame.update()",
"def update_progress(self, value):\n self.progress.setValue(value)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the redraw frequency | def set_redraw_frequency(self, freq):
self.redraw_freq = freq | [
"def set_speed(self):\n\n sender = self.sender()\n speed = 1\n if sender is self.speed_x05:\n speed = 0.5\n if sender is self.speed_x025:\n speed = 0.25\n if sender is self.speed_x0125:\n speed = 0.125\n self.highlight_selected_speed(sender)\n refresh_rate = round(1000/(self.fps * speed))\n self.timer.start(refresh_rate)",
"def on_redraw_timer(self, event):\r\n if ((not (self.paused or not self.running))\r\n and (len(RAW_Q)%frequency == 0)):\r\n readPort()\r\n nxt = len(self.data) #this is set for the case that\r\n #PLOT_ARRAY is updating faster than the graph\r\n #is being drawn\r\n self.data.append(PLOT_ARRAY[nxt])\r\n self.draw_plot()",
"def redraw(self):\n if not self.__drawing_queued: #if we are moving, then there is a timeout somewhere already\n self.__drawing_queued = True\n self._last_frame_time = dt.datetime.now()\n gobject.timeout_add(1000 / self.framerate, self.__interpolate)",
"def setLoopFreq(self, freq):\n # causes an AttributeError for non-real-time timers\n self._timer.setLoopFreq(freq)",
"def set_frequency(self):\n if self.laser_status:\n self._fiber_shooting_logic.set_frequency(self._mw.frequency_spinBox.value())\n else:\n pass\n return",
"def updateTimeFactors(self, new_rtf, new_freq, new_dt):\n self.realtime_factor = new_rtf\n self.frequency = new_freq\n\n self.step_size = new_dt",
"def run(self,granularity=1,runtime=100):\n if granularity > 0.0:\n self.timer = QtCore.QTimer()\n self.timer.connect(self.timer,QtCore.SIGNAL(\"timeout()\"),self.drawNow)\n self.timer.start(1000*granularity)\n if runtime > 0.0:\n self.timeout = QtCore.QTimer()\n self.timeout.connect(self.timeout,QtCore.SIGNAL(\"timeout()\"),self.stop)\n self.timeout.setSingleShot(True)\n self.timeout.start(1000*runtime)",
"def __draw_fps(self):\n txt = f'{round(self.clock.get_fps())} FPS'\n rtxt = self.font.render(txt, False, pygame.Color('black'))\n rsiz = self.font.size(txt)\n self.__screen.blit(rtxt, (SCREEN_WIDTH-rsiz[0]-5, 5))",
"def set_fps(self, fps=25.0):\n self.fps = fps",
"def queue_redraw(self):\n self.redraw_all = True",
"def update_display(self):\n if hasattr(self, 'spec') and hasattr(self, 'y'):\n self.plot.plot(self.spec.wavelengths(), self.y, pen='r', clear=True)\n pg.QtGui.QApplication.processEvents()",
"def toggle_show_fps(self, key):\n\n if key == pg.K_F5:\n self.show_fps = not self.show_fps\n if not self.show_fps:\n pg.display.set_caption(self.caption)",
"def flicker_update(self):\n if self.flicker_lock: return\n self.flicker_index += 1\n if self.flicker_index >= FLICKER_CONSTANT: self.flicker_index = 0",
"def _draw_digital_clock(self):\n self._draw_time_scale()\n self._draw_time()",
"def set_timers(self):\n pygame.time.set_timer(USEREVENTS.TIMER_ONE_SEC, 1000) #Each second",
"def start_mode_change_timer(self):\n\n Clock.unschedule(self.__change_mode)\n self.mode_change_timer = self.scatter_length\n Clock.schedule_once(self.__change_mode, self.mode_change_timer)",
"def _configure_timers(self):\n self._timer_plot = QtCore.QTimer(self)\n self._timer_plot.timeout.connect(self._update_plot)\n # self.timer = QtCore.QTimer()",
"def _draw_simple_clock(self):\n self._draw_simple_background()\n self._draw_numbers()\n self._draw_hands()",
"def tick(self):\r\n self.eventClock += 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
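The four records above come from a console progress-bar helper: they configure the fill character, the empty-bar character, the leading progress character, and how often the bar is redrawn. A small illustrative sketch of how such settings typically interact when a bar is rendered; SimpleBar and its attribute names are assumptions for illustration, not the library these snippets were mined from.

class SimpleBar:
    def __init__(self, width=20):
        self.width = width
        self.bar_char = "="        # character for the filled portion
        self.empty_bar_char = "-"  # character for the unfilled portion
        self.progress_char = ">"   # leading edge of the filled portion
        self.redraw_freq = 1       # redraw only every N steps

    def render(self, current, total):
        # Build the bar string from the configured characters.
        filled = int(self.width * current / total)
        if filled >= self.width:
            bar = self.bar_char * self.width
        elif filled > 0:
            bar = self.bar_char * (filled - 1) + self.progress_char
        else:
            bar = ""
        bar += self.empty_bar_char * (self.width - len(bar))
        return f"[{bar}] {current}/{total}"


bar = SimpleBar()
for step in (0, 5, 10, 20):
    if step % bar.redraw_freq == 0:  # honour the redraw frequency
        print(bar.render(step, 20))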
Overwrites a previous message to the output. | def overwrite(self, output_, messages):
# carriage return
output_.write('\x0D')
if self.last_messages_length is not None:
# clear the line with the text of the last message
output_.write('\x20' * self.last_messages_length)
# carriage return
output_.write('\x0D')
output_.write(messages)
self.last_messages_length = len(messages) | [
"def _overwrite(self, message):\n if self._output.is_decorated():\n self._output.write('\\x0D\\x1B[2K')\n self._output.write(message)\n else:\n self._output.writeln(message)",
"def message_reset(self):\n self.message = \"\"",
"def finalizeMessage(self, same, msg, filename):\n if not same:\n self._same = False\n self._message += '\\n'+'*'*20+'\\nDIFF in {}: \\n {}'.format(filename, '\\n '.join(msg))",
"def resetMessage(self):\n\n self._last_set_json = str(self._original_json)\n self._text_editor.setText(self._original_json)\n self.message_type = self._original_typedef",
"def restore_message(self, unused=None):\n if self.DEBUG_PRINT_FUNCTIONS:\n pass;\n print \"restore_message\"\n self.message.config(text=self.oldmessage_info[0])\n self.message.config(fg=self.oldmessage_info[1])",
"def resetMessage(self):\n self._text_editor.setText(self._original_json)",
"def output(message):\n sys.stdout.write(message + \"\\n\")\n sys.stdout.flush()",
"def UpdateMessage(self, message, new_suffix):\n if not message:\n raise ValueError('A message must be passed.')\n if message not in self._messages:\n raise ValueError(\n 'The given message does not belong to this output object.')\n if self._messages and message != self._messages[-1]:\n raise ValueError('Only the last added message can be updated.')\n with self._lock:\n message._UpdateSuffix(new_suffix) # pylint: disable=protected-access",
"def add(self, message):\n self.display_output.append(message)",
"def update_history(self, message):\n\n # Get timestamp for this access\n now_time_ts = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n history_string = '{:s}: {:s}\\n'.format(now_time_ts, message)\n if 'history' not in self._nc.ncattrs():\n self._nc.setncattr('history', history_string)\n return\n \n previous_history = self._nc.history.strip()\n if not previous_history:\n self._nc.history = history_string\n else:\n self._nc.history += history_string",
"def _sout_updated(self):\n self._server.message_all(\"SOUT UPDATED\\n\")",
"def _UpdateConsole(self):\n if self._messages:\n # Check if there have been new messages added\n if self._last_print_index < (len(self._messages) - 1):\n # Print all the new messages starting at the last message printed\n # and separate them with newlines.\n for message in self._messages[self._last_print_index:-1]:\n message.Print()\n self._stream.write('\\n')\n # Update last print index\n self._last_print_index = len(self._messages) - 1\n self._messages[self._last_print_index].Print()",
"def putBack(self, *args) -> \"void\":\n return _coin.SoInput_putBack(self, *args)",
"def archive(self):\n\n self.old.extend(self.message_list)\n self.message_list = []",
"def _log(self, newText):\r\n self.gameLog.config(state=NORMAL)\r\n self.gameLog.insert(END, newText+\"\\n\")\r\n self.gameLog.config(state=DISABLED)\r\n self.gameLog.yview(END)",
"def _write(self, message=None):\n if message is not None:\n stdout('%s\\n' % message)\n else:\n stdout('\\n')",
"def _UpdateConsole(self):\n if not self._may_have_update:\n return\n\n # Reset at the start so if gcloud exits, the cursor is in the proper place.\n # We need to track the number of outputted lines of the last update because\n # new messages may have been added so it can't be computed from _messages.\n if self._last_total_lines:\n self._stream.write(self._GetAnsiCursorUpSequence(self._last_total_lines))\n\n total_lines = 0\n force_print_rest = False\n for message in self._messages:\n num_lines = message.num_lines\n total_lines += num_lines\n if message.has_update or force_print_rest:\n force_print_rest |= message.num_lines_changed\n message.Print()\n else:\n # Move onto next message\n self._stream.write('\\n' * num_lines)\n self._last_total_lines = total_lines\n self._may_have_update = False",
"def output(msg):\n print(f\"{datetime.now()}\\t{msg}\")",
"def log_msg(self, msg, end=\"\\n\"):\n self.log += msg+end\n self.prompt.delete('1.0', tk.END)\n self.prompt.insert(tk.END, self.log)\n self.prompt.yview(tk.END)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
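The overwrite document above updates a single console line in place: carriage return, blank out the previous text, carriage return again, then write the new message. A standalone sketch of the same technique; the LineOverwriter class is an illustrative stand-in for the output object the dataset code wraps.

import sys
import time


class LineOverwriter:
    def __init__(self, stream=sys.stdout):
        self.stream = stream
        self.last_length = 0

    def overwrite(self, message):
        # Return to column 0, erase the previous message, return again,
        # then write the new message -- the same sequence as above.
        self.stream.write("\r")
        self.stream.write(" " * self.last_length)
        self.stream.write("\r")
        self.stream.write(message)
        self.stream.flush()
        self.last_length = len(message)


writer = LineOverwriter()
for pct in range(0, 101, 25):
    writer.overwrite(f"progress: {pct}%")
    time.sleep(0.2)
writer.overwrite("done\n")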
Creates a revolution of a sketch about an axis (the first line of the sketch) to create a solid operation | def revolucionAditiva(self, doc, croquis = None, nombreExtrusion = "Revolucion", angulo = 360, invertido = 0, planoMedio = 0 ):
self.nombre = nombreExtrusion
self.doc = doc
self.tipo = "revolucionAditiva"
#The strings for the base and its parent are extracted via methods that accept several kinds of classes
stringCroquis = extraerString(croquis)
if type(croquis) is str:
croquis = self.doc.seleccionarObjeto(croquis)
stringPadreCroquis = extraerStringPadre(croquis)
self.doc.contLineasReferencia += 1
stringEjeRevolucion = f"EjeRevolucion{str(self.doc.contLineasReferencia).zfill(2)}"
#REVOLUTION AXIS
self.doc.base.getObject(stringPadreCroquis).newObject('PartDesign::Line',stringEjeRevolucion)
self.doc.base.getObject(stringEjeRevolucion).AttachmentOffset = FreeCAD.Placement(
FreeCAD.Vector(0.0000000000, 0.0000000000, 0.0000000000),
FreeCAD.Rotation(0.0000000000, 0.0000000000, 0.0000000000)
)
self.doc.base.getObject(stringEjeRevolucion).MapReversed = False
self.doc.base.getObject(stringEjeRevolucion).Support = [(self.doc.base.getObject(stringCroquis),'Edge1')]
self.doc.base.getObject(stringEjeRevolucion).MapPathParameter = 0.000000
self.doc.base.getObject(stringEjeRevolucion).MapMode = 'TwoPointLine'
#REVOLUTION
self.doc.base.getObject(stringPadreCroquis).newObject('PartDesign::Revolution',nombreExtrusion)
self.base = self.doc.base.getObject(nombreExtrusion)
self.base.Profile = self.doc.base.getObject(stringCroquis)
self.base.ReferenceAxis = (self.doc.base.getObject(stringEjeRevolucion), [''])
self.base.Angle = angulo
self.base.Reversed = invertido
self.base.Midplane = planoMedio
self.doc.extrusiones[nombreExtrusion] = self
self.doc.addExtern("Extrusion", nombreExtrusion)
return self | [
"def revolucionDeVaciado(self, doc, croquis = None, nombreExtrusion = \"RevolucionDeVaciado\", angulo = 360, invertido = 0, planoMedio = 0 ):\n \n self.nombre = nombreExtrusion\n self.doc = doc\n self.tipo = \"revolucionDeVaciado\"\n\n #Se extrae el string de la base y de su padre mediante metodos que aceptan varios tipos de clases\n stringCroquis = extraerString(croquis)\n\n if type(croquis) is str:\n croquis = self.doc.seleccionarObjeto(croquis)\n\n stringPadreCroquis = extraerStringPadre(croquis)\n\n self.doc.contLineasReferencia += 1\n stringEjeRevolucion = f\"EjeRevolucion{str(self.doc.contLineasReferencia).zfill(2)}\"\n\n #EJE DE REVOLUCION\n self.doc.base.getObject(stringPadreCroquis).newObject('PartDesign::Line',stringEjeRevolucion)\n\n self.doc.base.getObject(stringEjeRevolucion).AttachmentOffset = FreeCAD.Placement(\n FreeCAD.Vector(0.0000000000, 0.0000000000, 0.0000000000),\n FreeCAD.Rotation(0.0000000000, 0.0000000000, 0.0000000000)\n )\n\n self.doc.base.getObject(stringEjeRevolucion).MapReversed = False\n self.doc.base.getObject(stringEjeRevolucion).Support = [(self.doc.base.getObject(stringCroquis),'Edge1')]\n self.doc.base.getObject(stringEjeRevolucion).MapPathParameter = 0.000000\n self.doc.base.getObject(stringEjeRevolucion).MapMode = 'TwoPointLine'\n\n #REVOLUCION\n self.doc.base.getObject(stringPadreCroquis).newObject('PartDesign::Groove',nombreExtrusion)\n self.base = self.doc.base.getObject(nombreExtrusion)\n\n self.base.Profile = self.doc.base.getObject(stringCroquis)\n self.base.ReferenceAxis = (self.doc.base.getObject(stringEjeRevolucion), [''])\n self.base.Angle = angulo\n self.base.Reversed = invertido\n self.base.Midplane = planoMedio\n\n self.doc.extrusiones[nombreExtrusion] = self\n self.doc.addExtern(\"Extrusion\", nombreExtrusion)\n\n return self",
"def polyCrease(relativeValue=float, operation=int, vertexValue=float, value=float, createHistory=bool):\n pass",
"def create(points):\r\n points = p2e._base._util.scale_1000(points)\r\n \r\n eco_id = Roof._gen_object(\"roof\", \"plane\")\r\n if id == -1: return None\r\n return Roof(eco_id, points)",
"def create_solid(self):\n\n # Creates a cadquery solid from points and revolves\n solid = (\n cq.Workplane(self.workplane)\n .polyline(self.points)\n .close()\n .extrude(distance=-self.distance / 2.0, both=self.extrude_both)\n )\n\n # Checks if the azimuth_placement_angle is a list of angles\n if isinstance(self.azimuth_placement_angle, Iterable):\n rotated_solids = []\n # Perform seperate rotations for each angle\n for angle in self.azimuth_placement_angle:\n rotated_solids.append(\n solid.rotate(\n (0, 0, -1), (0, 0, 1), angle))\n solid = cq.Workplane(self.workplane)\n\n # Joins the seperate solids together\n for i in rotated_solids:\n solid = solid.union(i)\n else:\n # Peform rotations for a single azimuth_placement_angle angle\n solid = solid.rotate(\n (0, 0, -1), (0, 0, 1), self.azimuth_placement_angle)\n\n self.perform_boolean_operations(solid)\n\n return solid",
"def start(self):\n self.colisiones = [[0,0],[0,0],[0,0]] #X(a,b), Y(a,b), Z(a,b) --> A ------- B\n t = 0 \n self.VxA = [] #Listas que indican que particulas han chocado con la pared(pueden repetirse)\n self.VxB = []\n self.VyA = []\n self.VyB = []\n self.VzA = []\n self.VzB = []\n while t <= self.t:\n for i in range(len(self.particulas)):\n self.particulas[i].xyz[0] += self.dt * self.particulas[i].v[0]\n self.particulas[i].xyz[1] += self.dt * self.particulas[i].v[1]\n self.particulas[i].xyz[2] += self.dt * self.particulas[i].v[2]\n x = self.particulas[i].hit_x(self.cubo.a,self.cubo.b) # [1,0] si hit A/ [0,1] si hit B\n y = self.particulas[i].hit_y(self.cubo.a,self.cubo.b)\n z = self.particulas[i].hit_z(self.cubo.a,self.cubo.b)\n self.colisiones[0][0] += x[0]\n self.colisiones[0][1] += x[1]\n self.colisiones[1][0] += y[0]\n self.colisiones[1][1] += y[1]\n self.colisiones[2][0] += z[0]\n self.colisiones[2][1] += z[1]\n if x[0] == 1:#Debido a esto la cantidad de memoria usada es mayor en cada unidad de tiempo. \n self.VxA.append(self.particulas[i].v[0])\n if x[1] == 1:\n self.VxB.append(self.particulas[i].v[0])\n if y[0] == 1:\n self.VyA.append(self.particulas[i].v[1])\n if y[1] == 1:\n self.VyB.append(self.particulas[i].v[1])\n if z[0] == 1:\n self.VzA.append(self.particulas[i].v[2])\n if z[1] == 1:\n self.VzB.append(self.particulas[i].v[2])\n t += self.dt\n \n self.colisionesT = 0 #Colisiones totales\n for i in range(3):\n for k in range(2):\n self.colisionesT += self.colisiones[i][k]",
"def calculate_erection_operation_time(project_specs, project_data, construct_duration, operational_construction_time):\n erection_construction_time = 1/3 * construct_duration\n\n print('Calculating operation time for erection...')\n # group project data by project ID\n project = project_specs\n\n # for components in component list determine if base or topping\n project_data['components']['Operation'] = project_data['components']['Lift height m'] > (float(project['Hub height m'] * project['Breakpoint between base and topping (percent)']))\n boolean_dictionary = {True: 'Top', False: 'Base'}\n project_data['components']['Operation'] = project_data['components']['Operation'].map(boolean_dictionary)\n\n # create groups for operations\n top_v_base = project_data['components'].groupby(['Operation'])\n\n # group crane data by boom system and crane name to get distinct cranes\n crane_grouped = project_data['crane_specs'].groupby(['Equipment name', 'Crane name', 'Boom system', 'Crane capacity tonne'])\n\n crane_poly = pd.DataFrame(columns=['Equipment name', 'Crane name', 'Boom system', 'Crane capacity tonne', 'Crane poly'])\n for name, crane in crane_grouped:\n crane = crane.reset_index(drop=True)\n x = crane['Max capacity tonne']\n y = crane['Hub height m']\n wind_speed = min(crane['Max wind speed m per s'])\n hoist_speed = min(crane['Hoist speed m per min'])\n travel_speed = min(crane['Speed of travel km per hr'])\n setup_time = max(crane['Setup time hr'])\n crew_type = crane['Crew type ID'][0] #todo: fix this so it's not a hack... need to rethink data structure - right now just picking first crew type - this is correct because same for all crane/boom combinations but we should come up with a better way to do it\n polygon = Polygon([(0, 0), (0, max(y)), (min(x), max(y)), (max(x), min(y)), (max(x), 0)])\n df = pd.DataFrame([[name[0],\n name[1],\n name[2],\n name[3],\n wind_speed,\n setup_time,\n hoist_speed,\n travel_speed,\n crew_type,\n polygon]],\n columns=['Equipment name', 'Crane name', 'Boom system', 'Crane capacity tonne',\n 'Max wind speed m per s', 'Setup time hr',\n 'Hoist speed m per min', 'Speed of travel km per hr',\n 'Crew type ID', 'Crane poly'])\n crane_poly = crane_poly.append(df, sort=True)\n\n # loop through operation type (topping vs. 
base)\n rownew = pd.Series()\n component_max_speed = pd.DataFrame()\n crane_poly_new = crane_poly\n for name_operation, component_group in top_v_base:\n # calculate polygon for crane capacity and check if component can be lifted by each crane without wind loading\n for idx, crane in crane_poly.iterrows():\n polygon = crane['Crane poly']\n\n for component in component_group['Component']:\n # get weight and height of component in each component group\n component_only = component_group.where(component_group['Component'] == component).dropna(thresh=1)\n point = Point(component_only['Mass tonne'], component_only['Lift height m'])\n crane['Lift boolean {component}'.format(component=component)] = polygon.contains(point)\n\n rownew = rownew.append(crane)\n\n bool_list = list()\n for component in component_group['Component']:\n if crane['Lift boolean {component}'.format(component=component)] is False:\n crane_bool = False\n else:\n crane_bool = True\n bool_list.append(crane_bool)\n\n # calculate max permissible wind speed\n # equation for calculating permissible wind speed:\n # vmax = max_TAB * sqrt(1.2 * mh / aw), where\n # mh = hoist load\n # aw = area exposed to wind = surface area * coeff drag\n # 1.2 = constant in m^2 / t\n # vmax_tab = maximum load speed per load chart\n # source: pg. 33 of Liebherr\n\n mh = component_group['Mass tonne']\n aw = component_group['Surface area sq m'] * component_group['Coeff drag']\n vmax_tab = crane['Max wind speed m per s']\n vmax_calc = vmax_tab * sqrt(1.2 * mh / aw)\n\n # if vmax_calc is less than vmax_tab then vmax_calc, otherwise vmax_tab (based on pg. 33 of Liebherr)\n # todo: check vmax - should it be set to calculated value rather than vmax_tab if greater?\n component_group_new = pd.DataFrame(component_group,\n columns=list(component_group.columns.values) + ['vmax',\n 'Crane name',\n 'Boom system',\n 'crane_bool'])\n component_group_new['vmax'] = list((min(vmax_tab, x) for x in vmax_calc))\n component_group_new['Crane name'] = crane['Crane name']\n component_group_new['Boom system'] = crane['Boom system']\n component_group_new['crane_bool'] = bool_list\n\n component_max_speed = component_max_speed.append(component_group_new, sort=True)\n\n crane_poly_new['Crane bool {operation}'.format(operation=name_operation)] = min(bool_list)\n\n crane_poly = crane_poly_new\n\n # join crane polygon to crane specs\n crane_component = pd.merge(crane_poly, component_max_speed, on=['Crane name', 'Boom system'])\n\n # select only cranes that could lift the component\n possible_cranes = crane_component.where(crane_component['crane_bool'] == True).dropna(thresh=1).reset_index(drop=True)\n\n # calculate travel time per cycle\n turbine_spacing = float(project['Turbine spacing (times rotor diameter)'] * project['Rotor diameter m'] * km_per_m)\n turbine_num = float(project['Number of turbines'])\n possible_cranes['Travel time hr'] = turbine_spacing / possible_cranes['Speed of travel km per hr'] * turbine_num\n\n # calculate erection time\n possible_cranes['Operation time hr'] = ((possible_cranes['Lift height m'] / possible_cranes['Hoist speed m per min'] * hr_per_min)\n + (possible_cranes['Cycle time installation hrs'])\n ) * turbine_num\n\n # store setup time\n possible_cranes['Setup time hr'] = possible_cranes['Setup time hr'] * turbine_num\n\n erection_time = possible_cranes.groupby(['Crane name', 'Equipment name', 'Crane capacity tonne', 'Crew type ID',\n 'Boom system', 'Operation'])['Operation time hr'].sum()\n travel_time = possible_cranes.groupby(['Crane name', 
'Equipment name', 'Crane capacity tonne', 'Crew type ID',\n 'Boom system', 'Operation'])['Travel time hr'].max()\n setup_time = possible_cranes.groupby(['Crane name', 'Equipment name', 'Crane capacity tonne', 'Crew type ID',\n 'Boom system', 'Operation'])['Setup time hr'].max()\n rental_time_without_weather = erection_time + travel_time + setup_time\n\n operation_time = rental_time_without_weather.reset_index()\n operation_time = operation_time.rename(columns={0: 'Operation time all turbines hrs'})\n operation_time['Operational construct days'] = (operation_time['Operation time all turbines hrs'] /\n operational_construction_time)\n\n # if more than one crew needed to complete within construction duration then assume that all construction happens\n # within that window and use that time frame for weather delays; if not, use the number of days calculated\n operation_time['time_construct_bool'] = (operation_time['Operational construct days'] >\n erection_construction_time * 30)\n boolean_dictionary = {True: erection_construction_time * 30, False: np.NAN}\n operation_time['time_construct_bool'] = operation_time['time_construct_bool'].map(boolean_dictionary)\n operation_time['Time construct days'] = operation_time[['time_construct_bool', 'Operational construct days']].min(axis=1)\n\n #print(possible_cranes[['Crane name', 'Component', 'Operation time hr', 'Operation']])\n for operation, component_group in top_v_base:\n unique_component_crane = possible_cranes.loc[possible_cranes['Operation'] == operation]['Component'].unique()\n for component in component_group['Component']:\n if component not in unique_component_crane:\n error = 1\n sys.exit('Error: Unable to find installation crane for {} operation and {} component'.format(operation, component))\n else:\n error = 0\n if error == 0:\n print('Crane(s) found for all components for {} installation'.format(operation))\n\n return possible_cranes, operation_time, error",
"def transfer_operators(self):\n coarse = self\n fine = self.child\n\n\n all_tris = np.arange(fine.topology.P2).reshape(coarse.topology.P2, 4)\n central_tris = all_tris[:,0]\n corner_tris = all_tris[:,1:]\n #first, compute contribution to transfer matrices from the central refined triangle\n\n coarse_dual = coarse.dual\n fine_dual = fine.dual[central_tris]\n face_edge_mid = util.gather(fine.topology.FV[0::4], fine.primal)\n\n fine_edge_normal = [np.cross(face_edge_mid[:,i-2,:], face_edge_mid[:,i-1,:]) for i in range(3)]\n fine_edge_mid = [(face_edge_mid[:,i-2,:] + face_edge_mid[:,i-1,:])/2 for i in range(3)]\n fine_edge_dual = [np.cross(fine_edge_mid[i], fine_edge_normal[i]) for i in range(3)]\n fine_edge_normal = np.array(fine_edge_normal)\n fine_edge_mid = np.array(fine_edge_mid)\n fine_edge_dual = np.array(fine_edge_dual)\n\n coarse_areas = [triangle_area_from_corners(coarse_dual, face_edge_mid[:,i-2,:], face_edge_mid[:,i-1,:]) for i in range(3)]\n fine_areas = [triangle_area_from_corners(fine_dual , face_edge_mid[:,i-2,:], face_edge_mid[:,i-1,:]) for i in range(3)]\n fine_areas = [(fine_areas[i-2]+fine_areas[i-1])/2 for i in range(3)]\n coarse_areas = np.array(coarse_areas)\n fine_areas = np.array(fine_areas)\n\n #normal of edge midpoints to coarse dual\n interior_normal = np.array([np.cross(face_edge_mid[:,i,:], coarse_dual) for i in range(3)])\n\n #the 0-3 index of the overlapping domains\n #biggest of the subtris formed with the coarse dual vertex seems to work; but cant prove why it is so...\n touching = np.argmax(coarse_areas, axis=0)\n## print touching\n## print fine_areas\n## print coarse_areas\n\n #indexing arrays\n I = np.arange(len(touching))\n m = touching #middle pair\n l = touching-1 #left-rotated pair\n r = touching-2 #right-rotated pair\n\n #compute sliver triangles\n sliver_r = triangle_area_from_normals(\n +fine_edge_normal[l, I],\n +fine_edge_dual [l, I],\n +interior_normal [r, I])\n sliver_l = triangle_area_from_normals(\n +fine_edge_normal[r, I],\n -fine_edge_dual [r, I],\n -interior_normal [l, I])\n\n## print 'slivers'\n## print sliver_l\n## print sliver_r\n\n assert(np.all(sliver_l>-1e-10))\n assert(np.all(sliver_r>-1e-10))\n\n\n #assemble area contributions of the middle triangle\n areas = np.empty((len(touching),3,3)) #coarsetris x coarsevert x finevert\n #the non-overlapping parts\n areas[I,l,l] = 0\n areas[I,r,r] = 0\n #triangular slivers disjoint from the m,m intersection\n areas[I,r,l] = sliver_l\n areas[I,l,r] = sliver_r\n #subset of coarse tri bounding sliver\n areas[I,r,m] = coarse_areas[r,I] - sliver_l\n areas[I,l,m] = coarse_areas[l,I] - sliver_r\n #subset of fine tri bounding sliver\n areas[I,m,l] = fine_areas[l,I] - sliver_l\n areas[I,m,r] = fine_areas[r,I] - sliver_r\n #square middle region; may compute as fine or caorse minus its flanking parts\n areas[I,m,m] = coarse_areas[m,I] - areas[I,m,l] - areas[I,m,r]\n\n #we may get numerical negativity for 2x2x2 symmetry, with equilateral fundemantal domain,\n #or high subdivision levels. 
or is error at high subdivision due to failing of touching logic?\n assert(np.all(areas > -1e-10))\n\n #areas maps between coarse vertices and fine edge vertices.\n #add mapping for coarse to fine vertices too\n\n #need to grab coarsetri x 3coarsevert x 3finevert arrays of coarse and fine vertices\n fine_vertex = np.repeat( fine .topology.FV[0::4, None, :], 3, axis=1)\n coarse_vertex = np.repeat( coarse.topology.FV[: , : , None], 3, axis=2)\n\n def coo_matrix(data, row, col):\n \"\"\"construct a coo_matrix from data and index arrays\"\"\"\n return util.coo_matrix(\n (data.ravel(),(row.ravel(), col.ravel())),\n shape=(coarse.topology.D2, fine.topology.D2))\n\n center_transfer = coo_matrix(areas, coarse_vertex, fine_vertex)\n\n\n #add corner triangle contributions; this is relatively easy\n #coarsetri x 3coarsevert x 3finevert\n corner_vertex = util.gather(corner_tris, fine.topology.FV)\n corner_dual = util.gather(corner_tris, fine.dual)\n corner_primal = util.gather(corner_vertex, fine.primal)\n\n #coarsetri x 3coarsevert x 3finevert\n corner_areas = triangle_areas_around_center(corner_dual, corner_primal)\n #construct matrix\n corner_transfer = coo_matrix(corner_areas, coarse_vertex, corner_vertex)\n self.transfer = util.csr_matrix(center_transfer + corner_transfer)\n\n #calc normalizations\n self.coarse_area = self.transfer * np.ones(fine .topology.D2)\n self.fine_area = self.transfer.T * np.ones(coarse.topology.D2)\n\n self.f = np.sqrt( self.fine_area)[:,None]\n self.c = np.sqrt( self.coarse_area)[:,None]\n\n #test for consistency with metric calculations\n assert(np.allclose(self.coarse_area, coarse.D2P0, 1e-10))\n assert(np.allclose(self.fine_area , fine .D2P0, 1e-10))",
"def expandeaza(self):\r\n succesori = []\r\n nod_graf_curent = self.nod_graf.info\r\n matrice = NodParcurgere.problema.matrice_clasa\r\n\r\n for (i, j) in [(nod_graf_curent[0], nod_graf_curent[1] - 1),\r\n (nod_graf_curent[0], nod_graf_curent[1] + 1),\r\n (nod_graf_curent[0] - 1, nod_graf_curent[1]),\r\n (nod_graf_curent[0] + 1, nod_graf_curent[1])]: # parcurge lista celor 4 posibili succesori ai nodului curent (self)\r\n if 0 <= i < len(matrice) and 0 <= j < len(matrice[0]):\r\n if matrice[i][j] != \"liber\": # verifica daca pozitia succesorului este ocupata de vreun elev\r\n if ((matrice[i][j], matrice[nod_graf_curent[0]][nod_graf_curent[1]]) not in NodParcurgere.problema.lista_suparati) and ((matrice[nod_graf_curent[0]][nod_graf_curent[1]], matrice[i][j]) not in NodParcurgere.problema.lista_suparati): # verifica daca elevul reprezentand nodul curent si cu elevul ce reprezinta posibilul succesor nu sunt certati\r\n if i in [len(matrice) - 1, len(matrice) - 2]: # verfica daca succesorul se afla pe ultimele doua linii\r\n nod_info = (i, j)\r\n if i == nod_graf_curent[0] + 1:\r\n directie = \"v\"\r\n elif i == nod_graf_curent[0] - 1:\r\n directie = \"^\"\r\n elif j == nod_graf_curent[1] + 1:\r\n if j % 2 == 1:\r\n directie = \">\"\r\n else:\r\n directie = \">>\"\r\n else:\r\n if j % 2 == 0:\r\n directie = \"<\"\r\n else:\r\n directie = \"<<\"\r\n succesori.append((Nod(nod_info, self.fct_h(nod_info)), 1, directie))\r\n\r\n elif not ((j == nod_graf_curent[1] + 1 and j % 2 == 0) or (j == nod_graf_curent[1] - 1 and j % 2 == 1)): # in acest caz succesorul nu se afla pe ultimele doua linii\r\n if i == nod_graf_curent[0] + 1:\r\n directie = \"v\"\r\n elif i == nod_graf_curent[0] - 1:\r\n directie = \"^\"\r\n elif j == nod_graf_curent[1] + 1:\r\n directie = \">\"\r\n else:\r\n directie = \"<\"\r\n nod_info = (i, j)\r\n succesori.append((Nod(nod_info, self.fct_h(nod_info)), 1, directie))\r\n\r\n return succesori",
"def inserisci(self,chiave,valore):\r\n\r\n coppia=[chiave,valore]\r\n nuovoAlbero=AlberoBinario(NodoBinarioLazyDeletion(coppia))\r\n\r\n if self.albero.radice==None:\r\n self.albero.radice= nuovoAlbero.radice\r\n else:\r\n attuale=self.albero.radice #Nodo in analisi\r\n precedente=None #Nodo appena precedente a quello attuale\r\n while attuale!=None: #Finche' il nodo attuale non e' vuoto\r\n precedente=attuale #il nodo precedente diventa attuale\r\n if (chiave==self.chiave(attuale)): #se la chiave cercata e' uguale a quella nel nodo attuale\r\n attuale.info=coppia #sto sovrascrivendo i valori della nuova coppia a quelli della vecchia coppia\r\n attuale.stato=True #aggiorna lo stato del nodo\r\n return\r\n else:\r\n if chiave<self.chiave(attuale): #La chiave cercata e' minore del nodo in analisi\r\n attuale=attuale.figlioSx #il nodo allora da cui continuo la ricerca e' il figlio sx\r\n else:\r\n attuale=attuale.figlioDx #il nodo da cui continuo la ricerca e' il figlio dx\r\n \r\n if chiave<self.chiave(precedente): #la chiave e' minore della chiave del nodo precedente\r\n self.albero.inserisciComeSottoAlberoSx(precedente, nuovoAlbero)#inserisci l'albero radicato nel nodo precedente come sottalbero sx del nuovo albero\r\n else:\r\n self.albero.inserisciComeSottoAlberoDx(precedente,nuovoAlbero) #inserisci l'albero radicato nel nodo precedente come sottoalbero dx del nuovo albero\r",
"def _action_procurement_create(self):\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n new_procs = self.env['procurement.order'] #Empty recordset\n# for line in self:\n# if line.state != 'sale' or not line.product_id._need_procurement():\n# continue\n# qty = 0.0\n# for proc in line.procurement_ids:\n# qty += proc.product_qty\n# if float_compare(qty, line.product_uom_qty, precision_digits=precision) >= 0:\n# continue\n# \n# if not line.order_id.procurement_group_id:\n# vals = line.order_id._prepare_procurement_group()\n# line.order_id.procurement_group_id = self.env[\"procurement.group\"].create(vals)\n# \n# vals = line._prepare_order_line_procurement(group_id=line.order_id.procurement_group_id.id)\n# vals['product_qty'] = line.product_uom_qty - qty\n# new_proc = self.env[\"procurement.order\"].create(vals)\n# new_procs += new_proc\n new_procs.run()\n return new_procs",
"def __init__(self, doc, profil, skin, name='disque'):\n\n self.data = {\n 'thick': 5., # mm\n 'hole radius': 30., # mm\n 'diameter': skin['diameter'] + skin['thick'], # mm\n }\n\n side = profil['side']\n radius = profil['radius']\n thick = self.data['thick']\n diam = self.data['diameter']\n\n # use profile shape to make suppressed parts of the disque\n shape = []\n\n # 1st part\n shape.append(Vector(radius, side / 2, 0))\n shape.append(Vector(radius + diam, side / 2, 0))\n shape.append(Vector(radius + diam, -side / 2, 0))\n shape.append(Vector(radius, -side / 2, 0))\n shape.append(Vector(radius, side / 2, 0))\n\n wire0 = Part.makePolygon(shape)\n face0 = Part.Face(wire0)\n\n # 2nd and 3rd parts\n face1 = Part.Face(wire0)\n face2 = Part.Face(wire0)\n\n # make the volumes\n cut0 = face0.extrude(Vector(0, 0, thick))\n cut0.rotate(Vector(0, 0, 0), Vector(0, 0, 1), 0)\n\n cut1 = face1.extrude(Vector(0, 0, thick))\n cut1.rotate(Vector(0, 0, 0), Vector(0, 0, 1), 120)\n\n cut2 = face2.extrude(Vector(0, 0, thick))\n cut2.rotate(Vector(0, 0, 0), Vector(0, 0, 1), 240)\n\n # make the disque\n disque = Part.makeCylinder(diam / 2, thick)\n disque = disque.cut(cut0)\n disque = disque.cut(cut1)\n disque = disque.cut(cut2)\n\n # dig the hole\n hole = Part.makeCylinder(self.data['hole radius'], thick)\n disque = disque.cut(hole)\n\n MecaComponent.__init__(self, doc, disque, name, (0.95, 1., 1.))",
"def crtaj_vrata (X1,Y1,Z1,koja_vrata=195 , rel_smjer = 0):\n #gdje sam\n radnaPozicija = mc.player.getPos()\t\t\n #kamo gledam\n smjerRada = mc.player.getDirection ()\t\t\t#uzmem kamo gledam\n\n #smjer gledanja radi preglednosti spremimo u \"vektor\"\"\n Vx=0\t\t\t\t\t\t\t\t\t\t\t\t#pocetne vrijednosti su nule\n Vz=0\n if abs (smjerRada.x) > abs (smjerRada.z): \t\t#nadje se dominanti smjer i spremi u vektor\n Vx=round(smjerRada.x)\n else:\n Vz=round(smjerRada.z)\n\n # rel_smjer == 0 naprijed 1 lijevo 2 desno 3 iza \n \n if Vx == 1 :\n pass \n if Vx == -1 : \n rel_smjer += 2\n if rel_smjer > 3 :\n rel_smjer -= 4\n \n \n if Vz == -1 : \n rel_smjer += 1\n if rel_smjer > 3 :\n rel_smjer -= 4 \n if Vz == 1 : \n rel_smjer += 3\n if rel_smjer > 3 :\n rel_smjer -= 4 \n \n if Vz != 0 :\n if rel_smjer == 1 :\n buffer = 3\n if rel_smjer == 3 :\n buffer = 1\n if ( rel_smjer == 1 ) or ( rel_smjer == 3 ) :\n rel_smjer = buffer\n\n \n\n \n #crtanje\n \n \n \n if abs ( Vx ) != abs ( Vz ) :\t\t# ne pod 45\n\n gdjeX1=radnaPozicija.x + Vx*X1 + Vz*Z1 # modificiraj pocetnu koordinatu\n gdjeY1=radnaPozicija.y + Y1\n gdjeZ1=radnaPozicija.z + Vx*Z1 + Vz*X1\n mc.setBlock ( gdjeX1 , gdjeY1 , gdjeZ1 , koja_vrata , 0 + rel_smjer ) # doljnji dio vrata\n gdjeY1=radnaPozicija.y + 1\n mc.setBlock ( gdjeX1 , gdjeY1 , gdjeZ1 , koja_vrata , 8 + rel_smjer ) # gornji dio vrata\n return 1",
"def cone_setup(doc, profil, data):\n\n # if the final shapes already existed (true if one shape exists)\n # return them immediatly\n cone_top_obj = doc.getObject('_cone_top_base')\n cone_side_obj = doc.getObject('_cone_side_base')\n cone_struct_obj = doc.getObject('_cone_struct_base')\n cone_top_thread_obj = doc.getObject('_cone_top_thread')\n cone_struct_thread_obj = doc.getObject('_cone_struct_thread')\n\n if cone_top_obj:\n cone_top = cone_top_obj.Shape.copy()\n cone_side0 = cone_side_obj.Shape.copy()\n cone_side1 = cone_side_obj.Shape.copy()\n cone_side2 = cone_side_obj.Shape.copy()\n cone_struct = cone_struct_obj.Shape.copy()\n cone_top_thread = cone_top_thread_obj.Shape.copy()\n cone_struct_thread = cone_struct_thread_obj.Shape.copy()\n\n return cone_top, cone_side0, cone_side1, cone_side2, cone_struct, \\\n cone_top_thread, cone_struct_thread\n\n side = profil['side']\n radius = profil['radius']\n diam_int = data['diameter']\n diam_ext = data['diameter'] + data['thick']\n length = data['len_lo'] + data['len_hi']\n struct_thick = data['struct_thick']\n\n # to modify the sphere to make it a ellipsoid\n matrix = Matrix()\n matrix.scale(1., 1., length / (diam_ext / 2))\n\n # to suppress the lower half of the sphere/ellipsoid\n lower = Part.makeBox(diam_ext, diam_ext, length)\n lower.translate(Vector(-diam_ext / 2, -diam_ext / 2, 0))\n\n gui_doc = FreeCADGui.ActiveDocument\n\n # make the external shape base of the cone\n sphere = Part.makeSphere(diam_ext / 2)\n sphere = sphere.transformGeometry(matrix)\n cone_base_ext = sphere.common(lower)\n cone_base_ext_obj = doc.addObject(\"Part::Feature\", '_cone_base_ext')\n cone_base_ext_obj.Shape = cone_base_ext\n gui_doc.getObject('_cone_base_ext').Visibility = False\n\n # make the internal shape base of the cone\n sphere = Part.makeSphere(diam_int / 2)\n sphere = sphere.transformGeometry(matrix)\n cone_base_int = sphere.common(lower)\n cone_base_int_obj = doc.addObject(\"Part::Feature\", '_cone_base_int')\n cone_base_int_obj.Shape = cone_base_int\n gui_doc.getObject('_cone_base_int').Visibility = False\n\n # make the skin\n skin = cone_base_ext.cut(cone_base_int)\n skin_obj = doc.addObject(\"Part::Feature\", '_skin')\n skin_obj.Shape = skin\n gui_doc.getObject('_skin').Visibility = False\n\n # use profile shapes to make suppressed parts of the skin\n\n # full profil part\n shape = []\n shape.append(Vector(radius - 10, side / 2, 0))\n shape.append(Vector(radius + diam_ext, side / 2, 0))\n shape.append(Vector(radius + diam_ext, -side / 2, 0))\n shape.append(Vector(radius - 10, -side / 2, 0))\n shape.append(Vector(radius - 10, side / 2, 0))\n\n wire = Part.makePolygon(shape)\n face = Part.Face(wire)\n\n # make the volume\n long_cut_base = face.extrude(Vector(0, 0, length))\n long_cut_obj = doc.addObject(\"Part::Feature\", '_long_cut_base')\n long_cut_obj.Shape = long_cut_base\n gui_doc.getObject('_long_cut_base').Visibility = False\n\n # full profil part\n shape = []\n shape.append(Vector(radius, side / 2, 0))\n shape.append(Vector(radius + diam_ext, side / 2, 0))\n shape.append(Vector(radius + diam_ext, -side / 2, 0))\n shape.append(Vector(radius, -side / 2, 0))\n shape.append(Vector(radius, side / 2, 0))\n\n wire = Part.makePolygon(shape)\n face = Part.Face(wire)\n\n # make the volume\n short_cut_base = face.extrude(Vector(0, 0, data['len_lo']))\n short_cut_obj = doc.addObject(\"Part::Feature\", '_short_cut_base')\n short_cut_obj.Shape = short_cut_base\n gui_doc.getObject('_short_cut_base').Visibility = False\n\n # create 1/3 cylinder\n 
cylinder = Part.makeCylinder(diam_ext / 2, length, Vector(0, 0, 0), Vector(0, 0, 1), 120)\n cylinder_obj = doc.addObject(\"Part::Feature\", '_cylinder_1_3')\n cylinder_obj.Shape = cylinder\n gui_doc.getObject('_cylinder_1_3').Visibility = False\n\n # thread bases\n radius_ext = diam_ext / 2 - struct_thick - 5\n thread_ext = Part.makeHelix(8, struct_thick - 8, radius_ext)\n radius_int = diam_ext / 2 - struct_thick - 9\n thread_int = Part.makeHelix(8, struct_thick, radius_int)\n\n # tube to make the space for threads\n tube_thread_ext = Part.makeCylinder(radius_ext, struct_thick)\n tube_thread_int = Part.makeCylinder(radius_int, struct_thick)\n tube_thread = tube_thread_ext.cut(tube_thread_int)\n tube_thread_obj = doc.addObject(\"Part::Feature\", '_tube_thread')\n tube_thread_obj.Shape = tube_thread\n gui_doc.getObject('_tube_thread').Visibility = False\n\n # tube to cut the top of the structure\n tube_cut_ext = Part.makeCylinder(diam_ext / 2 - struct_thick / 2, struct_thick)\n tube_cut_int = tube_thread_int\n tube_cut = tube_cut_ext.cut(tube_cut_int)\n tube_cut_obj = doc.addObject(\"Part::Feature\", '_tube_cut')\n tube_cut_obj.Shape = tube_cut\n gui_doc.getObject('_tube_cut').Visibility = False\n\n # tube to make the locking of the cone\n tube_lock_0 = Part.makeCylinder(diam_ext / 2 - struct_thick - 2, struct_thick / 2)\n tube_lock_1 = Part.makeCylinder(diam_ext / 2 - struct_thick + 2, struct_thick / 2)\n tube_lock_2 = Part.makeCylinder(diam_ext / 2, struct_thick / 4)\n\n tube_lock_int = tube_lock_1.cut(tube_lock_0)\n tube_lock_int.translate(Vector(0, 0, data['len_lo'] - 0.25 * data['struct_thick']))\n tube_lock_obj = doc.addObject(\"Part::Feature\", '_tube_lock_int')\n tube_lock_obj.Shape = tube_lock_int\n gui_doc.getObject('_tube_lock_int').Visibility = False\n\n tube_lock_ext = tube_lock_2.cut(tube_lock_0)\n tube_lock_ext.translate(Vector(0, 0, data['len_lo'] - 0.25 * data['struct_thick']))\n tube_lock_obj = doc.addObject(\"Part::Feature\", '_tube_lock_ext')\n tube_lock_obj.Shape = tube_lock_ext\n gui_doc.getObject('_tube_lock_ext').Visibility = False\n\n # make cone top part\n cone_top = cone_top_make(doc, gui_doc, data, skin, lower, cone_base_int, tube_cut, tube_thread, tube_lock_int)\n cone_top = cone_top.copy()\n\n cone_side = cone_side_make(doc, gui_doc, skin, lower, data, cone_base_int, long_cut_base, cylinder, tube_lock_int, tube_lock_ext)\n\n cone_side0 = cone_side.copy()\n cone_side0.rotate(Vector(0, 0, 0), Vector(0, 0, 1), 0)\n cone_side1 = cone_side.copy()\n cone_side1.rotate(Vector(0, 0, 0), Vector(0, 0, 1), 120)\n cone_side2 = cone_side.copy()\n cone_side2.rotate(Vector(0, 0, 0), Vector(0, 0, 1), 240)\n\n cone_struct = cone_struct_make(doc, gui_doc, cone_base_ext, short_cut_base, data, cone_top, cone_side, tube_thread)\n\n # internal thread profile\n p0 = (radius_int, 0, 0)\n p1 = (radius_int + 4, 0, 4)\n p2 = (radius_int, 0, 8)\n\n e0 = Part.makeLine(p0, p1)\n e1 = Part.makeLine(p1, p2)\n e2 = Part.makeLine(p2, p0)\n \n section = Part.Wire([e0, e1, e2])\n cone_struct_thread = Part.Wire(thread_int).makePipeShell([section], 1, 1)\n\n cone_struct_thread_obj = doc.addObject(\"Part::Feature\", '_cone_struct_thread')\n cone_struct_thread_obj.Shape = cone_struct_thread.copy()\n gui_doc.getObject('_cone_struct_thread').Visibility = False\n\n # external thread profile\n p0 = (radius_ext, 0, 0)\n p1 = (radius_ext - 4, 0, 4)\n p2 = (radius_ext, 0, 8)\n\n e0 = Part.makeLine(p0, p1)\n e1 = Part.makeLine(p1, p2)\n e2 = Part.makeLine(p2, p0)\n \n section = Part.Wire([e0, e1, e2])\n 
cone_top_thread = Part.Wire(thread_ext).makePipeShell([section], 1, 1)\n\n cone_top_thread_obj = doc.addObject(\"Part::Feature\", '_cone_top_thread')\n cone_top_thread_obj.Shape = cone_top_thread.copy()\n gui_doc.getObject('_cone_top_thread').Visibility = False\n\n return cone_top, cone_side0, cone_side1, cone_side2, cone_struct, \\\n cone_top_thread, cone_struct_thread",
"def crear_todo_el_mapa(self):\n for i in range(0,self.cantidad_de_paredes_no_rompibles): # Defino paredes no rompibles\n self.pos_de_paredes_no_rompibles_en_x += self.distancia_entre_paredes_no_rompibles * (i != 0)\n for g in range(0,self.cantidad_de_paredes_no_rompibles):\n if self.pos_de_paredes_no_rompibles_en_y == 570:\n self.pos_de_paredes_no_rompibles_en_y = 90\n self.pos_de_paredes_no_rompibles_en_y += self.distancia_entre_paredes_no_rompibles * (g != 0) \n self.paredes_no_rompibles.append(parednorompible.Parednorompible([self.pos_de_paredes_no_rompibles_en_x,self.pos_de_paredes_no_rompibles_en_y]))\n \n for i in range(0,len(self.paredes_no_rompibles)):\n self.lista_de_objetos.append(self.paredes_no_rompibles[i].set_estado_de_algunas_casillas()) # Se setean el estado de las casillas con una pared no rompible encima\n \n for i in range(0,self.cantidad_de_casillas): # Defino casillas y a su vez esta crea las casillas rompibles\n self.id_casilla[0] += 1 * (i != 0)\n self.pos_de_casillas_en_x += self.longitud_de_lado_de_casilla * (i != 0)\n for g in range(0,self.cantidad_de_casillas):\n if self.pos_de_casillas_en_y == 650:\n self.pos_de_casillas_en_y = 10 \n self.pos_de_casillas_en_y += self.longitud_de_lado_de_casilla * (g != 0)\n if self.id_casilla[1] == 8:\n self.id_casilla[1] = 0\n self.id_casilla[1] += 1 * (g != 0)\n self.set_id_casilla = (self.id_casilla[0],self.id_casilla[1])\n self.casillas.append(casillas.Casilla(self.set_id_casilla,[self.pos_de_casillas_en_x,self.pos_de_casillas_en_y],self.lista_de_objetos,self.provabilidad_de_spawn_de_casillas_rompibles))\n \n self.crear_portal()\n self.crear_white_walkers() # Se crean tres objetos de la clase WhiteWalker",
"def calc_E_field(X, k_int, k_ext, a, E_0=1, N=N_max): \n (r, theta, phi) = coor.cart2sph(X[0],X[1],X[2])\n\n E = np.zeros((np.size(r),3), dtype=complex)\n\n r_int, theta_int, phi_int = r[r<a], theta[r<a] , phi[r<a] \n r_ext, theta_ext, phi_ext = r[r>=a], theta[r>=a] , phi[r>=a]\n\n print( (k_ext*a +4*(k_ext*a)**(1/3) +2) ) \n\n print(\"Fields calculations ...\")\n E_int = E_internal(r_int,theta_int,phi_int, k_int,k_ext,a)\n E_sca = E_scattered(r_ext,theta_ext,phi_ext, k_int,k_ext,a)\n print(\"Fields computed !\")\n\n ## transform back the fields into cartesian coordinates\n for i in range(theta_int.size):\n R= coor.Mat_sph2cart(theta_int[i],phi_int[i])\n E_int[i,:] = R@E_int[i,:]\n\n for i in range(theta_ext.size):\n R= coor.Mat_sph2cart(theta_ext[i],phi_ext[i])\n E_sca[i,:] = R@E_sca[i,:]\n\n\n \"\"\" \n R = coor.Mat_sph2cart(theta,phi)\n\n print(\"E size : \", E_int.shape )\n print(\"R size : \", R.shape)\n\n E_int = R[r<a,:,:]@np.expand_dims(E_int, axis=0)\n print(\"E size : \", E_int.shape )\n\n E_int = E_int[:,0,:]\n\n E_sca = R[r>=a,:,:]@np.expand_dims(E_sca, axis=0)\n E_sca = E_sca[:,0,:]\n \"\"\"\n ## total field => + OPP\n E[r<a,:] = E_int\n E[r>=a,:] = E_sca \n E[r>=a,0] = E[r>=a,0] + np.exp(1j*k_ext * r[r>=a]*np.cos(theta[r>=a]))\n\n return E",
"def crearPersona(altura=1.70, posicion=(0,0,0), cont=0):\n \n pathgeneral = bpy.path.abspath(\"//\") #Cogemos la ruta hacia este blend\n \n #CUERPO\n #Importamos cuerpo principal le damos nombre y lo colocamos\n path = pathgeneral + \"Body_1.blend/Object/\"\n archivo = \"Body\"\n bpy.ops.wm.append(filename=archivo, directory=path)\n person = \"persona_\"+str(cont)\n bpy.data.objects[\"Body\"].name = person\n \n #ROPA\n #Importamos un vestido\n v = str(random.randint(1, 3)) #Cogemos uno de los vestidos al azar\n #path = \"C:/Users/gabri/Desktop/peoplecreator/vestido_\"+v+\".blend/Object/\"\n path = pathgeneral + \"vestido_\"+v+\".blend/Object/\"\n archivo = \"Dress\"\n bpy.ops.wm.append(filename=archivo, directory=path)\n #Hacemos que el vestido se emparente al cuerpo\n vestido = \"vestido_\"+str(cont)\n bpy.data.objects[\"Dress\"].name = vestido\n bpy.ops.object.select_all(action='DESELECT')\n bpy.data.objects[vestido].select = True\n bpy.context.scene.objects.active = bpy.data.objects[person]\n bpy.ops.object.parent_set(keep_transform=True)\n \n #PELO\n #Importamos pelo\n p = str(random.randint(1, 3)) #Cogemos uno de los peinados al azar\n path = pathgeneral + \"hair_\"+p+\".blend/Object/\"\n archivo = \"Hair\"\n bpy.ops.wm.append(filename=archivo, directory=path)\n #Hacemos que el pelo se emparente al cuerpo\n hair = \"hair_\"+str(cont)\n bpy.data.objects[\"Hair\"].name = hair\n bpy.ops.object.select_all(action='DESELECT')\n bpy.data.objects[hair].select = True\n bpy.context.scene.objects.active = bpy.data.objects[person]\n bpy.ops.object.parent_set(keep_transform=True)\n \n #ALTURA\n bpy.data.objects[person].dimensions[1] = altura\n \n #POSICION\n bpy.data.objects[person].location = posicion",
"def zoeppritz(p,iso1,iso2):\n\n \n #Create output matrix\n Rmatrix = np.zeros((6,6))\n \n #Extract densities\n rho1 = iso1[2]\n rho2 = iso2[2]\n p2 = p*p\n \n #Free surface\n if rho1 == 0:\n # print('Free upper surface')\n \n #Free fluid\n if iso2[1] == 0:\n Rmatrix[6,6] = -1\n \n #Free solid\n else:\n qa2 = np.sqrt(((1/iso2[0]) - p)*((1/iso2[0]) + p))\n qb2 = np.sqrt(((1/iso2[1]) - p)*((1/iso2[1]) + p))\n Omega2 = qb2*qb2 - p2\n DeltaPV = 4*p2*qa2*qb2 + (Omega2**2)\n \n Rmatrix[5,5] = (4*p2*qa2*qb2 - (Omega2**2))/DeltaPV\n Rmatrix[3,3] = Rmatrix[5,5]*(-1)\n Rmatrix[3,5] = 4*p*Omega2*np.sqrt(qa2*qb2)/DeltaPV\n Rmatrix[5,3] = Rmatrix[3,5]\n Rmatrix[4,4] = 1\n \n #Free lower surface \n elif rho2 == 0:\n # print('Free lower surface')\n \n #Fluid on top of free base\n if iso1[2] == 0:\n Rmatrix[2,2] = -1\n \n #Solid on free surface\n else:\n qa1 = np.sqrt(((1/iso1[0]) - p)*((1/iso1[0]) + p))\n qb1 = np.sqrt(((1/iso1[1]) - p)*((1/iso1[1]) + p))\n Omega1 = qb1*qb1 - p2\n DeltaPV = 4*p2*qa1*qb1 + (Omega1**2)\n \n Rmatrix[2,2] = (4*p2*qa1*qb1 - (Omega1**2))/DeltaPV\n Rmatrix[0,0] = Rmatrix[2,2]*(-1)\n Rmatrix[0,2] = 4*p*Omega1*np.sqrt(qa1*qb1)/DeltaPV\n Rmatrix[2,0] = Rmatrix[0,2]\n Rmatrix[1,1] = 1\n \n #Fluid on top\n elif iso1[1] == 0:\n \n #fluid-fluid\n if iso2[1] == 0:\n # print('Fluid - Fluid')\n qa1 = np.sqrt(((1/iso1[0]) - p)*((1/iso1[0]) + p))\n qa2 = np.sqrt(((1/iso2[0]) - p)*((1/iso2[0]) + p))\n DeltaA = (rho2*qa1) + (rho1*qa2)\n \n Rmatrix[2,2] = ((rho2*qa1) - (rho1*qa2))/DeltaA\n Rmatrix[5,5] = Rmatrix[2,2]\n Rmatrix[2,5] = 2*np.sqrt(rho1*rho2*qa1*qa2)/DeltaA\n Rmatrix[5,2] = Rmatrix[2,5]\n \n #Fluid - solid\n else:\n # print('Fluid - Solid')\n qa1 = np.sqrt(((1/iso1[0]) - p)*((1/iso1[0]) + p))\n qa2 = np.sqrt(((1/iso2[0]) - p)*((1/iso2[0]) + p))\n qb2 = np.sqrt(((1/iso2[1]) - p)*((1/iso2[1]) + p))\n Omega2 = qb2*qb2 - p2\n Fa1 = np.sqrt(2*rho1*qa1)\n Fa2 = np.sqrt(2*rho2*qa2)\n Fb2 = np.sqrt(2*rho2*qb2)\n DeltaPV = 4*p2*qa2*qb2 + (Omega2**2) + rho1*qa2/(rho2*(iso2[1]**4)*qa1)\n \n Rmatrix[2,2] = (4*p2*qa2*qb2 + (Omega2**2) - ((rho1*qa2)/(rho2*(iso2[1]**4)*qa1)))/DeltaPV\n Rmatrix[3,3] = ((-4)*p2*qa2*qb2 + (Omega2**2) + (rho1*qa2/(rho2*(iso2[1]**4)*qa1)))/DeltaPV\n Rmatrix[5,5] = (4*p2*qa2*qb2 - (Omega2**2) + (rho1*qa2/(rho2*(iso2[1]**4)*qa1)))/DeltaPV\n Rmatrix[2,5] = Fa1*Fa2*Omega2/(qa1*rho2*(iso2[1]**2)*DeltaPV)\n Rmatrix[5,2] = Rmatrix[2,5]\n Rmatrix[3,5] = 2*p*Fa2*Fb2*Omega2/(rho2*DeltaPV)\n Rmatrix[5,3] = Rmatrix[3,5]\n Rmatrix[2,3] = (-2)*p*Fa1*Fb2*qa2/(qa1*rho2*(iso2[1]**2)*DeltaPV)\n Rmatrix[3,2] = Rmatrix[2,3]\n Rmatrix[4,4] = 1\n \n #Fluid on bottom\n #Solid - fluid\n elif iso2[1] == 0:\n # print('Solid - Fluid')\n qa1 = np.sqrt(((1/iso1[0]) - p)*((1/iso1[0]) + p))\n qb1 = np.sqrt(((1/iso1[1]) - p)*((1/iso1[1]) + p))\n qa2 = np.sqrt(((1/iso2[0]) - p)*((1/iso2[0]) + p))\n Omega1 = qb1*qb1 - p2\n Fa1 = np.sqrt(2*rho1*qa1)\n Fb1 = np.sqrt(2*rho1*qb1)\n Fa2 = np.sqrt(2*rho2*qa2)\n DeltaPV = 4*p2*qa1*qb1 + (Omega1**2) + rho2*qa1/(rho1*(iso1[1]**4)*qa2)\n \n Rmatrix[5,5] = (4*p2*qa1*qb1 + (Omega1**2) - (rho2*qa1)/(rho1*(iso1[1]**4)*qa2))/DeltaPV\n Rmatrix[0,0] = (-4*p2*qa1*qb1 + (Omega1**2) + (rho2*qa1)/(rho1*(iso1[1]**4)*qa2))/DeltaPV\n Rmatrix[2,2] = (4*p2*qa1*qb1 - (Omega1**2) + (rho2*qa1)/(rho1*(iso1[1]**4)*qa2))/DeltaPV\n Rmatrix[2,5] = (Fa1*Fa2*Omega1)/(qa2*rho1*(iso1[1]**2)*DeltaPV)\n Rmatrix[5,2] = Rmatrix[2,5]\n Rmatrix[0,2] = (2*p*Fa1*Fb1*Omega1)/(rho1*DeltaPV)\n Rmatrix[2,0] = Rmatrix[0,2]\n Rmatrix[5,0] = (-2*p*Fa2*Fb1*qa1)/(qa2*rho1*(iso1[1]**2)*DeltaPV);\n 
Rmatrix[0,5] = Rmatrix[5,0];\n Rmatrix[1,1] = 1;\n \n #Solid - solid\n else:\n # print('Solid - Solid')\n qa1 = np.sqrt(((1/iso1[0]) - p)*((1/iso1[0]) + p))\n qb1 = np.sqrt(((1/iso1[1]) - p)*((1/iso1[1]) + p))\n qa2 = np.sqrt(((1/iso2[0]) - p)*((1/iso2[0]) + p))\n qb2 = np.sqrt(((1/iso2[1]) - p)*((1/iso2[1]) + p))\n \n Aap = rho2*qa1 + rho1*qa2\n Abp = rho2*qb1 + rho1*qb2\n Aam = rho2*qa1 - rho1*qa2\n Abm = rho2*qb1 - rho1*qb2\n mu1 = rho1*(iso1[1]**2)\n mu2 = rho2*(iso2[1]**2)\n B1 = mu1 - mu2\n B2 = B1*(-1)\n C1p = 2*p*(B1*(p2 + qa1*qb1) - rho1)\n C1m = 2*p*(B1*(p2 - qa1*qb1) - rho1)\n C2p = 2*p*(B2*(p2 + qa2*qb2) - rho2)\n C2m = 2*p*(B2*(p2 - qa2*qb2) - rho2)\n D = p2*((rho1 + rho2)**2)\n E1 = rho1 - 2*p2*B1\n E2 = rho2 - 2*p2*B2\n Fa1 = np.sqrt(2*rho1*qa1)\n Fa2 = np.sqrt(2*rho2*qa2)\n Fb1 = np.sqrt(2*rho1*qb1)\n Fb2 = np.sqrt(2*rho2*qb2)\n Gbp = mu1*qb1 + mu2*qb2\n Gbm = mu1*qb1 - mu2*qb2\n Hb1 = np.sqrt(2*mu1*qb1)\n Hb2 = np.sqrt(2*mu2*qb2)\n\n DeltaPV = Aap*Abp - C1p*C2p + D\n DeltaH = Gbp\n\n Rmatrix[2,2] = (Aam*Abp + C1m*C2p - D)/DeltaPV #P1P1\n Rmatrix[5,5] = (-Aam*Abp + C1p*C2m - D)/DeltaPV #P2P2\n Rmatrix[0,0] = (-Aap*Abm - C1m*C2p + D)/DeltaPV #V1V1\n Rmatrix[3,3] = (Aap*Abm - C1p*C2m + D)/DeltaPV #V2V2\n Rmatrix[2,5] = Fa1*Fa2*(qb1*E2 + qb2*E1)/DeltaPV #P2P1\n Rmatrix[5,2] = Rmatrix[2,5] #P1P2\n Rmatrix[0,3] = Fb1*Fb2*(qa1*E2 + qa2*E1)/DeltaPV #V2V1\n Rmatrix[3,0] = Rmatrix[0,3] #V1V2\n Rmatrix[0,2] = -p*Fa1*Fb1*(2*qa2*qb2*E1*B2 + E2*(E2 - rho1))/(rho1*DeltaPV) #P1V1\n Rmatrix[2,0] = Rmatrix[0,2] #V1P1\n Rmatrix[3,5] = -p*Fa2*Fb2*(2*qa1*qb1*E2*B1 + E1*(E1 - rho2))/(rho2*DeltaPV) #P2V2\n Rmatrix[5,3] = Rmatrix[3,5] #V2P2\n Rmatrix[2,3] = -p*Fa1*Fb2*(2*B2*qb1*qa2 + E1 - rho2)/DeltaPV #V2P1\n Rmatrix[3,2] = Rmatrix[2,3] #P1V2\n Rmatrix[0,5] = -p*Fa2*Fb1*(2*B1*qb2*qa1 + E2 - rho1)/DeltaPV #P2V1\n Rmatrix[5,0] = Rmatrix[0,5] #V1P2\n Rmatrix[1,1] = Gbm/DeltaH #H1H1\n Rmatrix[4,4] = -Gbm/DeltaH #H2H2\n Rmatrix[1,4] = Hb1*Hb2/DeltaH #H2H1\n Rmatrix[4,1] = Rmatrix[1,4] #H1H2\n \n return Rmatrix",
"def contract(self,i,j,c):\n self.transform[j] = i\n a = self.points[i]\n b = self.points[j]\n\n # set i to the new point c\n self.points[i] = c\n # invalidate j\n self.points[j] = None\n\n # store the quadric of the contracted edge\n # needed for: Q_c = Q_a + Q_b - Q_{ab} below\n Qab = self.graph[i][j]\n\n # subtract the quadrics of the triangles incident\n # to the edge (i,j)\n count = 0\n for k in self.graph[i].keys() & self.graph[j].keys():\n self.graph[i][k] -= triangle_quadric(a,b,self.points[k])\n self.graph[k][i] -= triangle_quadric(a,b,self.points[k])\n count += 1\n\n # move the edges from j to i\n for k in self.graph[j]:\n Qe = self.graph[k].pop(j)\n self.graph[i][k] += Qe # + only for x and y (because of the link condition others are zero)\n self.graph[k][i] += Qe # + only for x and y (because of the link condition others are zero)\n self.graph[i].pop(i) # i was added once before the for loop and once in it\n # invalidate j\n self.graph[j] = None\n\n # calculate the new quadric\n if msmQ: # use method 3 for the error measure\n self.Qs[i] = self.point_quadric(i)\n for k in self.graph[i]:\n self.Qs[k] = self.point_quadric(k)\n else: # use method 4 for the error measure\n self.Qs[i] = self.Qs[i] + self.Qs[j] - Qab\n # invalidate j\n self.Qs[j] = None\n # return the number of removed triangles (either 1 or 2)\n return count",
"def buildCurve(self, *args):\n\n\t\t#access the EP Curve tool if checkEdgeSel returns false\n\t\tif self.checkEdgeSel() == True:\n\t\t\tnewConCurve = cmds.polyToCurve(f=2, dg=3, ch=0)\n\t\t\tcmds.textScrollList('hairStrandList', a=newConCurve, e=1)\n\t\t\treturn\n\n\t\tmel.eval('EPCurveTool')\n\t\treturn"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
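The crane snippet above applies the Liebherr load-chart relation vmax = vmax_tab * sqrt(1.2 * mh / aw) and then caps the result at vmax_tab. A minimal standalone sketch of that capping step follows; the component mass, exposed area, drag coefficient and chart speed used below are illustrative assumptions, not values taken from this dataset.

# Sketch of the permissible-wind-speed cap used in the crane snippet above.
# All numeric inputs below are illustrative assumptions, not dataset values.
from math import sqrt

def permissible_wind_speed(mass_tonne, surface_area_sq_m, coeff_drag, vmax_tab):
    # aw = area exposed to wind = surface area * drag coefficient
    aw = surface_area_sq_m * coeff_drag
    # vmax_calc = vmax_tab * sqrt(1.2 * mh / aw); 1.2 is a constant in m^2 / t
    vmax_calc = vmax_tab * sqrt(1.2 * mass_tonne / aw)
    # never exceed the speed allowed by the load chart
    return min(vmax_tab, vmax_calc)

# hypothetical 70 t component with 90 m^2 exposed area, Cd = 1.2 and a 9 m/s chart limit
print(permissible_wind_speed(70.0, 90.0, 1.2, 9.0))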
Cuts a solid through the revolution of a sketch about an axis (the first line of the sketch) | def revolucionDeVaciado(self, doc, croquis = None, nombreExtrusion = "RevolucionDeVaciado", angulo = 360, invertido = 0, planoMedio = 0 ):
self.nombre = nombreExtrusion
self.doc = doc
self.tipo = "revolucionDeVaciado"
#The base string and its parent's string are extracted via methods that accept several class types
stringCroquis = extraerString(croquis)
if type(croquis) is str:
croquis = self.doc.seleccionarObjeto(croquis)
stringPadreCroquis = extraerStringPadre(croquis)
self.doc.contLineasReferencia += 1
stringEjeRevolucion = f"EjeRevolucion{str(self.doc.contLineasReferencia).zfill(2)}"
#REVOLUTION AXIS
self.doc.base.getObject(stringPadreCroquis).newObject('PartDesign::Line',stringEjeRevolucion)
self.doc.base.getObject(stringEjeRevolucion).AttachmentOffset = FreeCAD.Placement(
FreeCAD.Vector(0.0000000000, 0.0000000000, 0.0000000000),
FreeCAD.Rotation(0.0000000000, 0.0000000000, 0.0000000000)
)
self.doc.base.getObject(stringEjeRevolucion).MapReversed = False
self.doc.base.getObject(stringEjeRevolucion).Support = [(self.doc.base.getObject(stringCroquis),'Edge1')]
self.doc.base.getObject(stringEjeRevolucion).MapPathParameter = 0.000000
self.doc.base.getObject(stringEjeRevolucion).MapMode = 'TwoPointLine'
#REVOLUTION
self.doc.base.getObject(stringPadreCroquis).newObject('PartDesign::Groove',nombreExtrusion)
self.base = self.doc.base.getObject(nombreExtrusion)
self.base.Profile = self.doc.base.getObject(stringCroquis)
self.base.ReferenceAxis = (self.doc.base.getObject(stringEjeRevolucion), [''])
self.base.Angle = angulo
self.base.Reversed = invertido
self.base.Midplane = planoMedio
self.doc.extrusiones[nombreExtrusion] = self
self.doc.addExtern("Extrusion", nombreExtrusion)
return self | [
"def revolucionAditiva(self, doc, croquis = None, nombreExtrusion = \"Revolucion\", angulo = 360, invertido = 0, planoMedio = 0 ):\n \n self.nombre = nombreExtrusion\n self.doc = doc\n self.tipo = \"revolucionAditiva\"\n\n #Se extrae el string de la base y de su padre mediante metodos que aceptan varios tipos de clases\n stringCroquis = extraerString(croquis)\n\n if type(croquis) is str:\n croquis = self.doc.seleccionarObjeto(croquis)\n\n stringPadreCroquis = extraerStringPadre(croquis)\n\n self.doc.contLineasReferencia += 1\n stringEjeRevolucion = f\"EjeRevolucion{str(self.doc.contLineasReferencia).zfill(2)}\"\n\n #EJE DE REVOLUCION\n self.doc.base.getObject(stringPadreCroquis).newObject('PartDesign::Line',stringEjeRevolucion)\n\n self.doc.base.getObject(stringEjeRevolucion).AttachmentOffset = FreeCAD.Placement(\n FreeCAD.Vector(0.0000000000, 0.0000000000, 0.0000000000),\n FreeCAD.Rotation(0.0000000000, 0.0000000000, 0.0000000000)\n )\n\n self.doc.base.getObject(stringEjeRevolucion).MapReversed = False\n self.doc.base.getObject(stringEjeRevolucion).Support = [(self.doc.base.getObject(stringCroquis),'Edge1')]\n self.doc.base.getObject(stringEjeRevolucion).MapPathParameter = 0.000000\n self.doc.base.getObject(stringEjeRevolucion).MapMode = 'TwoPointLine'\n\n #REVOLUCION\n self.doc.base.getObject(stringPadreCroquis).newObject('PartDesign::Revolution',nombreExtrusion)\n self.base = self.doc.base.getObject(nombreExtrusion)\n\n self.base.Profile = self.doc.base.getObject(stringCroquis)\n self.base.ReferenceAxis = (self.doc.base.getObject(stringEjeRevolucion), [''])\n self.base.Angle = angulo\n self.base.Reversed = invertido\n self.base.Midplane = planoMedio\n\n self.doc.extrusiones[nombreExtrusion] = self\n self.doc.addExtern(\"Extrusion\", nombreExtrusion)\n\n return self",
"def crtaj_vrata (X1,Y1,Z1,koja_vrata=195 , rel_smjer = 0):\n #gdje sam\n radnaPozicija = mc.player.getPos()\t\t\n #kamo gledam\n smjerRada = mc.player.getDirection ()\t\t\t#uzmem kamo gledam\n\n #smjer gledanja radi preglednosti spremimo u \"vektor\"\"\n Vx=0\t\t\t\t\t\t\t\t\t\t\t\t#pocetne vrijednosti su nule\n Vz=0\n if abs (smjerRada.x) > abs (smjerRada.z): \t\t#nadje se dominanti smjer i spremi u vektor\n Vx=round(smjerRada.x)\n else:\n Vz=round(smjerRada.z)\n\n # rel_smjer == 0 naprijed 1 lijevo 2 desno 3 iza \n \n if Vx == 1 :\n pass \n if Vx == -1 : \n rel_smjer += 2\n if rel_smjer > 3 :\n rel_smjer -= 4\n \n \n if Vz == -1 : \n rel_smjer += 1\n if rel_smjer > 3 :\n rel_smjer -= 4 \n if Vz == 1 : \n rel_smjer += 3\n if rel_smjer > 3 :\n rel_smjer -= 4 \n \n if Vz != 0 :\n if rel_smjer == 1 :\n buffer = 3\n if rel_smjer == 3 :\n buffer = 1\n if ( rel_smjer == 1 ) or ( rel_smjer == 3 ) :\n rel_smjer = buffer\n\n \n\n \n #crtanje\n \n \n \n if abs ( Vx ) != abs ( Vz ) :\t\t# ne pod 45\n\n gdjeX1=radnaPozicija.x + Vx*X1 + Vz*Z1 # modificiraj pocetnu koordinatu\n gdjeY1=radnaPozicija.y + Y1\n gdjeZ1=radnaPozicija.z + Vx*Z1 + Vz*X1\n mc.setBlock ( gdjeX1 , gdjeY1 , gdjeZ1 , koja_vrata , 0 + rel_smjer ) # doljnji dio vrata\n gdjeY1=radnaPozicija.y + 1\n mc.setBlock ( gdjeX1 , gdjeY1 , gdjeZ1 , koja_vrata , 8 + rel_smjer ) # gornji dio vrata\n return 1",
"def start(self):\n self.colisiones = [[0,0],[0,0],[0,0]] #X(a,b), Y(a,b), Z(a,b) --> A ------- B\n t = 0 \n self.VxA = [] #Listas que indican que particulas han chocado con la pared(pueden repetirse)\n self.VxB = []\n self.VyA = []\n self.VyB = []\n self.VzA = []\n self.VzB = []\n while t <= self.t:\n for i in range(len(self.particulas)):\n self.particulas[i].xyz[0] += self.dt * self.particulas[i].v[0]\n self.particulas[i].xyz[1] += self.dt * self.particulas[i].v[1]\n self.particulas[i].xyz[2] += self.dt * self.particulas[i].v[2]\n x = self.particulas[i].hit_x(self.cubo.a,self.cubo.b) # [1,0] si hit A/ [0,1] si hit B\n y = self.particulas[i].hit_y(self.cubo.a,self.cubo.b)\n z = self.particulas[i].hit_z(self.cubo.a,self.cubo.b)\n self.colisiones[0][0] += x[0]\n self.colisiones[0][1] += x[1]\n self.colisiones[1][0] += y[0]\n self.colisiones[1][1] += y[1]\n self.colisiones[2][0] += z[0]\n self.colisiones[2][1] += z[1]\n if x[0] == 1:#Debido a esto la cantidad de memoria usada es mayor en cada unidad de tiempo. \n self.VxA.append(self.particulas[i].v[0])\n if x[1] == 1:\n self.VxB.append(self.particulas[i].v[0])\n if y[0] == 1:\n self.VyA.append(self.particulas[i].v[1])\n if y[1] == 1:\n self.VyB.append(self.particulas[i].v[1])\n if z[0] == 1:\n self.VzA.append(self.particulas[i].v[2])\n if z[1] == 1:\n self.VzB.append(self.particulas[i].v[2])\n t += self.dt\n \n self.colisionesT = 0 #Colisiones totales\n for i in range(3):\n for k in range(2):\n self.colisionesT += self.colisiones[i][k]",
"def modelisation(vitesse_limite = 110, nb_voies = 3, scenario = 0, pas = 0.5, nb_vehicules_voulu = 20, debit = 0.5):\r\n #Initialisation\r\n \r\n #On cree les voies\r\n (liste_voie, sortie) = cvoie.creer_voies(nb_voies, vitesse_limite)\r\n \r\n #Initialisation de la liste de tous les vehicules crees\r\n liste_vehicules_modelises = []\r\n \r\n #Initialisation de la liste des vehicules en circulation sur le troncon\r\n #modelise\r\n liste_voitures_circul = []\r\n \r\n #Initialisation du nombre de vehicules crees\r\n compteur_vehicules = 0\r\n \r\n #Initialisation du repere temporel\r\n instant = 0\r\n \r\n #Initialisation de la liste de vehicules ayant rate la sortie a cause du \r\n #trafic\r\n liste_sortie_manquee = []\r\n \r\n #Initialisation de la liste de vehicules ayant entierement traverse le \r\n #troncon d'autoroute modelise\r\n vehicule_hors = []\r\n \r\n #Initialisation de la liste de chaque vehicule associe aux instants \r\n #auxquels il atteint 0m et 1200m (sert au calcul du temps de parcours)\r\n liste_temps = []\r\n \r\n #Debut de la modelisation graphique \r\n plt.ion()\r\n \r\n \r\n #Modelisation de la circulation\r\n \r\n #Tant que tous les vehicules a creer n'ont pas traverse entierement le \r\n #troncon d'autoroute, pour chaque instant\r\n while len(vehicule_hors) != nb_vehicules_voulu:\r\n \r\n #Initialisation de la liste de vehicules sortis du modele a l'instant \r\n #donne (ayant traverse le troncon), vehicules a supprimer \r\n a_supprimer = []\r\n \r\n #On ajoute progressivement les vehicules sur les voies\r\n \r\n #Si le nombre de vehicules deja crees n'est pas egal au nombre de \r\n #vehicules a modeliser\r\n if compteur_vehicules != nb_vehicules_voulu:\r\n #On cree des vehicules\r\n (liste_vehicules_crees, compteur_vehicules) = cvehi.generer_les_vehicules(compteur_vehicules, liste_voie, pas, debit, nb_vehicules_voulu, scenario, nb_voies, vitesse_limite)\r\n \r\n #On associe les vehicules crees a l'instant auquel ils ont ete \r\n #crees (utile au calcul du temps de parcours)\r\n liste_temps += [[vehi.nom, instant] for vehi in liste_vehicules_crees]\r\n \r\n #On ajoute les vehicules crees a la liste des vehicules en \r\n #circulation\r\n liste_voitures_circul += liste_vehicules_crees\r\n\r\n #On ajoute les vehicules crees a la liste totale des vehicules \r\n #crees (utile aux statistiques sur les vehicules)\r\n liste_vehicules_modelises += liste_vehicules_crees\r\n \r\n \r\n #On gere la circulation des vehicules sur le troncon modelise\r\n \r\n #Pour chacun des vehicules en circulation sur le troncon d'autoroute\r\n for vehi in liste_voitures_circul:\r\n \r\n #Gestion de la ligne de dissuasion : un vehicule ne peut pas se \r\n #rabattre sur la voie la plus a droite au niveau de la ligne de \r\n #dissuasion s'il cherche a prendre la sortie\r\n if (vehi.prend_la_sortie and vehi.position < 400 and vehi.position > 800) or (vehi.prend_la_sortie and vehi.voie.id_voie != 1) or not(vehi.prend_la_sortie):\r\n #Le vehicule cherche a se rabattre a droite s'il le peut\r\n vehi.serrer_droite()\r\n \r\n #Le vehicule prend la sortie s'il le veut et s'il le peut\r\n vehi.prendre_la_sortie(pas)\r\n \r\n #Si le vehicule suit de trop pres le vehicule devant lui\r\n trop_proche = vehi.tester_environnement()\r\n if trop_proche:\r\n #Le vehicule cherche a depasser si possible (un vehicule \r\n #voulant prendre la sortie ne cherche pas a depasser, il reste\r\n #a droite)\r\n depassement_reussi = False\r\n if not(vehi.prend_la_sortie):\r\n depassement_reussi = vehi.depasser(nb_voies)\r\n 
#Si le vehicule n'a pas pu depasser, il ralentit\r\n if not depassement_reussi:\r\n vehi.ralentir(pas)\r\n \r\n #Si le vehicule n'a pas de vehicule proche devant lui\r\n else:\r\n #Le vehicule accelere (sans depasser sa limite)\r\n vehi.accelerer(vitesse_limite, pas)\r\n \r\n #La position du vehicule est mise a jour\r\n vehi.maj_position(pas)\r\n \r\n #Si le vehicule a franchi le troncon modelise\r\n if vehi.position > 1200 and vehi not in vehicule_hors:\r\n #On ajoute le vehicule a la liste des vehicules ayant franchi\r\n #le troncon (s'il n'y est pas deja)\r\n vehicule_hors.append(vehi)\r\n \r\n #On releve l'instant auquel le vehicule franchit le metre 1200 \r\n #(utile au calcul du temps de parcours)\r\n for couple in liste_temps:\r\n if couple[0] == vehi.nom:\r\n couple.append(instant)\r\n \r\n #Si le vehicule est plus de 300m apres le troncon modelise \r\n #(distance permettant de s'affranchir des effets de bords, \r\n #l'existence des vehicules immediatement apres le metre 1200 ayant\r\n #un impact sur la circulation des vehicules encore sur le troncon)\r\n if vehi.position > 1500:\r\n \r\n #On ajoute le vehicule a la liste des vehicules ayant rate la \r\n #sortie s'il est concerne\r\n if vehi.prend_la_sortie and vehi.voie.id_voie != -1: \r\n liste_sortie_manquee.append(vehi.nom)\r\n \r\n #On ajoute le vehicule a la liste des vehicules a supprimer\r\n a_supprimer.append(vehi)\r\n\r\n #Fin de la gestion de la circulation des vehicules pour l'instant donne\r\n \r\n #On supprime les vehicules hors du modele\r\n for vehi in a_supprimer:\r\n #On enleve le vehicule de la liste de vehicules de sa voie\r\n vehi.voie.liste_vehicules.remove(vehi)\r\n #On enleve le vehicule des voitures circulant sur la route\r\n liste_voitures_circul.remove(vehi)\r\n #On supprime le vehicule\r\n del(vehi)\r\n \r\n #On incremente le temps\r\n instant += pas\r\n \r\n #Affichages possibles (pour chaque instant)\r\n# print(instant) #echelle temporelle\r\n# print(compteur_vehicules) #nombre de vehicules deja crees\r\n\r\n #On genere le graphique des positions des vehicules pour l'instant \r\n #donne\r\n graph.plot(nb_voies, liste_voitures_circul, instant)\r\n plt.pause(0.2)\r\n plt.clf()\r\n plt.draw()\r\n \r\n #Fin de l'instant donne\r\n \r\n \r\n #Fin de la modelisation graphique\r\n plt.ioff()\r\n \r\n \r\n #Calcul du temps de parcours de chaque vehicule (temps mis par le vehicule \r\n #pour franchir le troncon de 1200m, instant auquel il a franchi le \r\n #metre 1200 - instant de creation au metre 0) \r\n temps_parcours = [triplet[2] - triplet[1] for triplet in liste_temps]\r\n #Calcul du temps de parcours moyen\r\n temps_parcours_moyen = sum(temps_parcours) / float(len(temps_parcours))\r\n \r\n #Affichage de statistiques sur les vehicules\r\n print(\"Temps de parcours minimal : {} s\\nTemps de parcours moyen : {} s\\nTemps de parcours maximal : {} s\\n\".format(min(temps_parcours), temps_parcours_moyen, max(temps_parcours)))\r\n print(\"Nombre de vehicules ayant rate la sortie : {}\".format(len(liste_sortie_manquee)))\r\n print(\"Vehicules ayant rate la sortie : {}\".format(liste_sortie_manquee))",
"def expandeaza(self):\r\n succesori = []\r\n nod_graf_curent = self.nod_graf.info\r\n matrice = NodParcurgere.problema.matrice_clasa\r\n\r\n for (i, j) in [(nod_graf_curent[0], nod_graf_curent[1] - 1),\r\n (nod_graf_curent[0], nod_graf_curent[1] + 1),\r\n (nod_graf_curent[0] - 1, nod_graf_curent[1]),\r\n (nod_graf_curent[0] + 1, nod_graf_curent[1])]: # parcurge lista celor 4 posibili succesori ai nodului curent (self)\r\n if 0 <= i < len(matrice) and 0 <= j < len(matrice[0]):\r\n if matrice[i][j] != \"liber\": # verifica daca pozitia succesorului este ocupata de vreun elev\r\n if ((matrice[i][j], matrice[nod_graf_curent[0]][nod_graf_curent[1]]) not in NodParcurgere.problema.lista_suparati) and ((matrice[nod_graf_curent[0]][nod_graf_curent[1]], matrice[i][j]) not in NodParcurgere.problema.lista_suparati): # verifica daca elevul reprezentand nodul curent si cu elevul ce reprezinta posibilul succesor nu sunt certati\r\n if i in [len(matrice) - 1, len(matrice) - 2]: # verfica daca succesorul se afla pe ultimele doua linii\r\n nod_info = (i, j)\r\n if i == nod_graf_curent[0] + 1:\r\n directie = \"v\"\r\n elif i == nod_graf_curent[0] - 1:\r\n directie = \"^\"\r\n elif j == nod_graf_curent[1] + 1:\r\n if j % 2 == 1:\r\n directie = \">\"\r\n else:\r\n directie = \">>\"\r\n else:\r\n if j % 2 == 0:\r\n directie = \"<\"\r\n else:\r\n directie = \"<<\"\r\n succesori.append((Nod(nod_info, self.fct_h(nod_info)), 1, directie))\r\n\r\n elif not ((j == nod_graf_curent[1] + 1 and j % 2 == 0) or (j == nod_graf_curent[1] - 1 and j % 2 == 1)): # in acest caz succesorul nu se afla pe ultimele doua linii\r\n if i == nod_graf_curent[0] + 1:\r\n directie = \"v\"\r\n elif i == nod_graf_curent[0] - 1:\r\n directie = \"^\"\r\n elif j == nod_graf_curent[1] + 1:\r\n directie = \">\"\r\n else:\r\n directie = \"<\"\r\n nod_info = (i, j)\r\n succesori.append((Nod(nod_info, self.fct_h(nod_info)), 1, directie))\r\n\r\n return succesori",
"def cerca(self,chiave):\r\n\r\n if self.albero.radice==None: #L'albero e' vuoto, ritorna None\"\r\n return None\r\n\r\n\r\n attuale=self.albero.radice\r\n while attuale!=None: #Cerca nell'albero fino a quando la radice non e' vuota\r\n chiaveCorrente=self.chiave(attuale) #memorizza la chiave del nodo corrente\r\n if chiave==chiaveCorrente: #la chiave che voglio cercare e' uguale a quella in scansione\r\n if attuale.status== True:\r\n return attuale\r\n else:\r\n return None\r\n if chiave<chiaveCorrente: #la chiave da cercare e' minore della chiave in scansione\r\n attuale=attuale.figlioSx #il nodo attuale allora e' il figlio sx\r\n else:\r\n attuale=attuale.figlioDx #il nodo attuale allore e' il figlio dx\r\n\r\n return None",
"def review_quant_cost(self):\n\n def get_historic_cost(product, date):\n \"\"\" me traigo el precio historico del inventario\n \"\"\"\n ps_obj = self.env['product.supplierinfo']\n domain = [('product_tmpl_id', '=', product.product_tmpl_id.id),\n ('date_start', '<=', date)]\n ps = ps_obj.search(domain, limit=1, order='date_start')\n\n return ps.price if ps else False\n\n bulonfer = self.env['res.partner'].search([('ref', '=', 'BULONFER')])\n\n ail_obj = self.env['account.invoice.line']\n quant_obj = self.env['stock.quant']\n ails = ail_obj.search([('product_margin', '<', 0),\n ('invoice_id.state', '!=', 'draft'),\n ('date_invoice', '>', '2020-01-01'),\n ('invoice_id.type', '=', 'out_invoice')],\n order=\"date_invoice\")\n\n for ail in ails:\n # verificar si es bulonfer\n if bulonfer not in ail.product_id.seller_ids.mapped('name'):\n continue\n _logger.info('FIX: PROCESANDO: %s %s' %\n (ail.date_invoice, ail.product_id.default_code))\n\n invoice_date = ail.invoice_id.date_invoice\n\n # obtener el costo al momento de la compra segun inventario\n historic_inv_cost = get_historic_cost(ail.product_id, invoice_date)\n\n # obtener el costo del producto segun los quants\n # historic_quant_cost = ail.product_id.standard_product_price\n\n # obtener el costo del producto segun bulonfer hoy\n today_cost = ail.product_id.bulonfer_cost\n\n # precio de lista de la factura\n list_price = ail.price_unit\n\n # obtener los quants correspondientes a ese producto\n domain = [('location_id.name', '=', 'Stock'),\n ('product_id.id', '=', ail.product_id.id),\n ('in_date', '<', invoice_date)]\n\n # verificar que no este mal el costo\n cost = min(historic_inv_cost, today_cost) \\\n if historic_inv_cost else today_cost\n\n if cost * 1.35 > list_price:\n _logger.info('FIX: AJUSTANDO: %s %s' % (cost, list_price))\n cost = list_price / 1.35\n\n quants = quant_obj.search(domain)\n for quant in quants:\n if quant.cost > cost:\n quant.cost = cost\n\n ail.product_margin = list_price / cost - 1 \\\n if cost and list_price else 1e10",
"def dessiner(self, point1, point2) :\n \n #definit la couleur de la grille\n glColor3f(self.couleur[0], self.couleur[1], self.couleur[2]);\n \n glLineWidth(1.0) #epaisseur du trait\n \n glPushMatrix() #sauvegarde la matrice de transformation\n \n #ajuste les coins sur un valeur discrete\n #correspondant au pas de la grille\n point1 = (int((point1[0] - self.pas) / self.pas) * self.pas,\n int((point1[1] - self.pas) / self.pas) * self.pas) \n point2 = (int((point2[0] + self.pas) / self.pas) * self.pas,\n int((point2[1] + self.pas) / self.pas) * self.pas) \n\n \n x = point1[0] #valeur courante de x\n #parcours les valeurs de x\n while x <= point2[0] :\n #dessine une ligne\n glBegin(GL_LINES)\n glVertex3f(x, point1[1], self.getHauteur())\n glVertex3f(x, point2[1], self.getHauteur())\n glEnd()\n x += self.pas\n \n y = point1[1] #valeur courante de y\n #parcours les valeurs de y\n while y <= point2[1] :\n #dessine une ligne\n glBegin(GL_LINES)\n glVertex3f(point1[0], y, self.getHauteur())\n glVertex3f(point2[0], y, self.getHauteur())\n glEnd()\n y += self.pas\n \n glPopMatrix() #restore la matrice de transformation ",
"def getIcosaedreVertices(self):\n #golden ratio\n phi = 0.5*(1+sqrt(5)) \n\n topPoints = [(phi,1,0)]+ [(phi,-1,0)]+ [(1,0,-phi)]+ [(0,phi,-1)]+ [(0,phi,1)]+ [(1,0,phi)]\n\n topPoints = np.array(topPoints)\n # rot clockwise arround Z pour amener le point 1 en position (1,0,0)\n sinth = 1/sqrt(1+phi**2)\n costh = phi*sinth\n scale = 1/sqrt(1+phi**2)\n rot_mat = scale*np.array([[costh,sinth,0],\n [-sinth, costh,0],\n [0,0,1]])\n\n for i in range(len(topPoints)):\n topPoints[i,:] = np.matmul(rot_mat,topPoints[i,:])\n\n # change de repere\n # X' = -Y, Y'=-Z, Z'=X\n tmp = np.zeros_like(topPoints)\n for i in range(topPoints.shape[0]):\n tmp[i,0] = -topPoints[i,1]\n tmp[i,1] = -topPoints[i,2]\n tmp[i,2] = topPoints[i,0]\n topPoints = tmp\n\n # points du bas de l'icosaedre\n bottomPoints = np.zeros_like(topPoints)\n for i in range(bottomPoints.shape[0]):\n bottomPoints[i,0] = -topPoints[i,0]\n bottomPoints[i,1] = topPoints[i,1]\n bottomPoints[i,2] = -topPoints[i,2]\n\n # icosaedre vertices\n icoPoints=np.vstack((topPoints,bottomPoints))\n\n #return\n return icoPoints",
"def expandeaza(self):\n succ = []\n nod_c = self.nod_graf\n arce = self.problema.arce\n for a in arce:\n if a.capat == nod_c.info:\n succ.append((problema.cauta_nod_nume(a.varf), a.cost))\n\n return succ",
"def get_ribs(self):\n # TODO: upravit kody\n if self.body is None:\n self.get_body()\n if self.lungs is None:\n self.get_lungs()\n\n chloc = chest_localization.ChestLocalization(bona_object=self, data3dr_tmp=self.data3dr)\n\n body = chloc.clear_body(self.body)\n coronal = self.dist_coronal(return_in_working_voxelsize=True)\n\n final_area_filter = chloc.area_filter(self.data3dr, body, self.lungs, coronal)\n location_filter = chloc.dist_hull(final_area_filter)\n intensity_filter = chloc.strict_intensity_filter(self.data3dr)\n deep_filter = chloc.deep_struct_filter_old(self.data3dr) # potrebuje upravit jeste\n\n ribs = intensity_filter & location_filter & final_area_filter & body & deep_filter\n\n #ribs_sum = intensity_filter.astype(float) + location_filter.astype(float) + final_area_filter.astype(float) + deep_filter.astype(float)\n\n # oriznuti zeber (a take hrudniku) v ose z\n z_border = chloc.process_z_axe(ribs, self.lungs, \"001\")\n ribs[0:z_border, :, :] = False\n final_area_filter[0:z_border, :, :] = False\n\n #chloc.print_it_all(ss, data3dr_tmp, final_area_filter*2, pattern+\"area\")\n #chloc.print_it_all(self, self.data3dr, ribs*2, pattern+\"thr\")\n #chloc.print_it_all(self, self.data3dr>220, ribs*3, pattern)\n\n # zebra\n self.ribs = ribs\n # hrudnik\n self.chest = final_area_filter\n\n return ribs",
"def Iterreno(self):\r\n try: #*100 por causa da porcentagem\r\n return ((self.PV1.CotaTerreno - self.PV2.CotaTerreno)/self.L)*100\r\n except Exception as e:\r\n try_Except(e)",
"def realizar_movimiento(nueva_posicion_cabeza, tablero, posicion_serpiente, posicion_fruta, especial_largo):\n\tcolor_verde = '\\033[92m'\n\tcolor_normal = '\\033[0m'\n\tposicion_serpiente.insert(0, nueva_posicion_cabeza)\n\tif posicion_fruta != posicion_serpiente[0] and especial_largo != 1: #no llamo al metodo remover cola si se uso el especial de crecer y no si comio fruta\n\t\tremover_cola_vieja(tablero, posicion_serpiente)\t\n\tif especial_largo == -2: #removemos dos veces la cola si se uso el especial de decrecer 2\n\t\tfor i in range(2): remover_cola_vieja(tablero, posicion_serpiente)\t\n\tfor parte in posicion_serpiente:\n\t\tif parte is posicion_serpiente[0]: tablero[parte[0]][parte[1]] = color_verde + \"o\" + color_normal; continue\n\t\ttablero[parte[0]][parte[1]] = color_verde + \"#\" + color_normal",
"def _get_to_invoice_qty(self):\n for line in self:\n if line.order_id.state in ['sale', 'done']:\n if line.product_id.invoice_policy == 'order':\n line.di_qte_a_facturer_un_saisie = line.di_qte_un_saisie - line.di_qte_un_saisie_fac\n line.di_poin_a_facturer = line.di_poin - line.di_poin_fac\n line.di_poib_a_facturer = line.di_poib - line.di_poib_fac\n line.di_nb_pieces_a_facturer = line.di_nb_pieces - line.di_nb_pieces_fac\n line.di_nb_colis_a_facturer = line.di_nb_colis - line.di_nb_colis_fac\n line.di_nb_palette_a_facturer = line.di_nb_palette - line.di_nb_palette_fac\n else:\n line.di_qte_a_facturer_un_saisie = line.di_qte_un_saisie_liv - line.di_qte_un_saisie_fac\n line.di_poin_a_facturer = line.di_poin_liv - line.di_poin_fac\n line.di_poib_a_facturer = line.di_poib_liv - line.di_poib_fac\n line.di_nb_pieces_a_facturer = line.di_nb_pieces_liv - line.di_nb_pieces_fac\n line.di_nb_colis_a_facturer = line.di_nb_colis_liv - line.di_nb_colis_fac\n line.di_nb_palette_a_facturer = line.di_nb_palette_liv - line.di_nb_palette_fac\n else:\n line.di_qte_a_facturer_un_saisie = 0.0\n line.di_poin_a_facturer = 0.0\n line.di_poib_a_facturer = 0.0\n line.di_nb_pieces_a_facturer = 0\n line.di_nb_colis_a_facturer = 0\n line.di_nb_palette_a_facturer = 0.0 \n super(SaleOrderLine, self)._get_to_invoice_qty()",
"def deplacer(self,i,j,nouv_i,nouv_j):\r\n coul_piece = self.cases[i][j].get_coul()\r\n etat_piece = self.cases[i][j].get_etat()\r\n self.cases[nouv_i][nouv_j].set_coul(coul_piece)\r\n self.cases[nouv_i][nouv_j].set_etat(etat_piece)\r\n self.cases[i][j].liberer_case()\r\n\r\n coul_piece_prise = None\r\n etat_piece_prise = None\r\n \r\n #Promotion\r\n if (coul_piece ==\"blanc\" and nouv_i==7) or (coul_piece==\"noir\" and nouv_i==0):\r\n self.cases[nouv_i][nouv_j].promotion()\r\n \r\n #Prise d'une piece\r\n if abs(nouv_i-i)==2:\r\n \"\"\"une piece a ete attrapee\"\"\"\r\n coul_piece_prise = self.cases[(nouv_i+i)//2][(nouv_j+j)//2].get_coul()\r\n etat_piece_prise = self.cases[(nouv_i+i)//2][(nouv_j+j)//2].get_etat()\r\n self.cases[(nouv_i+i)//2][(nouv_j+j)//2].liberer_case()\r\n \r\n #On ajoute a l'historique les changements du plateau\r\n self.historique.append([etat_piece,coul_piece,i,j,nouv_i,nouv_j,etat_piece_prise,coul_piece_prise])\r\n \r\n self.changer_tour()",
"def equazioneRettaPerDuePunti(vert1,vert2):\n\n\tx1=vert1[0]\n\tx2=vert2[0]\n\ty1=vert1[1]\n\ty2=vert2[1]\n\tm=0\n\tq=0\n\n\t# Se i due punti hanno la stessa ascissa, la retta che li comprende e' parallela all'asse y\n\t# Se i due punti hanno la stessa ordinata, la retta che li comprende e' parallela all'asse x\n\tif x1==x2:\n\t\tretta = [1,0,x1]\n\t\tprint\"x =\",x1\n\telif y1==y2:\n\t\tprint\"y =\",y1\n\t\tretta = [0,1,y1]\n\telse:\n\t\tm=(float(y2)-float(y1))/(float(x2)-float(x1))\n\t\tq=float(y1)-m*float(x1)\n\t\tretta = [-m,1,q]\n\n\treturn retta",
"def creaFalda(vert1, vert2, angolo, altezzaFalda, direzione):\n\tlinea = MKPOL([[vert1,vert2],[[1,2]],None])\n\n\tif vert1[1]>vert2[1]:\n\t\tx=vert1[0]\n\t\ty=vert2[1]\n\telse:\n\t\tx=vert2[0]\n\t\ty=vert1[1]\n\n\tvert3 = [x,y]\n\n\t#AB = sqrt[(x2- x1)^2 + (y2- y1)^2]\n\tdistv1v2 = sqrt((vert1[0]-vert2[0])*(vert1[0]-vert2[0])+(vert1[1]-vert2[1])*(vert1[1]-vert2[1]))\n\tdistv1v3 = sqrt((vert1[0]-vert3[0])*(vert1[0]-vert3[0])+(vert1[1]-vert3[1])*(vert1[1]-vert3[1]))\n\tdistv2v3 = sqrt((vert2[0]-vert3[0])*(vert2[0]-vert3[0])+(vert2[1]-vert3[1])*(vert2[1]-vert3[1]))\n\t\n\t#distv2v3 = distv1v2 * math.cos(a)\n\t#math.cos(a) = distv2v3/distv1v2\n\ta = math.asin(distv2v3/distv1v2)\n\n\tprint a* 180/PI\n\n\tb = PI/2-a\n\n\tprint b * 180/PI\n\n\tdistv2v4 = altezzaFalda * math.cos(angolo)\n\tprint \"dist\", distv2v4\n\n\taltezzaPerpendicolareFalda = sqrt(altezzaFalda*altezzaFalda-distv2v4*distv2v4)\n\tdistv2v5 = distv2v4 * math.cos(b)\n\tprint \"dist\", distv2v5\n\tdistv4v5 = sqrt(distv2v4*distv2v4-distv2v5*distv2v5)\n\tprint \"dist\", distv4v5\n\n\tif direzione==1:\n\t\tvert6 = [vert2[0]+distv2v5,vert2[1]+distv4v5,altezzaPerpendicolareFalda]\n\t\tvert7 = [vert1[0]+distv2v5,vert1[1]+distv4v5,altezzaPerpendicolareFalda]\n\telif direzione==2:\n\t\tvert6 = [vert2[0]-distv2v5,vert2[1]+distv4v5,altezzaPerpendicolareFalda]\n\t\tvert7 = [vert1[0]-distv2v5,vert1[1]+distv4v5,altezzaPerpendicolareFalda]\n\telif direzione==3:\n\t\tvert6 = [vert2[0]-distv2v5,vert2[1]-distv4v5,altezzaPerpendicolareFalda]\n\t\tvert7 = [vert1[0]-distv2v5,vert1[1]-distv4v5,altezzaPerpendicolareFalda]\n\telif direzione==4:\n\t\tvert6 = [vert2[0]+distv2v5,vert2[1]-distv4v5,altezzaPerpendicolareFalda]\n\t\tvert7 = [vert1[0]+distv2v5,vert1[1]-distv4v5,altezzaPerpendicolareFalda]\n\n\tprint vert6\n\tverts = [vert1,vert2,vert6,vert7]\n\tprova = MKPOL([verts,[[1,2,3,4]],None])\n\tprint distv4v5\n\t#VIEW(prova)\n\n\treturn verts\n\n\tlinea = PROD([linea,QUOTE([2])])\n\tlinea = R([3,2])(angolo)(linea)\n\n\t#VIEW(linea)",
"def rebond():\n global balle, compteur\n x0, y0, x1, y1 = canvas.coords(balle[0])\n if x0 <= 0 or x1 >= 600:\n balle[1] = -balle[1]\n compteur += 1\n print(compteur)\n if y1 <= 0:\n canvas.coords(balle[0], (x0, 360, x1, 400))\n if y0 >= 400:\n canvas.coords(balle[0], (x0, -40, x1, 0))",
"def _carta_a_pila(self, origen, destino):#igual a solitario_claseico.py(no hace falta cambiarlo, se los dejo como comentario)\n if origen.es_vacia():\n raise SolitarioError(\"El origen está vacío\")\n\n destino.apilar(origen.tope())\n origen.desapilar()\n\n if origen in self.mesa.pilas_tablero and not origen.es_vacia() and origen.tope().boca_abajo:\n origen.tope().voltear()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a Cache Item representing the specified key. | def get_item(self, key):
cPickle_key = self.normalize_key(key)
md5_key = hashlib.md5(cPickle_key).hexdigest()
document = self.collection.find_one({"md5":md5_key, "key": cPickle_key})
if document != None:
item = cPickle.loads(str(document['item']))
item.isHit = True
return item
else:
item = CacheItem()
item.key = key
return item | [
"def get(self, key):\n #return none if the item isn't in the cache\n if key not in self.items:\n return None\n\n #retrieve the item from the dictionary\n item = self.items[key]\n\n #move it to the front of the list since it is the\n #most recently accessed item\n self._move_to_head(item)\n return item",
"def get_item(self, key):\n\t\tif not key in self.items: return None\n\t\treturn self.items[ key ]",
"def Lookup(self, key):\n return CacheReference(self, key)",
"def get_cache(cls, key):\n return cls._instance(key)._cache",
"def get_cached_artifact(self, key):\n return self._artifacts_manager.artifacts[key]",
"def __getitem__(self, key):\n\n # check for slycat path\n self.check_fs_path()\n\n # is item in cache?\n if key in self:\n\n # get hash and value\n digest = self.digest_hash(key)\n value = self._loaded[digest].value\n expired = self._loaded[digest].expired()\n\n # if expired, erase and return None\n if expired:\n self.expire(digest)\n return None\n\n else:\n return None\n\n # cherrypy.log.error(\"[CACHE] Retrieving %s from cache.\" % str(digest))\n\n return value",
"def get(self, bucket, key):\n\n return self._cache[bucket].get(key, None)",
"def get(self, key):\n try:\n instance = self._get_instance(key)\n except KeyError:\n raise KeyError('There is no object registered for the given\"%s\" key' % key)\n\n return instance",
"def __getitem__( self, key ):\n return self.read( key=key, default=None, raiseOnError=True )",
"def __getitem__(self, key: Union[Any, int]) -> Any:\n if isinstance(key, int):\n return self.contents[key]\n else:\n matches = [c for c in self.contents if c.name == key]\n if len(matches) == 0:\n raise KeyError(f'{key} is not in {self.__class__.__name__}')\n elif len(matches) == 1:\n return matches[0]\n else:\n return self.__class__(contents = matches)",
"def get(self, key):\n\n key = str(key)\n database = self._get_database()\n return database.get(key, None)",
"def from_key(key):\r\n model = get_model_from_key(key)\r\n if model is None:\r\n raise BadKeyError\r\n try:\r\n _, id = key.split(':', 2)\r\n id = int(id)\r\n except ValueError, TypeError:\r\n raise BadKeyError\r\n return model.objects.get_by_id(id)",
"def getItem(cType, cKey):\n return '%s#%s' % (cType, cKey)",
"def direct_get(self, key):\n return get_store_value(self.store, key)",
"def read(self, key):\n if key not in self.data:\n raise Exception('Cache miss for key {key}'.format(key=key))\n\n self.promote(key)\n return self.deque['value']",
"def get_value(key):\n data = cache.get(key)\n if data:\n return pickle.loads(data)",
"def get_object(bucket, key):\n return ObjectStore.get_object(bucket, key)",
"def __getitem__(self, key):\n if self.document_cache is None:\n return self.fetch_document(key, raw_results = False)\n try:\n return self.document_cache[key]\n except KeyError:\n document = self.fetch_document(key, raw_results = False)\n self.document_cache.cache(document)\n return document",
"def get(self, key):\n data = self.cache.get(key)\n if data is not None:\n return data.decode('base64')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
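
The get_item record above hashes a pickled key with MD5 and looks the blob up in a MongoDB collection; the surrounding class (collection, normalize_key, CacheItem) is not part of the entry, so the sketch below is a minimal dict-backed stand-in that follows the same normalize, hash, lookup flow. The names DictCachePool and save are illustrative assumptions, not taken from the dataset.

import hashlib
import pickle

class CacheItem:
    """Stand-in for the CacheItem type referenced above (assumed shape)."""
    def __init__(self, key=None, value=None, is_hit=False):
        self.key = key
        self.value = value
        self.isHit = is_hit

class DictCachePool:
    """Dict-backed pool mirroring the md5-of-pickled-key lookup."""
    def __init__(self):
        self._store = {}

    def normalize_key(self, key):
        # Pickling gives a stable byte form for arbitrary picklable keys.
        return pickle.dumps(key)

    def get_item(self, key):
        md5_key = hashlib.md5(self.normalize_key(key)).hexdigest()
        blob = self._store.get(md5_key)
        if blob is not None:
            item = pickle.loads(blob)
            item.isHit = True
            return item
        return CacheItem(key=key)          # miss: empty item, isHit stays False

    def save(self, item):
        md5_key = hashlib.md5(self.normalize_key(item.key)).hexdigest()
        self._store[md5_key] = pickle.dumps(item)

pool = DictCachePool()
print(pool.get_item("user:42").isHit)      # False -> miss
pool.save(CacheItem(key="user:42", value={"name": "Ada"}))
print(pool.get_item("user:42").isHit)      # True  -> hit

The isHit flag is what lets callers tell a real hit apart from the empty placeholder item returned on a miss.
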
Add Builders and construction variables for protoc to an Environment. | def generate(env):
try:
bld = env['BUILDERS']['Protoc']
except KeyError:
bld = ProtocBuilder
env['BUILDERS']['Protoc'] = bld
env['PROTOC'] = env.Detect(protocs) or 'protoc'
env['PROTOCFLAGS'] = SCons.Util.CLVar('')
env['PROTOCPROTOPATH'] = SCons.Util.CLVar('')
env['PROTOCCOM'] = '$PROTOC $PROTOCFLAGS ${PROTOCPYTHONOUTDIR and ("--python_out="+PROTOCPYTHONOUTDIR) or ""} ${PROTOCFDSOUT and ("-o"+PROTOCFDSOUT) or ""} ${SOURCES}'
env['PROTOCSRCSUFFIX'] = '.proto'
env['PROTOCPYTHONOUTDIR'] = '.' | [
"def setup_environment():",
"def base_setup(env, prereqs=None):\n\n if GetOption('help') or GetOption('clean'):\n return\n\n compiler = env['CC']\n\n build_type = env['BUILD_TYPE']\n print('Setting up compile environment for {}'.format(compiler))\n print(\"Build type is '{}'\".format(build_type))\n\n prev_compiler = env.get('BSETUP', False)\n if prev_compiler:\n if prev_compiler != compiler:\n print('Env is already setup for a different compiler')\n print('Env already setup')\n Exit(2)\n\n # Turn on -Wall first, then DESIRED_FLAGS may disable some of the options\n # that this brings in.\n env.Append(CCFLAGS=['-g',\n '-Wshadow',\n '-Wall',\n '-fpic'])\n\n env.AppendIfSupported(CCFLAGS=DESIRED_FLAGS)\n\n if build_type == 'debug':\n if compiler == 'gcc':\n env.AppendUnique(CCFLAGS=['-Og'])\n else:\n env.AppendUnique(CCFLAGS=['-O0'])\n else:\n if build_type == 'release':\n env.AppendUnique(CPPDEFINES='DAOS_BUILD_RELEASE')\n\n env.AppendUnique(CCFLAGS=['-O2'])\n env.AppendUnique(CPPDEFINES={'_FORTIFY_SOURCE':'2'})\n\n if build_type != 'release':\n env.AppendUnique(CPPDEFINES={'FAULT_INJECTION':'1'})\n\n env.AppendUnique(CPPDEFINES={'CMOCKA_FILTER_SUPPORTED':'0'})\n\n env.AppendUnique(CPPDEFINES='_GNU_SOURCE')\n\n cenv = env.Clone()\n cenv.Append(CFLAGS='-Werror')\n config = Configure(cenv)\n if config.CheckHeader('stdatomic.h'):\n config.Finish()\n env.AppendUnique(CPPDEFINES={'HAVE_STDATOMIC':'1'})\n elif prereqs:\n config.Finish()\n prereqs.require(env, 'openpa', headers_only=True)\n else:\n config.Finish()\n\n if compiler == 'icx' and not GetOption('no_rpath'):\n # Hack to add rpaths\n for path in env['ENV']['LD_LIBRARY_PATH'].split(':'):\n if 'oneapi' in path:\n env.AppendUnique(RPATH_FULL=[path])\n\n if GetOption('preprocess'):\n # Could refine this but for now, just assume these warnings are ok\n env.AppendIfSupported(CCFLAGS=PP_ONLY_FLAGS)\n\n env['BSETUP'] = compiler",
"def generate(env):\n\n env[\"KOTLINC\"] = _detect(env)\n\n env.SetDefault(\n KOTLINC=\"kotlinc\",\n KOTLINSUFFIX=\".kt\",\n KOTLINEXTENSION=\"Kt\",\n KOTLINCLASSSUFFIX=\".class\",\n KOTLINJARSUFFIX=\".jar\",\n KOTLINCFLAGS=SCons.Util.CLVar(),\n KOTLINJARFLAGS=SCons.Util.CLVar(),\n KOTLINRTJARFLAGS=SCons.Util.CLVar([\"-include-runtime\"]),\n KOTLINCCOM=\"$KOTLINC $KOTLINCFLAGS $SOURCE\",\n KOTLINCCOMSTR=\"\",\n KOTLINJARCOM=\"$KOTLINC $KOTLINJARFLAGS -d $TARGET $SOURCE\",\n KOTLINJARCOMSTR=\"\",\n KOTLINRTJARCOM=\"$KOTLINC $KOTLINRTJARFLAGS -d $TARGET $SOURCE\",\n KOTLINRTJARCOMSTR=\"\",\n )\n\n env.AddMethod(Kotlin, \"Kotlin\")\n env.AddMethod(KotlinJar, \"KotlinJar\")\n env.AddMethod(KotlinRuntimeJar, \"KotlinRuntimeJar\")",
"def _ConstructEnvVariablesPatch(env_ref,\n clear_env_variables,\n remove_env_variables,\n update_env_variables,\n release_track=base.ReleaseTrack.GA):\n env_obj = environments_api_util.Get(env_ref, release_track=release_track)\n initial_env_var_value = env_obj.config.softwareConfig.envVariables\n initial_env_var_list = (\n initial_env_var_value.additionalProperties\n if initial_env_var_value else [])\n\n messages = api_util.GetMessagesModule(release_track=release_track)\n env_cls = messages.Environment\n env_variables_cls = messages.SoftwareConfig.EnvVariablesValue\n entry_cls = env_variables_cls.AdditionalProperty\n\n def _BuildEnv(entries):\n software_config = messages.SoftwareConfig(\n envVariables=env_variables_cls(additionalProperties=entries))\n config = messages.EnvironmentConfig(softwareConfig=software_config)\n return env_cls(config=config)\n\n return ('config.software_config.env_variables',\n command_util.BuildFullMapUpdate(clear_env_variables,\n remove_env_variables,\n update_env_variables,\n initial_env_var_list, entry_cls,\n _BuildEnv))",
"def add_envvars(controller):\n envvars = lists(models(EnvVariable, controller=just(controller)))\n return envvars.map(lambda _: controller)",
"def setup_environment():\n global GIVEN_ENV\n GIVEN_ENV['env'] = env.copy()",
"def generate(env):\r\n\r\n version = env.get('DELPHI_VERSION', None) \r\n delphi = env.Dir(_getDelphiPath(env, version))\r\n env['DELPHI'] = delphi\r\n env['DELPHI_IDE'] = delphi.File(IDES.__dict__.get(version, IDES.DELPHI))\r\n env['DELPHI_BPGSUFFIX'] = BPGSUFFIX.__dict__.get(version, BPGSUFFIX.DELPHI) \r\n\r\n # Builder para los DPRs ...\r\n env['DCC32_BIN'] = delphi.File('BIN/DCC32.EXE')\r\n env['DCC32_DPR_MSG'] = ' * [DCC32] Compiling project: [$SOURCE] to: [$TARGET] using options: \"$DCC32_OPTS\" and defines: \"$DCC32_DEFINES\"'\r\n env['DCC32_DPK_MSG'] = ' * [DCC32] Compiling package: [$SOURCE] to: [$TARGET] using options: \"$DCC32_OPTS\" and defines: \"$DCC32_DEFINES\"'\r\n env['DCC32_INCLUDE'] = []\r\n env['DCC32_DCU'] = ''\r\n env['DCC32_DCP'] = ''\r\n env['DCC32_BPL'] = ''\r\n env['DCC32_RES'] = ['\"${DELPHI.abspath}/Lib\"']\r\n env['DCC32_UNIT'] = ['\"${DELPHI.abspath}/Lib\"']\r\n env['DCC32_SWITCHES'] = ''\r\n env['DCC32_OPTS'] = ''\r\n env['DCC32_USEPACKAGES']= 0\r\n env['DCC32_PACKAGES'] = []\r\n env['DCC32_DEFINES'] = []\r\n\r\n DelphiProgram = env.Builder(\r\n action = SCons.Action.Action(\r\n DelphiProgramCompileGenerator\r\n , generator = 1\r\n , cmdstr = \"$DCC32_DPR_MSG\"\r\n )\r\n , emitter = DelphiProgramEmitter\r\n , src_suffix = '.dpr'\r\n , target_suffix = '.exe'\r\n )\r\n\r\n env.InstallBuilder('DelphiProgram', DelphiProgram, ['DPR', 'DelphiProg'])\r\n \r\n # Builder para los DPRs ...\r\n DelphiLibrary = env.Builder(\r\n action = SCons.Action.Action(\r\n DelphiProgramCompileGenerator\r\n , generator = 1\r\n , cmdstr = \"$DCC32_DPR_MSG\"\r\n )\r\n , emitter = DelphiLibraryEmitter\r\n , src_suffix = '.dpr'\r\n , target_suffix = '.dll'\r\n )\r\n\r\n env.InstallBuilder('DelphiLibrary', DelphiLibrary, ['DelphiDLL', 'DelphiLib'])\r\n\r\n DelphiLocale = env.Builder(\r\n action = SCons.Action.Action(\r\n DelphiLocaleCompileGenerator\r\n , generator = 1\r\n , cmdstr = \"$DCC32_DPR_MSG\"\r\n )\r\n , emitter = DelphiLocaleEmitter\r\n , src_suffix = '.dpr'\r\n )\r\n\r\n env.InstallBuilder('DelphiLocale', DelphiLocale)\r\n\r\n # Builder para los DPRs ...\r\n DelphiActiveX = env.Builder(\r\n action = SCons.Action.Action(\r\n DelphiProgramCompileGenerator\r\n , generator = 1\r\n , cmdstr = \"$DCC32_DPR_MSG\"\r\n )\r\n , emitter = DelphiActiveXEmitter\r\n , src_suffix = '.dpr'\r\n , target_suffix = '.ocx'\r\n )\r\n\r\n env.InstallBuilder('DelphiActiveX', DelphiActiveX, ['DelphiControl'])\r\n \r\n # Builder para los DPRs ...\r\n DelphiPackage = env.Builder(\r\n action = SCons.Action.Action(\r\n DelphiPackageCompileGenerator\r\n , generator = 1\r\n , cmdstr = \"$DCC32_DPK_MSG\"\r\n )\r\n , emitter = DelphiPackageEmitter\r\n , src_suffix = '.dpk'\r\n , target_suffix = '.bpl'\r\n )\r\n\r\n env.InstallBuilder('DelphiPackage', DelphiPackage, ['DPK', 'BPL'])\r\n\r\n # Builder para la tlb...\r\n env['TLIB_BIN'] = env.Dir('$DELPHI').File('BIN/TLIBIMP.EXE')\r\n env['TLIB_FLAGS'] = '-P+ -Ha- -Hr- -Hs- -Pt+ -Cw+ -R-'\r\n env['TLIB_COM'] = '$TLIB_BIN $TLIB_FLAGS -D${TARGET.dir.abspath} ${SOURCE.abspath}'\r\n env['TLIB_MSG'] = '**** [TLIBIMP] Generating Delphi source for: [${SOURCE}] to: [${TARGET}]'\r\n \r\n\r\n DelphiTypelib = env.Builder(\r\n action = SCons.Action.Action('$TLIB_COM', '$TLIB_MSG')\r\n , single_source = 1\r\n , emitter = DelphiTypelibEmitter\r\n , target_suffix = '.pas'\r\n , src_suffix = ['.tlb', '.dll', '.ocx', '.exe']\r\n )\r\n\r\n env.InstallBuilder('DelphiTypelib', DelphiTypelib, ['DelphiTLIB', 'TLB'])\r\n\r\n # Builder para el res...\r\n env['BRCC32_BIN'] = 
env.Dir('$DELPHI').File('BIN/BRCC32.EXE')\r\n env['BRCC32_COM'] = '$BRCC32_BIN -fo$TARGET $SOURCE'\r\n env['BRCC32_MSG'] = \"**** [BRCC32]: Compiling resource: '$TARGET' from: '$SOURCE'\"\r\n \r\n DelphiResource = env.Builder(\r\n action = SCons.Action.Action('$BRCC32_COM', '$BRCC32_MSG')\r\n , src_suffix = '.rc'\r\n , target_suffix = '.res'\r\n )\r\n\r\n env.InstallBuilder('DelphiResource', DelphiResource, ['DelphiRES'])\r\n\r\n delphi_lib = env.Dir('${DELPHI}/Lib')\r\n \r\n dcps = glob.glob(delphi_lib.abspath + '/*.dcp')\r\n\r\n for dcp in dcps:\r\n (base, ext) = os.path.splitext(dcp)\r\n name = os.path.basename(base)\r\n env.Alias(name, dcp)\r\n\r\n SConsEnvironment.ReadDelphiOptions = _ReadDelphiOptions\r\n \r\n home = env.Dir(_getDelphiPath(env, version))\r\n ide = runWrapper(env, 'delphi', env.File('${DELPHI_IDE}'), '${BPGNAME}${DELPHI_BPGSUFFIX}')\r\n env.Depends(ide, env.Alias('prepare-delphi'))\r\n \r\n dump = env.Command(target = '$BUILDTMP/dump', source = None, action = _DumpDelphiPath)\r\n env.Alias('dump', dump)",
"def _build_environment(func, bound_args):\n spec = [(\"arg\" + str(i), t) for i, t in enumerate(bound_args)]\n\n exec_glbls = dict(spec=spec)\n exec_glbls[\"jitclass\"] = jitclass\n assign_env = \"; \".join(f\"self.arg{i} = arg{i}\" for i, t in enumerate(bound_args))\n env_args = \", \".join(f\"arg{i}\" for i, t in enumerate(bound_args))\n src = f\"\"\"\n@jitclass(spec)\nclass Environment():\n def __init__(self, {env_args}):\n {assign_env}\n pass\n\"\"\"\n exec_in_file(f\"{func.__name__}_Environment_{id(func)}\", src, exec_glbls)\n return exec_glbls[\"Environment\"]",
"def prepare_environment(self) -> None:\n pass",
"def __allocate_environment__(cls, options, test_driver):",
"def setup(self):\n # Write sensitive variables from the environment to specified files.\n # NOTE: Both the grid cert and the grid key are necessary for using the grid, EOS, etc!\n self.writeGridCertFromVariableToFile()\n self.writeGridKeyFromVariableToFile()\n self.writeSSHKeyFromVariableToFile()\n\n logger.debug(\"Setting up environment variables.\")\n # Setup environment\n self.setupRoot()\n self.setupReceiverPath()\n self.setupEnvironmentVars()",
"def create():\n return _DynamicEnvironment()",
"def prepare_opts(env):\n custom_opt = os.path.join(env['ROOT_DIR'], env['OPT_FILE'])\n vars = Variables(custom_opt, ARGUMENTS)\n\n if utility.is_windows():\n vc_versions = {\n '': '',\n 'VS2017': '14.1',\n 'VS2015': '14.0',\n 'VS2010': '10.0',\n 'VS2010Express': '10.0Exp',\n 'VS2005': '8.0',\n 'VS2005Express': '8.0Exp',\n }\n\n vars.Add(EnumVariable('msvc',\n ('Set use msvc version and specifies vs version.'),\n '',\n allowed_values = tuple(vc_versions.values()),\n ignorecase = 2))\n\n else:\n env['msvc'] = ''\n\n vars.Add(BoolVariable('rtos', 'Set to build rtos binaries', 0))\n vars.Add(BoolVariable('debug', 'Set to build debug version', 0))\n vars.Add(PathVariable('depends', 'Path to depends folder', os.path.join('$ROOT_DIR', 'depends'),\n PathVariable.PathIsDirCreate))\n\n vars.Add(PathVariable('essentials', 'Path to essentials folder', os.path.join('$ROOT_DIR', 'essentials'),\n PathVariable.PathIsDirCreate))\n\n vars.Add(PathVariable('build_path', 'Path to build folder', os.path.join('$ROOT_DIR', 'build'),\n PathVariable.PathIsDirCreate))\n vars.Add(PathVariable('export_path', 'Path to export folder', os.path.join('$ROOT_DIR', 'export'),\n PathVariable.PathIsDirCreate))\n\n vars.Update(env)\n\n cpp_defines = []\n for key, value in ARGLIST:\n if key == 'define':\n cpp_defines.append(value)\n\n incdirs = []\n for key, value in ARGLIST:\n if key == 'incdir':\n if os.path.isdir(value):\n incdirs.append(value)\n else:\n print(\"WARNING: incdir {0} does NOT exist!\".format(value))\n\n libdirs = []\n for key, value in ARGLIST:\n if key == 'libdir':\n if os.path.isdir(value):\n libdirs.append(value)\n else:\n print(\"WARNING: libdir {0} does NOT exist!\".format(value))\n\n if env['debug']:\n cpp_defines.extend(['DBG', 'DEBUG'])\n else:\n cpp_defines.append('NDEBUG')\n\n if utility.is_windows():\n cpp_defines.append('WIN32')\n\n if env['rtos']:\n cpp_defines.append('RTOS')\n os_types = {'rt-thread': 'RTOS_RTT',\n 'freertos': 'RTOS_FREERTOS',\n }\n\n keys = os_types.keys()\n vars.Add(EnumVariable('rtos_os',\n 'Set rtos type.\\n',\n keys[1],\n allowed_values = tuple(keys),\n ignorecase = 2))\n\n vars.Update(env)\n cpp_defines.append(os_types[env['rtos_os']])\n\n print(\"Default use rt-thread as rtos. 
Use 'rtos_os=' variable to change!\")\n\n try:\n # Call rtos specified scripts.\n rtos_module = importlib.import_module(env['rtos_os'])\n rtos_module.rtos_config(env)\n except Exception as e:\n print('Fail to do rtos specified config in {0}.py'.format(env['rtos_os']))\n print(e)\n sys.exit(1)\n\n env.AppendUnique(CPPDEFINES = cpp_defines)\n env.Replace(DEPENDS = '$depends')\n env.Replace(ESSENTIALS = '$essentials')\n env.Replace(EXPORT_PATH = '$export_path')\n # if integrate solution project, use build/ as variant dir\n # else use build/project_name as variant dir\n build_conf = 'debug' if env['debug'] else 'release'\n if File('SolutionMagicFile', '#').exists():\n env.Replace(BUILD_PATH = os.path.join('$build_path', build_conf))\n else:\n solo_sln_name = os.path.basename(Dir('#').abspath)\n env.Replace(BUILD_PATH = os.path.join('$build_path', build_conf, solo_sln_name))\n\n env.Append(CPPPATH = [os.path.join(env['DEPENDS'], 'include'),\n os.path.join(env['ESSENTIALS'], 'include')])\n\n env.Append(CPPPATH = incdirs)\n\n env.Append(LIBPATH = [os.path.join(env['DEPENDS'], 'lib'),\n os.path.join(env['ESSENTIALS'], 'lib')])\n\n env.Append(LIBPATH = libdirs)\n\n env.PrependENVPath('PATH', [os.path.join(env['DEPENDS'], 'bin'),\n os.path.join(env['ESSENTIALS'], 'bin')])\n\n Help(vars.GenerateHelpText(env))\n\n env['TEST'] = 'test' in COMMAND_LINE_TARGETS",
"def putenv(self):\n for c in self.config_options:\n c.putenv()",
"def environment(self):\n # FIXME: Cache value?\n build_env = Environment()\n\n # Top priority: profile\n profile_env = self._conanfile.buildenv\n build_env.compose_env(profile_env)\n\n build_requires = self._conanfile.dependencies.build.topological_sort\n for require, build_require in reversed(build_requires.items()):\n if require.direct: # Only buildenv_info from direct deps is propagated\n # higher priority, explicit buildenv_info\n if build_require.buildenv_info:\n build_env.compose_env(build_require.buildenv_info)\n # Lower priority, the runenv of all transitive \"requires\" of the build requires\n if build_require.runenv_info:\n build_env.compose_env(build_require.runenv_info)\n # Then the implicit\n os_name = self._conanfile.settings_build.get_safe(\"os\")\n build_env.compose_env(runenv_from_cpp_info(build_require, os_name))\n\n # Requires in host context can also bring some direct buildenv_info\n host_requires = self._conanfile.dependencies.host.topological_sort\n for require in reversed(host_requires.values()):\n if require.buildenv_info:\n build_env.compose_env(require.buildenv_info)\n\n return build_env",
"def build_envs(self) -> Tuple[EnvsDictType, EnvMetaDataType]:\n if \"dmcontrol\" not in self.config.env.name:\n raise NotImplementedError\n envs: EnvsDictType = {}\n mode = \"train\"\n env_id_list = self.config.env[mode]\n num_envs = len(env_id_list)\n seed_list = list(range(1, num_envs + 1))\n mode_list = [mode for _ in range(num_envs)]\n\n envs[mode] = hydra.utils.instantiate(\n self.config.env.builder,\n env_id_list=env_id_list,\n seed_list=seed_list,\n mode_list=mode_list,\n )\n envs[\"eval\"] = self._create_dmcontrol_vec_envs_for_eval()\n metadata = self.get_env_metadata(env=envs[\"train\"])\n return envs, metadata",
"async def build_context(self) -> InjectionContext:\n context = InjectionContext(settings=self.settings)\n context.settings.set_default(\"default_label\", \"Aries Cloud Agent\")\n\n if context.settings.get(\"timing.enabled\"):\n timing_log = context.settings.get(\"timing.log_file\")\n collector = Collector(log_path=timing_log)\n context.injector.bind_instance(Collector, collector)\n\n # Shared in-memory cache\n context.injector.bind_instance(BaseCache, InMemoryCache())\n\n # Global protocol registry\n context.injector.bind_instance(ProtocolRegistry, ProtocolRegistry())\n\n # Global goal code registry\n context.injector.bind_instance(GoalCodeRegistry, GoalCodeRegistry())\n\n # Global event bus\n context.injector.bind_instance(EventBus, EventBus())\n\n # Global did resolver\n context.injector.bind_instance(DIDResolver, DIDResolver([]))\n context.injector.bind_instance(DIDMethods, DIDMethods())\n context.injector.bind_instance(KeyTypes, KeyTypes())\n context.injector.bind_instance(\n BaseVerificationKeyStrategy, DefaultVerificationKeyStrategy()\n )\n\n await self.bind_providers(context)\n await self.load_plugins(context)\n\n # Set DIDComm prefix\n DIDCommPrefix.set(context.settings)\n\n return context",
"def setup_environment(args, destination):\n # type: (argparse.Namespace, str) -> Dict[str, str]\n\n use_wrapper = args.override_compiler or is_preload_disabled(sys.platform)\n\n environment = dict(os.environ)\n environment.update({'INTERCEPT_BUILD_TARGET_DIR': destination})\n\n if use_wrapper:\n environment.update(wrapper_environment(args))\n environment.update({\n 'CC': COMPILER_WRAPPER_CC,\n 'CXX': COMPILER_WRAPPER_CXX,\n })\n else:\n intercept_library = build_libear(args.cc, destination)\n if sys.platform == 'darwin':\n environment.update({\n 'DYLD_INSERT_LIBRARIES': intercept_library,\n 'DYLD_FORCE_FLAT_NAMESPACE': '1'\n })\n else:\n environment.update({'LD_PRELOAD': intercept_library})\n\n return environment",
"def populate_jinja_environment(self, env):\n env.filters['registry'] = self.registry\n env.globals['flattened_url'] = self.flattened_url\n env.globals['new_etcd_discovery_token'] = self.new_etcd_discovery_token\n env.globals['load_coreos_ami'] = self.load_coreos_ami_id\n env.globals['dockersystemd'] = self._dockersystemd_template"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
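
The generate(env) record above wires a Protoc builder and its construction variables into an SCons environment (ProtocBuilder and the protocs list live elsewhere in the same tool module). A hypothetical SConstruct using such a tool might look like the sketch below; the tool path, the .proto file, and the output directory are assumptions for illustration, and the exact Protoc call signature depends on the builder's emitter, which the entry does not show.

# SConstruct (sketch) -- assumes the tool above is saved as
# site_scons/site_tools/protoc.py and that a protoc binary is on PATH.
env = Environment(tools=['default', 'protoc'],
                  toolpath=['site_scons/site_tools'])

# PROTOCPYTHONOUTDIR defaults to '.', so generated *_pb2.py files land next
# to the sources unless it is overridden per call, as here.
env.Protoc([], 'proto/addressbook.proto',
           PROTOCPROTOPATH=['proto'],
           PROTOCPYTHONOUTDIR='build/gen')
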
Returns the inode of the parent directory. Raises an error if called on the root. | def __get_parent_inode(self, path):
if path == "/":
raise FileSystemError("No parent directory.")
parent_dir = Inode.selectBy(inode_num=0).orderBy("-rev_id")[0]
for fn in split_path(path)[:-1]:
tmp = Dentry.selectBy(parent=parent_dir, filename=fn)
if tmp.count() == 0:
raise FileSystemError("file not found.")
parent_dir = Inode.selectBy(inode_num=tmp[0].inode_num).\
orderBy("-rev_id")[0]
return parent_dir.inode_num | [
"def get_parent_dir(path):\n\treturn os.path.dirname(os.path.abspath(path))",
"def inode(self):\n return self._dir_info[\"file_id\"].get_value()",
"def __get_inode(self, path):\n if path == \"/\":\n return 0\n parent_dir = Inode.selectBy(inode_num=self.__get_parent_inode(path)).\\\n orderBy(\"-rev_id\")[0]\n tmp = Dentry.selectBy(parent=parent_dir,\n filename=split_path(path)[-1])\n if tmp.count() == 0:\n raise FileSystemError(\"file not found.:\",path)\n ret = Inode.selectBy(inode_num=tmp[0].inode_num).orderBy(\"-rev_id\")[0]\n return ret.inode_num",
"def parent(self):\n if self.is_leaf:\n return self.relative('.')\n return self.relative('..')",
"def f_get_parent(self):\n if self.v_is_root:\n raise TypeError('Root does not have a parent')\n elif self.v_location == '':\n return self.v_root\n else:\n return self.v_root.f_get(self.v_location, fast_access=False, shortcuts=False)",
"def nid_parent(self):\n return NID(self.data[24:28])",
"def lookup(self, parent_inode_id, name, ctx=None):\n logger.debug(\"LOOKUP entry for parent inode:{},name:{}\".format(parent_inode_id, name))\n if name == '.':\n inode_id = parent_inode_id\n elif name == '..':\n inode_id = self._meta_store.get_inode(parent_inode_id).id\n else: \n inode_id = self._meta_store.get_inode_id(parent_inode_id, name)\n if inode_id is None:\n logger.error(\"Entry not found for parent inode:{},name:{} in LOOKUP\".format(parent_inode_id, name))\n raise llfuse.FUSEError(errno.ENOENT)\n self._meta_store.get_inode(inode_id).lookup_count += 1\n # increment lookup counter when we lookup file/folder\n return self.getattr(inode_id, ctx)",
"def _get_parentFolder(self) -> \"adsk::core::Ptr< adsk::core::DataFolder >\" :\n return _core.DataFolder__get_parentFolder(self)",
"def get_parent(self, idx):\n if (idx == 0):\n return None # Root of tree does not have a parent\n return (idx - 1) // 2",
"def GetParentFileEntry(self):\n location = getattr(self.path_spec, u'location', None)\n if location is None:\n return\n\n parent_location = self._file_system.DirnamePath(location)\n if parent_location is None:\n return\n if parent_location == u'':\n parent_location = self._file_system.PATH_SEPARATOR\n\n parent_path_spec = getattr(self.path_spec, u'parent', None)\n path_spec = zip_path_spec.ZipPathSpec(\n location=parent_location, parent=parent_path_spec)\n return ZipFileEntry(self._resolver_context, self._file_system, path_spec)",
"def get_parent_pid(self):\n if not self.h_process:\n self.open()\n\n NT_SUCCESS = lambda val: val >= 0\n\n pbi = (c_int * 6)()\n size = c_int()\n\n # Set return value to signed 32bit integer.\n NTDLL.NtQueryInformationProcess.restype = c_int\n\n ret = NTDLL.NtQueryInformationProcess(self.h_process,\n 0,\n byref(pbi),\n sizeof(pbi),\n byref(size))\n\n if NT_SUCCESS(ret) and size.value == sizeof(pbi):\n return pbi[5]\n\n return None",
"def get_parent_path(base, directory_name):\n done = False\n while not done:\n base = os.path.dirname(base)\n if base == \"/\":\n return None\n if os.path.split(base)[-1] == directory_name:\n done = True\n else:\n done = False\n return base",
"def parent_pid_organization_id(self) -> str:\n return pulumi.get(self, \"parent_pid_organization_id\")",
"def GetParentFileEntry(self):\n location = getattr(self.path_spec, u'location', None)\n if location is None:\n return\n\n parent_location = self._file_system.DirnamePath(location)\n if parent_location is None:\n return\n if parent_location == u'':\n parent_location = self._file_system.PATH_SEPARATOR\n\n parent_path_spec = getattr(self.path_spec, u'parent', None)\n path_spec = tar_path_spec.TARPathSpec(\n location=parent_location, parent=parent_path_spec)\n return TARFileEntry(self._resolver_context, self._file_system, path_spec)",
"def parent_element_id(self) -> int:\n return self._parent_element_id",
"def get_parent_folder_path(self):\n result = None\n model_folder_list = self.get_model_folders()\n if model_folder_list:\n result = ''\n if len(model_folder_list) > 1:\n for folder in model_folder_list[-2]:\n result += '/' + folder\n if len(result) == 0:\n result = '/'\n return result",
"def tree_parent_direction(root):\n leafnodes_list = get_leafnodes(root)\n leafnodesposition_list = [node.position for node in leafnodes_list]\n print('len(leafnodes_list) ',len(leafnodes_list),[ll.position for ll in leafnodes_list],root.position )\n if len(leafnodes_list) == 1 and root in leafnodes_list:\n return np.mean(leafnodesposition_list,axis=0)-root.parent_edge.startbracnch.position\n if root.parent_edge:\n return np.mean(leafnodesposition_list,axis=0)-root.parent_edge.startbracnch.position\n return np.mean(leafnodesposition_list,axis=0)-root.position",
"def test_get_parent_dir(self):\n self.assertEquals(util.fileops.get_parent_dir('/home/brandon/test/test.py'),\n '/home/brandon/test')\n self.assertEquals(util.fileops.get_parent_dir('test.html'), '')",
"def get_parent_index(self, index):\n return int((index - 1 ) / 2) if index != 0 else None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
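
Both inode helpers in this group rely on the same convention: an inode is stored as many revision rows, and the current state is whichever row has the highest rev_id (Inode.selectBy(...).orderBy("-rev_id")[0]). The snippet below restates that selection rule over plain dictionaries, purely as an illustration; the rows and field values are made up.

# The current state of inode 0 is its highest-rev_id row.
inode_rows = [
    {"inode_num": 0, "rev_id": 1, "mode": 0o40755},
    {"inode_num": 0, "rev_id": 2, "mode": 0o40700},   # newer revision wins
    {"inode_num": 5, "rev_id": 1, "mode": 0o100644},
]

def latest_inode(rows, inode_num):
    candidates = [r for r in rows if r["inode_num"] == inode_num]
    if not candidates:
        raise KeyError("inode %d not found" % inode_num)
    return max(candidates, key=lambda r: r["rev_id"])

print(latest_inode(inode_rows, 0)["rev_id"])   # 2
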
Returns the inode of the object (file/directory). Raises an error if the object doesn't exist. | def __get_inode(self, path):
if path == "/":
return 0
parent_dir = Inode.selectBy(inode_num=self.__get_parent_inode(path)).\
orderBy("-rev_id")[0]
tmp = Dentry.selectBy(parent=parent_dir,
filename=split_path(path)[-1])
if tmp.count() == 0:
raise FileSystemError("file not found.:",path)
ret = Inode.selectBy(inode_num=tmp[0].inode_num).orderBy("-rev_id")[0]
return ret.inode_num | [
"def inode(self):\n return self._dir_info[\"file_id\"].get_value()",
"def inode(filesystem, mode=\"total\", host_os=detect_host_os()):\n validate_mode(mode, SIZE_CONVERSION_MODES)\n\n free, total = host_os.fs_inodes(filesystem)\n\n return convert_size(free, total, mode)",
"def find_by_inode(root, device, inode):\n try:\n x = FilesystemObject.query.filter_by( root_uuid=root.id, inode=inode, device=device).one_or_none()\n return x\n except Exception as e:\n print(e)\n return None",
"def GetFileInode(self, fname,user):\n\t\ttry:\n\t\t\tfid, fsize, digests = self.GetFileInfo(fname,user)\n\t\t\tif not fid:\n\t\t\t\treturn None, None, None\n\t\t\tquery = \"\"\"select address, port, cid from dnode, block where dnode.nid = block.nid and block.fid=%s and block.owner=%d\"\"\" %(fid,user)\n\t\t\tself.c.execute(query)\n\t\t\treturn fsize, self.c.fetchall(), digests\n\t\texcept: \n\t\t\traise",
"def open(self, inode_id, flags, ctx):\n logger.debug(\"OPEN inode:{}\".format(inode_id))\n # increment open counter when we open file\n self._meta_store.get_inode(inode_id).open_count += 1\n # increment lookup counter when we open file\n # self._meta_store.get_inode(inode_id).lookup_count += 1\n return inode_id",
"def open(self, key):\n this_dir = key or ''\n this_dir = os.path.normpath(this_dir)\n element = self._folder_meta['dir']\n lastobj_name = '/'\n lastobj_meta = element\n if this_dir:\n dir_pieces = this_dir.split(os.path.sep)\n dir_pieces, lastobj_name = dir_pieces[:-1], dir_pieces[-1]\n try:\n for piece in dir_pieces:\n if this_dir == os.curdir:\n continue\n element = element[piece]['dir']\n lastobj_meta = element[lastobj_name]\n except KeyError:\n raise IOError(\"{} not found in node {}\".format(this_dir, self.node_uuid))\n\n if 'obj' not in lastobj_meta:\n raise IOError(\"{} is not a file in node {}\".format(this_dir, self.node_uuid))\n\n obj_hashkey = lastobj_meta['obj']\n return self._container.get_object_stream(obj_hashkey)",
"def getFileObject(fs, directory, path, followSymlinks):\n try:\n if path == \"/\":\n fileObject = fs.rootDir\n elif path.startswith(\"/\"):\n fileObject = fs.rootDir.getFileAt(path[1:], followSymlinks)\n else:\n fileObject = directory.getFileAt(path, followSymlinks)\n except FileNotFoundError:\n raise FilesystemError(\"{0} does not exist.\".format(path))\n if fileObject.absolutePath == directory.absolutePath:\n fileObject = directory\n return fileObject",
"def open(self, oid):\n return open(self.path(oid), 'rb')",
"def path_nsobject(path=''):\n if not path.startswith('/'):\n raise U.HpssicError(\"An absolute path is required\")\n\n # break the path into its components with '/' at the beginning\n nl = ['/'] + [z for z in path.lstrip('/').split(os.path.sep)]\n parent_id = None\n\n # walk down the tree structure to the leaf\n for name in nl:\n (obj_id, parent_id) = nsobj_id(name=name, parent=parent_id)\n parent_id = obj_id\n\n # return the bottom object id\n return obj_id",
"def __get_parent_inode(self, path):\n if path == \"/\":\n raise FileSystemError(\"No parent directory.\")\n parent_dir = Inode.selectBy(inode_num=0).orderBy(\"-rev_id\")[0]\n for fn in split_path(path)[:-1]:\n tmp = Dentry.selectBy(parent=parent_dir, filename=fn)\n if tmp.count() == 0:\n raise FileSystemError(\"file not found.\")\n parent_dir = Inode.selectBy(inode_num=tmp[0].inode_num).\\\n orderBy(\"-rev_id\")[0]\n return parent_dir.inode_num",
"def _identify_file(self):\r\n # Note: this code reads the stream directly, without using ELFStructs,\r\n # since we don't yet know its exact format. ELF was designed to be\r\n # read like this - its e_ident field is word-size and endian agnostic.\r\n #\r\n self.stream.seek(0)\r\n magic = self.stream.read(4)\r\n elf_assert(magic == b'\\x7fELF', 'Magic number does not match')\r\n\r\n ei_class = self.stream.read(1)\r\n if ei_class == b'\\x01':\r\n self.elfclass = 32\r\n elif ei_class == b'\\x02':\r\n self.elfclass = 64\r\n else:\r\n raise ELFError('Invalid EI_CLASS %s' % repr(ei_class))\r\n\r\n ei_data = self.stream.read(1)\r\n if ei_data == b'\\x01':\r\n self.little_endian = True\r\n elif ei_data == b'\\x02':\r\n self.little_endian = False\r\n else:\r\n raise ELFError('Invalid EI_DATA %s' % repr(ei_data))",
"def _get_oid_for_managed_object_name(self, name):\n oid, label, suffix = self.mib_view_controller.getNodeName(name)\n return oid + suffix",
"def get_file_object(self):\n\n if self.file_obj == None:\n self._open()\n return self.file_obj",
"def lookup(self, parent_inode_id, name, ctx=None):\n logger.debug(\"LOOKUP entry for parent inode:{},name:{}\".format(parent_inode_id, name))\n if name == '.':\n inode_id = parent_inode_id\n elif name == '..':\n inode_id = self._meta_store.get_inode(parent_inode_id).id\n else: \n inode_id = self._meta_store.get_inode_id(parent_inode_id, name)\n if inode_id is None:\n logger.error(\"Entry not found for parent inode:{},name:{} in LOOKUP\".format(parent_inode_id, name))\n raise llfuse.FUSEError(errno.ENOENT)\n self._meta_store.get_inode(inode_id).lookup_count += 1\n # increment lookup counter when we lookup file/folder\n return self.getattr(inode_id, ctx)",
"def get_node_by_object(self, object: object):\n data = self.database.select(self.TABLE_NAME,\n {'target_id' : object.id,\n 'parent_type': object.object_type.value})\n\n return self.get_node(data[0]['id'])",
"def get_individual_icon_path(self):\n path = self.get_path()\n if path is None:\n return None\n\n # Try to read diricon.\n icon_path = os.path.join(path, '.DirIcon')\n if os.access(icon_path, os.R_OK):\n return icon_path\n\n return None",
"def _get_entry(self, parent_inode_id, name, ctx):\n inode_id = self._meta_store.get_inode_id(parent_inode_id, name)\n if inode_id is None:\n logger.error(\"Entry not found for parent inode:{},name:{} in LOOKUP\".format(parent_inode_id, name))\n raise llfuse.FUSEError(errno.ENOENT)\n return self.getattr(inode_id, ctx)",
"def get_object_id(input_image_path):\n input_image_path = input_image_path.replace('--original', '')\n file_name = input_image_path.split('/')[-1]\n object_id = '.'.join(file_name.split('/')[-1].split('.')[:-1])\n return object_id",
"def identity(self) -> Optional['outputs.AmlFilesystemIdentityResponse']:\n return pulumi.get(self, \"identity\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
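
__get_inode above resolves a path by first locating the parent directory (via __get_parent_inode) and then following the final Dentry. The dict-based resolver below shows the same walk without the SQLObject layer; split_path is assumed to return the non-empty path components, matching how it is used in these entries.

dentries = {
    0: {"home": 1},          # root directory is inode 0
    1: {"alice": 2},         # /home
    2: {"notes.txt": 7},     # /home/alice
}

def split_path(path):
    # Assumed behaviour of the helper used above: non-empty components only.
    return [p for p in path.split("/") if p]

def resolve(path):
    if path == "/":
        return 0
    inode = 0
    for name in split_path(path):
        children = dentries.get(inode, {})
        if name not in children:
            raise FileNotFoundError(path)
        inode = children[name]
    return inode

print(resolve("/home/alice/notes.txt"))   # 7
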
Reads the selected file. If the file is on the server, it is downloaded and read without being saved locally. If it is a local file, it is read locally. | def read(self, path, length, offset):
#Create a list of available files
T = atpy.Table(SERVER_LOCATION + 'QUERY?query=files_list&format=list',type='ascii')
#Check is file is local
if not (path[1:] in T['col3']):
if not path in self.__openfiles:
self.open(path, 0)
return self.__openfiles[path].read(length, offset)
#File is not local so open and read from server
else:
urlString = SERVER_LOCATION + 'RETRIEVE?file_id=' + path[1:]
ht = None
ht = urllib2.urlopen(urlString)
return ht.read(length) | [
"def _read_file(self):\n with open(self.file, 'r') as the_file:\n content = the_file.read()\n return content",
"def read_file( self, url ):\n return urlopen( url ).read()",
"def _read(url):\n if os.path.exists(url): \n file_obj = open(url, 'r') \n file_body = file_obj.read() \n file_obj.close() \n #start_response('200 OK', [('Content-Type', content_type)]) \n \n return file_body \n else: \n return None;",
"def serve_file(self):\n path = self.translate_path(self.path)\n try:\n f = open(path, 'rb')\n except IOError:\n raise\n else:\n return SimpleHTTPRequestHandler.do_GET(self)",
"def serve_file(self):\n path = self.translate_path(self.path)\n try:\n open(path, 'rb')\n except IOError:\n raise\n else:\n return SimpleHTTPRequestHandler.do_GET(self)",
"def readFile(self, mode = 'r'):\n\n # Read import file\n try:\n file = open(self.path, mode = mode)\n fileContent = file.read()\n file.close()\n\n return fileContent\n except Exception as e:\n print(\"The file `%s` could not be opened.\" % (self.path));\n print(e)\n sys.exit()",
"def read_file(file_path):\n try:\n with open(file_path, \"r\") as iFile:\n content = iFile.read()\n return content\n except:\n return None",
"def get_file_content(path_or_url, as_text=True):\n if path_or_url.startswith(\"https://\"):\n if TRACE_DEEP:\n print(f\"Fetching: {path_or_url}\")\n _headers, content = get_remote_file_content(url=path_or_url, as_text=as_text)\n return content\n\n elif path_or_url.startswith(\"file://\") or (\n path_or_url.startswith(\"/\") and os.path.exists(path_or_url)\n ):\n return get_local_file_content(path=path_or_url, as_text=as_text)\n\n else:\n raise Exception(f\"Unsupported URL scheme: {path_or_url}\")",
"def __readfile(self):\n raise NotImplementedError",
"def __read_file(self, path: str, binary: bool):\n mode = \"rb\" if binary else \"r\"\n encoding = None if binary else \"utf-8\"\n\n with self.__lock:\n if not os.path.isfile(path):\n return None\n\n with open(path, mode, encoding=encoding) as data_file:\n if binary:\n return data_file.read()\n\n return json.load(data_file)",
"def read_file(text_url, report_path=''):\n path = os.path.join(report_path, text_url) + '.txt'\n\n if os.access(path, os.R_OK):\n with open(path) as file_pointer:\n return file_pointer.read()\n return text_url",
"def get_file(self, filename):\n try:\n to_file = open(os.path.expanduser(self.local_directory + filename),\n \"wb\")\n except IOError:\n print self.local_directory + \" is missing!\"\n return\n\n f, metadata = self.api_client.get_file_and_metadata(\n self.current_path + \"/\" + filename)\n to_file.write(f.read())",
"def read_file(input_file):\n if isinstance(input_file, file):\n return input_file.read()\n else:\n with open(input_file, 'r') as f:\n contents = f.read()\n f.close()\n return contents",
"def _read_file(self):\n filetype = self._file[self._file.rfind('.'):]\n if filetype == '.dft':\n return GalileoReader(self._file).create_faulttree()\n raise UnsupportedFileTypeException('{}'.format(filetype))",
"def retrieveFile(_filename):\n print(\"Contacting server...\")\n url = \"http://cycada.ml/game/\" + _filename + \".txt\"\n _data = req.get(url)\n if _data.status_code == 200:\n print(\"File found!\")\n return _data.content.decode('utf-8') #convert to string from binary object\n else:\n print(\"Wrong ID, try relaunching.\")\n time.sleep(2)\n return False",
"def get_file(self, user: User): \n self.send(user, CODE['fileTransferS'])\n message = self.get(user)\n if message == CODE['error']:\n return\n self.send(user, CODE['ready'])\n file_name, file_size = message.split(SEPERATOR)\n file_name = os.path.basename(file_name)\n file_size = int(file_size)\n\n with open(file_name, \"wb\") as file:\n gotten = 0\n while gotten < file_size:\n bytes_read = user.client_socket.recv(BUFFER)\n file.write(bytes_read)\n gotten += len(bytes_read)\n \n command = self.get(user)\n if(command == CODE['all_sent']):\n print(\"Recieved!\")\n \n self.send_file(user, file_name, str(file_size))",
"def receive_file_from_socket(self):\n pass",
"def read(self, file_path, is_binary=False):\n with codecs.open(file_path, 'rb' if is_binary else 'r', 'utf-8') as fp:\n file_content = fp.read()\n\n fp.close()\n\n return file_content",
"def read_file(self, f, source=None):\r\n if source is None:\r\n try:\r\n source = f.name\r\n except AttributeError:\r\n source = '<???>'\r\n self._read(f, source)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
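
In the read() record above, a remote file is streamed straight from the server and the offset argument is only applied to local files; the server read starts at byte 0. If the server honoured HTTP Range requests, an offset-aware remote read could look like the sketch below. It uses Python 3's urllib.request (the entry itself uses urllib2), and the URL and the Range support are assumptions, not something the entry guarantees.

import urllib.request

SERVER_LOCATION = "http://example.invalid/"     # placeholder URL

def read_remote(file_id, length, offset):
    # Ask only for the requested byte window; needs server-side Range support.
    req = urllib.request.Request(
        SERVER_LOCATION + "RETRIEVE?file_id=" + file_id,
        headers={"Range": "bytes=%d-%d" % (offset, offset + length - 1)},
    )
    with urllib.request.urlopen(req) as resp:
        return resp.read(length)
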
Removes an object from oldPath and places it at newPath. | def rename(self, oldPath, newPath):
conn = sqlhub.getConnection()
trans = conn.transaction()
now = time.time()
i_num = self.__get_inode(oldPath)
parent_i_num = self.__get_parent_inode(oldPath)
parent_i = Inode.selectBy(inode_num=parent_i_num).orderBy("-rev_id")[0]
dl = Dentry.selectBy(parent=parent_i)
new_i = Inode(inode_num=parent_i.inode_num,
rev_id=parent_i.rev_id+1,
uid=parent_i.uid, gid=parent_i.gid,
atime=now, mtime=parent_i.mtime,
ctime=parent_i.ctime, size=parent_i.size,
mode=parent_i.mode, connection=trans)
for de in dl:
if de.inode_num != i_num:
Dentry(parent=new_i, filename=de.filename,
inode_num=de.inode_num, connection=trans)
parent_i_num = self.__get_parent_inode(newPath)
parent_i = Inode.selectBy(inode_num=parent_i_num).orderBy("-rev_id")[0]
Dentry(parent=new_i, filename=split_path(newPath)[-1],
inode_num=i_num, connection=trans)
old_i = Inode.selectBy(inode_num=i_num).orderBy("-rev_id")[0]
Inode(inode_num=old_i.inode_num,
rev_id=old_i.rev_id+1,
uid=old_i.uid, gid=old_i.gid,
atime=now, mtime=old_i.mtime,
ctime=old_i.ctime, size=old_i.size,
mode=old_i.mode, connection=trans)
trans.commit()
if oldPath in self.__openfiles:
while not self.__openfiles[oldPath].is_close():
self.__openfiles[oldPath].close()
del self.__openfiles[oldPath] | [
"def removed(object, oldParent=None, oldName=None):",
"def update_path(self, new_path):\n self.ui.systemTreeWidget.clear()\n self.ui.selectedTreeWidget.clear()\n clear_list(self.widgets)\n self.ui.pathLineEdit.setText(new_path)\n self.fb.set_current_path(new_path)\n self.populate_system_tree()",
"def deleted(self, src, path):",
"def drop_refs(self, from_path: str, is_ancillary: bool = False,\n is_removed: bool = False, is_system: bool = False) -> None:\n self.__api.files.pop(from_path, is_ancillary=is_ancillary,\n is_removed=is_removed, is_system=is_system)",
"def save_old(self, path):\n self.save_old_dictionary(path)",
"def moved(object, oldParent, oldName, newParent, newName):",
"def link(self, oldPath, newPath):\n conn = sqlhub.getConnection()\n trans = conn.transaction()\n now = time.time()\n i_num = self.__get_inode(oldPath)\n parent_i_num = self.__get_parent_inode(newPath)\n parent_i = Inode.selectBy(inode_num=parent_i_num).orderBy(\"-rev_id\")[0]\n dl = Dentry.selectBy(parent=parent_i)\n new_i = Inode(inode_num=parent_i.inode_num,\n rev_id=parent_i.rev_id+1,\n uid=parent_i.uid, gid=parent_i.gid,\n atime=now, mtime=parent_i.mtime,\n ctime=parent_i.ctime, size=parent_i.size,\n mode=parent_i.mode, connection=trans)\n for de in dl:\n Dentry(parent=new_i, filename=de.filename,\n inode_num=de.inode_num, connection=trans)\n Dentry(parent=new_i, filename=split_path(newPath)[-1],\n inode_num=i_num, connection=trans)\n trans.commit()",
"def _path(*components, **oldnew):\n old, new = oldnew.get('old', ''), oldnew.get('new', '')\n return os.path.normpath(re.sub('^' + re.escape(old), new,\n os.path.join(*components)))",
"def simplify_path(self, old_path):\n path = re.sub(r\"//+\", \"/\", old_path)\n path = re.sub(r\"/\\./+\", \"/\", path)\n new_path = re.sub(r\"/[^/]+/\\.\\./\", \"/\", path)\n while (new_path != path):\n path = new_path\n new_path = re.sub(r\"/[^/]+/\\.\\./\", \"/\", path)\n if (new_path != old_path):\n log.debug(\"simplified path from \" + old_path + \n \" to \" + new_path,'simplify_path')\n return path",
"def testRemoveObject(self):\n if not self._repo.getCapabilities()['Unfiling']:\n pytest.skip('This repository does not allow unfiling, skipping')\n\n subFolder1 = self._testFolder.createFolder('sub1')\n doc = subFolder1.createDocument('testdoc1')\n assert len(subFolder1.getChildren()) == 1\n subFolder2 = self._testFolder.createFolder('sub2')\n assert len(subFolder2.getChildren()) == 0\n subFolder2.addObject(doc)\n assert len(subFolder2.getChildren()) == 1\n assert (\n subFolder1.getChildren()[0].name ==\n subFolder2.getChildren()[0].name)\n subFolder2.removeObject(doc)\n assert len(subFolder2.getChildren()) == 0\n assert len(subFolder1.getChildren()) == 1\n assert doc.name == subFolder1.getChildren()[0].name",
"def recursiveRemove(path):",
"def move_back(self):\n new_path = self.fb.get_parent_path()\n self.update_path(new_path)",
"def edit_node_path(self, node, new_path):\n\n if self.check_node_existance(node):\n self.nodes[node]['path'] = new_path\n\n else:\n raise NodeNotFound('No node under name \"{0}\" found.'.format(node))",
"def cleanup_path(path, trans, remove_nans, clip_rect, snap_mode, stroke_width, simplify, return_curves, sketch): # real signature unknown; restored from __doc__\n pass",
"def drop_refs(self, from_path: str, is_ancillary: bool = False,\n is_removed: bool = False, is_system: bool = False) -> None:",
"def test_remove_duplicate_path(self):\n output = StringIO()\n call_command('remove_duplicate_paths', verbosity=2, stdout=output)\n\n self.assertEqual(Path.objects.count(), 5)\n self.assertCountEqual((self.p1, self.p3, self.p5, self.p6, self.p8),\n list(Path.objects.all()))\n self.assertIn(\"Deleting path\",\n output.getvalue())\n self.assertIn(\"duplicate paths have been deleted\",\n output.getvalue())",
"def path_rm(ctx, module_name, src_path, version):\n module_tree = ctx.obj.check_module_tree()\n loader = ctx.obj.check_module(\n module_tree, module_name, version, parse_error_handler=log_error_and_exit\n )\n path_obj = Path(src_path)\n loader.remove_path(path_obj)\n loader.save_module_file()",
"def remove_service_path(self, src, dst):\n if self.client is None:\n logger.warning('There is no connection to the SDN controller')\n return False\n\n path_id = (src, dst)\n if path_id in self.paths:\n del self.paths[path_id]\n\n src_interface = self.network.resolve_interface(src.id)\n dst_interface = self.network.resolve_interface(dst.id)\n msg = self.msg_gen.new_delpath_message(\n src_interface.to_dict(),\n dst_interface.to_dict()\n )\n self.client.send(msg.pack().encode())\n return True",
"def update_image_path(self, new_path):\n raise NotImplementedError(\"Updating image paths is not yet supported.\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
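
rename() above never mutates rows in place: it writes a new revision of the source directory without the moved dentry, re-adds the name for the destination, and bumps the moved inode's revision. The dict sketch below shows that shape in miniature; it is a conceptual illustration only and leaves out the SQLObject transaction and the open-file bookkeeping.

def rename(dentries, revs, old_parent, old_name, new_parent, new_name):
    # 1. take the name out of the old parent, 2. add it under the new parent,
    # 3. record new revisions for the touched directory and the moved inode.
    inode = dentries[old_parent].pop(old_name)
    dentries.setdefault(new_parent, {})[new_name] = inode
    revs[old_parent] = revs.get(old_parent, 0) + 1
    revs[inode] = revs.get(inode, 0) + 1
    return inode

dentries = {0: {"a.txt": 3}, 1: {}}
revs = {}
rename(dentries, revs, 0, "a.txt", 1, "b.txt")
print(dentries)   # {0: {}, 1: {'b.txt': 3}}
print(revs)       # {0: 1, 3: 1}
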
Creates a link from the object at oldPath to an object at newPath. | def link(self, oldPath, newPath):
conn = sqlhub.getConnection()
trans = conn.transaction()
now = time.time()
i_num = self.__get_inode(oldPath)
parent_i_num = self.__get_parent_inode(newPath)
parent_i = Inode.selectBy(inode_num=parent_i_num).orderBy("-rev_id")[0]
dl = Dentry.selectBy(parent=parent_i)
new_i = Inode(inode_num=parent_i.inode_num,
rev_id=parent_i.rev_id+1,
uid=parent_i.uid, gid=parent_i.gid,
atime=now, mtime=parent_i.mtime,
ctime=parent_i.ctime, size=parent_i.size,
mode=parent_i.mode, connection=trans)
for de in dl:
Dentry(parent=new_i, filename=de.filename,
inode_num=de.inode_num, connection=trans)
Dentry(parent=new_i, filename=split_path(newPath)[-1],
inode_num=i_num, connection=trans)
trans.commit() | [
"def symlink(self, oldPath, newPath):\n mode = 0o644|stat.S_IFLNK\n i_num = self.mknod(newPath, mode, 0)\n self.write(newPath, oldPath, 0)",
"def rename(self, oldPath, newPath):\n \n conn = sqlhub.getConnection()\n trans = conn.transaction()\n now = time.time()\n i_num = self.__get_inode(oldPath)\n parent_i_num = self.__get_parent_inode(oldPath)\n parent_i = Inode.selectBy(inode_num=parent_i_num).orderBy(\"-rev_id\")[0]\n dl = Dentry.selectBy(parent=parent_i)\n new_i = Inode(inode_num=parent_i.inode_num,\n rev_id=parent_i.rev_id+1,\n uid=parent_i.uid, gid=parent_i.gid,\n atime=now, mtime=parent_i.mtime,\n ctime=parent_i.ctime, size=parent_i.size,\n mode=parent_i.mode, connection=trans)\n for de in dl:\n if de.inode_num != i_num:\n Dentry(parent=new_i, filename=de.filename,\n inode_num=de.inode_num, connection=trans)\n parent_i_num = self.__get_parent_inode(newPath)\n parent_i = Inode.selectBy(inode_num=parent_i_num).orderBy(\"-rev_id\")[0]\n Dentry(parent=new_i, filename=split_path(newPath)[-1],\n inode_num=i_num, connection=trans)\n old_i = Inode.selectBy(inode_num=i_num).orderBy(\"-rev_id\")[0]\n Inode(inode_num=old_i.inode_num,\n rev_id=old_i.rev_id+1,\n uid=old_i.uid, gid=old_i.gid,\n atime=now, mtime=old_i.mtime,\n ctime=old_i.ctime, size=old_i.size,\n mode=old_i.mode, connection=trans)\n trans.commit()\n if oldPath in self.__openfiles:\n while not self.__openfiles[oldPath].is_close():\n self.__openfiles[oldPath].close()\n del self.__openfiles[oldPath]",
"def moveLinksOnDisk(self, source, target):\n\t\timport revitron\n\n\t\tsource = re.sub(r'\\\\$', '', source) + os.sep\n\t\tsource = '^' + re.escape(source)\n\t\ttarget = re.sub(r'\\\\$', '', target)\n\t\ttarget = re.sub(r'\\\\', os.sep, target)\n\n\t\tfor _id in self.refs:\n\n\t\t\trefId = revitron.DB.ElementId(_id)\n\t\t\tref = self.refs[_id]\n\n\t\t\tif str(ref.type) in ['RevitLink', 'CADLink']:\n\n\t\t\t\tif re.search(source, ref.path, re.IGNORECASE):\n\t\t\t\t\tnewPath = target + os.sep + re.sub(\n\t\t\t\t\t source,\n\t\t\t\t\t '',\n\t\t\t\t\t ref.path,\n\t\t\t\t\t re.IGNORECASE\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tnewPath = target + os.sep + os.path.basename(ref.path)\n\n\t\t\t\tprint(newPath)\n\n\t\t\t\tif newPath != ref.path:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.makedirs(os.path.dirname(newPath))\n\t\t\t\t\t\tprint('Created {}'.format(os.path.dirname(newPath)))\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\t\t\t\ttry:\n\t\t\t\t\t\tshutil.copyfile(ref.path, newPath)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\n\t\t\t\t\tself.data.SetDesiredReferenceData(\n\t\t\t\t\t refId,\n\t\t\t\t\t revitron.DB.FilePath(newPath),\n\t\t\t\t\t revitron.DB.PathType.Absolute,\n\t\t\t\t\t True\n\t\t\t\t\t)\n\n\t\tself.write()",
"def link(srcPath, destPath):\n import os\n return os.link(srcPath, destPath)",
"def _update_links_tons(self, new_path, od):\n\n old_links = od.links\n new_links = new_path.links\n\n original_ton = od.tons.get_original()\n derived_ton = od.tons.get_derived()\n\n # remove tons from modal_network old_links used by od\n for old_id_link in old_links:\n if old_id_link not in new_links:\n old_link = self.modal_network.get_link(old_id_link, od.gauge)\n old_link.tons.remove_original(ton=original_ton,\n categories=od.category,\n id_ods=od.id)\n old_link.tons.remove_derived(ton=derived_ton,\n categories=od.category,\n id_ods=od.id)\n\n # add derived tons to modal_network new_links, used by the new path\n for new_id_link in new_links:\n if new_id_link not in new_links:\n new_link = self.modal_network.get_link(new_id_link, od.gauge)\n new_link.tons.add_original(ton=original_ton,\n categories=od.category,\n id_ods=od.id)\n new_link.tons.add_derived(ton=derived_ton,\n categories=od.category,\n id_ods=od.id)",
"def create_symlink(from_path='', to_path=''):\n\n try:\n os.symlink(from_path, to_path)\n except Exception as exc:\n logger.warning('failed to create symlink from %s to %s: %s', from_path, to_path, exc)\n else:\n logger.debug('created symlink from %s to %s', from_path, to_path)",
"def symlink(srcPath, destPath):\n import os\n return os.symlink(srcPath, destPath)",
"def edit_node_path(self, node, new_path):\n\n if self.check_node_existance(node):\n self.nodes[node]['path'] = new_path\n\n else:\n raise NodeNotFound('No node under name \"{0}\" found.'.format(node))",
"def relink_all(cls, old_file, new_file):\n assert old_file.checksum == new_file.checksum\n assert old_file.id\n assert new_file.id\n\n with db.session.begin_nested():\n ObjectVersion.query.filter_by(file_id=str(old_file.id)).update(\n {ObjectVersion.file_id: str(new_file.id)}\n )",
"def copy_hardlink(file_src, file_dst, new_file):\n if os.path.isfile(file_dst):\n if os.path.exists(file_dst):\n os.unlink(file_dst)\n os.link(file_src, file_dst)\n elif os.path.isdir(file_dst):\n if os.path.exists(new_file):\n os.unlink(new_file)\n os.link(file_src, new_file)\n else:\n os.link(file_src, file_dst)",
"def make_link(self, node0, node1):\r\n Link(node0, node1)",
"def create_link(new_link: LinkCreate, db: Session = Depends(get_db)):\n link = db.query(Link).where(Link.url == new_link.url).first()\n if link:\n # URL already exists, return the link for it.\n return link\n\n # Make sure the given link starts with http or https so it doesn't break the FastAPI RedirectResponse.\n if not new_link.url.startswith('http://') and not new_link.url.startswith('https://'):\n new_link.url = 'http://' + new_link.url\n link = Link(**new_link.dict())\n db.add(link)\n db.commit()\n db.refresh(link)\n return link",
"def link_file(from_file, to_file):\n import os\n if not os.path.exists(to_file):\n if not os.path.islink(to_file):\n os.symlink(from_file, to_file)",
"def SymLinkRel( fromFN, toFN, getio = None ):\n\n if getio: return dict( depends_on = toFN, creates = fromFN,\n attrs = dict( piperun_short = True ) )\n \n os.symlink( os.path.relpath( toFN, os.path.dirname( fromFN ) ),\n fromFN )",
"def _link_destination(self, path):\n path = os.path.expanduser(path)\n return os.readlink(path)",
"def newPath(self, new_path = None, new_fullpath = None, force = False, always_copy = False, always_move = False, leave_symlink = False, create_dirs = True, getPathPreview = False):\n\n if always_copy and always_move:\n raise ValueError(\"Both always_copy and always_move cannot be specified\")\n\n if (new_path is None and new_fullpath is None) or (new_path is not None and new_fullpath is not None):\n raise ValueError(\"Specify only new_dir or new_fullpath\")\n\n old_dir, old_filename = os.path.split(self.filename)\n if new_path is not None:\n # Join new filepath to old one (to handle realtive dirs)\n new_dir = os.path.abspath(os.path.join(old_dir, new_path))\n\n # Join new filename onto new filepath\n new_fullpath = os.path.join(new_dir, old_filename)\n\n else:\n # Join new filepath to old one (to handle realtive dirs)\n new_fullpath = os.path.abspath(os.path.join(old_dir, new_fullpath))\n\n new_dir = os.path.dirname(new_fullpath)\n\n\n if len(Config['move_files_fullpath_replacements']) > 0:\n p(\"Before custom full path replacements: %s\" % (new_fullpath))\n new_fullpath = applyCustomFullpathReplacements(new_fullpath)\n new_dir = os.path.dirname(new_fullpath)\n\n p(\"New path: %s\" % new_fullpath)\n\n if getPathPreview:\n return new_fullpath\n\n if create_dirs:\n p(\"Creating directory %s\" % new_dir)\n try:\n os.makedirs(new_dir)\n except OSError, e:\n if e.errno != 17:\n raise\n\n if os.path.isfile(new_fullpath):\n # If the destination exists, raise exception unless force is True\n if not force:\n raise OSError(\"File %s already exists, not forcefully moving %s\" % (\n new_fullpath, self.filename))\n\n if same_partition(self.filename, new_dir):\n if always_copy:\n # Same partition, but forced to copy\n copy_file(self.filename, new_fullpath)\n else:\n # Same partition, just rename the file to move it\n rename_file(self.filename, new_fullpath)\n\n # Leave a symlink behind if configured to do so\n if leave_symlink:\n symlink_file(new_fullpath, self.filename)\n else:\n # File is on different partition (different disc), copy it\n copy_file(self.filename, new_fullpath)\n if always_move:\n # Forced to move file, we just trash old file\n p(\"Deleting %s\" % (self.filename))\n delete_file(self.filename)\n\n # Leave a symlink behind if configured to do so\n if leave_symlink:\n symlink_file(new_fullpath, self.filename)\n\n self.filename = new_fullpath",
"def _path(*components, **oldnew):\n old, new = oldnew.get('old', ''), oldnew.get('new', '')\n return os.path.normpath(re.sub('^' + re.escape(old), new,\n os.path.join(*components)))",
"def transfer_physical_locations(old_obj, new_obj, apps, schema_editor):\n locations = get_physical_locations(old_obj, apps, schema_editor)\n alter_physical_locations(new_obj, locations, apps, schema_editor)",
"def update(self, new_link: \"Link\") -> \"Link\":\n log(f\"Updating '{self.name}' with new data\")\n new_data = {k: v for k, v in new_link.toJSON().items() if v != getattr(self, k)}\n resp = requests.put(self.api_url, json=new_data, headers=headers)\n if resp.status_code != 200:\n error(f\"Failed to update link {new_link}\")\n breakpoint()\n exit(1)\n log(f\"Link updated successfully\")\n return Link.from_api(resp.json())"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
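
link() above makes the target inode reachable under a second name by adding one more Dentry carrying the same inode_num; no file data is copied. The toy version below shows both names resolving to the same inode. For contrast, the symlink record that follows does not share an inode at all: it creates a fresh node via mknod and writes the old path into it as the link's contents.

dentries = {0: {"a.txt": 3}}     # directory 0 contains a.txt -> inode 3

def hard_link(dentries, parent, existing, new_parent, new_name):
    # A hard link is just another dentry pointing at the same inode number.
    dentries.setdefault(new_parent, {})[new_name] = dentries[parent][existing]

hard_link(dentries, 0, "a.txt", 0, "b.txt")
assert dentries[0]["a.txt"] == dentries[0]["b.txt"] == 3
print(dentries)   # {0: {'a.txt': 3, 'b.txt': 3}}
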
Creates a symbolic link from oldPath to newPath. | def symlink(self, oldPath, newPath):
mode = 0o644|stat.S_IFLNK
i_num = self.mknod(newPath, mode, 0)
self.write(newPath, oldPath, 0) | [
"def create_symlink(from_path='', to_path=''):\n\n try:\n os.symlink(from_path, to_path)\n except Exception as exc:\n logger.warning('failed to create symlink from %s to %s: %s', from_path, to_path, exc)\n else:\n logger.debug('created symlink from %s to %s', from_path, to_path)",
"def symlink(srcPath, destPath):\n import os\n return os.symlink(srcPath, destPath)",
"def link(self, oldPath, newPath):\n conn = sqlhub.getConnection()\n trans = conn.transaction()\n now = time.time()\n i_num = self.__get_inode(oldPath)\n parent_i_num = self.__get_parent_inode(newPath)\n parent_i = Inode.selectBy(inode_num=parent_i_num).orderBy(\"-rev_id\")[0]\n dl = Dentry.selectBy(parent=parent_i)\n new_i = Inode(inode_num=parent_i.inode_num,\n rev_id=parent_i.rev_id+1,\n uid=parent_i.uid, gid=parent_i.gid,\n atime=now, mtime=parent_i.mtime,\n ctime=parent_i.ctime, size=parent_i.size,\n mode=parent_i.mode, connection=trans)\n for de in dl:\n Dentry(parent=new_i, filename=de.filename,\n inode_num=de.inode_num, connection=trans)\n Dentry(parent=new_i, filename=split_path(newPath)[-1],\n inode_num=i_num, connection=trans)\n trans.commit()",
"def generateSymbolicLink(path, output_dir, first_replacement=\"uploaded\",\n second_replacement=\"home/mediapanel\",\n lua_folder=\"themes\"):\n split_path = path.split(\"/\")\n replaced_index = split_path.index(first_replacement)\n replacement_dir = os.path.join(second_replacement, lua_folder, output_dir)\n split_path[replaced_index] = replacement_dir\n os.symlink(path, os.path.join(*split_path))",
"def link(srcPath, destPath):\n import os\n return os.link(srcPath, destPath)",
"def create_symlink(symlink_path, target_path):\n if current_system() == \"winnt\":\n os.remove(symlink_path)\n if target_path.endswith('.cmd'):\n shutil.copy(target_path, symlink_path)\n with open(symlink_path, 'w') as f:\n # create a cmd file to mimic how we do symlinks in linux\n f.writelines(['@echo off\\n', f'\"{target_path}\" %*'])\n else:\n target_path = str(pathlib.Path(target_path).resolve())\n if os.path.exists(symlink_path):\n os.remove(symlink_path)\n os.symlink(target_path, symlink_path)",
"def create_sym_link(source_file: str, dest_folder: str, dest_file: str = \"\", sudo: bool = False):\n\n if (not exists(dest_folder)): mkdir(dest_folder, sudo=sudo)\n\n run_command(f\"{'sudo' if sudo else ''} ln -sf {source_file} {dest_folder}/{dest_file}\")",
"def SymLinkRel( fromFN, toFN, getio = None ):\n\n if getio: return dict( depends_on = toFN, creates = fromFN,\n attrs = dict( piperun_short = True ) )\n \n os.symlink( os.path.relpath( toFN, os.path.dirname( fromFN ) ),\n fromFN )",
"def copy_symlink(file_src, file_dst, new_file):\n if os.path.isfile(file_dst):\n if os.path.exists(file_dst):\n os.unlink(file_dst)\n os.symlink(file_src, file_dst)\n elif os.path.isdir(file_dst):\n if os.path.exists(new_file):\n os.unlink(new_file)\n os.symlink(file_src, new_file)\n else:\n os.symlink(file_src, file_dst)",
"def copy_hardlink(file_src, file_dst, new_file):\n if os.path.isfile(file_dst):\n if os.path.exists(file_dst):\n os.unlink(file_dst)\n os.link(file_src, file_dst)\n elif os.path.isdir(file_dst):\n if os.path.exists(new_file):\n os.unlink(new_file)\n os.link(file_src, new_file)\n else:\n os.link(file_src, file_dst)",
"def _create_link(self, src, link, sudo=False):\n # non-absolute path links are converted to absolute\n # paths starting from ~\n if not os.path.isabs(link):\n link = os.path.expanduser(os.path.join('~', link))\n # create the parent directory of the link if necessary\n link_dir = os.path.dirname(link)\n if not os.path.exists(link_dir):\n if os.path.lexists(link_dir):\n os.remove(link_dir)\n os.makedirs(link_dir)\n\n if not os.path.exists(link) and not os.path.lexists(link):\n cmd = ['ln', '-s', src, link]\n if sudo:\n cmd = ['sudo'] + cmd\n subprocess.check_call(cmd)\n elif os.path.lexists(link):\n # if the location is NOT a link, delete the directory\n if not os.path.islink(link):\n sudo_cmd = 'sudo' if sudo else ''\n subprocess.check_call('%s rm -rf %s' % (sudo_cmd, link), shell=True)\n tmploc = '/tmp/%s_%d' % (self._extract_basename(link), int(time.time()))\n os.symlink(src, tmploc)\n cmd = ['/bin/mv', '-Tf', tmploc, os.path.abspath(link)]\n if sudo:\n cmd = ['sudo'] + cmd\n subprocess.check_call(cmd)\n else:\n err = 'Cannot create symlink to %s. Already a file or directory' % link\n raise Error(err)",
"def create_symlink(self):\n try:\n os.symlink(os.getenv(ENV_FFMPEG_PATH), f\"/home/abc/bin/{self.alias}\")\n except FileExistsError:\n pass",
"def symlink(source, link_name):\n global __CSL\n if __CSL is None:\n csl = ctypes.windll.kernel32.CreateSymbolicLinkW\n csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)\n csl.restype = ctypes.c_ubyte\n __CSL = csl\n flags = 0\n if source is not None and os.path.isdir(source):\n flags = 1\n if __CSL(link_name, source, flags) == 0:\n raise ctypes.WinError()",
"def link_file(from_file, to_file):\n import os\n if not os.path.exists(to_file):\n if not os.path.islink(to_file):\n os.symlink(from_file, to_file)",
"def _link_destination(self, path):\n path = os.path.expanduser(path)\n return os.readlink(path)",
"def symlink(self, link, existing, handler):\n self.java_obj.symlink(link, existing, AsyncHandler(handler))\n return self",
"def create_symlink(self, project_name):\n try:\n if self.alive:\n self.connection.send_new_symlink(project_name)\n except Exception:\n LOGGER.warning(ADD_SYMLINK_ERROR, project_name, exc_info=True)",
"def symlink(src, dst, target_is_directory=False, **kwargs):\n norm_dst = ntpath.normpath(dst)\n if not is_remote_path(norm_dst):\n raise ValueError(\"The link dst must be an absolute UNC path for where the link is to be created\")\n\n norm_src = ntpath.normpath(src)\n print_name = norm_src\n\n if not is_remote_path(norm_src):\n flags = SymbolicLinkFlags.SYMLINK_FLAG_RELATIVE\n substitute_name = norm_src\n dst_dir = ntpath.dirname(norm_dst)\n norm_src = ntpath.abspath(ntpath.join(dst_dir, norm_src))\n else:\n flags = SymbolicLinkFlags.SYMLINK_FLAG_ABSOLUTE\n substitute_name = \"\\\\??\\\\UNC\\\\\" + norm_src[2:]\n\n src_drive = ntpath.splitdrive(norm_src)[0]\n dst_drive = ntpath.splitdrive(norm_dst)[0]\n if src_drive.lower() != dst_drive.lower():\n raise ValueError(f\"Resolved link src root '{src_drive}' must be the same as the dst root '{dst_drive}'\")\n\n try:\n src_stat = stat(norm_src, **kwargs)\n except OSError as err:\n if err.errno != errno.ENOENT:\n raise\n else:\n # If the src actually exists, override the target_is_directory with whatever type src actually is.\n target_is_directory = py_stat.S_ISDIR(src_stat.st_mode)\n\n symlink_buffer = SymbolicLinkReparseDataBuffer()\n symlink_buffer[\"flags\"] = flags\n symlink_buffer.set_name(substitute_name, print_name)\n\n reparse_buffer = ReparseDataBuffer()\n reparse_buffer[\"reparse_tag\"] = ReparseTags.IO_REPARSE_TAG_SYMLINK\n reparse_buffer[\"data_buffer\"] = symlink_buffer\n\n co = CreateOptions.FILE_OPEN_REPARSE_POINT\n if target_is_directory:\n co |= CreateOptions.FILE_DIRECTORY_FILE\n else:\n co |= CreateOptions.FILE_NON_DIRECTORY_FILE\n raw = SMBRawIO(\n norm_dst, mode=\"x\", desired_access=FilePipePrinterAccessMask.FILE_WRITE_ATTRIBUTES, create_options=co, **kwargs\n )\n\n with SMBFileTransaction(raw) as transaction:\n ioctl_request(\n transaction,\n CtlCode.FSCTL_SET_REPARSE_POINT,\n flags=IOCTLFlags.SMB2_0_IOCTL_IS_FSCTL,\n input_buffer=reparse_buffer,\n )",
"def moveLinksOnDisk(self, source, target):\n\t\timport revitron\n\n\t\tsource = re.sub(r'\\\\$', '', source) + os.sep\n\t\tsource = '^' + re.escape(source)\n\t\ttarget = re.sub(r'\\\\$', '', target)\n\t\ttarget = re.sub(r'\\\\', os.sep, target)\n\n\t\tfor _id in self.refs:\n\n\t\t\trefId = revitron.DB.ElementId(_id)\n\t\t\tref = self.refs[_id]\n\n\t\t\tif str(ref.type) in ['RevitLink', 'CADLink']:\n\n\t\t\t\tif re.search(source, ref.path, re.IGNORECASE):\n\t\t\t\t\tnewPath = target + os.sep + re.sub(\n\t\t\t\t\t source,\n\t\t\t\t\t '',\n\t\t\t\t\t ref.path,\n\t\t\t\t\t re.IGNORECASE\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tnewPath = target + os.sep + os.path.basename(ref.path)\n\n\t\t\t\tprint(newPath)\n\n\t\t\t\tif newPath != ref.path:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.makedirs(os.path.dirname(newPath))\n\t\t\t\t\t\tprint('Created {}'.format(os.path.dirname(newPath)))\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\t\t\t\ttry:\n\t\t\t\t\t\tshutil.copyfile(ref.path, newPath)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\n\t\t\t\t\tself.data.SetDesiredReferenceData(\n\t\t\t\t\t refId,\n\t\t\t\t\t revitron.DB.FilePath(newPath),\n\t\t\t\t\t revitron.DB.PathType.Absolute,\n\t\t\t\t\t True\n\t\t\t\t\t)\n\n\t\tself.write()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Uses FUSE to mount the sqliteFS to the mount point. | def main():
usage = Fuse.fusage
sdfs = SqliteDumpFS(version="%prog"+fuse.__version__,
usage=usage,
dash_s_do='setsingle')
sdfs.parser.add_option(mountopt="db_path", metavar="PATH",default="")
sdfs.parse(values=sdfs, errex=1)
sdfs.main() | [
"def mount_ss(self):\n if match_fs(self.mp, ['nilfs', 'nilfs2']):\n self.mount_tmpfs()\n if not self.passive:\n self.thin_out_snapshots()\n self.do_mount_ss(False)",
"def __init__(self, *args, **kwargs):\n q.logger.log(\"Mounting file system\")\n super(MemFS, self).__init__(*args, **kwargs)",
"def ydl_fuse(d, root, rootbase, foreground=True, allow_other=False):\n\n\tif fuse is None:\n\t\traise Exception(\"Cannot run fuse, it is not installed\")\n\n\tfuse.FUSE(_ydl_fuse(d, rootbase), root, nothreads=True, foreground=foreground, allow_other=allow_other)",
"def mount(device, mountpoint, *args, readonly=False, mkfs=False):\n raise NotImplementedError(\"Contribute on github.com/alej0varas/pybolator\")",
"def start(self):\n sqliteFilename = CONFIG.get('output_sqlite', 'db_file')\n try:\n self.db = adbapi.ConnectionPool(\n 'sqlite3',\n database=sqliteFilename,\n check_same_thread=False\n )\n except sqlite3.OperationalError as e:\n log.msg(e)\n\n self.db.start()",
"def mount(path='/sd'):\n from machine import SD\n sd = SD()\n os.mount(sd, path)",
"def setup_fs():\n from fsspec.implementations.local import LocalFileSystem\n from pathlib import Path\n\n # Setup path to local folder structure, as if copied from a CANedge SD.\n # Assumes the folder is placed in same directory as this file\n fs = LocalFileSystem()\n\n return fs",
"def open ( filename ,\n mode = 'c' ,\n writeback = False ,\n root_only = False , *args ) :\n db_type = RootOnlyShelf if root_only else RootShelf\n return db_type ( filename ,\n mode ,\n writeback , * args )",
"def open(self):\n\n def open():\n filer.open_dir(self.get_path())\n\n if not self.is_mounted:\n self.mount(on_mount=open)\n else:\n open()",
"def mount_fs_on_all(filesystem):\n\n result = run(\"mmmount {0} -a\".format(filesystem))\n\n if result.return_code == 0:\n return True, \"Success\"\n else:\n return False, result",
"def connect_to_database_file(self):\r\n\r\n if os.path.exists(self.os.tmp_login_db_path):\r\n self.conn = sqlite3.connect(self.os.tmp_login_db_path)\r\n self.cursor = self.conn.cursor()\r\n else:\r\n print(\"File does not exist: {}\".format(self.os.tmp_login_db_path))",
"def db_handle(mode='r'):\n\n f = open('/dev/null', mode) # fallback file handle\n try:\n f = open('.fm', mode) # default use .fm\n except:\n home = os.environ.get('HOME') # build munged name\n fmdb = '.fmdb'\n persist = os.environ.get('PWD').replace('/', '_')\n fn = '%s/%s/%s' % (home, fmdb, persist)\n try:\n f = open(fn, mode) # try munged name\n except: # try making directory\n cmd_output('mkdir -p %s/%s' % (home, fmdb))\n try:\n f = open(fn, mode)\n except:\n pass # give up and return fallback\n return f",
"def test_actual_mountpoint(self):\n mount_root = FilePath(self.mktemp())\n pool_name = create_zfs_pool(self)\n pool = StoragePool(reactor, pool_name, mount_root)\n volume = Volume(uuid=u\"my-uuid\", name=u\"myvolumename\", _pool=pool)\n\n d = pool.create(volume)\n\n def gotFilesystem(filesystem):\n self.assertEqual(\n filesystem.get_path().path,\n subprocess.check_output(\n [b\"zfs\", b\"get\", b\"-H\", b\"-o\", b\"value\",\n b\"mountpoint\", filesystem.name]).strip())\n d.addCallback(gotFilesystem)\n return d",
"def install_storage():\n metadata = get_storage_metadata()\n\n # Install packages\n command = 'yum install -y nfs-utils'\n run(command)\n\n # Configure mount points\n\n file_name = '/etc/fstab'\n flags = 'nfs rw,async,hard,intr,noexec 0 0'\n mount_points = [volume['local'] for volume in metadata['volumes'].values()]\n\n # Make sure mount points are added only once to fstab\n with open(file_name, 'r+') as stream:\n # read current mount points\n existing_lines = stream.read().split('\\n')\n\n # prune lines to be replaced by new volumes\n new_lines = []\n for existing_line in existing_lines:\n # skip line if it matches a mount point\n found = False\n for mount_point in mount_points:\n if mount_point in existing_line and not existing_line.startswith('#'):\n found = True\n break\n # no match, then preserve line\n if not found:\n new_lines.append(existing_line)\n\n # Add mount points for volumes\n for volume in metadata['volumes'].values():\n line = '{server}:{remote} {local} {flags}'.format(flags=flags, **volume)\n new_lines.append(line)\n\n # Update contents\n stream.seek(0)\n stream.write('\\n'.join(new_lines))\n stream.truncate()\n\n # Create local mount point directories\n for mount_point in mount_points:\n command = 'mkdir -m 0777 {}'.format(mount_point)\n run(command)",
"def load_fs(name=METADATAFILENAME):\n try:\n # lets see if the metadata file is already here?\n f = openfile(name, False)\n except FileNotFoundError, e:\n warning(\"Note: No filesystem found, building a fresh one.\")\n _blank_fs_init()\n else : \n f.close()\n try:\n restore_metadata(name)\n mounted = filesystemmetadata['mounted']\n mounted = mounted + 1\n filesystemmetadata['mounted'] = mounted\n\n except (IndexError, KeyError), e:\n print \"Error: Cannot reload filesystem. Run lind_fsck for details.\"\n print e\n return\n _load_lower_handle_stubs()",
"def create_fs_on_disk(request, storage):\n self = request.node.cls\n\n out, self.mount_point = storage_helpers.create_fs_on_disk(\n self.vm_name, self.disk_name\n )\n assert out, (\n \"Unable to create a filesystem on disk: %s of VM %s\" % (\n self.disk_name, self.vm_name\n )\n )",
"def testMount(self):\n self.assertEquals(\n MockDaemon._calls,\n [\n ('__init__', (self.dev, self.mount),\n {'wait_count': '10', 'wait_time': '1'}),\n ('daemonize', (), {}),\n ('start', (), {}),\n ('call', ([\"mount\", self.dev, self.mount],), {}),\n ])\n self.assertTrue(os.path.exists(self.mount))",
"def _mount_docker_tmpfs(newroot_norm):\n # /etc/docker as temp fs as dockerd create /etc/docker/key.json\n fs_linux.mount_tmpfs(newroot_norm, '/etc/docker')",
"def open(self):\n if not self.is_open:\n # Find or create storage.\n if self.storage is None:\n try:\n self.storage = FileStorage(self.database)\n self.storage.shelf.file.obtain_lock()\n except FileLockedError, e:\n raise DatabaseFileLocked()\n # Connect to storage.\n self.conn = Connection(\n self.storage, cache_size=self.cache_size)\n self.is_open = True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The plotly figure used to render the scene. Returns ~plotly.graph_objects.Figure The plotly ``Figure`` representing the scene. | def figure(self):
return self.scene | [
"def figure(self) -> Figure:\r\n assert len(self._visualizations) == 1\r\n motor_dashboard = self._visualizations[0]\r\n assert len(motor_dashboard._figures) == 1\r\n return motor_dashboard._figures[0]",
"def get_plotly_object(self):\n pass",
"def create_fig(self):\n figure_panel = Tk.Frame(self, width='6i', height='5.5i')\n figure_panel.grid(row=0, column=0, rowspan=3)\n self.fig = Figure()\n self.canvas = FigureCanvasTkAgg(self.fig, master=figure_panel)\n self.canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)\n NavigationToolbar2TkAgg(self.canvas, figure_panel)\n self.canvas.show()",
"def show_figure(plot):\n return plot[0]",
"def get_figure(ax):\n fig = None\n try:\n # Single plot\n fig = ax.get_figure()\n except AttributeError:\n # Subplots\n try:\n # 1D grid\n fig = ax[0].get_figure()\n except AttributeError:\n # 2D grid\n fig = ax[0][0].get_figure()\n\n return fig",
"def create_figure(self):\n if self.next_figure is not None:\n self.control.canvas_next.remove_figure(self.next_figure.shifted())\n self.current_figure = self.next_figure\n else:\n self.current_figure = Figure()\n\n # Draw next figure\n self.next_figure = Figure()\n self.control.canvas_next.hold_figure(self.next_figure.shifted())\n self.control.canvas_next.redraw()",
"def getfigure(self):\n return(self.figC,self.axC,self.canC)",
"def get_canvas(self):\r\n return self.sc.get_canvas()",
"def plotly2html(fig,filename=None,out_string=False,modebar=True):\n import uuid # Unique div-id required,otherwise jupyterlab renders at one place only and overwite it.\n div_id = \"graph-{}\".format(uuid.uuid1())\n fig_json = fig.to_json()\n # a simple HTML template\n if filename:\n template = \"\"\"<html>\n <head>\n <script src=\"https://cdn.plot.ly/plotly-latest.min.js\"></script>\n </head>\n <body>\n <div id='{}'></div>\n <script>\n var fig_data = {}\n Plotly.react('{}', fig_data.data, fig_data.layout);\n </script>\n </body>\n </html>\"\"\"\n\n # write the JSON to the HTML template\n with open(filename, 'w') as f:\n f.write(template.format(div_id,fig_json,div_id))\n f.close()\n else:\n if modebar==True: #Only for docs issue\n config = \"{displayModeBar: true,scrollZoom: true}\"\n else:\n config = \"{displayModeBar: false,scrollZoom: true}\"\n template = \"\"\"<div>\n <script src='https://cdn.plot.ly/plotly-latest.min.js'></script>\n <div id='{}'><!-- Plotly chart DIV --></div>\n <script>\n var data = {};\n var config = {};\n Plotly.newPlot('{}', data.data,data.layout,config);\n </script>\n </div>\"\"\".format(div_id,fig_json,config,div_id)\n if out_string is True:\n return template\n else:\n from IPython.display import HTML\n return HTML(template)",
"def render(**kwargs: KwargsRender | Any) -> Tuple[plt.Figure, plt.Axes]:\n\n figure = cast(plt.Figure, kwargs.get(\"figure\", plt.gcf()))\n axes = cast(plt.Axes, kwargs.get(\"axes\", plt.gca()))\n\n kwargs = handle_arguments_deprecation(\n {\n \"ArgumentRenamed\": [[\"standalone\", \"show\"]],\n },\n **kwargs,\n )\n\n settings = Structure(\n **{\n \"filename\": None,\n \"show\": True,\n \"aspect\": None,\n \"axes_visible\": True,\n \"bounding_box\": None,\n \"tight_layout\": True,\n \"legend\": False,\n \"legend_columns\": 1,\n \"transparent_background\": True,\n \"title\": None,\n \"wrap_title\": True,\n \"x_label\": None,\n \"y_label\": None,\n \"x_ticker\": True,\n \"y_ticker\": True,\n }\n )\n settings.update(kwargs)\n\n if settings.aspect:\n axes.set_aspect(settings.aspect)\n if not settings.axes_visible:\n axes.set_axis_off()\n if settings.bounding_box:\n axes.set_xlim(settings.bounding_box[0], settings.bounding_box[1])\n axes.set_ylim(settings.bounding_box[2], settings.bounding_box[3])\n\n if settings.title:\n axes.set_title(settings.title, wrap=settings.wrap_title)\n if settings.x_label:\n axes.set_xlabel(settings.x_label)\n if settings.y_label:\n axes.set_ylabel(settings.y_label)\n if not settings.x_ticker:\n axes.set_xticks([]) # pyright: ignore\n if not settings.y_ticker:\n axes.set_yticks([]) # pyright: ignore\n if settings.legend:\n axes.legend(ncol=settings.legend_columns)\n\n if settings.tight_layout:\n figure.tight_layout()\n\n if settings.transparent_background:\n figure.patch.set_alpha(0)\n\n if settings.filename is not None:\n figure.savefig(settings.filename)\n\n if settings.show:\n plt.show()\n\n return figure, axes",
"def make_figure():\n fig, ax = plt.subplots()\n\n return fig, ax",
"def _save_figure_factory_(self, fig):\r\n fname = 'annotated_heatmap'\r\n if self.variable is not None and self.variable != '': fname = 'annotated_heatmap_' + self.variable\r\n if not self.__plotlyConfig.get('staticPlot'):\r\n if inline(): \r\n plotly.offline.iplot(fig, config=self.plotlyConfig)\r\n else:\r\n plotly.offline.plot(fig, config=self.plotlyConfig, filename=get_figure_dir() + fname + '.html')\r\n else:\r\n plotly.io.write_image(fig, get_figure_dir() + fname + '.png')",
"def _setup_figure(self):\n if self.fig_size:\n size = self.fig_size\n else:\n size = 6, 1.0 + 0.42 * len(self.designs) * self.rows\n\n self.fig = plt.figure(figsize=size)",
"def plot_window(self) :\n plot = self._Component[\"plot\"]\n return plot",
"def show(self):\n self._create_figure(raise_exception=True)\n self._fig_width, self._fig_height = self._fig_dims()\n plt.show()",
"def show_or_save_fig(self, fig, filename=None):\n if self.out_path:\n fig.savefig(os.path.join(self.out_path, filename), transparent=True, bbox_inches='tight')\n pyplot.close(fig)\n else:\n fig.show() # pragma: no cover",
"def show_data(self, **kwargs) -> Optional[Figure]:\n\n fig = go.Figure(data=[go.Scatter(x=self._x_values, y=self._y_values, mode='markers',\n marker=dict(size=8, color='red', opacity=0.8))])\n fig.update_layout(\n title=\"Linear Regression Data\",\n xaxis_title=\"X Values\",\n yaxis_title=\"Y Values\",\n title_x=0.5\n )\n\n if 'save' in kwargs and kwargs['save']:\n fig.write_image('show_data.jpeg')\n\n if 'return_fig' in kwargs and kwargs['return_fig']:\n return fig\n\n fig.show()",
"def sceneViewer():\n return hou.SceneViewer",
"def new_figure(self):\n f = plt.figure()\n f.subplots_adjust(bottom=0.1,top=0.97,left=0.06,right=0.98)\n plt.axis(self.axis_bounds)\n ax = plt.gca()\n ax.set_aspect(1)\n plt.draw()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The plotly layout of the figure. Returns ~plotly.graph_objects.Layout The plotly ``Layout`` object linked to the figure. | def layout(self):
return self._layout | [
"def create_layout(self):\n graph = Gui.Graph(canvas_size=(500, 500), graph_bottom_left=(0, 0), graph_top_right=(500, 500),\n background_color=None, pad=None, change_submits=False, drag_submits=False,\n enable_events=False,\n key=\"Graphs\", tooltip=None, right_click_menu=None, visible=True, float_values=False,\n metadata=None)\n\n layout = [graph]\n\n return layout",
"def LAYOUT() -> Struct: # pylint: disable=invalid-name\n return MARKET_LAYOUT",
"def pyre_loadLayout(self):\n # access the factory\n from .Layout import Layout\n\n # build one and return it\n return Layout(name=f\"{self.pyre_name}.layout\")",
"def get_working_layout(self):\n if self.return_value is None: return None\n return self.__library_layout",
"def __repr__(self):\r\n return 'Layout(layout={})'.format(self._layout)",
"def get_layout(name: str) -> Layout:\n\n layout_class = LAYOUT_MAP.get(name)\n if layout_class is None:\n raise MissingLayout(f\"no layout called {name!r}, valid layouts\")\n return layout_class()",
"def create(cls, setup: LayoutSetup, aspect: float = 1.0) -> \"BoxedPlotLayout\":\n if setup.type == \"vintage\":\n aspects, rects = create_layout_vintage(aspect)\n elif setup.type == \"post_vintage\":\n aspects, rects = create_layout_post_vintage(aspect)\n elif setup.type == \"post_vintage_ens\":\n aspects, rects = create_layout_post_vintage_ens(aspect)\n elif setup.type == \"standalone_details\":\n aspects, rects = create_layout_standalone_details(aspect)\n else:\n raise ValueError(f\"invalid layout type '{setup.type}'\")\n return cls(setup, aspects=aspects, rects=rects)",
"def draw_layout(self, width, height):\r\n layout = Layout(width, height)\r\n layout_matrix = layout.draw_shape()\r\n self._layout_matrix = layout_matrix\r\n return layout_matrix",
"def get_plotly_object(self):\n pass",
"def layout(self):\n ctx = self.context.copy()\n ctx.content = self.render()\n layout = self.layouts.get(ctx.layout)\n if layout:\n layout = self.layouts.get(layout.layout)\n\n while layout != None:\n ctx.content = renderTemplate(layout.content, ctx)\n layout = self.layouts.get(layout.layout)\n\n return ctx.content",
"def PlotLayoutId(self) -> _n_0_t_0:",
"def get_layout(self, target_type: ObjectType, parent):\n return target_type.instance().layout()(parent)",
"def get_fixed_layout(self):\n return self._fixed_layout",
"def get_base_layout(figures):\n if not isinstance(figures, list):\n raise TypeError(\"Invalid figures '{0}'. \"\n \"It should be list.\"\n .format(figures))\n\n layout = {}\n for figure in figures:\n if not figure['layout']:\n raise Exception(\"Figure does not have 'layout'.\")\n\n for key, value in figure['layout'].items():\n layout[key] = value\n\n return layout",
"def set_slide_layout(self, slide_layout):\n if isinstance(slide_layout, int):\n return self.presentation.slide_layouts[slide_layout]\n else:\n return return_slide_layout_by_name(self.presentation, slide_layout)",
"def channel_layout(self):\n # type: () -> DolbyDigitalChannelLayout\n return self._channel_layout",
"def update_layout(self, layout):\n self.figure.update_layout(layout)",
"def layout(layout_str: str, dtype: str = \"int32\") -> Layout:\n return _ffi_api.Layout(layout_str, dtype) # type: ignore",
"def layout(self, layout):\n for index, obj in enumerate(self.layouts):\n if obj.name == layout:\n self.currentLayout = index\n hook.fire(\"layout_change\", self.layouts[self.currentLayout])\n self.layoutAll()\n return\n raise ValueError(\"No such layout: %s\" % layout)",
"def _get_layouts(self):\r\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the layout of the figure scene. | def update_layout(self, layout):
self.figure.update_layout(layout) | [
"def refresh_layout(self):\n if self.__container:\n self.__container._needs_layout(self)",
"def update(self, system):\n update_cellview(self.ax[0, 0], system)\n update_rdfview(self.ax[1, 1], system, self.average_rdf, self.r)\n update_energyview(self.ax[0, 1], system)\n update_msdview(self.ax[1, 0], system)\n self.fig.canvas.draw()",
"def updateOverallGraphs(self):\n # update graphs visualisation\n self.updateTo3D()\n self.updateTo2D()",
"def _resize(self, event):\n h,w = Graph.reset(self)\n \n w_spacing = w/len(self._tf)\n Graph.draw_axes(self, h, w)\n self._draw_connections(h, w_spacing) \n self._draw_points(h, w_spacing)\n print(\"Graph Displayed\") # Confirmation",
"def layout(self):\n\n if len(self.children) == 0:\n ideal_height = self.height\n else:\n ideal_height = self.height/len(self.children)\n\n # Set the height of each plot. The plots use this value a desired height,\n # but may make them selves larger or smaller based on min/max size requirements\n for child in self.children.values():\n child.height = ideal_height\n\n # Now set the positions of each plot based on the final height and\n # position of the plots before it\n y = 0\n for child in self.children.values():\n child.setPos(QtCore.QPoint(0,y))\n y = y + child.height + self.spacing",
"def update_plot(self, *args) -> None:\n self.update_units_labels_and_values()\n self.light_source = self.calculate_light_source()\n x_label, y_label = self.get_axes_labels()\n\n self.axes.clear()\n self.axes.plot(*self.light_source.spectrum(), 'r')\n self.axes.set_xlabel(x_label)\n self.axes.set_ylabel(y_label)\n\n # recompute the ax.dataLim and update ax.viewLim using the new dataLim\n self.axes.relim()\n self.axes.autoscale_view()\n\n # re-draw the canvas\n self.fig.canvas.draw_idle()",
"def layout_changed(self):\n\n manager = self.manager\n if manager is not None:\n manager.layout_changed()\n parent = self.parent\n if parent is not None:\n parent.layout_changed()",
"def _update_np_visualizer(self) -> None:\n # Refresh visualizer\n self.nb_visualizer.update_geometry(self.pcd_scene)\n self.nb_visualizer.update_geometry(self.mesh_stones)\n if not self.nb_visualizer.poll_events(): sys.exit() #TODO: or exit thread\n self.nb_visualizer.update_renderer()\n time.sleep(self.fps)",
"def _update(self):\n self.scene.apply_render_style()\n\n if self.camera_orientation is not None:\n set_camera(self.scene, self.camera_orientation)\n self.camera_orientation = None\n\n # Get actors to render\n self._update_actors()\n to_render = [act for act in self.actors.values() if act.is_visible]\n\n # Set actors look\n meshes = [act.mesh.c(act.color).alpha(act.alpha) for act in to_render]\n\n # Add axes\n if self.axes is not None:\n meshes.append(self.axes)\n\n # update actors rendered\n self.scene.plotter.show(\n *meshes, interactorStyle=0, bg=brainrender.BACKGROUND_COLOR,\n )\n\n # Fake a button press to force canvas update\n self.scene.plotter.interactor.MiddleButtonPressEvent()\n self.scene.plotter.interactor.MiddleButtonReleaseEvent()\n\n # Update list widget\n update_actors_list(self.actors_list, self.actors)\n\n return meshes",
"def refresh_layout_immediate(self):\n self.refresh_layout()\n self.update_layout(self.canvas_origin, self.canvas_size, immediate=True)",
"def UpdateModel(self):\n\n #change the coords of model\n Shape.UpdateModel(self)\n\n #get the size of the shape (view)\n width, height = self.GetSize()\n\n #get the ratio between the model and the shape (view) from\n #the diagram frame where the shape is displayed.\n ratio = self.GetDiagram().GetPanel().GetCurrentZoom()\n\n # set the new size to the model.\n self.GetModel().SetSize(width/ratio, height/ratio)",
"def update(self):\n\t\tfor p in self.panes:\n\t\t\tp.update()",
"def update_plot(self, event):\n print 'Trying to redraw'\n plt.figure( 1 )\n\n if self.xmin.get() != 'ALL' and self.xmax.get() != 'ALL':\n plt.xlim(int(self.xmin.get()), int(self.xmax.get()))\n else:\n plt.xlim(0, 16384)\n if self.ymin.get() != 'ALL' and self.ymax.get() != 'ALL':\n plt.ylim(int(self.ymin.get()), int(self.ymax.get()))\n else:\n plt.ylim(0, 1024)\n\n self.canvas.show()",
"def on_action_set_figure(self, content):\n self._figure = content['figure']\n self.refresh_mpl_widget()",
"def visualize(self):\n self.visualizeSignal.emit() # Emits the visualizeSignal signal",
"def layoutBarScene(self):\n self.scene.clear()\n\n width = self.scene.width()\n height = self.scene.height()\n\n plotWidth = width - 2*self.padding\n plotHeight = height - 2*self.padding\n\n maxExtent = plotWidth\n\n axisPen = qtg.QPen(qtc.Qt.black)\n names = self.amsc.GetNames()[:-1]\n\n selection = self.amsc.GetSelectedSegments()\n colorMap = self.amsc.GetColors()\n if self.valueGroup.checkedAction().text() == 'Linear coefficients':\n fits = self.amsc.SegmentFitCoefficients()\n elif self.valueGroup.checkedAction().text() == 'Pearson correlation':\n fits = self.amsc.SegmentPearsonCoefficients()\n elif self.valueGroup.checkedAction().text() == 'Spearman rank correlation':\n fits = self.amsc.SegmentSpearmanCoefficients()\n\n ## Check if they selected any extrema\n if selection is None or len(selection) == 0:\n selection = []\n selectedExts = self.amsc.GetSelectedExtrema()\n allSegments = self.amsc.GetCurrentLabels()\n for minMaxPair in allSegments:\n for extIdx in selectedExts:\n if extIdx in minMaxPair:\n selection.append(minMaxPair)\n ## Okay, well then we will just plot everything we have for the current\n ## level\n if len(selection) == 0:\n selection = allSegments\n\n if self.valueGroup.checkedAction().text() == 'Linear coefficients':\n maxValue = 0\n for extPair in selection:\n if maxValue < max(map(abs,fits[extPair])):\n maxValue = max(map(abs,fits[extPair]))\n else:\n maxValue = 1\n\n if self.bundledAction.isChecked():\n axisHeight = plotHeight/float(len(names))\n axisWidth = plotWidth/float(len(names))\n\n for j,extPair in enumerate(selection):\n myColor = colorMap[extPair]\n myPen = qtg.QPen(qtg.QColor('#000000'))\n brushColor = qtg.QColor(myColor)\n brushColor.setAlpha(127)\n myBrush = qtg.QBrush(brushColor)\n for i,val in enumerate(fits[extPair]):\n absVal = abs(val)\n barExtent = (absVal/maxValue)*maxExtent\n if self.signedAction.isChecked():\n x = self.padding + maxExtent/2.\n if val > 0:\n w = barExtent/2.\n else:\n w = -barExtent/2.\n else:\n x = self.padding\n w = barExtent\n y = (height-self.padding) - i*axisHeight \\\n - j*axisHeight/float(len(selection))\n h = -axisHeight / float(len(selection))\n if self.showNumberAction.isChecked():\n numTxtItem = self.scene.addSimpleText('%.3g' % val, self.font)\n numTxtItem.setFlag(qtw.QGraphicsItem.ItemIgnoresTransformations)\n fm = qtg.QFontMetrics(numTxtItem.font())\n fontWidth = fm.width(numTxtItem.text())\n numTxtItem.setPos(self.padding+maxExtent-fontWidth,y+h)\n numTxtItem.setFlag(qtw.QGraphicsItem.ItemIsMovable)\n numTxtItem.setFlag(qtw.QGraphicsItem.ItemIsSelectable)\n numTxtItem.setZValue(2)\n myRect = self.scene.addRect(x,y,w,h,myPen,myBrush)\n myRect.setToolTip(str(val))\n myRect.setAcceptHoverEvents(True)\n for i,name in enumerate(names):\n x = self.padding\n y = height - self.padding - i/float(len(names))*plotHeight\n w = plotWidth\n h = -axisHeight\n if self.showLabelsAction.isChecked():\n txtItem = self.scene.addSimpleText(name,self.font)\n txtItem.setFlag(qtw.QGraphicsItem.ItemIgnoresTransformations)\n fm = qtg.QFontMetrics(txtItem.font())\n fontHeight = fm.height()\n fontWidth = fm.width(txtItem.text())\n txtItem.setPos(self.padding-fontWidth,y+h + (axisHeight-fontHeight)/2.)\n txtItem.setFlag(qtw.QGraphicsItem.ItemIsMovable)\n txtItem.setFlag(qtw.QGraphicsItem.ItemIsSelectable)\n txtItem.setZValue(2)\n myRect = self.scene.addRect(x,y,w,h,axisPen)\n myRect.setZValue(2) # Any value greater than 1 should work to draw on top\n else:\n if len(selection) > 0:\n axisHeight = plotHeight/float(len(selection))\n axisWidth = 
plotWidth/float(len(selection))\n dimCount = len(names)\n\n self.font.setPointSizeF(np.clip(axisHeight/float(dimCount)-2*self.padding,2,18))\n for j,extPair in enumerate(selection):\n myColor = colorMap[extPair]\n myPen = qtg.QPen(qtg.QColor('#000000'))\n brushColor = qtg.QColor(myColor)\n brushColor.setAlpha(127)\n myBrush = qtg.QBrush(brushColor)\n for i,val in enumerate(fits[extPair]):\n absVal = abs(val)\n name = names[i]\n barExtent = (absVal/maxValue)*maxExtent\n if self.signedAction.isChecked():\n x = self.padding + maxExtent/2.\n if val > 0:\n w = barExtent/2.\n else:\n w = -barExtent/2.\n else:\n x = self.padding\n w = barExtent\n y = (height-self.padding) - j*axisHeight \\\n - i*axisHeight/float(dimCount)\n h = -axisHeight / float(dimCount)\n\n if self.showLabelsAction.isChecked():\n txtItem = self.scene.addSimpleText(name,self.font)\n ## this line can be useful for text sizing, although we cannot\n ## rotate the text if we ignore the transformations.\n # txtItem.setFlag(qtw.QGraphicsItem.ItemIgnoresTransformations)\n fm = qtg.QFontMetrics(txtItem.font())\n fontHeight = fm.boundingRect(txtItem.text()).height()\n fontWidth = fm.boundingRect(txtItem.text()).width()\n txtItem.setPos(self.padding,y+0.5*(h-fontHeight))\n txtItem.setFlag(qtw.QGraphicsItem.ItemIsMovable)\n txtItem.setFlag(qtw.QGraphicsItem.ItemIsSelectable)\n txtItem.setZValue(2)\n if self.showNumberAction.isChecked():\n numTxtItem = self.scene.addSimpleText('%.3g' % val, self.font)\n ## this line can be useful for text sizing, although we cannot\n ## rotate the text if we ignore the transformations.\n # numTxtItem.setFlag(qtw.QGraphicsItem.ItemIgnoresTransformations)\n fm = qtg.QFontMetrics(numTxtItem.font())\n fontWidth = fm.boundingRect(numTxtItem.text()).width()\n fontHeight = fm.boundingRect(numTxtItem.text()).height()\n numTxtItem.setPos(self.padding+maxExtent-fontWidth,y+0.5*(h-fontHeight))\n numTxtItem.setFlag(qtw.QGraphicsItem.ItemIsMovable)\n numTxtItem.setFlag(qtw.QGraphicsItem.ItemIsSelectable)\n numTxtItem.setZValue(2)\n myRect = self.scene.addRect(x,y,w,h,myPen,myBrush)\n myRect.setToolTip(str(val))\n myRect.setAcceptHoverEvents(True)\n\n x = self.padding\n y = (height-self.padding) - j*axisHeight\n w = maxExtent\n h = -axisHeight\n myRect = self.scene.addRect(x,y,w,h,axisPen)\n myRect.setZValue(2) # Any value greater than 1 should work to draw on top\n\n if self.signedAction.isChecked():\n axisPen = qtg.QPen(qtc.Qt.black)\n axisPen.setWidthF(.5)\n x = self.padding + maxExtent/2.\n y = self.padding\n h = plotHeight\n self.scene.addLine(x,y,x,y+h,axisPen)",
"def initiate_graph(self):\n logger.debug(\"Setting plotcanvas\")\n self.plotcanvas.get_tk_widget().pack(side=tk.TOP, padx=5, fill=tk.BOTH, expand=True)\n self.fig.subplots_adjust(left=0.100,\n bottom=0.100,\n right=0.95,\n top=0.95,\n wspace=0.2,\n hspace=0.2)\n logger.debug(\"Set plotcanvas\")",
"def Reinitialize(self, parent=None, amsc=None, title=None):\n # Try to apply a new layout, if one already exists then make sure to grab\n # it for updating\n if self.layout() is None:\n self.setLayout(qtw.QVBoxLayout())\n layout = self.layout()\n self.clearLayout(layout)\n\n self.padding = 2\n\n ## General Graphics View/Scene setup\n self.scene = qtw.QGraphicsScene()\n self.scene.setSceneRect(0,0,100,100)\n self.gView = qtw.QGraphicsView(self.scene)\n self.gView.setRenderHints(qtg.QPainter.Antialiasing |\n qtg.QPainter.SmoothPixmapTransform)\n self.gView.setHorizontalScrollBarPolicy(qtc.Qt.ScrollBarAlwaysOff)\n self.gView.setVerticalScrollBarPolicy(qtc.Qt.ScrollBarAlwaysOff)\n self.font = qtg.QFont('sans-serif', 12)\n\n ## Defining the right click menu\n self.rightClickMenu = qtw.QMenu()\n self.shapeMenu = qtw.QMenu('Layout')\n self.shapeGroup = qtw.QActionGroup(self.shapeMenu)\n self.rightClickMenu.addMenu(self.shapeMenu)\n shapeActions = []\n shapeActions.append(self.shapeMenu.addAction('Horizontal Bar'))\n shapeActions.append(self.shapeMenu.addAction('Radial'))\n for act in shapeActions:\n act.setCheckable(True)\n self.shapeGroup.addAction(act)\n shapeActions[0].setChecked(True)\n self.shapeGroup.triggered.connect(self.updateScene)\n\n ## Ba da ba ba ba I'm lovin' it\n self.valueMenu = qtw.QMenu('Value to Display')\n self.valueGroup = qtw.QActionGroup(self.valueMenu)\n self.rightClickMenu.addMenu(self.valueMenu)\n valueActions = []\n valueActions.append(self.valueMenu.addAction('Linear coefficients'))\n valueActions.append(self.valueMenu.addAction('Pearson correlation'))\n valueActions.append(self.valueMenu.addAction('Spearman rank correlation'))\n for act in valueActions:\n act.setCheckable(True)\n self.valueGroup.addAction(act)\n valueActions[0].setChecked(True)\n self.valueGroup.triggered.connect(self.updateScene)\n\n self.showLabelsAction = self.rightClickMenu.addAction('Show Labels')\n self.showLabelsAction.setCheckable(True)\n self.showLabelsAction.setChecked(True)\n self.showLabelsAction.triggered.connect(self.updateScene)\n\n self.showNumberAction = self.rightClickMenu.addAction('Show Numeric Values')\n self.showNumberAction.setCheckable(True)\n self.showNumberAction.setChecked(True)\n self.showNumberAction.triggered.connect(self.updateScene)\n\n self.bundledAction = self.rightClickMenu.addAction('Bundled on Dimension')\n self.bundledAction.setCheckable(True)\n self.bundledAction.setChecked(False)\n self.bundledAction.triggered.connect(self.updateScene)\n\n self.signedAction = self.rightClickMenu.addAction('Signed')\n self.signedAction.setCheckable(True)\n self.signedAction.setChecked(True)\n self.signedAction.triggered.connect(self.updateScene)\n\n self.fillAction = self.rightClickMenu.addAction('Fill viewport')\n self.fillAction.setCheckable(True)\n self.fillAction.setChecked(True)\n self.fillAction.triggered.connect(self.updateScene)\n\n captureAction = self.rightClickMenu.addAction('Capture')\n captureAction.triggered.connect(self.saveImage)\n\n self.gView.scale(self.gView.width()/self.scene.width(),\n self.gView.height()/self.scene.height())\n\n layout.addWidget(self.gView)\n self.updateScene()",
"def updateRendering(self):\n\t\tinput = self.getInput(1)\n\t\tif self.parameters[\"InsideOut\"]:\n\t\t\tself.boxWidget.InsideOutOn()\n\t\telse:\n\t\t\tself.boxWidget.InsideOutOff()\n\t\tself.boxWidget.SetInput(input)\n# self.mapper.SetInput(data)\n\t\t\n\t\t#self.mapper.Update()\n\t\tVisualizationModule.updateRendering(self)\n\t\tself.parent.Render()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the required list of colors if orbit trail is desired. | def _get_colors(self, color, trail):
color = color or next(self._color_cycle)
return [color] | [
"def _get_colors(self, color, trail):\n if color is None:\n # HACK: https://stackoverflow.com/a/13831816/554319\n color = next(self.ax._get_lines.prop_cycler)[\"color\"]\n\n colors = [color, to_rgba(color, 0)] if trail else [color]\n return colors",
"def colors(self):\n colors = cache.get('COLORS-' + self.alpha_2)\n if colors:\n return colors\n else:\n return self.analyze_flag()",
"def get_colours():\n return [\n 'darkgreen', 'metallic', 'acid', 'sandy', 'orange', 'red'\n ]",
"def computeGliderTrajectoryColors(gliderTrajectory, minVerticalSpeed, maxVerticalSpeed):\n\n lutAsc = vtk.vtkLookupTable()\n lutAsc.SetTableRange(0, maxVerticalSpeed)\n lutAsc.SetHueRange(1/8, 0)\n lutAsc.Build()\n\n lutDesc = vtk.vtkLookupTable()\n lutDesc.SetTableRange(minVerticalSpeed, 0)\n lutDesc.SetHueRange(5/8, 1/2)\n lutDesc.Build()\n\n colors = vtk.vtkUnsignedCharArray()\n colors.SetNumberOfComponents(3)\n colors.SetName(\"Colors\")\n\n for i in range(0, len(gliderTrajectory)):\n dcolor = [0, 0, 0]\n if (i == 0):\n lutDesc.GetColor(0, dcolor)\n else:\n verticalSpeed = computeVerticalSpeed(gliderTrajectory, i)\n if (verticalSpeed >= 0):\n lutAsc.GetColor(verticalSpeed, dcolor)\n else:\n lutDesc.GetColor(verticalSpeed, dcolor)\n color = [0, 0, 0]\n for k in range(0, 3):\n color[k] = 255 * dcolor[k]\n\n colors.InsertNextTuple(color)\n return colors",
"def _color_list(size):\n return (__colors * (int(size / len(__colors)) + 1))[0:size]",
"def list_colorsets():\n return tuple([\"colorblind\"] + list(colors.keys()))",
"def getListColorChecker(self) -> retval:\n ...",
"def get_colors(_mode):\n if _mode == 0:\n # using colors from wal\n colorsFile = WAL_COLORS\n else:\n # using colors from wpg\n colorsFile = WPG_COLORS\n # parse the file\n print(\"Reading colors\")\n # try:\n with open(colorsFile) as f:\n rawFile = f.readlines() # save the lines to rawFile\n # TODO: Specific exception\n # except:\n # print(\"Error: Colors file missing. Make sure you've run\" +\n # \"pywal/wpg before wal_steam\")\n # sys.exit(1)\n\n # delete the lines not involving the colors\n del rawFile[0:11]\n del rawFile[16]\n\n # loop through rawFile and store colors in a list\n _colors = []\n for line in rawFile:\n # delete everything but the hex code\n tmp = line[line.find(\"#\"):]\n tmp = tmp[:7]\n\n # append the hex code to the colors list\n _colors.append(tmp)\n\n return _colors",
"def get_colours(self, n):\n\t\tbase = np.asarray([[1,0,0], [0,1,0], [0,0,1]])\n\n\t\tif n <= 3:\n\t\t\treturn base[0:n]\n\n\t\t# how many new colours to we need to insert between\n\t\t# red and green and between green and blue?\n\t\tneeded = (((n - 3) + 1) / 2, (n - 3) / 2)\n\n\t\tcolours = []\n\t\tfor start in (0, 1):\n\t\t\tfor x in np.linspace(0, 1, needed[start]+2):\n\t\t\t\tcolours.append((base[start] * (1.0 - x)) +\n\t\t\t\t\t\t\t (base[start+1] * x))\n\n\t\treturn [self.pastel(c) for c in colours[0:n]]",
"def get_pass_fail_colours_4_tone(self, request):\n\n if self.colourblind_options_on(request):\n return [Colours.ORANGE.value, Colours.YELLOW.value, Colours.BLUE_LIGHT.value, Colours.BLUE.value]\n else:\n return [Colours.RED.value, Colours.ORANGE_LIGHT.value, Colours.GREEN_LIGHT.value, Colours.GREEN.value]",
"def getColorList(n):\n colors = []\n\n for i in range(0,n):\n colors.append(generate_new_color(colors,pastel_factor = 0.1))\n \n return colors",
"def rand_color() -> list:\n\n # IMPORT DONE HERE TO SAVE TIME AT MODULE INIT\n import random\n\n return [random.randrange(256), random.randrange(256), random.randrange(256)]",
"def get_colors(self) -> List[RGBColor]:\n return [display_character.color for display_character in self]",
"def get_colours(n):\n if n <= 3:\n return base[0:n]\n\n # how many new colours to we need to insert between\n # red and green and between green and blue?\n needed = (old_div(((n - 3) + 1), 2), old_div((n - 3), 2))\n\n colours = []\n for start in (0, 1):\n for x in np.linspace(0, 1, needed[start]+2)[:-1]:\n colours.append((base[start] * (1.0 - x)) +\n (base[start+1] * x))\n colours.append(base[2])\n\n return [pastel(c) for c in colours[0:n]]",
"def get_colours(self, number):\n if number <= 0:\n number = 1\n ix = int(log2(number))\n return self.colours[ix]",
"def get_color_cycle():\n cycler = mpl.rcParams['axes.prop_cycle']\n return cycler.by_key()['color'] if 'color' in cycler.keys else [\".15\"]",
"def get_colors(color='ALL'):\n\tif color == 'ALL':\n\t\treturn color_dict['palette']\n\treturn color_dict['palette'][color]",
"def crayon_palette(colors: Sequence[str]) -> Sequence[RGBHexColor]:\n return [get_named_color(f\"crayon:{name}\") for name in colors]",
"def get_pass_fail_colours_2_tone(self, request):\n\n if self.colourblind_options_on(request):\n return [Colours.ORANGE.value, Colours.BLUE.value]\n else:\n return [Colours.RED.value, Colours.GREEN.value]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes the attractor from the scene. | def undraw_attractor(self):
pass | [
"def undraw_attractor(self):\n for attractor in self.ax.findobj(match=mpl_patches.Circle):\n attractor.remove()",
"def remove(self):\n self.layers.pop()",
"def remove_highlight_actor(self):\n gui = self.win_parent\n remove_actors_from_gui(gui, self.actors, render=True)\n self.actors = []",
"def _erase (self):\n self.screen.blit_background (self._rect)",
"def deactiveHeadTrack(self):\n\t\t# get objects\n\t\tplayer = scene.objects['Link']\n\t\tplayer.rig['armConstraint'] = False",
"def erase(self):\n self._evidence = [None] * len(self.ground_atoms)",
"def disarm(self):\n self.act = None\n Framework.removeTimeEvent(self)",
"def _remove_animation(self, node):\n pass",
"def remove_piston(self):\n self.phase -= mean(self.phase)\n return self",
"def remove_tiptilt(self):\n plane = fit_plane(self.x, self.y, self.phase)\n self.phase -= plane\n return self",
"def delete_button(self):\n self.physics_object.clear_forces()\n self.window.physics_canvas.delete_physics_object(self.physics_object)\n self.del_win()",
"def delete_targetpoint(self):\n if(self.tp):\n self.world.DestroyBody(self.tp)\n self.tp = None",
"def removeItem(self):\r\n\t\t\r\n\t\tself.enterItem = None\r\n\t\tself.scene().removeSelItem()",
"def remove_at(self, point):\n self.entities.remove(point)",
"def remove_plant(screen, menu, game_engine, game_map=None, param=None):\n\n menu.plant_im = None\n menu.text_input = None\n\n for i in range(0, (len(menu.buttons) - 3)):\n if menu.buttons[i].plant == param:\n menu.buttons.pop(i)\n menu.init_menu(screen)\n break\n\n for i in range(0, len(game_engine.plants)):\n if game_engine.plants[i] == param:\n game_engine.plants.pop(i)\n break",
"def remove_effect(self, ball):\n raise NotImplementedError()",
"def clear(self):\n # Animation to clear the controllers\n clear_animation = Animation(False)\n clear_animation.addLightWait(1,True)\n # TODO add motion wait motionAnimation\n self.setNextAnimations(clear_animation,Timing.unix_timestamp())",
"def vanish(self):\n del foods[foods.index(self)]\n self.sight.reset()\n self.sight.hideturtle()",
"def _removeFirst_animation(self):\n self._remove_animation(0)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Draw an impulse into the scene. | def draw_impulse(self, position, *, color, label, size):
return self.draw_marker(
position, color=color, label=label, marker_symbol="x", size=size
) | [
"def apply_impulse(self, p):\n\t\tself.force=p",
"def impulse(t: float) -> float:\n return 1. if t >= 1. and t < 2. else 0.",
"def draw_velocity(self):\n vect = vector.scalar_multiply(self.velocity, 10)\n endpos = vector.add(self.rect.center, vect)\n return pygame.draw.line(self.level.screen, (0, 255, 255),\n self.rect.center,\n endpos)",
"def apply_impulse(body, impulse, pos=None):\n\tif pos is None:\n\t\tpos = body.worldCenter\n\n\tbody.ApplyLinearImpulse(impulse, pos, True)",
"def draw(self, screen: Surface, draw_vectors: bool = False) -> None:\n # # draw particle itself\n pygame.draw.circle(\n screen,\n self.color,\n self.pos,\n self.radius,\n )\n\n if draw_vectors:\n # to make lines longer\n scale: float = math.pi\n # width of lines\n width: int = 5\n # draw velocity vector\n pygame.draw.line(\n screen,\n Color('red'),\n self.pos,\n self.pos + self.vel,\n width\n )\n\n # draw acceleration vector\n pygame.draw.line(\n screen,\n Color('blue'),\n self.pos,\n self.pos + self.acc,\n width\n )",
"def on_draw(self):\n self.clear()\n self.arch.draw()\n self.bullet.draw()\n\tfps_display.draw()",
"def draw(self):\r\n if not self.ate_apple:\r\n pygame.draw.rect(window, self.RGB, (self.x, self.y, self.width, self.height))",
"def update(self):\n self.rect.x += self.x_velocity\n self.rect.y += self.y_velocity",
"def on_draw(self) -> None:\n arcade.start_render()\n self.background.draw_sized(\n self.window.width // 2,\n self.window.height // 2,\n self.window.width,\n self.window.height\n )",
"def draw(self, window):\n # usar sprite para desenhar drone\n pygame.draw.circle(window, BLUE, RATIO * self.location, radius=SIZE_DRONE, width=20)\n pygame.draw.circle(window, BLACK, RATIO * self.location, radius=RATIO * AVOID_DISTANCE, width=1)",
"def draw(self):\r\n self.shake()\r\n service.screen.blit(self.image, (self.rect.x, self.rect.y))",
"def _draw(self):\r\n self.ap.set_pixels(self.launch[self.launchCount])",
"def paintEffectsDisplay(meshDrawEnable=bool):\n pass",
"def draw_food(self):\n\n pygame.draw.rect(self.screen, self.food_color, self.rect)",
"def test_pendulum_force_impulse(self):\n # Create an engine: no controller and no internal dynamics\n engine = jiminy.Engine()\n setup_controller_and_engine(engine, self.robot)\n\n # Analytical solution\n def sys(t):\n q = 0.0\n v = 0.0\n for i, force in enumerate(F_register):\n if t > force[\"t\"]:\n pos = self.l * np.array([\n -np.cos(q - np.pi / 2), 0.0, np.sin(q - np.pi / 2)])\n n = pos / np.linalg.norm(pos)\n d = np.cross(self.axis, n)\n F_proj = force[\"F\"][:3].T.dot(d)\n v_delta = ((F_proj + force[\"F\"][4] / self.l) * min(\n force[\"dt\"], t - force[\"t\"])) / self.m\n if (i < len(F_register) - 1):\n q += (v + v_delta) * max(\n 0, min(t, F_register[i + 1][\"t\"]) -\n (force[\"t\"] + force[\"dt\"]))\n else:\n q += (v + v_delta) * max(\n 0, t - force[\"t\"] + force[\"dt\"])\n q += (v + v_delta/2) * min(\n force[\"dt\"], t - force[\"t\"])\n v += v_delta\n else:\n break\n return np.array([q, v])\n\n # Register a set of impulse forces\n np.random.seed(0)\n F_register = [{\"t\": 0.0, \"dt\": 2.0e-3,\n \"F\": np.array([1.0e3, 0.0, 0.0, 0.0, 0.0, 0.0])},\n {\"t\": 0.1, \"dt\": 1.0e-3,\n \"F\": np.array([0.0, 1.0e3, 0.0, 0.0, 0.0, 0.0])},\n {\"t\": 0.2, \"dt\": 2.0e-5,\n \"F\": np.array([-1.0e5, 0.0, 0.0, 0.0, 0.0, 0.0])},\n {\"t\": 0.2, \"dt\": 2.0e-4,\n \"F\": np.array([0.0, 0.0, 1.0e4, 0.0, 0.0, 0.0])},\n {\"t\": 0.4, \"dt\": 1.0e-5,\n \"F\": np.array([0.0, 0.0, 0.0, 0.0, 2.0e4, 0.0])},\n {\"t\": 0.4, \"dt\": 1.0e-5,\n \"F\": np.array([1.0e3, 1.0e4, 3.0e4, 0.0, 0.0, 0.0])},\n {\"t\": 0.6, \"dt\": 1.0e-6,\n \"F\": (2.0 * (np.random.rand(6) - 0.5)) * 4.0e6},\n {\"t\": 0.8, \"dt\": 2.0e-6,\n \"F\": np.array([0.0, 0.0, 2.0e5, 0.0, 0.0, 0.0])}]\n for f in F_register:\n engine.register_force_impulse(\n \"PendulumLink\", f[\"t\"], f[\"dt\"], f[\"F\"])\n\n # Configure the engine: No gravity + Continuous time simulation\n engine_options = engine.get_options()\n engine_options[\"world\"][\"gravity\"] = np.zeros(6)\n engine_options[\"stepper\"][\"sensorsUpdatePeriod\"] = 0.0\n engine_options[\"stepper\"][\"controllerUpdatePeriod\"] = 0.0\n engine_options[\"stepper\"][\"logInternalStepperSteps\"] = True\n engine.set_options(engine_options)\n\n # Run simulation and extract some information from log data\n x0 = np.array([0.0, 0.0])\n tf = 1.0\n time, x_jiminy = simulate_and_get_state_evolution(\n engine, tf, x0, split=False)\n\n # Compute the associated analytical solution\n x_analytical = np.stack([sys(t) for t in time], axis=0)\n\n # Check if t = t_start / t_end were breakpoints.\n # Note that the accuracy for the log is 1us.\n t_break_err = np.concatenate([np.array([\n min(abs(f[\"t\"] - time)),\n min(abs(f[\"t\"] + f[\"dt\"] - time))])\n for f in F_register])\n self.assertTrue(np.allclose(t_break_err, 0.0, atol=TOLERANCE))\n\n # This test has a specific tolerance because the analytical solution is\n # an approximation since in practice, the external force is not\n # constant over its whole application duration but rather depends on\n # the orientation of the pole. For simplicity, the effect of the\n # impulse forces is assumed to be constant. 
As a result, the tolerance\n # cannot be tighter.\n self.assertTrue(np.allclose(x_jiminy, x_analytical, atol=1e-6))\n\n # Configure the engine: No gravity + Discrete time simulation\n engine_options = engine.get_options()\n engine_options[\"world\"][\"gravity\"] = np.zeros(6)\n engine_options[\"stepper\"][\"sensorsUpdatePeriod\"] = 0.0\n engine_options[\"stepper\"][\"controllerUpdatePeriod\"] = 0.0\n engine_options[\"stepper\"][\"logInternalStepperSteps\"] = True\n engine.set_options(engine_options)\n\n # Configure the engine: Continuous time simulation\n engine_options[\"stepper\"][\"sensorsUpdatePeriod\"] = 1.0e-3\n engine_options[\"stepper\"][\"controllerUpdatePeriod\"] = 1.0e-3\n engine.set_options(engine_options)\n\n # Run simulation\n time, x_jiminy = simulate_and_get_state_evolution(\n engine, tf, x0, split=False)\n\n # Compute the associated analytical solution\n x_analytical = np.stack([sys(t) for t in time], axis=0)\n\n # Check if t = t_start / t_end were breakpoints\n t_break_err = np.concatenate([np.array([\n min(abs(f[\"t\"] - time)),\n min(abs(f[\"t\"] + f[\"dt\"] - time))])\n for f in F_register])\n self.assertTrue(np.allclose(t_break_err, 0.0, atol=TOLERANCE))\n\n # Compare the numerical and analytical solution\n self.assertTrue(np.allclose(x_jiminy, x_analytical, atol=1e-6))",
"def draw(self):\n if self.alive:\n self.surface.set_at((self.x, self.y), self.dude_colour)",
"def OnDraw(self):\n self.SetCurrent()\n \n glClear(GL_COLOR_BUFFER_BIT)\n \n if self.arena != None:\n glBegin(GL_LINE_LOOP)\n [red, green, blue] = self.arena.GetColor()\n glColor3f(red, green, blue)\n for lines in self.arena.GetLines():\n [point1x, point1y] = lines.GetPosition(0)\n [point2x, point2y] = lines.GetPosition(1)\n glVertex2f(point1x, point1y)\n glVertex2f(point2x, point2y)\n \n \n glEnd()\n \n \n for pillar in self.pillar:\n glBegin(GL_LINE_LOOP)\n [red, green, blue] = pillar.GetColor()\n glColor3f(red, green, blue)\n for lines in pillar.GetLines():\n [point1x, point1y] = lines.GetPosition(0)\n [point2x, point2y] = lines.GetPosition(1)\n glVertex2f(point1x, point1y)\n glVertex2f(point2x, point2y)\n glEnd()\n\n\n#\t if self.temppoint != []:\n#\t \t glBegin(GL_POINTS)\n#\t \t glVertex2f(self.temppoint[0][0], self.temppoint[0][1])\n# glEnd()\n\t\n #Currentray is the ray where we have to worry about animation and changes.\n if self.currentray is not None: \n glBegin(GL_LINES)\n [red, green, blue] = self.currentray.GetColor()\n glColor3f(red, green, blue)\n\t\n [x, y] = [self.currentray.GetPoint().GetPosition(0), self.currentray.GetPoint().GetPosition(1)]\n glVertex2f(x, y)\n \n \n [x, y] = self.currentray.GetEndPoint(self.t)\n \n glVertex2f(x, y)\n\t\n glEnd()\n \n #These rays are static, since they have come to a stop at their points of collision.\n for i in self.ray:\n glBegin(GL_LINES)\n [red, green, blue] = i.GetColor()\n glColor3f(red, green, blue)\n \n [x, y] = [i.GetPoint().GetPosition(0), i.GetPoint().GetPosition(1)]\n glVertex(x, y)\n \n [x, y] = i.GetEndPoint(i.finaltime)\n glVertex2f(x, y)\n glEnd()\n\t\t\t\n \n self.SwapBuffers()\n \n return",
"def draw_ui(self, screen):\r\n screen.blit(self.surface, (self.pos.x, self.pos.y))\r\n screen.blit(self.fuel_surface, (self.pos.x, self.pos.y + 25))",
"def _pulse(self):\n self.u = dot(self.u_sfq, self.u)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the legend of the scene. | def update_legend(self):
pass | [
"def update_legend(self, *args):\n legend = self.axes.get_legend()\n if legend is not None:\n self._update_legend_visual(legend)\n self.redraw()",
"def legend(self, **kwargs):\n raise NotImplementedError",
"def __onpick(self, event):\n self.logger.debug(\"running\")\n legend_line = event.artist\n if legend_line in self.__legend_plot_links:\n plot_lines = self.__legend_plot_links[legend_line]\n else:\n self.logger.debug(\"done, no matched lines\")\n return\n for line in plot_lines:\n visible = not line.get_visible()\n line.set_visible(visible)\n if visible:\n legend_line.set_alpha(1.0)\n else:\n legend_line.set_alpha(0.2)\n self.figure.canvas.draw()\n self.logger.debug(\"done\")",
"def clegend(self, **extra):\n leg = self.current_figure.legend(**extra)\n self.current_figure_legend = {}\n sf = self.subfigures[self.current_figure] \n sf['legend'] = {}\n for label, line in zip(leg.get_lines(), self.current_figure.get_lines()):\n label.set_picker(5) # 5 pts tolerance\n sf['legend'][label] = line",
"def legend_place(self):\n logger.debug(\"Placing legend\")\n self.ax1.legend(loc=\"upper right\", ncol=2)",
"def legend_hide(self):\n raise NotImplementedError",
"def update_plot_annotations(self):\n self.match = self.match_selector.value\n self.match_data = self.data[self.match]\n self.teams = self.match_data.blue + self.match_data.red\n if self.title_div is not None:\n self.title_div.text = self.get_page_title()\n if self.video_row is not None:\n self.update_videos()\n if self.team_div is not None:\n self.team_div.text = self.get_team_links()\n if self.figure is not None:\n # Update plot title\n self.figure.title.text = self.get_plot_title()\n # Update Legend labels with new team numbers\n for idx, item in enumerate(self.figure.legend.items):\n self.figure.legend.items[idx] = models.LegendItem(\n label=self.teams[idx],\n renderers = item.renderers,\n index=idx)",
"def update_plot(self, *args) -> None:\n self.update_units_labels_and_values()\n self.light_source = self.calculate_light_source()\n x_label, y_label = self.get_axes_labels()\n\n self.axes.clear()\n self.axes.plot(*self.light_source.spectrum(), 'r')\n self.axes.set_xlabel(x_label)\n self.axes.set_ylabel(y_label)\n\n # recompute the ax.dataLim and update ax.viewLim using the new dataLim\n self.axes.relim()\n self.axes.autoscale_view()\n\n # re-draw the canvas\n self.fig.canvas.draw_idle()",
"def legend_init(self):\n\n leg = self.ax.legend(ncol=2, fancybox=True, shadow=True, loc='upper left',\n framealpha=0.5, prop=font)\n leg.get_frame().set_alpha(0.4)\n leg.get_frame().set_facecolor('LightGreen')\n\n lined = {}\n for legline, origline in zip(leg.get_lines(), self.lines):\n legline.set_picker(7) # 7 pts tolerance\n lined[legline] = origline\n\n return leg",
"def add_legend(self):\r\n # remove duplicate labels\r\n handles, labels = self.ax.get_legend_handles_labels()\r\n handle_list, label_list = [], []\r\n\r\n for handle, label in zip(handles, labels):\r\n if label not in label_list:\r\n handle_list.append(handle)\r\n label_list.append(label)\r\n leg = plt.legend(handle_list, label_list, loc=\"lower left\", scatterpoints=1, fontsize=12)\r\n\r\n # fix size of point in legend\r\n for i in range(len(leg.legendHandles)):\r\n leg.legendHandles[i]._sizes = [90]",
"def setLegendData(self, *args, **kwargs):\n self.legendData = buildLegend(*args, key=self.legendKeyFunc, **kwargs)",
"def add_legend(self):\n\n # some legend defaults if no kwargs are passed\n if self.legend_kwargs is None:\n self.legend_kwargs = {'bbox_to_anchor': (1, 0.8),\n 'loc': 1,\n 'handlelength': 0.8}\n\n self.ax.legend(self.df.prod_ion.values, **self.legend_kwargs)",
"def update_infos_on_change(self):\n self.reset_graph()\n self.set_time_complexity_label(self.array_graph.algorithm.time_complexity)\n self.set_space_complexity_label(self.array_graph.algorithm.space_complexity)",
"def finalize(self, **kwargs):\n self.set_title(\"TSNE Projection of {} Documents\".format(self.n_instances_))\n\n # Remove the ticks\n self.ax.set_yticks([])\n self.ax.set_xticks([])\n\n # Add the legend outside of the figure box.\n if not all(self.classes_ == np.array([self.NULL_CLASS])):\n box = self.ax.get_position()\n self.ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n manual_legend(\n self,\n self.classes_,\n self.color_values_,\n loc=\"center left\",\n bbox_to_anchor=(1, 0.5),\n )",
"def move_legend(ax, new_loc, **kws):\n old_legend = ax.legend_\n handles = old_legend.legendHandles\n labels = [t.get_text() for t in old_legend.get_texts()]\n title = old_legend.get_title().get_text() \n ax.legend(handles, labels, loc=new_loc, title=title, **kws)\n pass",
"def __init__(self, konfig, parent=None, width=6, height=5, dpi=100):\r\n #osnovna definicija figure, axes i canvasa\r\n self.fig = Figure(figsize=(width, height), dpi=dpi)\r\n self.axes = self.fig.add_subplot(111)\r\n FigureCanvas.__init__(self, self.fig)\r\n self.setParent(parent)\r\n FigureCanvas.setSizePolicy(\r\n self,\r\n QtGui.QSizePolicy.Expanding,\r\n QtGui.QSizePolicy.Expanding)\r\n FigureCanvas.updateGeometry(self)\r\n #podrska za kontekstni meni\r\n self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\r\n #bitni memberi\r\n self.konfig = konfig #konfig dto objekt\r\n self.data = {} #prazan dict koji ce sadrzavati frejmove sa podacima\r\n self.gKanal = None #kanal id glavnog kanala\r\n self.pocetnoVrijeme = None #min zadano vrijeme za prikaz\r\n self.zavrsnoVrijeme = None #max zadano vrijeme za prikaz\r\n self.statusGlavniGraf = False #status glavnog grafa (da li je glavni kanal nacrtan)\r\n self.statusHighlight = False #status prikaza oznacene izabrane tocke\r\n self.lastHighlight = (None, None) #kooridinate zadnjeg highlighta\r\n self.legenda = None #placeholder za legendu\r\n self.highlightSize = 15 #dynamic size za highlight (1.5 puta veci od markera)\r\n self.xlim_original = [0, 1] #defaultna definicija raspona x osi grafa (zoom)\r\n self.ylim_original = [0, 1] #defaultna definicija raspona y osi grafa (zoom)\r\n self.zoomStack = [] #stack za zoom levele\r\n\r\n self.initialize_interaction(self.span_select, self.rect_zoom)\r\n self.reinit_ticks_grid_legend()",
"def updateColor(self):\n\t\tpass",
"def change_labels(plot_obj, labels):\n for text, label in zip(plot_obj.legend_.texts, labels):\n text.set_text(label)",
"def createLegend(self): \n if self.model.legend:\n template = \"\"\" {e}\n var legend = d3.legend({s})\n .csv(\"data/legend.csv\")\n .position({p})\n .{f}(\"{a}\");\n {s}.call(legend);\"\"\"\n\n func = \"shape\"\n arg = \"square\"\n \n # Find the main layer and check the first symbol to determine the correct JS function call\n m = self.model.getMainLayer()\n if m.renderers[0].symbols[0].hasImage() == True:\n func = \"svgImg\"\n head, tail = os.path.split(m.renderers[0].symbols[0].path)\n arg = \"img/{0}\".format(tail)\n else:\n arg = m.renderers[0].symbols[0].getShape() \n \n ext = \"\"\n svg = \"svg\"\n pos = self.model.selectedLegendPosition\n \n \n if self.model.selectedLegendPosition == 4:\n # external legend has to have a different hosting svg element\n ext = \"\"\"var extLgnd = d3.select(\"#extLgnd\")\n .append(\"svg\");\\n\"\"\"\n svg = \"extLgnd\"\n\n # format and return\n return template.format(\n e = ext,\n f = func,\n a = arg,\n s = svg,\n p = pos\n )\n \n else:\n return \"\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate the labels for coordinates and position. | def generate_labels(self, label, has_coordinates, has_position):
return (label, None) | [
"def generate_labels(self, label, has_coordinates, has_position):\n return (None, label) if has_position else (label, None)",
"def labels(self):\n xlabel=''\n ylabel=''\n if 'calculation' in self.variables[self.xy[0]]:\n xlabel+=self.variables[self.xy[0]]['calculation']+' '\n xlabel+=self.variables[self.xy[0]]['pretty_name']\n if 'calculation' in self.variables[self.xy[1]]:\n ylabel+=self.variables[self.xy[1]]['calculation']+' '\n ylabel+=self.variables[self.xy[1]]['pretty_name']\n\n return (ylabel,xlabel)",
"def make_labels(self):\n for lab in self.label_ids: #init label objects\n self.labels[lab] = Label(self.id, lab)\n for sentence in self.sentences: #dump stuff into the label objects\n for i in range(1, len(sentence.labels)):\n lab = sentence.labels[i]\n self.labels[lab].add_sentence(sentence.words[i], sentence.probs[lab], sentence.surprisal[lab])",
"def _generate_labels(self, sentence: str, anno_list:list) -> list:\n anno_list.sort(key = lambda x:x[2])\n last_pos = 0\n sentence_lst = list()\n label_lst = list()\n for item in anno_list:\n start_pos = item[2]\n end_pos = item[3]\n part = word_tokenize(sentence[last_pos:start_pos])\n sentence_lst.extend(part)\n label_lst.extend([('O', '')] * len(part))\n part = word_tokenize(sentence[start_pos:end_pos])\n sentence_lst.extend(part)\n label_lst.extend([('B', item[5])] + [('I', item[5])] * (len(part)-1))\n last_pos = end_pos\n part = word_tokenize(sentence[last_pos:])\n sentence_lst.extend(part)\n label_lst.extend([('O', '')] * len(part))\n assert len(sentence_lst) == len(label_lst), f\"ERROR3: Label and tokenized sentence length mismatch!\\n\" \\\n f\"{sentence}\\n{list(itertools.zip_longest(sentence_lst, label_lst))}\\n{anno_list}\"\n return (sentence_lst, label_lst)",
"def buildLabelCoords( self, geo_ids):\n data = []\n row = []\n col = []\n for i, val in enumerate(geo_ids):\n gv = GEOvector(val, self.dictionary, self._conn, stopwords=self.stopwords)\n self.addVecToCoord(gv,i, data, row, col)\n return (data, row, col)",
"def label_gen(index):\n # print(index)\n #print(type(index))\n #assert isinstance(index, (float, int))\n\n # generates unique parameter strings based on index of peak\n pref = str(int(index))\n comb = 'a' + pref + '_'\n\n cent = 'center'\n sig = 'sigma'\n amp = 'amplitude'\n fract = 'fraction'\n\n # creates final objects for use in model generation\n center = comb + cent\n sigma = comb + sig\n amplitude = comb + amp\n fraction = comb + fract\n \n #assert isinstance((center, sigma, amplitude, fraction, comb), str)\n\n return center, sigma, amplitude, fraction, comb",
"def create_labels(self):\n for name in self.names:\n temp_label = Label(text=name)\n self.root.ids.main.add_widget(temp_label)",
"def generate_labels(self):\n items_for_order = self._generate_items_for_order(exclude_preassembled_parts = True)\n labels = []\n for item, relationship_to_quantity_mapping in items_for_order.iteritems():\n for relationship, quantity in relationship_to_quantity_mapping.iteritems():\n while quantity > 0:\n label_quantity = min(quantity, item.max_per_container)\n labels.append(Label(item, relationship, label_quantity))\n quantity -= label_quantity\n\n labels = sorted(labels, key=attrgetter('name'))\n return labels",
"def labels(observation):\n raise NotImplementedError",
"def show_labels(args):\n pld.setGlobalFontSize(16)\n fig, ax = plt.subplots(2, 2, figsize=(8, 6))\n keys = ['MoE Cost', 'MoE Gate', 'k-Means-3', 'k-Means-5']\n titles = ['MoE I', 'MoE II', '$k$-Means-3', '$k$-Means-5']\n moelbl = np.load('data/pen/moe_label.npy')\n momlbl = np.load('data/pen/mom_label.npy')\n gatelbl = np.load('data/pen/gate_expert_label.npy')\n kmeanlbl = np.load('data/pen/pca_kmean_label.npz')\n k3lbl = kmeanlbl['3']\n k5lbl = kmeanlbl['5']\n\n data = npload(cfg['file_path'], cfg['uniqueid'])\n x0 = data[cfg['x_name']]\n\n markers = ['s', 'o', 'x', 'd', '*']\n colors = ['b', 'g', 'r', 'c', 'k']\n cm = plt.get_cmap('jet')\n norm = mpl.colors.Normalize(0, 5)\n\n def show_label_on_axis(ax, x, lbl):\n nlbl = np.amax(lbl) + 1\n ax.imshow(np.reshape(lbl, (61, 21)).T, cmap=cm, origin='lower', norm=norm, extent=[0, 2*np.pi, -2.0, 2.0])\n # for i in range(nlbl):\n # mask = lbl == i\n # ax.scatter(x[mask, 0], x[mask, 1], s=3, marker=markers[i], color=colors[i])\n\n show_label_on_axis(ax[0][0], x0, moelbl)\n ax[0][0].set_title(titles[0])\n show_label_on_axis(ax[0][1], x0, gatelbl)\n ax[0][1].set_title(titles[1])\n show_label_on_axis(ax[1][0], x0, k3lbl)\n ax[1][0].set_title(titles[2])\n show_label_on_axis(ax[1][1], x0, k5lbl)\n ax[1][1].set_title(titles[3])\n ax[1][0].set_xlabel(r'$\\theta$')\n ax[1][1].set_xlabel(r'$\\theta$')\n ax[0][0].set_xticklabels([])\n ax[0][1].set_xticklabels([])\n ax[0][0].set_ylabel(r'$\\omega$')\n ax[1][0].set_ylabel(r'$\\omega$')\n ax[0][1].set_yticklabels([])\n ax[1][1].set_yticklabels([])\n fig.tight_layout()\n fig.savefig('gallery/pen/pen_label_assign.pdf')\n plt.show()",
"def label_plot(self):\n assert self.labels is not None\n\n centroids, vor = self.codebook.voronoi\n regions, vertices = voronoi_finite_polygons(vor)\n normalized_codebook = normalize(node.vector for node in self.codebook)\n for codebook_vector, region in zip(normalized_codebook, regions):\n polygon = vertices[region]\n plt.fill(*zip(*polygon), color=codebook_vector[:3] + [.6])\n\n xs, ys = zip(*centroids)\n plt.plot(xs, ys, 'ko', ms=1)\n plt.axis('equal')\n plt.xlim(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)\n plt.ylim(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)\n\n for label in set(self.labels) - set([None]):\n class_node = max(self.codebook, key=lambda node: node.labels[label])\n plt.text(class_node.x, class_node.y, label,\n horizontalalignment='center', verticalalignment='center')\n\n plt.title('Voronoi label plot')\n plt.show()",
"def _make_labels(n_pos, n_neg):\n\n Y = np.zeros((n_pos + n_neg))\n Y[:n_pos] = 1\n\n return Y",
"def draw_name():\n dist_from_top = 35\n label1 = pyglet.text.Label(\"Chess\", font_name='Courier New', font_size=16, bold=True,\n x=label_calib, y=w_height - dist_from_top,\n anchor_x='center', anchor_y='center', color=side_label_color)\n label2 = pyglet.text.Label(\"II\", font_name='Courier New', font_size=16, bold=True,\n x=label_calib, y=w_height - dist_from_top - 20,\n anchor_x='center', anchor_y='center', color=side_label_color)\n label1.draw()\n label2.draw()",
"def label(self, classes, ground_truth_func):\n for img in self.imgs:\n labels = ground_truth_func(img, classes)\n if len(labels) == 0:\n return\n text_label = open(get_label_path(img), \"w+\")\n for i, (class_display_name, x_cent, y_cent, w, h) in enumerate(labels):\n class_num = classes.index(class_display_name)\n if i > 0:\n text_label.write(\"\\n\")\n text_label.write(f\"{class_num} {x_cent} {y_cent} {w} {h}\")\n text_label.close()",
"def label_generator(self) -> str:\n label = LABEL + str(self.label_counter)\n self.label_counter += 1\n return label",
"def convertLabels(self, axs,pos):\n j=-1\n for k in range(len(plot_columns)):\n ax=axs[k]\n colName=plot_columns[k]\n if colName in conversion_columns:\n print colName\n j=j+1 \n unq_values,index_values=conversion_params[j]\n if pos=='x':\n ax.set_xticks(index_values)\n ax.set_xticklabels(unq_values,rotation='horizontal')\n if pos=='y':\n ax.set_yticks(index_values)\n ax.set_yticklabels(unq_values,rotation='horizontal')",
"def autolabel(rects, xpos='center'):\n\n xpos = xpos.lower() # normalize the case of the parameter\n ha = {'center': 'center', 'right': 'left', 'left': 'right'}\n offset = {'center': 0.5, 'right': 0.57, 'left': 0.43} # x_txt = x + w*off\n\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width() * offset[xpos], 1.00 * height,\n '{}'.format(height), ha=ha[xpos], va='bottom')",
"def get_labels(self):\n return [\"0\",\"1\"]",
"def _add_label(self, labelname, label_dict, firstpoint, secondpoint, boxtype=-1, original_coords=False):\n\t\tix,iy = firstpoint\n\t\tx,y = secondpoint\n\t\tupper_left = (np.min([ix,x]), np.min([iy,y]))\n\t\tlower_right = (np.max([ix,x]), np.max([iy,y]))\n\t\tself.label_dict[labelname] = [upper_left, lower_right, boxtype]\n\t\t\n\t\tself._add_log_label(self.label_dict[labelname])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Draw a marker into the scene. | def draw_marker(self, position, *, color, marker_symbol, label, size):
marker_style = dict(size=size, color=color, symbol=marker_symbol)
marker_trace = go.Scatter3d(
x=position[0],
y=position[1],
z=position[2],
marker=marker_style,
name=label,
showlegend=False if label is None else True,
)
self.figure.add_trace(marker_trace)
return marker_trace | [
"def drawMarker(self, id, sidePixels, _img=..., borderBits=...) -> _img:\n ...",
"def mark(self, x,y,z):\n sphere = vtk.vtkSphereSource()\n sphere.SetRadius(3)\n res = 20\n sphere.SetThetaResolution(res)\n sphere.SetPhiResolution(res)\n sphere.SetCenter(x,y,z)\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputConnection(sphere.GetOutputPort())\n\n self.marker = vtk.vtkActor()\n self.marker.SetMapper(mapper)\n self.rens[0].AddActor(self.marker)\n self.marker.GetProperty().SetColor( (1,0,0) )\n self.markers.append(self.marker) #keep track of all marker actors\n self.Render()",
"def marker(self, text):\n pass",
"def draw_impulse(self, position, *, color, label, size):\n return self.draw_marker(\n position, color=color, label=label, marker_symbol=\"x\", size=size\n )",
"def SoMarkerSet_addMarker(*args) -> \"void\":\n return _coin.SoMarkerSet_addMarker(*args)",
"def add_marker(self, pose, color, scale):\n marker = self._mks.markers[self._mk_idx]\n marker.scale.x = scale\n marker.scale.y = scale\n marker.scale.z = scale\n marker.color.r = color[0]\n marker.color.g = color[1]\n marker.color.b = color[2]\n if len(color) >= 4:\n marker.color.a = color[3]\n else:\n marker.color.a = 1.0\n marker.pose = pose\n marker.id = self._mk_idx\n self._mk_idx += 1\n self._mks.markers.append(marker)",
"def draw_markers(self, node, font_size, fill_stroke):\n if not node.vertices:\n return\n\n markers = {}\n common_marker = parse_url(node.get('marker')).fragment\n for position in ('start', 'mid', 'end'):\n attribute = f'marker-{position}'\n if attribute in node.attrib:\n markers[position] = parse_url(node.attrib[attribute]).fragment\n else:\n markers[position] = common_marker\n\n angle1, angle2 = None, None\n position = 'start'\n\n while node.vertices:\n # Calculate position and angle\n point = node.vertices.pop(0)\n angles = node.vertices.pop(0) if node.vertices else None\n if angles:\n if position == 'start':\n angle = pi - angles[0]\n else:\n angle = (angle2 + pi - angles[0]) / 2\n angle1, angle2 = angles\n else:\n angle = angle2\n position = 'end'\n\n # Draw marker\n marker = markers[position]\n if not marker:\n position = 'mid' if angles else 'start'\n continue\n\n marker_node = self.markers.get(marker)\n\n # Calculate position, scale and clipping\n if 'viewBox' in marker_node.attrib:\n marker_width, marker_height = self.point(\n marker_node.get('markerWidth', 3),\n marker_node.get('markerHeight', 3),\n font_size)\n scale_x, scale_y, translate_x, translate_y = preserve_ratio(\n self, marker_node, font_size, marker_width, marker_height)\n\n clip_x, clip_y, viewbox_width, viewbox_height = (\n marker_node.get_viewbox())\n\n align = marker_node.get(\n 'preserveAspectRatio', 'xMidYMid').split(' ')[0]\n if align == 'none':\n x_position = y_position = 'min'\n else:\n x_position = align[1:4].lower()\n y_position = align[5:].lower()\n\n if x_position == 'mid':\n clip_x += (viewbox_width - marker_width / scale_x) / 2\n elif x_position == 'max':\n clip_x += viewbox_width - marker_width / scale_x\n\n if y_position == 'mid':\n clip_y += (\n viewbox_height - marker_height / scale_y) / 2\n elif y_position == 'max':\n clip_y += viewbox_height - marker_height / scale_y\n\n clip_box = (\n clip_x, clip_y,\n marker_width / scale_x, marker_height / scale_y)\n else:\n marker_width, marker_height = self.point(\n marker_node.get('markerWidth', 3),\n marker_node.get('markerHeight', 3),\n font_size)\n box = self.calculate_bounding_box(marker_node, font_size)\n if is_valid_bounding_box(box):\n scale_x = scale_y = min(\n marker_width / box[2], marker_height / box[3])\n else:\n scale_x = scale_y = 1\n translate_x, translate_y = self.point(\n marker_node.get('refX'), marker_node.get('refY'),\n font_size)\n clip_box = None\n\n # Scale\n if marker_node.get('markerUnits') != 'userSpaceOnUse':\n scale = self.length(node.get('stroke-width', 1), font_size)\n scale_x *= scale\n scale_y *= scale\n\n # Override angle\n node_angle = marker_node.get('orient', 0)\n if node_angle not in ('auto', 'auto-start-reverse'):\n angle = radians(float(node_angle))\n elif node_angle == 'auto-start-reverse' and position == 'start':\n angle += radians(180)\n\n # Draw marker path\n for child in marker_node:\n self.stream.push_state()\n\n self.stream.transform(\n scale_x * cos(angle), scale_x * sin(angle),\n -scale_y * sin(angle), scale_y * cos(angle),\n *point)\n self.stream.transform(e=-translate_x, f=-translate_y)\n\n overflow = marker_node.get('overflow', 'hidden')\n if clip_box and overflow in ('hidden', 'scroll'):\n self.stream.push_state()\n self.stream.rectangle(*clip_box)\n self.stream.pop_state()\n self.stream.clip()\n\n self.draw_node(child, font_size, fill_stroke)\n self.stream.pop_state()\n\n position = 'mid' if angles else 'start'",
"def mark(self):\n\n self.is_marked = True\n self.show()",
"def GLRender(self, action: 'SoGLRenderAction') -> \"void\":\n return _coin.SoMarkerSet_GLRender(self, action)",
"def addMarker(s):\n sel = cmds.ls(sl=True, type=\"joint\")\n if sel:\n for joint in sel:\n if joint not in s.markers:\n s.markers[joint] = Helper(joint)\n else:\n cmds.confirmDialog(t=\"Oh no...\", m=\"You must select some Joints.\")",
"def _visualize_location(self, base_pose, location):\n # Convert KDL object to geometry message\n base_pose = kdl_frame_to_pose_msg(base_pose)\n\n # Create the marker\n m = Marker()\n if location == \"one\":\n m.id = 1\n m.color.r = 1\n elif location == \"two\":\n m.id = 2\n m.color.g = 1\n elif location == \"three\":\n m.id = 3\n m.color.b = 1\n else:\n m.id = 4\n m.color.r = 1\n m.color.g = 1\n m.color.b = 1\n m.color.a = 1\n m.pose = base_pose\n m.header.frame_id = \"map\"\n m.header.stamp = rospy.Time.now()\n m.type = 0 # Arrow\n m.scale.x = 1.0\n m.scale.y = 0.2\n m.scale.z = 0.2\n m.action = 0\n m.ns = \"arrow\"\n self._waypoint_pub.publish(m)\n m.type = 9\n m.text = location\n m.ns = \"text\"\n m.pose.position.z = 0.5\n self._waypoint_pub.publish(m)",
"def makelineMarker(self, XY, ID, action = Marker.ADD, new=True):\n \n RVIZmarker = Marker()\n \n RVIZmarker.header.frame_id = '/map'\n RVIZmarker.header.stamp = rospy.Time(0)\n \n RVIZmarker.ns = 'Frontier'\n RVIZmarker.id = ID\n RVIZmarker.type = Marker.POINTS\n RVIZmarker.action = action\n \n # Define the scale (meter scale)\n RVIZmarker.scale.x = 0.05\n RVIZmarker.scale.y = 0.05\n \n # Set the color \n RVIZmarker.color.a = 1.0\n RVIZmarker.color.g = 0.0\n \n if new:\n RVIZmarker.color.r = 0.0\n RVIZmarker.color.b = 1.0\n else:\n RVIZmarker.color.r = 1.0\n RVIZmarker.color.b = 0.0\n \n # Store all of the real world XY coordinates\n for i in range(len(XY)):\n pnt = Point()\n x,y = self.grid2xy(XY[i][0], XY[i][1])\n pnt.x = x\n pnt.y = y\n pnt.z = 0.0\n RVIZmarker.points.append(pnt)\n \n #Store the marker\n if len(self.lineMarker.markers) <= ID:\n self.lineMarker.markers.append(RVIZmarker)\n else:\n self.lineMarker.markers[ID] = RVIZmarker",
"def draw_marks(image, marks, color=(255, 0, 255), thick=1):\n for idx, mark in enumerate(marks):\n cv2.circle(image, (int(mark[0]), int(mark[1])),\n thick, color, -1, cv2.LINE_AA)\n # Visualization cropped image\n # cv2.imshow(\"image\", image)\n # cv2.waitKey(0)",
"def render(self, point):\n self._sprite.render(point)",
"def _on_mark(self, evt):\r\n mark_color = 'k'\r\n if self.sub_plots.color.lower() == 'black':\r\n mark_color = 'white'\r\n if self.sub_plots.has_selection:\r\n #delete markers\r\n for sub_plot in self.sub_plots.sub_plots:\r\n for line in sub_plot.selection:\r\n sub_plot.axes.lines.remove(line)\r\n self.canvas.draw()\r\n else:\r\n for i, sub_plot in enumerate(self.sub_plots.sub_plots):\r\n x1, x2, y1, y2 = sub_plot.axes.axis()\r\n x = [x1, x2, x2, x1, x1]\r\n y = [y1, y1, y2, y2, y1]\r\n sub_plot.selection = self.redraw(x,y, hold = True,\r\n limits = (x1,x2,y1,y2),\r\n index = i,\r\n color = mark_color, linewidth = 2.0)\r\n self.sub_plots.has_selection = not self.sub_plots.has_selection",
"def show_position(lon, lat, image_name):\r\n\r\n # The map is created with its corresponding size\r\n m_bcn = sm.StaticMap(SIZE, SIZE)\r\n\r\n # The marker is added\r\n marker_outline = sm.CircleMarker((lon, lat), 'white', 30)\r\n marker = sm.CircleMarker((lon, lat), 'blue', 22)\r\n m_bcn.add_marker(marker_outline)\r\n m_bcn.add_marker(marker)\r\n\r\n # The image is saved with its corresponding name\r\n image = m_bcn.render()\r\n image.save(image_name)",
"def draw_glyph(self, glyph_: Glyph, at: Point):\n pass",
"def draw(self, pen):\n pointPen = PointToSegmentPen(pen)\n self.drawPoints(pointPen)",
"def draw_to_point(self, x, y):\n if self.last == (x, y):\n return\n\n if self.drawing == False:\n self.start()\n\n # self.codes.append('G1 X%0.2f Y%0.2f F%0.2f' % (x, y+self.config['y_offset'], self.config['xy_feedrate']))\n\n # self.codes.append('G1 X{0:.2f} Y{1:.2f} F{2:.2f}'\n # .format(x, y + self.config['y_offset'], self.config['drawing_feedrate']))\n self.codes.append('G0 Z{0:.2f}'.format(self.config['z_offset']))\n self.codes.append('G1 X{0:.2f} Y{1:.2f} F{2:.2f}'\n .format(y, -x, self.config['drawing_feedrate']))\n\n self.last = (x, y)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Draw a sphere into the scene. | def draw_sphere(self, position, *, color, label, radius):
xx, yy, zz = generate_sphere(radius, position)
sphere = go.Surface(
x=xx,
y=yy,
z=zz,
colorscale=[[0, color], [1, color]],
cauto=False,
cmin=1,
cmax=1,
showscale=False,
name=label,
showlegend=False if label is None else True,
)
self.figure.add_trace(sphere)
return sphere | [
"def make_sphere(sides, rings, width):\n FreeCAD.newDocument()\n generated_sphere = sphere(sides, rings, width)\n Part.show(generated_sphere)",
"def add_sphere(self, centre, radius, material_data):\n self.scene.add(Sphere(centre, radius, material_from_data(material_data)))",
"def add_sphere(pos_x, pos_y, pos_z):\n bpy.ops.mesh.primitive_uv_sphere_add(\n segments=6,\n ring_count=6,\n size=0.04,\n location=(pos_x, pos_y, pos_z),\n rotation=(0, 0, 0),\n layers=(\n True, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False,\n False, False, False, False\n )\n )",
"def sphere_3d(self, x, y, z, radius):\n self._sphere_3d(x, y, z, radius)",
"def sphere(r):\n S = 4 * pi * (r**2)\n return S",
"def sphere_actor(center=(0, 0, 0), radius=0.5, resolution=32,\n color=colors.purple, opacity=1.0):\n source = tvtk.SphereSource(center=center, radius=radius,\n theta_resolution=resolution,\n phi_resolution=resolution)\n mapper = tvtk.PolyDataMapper(input=source.output)\n prop = tvtk.Property(opacity=opacity, color=color)\n actor = tvtk.Actor(mapper=mapper, property=prop)\n return actor",
"def sphere3(nx,ny,r=1,bot=-90,top=90):\n base = Formex( [[[0,0,0],[1,0,0],[1,1,0]],\n [[1,1,0],[0,1,0],[0,0,0]]],\n [1,2])\n grid = base.replic2(nx,ny,1,1)\n s = float(top-bot) / ny\n return grid.translate([0,bot/s,1]).spherical(scale=[360./nx,s,r])",
"def drawHemisphere(self):\n\n # clearing axes before drawing, only static visible, dynamic only when content\n # is available. visibility is handled with their update method\n axes = self.setupAxes(widget=self.hemisphereMat)\n if self.ui.showPolar.isChecked():\n axesP = self.setupAxesPolar(widget=self.hemisphere2Mat)\n else:\n axesP = None\n\n # calling renderer\n self.drawHemisphereStatic(axes=axes)\n self.drawHemisphereMoving(axes=axes)\n self.drawHemisphereStars(axes=axes)\n if axesP:\n self.drawHemisphereStatic(axes=axesP, polar=True)\n self.drawHemisphereMoving(axes=axesP, polar=True)\n self.drawHemisphereStars(axes=axesP, polar=True)\n\n # drawing the canvas\n axes.figure.canvas.draw()\n if axesP:\n axesP.figure.canvas.draw()",
"def polySphere(texture=int, axis=\"string\", radius=\"string\", createUVs=int, constructionHistory=bool, subdivisionsY=int, subdivisionsX=int, name=\"string\"):\n pass",
"def Sphere (radius, count):\n return Ellipsoid(radius, radius, count)",
"def test_sphere_splat_render_along_ray(out_dir, cam_pos, width, height, fovy, focal_length, use_quartic,\n b_display=False):\n import copy\n print('render sphere along ray')\n sampling_time = []\n rendering_time = []\n\n num_samples = width * height\n\n large_scene = copy.deepcopy(SCENE_TEST)\n\n large_scene['camera']['viewport'] = [0, 0, width, height]\n large_scene['camera']['eye'] = tch_var_f(cam_pos)\n large_scene['camera']['fovy'] = np.deg2rad(fovy)\n large_scene['camera']['focal_length'] = focal_length\n large_scene['objects']['disk']['material_idx'] = tch_var_l(np.zeros(num_samples, dtype=int).tolist())\n large_scene['materials']['albedo'] = tch_var_f([[0.6, 0.6, 0.6]])\n large_scene['tonemap']['gamma'] = tch_var_f([1.0]) # Linear output\n\n x, y = np.meshgrid(np.linspace(-1, 1, width), np.linspace(-1, 1, height))\n #z = np.sqrt(1 - np.min(np.stack((x ** 2 + y ** 2, np.ones_like(x)), axis=-1), axis=-1))\n unit_disk_mask = (x ** 2 + y ** 2) <= 1\n z = np.sqrt(1 - unit_disk_mask * (x ** 2 + y ** 2))\n\n # Make a hemi-sphere bulging out of the xy-plane scene\n z[~unit_disk_mask] = 0\n pos = np.stack((x.ravel(), y.ravel(), z.ravel() - 5, np.ones(num_samples)), axis=1)\n\n # Normals outside the sphere should be [0, 0, 1]\n x[~unit_disk_mask] = 0\n y[~unit_disk_mask] = 0\n z[~unit_disk_mask] = 1\n\n normals = np_normalize(np.stack((x.ravel(), y.ravel(), z.ravel(), np.zeros(num_samples)), axis=1))\n\n if b_display:\n plt.ion()\n plt.figure()\n plt.subplot(131)\n plt.imshow(pos[..., 0].reshape((height, width)))\n plt.subplot(132)\n plt.imshow(pos[..., 1].reshape((height, width)))\n plt.subplot(133)\n plt.imshow(pos[..., 2].reshape((height, width)))\n\n plt.figure()\n plt.imshow(normals[..., 2].reshape((height, width)))\n\n ## Convert to the camera's coordinate system\n #Mcam = lookat(eye=large_scene['camera']['eye'], at=large_scene['camera']['at'], up=large_scene['camera']['up'])\n\n pos_CC = tch_var_f(pos) #torch.matmul(tch_var_f(pos), Mcam.transpose(1, 0))\n\n large_scene['objects']['disk']['pos'] = pos_CC\n large_scene['objects']['disk']['normal'] = None # Estimate the normals tch_var_f(normals)\n # large_scene['camera']['eye'] = tch_var_f([-10., 0., 10.])\n # large_scene['camera']['eye'] = tch_var_f([2., 0., 10.])\n large_scene['camera']['eye'] = tch_var_f([-5., 0., 0.])\n\n # main render run\n start_time = time()\n res = render_splats_along_ray(large_scene, use_quartic=use_quartic)\n rendering_time.append(time() - start_time)\n\n # Test cam_to_world\n res_world = cam_to_world(res['pos'].reshape(-1, 3), res['normal'].reshape(-1, 3), large_scene['camera'])\n\n im = get_data(res['image'])\n im = np.uint8(255. 
* im)\n\n depth = get_data(res['depth'])\n depth[depth >= large_scene['camera']['far']] = large_scene['camera']['far']\n\n if b_display:\n\n\n plt.figure()\n plt.imshow(im, interpolation='none')\n plt.title('Image')\n plt.savefig(out_dir + '/fig_img_orig.png')\n\n plt.figure()\n plt.imshow(depth, interpolation='none')\n plt.title('Depth Image')\n #plt.savefig(out_dir + '/fig_depth_orig.png')\n\n plt.figure()\n pos_world = get_data(res_world['pos'])\n posx_world = pos_world[:, 0].reshape((im.shape[0], im.shape[1]))\n posy_world = pos_world[:, 1].reshape((im.shape[0], im.shape[1]))\n posz_world = pos_world[:, 2].reshape((im.shape[0], im.shape[1]))\n plt.subplot(131)\n plt.imshow(posx_world)\n plt.title('x_world')\n plt.subplot(132)\n plt.imshow(posy_world)\n plt.title('y_world')\n plt.subplot(133)\n plt.imshow(posz_world)\n plt.title('z_world')\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(pos_world[:, 0], pos_world[:, 1], pos_world[:, 2], s=1.3)\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n\n plt.figure()\n pos_world = get_data(res['pos'].reshape(-1, 3))\n posx_world = pos_world[:, 0].reshape((im.shape[0], im.shape[1]))\n posy_world = pos_world[:, 1].reshape((im.shape[0], im.shape[1]))\n posz_world = pos_world[:, 2].reshape((im.shape[0], im.shape[1]))\n plt.subplot(131)\n plt.imshow(posx_world)\n plt.title('x_CC')\n plt.subplot(132)\n plt.imshow(posy_world)\n plt.title('y_CC')\n plt.subplot(133)\n plt.imshow(posz_world)\n plt.title('z_CC')\n\n imsave(out_dir + '/img_orig.png', im)\n #imsave(out_dir + '/depth_orig.png', im_depth)\n\n # hold matplotlib figure\n plt.ioff()\n plt.show()",
"def plot_on_sphere(xyz_data, normalize=False):\n #make 3d figure\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n \n #build up a sphere\n u, v = np.mgrid[0:2*np.pi:200j, 0:np.pi:100j]\n x = np.cos(u)*np.sin(v)\n y = np.sin(u)*np.sin(v)\n z = np.cos(v)\n \n #plot the sphere\n ax.plot_surface(x, y, z, color=\"k\", alpha = .1)\n \n #normalize data then plot it\n if normalize:\n xyz_normalized = normalize_rows(xyz_data)\n ax.scatter3D(xyz_normalized[:,0], xyz_normalized[:,1], xyz_normalized[:,2],s=0.1)\n else:\n ax.scatter3D(xyz_data[:,0], xyz_data[:,1], xyz_data[:,2],s=0.1)\n \n return fig, ax",
"def sphere(radius):\n M = np.diag([1., 1., 1., -(radius ** 2)])\n if radius < 0:\n M *= -1\n return Quadric(M)",
"def sphere(degree=int, axis=\"string\", pivot=\"string\", sections=int, radius=\"string\", tolerance=\"string\", object=bool, useTolerance=bool, polygon=int, spans=int, nodeState=int, name=\"string\", heightRatio=float, startSweep=int, caching=bool, endSweep=int, constructionHistory=bool):\n pass",
"def sphere_simple(radius, precision=0.0001):\n center = np.ceil(radius)\n extent = center*2 + 1\n rr, cc, zz = np.mgrid[0:extent, 0:extent, 0:extent]\n\n return sphere(rr, cc, zz, (center, center, center), radius, precision)",
"def test_sphere_splat_NDC(out_dir, cam_pos, width, height, fovy, focal_length, b_display=False):\n import copy\n print('render sphere')\n sampling_time = []\n rendering_time = []\n\n num_samples = width * height\n\n large_scene = copy.deepcopy(SCENE_TEST)\n\n large_scene['camera']['viewport'] = [0, 0, width, height]\n large_scene['camera']['eye'] = tch_var_f(cam_pos)\n large_scene['camera']['fovy'] = np.deg2rad(fovy)\n large_scene['camera']['focal_length'] = focal_length\n large_scene['objects']['disk']['material_idx'] = tch_var_l(np.zeros(num_samples, dtype=int).tolist())\n large_scene['materials']['albedo'] = tch_var_f([[0.6, 0.6, 0.6]])\n large_scene['tonemap']['gamma'] = tch_var_f([1.0]) # Linear output\n\n x, y = np.meshgrid(np.linspace(-1, 1, width), np.linspace(-1, 1, height))\n #z = np.sqrt(1 - np.min(np.stack((x ** 2 + y ** 2, np.ones_like(x)), axis=-1), axis=-1))\n unit_disk_mask = (x ** 2 + y ** 2) <= 1\n z = np.sqrt(1 - unit_disk_mask * (x ** 2 + y ** 2))\n\n # Make a hemi-sphere bulging out of the xy-plane scene\n z[~unit_disk_mask] = 0\n pos = np.stack((x.ravel(), y.ravel(), z.ravel(), np.ones(num_samples)), axis=1)\n\n # Normals outside the sphere should be [0, 0, 1]\n x[~unit_disk_mask] = 0\n y[~unit_disk_mask] = 0\n z[~unit_disk_mask] = 1\n\n normals = np_normalize(np.stack((x.ravel(), y.ravel(), z.ravel(), np.zeros(num_samples)), axis=1))\n\n if b_display:\n plt.ion()\n plt.figure()\n plt.imshow(pos[..., 2].reshape((height, width)))\n\n plt.figure()\n plt.imshow(normals[..., 2].reshape((height, width)))\n\n # Convert to the camera's coordinate system\n Mcam = lookat(eye=large_scene['camera']['eye'], at=large_scene['camera']['at'], up=large_scene['camera']['up'])\n Mproj = perspective(fovy=large_scene['camera']['fovy'], aspect=width/height, near=large_scene['camera']['near'],\n far=large_scene['camera']['far'])\n\n pos_CC = torch.matmul(tch_var_f(pos), Mcam.transpose(1, 0))\n pos_NDC = torch.matmul(pos_CC, Mproj.transpose(1, 0))\n\n large_scene['objects']['disk']['pos'] = pos_NDC / pos_NDC[..., 3][:, np.newaxis]\n large_scene['objects']['disk']['normal'] = tch_var_f(normals)\n\n # main render run\n start_time = time()\n res = render_splats_NDC(large_scene)\n rendering_time.append(time() - start_time)\n\n im = get_data(res['image'])\n im = np.uint8(255. * im)\n\n depth = get_data(res['depth'])\n depth[depth >= large_scene['camera']['far']] = depth.min()\n im_depth = np.uint8(255. * (depth - depth.min()) / (depth.max() - depth.min()))\n\n if b_display:\n plt.figure()\n plt.imshow(im, interpolation='none')\n plt.title('Image')\n plt.savefig(out_dir + '/fig_img_orig.png')\n\n plt.figure()\n plt.imshow(im_depth, interpolation='none')\n plt.title('Depth Image')\n plt.savefig(out_dir + '/fig_depth_orig.png')\n\n imsave(out_dir + '/img_orig.png', im)\n imsave(out_dir + '/depth_orig.png', im_depth)\n\n # hold matplotlib figure\n plt.ioff()\n plt.show()",
"def draw_person_on_sphere(axes, view, height=0.5, radius=1.0):\n limb_offset = height * 0.05\n head_radius = height * 0.10\n head_height = height - head_radius\n neck_length = head_radius * 0.50\n shoulder_height = height - 2*head_radius - neck_length\n torso_length = shoulder_height * 0.55\n torso_radius = torso_length * 0.30\n leg_length = shoulder_height - torso_length\n arm_length = torso_length * 0.90\n\n def _draw_part(y, z):\n x = np.zeros_like(y)\n xp, yp, zp = transform_xyz(view, None, x, y, z + radius)\n axes.plot(xp, yp, zp, color='k')\n\n # circle for head\n u = np.linspace(0, 2 * pi, 40)\n y = head_radius * cos(u)\n z = head_radius * sin(u) + head_height\n _draw_part(y, z)\n\n # rectangle for body\n y = np.array([-torso_radius, torso_radius, torso_radius, -torso_radius, -torso_radius])\n z = np.array([0., 0, torso_length, torso_length, 0]) + leg_length\n _draw_part(y, z)\n\n # arms\n y = np.array([-torso_radius - limb_offset, -torso_radius - limb_offset, -torso_radius])\n z = np.array([shoulder_height - arm_length, shoulder_height, shoulder_height])\n _draw_part(y, z)\n _draw_part(-y, z) # pylint: disable=invalid-unary-operand-type\n\n # legs\n y = np.array([-torso_radius + limb_offset, -torso_radius + limb_offset])\n z = np.array([0, leg_length])\n _draw_part(y, z)\n _draw_part(-y, z) # pylint: disable=invalid-unary-operand-type\n\n limits = [-radius-height, radius+height]\n axes.set_xlim(limits)\n axes.set_ylim(limits)\n axes.set_zlim(limits)\n axes.set_axis_off()",
"def sphere(radius):\n if not isinstance(radius, float) or radius <= 0:\n raise ValueError(f\"Incorrect value ({radius}) for radius\")\n substrate = _Substrate(\"sphere\", radius=radius)\n return substrate",
"def Sphere(self,n):\n return CubicalComplex(Cube([[0,1]]*(n+1)).faces())"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Classify each line with what it contains using a naive, rule-based classifier | def _classify_lines(self, receipt):
labels = []
for i, line in enumerate(receipt):
line = str(line)
a_chars = count(line, string.ascii_letters)
num_chars = count(line, string.digits)
punct_chars = count(line, string.punctuation)
if 'bon fiscal' in line.lower():
labels.append('unknown')
#if 'subtotal' in line.lower():
# labels.append('unknown')
elif (re.search('S\.?C\.?(.+?)(S.?R.?L.?)|(S[:.,]?A[:.,]?)', line, re.IGNORECASE) or\
any(x in line.lower() for x in ['kaufland'])) and i < 5 and 'shop' not in labels:
labels.append('shop')
elif (re.search('(C[^\w]?U[^\w]?I[^\w]?)|(C[^\w]?F[^\w]?)|(C[^\w]?I[^\w]?F[^\w]?)|(COD FISCAL).+? (\d){4,}', line) or\
re.search('\d{8}', line)) and i < 6:
labels.append('cui')
elif (re.search('(STR)|(CALEA)|(B-DUL).(.+?)', line, re.IGNORECASE) and i < 7) or\
(re.search('(NR).(\d+)', line, re.IGNORECASE) and i < 3):
labels.append('address')
elif 'TVA' in line:
labels.append('tva')
elif 'TOTAL' in line and 'SUBTOTAL' not in line:
labels.append('total')
elif re.search('DATA?.+?\d{2,4}[.\\-]\d{2,4}[.\\-]\d{2,4}', line, re.IGNORECASE) or\
re.search('\d{2}[./\\-]\d{2}[./\\-]\d{2,4}', line, re.IGNORECASE):
labels.append('data')
elif a_chars > 0 and num_chars/a_chars > 1 and 2 < i < len(receipt) - 7 and \
all(x not in line.lower() for x in ['tel', 'fax']) and 'total' not in labels:
labels.append('price')
elif 3 < i < len(receipt) - 8 and a_chars+punct_chars > 5 and 'total' not in labels and ((\
all(not re.search('(\W|^)'+x, line.lower()) for x in ['tel', 'fax', 'subtotal', 'numerar', 'brut', 'net'] +
days)\
and not re.search('\d{5}', line)) or labels[-1] == 'price'):
labels.append('name')
else:
labels.append('unknown')
return labels | [
"def classifying_func(features):\n return classify_line(features, model, encoder)",
"def classify(training=sample_file):\n\n input = sc.textFile(args.input).map(lambda word: word.lower().split(\" \")).collect()[0] # Input tweet converted into list of words\n total_tweets = total_number_of_tweets(training)\n cities = dict(tweets_per_city(training))\n training \\\n .map(lambda tweet: (tweet.split(\"\\t\")[PLACE_NAME], list(set(tweet.split(\"\\t\")[TWEET_TEXT].lower().split(\" \"))))) \\\n .reduceByKey(lambda wordlist_x, wordlist_y: wordlist_x + wordlist_y) \\\n .flatMapValues(lambda city_words_key: city_words_key) \\\n .map(lambda key: (key, 1)) \\\n .reduceByKey(add) \\\n .map(lambda place: ((place[0][0], cities[place[0][0]]), (place[0][1], place[1]))) \\\n .groupByKey().mapValues(list) \\\n .map(lambda place : (naive_bayes(total_tweets, place[0][1], place[1], input), place[0][0])) \\\n .groupByKey().mapValues(list) \\\n .sortBy(lambda place: -place[0]) \\\n .map(lambda places: (\".\".join(places[1]).replace(\".\", \"\\t\"), places[0])) \\\n .zipWithIndex() \\\n .filter(lambda word: word[1] < 1) \\\n .map(lambda place: place[0][0] + \"\\t\" + str(place[0][1])) \\\n .coalesce(1) \\\n .saveAsTextFile(args.output)",
"def classify(self, infile):\n clock = Ticker()\n\n if not self.trained:\n raise AttributeError(\"Model must be trained before use\")\n\n predictions = []\n for i, (x, y) in enumerate(self.iterfile(infile)):\n predictions.extend(self.classifier.predict(x))\n clock.tick('Partial classify {}'.format(i))\n return [self.ind2cat[i] for i in predictions]",
"def classifyLabelledRawFile(raw_file_path: str,\n out_labelled_file_path: str) -> None:\n # Read current raw file and skip header\n tsvRawFile = open(raw_file_path)\n tsvRawReader = csv.reader(tsvRawFile, delimiter='\\t')\n inHeader = next(tsvRawReader, None)\n\n # Open output file and write header\n tsvOutFile = open(out_labelled_file_path, 'w')\n tsvWriter = csv.writer(tsvOutFile, delimiter='\\t')\n outHeader = inHeader + [\"Label\"]\n tsvWriter.writerow(outHeader)\n\n # Read through and automatically label data\n for row in tsvRawReader:\n # Get the label index\n right_brac_idx = row[ACTIVITY_COL_IDX].find(']')\n labIdx = int(row[ACTIVITY_COL_IDX][1:right_brac_idx])\n\n # Take away the label from the activity string\n activityString = row[ACTIVITY_COL_IDX][right_brac_idx+1:]\n activityString = activityString.strip()\n row[ACTIVITY_COL_IDX] = activityString\n\n #Write to output file\n outRow = row + [labIdx]\n tsvWriter.writerow(outRow)\n\n tsvRawFile.close()\n tsvOutFile.close()",
"def preprocess(self):\r\n lines = [line.rstrip() for line in open(self.attr_path, 'r')]\r\n all_attr_names = lines[1].split()\r\n black, blond, brown = 0, 0, 0\r\n for i, attr_name in enumerate(all_attr_names):\r\n self.attr2idx[attr_name] = i\r\n\r\n if attr_name == 'Blond_Hair':\r\n blond = i\r\n if attr_name == 'Black_Hair':\r\n black = i\r\n if attr_name == 'Brown_Hair':\r\n brown = i\r\n\r\n lines = lines[2:]\r\n \r\n for i, line in enumerate(lines):\r\n split = line.split()\r\n filename = split[0]\r\n values = split[1:]\r\n\r\n # Remove in other attributes translation.\r\n if values[black] != '1' and values[blond] != '1' and values[brown] != '1':\r\n continue\r\n\r\n label = []\r\n for attr_name in self.selected_attrs:\r\n idx = self.attr2idx[attr_name]\r\n label.append(values[idx] == '1')\r\n\r\n if (i+1) < 2000:\r\n self.test_dataset.append([filename, label])\r\n else:\r\n self.train_dataset.append([filename, label])\r\n\r\n print('Finished preprocessing the CelebA dataset...')",
"def preprocess(self):\n lines = [line.rstrip() for line in open(self.attr_path, 'r')]\n all_attr_names = lines[1].split()\n for i, attr_name in enumerate(all_attr_names):\n self.attr2idx[attr_name] = i\n self.idx2attr[i] = attr_name\n\n target_strings = []\n lines = lines[2:]\n random.seed(1234)\n random.shuffle(lines)\n for i, line in enumerate(lines):\n split = line.split()\n filename = split[0]\n values = split[1:]\n\n label = []\n for attr_name in self.selected_attrs:\n idx = self.attr2idx[attr_name]\n label.append(int(values[idx] == '1'))\n\n if (i+1) < 2000:\n self.test_dataset.append([filename, label])\n else:\n self.train_dataset.append([filename, label])\n\n print('Finished preprocessing the CelebA dataset...')",
"def classify_corpus(df, tfidf_model, log_model, file_name):\n # tfidf_model = TfidfVectorizer(lowercase=False)\n \n features_big = tfidf_model.transform(df.cleaned_spacy_text.tolist())\n features_nd_big = features_big.toarray()\n print(len(features_nd_big), len(df['cleaned_spacy_text']))\n\n predictions_proba = log_model.predict_proba(features_nd_big[0:len(df['cleaned_spacy_text'])])\n predictions_one = [x[1] for x in predictions_proba]\n predictions_zero = [x[0] for x in predictions_proba]\n predictions_big = log_model.predict(features_nd_big[0:len(df['cleaned_spacy_text'])])\n\n\n df['prediction_proba_0'] = predictions_zero\n df['prediction_proba_1'] = predictions_one\n df['prediction'] = predictions_big\n df.to_csv(file_name + '_classified_corpus.csv')",
"def classify_step(step_file):\n image_files = create_images(step_file)\n return classify_images(image_files)",
"def classify_new_email(filename,probabilities_by_category,prior_by_category, spam_words_total, ham_words_total, D, r):\n\n p_dict = probabilities_by_category[0]\n q_dict = probabilities_by_category[1]\n spam_prior = prior_by_category[0]\n ham_prior = prior_by_category[1]\n log_spam_conditional = 0\n log_ham_conditional = 0\n file_word_frequencies = util.get_word_freq([filename])\n\n for word in file_word_frequencies:\n if word in p_dict:\n p_d = p_dict[word]\n elif word in q_dict:\n p_d = 1 / (spam_words_total + D)\n\n if word in q_dict:\n q_d = q_dict[word]\n elif word in p_dict:\n q_d = 1 / (ham_words_total + D)\n \n if (word not in q_dict) and (word not in p_dict):\n continue\n \n log_spam_conditional += (file_word_frequencies[word] * np.log(p_d))\n log_ham_conditional += (file_word_frequencies[word] * np.log(q_d))\n \n log_spam_posterior = log_spam_conditional + np.log(spam_prior) \n log_ham_posterior = log_ham_conditional + np.log(ham_prior) \n \n if log_spam_posterior > np.log(r) + log_ham_posterior:\n result = 'spam'\n else:\n result = 'ham'\n classify_result = (result, [log_spam_posterior, log_ham_posterior])\n return classify_result",
"def classifyData(date=None):\n \n # load training data\n try:\n with open('trainingset','r') as myFile:\n trainingSet = pickle.load(myFile)\n except:\n print 'Creating new training set'\n trainingSet = []\n \n # if requested, add new examples from 'date' to the training set\n if date != None:\n if len(trainingSet) > 100:\n myModel = LearningModel(trainingSet)\n else:\n myModel = None\n \n commentList = map(lambda x: x.comment, trainingSet)\n myComments = TimesComments(date)\n \n i = 0\n for comment in myComments.iterComments():\n i += 1\n if comment[0] in commentList:\n print 'comment already found!'\n else:\n print str(i) + '/' + str(len(myComments.myComments))\n newpt = LabeledData(comment[0],comment[1],comment[2],myModel)\n # if we have a trained learning model, only add manually classified points\n if newpt.predProb == None or newpt.manuallyClassified:\n trainingSet.append(newpt)\n \n with open('trainingset','w') as myFile:\n pickle.dump(trainingSet,myFile)\n \n myModel = LearningModel(trainingSet)\n return myModel",
"def createNBClassifier(data):\n\n # for each feature, need to calculate probability of True/False\n\n # get the 2 classes\n classes = set([])\n for d in data:\n classes.add(d['class'])\n if len(classes) == 2:\n break\n\n # simple set labels\n true_label = classes.pop()\n false_label = classes.pop()\n\n # for each feature we need to calculate probabilities of true/false\n keys = filter( lambda x: x != 'class', data[0].keys())\n\n classifier = {}\n totalnos = len(data)\n\n # does a loop over all elements in list for every key\n # can be optimized to one loop, TODO\n\n for k in keys:\n probset = {}\n probset['true'] = {}\n probset['false'] = {}\n\n for d in data:\n if d['class'] == true_label:\n probset['true'][d[k]] = probset['true'].get(d[k], 0) + 1\n probset['false'][d[k]] = probset['false'].get(d[k], 0) + 0\n else:\n probset['false'][d[k]] = probset['false'].get(d[k], 0) + 1\n probset['true'][d[k]] = probset['true'].get(d[k], 0) + 0\n\n # arbitrary cutoff to decide when the number of keys are too many\n if len(probset['true'].keys() + probset['false'].keys()) > 0.3*len(data):\n # too many keys present\n # discrete probability does not make sense\n # we need to model a gaussian distribution\n #probset = {}\n probset['gaussian'] = True\n\n # obtain mean and standard deviation\n true_nos = []\n false_nos = []\n for d in data:\n if d['class'] == true_label:\n true_nos.append(float(d[k]))\n else:\n false_nos.append(float(d[k]))\n \n true_nos = np.array(true_nos)\n false_nos = np.array(false_nos)\n\n probset['true_mean'] = float(np.mean(true_nos))\n probset['true_std'] = float(np.std(true_nos))\n\n probset['false_mean'] = float(np.mean(false_nos))\n probset['false_std'] = float(np.std(false_nos))\n\n else: \n # use ordinary distribution\n probset['gaussian'] = False\n\n # convert to probabilities\n for p in probset['true'].keys():\n probset[p] = float(probset['true'][p])/totalnos\n for p in probset['false'].keys():\n probset[p] = float(probset['false'][p])/totalnos\n\n # add it master dict\n classifier[k] = probset\n\n\n # add true and false labels\n classifier['true'] = true_label\n classifier['false'] = false_label\n\n #print classifier\n return classifier",
"def classify_new_email(filename, probabilities_by_category, prior_by_category, b=0):\n # TODO: Write your code here\n # 2 classes are handled the same way\n log_probabilities = [0, 0]\n x = util.get_words_in_file(filename)\n for i in range(2):\n for word in probabilities_by_category[i]:\n x_d = x.count(word)\n log_probabilities[i] += x_d * np.log(probabilities_by_category[i][word])\n log_probabilities[i] += prior_by_category[i] # Since both 0.5, this line doesn't affect anything\n\n if log_probabilities[0] + b >= log_probabilities[1]:\n classify_result = ('spam', log_probabilities[0])\n else:\n classify_result = ('ham', log_probabilities[1])\n\n return classify_result",
"def classify(self, ruleSet):\n\t\tself.getZ().evaluateClass(ruleSet)",
"def classify_text(text, lang, exclude=[]):\n \n model = load_model(lang)\n features = load_features(lang)\n \n texts = [text.lower()]\n for e in exclude:#this for loop is not right\n new_texts = []\n for t in texts:\n new_texts = new_texts + t.split(e)\n texts = new_texts\n feature_vector = get_sparse_feature_vector(texts, features, exclude)\n p_label, p_acc, p_val = linu.predict([0], [feature_vector], model)\n p_val = p_val[0][0]/(1+abs(p_val[0][0]))\n return {'label':p_label[0],'value':p_val}",
"def data_classification(self, data=[]):\n data_type = ''\n self.logger.info('Attempting to classify: {0}'.format(data))\n #This section classifies an input as heartbeat, expecting integer\n if len(data) == 1:\n try:\n value = data[0]\n int(value)\n self.instruction_list.append(self.heartbeat._make(data))\n data_type = 'heartbeat'\n except ValueError as input_error:\n self.logger.error('{0}, expecting heartbeat with epoch timestamp'.format(input_error))\n #This section classifies the input as a bid\n if len(data) == 5:\n is_bid_syntax_valid = self.validate_bid_format(data)\n if is_bid_syntax_valid:\n self.instruction_list.append(self.bid._make(data))\n data_type = 'bid'\n else:\n self.logger.error('Invalid syntax for classifying object as a bid: {0}'.format(data))\n # This section classifies the input as a user listing\n if len(data) == 6:\n is_listing_syntax_valid = self.validate_listing_format(data)\n if is_listing_syntax_valid:\n self.instruction_list.append(self.user_listing._make(data))\n data_type = 'user_listing'\n else:\n self.logger.error('Invalid syntax for classifying object as a user listing: {0}'.format(data))\n \n if data_type:\n self.logger.info('Successfully classified {0} as {1}'.format(self.instruction_list[-1], data_type))\n else:\n self.logger.debug('Unable to classify instruction: {0}'.format(data))\n return data_type",
"def classify_forced(self, mod = imagenet_model):\n classes = super(EasyImageFile, self).classify(mod)\n output = [mod, classes]\n exif_json.save(self.path, output, classify_field)\n return classes",
"def classify(self, data):\n data = np.array([exp[1:-1] for exp in data])\n data = np.append(np.ones((data.shape[0],1),dtype=float),data,axis=1)\n predictions = []\n for exp in data:\n confidence = self.sigmoid(exp.T.dot(self.theta))\n predictions.append((confidence>0.5, confidence))\n return predictions",
"def classify(args=None):\n from .classify import main\n\n if args.output:\n check_output_directory(args.output, must_exist=False)\n if args.temp:\n check_output_directory(args.temp)\n\n # Connect to the DB,\n Session = connect_to_db(\n expand_database_argument(args.database, exist=True, hyphen_default=True)\n )\n session = Session()\n\n return_code = main(\n inputs=args.input,\n session=session,\n marker_name=args.marker,\n method=args.method,\n out_dir=args.output,\n ignore_prefixes=tuple(args.ignore_prefixes),\n min_abundance=args.abundance,\n tmp_dir=args.temp,\n biom=args.biom,\n debug=args.verbose,\n cpu=check_cpu(args.cpu),\n )\n\n session.close()\n if isinstance(return_code, list):\n # Should be a list of *.method.tsv filenames.\n return 0\n else:\n return return_code",
"def classify(texts: List[str], params: Any) -> List[str]:\n\n # ############################ REPLACE THIS WITH YOUR CODE #############################\n best_model, doc2vec, datasets_info = params\n\n X_test_start, X_test_len = datasets_info[texts[0]]\n X_test = doc2vec.get_X(X_test_start, X_test_len)\n\n preds_int = best_model.predict(X_test)\n preds = ['pos' if pr == 1 else 'neg' for pr in preds_int]\n\n return preds\n # ############################ REPLACE THIS WITH YOUR CODE #############################"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a ``Ruta`` (Path) to a subcollection, return that subcollection. | def subcoleccion_desde_ruta(self, ruta):
parts = ruta.split(".")
coleccion = self
while parts:
coleccion = coleccion.colecciones[parts.pop(0)]
return coleccion | [
"def existing_sub_paths(self, sub_paths):\n paths_to_subs = [self / _ for _ in sub_paths]\n return [_ for _ in paths_to_subs if _.exists()]",
"def _isSubpathInPath(self, path, subpath):\n path = self._getAbsPath(path)\n subpath = self._getAbsPath(subpath)\n\n # If the parent path is the root directory ('/') or otherwise already\n # ends in a separator character, we need to strip the separator from\n # the end so we don't double it when we do the containment check.\n if path.endswith('/') or path.endswith('\\\\'):\n path = path[:-1]\n\n # Check for identical paths, either with or without a trailing\n # directory separator.\n if (\n (subpath == path) or\n (subpath == path + '/') or (subpath == path + '\\\\')\n ):\n return False\n\n # Check for subpath containment. This should work on either Windows or\n # *nix systems.\n return (\n subpath.startswith(path + '\\\\') or subpath.startswith(path + '/')\n )",
"def seleccionar_ficha(self, opciones_fichas):",
"def DoSelect(self, parent, path=None):\n result = False\n for c in parent.children:\n if c.name == path[0]:\n path.remove(path[0])\n if len(path)==0:\n c.selected = True\n c.draw()\n return True\n else:\n if not c.expanded:\n c.invoke() \n return c.DoSelect(c,path)",
"def subDirs(self) -> list:\n # do not do anything if the object was deleted\n if self._path is None:\n return []\n subdirs = []\n with os.scandir(self._path[:-1]) as it:\n for entry in it:\n if not entry.is_dir():\n continue\n subdirs.append( entry.name )\n return subdirs",
"def get_subgraph_from_paths(self, paths):\n pass",
"def get_folder(self, c_path):\n segments = c_path.split()\n current_folder = self # must be root folder !\n for segment in segments:\n sub_folder = current_folder.get_child_by_name(segment)\n if not sub_folder:\n return None\n current_folder = sub_folder\n return current_folder",
"def getSelectionPathList(self) -> \"SoPathList const &\":\n return _coin.SoExtSelection_getSelectionPathList(self)",
"def path(self, target):\n if self.name == target:\n return [self]\n if self.left is not None:\n left_path = self.left.path(target)\n if len(left_path) > 0:\n return [self] + left_path\n if self.right is not None:\n right_path = self.right.path(target)\n if len(right_path) > 0:\n return [self] + right_path\n return []",
"def getPath(self, index: 'int const') -> \"SoPath *\":\n return _coin.SoSelection_getPath(self, index)",
"def listar_directorio(ruta):\n archivos = [a for a in listdir(ruta) if isfile(join(ruta, a)) ]\n \n return archivos",
"def GetSubMatch(self) -> \"unsigned int\":\n return _ITKIOImageBaseBasePython.itkRegularExpressionSeriesFileNames_GetSubMatch(self)",
"def segments(self):\r\n segments = tuple(map(path_decode, self.split('/')))\r\n if segments[0] == u'':\r\n return segments[1:]\r\n return segments",
"def prolunga(self, du):\n if self.t0 < self.t1:\n return TrattoPath(self.path, self.t0, self.t1 + du)\n else:\n return TrattoPath(self.path, self.t0, self.t1 - du)",
"def find_path(self):\n j = JeuRecherche(self.pos, self.goal.pos, distManhattan, self.dir_vecs, self.dims, self.walls)\n self.path = astar(j)\n self.current = 0\n if(self.verbose):\n print(\"Player {} moving towards Restaurant {}.\".format(self.id, self.goal.id))",
"def test_scan(self):\n select = Select(Path(\"/\"))\n\n assert select.glob_get_sf(\"**.py\", 1)(Path(\"/\")) == 2\n assert select.glob_get_sf(\"**.py\", 1)(Path(\"foo\")) == 2\n assert select.glob_get_sf(\"**.py\", 1)(Path(\"usr/local/bin\")) == 2\n assert select.glob_get_sf(\"/testfiles/select/**.py\", 1)(Path(\"/testfiles/select\")) == 2\n assert select.glob_get_sf(\"/testfiles/select/test.py\", 1)(Path(\"/testfiles/select\")) == 2\n assert select.glob_get_normal_sf(\"/testfiles/se?ect/test.py\", 1)(Path(\"/testfiles/select\")) == 2\n assert select.glob_get_sf(\"/testfiles/select/test.py\", 0)(Path(\"/testfiles/select\")) is None\n assert select.glob_get_normal_sf(\"/testfiles/select/test.py\", 0)(Path(\"/testfiles/select\")) is None",
"def _select_path(self):\r\n path = self._tree.model().filePath(self._tree.currentIndex())\r\n if path:\r\n self.pathSelected = path\r\n self.close()",
"def _search_new_path(self, path):\n if not path.startswith(os.sep):\n return None\n path = path.split(os.sep)[1:]\n # First get name and remove it from path\n name = None\n for i in range(len(path)-1, -1, -1):\n if path[i] != \"\":\n name = path[i]\n path = path[:i]\n break\n if name is None:\n return None\n\n # Walk the directory hierarchy\n cur_dir = self.root_dir\n for node in path:\n if node == \"\":\n continue\n if not isinstance(cur_dir, Dir):\n # A file - doesn't have children\n return None\n try:\n cur_dir = cur_dir.files[node]\n except KeyError:\n return None\n return cur_dir, name",
"def test_split_path(self):\n zope_root = self.root.getPhysicalRoot()\n self.assertEqual(\n split_path('publication/document', self.root),\n (['root', 'publication', 'document'], zope_root))\n self.assertEqual(\n split_path('/publication/document', self.root),\n (['publication', 'document'], zope_root))\n self.assertEqual(\n split_path('./../root/publication/document', self.root),\n (['root', 'publication', 'document'], zope_root))\n self.assertEqual(\n split_path('./document', self.root.publication),\n (['root', 'publication', 'document'], zope_root))\n self.assertEqual(\n split_path('.//document', self.root.publication, self.root),\n (['publication', 'document'], self.root))\n self.assertEqual(\n split_path('./.././publication/document',\n self.root.publication, self.root),\n (['publication', 'document'], self.root))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns all contained artifacts and sub-artifacts (actions) as a list of parser contexts. | def a_contextos(self):
resultado = []
for principal, alias in six.iteritems(self.nombres_de_artefactos):
artefacto = self[principal]
resultado.append(
AnalizadorDeContexto(
nombre=principal, alias=alias, args=artefacto.obtener_argumentos()
)
)
return resultado | [
"def get_acls(self):\n return self.access_list_manager.get_objects()",
"def get_dobj_acl(self, root_token):\n objects = []\n acls = []\n \n objects = [token for token in root_token.children if token.dep in SYMBOLS_FOR_OBJECTS]\n # get associated acl\n for obj in objects:\n acl_token = None\n for child in obj.rights:\n #if child.dep == acl:\n if child.dep in SYMBOLS_FOR_MODIFIERS:\n acl_token = child\n break\n acls.append(acl_token)\n \n return objects, acls",
"def determine_contexts(self):\n return []",
"def contexts_results(self):\n return list(self._data.values())",
"def accesos_list():\n accesos = Accesos().get_accesos()\n return render_template(\n 'access/list.html.jinja',\n accesos=accesos\n )",
"def listarActivo(self):\n return Proyecto.query.filter(Proyecto.estado == \"Activo\").all()",
"def set_context(self):\n\n super(MultiDeleteObjectMixin, self).set_context()\n\n self.add_context('objects', self.get_objects())",
"def get_context_mappings(self):\n if not self.contexts:\n return []\n log.verbose(\"Getting all mappings for specified contexts.\", verbosity=55)\n if self.args.all:\n mappings = self._list_mappings(\"*.*map\")\n with log.verbose_warning_on_exception(\"Downloading all mappings failed\"):\n self.dump_files(self.default_context, mappings)\n else:\n mappings = self.contexts\n\n useable_contexts, mapping_closure = self._dump_and_vet_mappings(mappings)\n self.contexts = useable_contexts # XXXX reset self.contexts\n log.verbose(\"Got mappings from specified (usable) contexts: \", self.contexts, verbosity=55)\n\n return mapping_closure",
"def resources() -> [acl.AclResource]:\n return []",
"def getallobjlists(idf, refname):\n dtls = idf.model.dtls\n objlists = []\n for i, fieldidds in enumerate(idf.idd_info):\n indexlist = []\n for j, fieldidd in enumerate(fieldidds):\n if \"object-list\" in fieldidd:\n if fieldidd[\"object-list\"][0].upper() == refname.upper():\n indexlist.append(j)\n if indexlist != []:\n objkey = dtls[i]\n objlists.append((objkey, refname, indexlist))\n return objlists",
"def _get_objs(self):\n return []",
"def all(self):\n return self._instance._client.acls.all(self._instance.name)",
"def get_types_accessible_by(self, context):\n if not context:\n raise RuntimeError(\"Invalid context \\\"{}\\\"\".format(context))\n accessible_types = {}\n query = setools.terulequery.TERuleQuery(policy=self.policy,\n ruletype=[\"allow\"],\n source=context.type)\n # Filter all rules\n for rule in query.results():\n # Add it to the dictionary\n if rule.target in accessible_types:\n accessible_types[rule.target].append(rule)\n else:\n accessible_types[rule.target] = [rule]\n return accessible_types",
"def _get_attributes(self, entity):\n\n # NOTE: This is a major point of complexity: recursed dimensions\n # may incur in name conflicts and infinite loops.\n result = []\n\n '''\n # Commented out: recursed attributes are actually dimension attributes\n for dimension in entity.dimensions:\n if isinstance(dimension, AliasDimension):\n # FIXME: the aliased dimension may not be a FactDimension?\n referenced_attributes = self._get_attributes_recursively(dimension.dimension.fact)\n result.extend(referenced_attributes)\n '''\n\n for attribute in entity.get_attributes():\n result.append(attribute)\n\n return result",
"def iter_context_objects(self):\n tid = current_greenlet()\n objects = self._cache.get(tid)\n if objects is None:\n if len(self._cache) > _MAX_CONTEXT_OBJECT_CACHE:\n self._cache.clear()\n objects = self._global[:]\n objects.extend(getattr(self._context, 'stack', ()))\n objects.sort(reverse=True)\n objects = [x[1] for x in objects]\n self._cache[tid] = objects\n return iter(objects)",
"def load_context_relationships(permissions):\n read_contexts = set(\n permissions.get('read', {}).\n get('Program', {}).\n get('contexts', []) +\n permissions.get('read', {}).\n get('Audit', {}).\n get('contexts', []))\n write_contexts = set(\n permissions.get('update', {}).\n get('Program', {}).\n get('contexts', []) +\n permissions.get('update', {}).\n get('Audit', {}).\n get('contexts', []))\n read_only_contexts = read_contexts - write_contexts\n\n read_objects = context_relationship_query(read_only_contexts)\n for res in read_objects:\n id_, type_, _ = res\n actions = [\"read\", \"view_object_page\"]\n for action in actions:\n permissions.setdefault(action, {})\\\n .setdefault(type_, {})\\\n .setdefault('resources', list())\\\n .append(id_)\n\n write_objects = context_relationship_query(write_contexts)\n for res in write_objects:\n id_, type_, role_name = res\n actions = [\"read\", \"view_object_page\", \"create\", \"update\", \"delete\"]\n for action in actions:\n permissions.setdefault(action, {})\\\n .setdefault(type_, {})\\\n .setdefault('resources', list())\\\n .append(id_)",
"def get_permissions(self, context={}):\n context['has_permission'] = self.mongoadmin.has_permission(self.request)\n context['has_staff_permission'] = self.mongoadmin.has_staff_permission(self.request) \n return context",
"def _prepare_context(self, resource_id):\n self.ctx = dict(by_subject={}, by_object={})\n assocs = self._rr.find_associations(anyside=resource_id, id_only=False)\n self._add_associations(assocs)\n log.debug(\"Found %s associations for resource %s\", len(assocs), resource_id)",
"def amenities(self):\n amenity_list = []\n for inst in models.storage.all(Amenity).values():\n if inst.place_id == self.id:\n amenity_list.append(inst)\n return amenity_list"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes a Lexicon and applies ".transformar" to its keys and aliases. | def _transformar_con_lexicon(self, viejo):
nuevo_ = Lexicon()
# Lexicons only expose their real keys in most places, so this will
# only pick those up, not the aliases.
for clave, valor in six.iteritems(viejo):
# Deep-copy the value so that we are not just copying a reference
nuevo_[self.transformar(clave)] = copy.deepcopy(valor)
# Also copy all aliases, which are string-to-string key mappings
for clave, valor in six.iteritems(viejo.alias):
nuevo_.alias(from_=self.transformar(clave), to=self.transformar(valor))
return nuevo_ | [
"def transform(self, translate, scale, theta):\n return self.rotate(theta).scale(scale).translate(translate)",
"def transform(self, verbose=1, **kwargs):\n self.current = self.tokens.copy()\n transformation_selection = self.transformation_selection.copy()\n for kw in kwargs:\n if kw in transformation_selection:\n transformation_selection[kw] = kwargs[kw]\n for i, trans in enumerate(self.transformation_order, 1):\n if verbose:\n print(f\"{i}/{len(self.transformation_order)}. {trans}...\", end='')\n if transformation_selection[trans]:\n self.current = self.current.apply(eval(f\"self.{trans}\", ), \n stopwords=self.sw, \n lemmatizer=self.lm, \n stemmer=self.sm,\n exceptions=self.non_alpha_exceptions,\n replacer=self.replacer)\n if verbose:\n print(\"Completed.\" if transformation_selection[trans] else \"Skipped.\")\n return self.current",
"def add_transform(self, xforms):\n return",
"def updateTransforms(self, node):\n transforms = []\n for _, transform in self.instrument.positioning_stack.model():\n transforms.append(transform)\n\n for detector in self.instrument.detectors.values():\n for _, transform in detector.model():\n transforms.append(transform)\n\n for _, transform in self.instrument.jaws.model():\n transforms.append(transform)\n\n node.per_object_transform[:len(transforms)] = transforms",
"def transform(self, ctx, modules):",
"def _transform(self, dataset):\n\n for t in self.transforms:\n method = getattr(dataset, t.name)\n dataset = method(*t.args, **t.kwargs)\n\n return dataset",
"def _apply_transformation_on_transformed_link(self, item, transformation):\n if getattr(item, \"current_transformation\"):\n relative_transformation = transformation * item.current_transformation.inverse()\n else:\n relative_transformation = transformation\n for native_geometry in item.native_geometry or []:\n self.transform(native_geometry, relative_transformation)\n item.current_transformation = transformation",
"def _cmd_help_translate(self, ident, _from, to, msg, cmd):\n cinfo = self.init_cmd(ident, _from, to, msg)\n access = \"all\"\n\n if cmds[cmd][CMD_LEVEL] == 4:\n access = \"root\"\n elif cmds[cmd][CMD_LEVEL] == irc.LEVEL_MASKS['o']:\n access = \"op\"\n elif cmds[cmd][CMD_LEVEL] == irc.LEVEL_MASKS['v']:\n access = \"voice\"\n\n usage = '\\x02' + \"Usage\" + COLOR[\"rewind\"] + \": translate <lang_from> <lang_to> <string>.\"\n desc = '\\x02' + \"Description\" + COLOR[\"rewind\"] + \": Translate <string> from <lang_from> to <lang_to>.\"\n aliases = '\\x02' + \"Aliases\" + COLOR[\"rewind\"] + ': ' + \", \".join(cmds[cmd][CMD_ALIASES]) + '.'\n access = '\\x02' + \"Access\" + COLOR[\"rewind\"] + \": %s.\" %access\n\n self.privmsg(cinfo[1], usage + ' ' + desc + ' ' + aliases + ' ' + access)\n return None",
"def translate(obj, a0, a1):\n atoms = getats(obj)\n M = getmat(atoms)\n p0 = (a0.r if isinstance(a0, Atom) else a0)\n p1 = (a1.r if isinstance(a1, Atom) else a1)\n if len(p0)!=3 or len(p1)!=3:\n print('ERROR: invalid a0 or a1!')\n exit(1)\n M = matvec.translate(M, p0, p1)\n putmat(atoms, M)",
"def transformFromPosition(*args):\n return _almathswig.transformFromPosition(*args)",
"def transform_corpus(self, transformation):\n self.docs = self.transformation[self.docs]\n transformed_model = transformation(self.docs)\n self.transformation = transformed_model\n return",
"def transform(func):\n WalkoffTag.transform.tag(func)\n return func",
"def translate(data, taxfiledirect, mode='species'):\n t = makeTrans(mode, taxfiledirect)\n for i in range(len(data['category'])):\n data['category'][i] = changeCategory(t, data['hierarchy'][i])\n return data",
"def transform(self, corpus: Corpus) -> Corpus:\n utt_ids = corpus.get_utterance_ids()\n for utt_id in utt_ids: #add scores to each utterances metadata\n corpus.get_utterance(utt_id).meta['ARI'] = ARI(corpus.get_utterance(utt_id).text)\n corpus.get_utterance(utt_id).meta['Flesch-Kincaid'] = Flesch_Kincaid(corpus.get_utterance(utt_id).text)\n return corpus",
"def _get_transform_functions(ax, axis):\n axis_obj = getattr(ax, f\"{axis}axis\")\n transform = axis_obj.get_transform()\n return transform.transform, transform.inverted().transform",
"def transform(self, node):\n try:\n handler = getattr(self, 'transform_%s' % node.kind.name.lower())\n return handler(node)\n except AttributeError:\n print(\n \"Ignoring node of type %s (%s)\" % (\n node.kind,\n ' '.join(\n t.spelling for t in node.get_tokens())\n ),\n file=sys.stderr\n )",
"def setTransform(self, *args) -> \"void\":\n return _coin.SbDPMatrix_setTransform(self, *args)",
"def superimpose_apply(atoms, transformation):\n trans1, rot, trans2 = transformation\n s_coord = coord(atoms).copy()\n s_coord += trans1\n s_coord = np.dot(rot, s_coord.T).T\n s_coord += trans2\n\n if isinstance(atoms, np.ndarray):\n return s_coord\n else:\n transformed = atoms.copy()\n transformed.coord = s_coord\n return transformed",
"def transformed(self, transformation):\n wrench = self.copy()\n wrench.transform(transformation)\n return wrench"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
getReviewersAndWatchers(db, commits=None, changesets=None) > tuple Returns a tuple containing two dictionaries, each mapping file IDs to dictionaries mapping user IDs to sets of changeset IDs. The first dictionary defines the reviewers of each file, the second dictionary defines the watchers of each file. For any changes in a file for which no reviewer is identified, None is used as a key in the dictionary instead of a real user ID. | def getReviewersAndWatchers(db, repository, commits=None, changesets=None, reviewfilters=None,
applyfilters=True, applyparentfilters=False):
if changesets is None:
changesets = []
changeset_utils.createChangesets(db, repository, commits)
for commit in commits:
changesets.extend(changeset_utils.createChangeset(db, None, repository, commit, do_highlight=False))
cursor = db.cursor()
filters = Filters()
filters.setFiles(db, list(getFileIdsFromChangesets(changesets)))
if applyfilters:
filters.load(db, repository=repository, recursive=applyparentfilters)
if reviewfilters:
filters.addFilters(reviewfilters)
reviewers = {}
watchers = {}
for changeset in changesets:
author_user_ids = changeset.child.author.getUserIds(db) if changeset.child else set()
cursor.execute("SELECT DISTINCT file FROM fileversions WHERE changeset=%s", (changeset.id,))
for (file_id,) in cursor:
reviewers_found = False
for user_id, (filter_type, delegate) in filters.listUsers(file_id).items():
if filter_type == 'reviewer':
if user_id not in author_user_ids:
reviewer_user_ids = [user_id]
elif delegate:
reviewer_user_ids = []
for delegate_user_name in delegate.split(","):
delegate_user = dbutils.User.fromName(db, delegate_user_name)
reviewer_user_ids.append(delegate_user.id)
else:
reviewer_user_ids = []
for reviewer_user_id in reviewer_user_ids:
reviewers.setdefault(file_id, {}).setdefault(reviewer_user_id, set()).add(changeset.id)
reviewers_found = True
else:
watchers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset.id)
if not reviewers_found:
reviewers.setdefault(file_id, {}).setdefault(None, set()).add(changeset.id)
return reviewers, watchers | [
"def getReviewedReviewers(db, review):\n\n cursor = db.cursor()\n\n cursor.execute(\"\"\"SELECT reviewfiles.reviewer, reviewfiles.changeset, reviewfiles.file\n FROM reviewfiles\n WHERE reviewfiles.review=%s\n AND reviewfiles.state='reviewed'\"\"\",\n (review.id,))\n\n reviewers = {}\n\n for user_id, changeset_id, file_id in cursor.fetchall():\n reviewers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset_id)\n\n return reviewers",
"def getPendingReviewers(db, review):\n\n cursor = db.cursor()\n\n cursor.execute(\"\"\"SELECT reviewuserfiles.uid, reviewfiles.changeset, reviewfiles.file\n FROM reviewfiles\n LEFT OUTER JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)\n WHERE reviewfiles.review=%s\n AND reviewfiles.state='pending'\"\"\",\n (review.id,))\n\n reviewers = {}\n\n for user_id, changeset_id, file_id in cursor.fetchall():\n reviewers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset_id)\n\n return reviewers",
"def getChangedFilesForCommits(self):\n\t\t\"\"\"Returns [{'time':time, 'files':[filenames,]}]\"\"\"\n\t\trequestString = \"https://api.github.com/repos/{}/{}/compare\"\n\t\trequestString = requestString.format(self.user, self.repo)\n\t\tcommits = self.getCommits()\n\t\tchanges = []\n\t\tfor commitIndex in range(len(commits)):\n\t\t\tif commitIndex == 0:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tcurrent = commits[commitIndex]['sha']\n\t\t\t\tprevious = commits[commitIndex - 1]['sha']\n\t\t\t\tcommitTime = parseGitTimeString(commits[commitIndex]['commit']['committer']['date'])\n\t\t\t\tcompareString = \"/{}...{}\"\n\t\t\t\tcompareString = compareString.format(previous, current)\n\t\t\t\ttempRequestString = requestString + compareString\n\t\t\t\tresponse = urllib.request.urlopen(tempRequestString)\n\t\t\t\tdata = response.read().decode('utf-8')\n\t\t\t\tdata = json.loads(data)\n\t\t\t\tfiles = data['files']\n\t\t\t\t#this right here is wrong... should be commitsha:{time:124523523,files:changed}\n\t\t\t\tfilesChanged = {'time': commitTime, 'files': [file['filename'] for file in files if file['status'] == 'modified']}\n\t\t\t\tchanges.append(filesChanged)\n\t\treturn changes",
"def set_reviewers(self, changes, **kwargs):\n status, data = self.run_gerrit_command('set-reviewers', changes, **kwargs)\n\n return status, data",
"def review(self, commits, **kwargs):\n status, data = self.run_gerrit_command('review', commits, **kwargs)\n\n return status, data",
"def collectReviewTeams(reviewers):\n\n teams = {}\n\n for file_id, file_reviewers in reviewers.items():\n if None in file_reviewers:\n teams.setdefault(None, set()).add(file_id)\n team = frozenset(filter(None, file_reviewers.keys()))\n if team: teams.setdefault(team, set()).add(file_id)\n\n return teams",
"def extract_changes_from_commit(commit_hash: str) -> Tuple[str, List[str], dict, str]:\n commit_github_url = os.path.join(GITHUB_BASE_URL, commit_hash)\n # Getting Commit Author ('%an') from last (-1) git log entry. Last git log entry is the current commit.\n output_stream = os.popen(\"git log -1 --pretty=format:'%an'\")\n commit_author = output_stream.read()\n\n # Getting changed file names between last commit on branch (HEAD^) and current commit.\n output_stream = os.popen(\n f\"git diff-tree --no-commit-id --name-only -r HEAD^ {commit_hash}\"\n )\n changed_files = output_stream.read().split(\"\\n\")\n logger.info(f\"all Changed files: {changed_files}\")\n\n change_diffs = {}\n for changed_file in changed_files:\n if changed_file.startswith(PREFIX) and changed_file.endswith(SUFFIX):\n # Getting diff of specific changed file from last commit on branch (HEAD^).\n # Filtering only lines indicating changes: starting with + or -\n output_stream = os.popen(\n f\"git diff HEAD^ -- {changed_file} | grep '^[+|-][^+|-]'\"\n )\n change_diffs[changed_file] = output_stream.read()\n\n relevant_changed_files = list(change_diffs.keys())\n return commit_github_url, relevant_changed_files, change_diffs, commit_author",
"def changed_files(revset, filter_re=None):\n require('code_dir')\n\n with cd(env.code_dir):\n result = run(\"hg status --rev '%s'\" % revset, quiet=True).splitlines()\n\n if filter_re:\n regex = re.compile(filter_re)\n result = filter(lambda filename: regex.search(filename), result)\n\n return result",
"def get_previous_repository_reviews( app, repository, changeset_revision ):\n repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )\n reviewed_revision_hashes = [ review.changeset_revision for review in repository.reviews ]\n previous_reviews_dict = odict()\n for changeset in hg_util.reversed_upper_bounded_changelog( repo, changeset_revision ):\n previous_changeset_revision = str( repo.changectx( changeset ) )\n if previous_changeset_revision in reviewed_revision_hashes:\n previous_rev, previous_changeset_revision_label = \\\n hg_util.get_rev_label_from_changeset_revision( repo, previous_changeset_revision )\n revision_reviews = get_reviews_by_repository_id_changeset_revision( app,\n app.security.encode_id( repository.id ),\n previous_changeset_revision )\n previous_reviews_dict[ previous_changeset_revision ] = \\\n dict( changeset_revision_label=previous_changeset_revision_label,\n reviews=revision_reviews )\n return previous_reviews_dict",
"def get_reviews_by_repository_id_changeset_revision( app, repository_id, changeset_revision ):\n sa_session = app.model.context.current\n return sa_session.query( app.model.RepositoryReview ) \\\n .filter( and_( app.model.RepositoryReview.repository_id == app.security.decode_id( repository_id ),\n app.model.RepositoryReview.changeset_revision == changeset_revision ) ) \\\n .all()",
"def userReviews():\n usersList = files.readUsers()\n beersList = files.readBeers()\n breweryList = files.readBreweries()\n breweryToBeers = files.readBreweryToBeers()\n\n total = 0\n totalUsersComplete = 0\n for userHash, user in usersList.iteritems():\n totalUsersComplete += 1\n # if the data has been normalized, old data will not\n # have usernames. Ignore older users which may have\n # already gotten reviews\n if user.username:\n userId = user.uid\n username = user.username\n user.username = None\n userReviewCount = 0\n offsetTotal = 0\n ratings = {}\n\n print 'Processing ' + str(userId) + ': ' + username\n # each response returns at most 25 reviews. To get more user\n # reviews, call again with an offset get at most 50 reviews\n # from the same user\n while (userReviewCount < 2):\n print username + ': ' + str(userReviewCount + 1)\n data = untappd.getUserReviewData(username, offsetTotal)\n offset = data['response']['beers']['count']\n offsetTotal += offset\n reviews = data['response']['beers']['items']\n for review in reviews:\n userRating = review['rating_score']\n if userRating > 0:\n beerInfo = review['beer']\n breweryInfo = review['brewery']\n # fill in beer information\n if hash(str(beerInfo['bid'])) not in beersList:\n stylesList = []\n style = unicode(beerInfo['beer_style']).encode(\"utf-8\")\n styles = style.lower().title().split('/')\n for style in styles:\n style = style.strip()\n stylesList.append(style)\n beerAttribs = {\n 'bid': str(beerInfo['bid']),\n 'name': unicode(beerInfo['beer_name']).encode(\"utf-8\"),\n 'label': beerInfo['beer_label'],\n 'abv': beerInfo['beer_abv'],\n 'ibu': beerInfo['beer_ibu'],\n 'style': stylesList,\n 'description': unicode(beerInfo['beer_description']).encode(\"utf-8\"),\n 'rating': beerInfo['rating_score'],\n 'numRatings': 1,\n 'brewery': str(breweryInfo['brewery_id'])\n }\n beer = UT.UntappdBeer(beerAttribs)\n beersList[hash(beer.bid)] = beer\n else:\n beersList[hash(str(beerInfo['bid']))].numRatings += 1\n # fill in brewery information\n if hash(str(breweryInfo['brewery_id'])) not in breweryList:\n breweryAttribs = {\n 'breweryId': str(breweryInfo['brewery_id']),\n 'name': unicode(breweryInfo['brewery_name']).encode(\"utf-8\"),\n 'label': breweryInfo['brewery_label'],\n 'country': unicode(breweryInfo['country_name']).encode(\"utf-8\"),\n 'location': unicode(breweryInfo['location']).encode(\"utf-8\")\n }\n brewery = UT.UntappdBrewery(breweryAttribs)\n breweryList[hash(brewery.breweryId)] = brewery\n\n # map breweery_id to a list of beers produced there\n if hash(str(breweryInfo['brewery_id'])) not in breweryToBeers:\n # store the current beer in a list of beers of\n # the brewery\n breweryToBeers[hash(str(breweryInfo['brewery_id']))] = {str(breweryInfo['brewery_id']): [str(beerInfo['bid'])]}\n else:\n # add current beer to brewery's list of beers\n breweryToBeers[hash(str(breweryInfo['brewery_id']))][str(breweryInfo['brewery_id'])].append(str(beerInfo['bid']))\n\n # add list of beer ratings to user\n ratings[str(beerInfo['bid'])] = userRating\n userReviewCount += 1\n user.ratings = ratings\n\n # store the dictionaries after new data so user doesn't kill process before writing\n # with open('../data/users.json', 'wb') as usersFile:\n # json = jpickle.encode(usersList)\n # usersFile.write(json)\n # with open('../data/beers.json', 'wb') as beersFile:\n # json = jpickle.encode(beersList)\n # beersFile.write(json)\n # with open('../data/breweries.json', 'wb') as breweriesFile:\n # json = jpickle.encode(breweryList)\n # breweriesFile.write(json)\n 
# with open('../data/breweryToBeers.json', 'wb') as breweryToBeersFile:\n # json = jpickle.encode(breweryToBeers)\n # breweryToBeersFile.write(json)\n\n # if the offset is less than 25, then there are no more reviews to retrieve\n if offset < 25:\n break\n writeJSONFile('../data/users.json', usersList)\n writeJSONFile('../data/beers.json', beersList)\n writeJSONFile('../data/breweries.json', breweryList)\n writeJSONFile('../data/breweryToBeers.json', breweryToBeers)\n\n total += len(ratings)\n print str(userId) + ': ' + username + ', Processed: ' + str(len(ratings)) + ' reviews'\n print 'Total Reviews: ' + str(total)\n print 'Total Users Completed: ' + str(totalUsersComplete)\n sleep(37 * (userReviewCount))\n else:\n total += len(user.ratings)",
"def getAllContributors(server,repo):\n contributors={}\n url=server+\"/repos/\"+repo+\"/stats/contributors\"\n res=conn.get(url)\n dicres=json.loads(res.text)\n for contributor in dicres:\n additionDeletion=getAdditionsDeletions(contributor.get(\"weeks\"))\n additions=str(additionDeletion[0])\n deletions=str(additionDeletion[1])\n commits=str(contributor.get(\"total\"))\n #contributor will be -> author_id:(commit,additions,deletions)\n contributors[str(contributor.get(\"author\").get(\"id\"))]=(commits,additions,deletions)\n return contributors",
"def files_touched(self, commit):\n if commit.parents:\n par_list = commit.parents\n else:\n par_list = [empty_tree_oid()]\n new_oid_set = set()\n for p in par_list:\n diff = self._repo.diff(p, commit)\n for dd in diff.deltas:\n new_oid_set.add((dd.new_file.path, dd.new_file.id))\n return new_oid_set",
"def test_diff_viewer_filter_by_change_type(repo_with_diffs: Tuple[Repo, Commit, Commit]):\n repo, previous_head, new_head = repo_with_diffs\n with DiffViewer(previous_head, new_head) as viewer:\n # we added 1 file, we expect the added() generator to return only 1 diff\n diffs = list(viewer.added())\n assert len(diffs) == 1\n paths = diff_paths(diffs)\n assert Path(\"other/gbac.rego\") in paths\n\n # we modified 1 file, we expect the modified() generator to return only 1 diff\n diffs = list(viewer.modified())\n assert len(diffs) == 1\n paths = diff_paths(diffs)\n assert Path(\"mylist.txt\") in paths\n\n # we deleted 1 file, we expect the deleted() generator to return only 1 diff\n diffs = list(viewer.deleted())\n assert len(diffs) == 1\n paths = diff_paths(diffs)\n assert Path(\"other/data.json\") in paths\n\n # we renamed 1 file, we expect the renamed() generator to return only 1 diff\n diffs = list(viewer.renamed())\n assert len(diffs) == 1\n paths = diff_paths(diffs)\n assert len(paths) == 2 # both old and new file name\n assert Path(\"ignored.json\") in paths\n assert Path(\"ignored2.json\") in paths",
"def choose_reviewers(self, diff, author, global_=None):\n chosen_reviewers = set()\n mention_list = set()\n for _ in range(40):\n reviewer = self.choose_reviewer(\n 'rust', 'rust-lang', diff, author, global_\n )\n mentions = self.get_to_mention(diff, global_)\n chosen_reviewers.add(reviewer)\n for mention in mentions:\n for reviewer in mention['reviewers']:\n mention_list.add(reviewer)\n return chosen_reviewers, mention_list",
"def git_annotate_file_order_by_author(commits, git_actor_dedupe_table):\n file_commits_by_author = collections.defaultdict(\n lambda: collections.defaultdict(list))\n\n for k, c in commits.items():\n if 'order' in c:\n for fname in c['files']:\n author = git_actor_dedupe_table[c['author']]['standard_actor']\n file_commits_by_author[fname][author].append((c['order'], k))\n # Use this as opportunity to tack on new field\n c['file_order_for_author'] = {}\n\n for fname, entry in file_commits_by_author.items():\n for author, val in entry.items():\n for i, (order, c) in enumerate(sorted(val, key=lambda x: x[0])):\n commits[c]['file_order_for_author'][fname] = i + 1",
"def grab_changesets(self, path, url, changesets):\n raise NotImplementedError",
"def get_review_by_repository_id_changeset_revision_user_id( app, repository_id, changeset_revision, user_id ):\n sa_session = app.model.context.current\n return sa_session.query( app.model.RepositoryReview ) \\\n .filter( and_( app.model.RepositoryReview.repository_id == app.security.decode_id( repository_id ),\n app.model.RepositoryReview.changeset_revision == changeset_revision,\n app.model.RepositoryReview.user_id == app.security.decode_id( user_id ) ) ) \\\n .first()",
"def all_commits(change_id, curr_project, curr_ref):\n commits = []\n manifest = ET.ElementTree(file='.repo/manifest.xml')\n commits.append((curr_project, project_path(manifest, curr_project), curr_ref))\n\n url = (GERRIT_ROOT + 'changes/?o=CURRENT_REVISION&q=status:open+' +\n change_id)\n changes = request.urlopen(url)\n for change in parse_response(changes):\n project = change['project']\n fetch = change['revisions'][change['current_revision']]['fetch']\n # The `ref` is the same for every download scheme, hence we can use\n # the first one that is there\n ref = fetch.values()[0]['ref']\n path = project_path(manifest, project, change['branch'])\n if path and project != curr_project:\n commits.append((project, path, ref))\n\n return commits"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
getReviewedReviewers(db, review) > dictionary Returns a dictionary, like the ones returned by getReviewersAndWatchers(), but with details about all reviewed changes in the review. | def getReviewedReviewers(db, review):
cursor = db.cursor()
cursor.execute("""SELECT reviewfiles.reviewer, reviewfiles.changeset, reviewfiles.file
FROM reviewfiles
WHERE reviewfiles.review=%s
AND reviewfiles.state='reviewed'""",
(review.id,))
reviewers = {}
for user_id, changeset_id, file_id in cursor.fetchall():
reviewers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset_id)
return reviewers | [
"def getPendingReviewers(db, review):\n\n cursor = db.cursor()\n\n cursor.execute(\"\"\"SELECT reviewuserfiles.uid, reviewfiles.changeset, reviewfiles.file\n FROM reviewfiles\n LEFT OUTER JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)\n WHERE reviewfiles.review=%s\n AND reviewfiles.state='pending'\"\"\",\n (review.id,))\n\n reviewers = {}\n\n for user_id, changeset_id, file_id in cursor.fetchall():\n reviewers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset_id)\n\n return reviewers",
"def getReviewersAndWatchers(db, repository, commits=None, changesets=None, reviewfilters=None,\n applyfilters=True, applyparentfilters=False):\n\n if changesets is None:\n changesets = []\n changeset_utils.createChangesets(db, repository, commits)\n for commit in commits:\n changesets.extend(changeset_utils.createChangeset(db, None, repository, commit, do_highlight=False))\n\n cursor = db.cursor()\n\n filters = Filters()\n filters.setFiles(db, list(getFileIdsFromChangesets(changesets)))\n\n if applyfilters:\n filters.load(db, repository=repository, recursive=applyparentfilters)\n\n if reviewfilters:\n filters.addFilters(reviewfilters)\n\n reviewers = {}\n watchers = {}\n\n for changeset in changesets:\n author_user_ids = changeset.child.author.getUserIds(db) if changeset.child else set()\n\n cursor.execute(\"SELECT DISTINCT file FROM fileversions WHERE changeset=%s\", (changeset.id,))\n\n for (file_id,) in cursor:\n reviewers_found = False\n\n for user_id, (filter_type, delegate) in filters.listUsers(file_id).items():\n if filter_type == 'reviewer':\n if user_id not in author_user_ids:\n reviewer_user_ids = [user_id]\n elif delegate:\n reviewer_user_ids = []\n for delegate_user_name in delegate.split(\",\"):\n delegate_user = dbutils.User.fromName(db, delegate_user_name)\n reviewer_user_ids.append(delegate_user.id)\n else:\n reviewer_user_ids = []\n\n for reviewer_user_id in reviewer_user_ids:\n reviewers.setdefault(file_id, {}).setdefault(reviewer_user_id, set()).add(changeset.id)\n reviewers_found = True\n else:\n watchers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset.id)\n\n if not reviewers_found:\n reviewers.setdefault(file_id, {}).setdefault(None, set()).add(changeset.id)\n\n return reviewers, watchers",
"def getReviews( self ):\n\t\tresList = list()\n\t\tquery = \"select * from game_reviews\"\n\t\tself.csr.execute( query )\n\t\t\n\t\tresSet = self.csr.fetchall()\t\n\t\tfor res in resSet:\n\t\t\tnewListItem = { 'reviewID': res[REVIEW_ID], 'score': res[REVIEW_SCORE], \\\n\t\t\t\t\t'articleLink': res[ARTICLE_LINK], 'asin':res[REVIEW_ASIN], 'reviewContent':res[REVIEW_CONTENT] }\n\t\t\tresList.append( newListItem )\n\n\t\treturn resList",
"def reviews(self):\n review_dict = models.storage.all(Review)\n return [review for review in review_dict.values()\n if review.place_id == self.id]",
"def get_previous_repository_reviews( app, repository, changeset_revision ):\n repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )\n reviewed_revision_hashes = [ review.changeset_revision for review in repository.reviews ]\n previous_reviews_dict = odict()\n for changeset in hg_util.reversed_upper_bounded_changelog( repo, changeset_revision ):\n previous_changeset_revision = str( repo.changectx( changeset ) )\n if previous_changeset_revision in reviewed_revision_hashes:\n previous_rev, previous_changeset_revision_label = \\\n hg_util.get_rev_label_from_changeset_revision( repo, previous_changeset_revision )\n revision_reviews = get_reviews_by_repository_id_changeset_revision( app,\n app.security.encode_id( repository.id ),\n previous_changeset_revision )\n previous_reviews_dict[ previous_changeset_revision ] = \\\n dict( changeset_revision_label=previous_changeset_revision_label,\n reviews=revision_reviews )\n return previous_reviews_dict",
"def set_reviewers(self, reviewers):\n if not reviewers:\n return\n for reviewer in reviewers:\n try:\n account = self.handler.get_account(reviewer)\n if account[\"status\"] != constants.ENABLED:\n continue\n if account[\"role\"] not in (constants.ADMIN, constants.STAFF):\n continue\n self[\"reviewers\"][account[\"email\"]] = {\n \"status\": constants.REVIEW,\n \"review\": None,\n \"modified\": utils.timestamp(),\n }\n except ValueError:\n pass",
"def get_reviews(restaurant_id):\n db_session = current_app.config[\"DB_SESSION\"]\n review = (\n db_session.query(Review).filter(restaurant_id == Review.restaurant_id).all()\n )\n return review",
"def get_reviewers(user, product, types=('a', 'f', 'fof', 'mf', 'ml')):\n\n result = dict()\n\n # we get users ids that did in a list\n # note that it contains friends and fof too\n all_reviewers = [dict(id=x.user.id, username=x.user.username) for\n x in Reviewing.objects.filter(product=product)\n .exclude(user=user.id)]\n\n if 'a' in types:\n if all_reviewers:\n result['all_reviewers'] = all_reviewers\n\n if 'f' in types and user.is_authenticated():\n f = user_utils.get_friends(user)\n f_reviewers = [u for u in all_reviewers if u['id'] in f]\n if f_reviewers:\n result['f_reviewers'] = f_reviewers\n\n if 'fof' in types and user.is_authenticated():\n fof = user_utils.get_fof(user)\n fof_reviewers = [u for u in all_reviewers if u['id'] in fof]\n # We remove friends from friends of friends\n if f:\n fof_reviewers = [u for u in fof_reviewers if u['id'] not in f]\n if fof_reviewers:\n result['fof_reviewers'] = fof_reviewers\n\n if 'mf' in types and user.is_authenticated():\n mutual_friends = user_utils.get_users_share_mutual_friends(user)\n mutual_friends_rev = [u for u in all_reviewers if\n u['id'] in mutual_friends]\n if mutual_friends_rev:\n result['mf_reviewers'] = mutual_friends_rev\n\n if 'ml' in types and user.is_authenticated():\n mutual_likes = user_utils.get_users_share_mutual_likes(user)\n mutual_likes_rev = [u for u in all_reviewers\n if u['id'] in mutual_likes]\n if mutual_likes_rev:\n result['ml_reviewers'] = mutual_likes_rev\n\n return result",
"def get_reviews(attachment):\n reviews = []\n if not 'flags' in attachment:\n return reviews\n for flag in attachment['flags']:\n for review_type in ['review', 'superreview', 'ui-review']:\n if flag.get('name') == review_type:\n reviews.append({'type':review_type,\n 'reviewer':flag['setter']['name'],\n 'result':flag['status']})\n break\n return reviews",
"def reviews(self):\n qs = self._reviews_all.filter(reply_to=None)\n # Force the query to occur immediately. Several\n # reviews-related tests hang if this isn't done.\n return qs",
"def get_reviews(self, submission_pk, submission_id):\n reviews = list(self.execute(\"SELECT * FROM reviews WHERE submission_id = ? AND submission_pk = ?\", (submission_id, database_blob(submission_pk))))\n reviews_list = []\n for review in reviews:\n reviews_list.append(self.get_review(str(review[1]), review[0]))\n return reviews_list",
"def get_review_data(user, product):\n\n rev_info = dict()\n\n try:\n reviewing = Reviewing.objects.get(\n user=user, product=product)\n except:\n return False\n\n rev_info['boolAnswers'] = get_bool_answers(reviewing)\n\n try:\n rev_info['comment'] = (\n ReviewComment.objects.get(reviewing=reviewing).text_value)\n except:\n pass\n\n try:\n rev_info['rating'] = (\n ReviewRating.objects.get(reviewing=reviewing).rating)\n except:\n pass\n\n # we only return if not empty\n return {k: v for k, v in rev_info.items() if v}",
"def get_reviews_by_repository_id_changeset_revision( app, repository_id, changeset_revision ):\n sa_session = app.model.context.current\n return sa_session.query( app.model.RepositoryReview ) \\\n .filter( and_( app.model.RepositoryReview.repository_id == app.security.decode_id( repository_id ),\n app.model.RepositoryReview.changeset_revision == changeset_revision ) ) \\\n .all()",
"def all_reviews(place_id):\n place_exist = storage.get(Place, place_id)\n if place_exist is None:\n abort(404)\n reviews_all = storage.all('Review')\n review_list = []\n for review in reviews_all.values():\n if review.place_id == place_id:\n review_list.append(review.to_dict())\n return jsonify(review_list)",
"def collectReviewTeams(reviewers):\n\n teams = {}\n\n for file_id, file_reviewers in reviewers.items():\n if None in file_reviewers:\n teams.setdefault(None, set()).add(file_id)\n team = frozenset(filter(None, file_reviewers.keys()))\n if team: teams.setdefault(team, set()).add(file_id)\n\n return teams",
"def build_review_data(review_notifications):\n review_requested_data = collections.defaultdict(dict)\n object_state_reverted_data = collections.defaultdict(dict)\n for notification in review_notifications:\n review = notification.object\n reviewable = review.reviewable\n if not reviewable:\n continue\n link = get_object_url(reviewable)\n fill_review_requested_data(link, review, reviewable, review_requested_data)\n fill_object_state_reverted_data(link, notification,\n object_state_reverted_data, review,\n reviewable)\n return {\n \"review_requested_data\": review_requested_data,\n \"object_state_reverted_data\": object_state_reverted_data\n }",
"def set_reviewers(self, changes, **kwargs):\n status, data = self.run_gerrit_command('set-reviewers', changes, **kwargs)\n\n return status, data",
"def get_review():\n try:\n\n c = get_cursor()\n c.execute(\"\"\"select cart.cart_id, cart.cart_status_id, cart.total_cost,\n cart.manual_hold,\n address.bill_first_name, address.bill_last_name,\n address.bill_phone, address.email\n from cart, address\n where cart.cart_id = address.cart_id\n and cart.cart_status_id = %s\n and cart.manual_hold = ''\n order by cart.cart_id\"\"\",\n (STATUS_REVIEW))\n if (c.rowcount == 0):\n return []\n cart_list = list(c.fetchall())\n for cart in cart_list:\n cart['cart_id'] = int(cart['cart_id'])\n cart['total_cost'] = str(cart['total_cost'])\n cart['cart_status'] = Statics.cart_statuses.get_id(cart['cart_status_id'])['name']\n del cart['cart_status_id']\n return cart_list\n\n except DbKeyInvalid as e:\n raise DbKeyInvalid(e)\n except Exception as e:\n import traceback\n traceback.print_exc()\n print e.__class__.__name__ + \": \" + str(e)\n raise DbError(\"Internal error\")",
"def review_as_dict(self):\n\n review = {r.name: getattr(self, r.name) for r in self.__table__.columns}\n review['author'] = self.author.first_name + ' ' + self.author.last_name\n return review"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
getPendingReviewers(db, review) > dictionary Returns a dictionary, like the ones returned by getReviewersAndWatchers(), but with details about the remaining unreviewed changes in the review. Changes not assigned to a reviewer are handled the same way, appearing under the key None. | def getPendingReviewers(db, review):
cursor = db.cursor()
cursor.execute("""SELECT reviewuserfiles.uid, reviewfiles.changeset, reviewfiles.file
FROM reviewfiles
LEFT OUTER JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
WHERE reviewfiles.review=%s
AND reviewfiles.state='pending'""",
(review.id,))
reviewers = {}
for user_id, changeset_id, file_id in cursor.fetchall():
reviewers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset_id)
return reviewers | [
"def getReviewedReviewers(db, review):\n\n cursor = db.cursor()\n\n cursor.execute(\"\"\"SELECT reviewfiles.reviewer, reviewfiles.changeset, reviewfiles.file\n FROM reviewfiles\n WHERE reviewfiles.review=%s\n AND reviewfiles.state='reviewed'\"\"\",\n (review.id,))\n\n reviewers = {}\n\n for user_id, changeset_id, file_id in cursor.fetchall():\n reviewers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset_id)\n\n return reviewers",
"def pending_reviewers(self):\n pending = self.get_reviewers()\n comments = self.get_comments()\n for comment in comments:\n username = comment['user']['login']\n if username in pending and approve_regex.search(comment['body']):\n pending.remove(username)\n return pending",
"def pending_reviews(self):\n pending = QUORUM\n comments = self.get_comments()\n for comment in comments:\n username = comment['user']['login']\n if (approve_regex.search(comment['body'])\n and (username in QUORUM_USERS or len(QUORUM_USERS) == 0)):\n pending = pending - 1\n return pending",
"def get_pending_review(self, user):\n from reviewboard.reviews.models.review import Review\n\n return Review.objects.get_pending_review(self, user)",
"def set_reviewers(self, reviewers):\n if not reviewers:\n return\n for reviewer in reviewers:\n try:\n account = self.handler.get_account(reviewer)\n if account[\"status\"] != constants.ENABLED:\n continue\n if account[\"role\"] not in (constants.ADMIN, constants.STAFF):\n continue\n self[\"reviewers\"][account[\"email\"]] = {\n \"status\": constants.REVIEW,\n \"review\": None,\n \"modified\": utils.timestamp(),\n }\n except ValueError:\n pass",
"def build_review_data(review_notifications):\n review_requested_data = collections.defaultdict(dict)\n object_state_reverted_data = collections.defaultdict(dict)\n for notification in review_notifications:\n review = notification.object\n reviewable = review.reviewable\n if not reviewable:\n continue\n link = get_object_url(reviewable)\n fill_review_requested_data(link, review, reviewable, review_requested_data)\n fill_object_state_reverted_data(link, notification,\n object_state_reverted_data, review,\n reviewable)\n return {\n \"review_requested_data\": review_requested_data,\n \"object_state_reverted_data\": object_state_reverted_data\n }",
"def reviews(self):\n qs = self._reviews_all.filter(reply_to=None)\n # Force the query to occur immediately. Several\n # reviews-related tests hang if this isn't done.\n return qs",
"def get_previous_repository_reviews( app, repository, changeset_revision ):\n repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )\n reviewed_revision_hashes = [ review.changeset_revision for review in repository.reviews ]\n previous_reviews_dict = odict()\n for changeset in hg_util.reversed_upper_bounded_changelog( repo, changeset_revision ):\n previous_changeset_revision = str( repo.changectx( changeset ) )\n if previous_changeset_revision in reviewed_revision_hashes:\n previous_rev, previous_changeset_revision_label = \\\n hg_util.get_rev_label_from_changeset_revision( repo, previous_changeset_revision )\n revision_reviews = get_reviews_by_repository_id_changeset_revision( app,\n app.security.encode_id( repository.id ),\n previous_changeset_revision )\n previous_reviews_dict[ previous_changeset_revision ] = \\\n dict( changeset_revision_label=previous_changeset_revision_label,\n reviews=revision_reviews )\n return previous_reviews_dict",
"def get_reviewers(user, product, types=('a', 'f', 'fof', 'mf', 'ml')):\n\n result = dict()\n\n # we get users ids that did in a list\n # note that it contains friends and fof too\n all_reviewers = [dict(id=x.user.id, username=x.user.username) for\n x in Reviewing.objects.filter(product=product)\n .exclude(user=user.id)]\n\n if 'a' in types:\n if all_reviewers:\n result['all_reviewers'] = all_reviewers\n\n if 'f' in types and user.is_authenticated():\n f = user_utils.get_friends(user)\n f_reviewers = [u for u in all_reviewers if u['id'] in f]\n if f_reviewers:\n result['f_reviewers'] = f_reviewers\n\n if 'fof' in types and user.is_authenticated():\n fof = user_utils.get_fof(user)\n fof_reviewers = [u for u in all_reviewers if u['id'] in fof]\n # We remove friends from friends of friends\n if f:\n fof_reviewers = [u for u in fof_reviewers if u['id'] not in f]\n if fof_reviewers:\n result['fof_reviewers'] = fof_reviewers\n\n if 'mf' in types and user.is_authenticated():\n mutual_friends = user_utils.get_users_share_mutual_friends(user)\n mutual_friends_rev = [u for u in all_reviewers if\n u['id'] in mutual_friends]\n if mutual_friends_rev:\n result['mf_reviewers'] = mutual_friends_rev\n\n if 'ml' in types and user.is_authenticated():\n mutual_likes = user_utils.get_users_share_mutual_likes(user)\n mutual_likes_rev = [u for u in all_reviewers\n if u['id'] in mutual_likes]\n if mutual_likes_rev:\n result['ml_reviewers'] = mutual_likes_rev\n\n return result",
"def get_reviews(attachment):\n reviews = []\n if not 'flags' in attachment:\n return reviews\n for flag in attachment['flags']:\n for review_type in ['review', 'superreview', 'ui-review']:\n if flag.get('name') == review_type:\n reviews.append({'type':review_type,\n 'reviewer':flag['setter']['name'],\n 'result':flag['status']})\n break\n return reviews",
"def getReviewersAndWatchers(db, repository, commits=None, changesets=None, reviewfilters=None,\n applyfilters=True, applyparentfilters=False):\n\n if changesets is None:\n changesets = []\n changeset_utils.createChangesets(db, repository, commits)\n for commit in commits:\n changesets.extend(changeset_utils.createChangeset(db, None, repository, commit, do_highlight=False))\n\n cursor = db.cursor()\n\n filters = Filters()\n filters.setFiles(db, list(getFileIdsFromChangesets(changesets)))\n\n if applyfilters:\n filters.load(db, repository=repository, recursive=applyparentfilters)\n\n if reviewfilters:\n filters.addFilters(reviewfilters)\n\n reviewers = {}\n watchers = {}\n\n for changeset in changesets:\n author_user_ids = changeset.child.author.getUserIds(db) if changeset.child else set()\n\n cursor.execute(\"SELECT DISTINCT file FROM fileversions WHERE changeset=%s\", (changeset.id,))\n\n for (file_id,) in cursor:\n reviewers_found = False\n\n for user_id, (filter_type, delegate) in filters.listUsers(file_id).items():\n if filter_type == 'reviewer':\n if user_id not in author_user_ids:\n reviewer_user_ids = [user_id]\n elif delegate:\n reviewer_user_ids = []\n for delegate_user_name in delegate.split(\",\"):\n delegate_user = dbutils.User.fromName(db, delegate_user_name)\n reviewer_user_ids.append(delegate_user.id)\n else:\n reviewer_user_ids = []\n\n for reviewer_user_id in reviewer_user_ids:\n reviewers.setdefault(file_id, {}).setdefault(reviewer_user_id, set()).add(changeset.id)\n reviewers_found = True\n else:\n watchers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset.id)\n\n if not reviewers_found:\n reviewers.setdefault(file_id, {}).setdefault(None, set()).add(changeset.id)\n\n return reviewers, watchers",
"def get_review_data(user, product):\n\n rev_info = dict()\n\n try:\n reviewing = Reviewing.objects.get(\n user=user, product=product)\n except:\n return False\n\n rev_info['boolAnswers'] = get_bool_answers(reviewing)\n\n try:\n rev_info['comment'] = (\n ReviewComment.objects.get(reviewing=reviewing).text_value)\n except:\n pass\n\n try:\n rev_info['rating'] = (\n ReviewRating.objects.get(reviewing=reviewing).rating)\n except:\n pass\n\n # we only return if not empty\n return {k: v for k, v in rev_info.items() if v}",
"def send_reviewers_message(self):\n if self[\"status\"] != constants.REVIEW:\n return\n if self[\"status\"] == self.original_status:\n reviewers = list(set(self[\"reviewers\"]).difference(self.original_reviewers))\n else:\n reviewers = list(set(self[\"reviewers\"]))\n if not reviewers:\n return\n try:\n order = self.handler.get_order(self[\"order\"])\n with MessageSaver(handler=self.handler) as saver:\n saver.create(\n settings[constants.REPORT][\"reviewers\"],\n name=self[\"name\"],\n title=order[\"title\"],\n url=utils.get_order_url(order),\n )\n saver.send(reviewers)\n except (KeyError, ValueError):\n pass",
"def test_get_with_review_request(self):\n # Publicly-accessible published review request.\n review_request = self.create_review_request(publish=True,\n create_repository=True)\n\n # Comment from a published review on a publicly-accessible\n # review request.\n review1 = self.create_review(review_request, publish=True)\n comment1 = self._create_diff_comment(review_request, review1)\n\n # Comment from an unpublished review on a publicly-accessible\n # review request.\n review2 = self.create_review(review_request, publish=False)\n self._create_diff_comment(review_request, review2)\n\n # Comment from a published review owned by the requester on a\n # publicly-accessible review request.\n review3 = self.create_review(review_request,\n user=self.user,\n publish=True)\n comment3 = self._create_diff_comment(review_request, review3)\n\n # Comment from an unpublished review owned by the requester on a\n # publicly-accessible review request.\n review4 = self.create_review(review_request,\n user=self.user,\n publish=False)\n comment4 = self._create_diff_comment(review_request, review4)\n\n # Published review request from a private repository the requester\n # does not have access to.\n repo = self.create_repository(public=False)\n review_request_inaccessible = self.create_review_request(\n repository=repo,\n publish=True)\n\n # Comment from a published review on a private repository the requester\n # does not have access to.\n review5 = self.create_review(review_request_inaccessible, publish=True)\n self._create_diff_comment(review_request_inaccessible, review5)\n\n # Comment from an unpublished review on a private repository the\n # requester does not have access to.\n review6 = self.create_review(review_request_inaccessible,\n publish=False)\n self._create_diff_comment(review_request_inaccessible, review6)\n\n # Testing that only comments from the given review request\n # are returned.\n rsp = self.api_get(get_root_diff_comment_list_url(), {\n 'review-request-id': review_request.id,\n }, expected_mimetype=review_diff_comment_list_mimetype)\n rsp_items = rsp[self.resource.list_result_key]\n\n self.assertEqual(rsp['stat'], 'ok')\n self.assertEqual(rsp['total_results'], 3)\n self.compare_item(rsp_items[0], comment1)\n self.compare_item(rsp_items[1], comment3)\n self.compare_item(rsp_items[2], comment4)\n\n # Testing that no comments are returned when the requester does\n # not have access to the given review request.\n rsp = self.api_get(get_root_diff_comment_list_url(), {\n 'review-request-id': review_request_inaccessible.id,\n }, expected_mimetype=review_diff_comment_list_mimetype)\n rsp_items = rsp[self.resource.list_result_key]\n\n self.assertEqual(rsp['stat'], 'ok')\n self.assertEqual(rsp['total_results'], 0)",
"def collectReviewTeams(reviewers):\n\n teams = {}\n\n for file_id, file_reviewers in reviewers.items():\n if None in file_reviewers:\n teams.setdefault(None, set()).add(file_id)\n team = frozenset(filter(None, file_reviewers.keys()))\n if team: teams.setdefault(team, set()).add(file_id)\n\n return teams",
"def reviews(self):\n review_dict = models.storage.all(Review)\n return [review for review in review_dict.values()\n if review.place_id == self.id]",
"def get_pending_changes():\n from ralph.scan.api import SCAN_RESULT_TTL\n delta = timezone.now() - datetime.timedelta(seconds=SCAN_RESULT_TTL)\n all_changes = ScanSummary.objects.filter(modified__gt=delta)\n new, changed = (\n all_changes.filter(ipaddress__device=None).count(),\n all_changes.filter(changed=True).count(),\n )\n return PendingChanges(new, changed)",
"def get_new_reviews(self, user):\n if user.is_authenticated:\n # If this ReviewRequest was queried using with_counts=True,\n # then we should know the new review count and can use this to\n # decide whether we have anything at all to show.\n if hasattr(self, \"new_review_count\") and self.new_review_count > 0:\n query = self.visits.filter(user=user)\n\n try:\n visit = query[0]\n\n return self.reviews.filter(\n public=True,\n timestamp__gt=visit.timestamp).exclude(user=user)\n except IndexError:\n # This visit doesn't exist, so bail.\n pass\n\n return self.reviews.get_empty_query_set()",
"def getReviews( self ):\n\t\tresList = list()\n\t\tquery = \"select * from game_reviews\"\n\t\tself.csr.execute( query )\n\t\t\n\t\tresSet = self.csr.fetchall()\t\n\t\tfor res in resSet:\n\t\t\tnewListItem = { 'reviewID': res[REVIEW_ID], 'score': res[REVIEW_SCORE], \\\n\t\t\t\t\t'articleLink': res[ARTICLE_LINK], 'asin':res[REVIEW_ASIN], 'reviewContent':res[REVIEW_CONTENT] }\n\t\t\tresList.append( newListItem )\n\n\t\treturn resList"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
collectReviewTeams(reviewers) > dictionary Takes a dictionary as returned by getReviewersAndWatchers() or getPendingReviewers() and transforms it into a dictionary mapping sets of users to sets of files that those groups of users share review responsibility for. The same user may appear in a number of sets, as may the same file. If None appears as a key in the returned dictionary, the set of files it is mapped to contains changes with no assigned reviewers. | def collectReviewTeams(reviewers):
teams = {}
for file_id, file_reviewers in reviewers.items():
if None in file_reviewers:
teams.setdefault(None, set()).add(file_id)
team = frozenset(filter(None, file_reviewers.keys()))
if team: teams.setdefault(team, set()).add(file_id)
return teams | [
"def getReviewedReviewers(db, review):\n\n cursor = db.cursor()\n\n cursor.execute(\"\"\"SELECT reviewfiles.reviewer, reviewfiles.changeset, reviewfiles.file\n FROM reviewfiles\n WHERE reviewfiles.review=%s\n AND reviewfiles.state='reviewed'\"\"\",\n (review.id,))\n\n reviewers = {}\n\n for user_id, changeset_id, file_id in cursor.fetchall():\n reviewers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset_id)\n\n return reviewers",
"def getPendingReviewers(db, review):\n\n cursor = db.cursor()\n\n cursor.execute(\"\"\"SELECT reviewuserfiles.uid, reviewfiles.changeset, reviewfiles.file\n FROM reviewfiles\n LEFT OUTER JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)\n WHERE reviewfiles.review=%s\n AND reviewfiles.state='pending'\"\"\",\n (review.id,))\n\n reviewers = {}\n\n for user_id, changeset_id, file_id in cursor.fetchall():\n reviewers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset_id)\n\n return reviewers",
"def set_reviewers(self, reviewers):\n if not reviewers:\n return\n for reviewer in reviewers:\n try:\n account = self.handler.get_account(reviewer)\n if account[\"status\"] != constants.ENABLED:\n continue\n if account[\"role\"] not in (constants.ADMIN, constants.STAFF):\n continue\n self[\"reviewers\"][account[\"email\"]] = {\n \"status\": constants.REVIEW,\n \"review\": None,\n \"modified\": utils.timestamp(),\n }\n except ValueError:\n pass",
"def getReviewersAndWatchers(db, repository, commits=None, changesets=None, reviewfilters=None,\n applyfilters=True, applyparentfilters=False):\n\n if changesets is None:\n changesets = []\n changeset_utils.createChangesets(db, repository, commits)\n for commit in commits:\n changesets.extend(changeset_utils.createChangeset(db, None, repository, commit, do_highlight=False))\n\n cursor = db.cursor()\n\n filters = Filters()\n filters.setFiles(db, list(getFileIdsFromChangesets(changesets)))\n\n if applyfilters:\n filters.load(db, repository=repository, recursive=applyparentfilters)\n\n if reviewfilters:\n filters.addFilters(reviewfilters)\n\n reviewers = {}\n watchers = {}\n\n for changeset in changesets:\n author_user_ids = changeset.child.author.getUserIds(db) if changeset.child else set()\n\n cursor.execute(\"SELECT DISTINCT file FROM fileversions WHERE changeset=%s\", (changeset.id,))\n\n for (file_id,) in cursor:\n reviewers_found = False\n\n for user_id, (filter_type, delegate) in filters.listUsers(file_id).items():\n if filter_type == 'reviewer':\n if user_id not in author_user_ids:\n reviewer_user_ids = [user_id]\n elif delegate:\n reviewer_user_ids = []\n for delegate_user_name in delegate.split(\",\"):\n delegate_user = dbutils.User.fromName(db, delegate_user_name)\n reviewer_user_ids.append(delegate_user.id)\n else:\n reviewer_user_ids = []\n\n for reviewer_user_id in reviewer_user_ids:\n reviewers.setdefault(file_id, {}).setdefault(reviewer_user_id, set()).add(changeset.id)\n reviewers_found = True\n else:\n watchers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset.id)\n\n if not reviewers_found:\n reviewers.setdefault(file_id, {}).setdefault(None, set()).add(changeset.id)\n\n return reviewers, watchers",
"def userReviews():\n usersList = files.readUsers()\n beersList = files.readBeers()\n breweryList = files.readBreweries()\n breweryToBeers = files.readBreweryToBeers()\n\n total = 0\n totalUsersComplete = 0\n for userHash, user in usersList.iteritems():\n totalUsersComplete += 1\n # if the data has been normalized, old data will not\n # have usernames. Ignore older users which may have\n # already gotten reviews\n if user.username:\n userId = user.uid\n username = user.username\n user.username = None\n userReviewCount = 0\n offsetTotal = 0\n ratings = {}\n\n print 'Processing ' + str(userId) + ': ' + username\n # each response returns at most 25 reviews. To get more user\n # reviews, call again with an offset get at most 50 reviews\n # from the same user\n while (userReviewCount < 2):\n print username + ': ' + str(userReviewCount + 1)\n data = untappd.getUserReviewData(username, offsetTotal)\n offset = data['response']['beers']['count']\n offsetTotal += offset\n reviews = data['response']['beers']['items']\n for review in reviews:\n userRating = review['rating_score']\n if userRating > 0:\n beerInfo = review['beer']\n breweryInfo = review['brewery']\n # fill in beer information\n if hash(str(beerInfo['bid'])) not in beersList:\n stylesList = []\n style = unicode(beerInfo['beer_style']).encode(\"utf-8\")\n styles = style.lower().title().split('/')\n for style in styles:\n style = style.strip()\n stylesList.append(style)\n beerAttribs = {\n 'bid': str(beerInfo['bid']),\n 'name': unicode(beerInfo['beer_name']).encode(\"utf-8\"),\n 'label': beerInfo['beer_label'],\n 'abv': beerInfo['beer_abv'],\n 'ibu': beerInfo['beer_ibu'],\n 'style': stylesList,\n 'description': unicode(beerInfo['beer_description']).encode(\"utf-8\"),\n 'rating': beerInfo['rating_score'],\n 'numRatings': 1,\n 'brewery': str(breweryInfo['brewery_id'])\n }\n beer = UT.UntappdBeer(beerAttribs)\n beersList[hash(beer.bid)] = beer\n else:\n beersList[hash(str(beerInfo['bid']))].numRatings += 1\n # fill in brewery information\n if hash(str(breweryInfo['brewery_id'])) not in breweryList:\n breweryAttribs = {\n 'breweryId': str(breweryInfo['brewery_id']),\n 'name': unicode(breweryInfo['brewery_name']).encode(\"utf-8\"),\n 'label': breweryInfo['brewery_label'],\n 'country': unicode(breweryInfo['country_name']).encode(\"utf-8\"),\n 'location': unicode(breweryInfo['location']).encode(\"utf-8\")\n }\n brewery = UT.UntappdBrewery(breweryAttribs)\n breweryList[hash(brewery.breweryId)] = brewery\n\n # map breweery_id to a list of beers produced there\n if hash(str(breweryInfo['brewery_id'])) not in breweryToBeers:\n # store the current beer in a list of beers of\n # the brewery\n breweryToBeers[hash(str(breweryInfo['brewery_id']))] = {str(breweryInfo['brewery_id']): [str(beerInfo['bid'])]}\n else:\n # add current beer to brewery's list of beers\n breweryToBeers[hash(str(breweryInfo['brewery_id']))][str(breweryInfo['brewery_id'])].append(str(beerInfo['bid']))\n\n # add list of beer ratings to user\n ratings[str(beerInfo['bid'])] = userRating\n userReviewCount += 1\n user.ratings = ratings\n\n # store the dictionaries after new data so user doesn't kill process before writing\n # with open('../data/users.json', 'wb') as usersFile:\n # json = jpickle.encode(usersList)\n # usersFile.write(json)\n # with open('../data/beers.json', 'wb') as beersFile:\n # json = jpickle.encode(beersList)\n # beersFile.write(json)\n # with open('../data/breweries.json', 'wb') as breweriesFile:\n # json = jpickle.encode(breweryList)\n # breweriesFile.write(json)\n 
# with open('../data/breweryToBeers.json', 'wb') as breweryToBeersFile:\n # json = jpickle.encode(breweryToBeers)\n # breweryToBeersFile.write(json)\n\n # if the offset is less than 25, then there are no more reviews to retrieve\n if offset < 25:\n break\n writeJSONFile('../data/users.json', usersList)\n writeJSONFile('../data/beers.json', beersList)\n writeJSONFile('../data/breweries.json', breweryList)\n writeJSONFile('../data/breweryToBeers.json', breweryToBeers)\n\n total += len(ratings)\n print str(userId) + ': ' + username + ', Processed: ' + str(len(ratings)) + ' reviews'\n print 'Total Reviews: ' + str(total)\n print 'Total Users Completed: ' + str(totalUsersComplete)\n sleep(37 * (userReviewCount))\n else:\n total += len(user.ratings)",
"def get_reviewers(user, product, types=('a', 'f', 'fof', 'mf', 'ml')):\n\n result = dict()\n\n # we get users ids that did in a list\n # note that it contains friends and fof too\n all_reviewers = [dict(id=x.user.id, username=x.user.username) for\n x in Reviewing.objects.filter(product=product)\n .exclude(user=user.id)]\n\n if 'a' in types:\n if all_reviewers:\n result['all_reviewers'] = all_reviewers\n\n if 'f' in types and user.is_authenticated():\n f = user_utils.get_friends(user)\n f_reviewers = [u for u in all_reviewers if u['id'] in f]\n if f_reviewers:\n result['f_reviewers'] = f_reviewers\n\n if 'fof' in types and user.is_authenticated():\n fof = user_utils.get_fof(user)\n fof_reviewers = [u for u in all_reviewers if u['id'] in fof]\n # We remove friends from friends of friends\n if f:\n fof_reviewers = [u for u in fof_reviewers if u['id'] not in f]\n if fof_reviewers:\n result['fof_reviewers'] = fof_reviewers\n\n if 'mf' in types and user.is_authenticated():\n mutual_friends = user_utils.get_users_share_mutual_friends(user)\n mutual_friends_rev = [u for u in all_reviewers if\n u['id'] in mutual_friends]\n if mutual_friends_rev:\n result['mf_reviewers'] = mutual_friends_rev\n\n if 'ml' in types and user.is_authenticated():\n mutual_likes = user_utils.get_users_share_mutual_likes(user)\n mutual_likes_rev = [u for u in all_reviewers\n if u['id'] in mutual_likes]\n if mutual_likes_rev:\n result['ml_reviewers'] = mutual_likes_rev\n\n return result",
"def choose_reviewers(self, diff, author, global_=None):\n chosen_reviewers = set()\n mention_list = set()\n for _ in range(40):\n reviewer = self.choose_reviewer(\n 'rust', 'rust-lang', diff, author, global_\n )\n mentions = self.get_to_mention(diff, global_)\n chosen_reviewers.add(reviewer)\n for mention in mentions:\n for reviewer in mention['reviewers']:\n mention_list.add(reviewer)\n return chosen_reviewers, mention_list",
"def extract_ratings_by_users_map(ratings):\n if globals.RATINGS_BY_USER is None:\n globals.RATINGS_BY_USER = extract_ratings_by_users(ratings)\n ratings_by_user_map = {}\n for key in globals.RATINGS_BY_USER.keys():\n ratings_user = globals.RATINGS_BY_USER[key]\n if len(ratings_user) != 0: # this means the user has rated some items\n ratings_by_user_map[key] = {}\n for rating in ratings_user:\n ratings_by_user_map[key][rating['movieid']] = rating['rating']\n return ratings_by_user_map",
"def find_review_pairs_by_friends(user_dict, review_dict):\n friend_review_pairs = set()\n for user_id in user_dict:\n user = user_dict[user_id]\n user_friends = user[\"friends\"]\n user_reviews = user[\"reviews\"]\n user_businesses = user_reviews_by_business(user_reviews, review_dict)\n # Iterate through a user's friends\n for friend_id in user_friends:\n friend = user_dict[friend_id]\n friend_reviews = friend[\"reviews\"]\n friend_businesses = user_reviews_by_business(friend_reviews, review_dict)\n # Iterate through the businesses reviewed by that friend\n for business_id in friend_businesses:\n # Identify whether the friend reviewed any of the same businesses as the user and add review_id's to set\n if business_id in user_businesses:\n friend_review_id = friend_businesses[business_id]\n friend_review = review_dict[friend_review_id]\n user_review_id = user_businesses[business_id]\n user_review = review_dict[user_review_id]\n # First review in tuple is always earlier or on the same date as the second\n if (user_review[\"date\"] <= friend_review[\"date\"]):\n friend_review_pairs.add((user_review_id, friend_review_id))\n return friend_review_pairs",
"def review_participants(self):\n user_ids = list(\n self.reviews\n .filter(public=True)\n .values_list('user_id', flat=True)\n )\n users = set()\n\n if user_ids:\n users.update(User.objects.filter(pk__in=user_ids))\n\n return users",
"def get_review_ids(self):\n review_page_step = 10\n download_url = \"%s/%s_Review-%s-%s-Reviews\" % (self.__entity_type,\n self.__base_url,\n self.__entity_location,\n self.__entity_id)\n re_review_id_pattern = re.compile(r'/ShowUserReviews-g%s-d%s-r([0-9]+)-' % \n (self.__entity_location, self.__entity_id))\n \n \n \n n_reviews_downloaded = 0\n page_reviews_ids = 0\n no_more_review_ids = False\n while(n_reviews_downloaded < self.__max_num_reviews and not no_more_review_ids):\n download_url = \"%s-or%s\" % (download_url, page_reviews_ids * review_page_step)\n htmlwebpage = self.__get_webpage(download_url)\n reviews_ids = set()\n if not htmlwebpage:\n review_ids = None\n raise TripAdvisorReviewsIdsDownloadError(self.__entity_id)\n else:\n new_reviews_ids = re_review_id_pattern.findall(htmlwebpage.decode(\"utf-8\"))\n no_more_review_ids = self.__is_no_more_reviews(new_reviews_ids, reviews_ids)\n if not no_more_review_ids:\n review_ids.update(new_reviews_ids)\n if len(new_reviews_ids) + len(reviews_ids) > self.__max_num_reviews:\n reviews_ids = review_ids[:self.__max_num_reviews]\n page_reviews_ids +=1\n return reviews_ids",
"def send_reviewers_message(self):\n if self[\"status\"] != constants.REVIEW:\n return\n if self[\"status\"] == self.original_status:\n reviewers = list(set(self[\"reviewers\"]).difference(self.original_reviewers))\n else:\n reviewers = list(set(self[\"reviewers\"]))\n if not reviewers:\n return\n try:\n order = self.handler.get_order(self[\"order\"])\n with MessageSaver(handler=self.handler) as saver:\n saver.create(\n settings[constants.REPORT][\"reviewers\"],\n name=self[\"name\"],\n title=order[\"title\"],\n url=utils.get_order_url(order),\n )\n saver.send(reviewers)\n except (KeyError, ValueError):\n pass",
"def merge_callers_dicts(this_callers, other_callers):\n\n # Both data sets have this file, so merge them.\n for line_or_arc, other_test_result in iitems(other_callers):\n\n # If the other line/arc is not in this file, add it and move on.\n this_test_result = this_callers.get(line_or_arc, None)\n if this_test_result is None:\n this_callers[line_or_arc] = other_test_result\n continue\n\n # This line/arc is present in both files; merge them.\n this_test_result.merge(other_test_result)\n\n return this_callers",
"def get_similar_users(target_rating: Rating,\n user_ratings: UserRatingDict,\n movie_users: MovieUserDict) -> Dict[int, float]:\n\n # Your code here\n mov_list = []\n similar_p = {}\n for mov in target_rating:\n mov_list.append(mov)\n remove_unknown_movies(user_ratings, movie_users)\n p_watched = get_users_who_watched(mov_list, movie_users)\n for p in p_watched:\n if p in user_ratings:\n similarity = get_similarity(target_rating, user_ratings[p])\n similar_p[p] = similarity\n return similar_p",
"def build_review_data(review_notifications):\n review_requested_data = collections.defaultdict(dict)\n object_state_reverted_data = collections.defaultdict(dict)\n for notification in review_notifications:\n review = notification.object\n reviewable = review.reviewable\n if not reviewable:\n continue\n link = get_object_url(reviewable)\n fill_review_requested_data(link, review, reviewable, review_requested_data)\n fill_object_state_reverted_data(link, notification,\n object_state_reverted_data, review,\n reviewable)\n return {\n \"review_requested_data\": review_requested_data,\n \"object_state_reverted_data\": object_state_reverted_data\n }",
"def set_reviewers(self, changes, **kwargs):\n status, data = self.run_gerrit_command('set-reviewers', changes, **kwargs)\n\n return status, data",
"def stable_marriage(suitors, reviewers, optimal=\"suitor\"):\n\n if optimal.lower() == \"reviewer\":\n suitors, reviewers = reviewers, suitors\n\n free_suitors = [s for s in suitors if not s.matching]\n while free_suitors:\n\n suitor = free_suitors.pop()\n reviewer = suitor.get_favourite()\n\n if reviewer.matching:\n curr_match = reviewer.matching\n unmatch_pair(curr_match, reviewer)\n free_suitors.append(curr_match)\n\n match_pair(suitor, reviewer)\n\n successors = reviewer.get_successors()\n for successor in successors:\n delete_pair(successor, reviewer)\n\n if optimal.lower() == \"reviewer\":\n suitors, reviewers = reviewers, suitors\n\n return {s: s.matching for s in suitors}",
"def test_no_potential_reviewers(self):\n self.handler = HighfiveHandlerMock(\n Payload({}), repo_config=self.fakes['config']['empty']\n ).handler\n chosen_reviewers, mentions = self.choose_reviewers(\n self.fakes['diff']['normal'], 'alexcrichton',\n self.fakes['global_']['base']\n )\n assert set([None]) == chosen_reviewers\n assert set() == mentions",
"def get_updated_teams():\n\n m.start_scraping()\n \n current_rankings, old_rankings = second_file_exists()\n key = 0\n\n for current, old in zip(current_rankings, old_rankings):\n if key == 0:\n key += 1\n continue\n\n for item in range(len(old)):\n\n if current[0] == old[item]:\n match = point = rating = ''\n \n if int(current[1]) > int(old[1]):\n match = current[1]\n \n if int(current[2]) > int(old[2]):\n point = current[2]\n \n if int(current[3]) > int(old[3]):\n rating = current[3]\n\n if match != '' or point != '' or rating != '':\n teams_updated.update({current[0]:[match, point, rating]})\n \n break\n break\n\n if teams_updated:\n send_mail()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
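For illustration, a minimal usage sketch of collectReviewTeams as defined above; the input shape (file id -> {user id or None: set of changeset ids}) is assumed from the getPendingReviewers negative, and all ids are hypothetical:

# Hypothetical reviewers mapping, shaped like getPendingReviewers() output:
# file id -> {user id (or None for unassigned): set of changeset ids}
reviewers = {
    101: {7: {1}, 9: {1}},   # files 101 and 102 share reviewers 7 and 9
    102: {7: {2}, 9: {2}},
    103: {None: {2}},        # file 103 has changes with no assigned reviewer
}

teams = collectReviewTeams(reviewers)
# teams == {frozenset({7, 9}): {101, 102}, None: {103}}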
Decorator to add a Task to the given list, for registering with argparse. | def register_task(choices):
def decorator(cls):
instantiated_task = cls()
choices.append(instantiated_task)
logging.debug(f"Registered {instantiated_task.name} task with argparse choices")
return cls
return decorator | [
"def addtask(self, func, *args, **kwargs):\n if not iscoroutinefunction(func):\n async_f = self.func2async(func)\n else:\n async_f = func\n\n _t = tuple([async_f, args, kwargs])\n self.tasklist.append(_t)",
"def alm_add_task(self, task):\n pass",
"def add_task(task):\n if not type(task.run) is types.MethodType:\n print \"Task %s does not have a run() method\"%task\n system.exit(1)\n core.g_tasks.append(task)",
"def add(\n ctx: typer.Context,\n title: list[str],\n project: Annotated[Optional[str], typer.Option(\"--list\")] = None,\n) -> None:\n project = project_set(project, ctx.obj[\"project\"])\n title_str = \" \".join(title)\n task = TaskItem(title_str, project)\n new_task = task.add()\n print(f\":white_check_mark: Task '{new_task.title}' added to {new_task.parent}.\")",
"def add_task(self, cls, *args, **kwargs):\n if 'cycle' not in kwargs:\n kwargs['cycle'] = self\n\n task = cls(self.trial, *args, **kwargs)\n self.tasks.append(task)\n return task",
"def add(self, task):\n self.task_list.append(task)\n if task.creates in self.task_dict:\n raise NonUniqueTask(\n \"task `creates` '%s' is not unique\" % task.creates\n )\n self.task_dict[task.creates] = task",
"def add_task (self,priority,task):\r\n self.todo[self.uid] = (priority, task)\r\n self.uid += 1",
"def add_task(func):\n # Add task function to be run in the task manager's thread pool\n _TASK_MANAGER.add_task(func)",
"def add_task(self, task, input=None, outputs=None, runner_count=1, max_input_size=-1):\n self._tasks.append(task)\n\n if input is None:\n task_input = None\n else:\n input_queue = self._get_queue(input, maxsize=max_input_size)\n task_input = input_queue.add_and_input()\n\n task_output_queues = dict()\n if outputs is not None:\n for output_queque_name in outputs:\n task_output_queues[output_queque_name] = self._get_queue(output_queque_name)\n\n task.configure(self, task_input, task_output_queues, runner_count)",
"def add_a_task(self, task_name, task_content):\n raise NotImplementedError",
"def addTask(self, method, name, extraArgs = [], appendTask = False, sim = True):\n\n mgr = base.simTaskMgr if sim else base.taskMgr\n\n task = mgr.add(method, self.taskName(name), extraArgs = extraArgs, appendTask = appendTask)\n self._tasks[name] = (task, mgr)\n return task",
"def map(self, tasks: iter):\n for task in tasks:\n self.add_task(task)",
"def task_added(self, task):\n pass",
"def create_lists(self, skip_tasks=[]):\n self._tasks = task_list(self._group, skip_tasks)",
"def add_task(tid, name, project, tags, priority, entry, due, recur):\n with io.with_feedback(f\"Importing '{name}' ({project})\"):\n return taskwarrior.task_add(\n name,\n project=project,\n tags=tags,\n priority=priority,\n entry=entry,\n due=due,\n recur=recur,\n todoist_id=tid,\n )",
"def insert(self, task):\r\n if task.name() in self._task_map: return False\r\n\r\n # Add the task in the list\r\n self._task_map[task.name()] = task\r\n self._task_list.append(task)\r\n self.log.msg('Task `' + task.name() + '` added to the end of the task'\\\r\n + 'list')\r\n \r\n task.start(self)\r\n return True",
"def add_task_interactive(**task_data):\n callbacks = {\n 'y': lambda: task_data,\n 'n': lambda: task_data,\n\n # Rename\n 'd': lambda: {\n **task_data,\n 'name': io.prompt(\n 'Set name',\n default=task_data['name'],\n value_proc=lambda x: x.strip(),\n ),\n },\n\n # Edit tags\n 't': lambda: {\n **task_data,\n 'tags': io.prompt(\n 'Set tags (space delimited)',\n default=' '.join(task_data['tags']),\n show_default=False,\n value_proc=lambda x: x.split(' '),\n ),\n },\n\n # Edit project\n 'P': lambda: {\n **task_data,\n 'project': io.prompt(\n 'Set project',\n default=task_data['project'],\n ),\n },\n\n # Edit priority\n 'p': lambda: {\n **task_data,\n 'priority': io.prompt(\n 'Set priority',\n default='',\n show_default=False,\n type=click.Choice(['L', 'M', 'H', '']),\n ),\n },\n\n # Edit recur\n 'r': lambda: {\n **task_data,\n 'recur': io.prompt(\n 'Set recurrence (todoist style)',\n default='',\n value_proc=validation.validate_recur,\n ),\n },\n\n\n # Quit\n 'q': lambda: exit(1),\n\n # Help message\n # Note: this echoes prompt help and then returns the\n # task_data unchanged.\n '?': lambda: io.warn('\\n'.join([\n x.strip() for x in\n add_task_interactive.__doc__.split('\\n')\n ])) or task_data,\n }\n\n response = None\n while response not in ('y', 'n'):\n io.task(task_data)\n response = io.prompt(\n \"Import this task?\",\n type=click.Choice(callbacks.keys()),\n show_choices=True,\n )\n\n # Execute operation\n task_data = callbacks[response]()\n\n if response == 'n':\n io.warn('Skipping task')\n return\n\n return add_task(**task_data)",
"def add_task_cycles(self, cls, *args, **kwargs):\n orig_args = args\n setup_tasks = None\n if 'setup_tasks' in kwargs:\n setup_tasks = kwargs['setup_tasks']\n kwargs.pop('setup_tasks', None)\n\n tasks = list()\n for i, cycle in enumerate(self.cycles):\n\n # Put setup task at front of argument list\n if setup_tasks:\n args = (setup_tasks[i],) + orig_args \n\n task = cycle.add_task(cls, *args, **kwargs)\n\n if ((\"Inverse Kinematics\" in task.doc) or \n (\"Inverse Dynamics\" in task.doc)):\n raise Exception(\"TrialTask creation for individual cycles not \"\n \" currently supported for the Inverse Kinematics and \"\n \" Inverse Dynamics tools\")\n\n tasks.append(task)\n\n return tasks",
"def add_task(self, task: Task):\n self.update_from_file(self.storage_path)\n task.task_id = self.next_id\n self.tasks.append(task)\n self.next_id += 1\n self.save_to_file(self.storage_path)\n return task"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
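As a sketch of how the register_task decorator above might be used; the registry list, task class, and argparse wiring are illustrative assumptions, not part of the original source:

import argparse
import logging

TASK_CHOICES = []  # hypothetical registry handed to register_task

@register_task(TASK_CHOICES)
class CleanTask:
    name = "clean"  # the decorator logs this attribute, so each task class needs one

    def run(self):
        logging.info("running clean task")

# The instantiated tasks can then back an argparse choices list, e.g.:
parser = argparse.ArgumentParser()
parser.add_argument("task", choices=[t.name for t in TASK_CHOICES])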