query (string, lengths 9 to 9.05k) | document (string, lengths 10 to 222k) | negatives (list, lengths 19 to 20) | metadata (dict) |
---|---|---|---|
parse a variable decl
|
def variable_decl():
    @generate
    def persistent_global_scope():
        yield keyword("persistent")
        yield normalspaces()
        yield keyword("global")
        return s.Construct(s.PERSISTENTGLOBAL)

    @generate
    def global_scope():
        yield keyword("global")
        return s.Construct(s.GLOBAL)

    @generate
    def local_scope():
        yield keyword("local")
        return s.Construct(s.LOCAL)

    @generate
    def scope_def():
        sdef = yield (
            persistent_global_scope ^
            global_scope ^
            local_scope)
        return sdef

    # parsing (if there is no scope, it's not a decl, it's an assignment)
    scope = yield scope_def
    yield normalspaces()
    assignments = yield sepBy1(
        decl,  # optional_assignment if scope else assignment,
        listsep())
    return s.Construct(s.VARIABLE_DECL, scope, assignments)
|
[
"def declvars(self, name: str):",
"def _parse_variable(variable_ast: dict) -> \"VariableNode\":\n return VariableNode(\n name=_parse_name(variable_ast[\"name\"]),\n location=_parse_location(variable_ast[\"loc\"]),\n )",
"def _parse_var(lexer: shlex.shlex) -> Tuple[str, Optional[Any]]:\n flags_token = lexer.get_token()\n\n if flags_token != \"--\":\n var_flags = set(flags_token[1:])\n else:\n var_flags = set()\n\n var_name = lexer.get_token()\n var_value: Optional[Any] = None\n lookahead = lexer.get_token()\n\n if lookahead == \"=\":\n if \"a\" in var_flags:\n var_value = _parse_indexed(lexer)\n elif \"A\" in var_flags:\n var_value = _parse_assoc(lexer)\n else:\n var_value = _parse_string(lexer.get_token())\n else:\n lexer.push_token(lookahead)\n\n return var_name, var_value",
"def _decode_var(self, line):\n\t\tline_args = list(line[6:].split(\" -> \"))\n\n\t\tif line_args[1] == \"num\":\n\t\t\tvalue = v.Var(name=line_args[0], value=int(line_args[2]), value_type=line_args[1])\n\n\t\telif line_args[1] == \"bool\":\n\t\t\tif line_args[2][:-1] == \"True\":\n\t\t\t\tthe_bool = True\n\t\t\telse:\n\t\t\t\tthe_bool = False\n\n\t\t\tvalue = v.Var(name=line_args[0], value=the_bool, value_type=line_args[1])\n\t\telse:\n\t\t\tvalue = v.Var(name=line_args[0], value=f\"'{line_args[2][:-1]}'\", value_type=line_args[1])\n\n\t\treturn value",
"def compile_var_declaration(self) -> int:\n\n var_count = 0\n\n # was var kind (var)\n kind = self.tokenizer.get_current_token()[1]\n self.tokenizer.advance()\n # now type\n\n # get type which is int|char|boolean|class\n type_var = self.tokenizer.get_current_token()[1]\n self.tokenizer.advance()\n # now name\n\n # get name which is int|char|boolean|class\n name = self.tokenizer.get_current_token()[1]\n self.tokenizer.advance()\n # now , or ;\n\n # adding to symbol table\n self.symbol_table.define(name, type_var, kind)\n\n var_count += 1\n\n # run in a loop and print all names, with \",\" in between\n while self.tokenizer.current_word == COMMA:\n # was ,\n var_count += 1\n self.tokenizer.advance()\n # now name\n\n # get name which for the int|char|boolean|class var\n name = self.tokenizer.get_current_token()[1]\n self.tokenizer.advance()\n # now , or ;\n\n # adding to symbol table\n self.symbol_table.define(name, type_var, kind)\n\n # end of declaration\n\n # was ;\n self.tokenizer.advance()\n # now next line\n return var_count",
"def compileVarDec(self):\n self.current_compile = \"compileVarDec\"\n symbol_kind = self.eat(\"var\")\n symbol_type = self.eatTag([\"keyword\", \"identifier\"])\n symbol_name = self.eatTag(\"identifier\")\n self.symbol_table.define(symbol_name, symbol_type, symbol_kind)\n\n while not self.currentTokenEquals(\";\"):\n self.eat(\",\")\n symbol_name = self.eatTag(\"identifier\")\n self.symbol_table.define(symbol_name, symbol_type, symbol_kind)\n\n self.eat(\";\")",
"def split_variable_declaration(line):\n\n if len(line) == 0:\n return None\n\n #Ghastly regex ensures things inside quoutes are left alone\n token_regex = (\"(?x) \"\n \"([ *=;]*) \" #Split on 0 or more of these characters\n \"(?= \" #Followed by:\n \" (?: \" #Start of non-capture group\n \" [^\\\"]* \" #0 or more non-quoute characters\n \" \\\" \" #1 quoute\n \" [^\\\"]* \" #0 or more non-quoute characters\n \" \\\" \" #1 quoute\n \" )* \" #0 or more repetitions of non-capture group\n \" [^\\\"]* \" #0 or more non-quoutes\n \" $ \" #Until the end\n \") \")\n\n\n #Get the non-whitespace tokens in a list\n tokens = re.split(token_regex, line)\n tokens = [x for x in tokens if len(x) > 0 and not x.isspace()]\n\n #Remove whitespace from the asterisk and space tokens\n for i, tok in enumerate(tokens):\n if \"*\" in tok or \"=\" in tok:\n tokens[i] = tok.replace(\" \", \"\")\n\n components = [\"\"]*6\n\n first_split = 0\n if \"=\" in tokens:\n first_split = tokens.index(\"=\")\n elif \";\" in tokens:\n first_split = tokens.index(\";\")\n else:\n return None\n\n #The last token before the first_split is the name\n components[2] = tokens[first_split-1]\n\n #If the token before the name is only asterisks, it is the asterisk\n #component\n #Join everything before this to get the type component\n if tokens[first_split-2] == (len(tokens[first_split-2]) * \"*\"):\n components[1] = tokens[first_split-2]\n components[0] = \" \".join(tokens[0:first_split-2])\n else:\n components[0] = \" \".join(tokens[0:first_split-1])\n\n\n if tokens[first_split] == \"=\":\n components[3] = \"=\"\n if \";\" in tokens:\n components[4] = \" \".join(tokens[first_split+1:tokens.index(\";\")])\n else:\n components[4] = \" \".join(tokens[first_split+1:-1])\n\n\n if \";\" in tokens:\n components[5] = \";\"\n\n return components",
"def parse_var(tokens: List[lexer.LexerToken]) -> Tuple[InitVariableToken, List[lexer.LexerToken]]:\n\n # Eat optional static keyword\n static_token, tokens = eat_one(tokens, lexer.KeywordToken, False, \"static\")\n\n _, tokens = eat_one(tokens, lexer.KeywordToken, with_value=\"var\")\n identifier, tokens = eat_one(tokens, lexer.IdentifierToken)\n type_token, tokens = parse_typehint(tokens)\n\n # Is this an array initializer?\n arr, tokens = eat_one(tokens, lexer.SquareOpenToken, False)\n if arr:\n size, tokens = eat_one(tokens, lexer.NumberLiteralToken)\n _, tokens = eat_one(tokens, lexer.SquareCloseToken)\n\n # Does the array have an init value?\n assignment, tokens = eat_one(tokens, lexer.AssignmentToken, False)\n\n if assignment:\n init_val, tokens = parse_token(tokens)\n else:\n init_val = None\n\n value = FixedSizeArrayToken(LiteralToken(size), init_val)\n else:\n # Assignment is optional\n op, tokens = eat_one(tokens, lexer.AssignmentToken, False)\n if op:\n value, tokens = parse_token(tokens)\n else:\n value = UnsetValueToken()\n\n return InitVariableToken(identifier, type_token, value, static_token != None), tokens",
"def parse(cls, expr: str) -> \"Variable\":\n return _parse_and_convert(expr, rule_name=\"onlyVariable\")",
"def _parse_variable_definition(\n variable_definition_ast: dict\n) -> \"VariableDefinitionNode\":\n return VariableDefinitionNode(\n variable=_parse_variable(variable_definition_ast[\"variable\"]),\n type=_parse_type(variable_definition_ast[\"type\"]),\n default_value=_parse_value(variable_definition_ast[\"defaultValue\"]),\n location=_parse_location(variable_definition_ast[\"loc\"]),\n )",
"def visit_Variable(self, node):\n if isinstance(node.type, asr.Integer):\n var_type = IntBaseType(String('integer'))\n value = Integer(0)\n elif isinstance(node.type, asr.Real):\n var_type = FloatBaseType(String('real'))\n value = Float(0.0)\n else:\n raise NotImplementedError(\"Data type not supported\")\n\n if not (node.intent == 'in'):\n new_node = Variable(\n node.name\n ).as_Declaration(\n type = var_type,\n value = value\n )\n self._py_ast.append(new_node)",
"def isvar(tok):\n if type(tok) != str:\n return False\n if not tok[0] in alpha:\n return False\n for c in tok:\n if not c in alpha+nums:\n return False\n return True",
"def test_var_name(splat, name, trailing_ws, newline):\n example = f\"{splat}{name}{trailing_ws}{newline}\"\n if len(splat) <= 2 and name.isidentifier() and not newline:\n result = var_name.parse(example)\n assert result == f\"{splat}{name}\"\n else:\n with pytest.raises(parsy.ParseError):\n var_name.parse(example)",
"def _parse_definition(self, line):\n op_pos = line.find('=')\n op_end = op_pos + 1\n if op_pos < 0:\n self._error('not a variable definition')\n\n if op_pos > 0 and line[op_pos - 1] in [':', '+']:\n op_pos -= 1\n else:\n self._error('only := and += are supported')\n\n # set op, sym, and val\n op = line[op_pos:op_end]\n sym = line[:op_pos].strip()\n val = self._expand_value(line[op_end:].lstrip())\n\n if op == ':=':\n self.symbol_table[sym] = val\n elif op == '+=':\n self.symbol_table[sym] += ' ' + val",
"def initVariablesFromText(self, text):\n\t\t#\n\t\t# Declares\n\t\ts = StringIO(text)\n\t\tscan = Scanner(self.lexicon, s)\n\t\twhile 1:\n\t\t\ttok = scan.read()\n\t\t\tprint tok\n\t\t\tif tok[0] == \"declare\":\n\t\t\t\tv = StringIO(tok[1])\n\t\t\t\tinner_scan = Scanner(self.sub_lex, v)\n\t\t\t\twhile 1:\n\t\t\t\t\tinner_tok = inner_scan.read()\n\t\t\t\t\tprint inner_tok\n\t\t\t\t\tif inner_tok[0] is None:\n\t\t\t\t\t\tbreak\n\t\t\tif tok[0] is None:\n\t\t\t\tbreak",
"def find_var(str):\n next_index = 0\n while next_index < len(str):\n if str[next_index].isspace() or str[next_index] in ('$', '\\'', '\\\"'):\n break\n next_index += 1\n var_name = str[0:next_index]\n str = str[next_index:]\n return var_name, str",
"def _compile_var_name(self) -> None:\n first_char_of_token = self.tokenizer.token[0]\n if not first_char_of_token.isdigit():\n self._eat(self.tokenizer.token)\n else:\n raise IncorrectVariableName(\n \"First character of the variable cannot be a digit!\"\n )",
"def get_variable_value(variable_name, current_line, content, split_symbol='='):\n line_number = current_line\n while not variable_name in content[line_number]:\n line_number += 1\n single_line = content[line_number]\n line_splits = single_line.split(split_symbol)\n variable_value = float(line_splits[1])\n return (variable_value, line_number)",
"def match_variable(string, idx, var_chars=variable_chars):\n def match_variable_helper(string, idx, var_chars, name, length):\n # check is there remaining string to process\n if idx >= len(string):\n return (name, length)\n\n first = string[idx]\n\n # check var_chars\n correct_char = False\n for var_str in var_chars:\n if first in var_str:\n correct_char = True\n break\n if not correct_char:\n return (name, length)\n\n return match_variable_helper(string, idx + 1, var_chars, name + first, length + 1)\n\n # check if the first character could be variable:\n if (string[idx] not in UNDERSCORE) and (string[idx] not in ALPHABETICAL):\n return (None, 0)\n\n v, l = match_variable_helper(string, idx, var_chars, \"\", 0)\n\n # no match\n if l == 0:\n return (None, 0)\n\n # variable cannot start with numeric\n if VARIABLE_NOT_START_NUMERIC and v[0] in NUMERICAL:\n return (None, 0)\n\n return ((VARIABLE, v), l)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
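The document cells in this dump are generator-based parser combinators: the @generate decorator, sepBy1, optional and the ^ try-choice operator look like the API of the parsec package on PyPI, though that is an inference from the excerpt. Below is a minimal, self-contained sketch of the variable_decl idea (scope keywords tried most-specific first, then a comma-separated list of declarations); keyword, normalspaces, listsep, decl and Construct are hypothetical stand-ins for helpers that this dataset row does not include.

# Minimal sketch, assuming a parsec.py-style library; the helpers below are
# hypothetical stand-ins, not the project's real ones.
from parsec import generate, regex, sepBy1

def keyword(word):
    # stand-in: a bare word followed by a word boundary
    return regex(word + r"\b")

def normalspaces():
    # stand-in: zero or more spaces/tabs
    return regex(r"[ \t]*")

def listsep():
    # stand-in: a comma with optional surrounding spaces
    return regex(r"[ \t]*,[ \t]*")

def Construct(tag, *children):
    # stand-in for s.Construct: a tagged tuple
    return (tag, *children)

decl = regex(r"[A-Za-z_][A-Za-z0-9_]*")  # stand-in: a bare identifier

@generate
def persistent_global_scope():
    yield keyword("persistent")
    yield normalspaces()
    yield keyword("global")
    return Construct("PERSISTENTGLOBAL")

@generate
def global_scope():
    yield keyword("global")
    return Construct("GLOBAL")

@generate
def variable_decl():
    # try the longer "persistent global" form before plain "global"
    scope = yield persistent_global_scope ^ global_scope
    yield normalspaces()
    names = yield sepBy1(decl, listsep())
    return Construct("VARIABLE_DECL", scope, names)

print(variable_decl.parse("persistent global a, b, c"))
# -> ('VARIABLE_DECL', ('PERSISTENTGLOBAL',), ['a', 'b', 'c'])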
parse a while loop
|
def while_loop():
    yield keyword("while")
    yield normalspaces()
    whileexpr = yield expression
    yield normalspaces()
    yield keyword("do")
    yield normalspaces()
    bodyexpr = yield expression
    return s.Construct(s.WHILE_LOOP, whileexpr, bodyexpr)
|
[
"def _while(self, node):\n write_code('label while') \n self._eval_token(node.children[0])\n write_code('if-goto while_end')\n map(lambda x: self._eval_token(x), node.children[1:])\n write_code('goto while')\n return",
"def parse_while_statement(self):\n location = self.consume(\"while\").loc\n condition = self.parse_condition()\n body = self.parse_statement()\n return self.semantics.on_while(condition, body, location)",
"def parse_while(\n tokens: List[lexer.LexerToken],\n) -> Tuple[WhileStatementToken, List[lexer.LexerToken]]:\n\n _, tokens = eat_one(tokens, lexer.KeywordToken, with_value=\"while\")\n condition, tokens = parse_condition(tokens)\n body, tokens = parse_scope(tokens)\n return WhileStatementToken(condition, body), tokens",
"def visit_While(self, node):\n raise ScriptSyntaxError('while statements are not allowed')",
"def test_do_while_stmt2(self):\r\n input = \"\"\"int main () {\r\n /* local variable definition */\r\n int a;\r\n a = 0;\r\n /* do loop execution */\r\n do {\r\n printf(\"value of a: \", a);\r\n a = a + 1;\r\n }while( a < 20 );\r\n return 0;\r\n}\r\n\"\"\"\r\n expect = \"successful\"\r\n self.assertTrue(TestParser.checkParser(input,expect,232))",
"def process_while(self, node, state, *_):\n # Update the scope\n scope_path = state.scope_path.copy()\n if len(scope_path) == 0:\n scope_path.append(\"@global\")\n scope_path.append(\"loop\")\n\n # Initialize intermediate variables\n container_argument = []\n container_repeat = True\n container_return_value = []\n container_updated = []\n function_output = []\n function_updated = []\n function_input = []\n loop_condition_inputs = []\n loop_condition_inputs_lambda = []\n loop_variables_grfn = []\n loop_functions_grfn = []\n\n # Increment the loop index universally across the program\n if self.loop_index > -1:\n self.loop_index += 1\n else:\n self.loop_index = 0\n\n # First, get the `container_id_name` of the loop container\n container_id_name = self.generate_container_id_name(\n self.fortran_file, self.current_scope, f\"loop${self.loop_index}\"\n )\n\n # Update the scope of the loop container so that everything inside\n # the body of the loop will have the below scope\n self.current_scope = f\"{self.current_scope}.loop${self.loop_index}\"\n\n loop_test = self.gen_grfn(node.test, state, \"while\")\n\n # Define a new empty state that will be used for mapping the state of\n # the operations within the loop container\n loop_last_definition = {}\n loop_state = state.copy(\n last_definitions=loop_last_definition,\n next_definitions={},\n last_definition_default=-1,\n )\n\n # We want the loop_state to have state information about variables\n # defined one scope above its current parent scope. The below code\n # allows us to do that\n if self.parent_loop_state:\n for var in self.parent_loop_state.last_definitions:\n if var not in state.last_definitions:\n # state.last_definitions[var] = \\\n # self.parent_loop_state.last_definitions[var]\n state.last_definitions[var] = -1\n\n # Now populate the IF and EXIT functions for the loop by identifying\n # the loop conditionals\n # TODO Add a test to check for loop validity in this area. Need to\n # test with more types of while loops to finalize on a test condition\n\n for item in loop_test:\n if not isinstance(item, list):\n item = [item]\n for var in item:\n if \"var\" in var:\n function_input.append(\n f\"@variable::\"\n f\"{var['var']['variable']}::\"\n f\"{var['var']['index']}\"\n )\n container_argument.append(\n f\"@variable::\" f\"{var['var']['variable']}::-1\"\n )\n loop_condition_inputs.append(\n f\"@variable::\" f\"{var['var']['variable']}::-1\"\n )\n loop_condition_inputs_lambda.append(var[\"var\"][\"variable\"])\n elif \"call\" in var:\n # TODO: Very specifically for arrays. 
Will probably break\n # for other calls\n self._get_call_inputs(\n var[\"call\"],\n function_input,\n container_argument,\n loop_condition_inputs,\n loop_condition_inputs_lambda,\n state,\n )\n\n function_input = self._remove_duplicate_from_list(function_input)\n container_argument = self._remove_duplicate_from_list(\n container_argument\n )\n loop_condition_inputs = self._remove_duplicate_from_list(\n loop_condition_inputs\n )\n loop_condition_inputs_lambda = self._remove_duplicate_from_list(\n loop_condition_inputs_lambda\n )\n\n # Save the current state of the system so that it can used by a\n # nested loop to get information about the variables declared in its\n # outermost scopes.\n self.parent_loop_state = state\n\n # Define some condition and break variables in the loop state\n loop_state.last_definitions[\"IF_0\"] = 0\n loop_state.last_definitions[\"EXIT\"] = 0\n loop_state.variable_types[\"IF_0\"] = \"bool\"\n loop_state.variable_types[\"EXIT\"] = \"bool\"\n\n # Now, create the `variable` spec, `function name` and `container\n # wiring` for the check condition and break decisions.\n\n loop_check_variable = self.generate_variable_definition(\n [\"IF_0\"], None, False, loop_state\n )\n\n loop_state.next_definitions[\"#cond\"] = 1\n loop_state.last_definitions[\"#cond\"] = 0\n\n loop_check_function_name = self.generate_function_name(\n \"__condition__\", loop_check_variable[\"name\"], None\n )\n loop_condition_function = {\n \"function\": loop_check_function_name,\n \"input\": loop_condition_inputs,\n \"output\": [f\"@variable::IF_0::0\"],\n \"updated\": [],\n }\n\n loop_break_variable = self.generate_variable_definition(\n [\"EXIT\"], None, False, loop_state\n )\n # Increment the next definition of EXIT\n loop_state.next_definitions[\"EXIT\"] = 1\n\n loop_break_function_name = self.generate_function_name(\n \"__decision__\", loop_break_variable[\"name\"], None\n )\n loop_break_function = {\n \"function\": loop_break_function_name,\n \"input\": [f\"@variable::IF_0::0\"],\n \"output\": [f\"@variable::EXIT::0\"],\n \"updated\": [],\n }\n\n # Create the lambda function for the index variable initiations and\n # other loop checks. This has to be done through a custom lambda\n # function operation since the structure of genCode does not conform\n # with the way this lambda function will be created.\n # TODO Add a separate function to get the variables/literals of a\n # more complex form. 
The one below is for the basic case.\n\n # Second, lambda function for IF_0_0 test\n loop_continuation_test_lambda = self.generate_lambda_function(\n node.test,\n loop_check_function_name[\"name\"],\n True,\n False,\n False,\n False,\n loop_condition_inputs_lambda,\n state,\n True,\n )\n loop_state.lambda_strings.append(loop_continuation_test_lambda)\n\n # Third, lambda function for EXIT code\n loop_exit_test_lambda = self.generate_lambda_function(\n \"IF_0_0\",\n loop_break_function_name[\"name\"],\n True,\n False,\n False,\n False,\n [\"IF_0_0\"],\n state,\n True,\n )\n loop_state.lambda_strings.append(loop_exit_test_lambda)\n # Parse through the body of the loop container\n loop = self.gen_grfn(node.body, loop_state, \"for\")\n # Separate the body grfn into `variables` and `functions` sub parts\n (\n body_variables_grfn,\n body_functions_grfn,\n body_container_grfn,\n ) = self._get_variables_and_functions(loop)\n\n # Get a list of all variables that were used as inputs within the\n # loop body (nested as well).\n loop_body_inputs = []\n\n # Get only the dictionaries\n body_functions_grfn = [\n item for item in body_functions_grfn if isinstance(item, dict)\n ]\n for function in body_functions_grfn:\n if function[\"function\"][\"type\"] == \"lambda\":\n for ip in function[\"input\"]:\n (_, input_var, input_index) = ip.split(\"::\")\n if (\n int(input_index) == -1\n and input_var not in loop_body_inputs\n ):\n loop_body_inputs.append(input_var)\n elif function[\"function\"][\"type\"] == \"container\":\n # The same code as above but separating it out just in case\n # some extra checks are added in the future\n for ip in function[\"input\"]:\n (_, input_var, input_index) = ip.split(\"::\")\n # TODO Hack for bypassing `boolean` types. Will be\n # removed once the `literal` as an input question is\n # answered.\n if int(input_index) == -1 and input_var != \"boolean\":\n loop_body_inputs.append(input_var)\n\n # Remove any duplicates since variables can be used multiple times in\n # various assignments within the body\n loop_body_inputs = self._remove_duplicate_from_list(loop_body_inputs)\n\n # TODO: Not doing this right now. 
Refine this code and do it then.\n \"\"\"\n # Now, we remove the variables which were defined inside the loop\n # body itself and not taken as an input from outside the loop body\n filtered_loop_body_inputs = []\n for input_var in loop_body_inputs:\n # We filter out those variables which have -1 index in `state` (\n # which means it did not have a defined value above the loop\n # body) and is not a function argument (since they have an index\n # of -1 as well but have a defined value)\n if not (state.last_definitions[input_var] == -1 and input_var not in\n self.function_argument_map[main_function_name][\n \"argument_list\"]\n ):\n filtered_loop_body_inputs.append(input_var)\n\n \"\"\"\n\n # for item in filtered_loop_body_inputs:\n for item in loop_body_inputs:\n # TODO Hack for now, this should be filtered off from the code\n # block above\n if (\n \"IF\" not in item\n and state.last_definitions.get(item) is not None\n ):\n function_input.append(\n f\"@variable::{item}::\" f\"{state.last_definitions[item]}\"\n )\n container_argument.append(f\"@variable::{item}::-1\")\n\n function_input = self._remove_duplicate_from_list(function_input)\n container_argument = self._remove_duplicate_from_list(\n container_argument\n )\n\n # Creating variable specs for the inputs to the containers.\n start_definitions = loop_state.last_definitions.copy()\n container_definitions = start_definitions.copy()\n container_input_state = loop_state.copy(\n last_definitions=container_definitions\n )\n for argument in container_argument:\n (_, var, index) = argument.split(\"::\")\n container_input_state.last_definitions[var] = int(index)\n argument_variable = self.generate_variable_definition(\n [var], None, False, container_input_state\n )\n body_variables_grfn.append(argument_variable)\n\n # TODO: Think about removing (or retaining) variables which even\n # though defined outside the loop, are defined again inside the loop\n # and then used by an operation after it.\n # E.g. x = 5\n # for ___ :\n # x = 2\n # for ___:\n # y = x + 2\n # Here, loop$1 will have `x` as an input but will loop$0 have `x` as\n # an input as well?\n # Currently, such variables are included in the `input`/`argument`\n # field.\n\n # Now, we list out all variables that have been updated/defined\n # inside the body of the loop\n loop_body_outputs = {}\n for function in body_functions_grfn:\n if function[\"function\"][\"type\"] == \"lambda\":\n # TODO Currently, we only deal with a single output variable.\n # Modify the line above to not look at only [0] but loop\n # through the output to incorporate multiple outputs\n (_, output_var, output_index) = function[\"output\"][0].split(\n \"::\"\n )\n loop_body_outputs[output_var] = output_index\n elif function[\"function\"][\"type\"] == \"container\":\n for ip in function[\"updated\"]:\n (_, output_var, output_index) = ip.split(\"::\")\n loop_body_outputs[output_var] = output_index\n\n for item in loop_body_outputs:\n # TODO the indexing variables in of function block and container\n # block will be different. 
Figure about the differences and\n # implement them.\n # TODO: Hack, this IF check should not even appear in\n # loop_body_outputs\n if (\n \"IF\" not in item\n and \"EXIT\" not in item\n and state.last_definitions.get(item) is not None\n ):\n if (state.last_definitions[item] == -2) or (item == \"EXIT\"):\n updated_index = 0\n else:\n updated_index = state.last_definitions[item] + 1\n function_updated.append(\n f\"@variable::{item}::\" f\"{updated_index}\"\n )\n state.last_definitions[item] = updated_index\n state.next_definitions[item] = updated_index + 1\n item_id = loop_state.last_definitions.get(\n item, loop_body_outputs[item]\n )\n container_updated.append(f\"@variable::{item}::\" f\"{item_id}\")\n # Create variable spec for updated variables in parent scope.\n # So, temporarily change the current scope to its previous form\n tmp_scope = self.current_scope\n self.current_scope = \".\".join(\n self.current_scope.split(\".\")[:-1]\n )\n updated_variable = self.generate_variable_definition(\n [item], None, False, state\n )\n body_variables_grfn.append(updated_variable)\n # Changing it back to its current form\n self.current_scope = tmp_scope\n\n # TODO: For the `loop_body_outputs`, all variables that were\n # defined/updated inside the loop body are included. Sometimes,\n # some variables are defined inside the loop body, used within that\n # body and then not used or re-assigned to another value outside the\n # loop body. Do we include such variables in the updated list?\n # Another heuristic to think about is whether to keep only those\n # variables in the `updated` list which are in the `input` list.\n\n loop_variables_grfn.append(loop_check_variable)\n loop_variables_grfn.append(loop_break_variable)\n\n loop_functions_grfn.append(loop_condition_function)\n loop_functions_grfn.append(loop_break_function)\n\n loop_functions_grfn += body_functions_grfn\n\n container_gensym = self.generate_gensym(\"container\")\n\n loop_container = {\n \"name\": container_id_name,\n \"source_refs\": [],\n \"gensym\": container_gensym,\n \"repeat\": container_repeat,\n \"arguments\": container_argument,\n \"updated\": container_updated,\n \"return_value\": container_return_value,\n \"body\": loop_functions_grfn,\n }\n loop_function = {\n \"function\": {\"name\": container_id_name, \"type\": \"container\"},\n \"input\": function_input,\n \"output\": function_output,\n \"updated\": function_updated,\n }\n loop_container = [loop_container] + body_container_grfn\n loop_variables = body_variables_grfn + loop_variables_grfn\n grfn = {\n \"containers\": loop_container,\n \"variables\": loop_variables,\n \"functions\": [loop_function],\n }\n self.current_scope = \".\".join(self.current_scope.split(\".\")[:-1])\n\n return [grfn]",
"def compile_while(self):\n self.__stream.write(self.__open_terminal(\"whileStatement\") + \"\\n\")\n self.__stream.write(self.__make_terminal(\"keyword\", \"while\"))\n self.__compile_symbol(\"(\")\n self.compile_expression()\n self.__compile_symbol(\")\")\n self.__compile_symbol(\"{\")\n self.compile_statements()\n self.__compile_symbol(\"}\")\n self.__stream.write(self.__close_terminal(\"whileStatement\") + \"\\n\")",
"def test_do_while_stmt3(self):\r\n input = \"\"\"int main () {\r\n\r\n do {\r\n func(x%4)[a[i]]; \r\n }while(!5);\r\n return -1;\r\n}\r\n\"\"\"\r\n expect = \"successful\"\r\n self.assertTrue(TestParser.checkParser(input,expect,233))",
"async def parse_periodic():\n while True:\n status = await check_status()\n #status = 4 - parser must be stoped\n if status == 4:\n break\n await parse(status)\n await asyncio.sleep(settings.fetching_interval_in_seconds)",
"def test_do_while_stmt_error2(self):\r\n input = \"\"\"int main () {\r\n do{\r\n a=c= 5%4;\r\n cal(a,b,d);\r\n }while(arr[true])\r\n}\r\n\"\"\"\r\n expect = \"Error on line 6 col 0: }\"\r\n self.assertTrue(TestParser.checkParser(input,expect,236))",
"def do_loop():\n yield keyword(\"do\")\n yield normalspaces()\n bodyexpr = yield expression\n yield normalspaces()\n yield keyword(\"while\")\n yield normalspaces()\n whileexpr = yield expression\n return s.Construct(s.DO_LOOP, bodyexpr, whileexpr)",
"def visit_While(self, node): # pylint: disable=invalid-name\n self.add_frame()\n test = self.visit(node.test)\n self.pop_frame()\n orig_type_map = self.type_map.copy()\n body = list(self.visit(expr) for expr in node.body)\n for var, t in self.type_map.items():\n if var not in orig_type_map:\n # new variables in while body are considered active after the leaving the block\n orig_type_map[var] = t\n elif orig_type_map[var] != t:\n # variables for which arg_types diverge are considered dead after leaving the block\n self._dead_vars[var] = (t, orig_type_map.pop(var))\n self.type_map = orig_type_map\n orelse = list(self.visit(expr) for expr in node.orelse)\n return ast.While(test, body, orelse)",
"def test_convert_while_loop():\n s = cst.parse_module(\n \"\"\"\npos = 0\nfinish = 5\nwhile pos <= finish:\n m = self.search(s, pos)\n if not m:\n res += s[pos:]\n break \n \n \"\"\"\n )\n expected = cst.parse_module(\n \"\"\"\npos = 0\nfinish = 5\nfor _while_ in range(WHILE_LOOP_EMULATION_ITERATION):\n if pos > finish:\n break\n m = self.search(s, pos)\n if not m:\n res += s[pos:]\n break \n \n \"\"\"\n )\n ctx = CodemodContext()\n w2f = rewrite_loopz.WhileToForLoop(ctx)\n rewritten = s.visit(w2f)\n assert expected.code.strip() == rewritten.code.strip()",
"def until(parser: Parser[Input, Output]) -> UntilParser:\n return UntilParser(wrap_literal(parser))",
"def transform_while_stmt(self, node):\n children = node.get_children()\n\n condition = self.transform(next(children))\n statements = self.transform(next(children))\n\n if isinstance(statements, list):\n statement_block = CodeBlock(*statements)\n else:\n statement_block = CodeBlock(statements)\n\n return While(condition, statement_block)",
"def _oldloop():\n \n # and now loop untill we recieve a response or time out\n while True:\n\n # this was ported across from ruby where a raw socket recieves all icmp reply's\n # probably something to do wit setting promisc mode or something\n # this pythong config seems to only return packets sourced from the app\n # needs more testing\n\n # this section not needed\n loop_time = time.clock()\n current_rttms = (loop_time - start_time) * 1000\n \n # break out on timeout\n if (current_rttms > timeout):\n return(False,timeout)\n \n sel = select.select([sock], [], [], (timeout / 1000))\n if sel[0] == []: continue\n \n # we got a packet\n loop_time = time.clock()\n current_rttms = (loop_time - start_time) * 1000\n\n data, addr = sock.recvfrom(1500)\n if (addr[0] != ip):\n print \"not my packet\"\n continue\n \n header = data[20:28]\n x, y, z, r_id, r_sequence = struct.unpack('!BBHHH', header)\n\n if (r_id == process_id and r_sequence == sequence):\n return(True, current_rttms)",
"def end_game_parse(line):\n tokens = line.split()\n while tokens:\n token = tokens.pop(0)\n \"*** YOUR CODE HERE ***\"\n return None",
"def parseloop(self, n):\n\t\tn.paramlist, n.arglist = self.creatematchlists(n)\n\t\tlog(\"%for: \", n.paramlist, \" => \", n.arglist)\n\n\t\t# params must be a simple list of references\n\t\tself.checkparams(n.paramlist)\n\t\tif len(n.paramlist) == 0:\n\t\t\traise SyntaxError(\"empty paramlist in for [%s] invalid in node [%s] from %s@%i\" % (n.line, n.name, n.path(), n.linenr()))",
"def _extract_loop(self, paragraph):\n inside_loop = False\n loop_tree = etree.Element(\"root\") # Create a new tree root\n prev_paragraph = None\n list_of_all_loop_nodes = []\n\n # Keep looking for the first (and only) loop after this tag\n for node, text, node_index in self._itersiblingtext(paragraph):\n if '<' in text:\n assert not inside_loop\n inside_loop = True\n loop_start_node = self._get_parent_paragraph(node)\n self._assert_element_is(loop_start_node, 'p')\n\n if inside_loop:\n # This text node is enclosed by a run, which is in turn enclosed by the paragraph\n # This paragraph is what we want to extract\n current_paragraph = self._get_parent_paragraph(node)\n self._assert_element_is(current_paragraph, 'p')\n if prev_paragraph != current_paragraph:\n loop_tree.insert(node_index, copy.deepcopy(current_paragraph))\n prev_paragraph = current_paragraph\n list_of_all_loop_nodes.append(current_paragraph)\n\n if '>' in text:\n assert inside_loop\n # Done with finding loop, so exit this iterator\n break\n logging.debug(\"Found a loop spanning %d paragraphs, %d\" % (node_index, len(loop_tree)))\n return loop_start_node, loop_tree, list_of_all_loop_nodes"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
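A runnable sketch of the while-loop row above, under the same parsec.py assumption; expression is reduced to a bare identifier and s.Construct to a plain tuple so the example stays self-contained.

# Sketch only: keyword, normalspaces and expression are hypothetical stand-ins.
from parsec import generate, regex

keyword = lambda word: regex(word + r"\b")
normalspaces = lambda: regex(r"[ \t]*")
expression = regex(r"[A-Za-z_][A-Za-z0-9_]*")

@generate
def while_loop():
    yield keyword("while")
    yield normalspaces()
    whileexpr = yield expression
    yield normalspaces()
    yield keyword("do")
    yield normalspaces()
    bodyexpr = yield expression
    return ("WHILE_LOOP", whileexpr, bodyexpr)

print(while_loop.parse("while running do step"))  # ('WHILE_LOOP', 'running', 'step')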
parse a do loop
|
def do_loop():
    yield keyword("do")
    yield normalspaces()
    bodyexpr = yield expression
    yield normalspaces()
    yield keyword("while")
    yield normalspaces()
    whileexpr = yield expression
    return s.Construct(s.DO_LOOP, bodyexpr, whileexpr)
|
[
"def test_do_while_stmt2(self):\r\n input = \"\"\"int main () {\r\n /* local variable definition */\r\n int a;\r\n a = 0;\r\n /* do loop execution */\r\n do {\r\n printf(\"value of a: \", a);\r\n a = a + 1;\r\n }while( a < 20 );\r\n return 0;\r\n}\r\n\"\"\"\r\n expect = \"successful\"\r\n self.assertTrue(TestParser.checkParser(input,expect,232))",
"def test_do_while_stmt3(self):\r\n input = \"\"\"int main () {\r\n\r\n do {\r\n func(x%4)[a[i]]; \r\n }while(!5);\r\n return -1;\r\n}\r\n\"\"\"\r\n expect = \"successful\"\r\n self.assertTrue(TestParser.checkParser(input,expect,233))",
"def test_do_while_stmt_error2(self):\r\n input = \"\"\"int main () {\r\n do{\r\n a=c= 5%4;\r\n cal(a,b,d);\r\n }while(arr[true])\r\n}\r\n\"\"\"\r\n expect = \"Error on line 6 col 0: }\"\r\n self.assertTrue(TestParser.checkParser(input,expect,236))",
"def parseloop(self, n):\n\t\tn.paramlist, n.arglist = self.creatematchlists(n)\n\t\tlog(\"%for: \", n.paramlist, \" => \", n.arglist)\n\n\t\t# params must be a simple list of references\n\t\tself.checkparams(n.paramlist)\n\t\tif len(n.paramlist) == 0:\n\t\t\traise SyntaxError(\"empty paramlist in for [%s] invalid in node [%s] from %s@%i\" % (n.line, n.name, n.path(), n.linenr()))",
"def while_loop():\n yield keyword(\"while\")\n yield normalspaces()\n whileexpr = yield expression\n yield normalspaces()\n yield keyword(\"do\")\n yield normalspaces()\n bodyexpr = yield expression\n return s.Construct(s.WHILE_LOOP, whileexpr, bodyexpr)",
"def test_do_while(name, label, control_comma, terminal_expression, end_name, end_label):\n # pylint: disable=redefined-outer-name, too-many-arguments\n name_snippet = name + \": \" if name else None\n label_snippet = label + \" \" if label else None\n comma_snippet = \", \" if control_comma else None\n code = \"\"\"{name}do {label}{comma}while ({term})\n write (6, '(I0)') variable\n{endlabel} {endstmt}\n\"\"\".format(\n name=name_snippet or \"\",\n label=label_snippet or \"\",\n comma=comma_snippet or \"\",\n term=terminal_expression,\n endlabel=end_label or \"\",\n endstmt=get_end_do(end_name),\n )\n expected = \"\"\" {name}DO {label}while ({term})\n WRITE (6, '(I0)') variable\n{endlabel} {endstmt}\n\"\"\".format(\n name=name_snippet or \"\",\n label=label_snippet or \"\",\n term=terminal_expression,\n endlabel=end_label or \" \",\n endstmt=get_end_do(end_name),\n )\n print(code)\n reader = FortranStringReader(code)\n reader.set_format(FortranFormat(True, False))\n parser = FortranParser(reader)\n if (name != end_name) or (label and (label != end_label)):\n with pytest.raises(AnalyzeError):\n parser.parse()\n else:\n parser.parse()\n loop = parser.block.content[0]\n assert str(loop).splitlines() == expected.splitlines()",
"def c_loop(self, args):\n # first, build everything\n # then, enter loop\n # TODO: incremental fetching\n while True:\n print('starting...')\n self.c_fetch_all(args)\n print('waiting...')\n time.sleep(300)",
"def compile_do(self):\n # <doStatement>\n self.output.write(self.tag(\"doStatement\") + NEW_LINE)\n\n # do\n if self.tokenizer.current_value == \"do\":\n self.output.write(self.tag(grammar.K_KEYWORD) + self.tokenizer.current_value + self.ctag(grammar.K_KEYWORD)\n + NEW_LINE)\n\n # subroutineCall\n self.tokenizer.advance()\n self.subroutineCall()\n\n # ;\n self.tokenizer.advance()\n self.checkSymbol(\";\")\n\n # </doStatement>\n self.output.write(self.ctag(\"doStatement\") + NEW_LINE)",
"def compile_do(self):\n # write <do_statement>\n self.non_terminal_open(XML_DO_STATEMENT)\n # write <keyword> do <keyword>\n self.one_liner(XML_KEY_WORD, self.tokenizer.current_token)\n # advance to next token (subroutine call)\n self.tokenizer.advance()\n # write <identifier> name_of_func <identifier>\n self.one_liner(XML_IDENTIFIER, self.tokenizer.current_token)\n self.tokenizer.advance()\n # compile the subroutine call\n self.compile_subroutine_call()\n # write <symbol> ; <symbol>\n self.one_liner(XML_SYMBOL, self.tokenizer.current_token)\n # write <do_statement>\n self.non_terminal_end(XML_DO_STATEMENT)\n self.tokenizer.advance()",
"def compile_do(self):\n self.__stream.write(self.__open_terminal(\"doStatement\") + \"\\n\")\n self.__stream.write(self.__make_terminal(\"keyword\", \"do\"))\n self.__compile_subroutine_call()\n self.__compile_symbol(\";\")\n self.__stream.write(self.__close_terminal(\"doStatement\") + \"\\n\")",
"async def parse_periodic():\n while True:\n status = await check_status()\n #status = 4 - parser must be stoped\n if status == 4:\n break\n await parse(status)\n await asyncio.sleep(settings.fetching_interval_in_seconds)",
"def cli_loop_select(self, timeout):\n parsed_some = True # requires thight loop, as it may be sending messages core<->cmd\n while parsed_some:\n parsed_some = False\n self.debug(\"Checking if data ready: %s // to %s\"%(repr(self.filenos()), timeout) )\n for n, clients_ready in enumerate(select.select(self.filenos(),[],[], timeout)):\n # self.debug(\"Clients ready[%s]: \"%n, clients_ready)\n for c in clients_ready:\n # self.debug(\"Data ready at %s\"%repr(c))\n parsed_some |= c.recv_and_parse()\n # self.debug(\"parsed_more\", parsed_some)\n timeout=0.1\n # self.debug(\"User input\", parsed_some)",
"def _process_loop(self):\n\n while True:\n func_path, subtask_id, client_uri = self.pending_tasks.get()\n data = self._get_task_data(subtask_id, client_uri)\n func = self._load_function(func_path, client_uri)\n\n if data and func:\n start_time = datetime.now()\n try:\n result = func(data, subtask_id[1])\n self._total_time += datetime.now() - start_time\n except Exception as e:\n self.log.report(\n 'While subtask %s was executing the following exception occurred: %s.' % (subtask_id, type(e)),\n True, 'red')\n else:\n # Enqueue the result to be delivered\n self.completed_tasks.put((result, subtask_id, client_uri))\n self.log.report('Subtask %s result is ready.' % str(subtask_id))",
"def _oldloop():\n \n # and now loop untill we recieve a response or time out\n while True:\n\n # this was ported across from ruby where a raw socket recieves all icmp reply's\n # probably something to do wit setting promisc mode or something\n # this pythong config seems to only return packets sourced from the app\n # needs more testing\n\n # this section not needed\n loop_time = time.clock()\n current_rttms = (loop_time - start_time) * 1000\n \n # break out on timeout\n if (current_rttms > timeout):\n return(False,timeout)\n \n sel = select.select([sock], [], [], (timeout / 1000))\n if sel[0] == []: continue\n \n # we got a packet\n loop_time = time.clock()\n current_rttms = (loop_time - start_time) * 1000\n\n data, addr = sock.recvfrom(1500)\n if (addr[0] != ip):\n print \"not my packet\"\n continue\n \n header = data[20:28]\n x, y, z, r_id, r_sequence = struct.unpack('!BBHHH', header)\n\n if (r_id == process_id and r_sequence == sequence):\n return(True, current_rttms)",
"def loop_through_buffer():\n global RECV_BUFFER\n global SEQUENCE\n\n top_msg = RECV_BUFFER.get_top()\n\n while top_msg:\n if top_msg['sequence'] == SEQUENCE:\n if top_msg['eof']:\n log(\"[completed]\")\n sys.exit(0)\n\n write_data_to_stdout(top_msg)\n\n RECV_BUFFER.remove_top()\n top_msg = RECV_BUFFER.get_top()\n else:\n break",
"def loop(self):\n for I in self.test_cases:\n yield I",
"def read_tasks(mongo):\n while True:\n query = mongo.db.tasks.find({\"status\" : \"Not started\" }) #query undone tasks\n for task in query:\n task_to_run = str(task['cmd']) # Store task to run\n try:\n process = subprocess.Popen(task_to_run.split(), stdout= subprocess.PIPE, \n stderr = subprocess.PIPE)\n process_output =str(process.communicate()[0])\n process_status = \"Done\"\n except Exception as e:\n process_output = \"There was an error running the task: \" + str(e)\n process_status = \"Error\"\n\n\n mongo.db.tasks.update(\n {\"_id\" : task['_id']}, \n {\"$set\":\n {\n \"status\" : process_status,\n \"output\" : process_output\n }\n }\n )\n sleep(10)",
"def ars_read_loop (self):\n write_log ('Read starting up\\n', dst='both')\n i = 1\n while self.runflag:\n self.ars2msg (loglvl=LG_DEF)\n if ( self.ars_pending > 2048 ):\n print ('Pending Buffer =', self.ars_pending)\n elif ( self.ars_pending < 0 ):\n print ('Trouble with connection to device\\n')\n sleep (rdloopdt)\n \n self.runflag = False\n write_log ('Read shutting down\\n', dst='both')\n return",
"def on_do_handler():\n @generate\n def do_exprseq():\n yield keyword(\"do\")\n yield normalspaces()\n handler = yield expression # expr_seq\n return handler\n\n yield keyword(\"on\")\n yield normalspaces()\n event = yield var_name()\n yield normalspaces()\n handler = yield function_return | do_exprseq\n return s.Construct(s.ON_DO_HANDLER, event, handler)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
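The do-loop parser is the while-loop parser with its keywords reversed. A sketch of how the two could be offered as alternatives of a single statement parser, assuming ^ is the library's backtracking try-choice; the helpers are the same hypothetical stand-ins as above.

# Sketch under the same assumptions: parsec.py-style API, identifier-only
# expressions, plain tuples instead of s.Construct.
from parsec import generate, regex

keyword = lambda word: regex(word + r"\b")
normalspaces = lambda: regex(r"[ \t]*")
expression = regex(r"[A-Za-z_][A-Za-z0-9_]*")

@generate
def while_loop():
    yield keyword("while")
    yield normalspaces()
    whileexpr = yield expression
    yield normalspaces()
    yield keyword("do")
    yield normalspaces()
    bodyexpr = yield expression
    return ("WHILE_LOOP", whileexpr, bodyexpr)

@generate
def do_loop():
    yield keyword("do")
    yield normalspaces()
    bodyexpr = yield expression
    yield normalspaces()
    yield keyword("while")
    yield normalspaces()
    whileexpr = yield expression
    return ("DO_LOOP", bodyexpr, whileexpr)

loop_stmt = while_loop ^ do_loop  # try "while ... do" first, then "do ... while"

print(loop_stmt.parse("do step while running"))  # ('DO_LOOP', 'step', 'running')
print(loop_stmt.parse("while running do step"))  # ('WHILE_LOOP', 'running', 'step')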
parse a loop exit
|
def loop_exit():
    @generate
    def with_expr():
        yield normalspaces()
        yield keyword("with")
        yield normalspaces()
        value = yield operand
        return value

    yield keyword("exit")
    value = yield optional(with_expr)
    return s.Construct(s.LOOP_EXIT, value)
|
[
"def end_game_parse(line):\n tokens = line.split()\n while tokens:\n token = tokens.pop(0)\n \"*** YOUR CODE HERE ***\"\n return None",
"def end_while_true(self):\n seen_close = 0\n while ((self.program[self.pc] != '[' or seen_close > 0) and \\\n self.pc >= 0):\n self.pc -= 1\n if (self.program[self.pc] == ']'):\n seen_close += 1\n elif (self.program[self.pc] == '[' and seen_close > 0):\n seen_close -= 1\n\n # because runStep will increment the program counter after\n # this method finishes, it needs to be offset by 1 so the\n # loop test will occur properly\n self.pc -= 1",
"def test_do_while_stmt3(self):\r\n input = \"\"\"int main () {\r\n\r\n do {\r\n func(x%4)[a[i]]; \r\n }while(!5);\r\n return -1;\r\n}\r\n\"\"\"\r\n expect = \"successful\"\r\n self.assertTrue(TestParser.checkParser(input,expect,233))",
"def test_do_while_stmt_error2(self):\r\n input = \"\"\"int main () {\r\n do{\r\n a=c= 5%4;\r\n cal(a,b,d);\r\n }while(arr[true])\r\n}\r\n\"\"\"\r\n expect = \"Error on line 6 col 0: }\"\r\n self.assertTrue(TestParser.checkParser(input,expect,236))",
"def test_do_while_stmt2(self):\r\n input = \"\"\"int main () {\r\n /* local variable definition */\r\n int a;\r\n a = 0;\r\n /* do loop execution */\r\n do {\r\n printf(\"value of a: \", a);\r\n a = a + 1;\r\n }while( a < 20 );\r\n return 0;\r\n}\r\n\"\"\"\r\n expect = \"successful\"\r\n self.assertTrue(TestParser.checkParser(input,expect,232))",
"def _while(self, node):\n write_code('label while') \n self._eval_token(node.children[0])\n write_code('if-goto while_end')\n map(lambda x: self._eval_token(x), node.children[1:])\n write_code('goto while')\n return",
"def while_loop():\n yield keyword(\"while\")\n yield normalspaces()\n whileexpr = yield expression\n yield normalspaces()\n yield keyword(\"do\")\n yield normalspaces()\n bodyexpr = yield expression\n return s.Construct(s.WHILE_LOOP, whileexpr, bodyexpr)",
"def while_exit_cond(i, result, logits, cache, decoding_stats): # pylint: disable=unused-argument\n not_overflow = i < decode_length\n return not_overflow",
"async def parse_periodic():\n while True:\n status = await check_status()\n #status = 4 - parser must be stoped\n if status == 4:\n break\n await parse(status)\n await asyncio.sleep(settings.fetching_interval_in_seconds)",
"def do_EOF(self, arg):\n print(\"\", end=\"\")\n sys.exit()",
"def _parse_stop_response(self, response, prompt):\n\n log.debug(\"STOP RESPONSE = %s\" % response)\n\n if response.startswith(\"ERROR\"):\n log.error(\"Instrument returned error in response to STOP Command: %s\" % response)\n\n self._streaming = False\n\n return response",
"def _oldloop():\n \n # and now loop untill we recieve a response or time out\n while True:\n\n # this was ported across from ruby where a raw socket recieves all icmp reply's\n # probably something to do wit setting promisc mode or something\n # this pythong config seems to only return packets sourced from the app\n # needs more testing\n\n # this section not needed\n loop_time = time.clock()\n current_rttms = (loop_time - start_time) * 1000\n \n # break out on timeout\n if (current_rttms > timeout):\n return(False,timeout)\n \n sel = select.select([sock], [], [], (timeout / 1000))\n if sel[0] == []: continue\n \n # we got a packet\n loop_time = time.clock()\n current_rttms = (loop_time - start_time) * 1000\n\n data, addr = sock.recvfrom(1500)\n if (addr[0] != ip):\n print \"not my packet\"\n continue\n \n header = data[20:28]\n x, y, z, r_id, r_sequence = struct.unpack('!BBHHH', header)\n\n if (r_id == process_id and r_sequence == sequence):\n return(True, current_rttms)",
"def parseloop(self, n):\n\t\tn.paramlist, n.arglist = self.creatematchlists(n)\n\t\tlog(\"%for: \", n.paramlist, \" => \", n.arglist)\n\n\t\t# params must be a simple list of references\n\t\tself.checkparams(n.paramlist)\n\t\tif len(n.paramlist) == 0:\n\t\t\traise SyntaxError(\"empty paramlist in for [%s] invalid in node [%s] from %s@%i\" % (n.line, n.name, n.path(), n.linenr()))",
"def a_lo_exit(self):\n self.current_loop = self.ref_stack.pop(-1)",
"def _endloop(self):\n if not self._band.get():\n self._loop_stack[0:1] = []\n else:\n self._pointer = self._loop_stack[0]",
"def stop(data):\n raise StopIteration()",
"def do_loop():\n yield keyword(\"do\")\n yield normalspaces()\n bodyexpr = yield expression\n yield normalspaces()\n yield keyword(\"while\")\n yield normalspaces()\n whileexpr = yield expression\n return s.Construct(s.DO_LOOP, bodyexpr, whileexpr)",
"def process(self):\n while self.__global_state != DFAGlobalState.HALT:\n self.step()",
"def quit_():\n raise AbortPromptLoop"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
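The distinctive part of the loop-exit row is the optional "with <operand>" tail. A sketch of that shape, assuming the library's optional() yields None when its argument does not match (which is what the document code appears to rely on); operand is reduced to an identifier and the other helpers are hypothetical stand-ins.

# Sketch only; the optional() semantics and the helpers are assumptions.
from parsec import generate, optional, regex

keyword = lambda word: regex(word + r"\b")
normalspaces = lambda: regex(r"[ \t]*")
operand = regex(r"[A-Za-z_][A-Za-z0-9_]*")

@generate
def with_expr():
    yield normalspaces()
    yield keyword("with")
    yield normalspaces()
    value = yield operand
    return value

@generate
def loop_exit():
    yield keyword("exit")
    value = yield optional(with_expr)  # None when no "with ..." follows
    return ("LOOP_EXIT", value)

print(loop_exit.parse("exit with result"))  # ('LOOP_EXIT', 'result')
print(loop_exit.parse("exit"))              # ('LOOP_EXIT', None)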
parse a try expr
|
def try_expr():
    yield keyword("try")
    yield normalspaces()
    tryexpr = yield expression
    yield normalspaces()
    yield keyword("catch")
    yield normalspaces()
    catchexpr = yield expression
    return s.Construct(s.TRY_EXPR, tryexpr, catchexpr)
|
[
"def testTryExceptElseFinally(self):\n token = self.parser.parse(filename='evo/TryExceptElseFinally.evo')\n trytoken = token.content[0]\n res = trytoken.siblings()\n self.assertEqual(len(res['exc']), 1)\n self.assertTrue(res['else'])\n self.assertTrue(res['finally'])",
"def testTryExceptElse(self):\n token = self.parser.parse(filename='evo/TryExceptElse.evo')\n trytoken = token.content[0]\n res = trytoken.siblings()\n self.assertEqual(len(res['exc']), 1)\n self.assertTrue(res['else'])\n self.assertFalse(res['finally'])",
"def testTryExceptElseFinallyTrailing(self):\n token = self.parser.parse(\n filename='evo/TryExceptElseFinallyTrailing.evo')\n trytoken = token.content[0]\n res = trytoken.siblings()\n self.assertEqual(len(res['exc']), 1)\n self.assertTrue(res['else'])\n self.assertTrue(res['finally'])",
"def test_syntaxerror():\n inp = '@article{name}'\n with pytest.raises(pyparsing.ParseException):\n parse_entry(inp)",
"def parse(s):\n t = _Tokens(s)\n ret = t.parse_expr(True)\n if len(t) != 0:\n raise ValueError('extra stuff:' + str(t))\n return ret",
"def testTryExceptFinally(self):\n token = self.parser.parse(filename='evo/TryExceptFinally.evo')\n trytoken = token.content[0]\n res = trytoken.siblings()\n self.assertEqual(len(res['exc']), 1)\n self.assertFalse(res['else'])\n self.assertTrue(res['finally'])",
"def try_block(self, tree, parent):\n self.add_line(f\"try\")\n with self.scope():\n self.subtree(tree.nested_block)\n\n if tree.catch_block:\n self.catch_block(tree.catch_block, parent=parent)\n if tree.finally_block:\n self.finally_block(tree.finally_block, parent=parent)",
"def test_unbalanced_parens(self):\n with self.assertRaises(SyntacticError):\n tokens = TokenStream(StringIO(\"(a (b c (d e (f (g))\"))\n lexer = Lexer()\n lexer.parse(tokens)",
"def test_do_while_stmt_error2(self):\r\n input = \"\"\"int main () {\r\n do{\r\n a=c= 5%4;\r\n cal(a,b,d);\r\n }while(arr[true])\r\n}\r\n\"\"\"\r\n expect = \"Error on line 6 col 0: }\"\r\n self.assertTrue(TestParser.checkParser(input,expect,236))",
"def testSyntaxErrorDoubleElse(self):\n template = '{{ if [var] }} {{ else }} {{ else }} {{ endif }}'\n self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)",
"def test_syntax(self):\n lisp = self.lisp\n for expr in [\n \"(\",\n \"(()\",\n \")\",\n \"())\",\n \".)\"\n ]:\n self.assertRaises(ParseError, lisp.readLisp, expr)",
"def handleParseProblem(t: java.lang.Throwable, functionString: unicode) -> unicode:\n ...",
"def test_parse_num_empty_number_raises_error():\n from esolang_whitespace import SpaceInterpreter\n i = SpaceInterpreter('')\n with pytest.raises(SyntaxError):\n i.parse_num('')",
"def test_useless_try_nodes(\n assert_errors,\n assert_error_text,\n parse_ast_tree,\n code,\n statement,\n default_options,\n mode,\n):\n tree = parse_ast_tree(mode(code.format(statement)))\n\n visitor = StatementsWithBodiesVisitor(default_options, tree=tree)\n visitor.run()\n\n assert_errors(visitor, [UselessNodeViolation])\n assert_error_text(visitor, 'try')",
"def testSyntaxErrorElifAfterElse(self):\n template = '{{ if [var] }} {{ else }} {{ elif [var] }} {{ endif }}'\n self.assertRaises(templateparser.TemplateSyntaxError, self.parse, template)",
"def test_do_while_stmt3(self):\r\n input = \"\"\"int main () {\r\n\r\n do {\r\n func(x%4)[a[i]]; \r\n }while(!5);\r\n return -1;\r\n}\r\n\"\"\"\r\n expect = \"successful\"\r\n self.assertTrue(TestParser.checkParser(input,expect,233))",
"def test_read_code_position_handles_malformed_input(self):\n\n def assert_is_parsed(code_position_string):\n code_position = parser._read_code_position([code_position_string], 0)\n self.assertEqual(len(code_position), 4)\n self.assertTrue(isinstance(code_position[3], int))\n\n parser = Log4jParser()\n assert_is_parsed('?(C.java:23)') # ('', '?', 'C.java', 23))\n assert_is_parsed('.m(C.java:23)') # ('', 'm', 'C.java', 23))\n assert_is_parsed('C.(C.java:23)') # ('C', '', 'C.java', 23))\n assert_is_parsed('.(C.java:23)') # ('', '', 'C.java', 23))\n assert_is_parsed('(C.java:23)') # ('', '', 'C.java', 23))\n assert_is_parsed('C.m(?)') # ('C', 'm', '?', -1))\n assert_is_parsed('C.m(:23)') # ('C', 'm', '', 23))\n assert_is_parsed('C.m(C.java:)') # ('C', 'm', 'C.java', -1))\n assert_is_parsed('C.m(:)') # ('C', 'm', '', -1))\n assert_is_parsed('C.m()') # ('C', 'm', '', -1))\n assert_is_parsed('C.m(C.java:NaN)') # ('C', 'm', 'C.java', -1))\n assert_is_parsed('C.m(C.java:3rr0r)') # ('C', 'm', 'C.java', -1))\n assert_is_parsed('?.?:?') # ('', '', '?.?', -1))\n assert_is_parsed('(C.java:23)') # ('', '', 'C.java', 23))\n assert_is_parsed('C.m(') # ('C', 'm', '', -1))\n assert_is_parsed('(') # ('', '', '', -1))\n assert_is_parsed('') # ('', '', '', -1))\n assert_is_parsed('C.m(C.java:23:42)') # ('C', 'm', 'C.java', -1))\n assert_is_parsed('C.m(C.java:23)(D.java:42)') # ('C.m(C', 'java:23)', 'D.java', 42))\n assert_is_parsed('C.m(C.ja(D.java:42)va:23)') # ('C.m(C', 'ja', 'D.java', -1))\n assert_is_parsed('C.m(C.java:23') # ('C', 'm', 'C.java', 23))\n assert_is_parsed('C.m(C.java:23:') # ('C', 'm', 'C.java', 23))",
"def test_except_infer_pars(self):\n src = Source([])\n self.assertRaises(RuntimeError, src.set_expression, 'a+a')\n self.assertRaises(RuntimeError, src.set_expression, '2*a')\n self.assertRaises(ValueError, src.set_expression, '2*a', ['a'])\n self.assertRaises(ValueError, src.set_expression, '2*a', grads=['2'])\n self.assertRaises(ValueError, src.set_expression, 'a*b', ['a', 'b'], ['b'])",
"def eval_par(expr: Expression, ptr: int, task: Task) -> Tuple[int, int]:\n counter = 0\n for idx, token in enumerate(expr[ptr:]):\n if token == \"(\":\n counter += 1\n if token == \")\":\n counter -= 1\n if counter == 0:\n start = ptr + 1\n end = ptr + idx\n return evaluate(expr[start:end], task), end\n raise Exception(\"Malformed expression: parenthesis doesn't match!\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a factor in a case
|
def case_factor():
    @generate
    def unary_minus():
        yield string("-")
        yield normalspaces()
        expr = yield expression
        return s.Construct(s.UNARY_MINUS, expr)

    @generate
    def unary_not():
        yield keyword("not")
        yield normalspaces()
        expr = yield expression
        return s.Construct(s.UNARY_NOT, expr)

    ret = yield (hexnumber() ^
                 time ^
                 smptetime() ^
                 number() |
                 quoted |
                 path_name |
                 case_var_name() |
                 mxsname ^
                 array ^
                 bitarray ^
                 point4 ^
                 point3 ^
                 point2 ^
                 unary_minus ^
                 unary_not ^
                 expr_seq
                 # ??? ? last listener result (OMG!!) ==> could be shimmed in python if true
                 )
    return ret
|
[
"def parse_case_field(page):\n case_pattern = re.compile(\n r'''\n Case: # The name of the field we are looking for.\n .* # Any character.\n (\\d{2}-\\d{6,7}) # The case the number we are looking for.\n ''',\n re.VERBOSE,\n )\n return match_pattern(page, case_pattern)",
"def parse_factor_line(line):\n\n raw = line.strip().split()\n inode,itrans,nfac = [int(i) for i in raw[:3]]\n fac_data = {int(raw[ifac])-1:float(raw[ifac+1]) for ifac in range(4,4+nfac*2,2)}\n # fac_data = {}\n # for ifac in range(4,4+nfac*2,2):\n # pnum = int(raw[ifac]) - 1 #zero based to sync with pandas\n # fac = float(raw[ifac+1])\n # fac_data[pnum] = fac\n return inode,itrans,fac_data",
"def cat_to_veto_def_cat(val):\n if val == '1':\n return 1\n if val == '2':\n return 2\n if val == '3':\n return 4\n if val == 'H':\n return 3 \n else:\n raise ValueError('Invalid Category Choice')\n\n return cat_sets",
"def parse_fact(expr, variables):\r\n expr = expr.lstrip()\r\n\r\n # Test (Exp)\r\n if expr.startswith('('):\r\n new_expr, value = parse_expression(expr[1:], variables)\r\n if new_expr is not False and new_expr.startswith(')'):\r\n return new_expr[1:], value\r\n\r\n # Test -Fact or +Fact\r\n if expr.startswith('-') or expr.startswith('+'):\r\n new_expr, value = parse_fact(expr[1:], variables)\r\n if new_expr is not False:\r\n if expr.startswith('-'):\r\n return new_expr, -value\r\n else:\r\n return new_expr, value\r\n\r\n # Test literal\r\n new_expr, value = parse_literal(expr)\r\n\r\n if new_expr is not False:\r\n return new_expr, value\r\n\r\n # Test identifier\r\n new_expr, identifier = parse_identifier(expr)\r\n\r\n # Check that the identifier has been initialized\r\n if identifier not in variables.keys():\r\n print(identifier + \" is not initialized\")\r\n return False, None\r\n\r\n if new_expr is not False:\r\n return new_expr, variables[identifier]\r\n\r\n return False, None",
"def getCases (parser):\n\treturn parser(fileinput.input())",
"def solve_case(self, case):\n pass",
"def parseCase(node):\n\n artist = node.find(\"./artist\")\n title = node.find(\"./songtitle\")\n fileType = node.find(\"./datatype\")\n fileName = node.find(\"./filename\")\n\n extension = None\n if fileType.text.lower() == \"mxl\":\n extension = \"xml\"\n elif fileType.text.lower() == \"midi\":\n extension = \"mid\"\n else:\n # don't know how to handle this\n extension = fileType.text.lower()\n\n return {\n \"song\": title.text,\n \"artist\": artist.text,\n \"file\": fileName.text + \".\" + extension,\n \"fileType\": fileType.text\n }",
"def parse_unit(x):\n if is_numlike(x):\n return x, None\n else:\n for i, xi in enumerate(x+\" \"):\n if not xi.isdigit() and not xi == '.':\n break # we only want the right i\n number = x[:i]\n unit = x[i:].strip()\n if unit == '':\n unit = None\n # return result\n if number.isdigit():\n return int(number), unit\n elif number == \"\":\n return 1, unit\n else:\n return float(number), unit",
"def _parse_fields(self, unf_str):\n unf_str = unf_str.strip(self.BORDER_CHAR)\n unf_str = unf_str.lstrip(\"Test Case \")\n number, desc_token = unf_str.split(\": \")\n case_name = re.search(VPatterns.get_test_case_name(),\n desc_token).group(0)\n fields = []\n fields.append(case_name)\n fields.append(int(number))\n fields.append(desc_token)\n return fields",
"def parse_number_or_function(iterator: ExpressionIterator):\n iterator.previous()\n ch = iterator.next()\n if ch in NUMBER_START_CHARS:\n return (parse_number(iterator))\n elif ch in string.ascii_lowercase:\n token = parse_charcter_thing(iterator)\n if token in CONSTANTS:\n token = CONSTANTS[token]\n return token\n else:\n raise ValueError(f\"{ch} is not a valid token\")",
"def find_measure_and_unit(measure):\n measure = measure.strip().replace(\" \", \"\")\n match = re.match(\"([-0-9.]+)([a-zA-Z]*)\", measure)\n if not match:\n return None, None\n num = float(match.group(1))\n measure = match.group(2).lower()\n if not measure:\n measure = None\n return num, measure",
"def parse_entry(entry):\n if entry.startswith('nan'):\n val = float(entry[:3])\n par = entry[3:]\n else:\n i = -1\n while not entry[i].isdigit(): i -= 1\n if i != -1:\n val = float(entry[:i+1])\n par = entry[i+1:]\n else:\n val = float(entry)\n par = ''\n\n return val,par",
"def parsePrimitiveCategory(chunks, primitives, families, var):\n ...",
"def process(line):\n line = remove_parentheses(line)\n line = remove_punctuation(line)\n line = convert_vulgar_fractions(line)\n\n number_result = NUMBERS.match(line)\n if number_result:\n number_result = number_result.group()\n number_result = convert_mixed_fractions(number_result)\n\n measurement_result = check_measurements(line)\n food_result = check_foods(line)\n category_result = foods.get(food_result)\n\n return {'number': number_result,\n 'measurement': measurement_result,\n 'food': food_result,\n 'category': category_result}",
"def parse(cls, input):",
"def parse_sv_frequency(variant, info_key):\n value = variant.INFO.get(info_key, 0)\n if any([float_str in info_key.upper() for float_str in [\"AF\", \"FRQ\"]]):\n value = float(value)\n else:\n value = int(value)\n if value > 0:\n return value\n return None",
"def case_expr():\n # pylint: disable=useless-return\n @generate\n def default():\n yield keyword(\"default\")\n return None\n\n @generate\n def case_item():\n case = yield default ^ case_factor\n yield normalspaces()\n yield string(\":\")\n yield normalspaces()\n expr = yield expression\n return s.Construct(s.CASE_ITEM, case, expr)\n\n yield keyword(\"case\")\n yield normalspaces()\n expr = yield optional(expression)\n yield normalspaces()\n yield keyword(\"of\")\n yield normalspaces()\n yield lparen\n yield normalspaces()\n cases = yield sepBy(case_item, end_of_statement)\n yield normalspaces()\n yield rparen\n return s.Construct(s.CASE_EXPR, expr, cases)",
"def factor_check(number, factor):",
"def _parse_prefix(string: str) -> Tuple[Term, str]:\n # Task 7.3.1\n if is_variable(string[0]):\n return Term.variable_case(string)\n\n elif is_constant(string[0]):\n return Term.constant_case(string)\n\n elif is_function(string[0]):\n return Term.function_case(string)\n\n else: # string[0] is a char that doesnt correspond to any pattern\n return None, string"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a case expr
|
def case_expr():
# pylint: disable=useless-return
@generate
def default():
yield keyword("default")
        return None

    @generate
def case_item():
case = yield default ^ case_factor
yield normalspaces()
yield string(":")
yield normalspaces()
expr = yield expression
        return s.Construct(s.CASE_ITEM, case, expr)

    yield keyword("case")
yield normalspaces()
expr = yield optional(expression)
yield normalspaces()
yield keyword("of")
yield normalspaces()
yield lparen
yield normalspaces()
cases = yield sepBy(case_item, end_of_statement)
yield normalspaces()
yield rparen
return s.Construct(s.CASE_EXPR, expr, cases)
|
[
"def visitCaseFunctionCall(self, ctx: MySqlParser.CaseFunctionCallContext) -> SQLToken:\n branches = []\n for func_alternative in ctx.caseFuncAlternative():\n branches.append(self.visit(func_alternative))\n if ctx.ELSE():\n branches.append((None, self.visit(ctx.functionArg())))\n return SQLToken(CASE, branches)",
"def parse_switch_statement(self):\n location = self.consume(\"switch\").loc\n self.consume(\"(\")\n expression = self.parse_expression()\n self.consume(\")\")\n self.semantics.on_switch_enter(expression)\n statement = self.parse_statement()\n return self.semantics.on_switch_exit(expression, statement, location)",
"def case_factor():\n @generate\n def unary_minus():\n yield string(\"-\")\n yield normalspaces()\n expr = yield expression\n return s.Construct(s.UNARY_MINUS, expr)\n\n @generate\n def unary_not():\n yield keyword(\"not\")\n yield normalspaces()\n expr = yield expression\n return s.Construct(s.UNARY_NOT, expr)\n\n ret = yield (hexnumber() ^\n time ^\n smptetime() ^\n number() |\n quoted |\n path_name |\n case_var_name() |\n mxsname ^\n array ^\n bitarray ^\n point4 ^\n point3 ^\n point2 ^\n unary_minus ^\n unary_not ^\n expr_seq\n # ??? ? last listener result (OMG!!) ==> could be shimmed in python if true\n )\n return ret",
"def parse_case_field(page):\n case_pattern = re.compile(\n r'''\n Case: # The name of the field we are looking for.\n .* # Any character.\n (\\d{2}-\\d{6,7}) # The case the number we are looking for.\n ''',\n re.VERBOSE,\n )\n return match_pattern(page, case_pattern)",
"def getCases (parser):\n\treturn parser(fileinput.input())",
"def parse_number_or_function(iterator: ExpressionIterator):\n iterator.previous()\n ch = iterator.next()\n if ch in NUMBER_START_CHARS:\n return (parse_number(iterator))\n elif ch in string.ascii_lowercase:\n token = parse_charcter_thing(iterator)\n if token in CONSTANTS:\n token = CONSTANTS[token]\n return token\n else:\n raise ValueError(f\"{ch} is not a valid token\")",
"def solve_case(self, case):\n pass",
"def parse_fact(expr, variables):\r\n expr = expr.lstrip()\r\n\r\n # Test (Exp)\r\n if expr.startswith('('):\r\n new_expr, value = parse_expression(expr[1:], variables)\r\n if new_expr is not False and new_expr.startswith(')'):\r\n return new_expr[1:], value\r\n\r\n # Test -Fact or +Fact\r\n if expr.startswith('-') or expr.startswith('+'):\r\n new_expr, value = parse_fact(expr[1:], variables)\r\n if new_expr is not False:\r\n if expr.startswith('-'):\r\n return new_expr, -value\r\n else:\r\n return new_expr, value\r\n\r\n # Test literal\r\n new_expr, value = parse_literal(expr)\r\n\r\n if new_expr is not False:\r\n return new_expr, value\r\n\r\n # Test identifier\r\n new_expr, identifier = parse_identifier(expr)\r\n\r\n # Check that the identifier has been initialized\r\n if identifier not in variables.keys():\r\n print(identifier + \" is not initialized\")\r\n return False, None\r\n\r\n if new_expr is not False:\r\n return new_expr, variables[identifier]\r\n\r\n return False, None",
"def sched_switch_parser(event, text):\n if text.count('=') == 2: # old format\n regex = re.compile(\n r'(?P<prev_comm>\\S.*):(?P<prev_pid>\\d+) \\[(?P<prev_prio>\\d+)\\] (?P<status>\\S+)'\n r' ==> '\n r'(?P<next_comm>\\S.*):(?P<next_pid>\\d+) \\[(?P<next_prio>\\d+)\\]'\n )\n parser_func = regex_body_parser(regex)\n return parser_func(event, text)\n else: # there are more than two \"=\" -- new format\n return default_body_parser(event, text.replace('==>', ''))",
"def parseCase(node):\n\n artist = node.find(\"./artist\")\n title = node.find(\"./songtitle\")\n fileType = node.find(\"./datatype\")\n fileName = node.find(\"./filename\")\n\n extension = None\n if fileType.text.lower() == \"mxl\":\n extension = \"xml\"\n elif fileType.text.lower() == \"midi\":\n extension = \"mid\"\n else:\n # don't know how to handle this\n extension = fileType.text.lower()\n\n return {\n \"song\": title.text,\n \"artist\": artist.text,\n \"file\": fileName.text + \".\" + extension,\n \"fileType\": fileType.text\n }",
"def _add_case_statement(self):\n case_query = self.function.format(\"case when {0} = \\\"{1}\\\" then {2} else {3} end\") + \" as {4},\\n\"\n\n query = \"\".join([case_query.format(self.pivot_col, piv_col_val, self.values_col,\n self.not_eq_default, piv_col_name)\n for piv_col_val, piv_col_name in zip(self.piv_col_vals, self.piv_col_names)])\n \n query = query[:-2] + \"\\n\"\n return query",
"def parse(instruction_str):\n match = re.search(\"(nop|acc|jmp) (.*)$\", instruction_str)\n return {\"operation\": match[1], \"argument\": int(match[2])}",
"def visitCase(self, testCase):",
"def case(\n *whens: Union[\n typing_Tuple[_ColumnExpressionArgument[bool], Any], Mapping[Any, Any]\n ],\n value: Optional[Any] = None,\n else_: Optional[Any] = None,\n) -> Case[Any]:\n return Case(*whens, value=value, else_=else_)",
"def _parse_expr(cls, expr, template, lineno=-1, offset=-1):\r\n try:\r\n return expr and Expression(expr, template.filepath, lineno,\r\n lookup=template.lookup) or None\r\n except SyntaxError, err:\r\n err.msg += ' in expression \"%s\" of \"%s\" directive' % (expr,\r\n cls.tagname)\r\n raise TemplateSyntaxError(err, template.filepath, lineno,\r\n offset + (err.offset or 0))",
"def _parse_expr(cls, expr, template, lineno=-1, offset=-1):\r\n try:\r\n return expr and Expression(expr, template.filepath, lineno,\r\n lookup=template.lookup) or None\r\n except SyntaxError as err:\r\n err.msg += ' in expression \"%s\" of \"%s\" directive' % (expr,\r\n cls.tagname)\r\n raise TemplateSyntaxError(err, template.filepath, lineno,\r\n offset + (err.offset or 0))",
"def _expand_cases(casestr):\n # For specifying the cases in a unit test, we should allow ranges like\n # \"standard.cr[1-12]\" so that the developer doesn't need to enter each\n # of the cases separately. We should still allow a comma-separated list\n # of cases, but each must allow the shorthand notation.\n rawcases = re.split(\",\\s*\", casestr)\n if \"[\" in casestr:\n cases = []\n rxcase = r\"(?P<prefix>[^[]*)\\[(?P<range>\\d+-\\d+)](?P<suffix>.*)\"\n recase = re.compile(rxcase)\n for craw in rawcases:\n m = recase.match(craw)\n if m:\n prefix = m.group(\"prefix\")\n vals = list(map(int, m.group(\"range\").split(\"-\")))\n suffix = m.group(\"suffix\")\n if prefix is None:\n prefix = \"\"\n if suffix is None:\n suffix = \"\"\n for v in range(vals[0], vals[1]+1):\n cases.append(\"{}{}{}\".format(prefix, v, suffix))\n else:\n cases.append(craw)\n return cases\n else:\n return rawcases",
"def parse_expression(self, input_string, case_sensitive=True, **values):\n\n if not input_string:\n return self.Quantity(1)\n\n input_string = string_preprocessor(input_string)\n gen = tokenizer(input_string)\n\n return build_eval_tree(gen).evaluate(lambda x: self._eval_token(x,\n case_sensitive=case_sensitive,\n **values))",
"def parse(self) -> None:\n if self.current[0] == Token.CTE: # constant ?\n print(self.current[1])\n self.current = self.next_token() # reads next token\n return # recursion end\n elif self.current[0] == Token.PARL: # ( ?\n print('(')\n self.current = self.next_token() # reads next token\n self.parse() # recursion for ( expr )\n if self.current[0] == Token.PARR: # ) ?\n print(')')\n self.current = self.next_token() # reads next token\n return # recursion end\n if self.current[0] == Token.ADD:\n print('+') # operator?\n elif self.current[0] == Token.SUB:\n print('-')\n elif self.current[0] == Token.MUL:\n print('*')\n elif self.current[0] == Token.DIV:\n print('/')\n else:\n raise ParsingException(\"Wrong operator or left parenthesis expected\")\n self.current = self.next_token() # reads next token\n self.parse() # recursion for ( ... oper expr )\n if self.current[0] == Token.PARR: # ) ?\n print(')')\n self.current = self.next_token() # reads next token\n return # recursion end\n else:\n raise ParsingException(\"Right parenthesis expected\")\n else:\n raise ParsingException(\"Left parenthesis or constant expected\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse the with context expr
|
def with_context():
# pylint: disable=line-too-long
yield optional(keyword("with"))
yield normalspaces()
kw = yield keyword("(animate|undo|redraw|quiet|printAllElements|defaultAction|MXSCallstackCaptureEnabled|dontRepeatMessages|macroRecorderEmitterEnabled)")
yield normalspaces()
v = yield operand #expression
return s.Construct(s.CONTEXT_WITH, kw, v)
|
[
"def context_expr():\n contexts = yield sepBy1(\n about_context ^\n incoordsys_context ^\n innode_context ^\n at_context ^\n with_context, listsep())\n yield normalspaces()\n expr = yield expression\n return s.Construct(s.CONTEXT_EXPR, contexts, expr)",
"def do_eval(expr, context):\n return eval(expr, context.vals)",
"def at_context():\n yield keyword(\"at\")\n yield normalspaces()\n kw = yield keyword(\"level|time\")\n yield normalspaces()\n v = yield operand\n return s.Construct(s.CONTEXT_AT, kw, v)",
"def parse_primary_expression(self):\n if self.peek == \"ID\":\n identifier = self.consume(\"ID\")\n expr = self.semantics.on_variable_access(\n identifier.val, identifier.loc\n )\n elif self.peek == \"NUMBER\":\n number = self.consume()\n expr = self.semantics.on_number(number.val, number.loc)\n elif self.peek == \"FLOAT\":\n number = self.consume()\n expr = self.semantics.on_float(number.val, number.loc)\n elif self.peek == \"CHAR\":\n char = self.consume()\n expr = self.semantics.on_char(char.val, char.loc)\n elif self.peek == \"STRING\":\n txt = self.consume()\n expr = self.semantics.on_string(txt.val, txt.loc)\n elif self.peek in [\"!\", \"*\", \"+\", \"-\", \"~\", \"&\", \"--\", \"++\"]:\n op = self.consume()\n if op.val in [\"--\", \"++\"]:\n operator = op.val + \"x\"\n else:\n operator = op.val\n expr = self.parse_primary_expression()\n expr = self.semantics.on_unop(operator, expr, op.loc)\n elif self.peek == \"__builtin_va_start\":\n location = self.consume(\"__builtin_va_start\").loc\n self.consume(\"(\")\n ap = self.parse_assignment_expression()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_start(ap, location)\n elif self.peek == \"__builtin_va_arg\":\n location = self.consume(\"__builtin_va_arg\").loc\n self.consume(\"(\")\n ap = self.parse_assignment_expression()\n self.consume(\",\")\n typ = self.parse_typename()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_arg(ap, typ, location)\n elif self.peek == \"__builtin_va_copy\":\n location = self.consume(\"__builtin_va_copy\").loc\n self.consume(\"(\")\n dest = self.parse_assignment_expression()\n self.consume(\",\")\n src = self.parse_assignment_expression()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_copy(dest, src, location)\n elif self.peek == \"__builtin_offsetof\":\n location = self.consume(\"__builtin_offsetof\").loc\n self.consume(\"(\")\n typ = self.parse_typename()\n self.consume(\",\")\n member = self.consume(\"ID\").val\n self.consume(\")\")\n expr = self.semantics.on_builtin_offsetof(typ, member, location)\n elif self.peek == \"sizeof\":\n location = self.consume(\"sizeof\").loc\n if self.peek == \"(\":\n self.consume(\"(\")\n if self.is_declaration_statement():\n typ = self.parse_typename()\n else:\n typ = self.parse_expression()\n self.consume(\")\")\n expr = self.semantics.on_sizeof(typ, location)\n else:\n sizeof_expr = self.parse_primary_expression()\n expr = self.semantics.on_sizeof(sizeof_expr, location)\n elif self.peek == \"(\":\n loc = self.consume(\"(\").loc\n # Is this a type cast?\n if self.is_declaration_statement():\n # Cast or compound literal!\n to_typ = self.parse_typename()\n self.consume(\")\")\n if self.peek == \"{\":\n init = self.parse_initializer_list(to_typ)\n expr = self.semantics.on_compound_literal(\n to_typ, init, loc\n )\n else:\n casted_expr = self.parse_primary_expression()\n expr = self.semantics.on_cast(to_typ, casted_expr, loc)\n else:\n # Parenthized expression (reset precedence)\n expr = self.parse_expression()\n self.consume(\")\")\n else:\n self.error(\"Expected expression\")\n\n # Postfix operations (have the highest precedence):\n while self.peek in [\"--\", \"++\", \"[\", \".\", \"->\", \"(\"]:\n if self.peek in [\"--\", \"++\"]:\n op = self.consume()\n expr = self.semantics.on_unop(\"x\" + op.val, expr, op.loc)\n elif self.peek == \"[\":\n location = self.consume(\"[\").loc\n index = self.parse_expression()\n self.consume(\"]\")\n expr = self.semantics.on_array_index(expr, index, location)\n elif self.peek == \"(\":\n expr = 
self.parse_call(expr)\n elif self.peek == \".\":\n location = self.consume(\".\").loc\n field = self.consume(\"ID\").val\n expr = self.semantics.on_field_select(expr, field, location)\n elif self.peek == \"->\":\n location = self.consume(\"->\").loc\n field = self.consume(\"ID\").val\n # Dereference pointer:\n expr = self.semantics.on_unop(\"*\", expr, location)\n expr = self.semantics.on_field_select(expr, field, location)\n else: # pragma: no cover\n self.not_impl()\n return expr",
"def test_expressions_with_fame(self):\n c = Context()\n c[\"foo\"] = dict(a=1, b=2, bar=\"apples\")\n c[\"top\"] = 10\n c[\"r\"] = list(range(10))\n tests = [(\"a+b\", 3), (\".top\", 10), (\"a+.top\", 11), (\".r.4+.top\", 14)]\n with c.frame(\"foo\"):\n for expression, result in tests:\n self.assertEqual(c.eval(expression), result)",
"def _parse_expr(cls, expr, template, lineno=-1, offset=-1):\r\n try:\r\n return expr and Expression(expr, template.filepath, lineno,\r\n lookup=template.lookup) or None\r\n except SyntaxError, err:\r\n err.msg += ' in expression \"%s\" of \"%s\" directive' % (expr,\r\n cls.tagname)\r\n raise TemplateSyntaxError(err, template.filepath, lineno,\r\n offset + (err.offset or 0))",
"def _parse_expr(cls, expr, template, lineno=-1, offset=-1):\r\n try:\r\n return expr and Expression(expr, template.filepath, lineno,\r\n lookup=template.lookup) or None\r\n except SyntaxError as err:\r\n err.msg += ' in expression \"%s\" of \"%s\" directive' % (expr,\r\n cls.tagname)\r\n raise TemplateSyntaxError(err, template.filepath, lineno,\r\n offset + (err.offset or 0))",
"def parse_expression(expr):\n child_expressions = []\n for child_expr in expr:\n if isinstance(child_expr, pyparsing.ParseResults):\n child_expressions.append(parse_expression(child_expr))\n else:\n child_expressions.append(child_expr)\n while len(child_expressions) > 2:\n res = eval(\"\".join(map(str, child_expressions[0:3])))\n child_expressions = [res] + child_expressions[3:]\n return int(child_expressions[0])",
"def parse_expr(expr):\n global re_braces,re_dollar\n state_deps = []\n arg_deps = []\n def f(match):\n state_deps.append(match.group()[1:-1])\n return '_state_[\"'+match.group()[1:-1]+'\"]'\n def g(match):\n arg_deps.append(match.group()[1:])\n return '_args_[\"'+match.group()[1:]+'\"]'\n res = re_braces.sub(f,expr)\n res = re_dollar.sub(g,res)\n return res,state_deps,arg_deps",
"def parse(self) -> None:\n if self.current[0] == Token.CTE: # constant ?\n print(self.current[1])\n self.current = self.next_token() # reads next token\n return # recursion end\n elif self.current[0] == Token.PARL: # ( ?\n print('(')\n self.current = self.next_token() # reads next token\n self.parse() # recursion for ( expr )\n if self.current[0] == Token.PARR: # ) ?\n print(')')\n self.current = self.next_token() # reads next token\n return # recursion end\n if self.current[0] == Token.ADD:\n print('+') # operator?\n elif self.current[0] == Token.SUB:\n print('-')\n elif self.current[0] == Token.MUL:\n print('*')\n elif self.current[0] == Token.DIV:\n print('/')\n else:\n raise ParsingException(\"Wrong operator or left parenthesis expected\")\n self.current = self.next_token() # reads next token\n self.parse() # recursion for ( ... oper expr )\n if self.current[0] == Token.PARR: # ) ?\n print(')')\n self.current = self.next_token() # reads next token\n return # recursion end\n else:\n raise ParsingException(\"Right parenthesis expected\")\n else:\n raise ParsingException(\"Left parenthesis or constant expected\")",
"def evaluate(expr, locals):",
"def handle_result(self, expr, result, ctx):\n pass",
"def eval_par(expr: Expression, ptr: int, task: Task) -> Tuple[int, int]:\n counter = 0\n for idx, token in enumerate(expr[ptr:]):\n if token == \"(\":\n counter += 1\n if token == \")\":\n counter -= 1\n if counter == 0:\n start = ptr + 1\n end = ptr + idx\n return evaluate(expr[start:end], task), end\n raise Exception(\"Malformed expression: parenthesis doesn't match!\")",
"def get_cond_expr_tokens(self, attr_token):\n expression = ''\n child_amod = None\n child_quantmod = None\n child_nummod = None\n\n children = [child for child in attr_token.lefts] # consider only left children\n \n # find out if attr_token has nummod, if so, then amod and quantmod\n # will be relative to the nummod token - so modify children list\n # otherwise they are relative to attr_token - so leave the children list as is.\n # \n # Refer Eg #1 & #2 above\n\n for child in children:\n # get nummod\n if child.dep_ == 'nummod':\n child_nummod = child\n if child_nummod:\n children = [child for child in child_nummod.lefts] # consider only left children\n\n for child in children:\n # get amod\n if child.dep == amod:\n child_amod = child\n # get quantmod\n if child.dep == quantmod:\n child_quantmod = child\n \n #expression = ' '.join[child_amod.text, child_quantmod.text, child_nummod.text] \n return (child_amod, child_quantmod, child_nummod)",
"def _parse_context_string(data : dict, value : str) -> Any:\n # Find all context values in string.\n contexts = re.findall(\"({[<%#:]{1} [\\S]+ [%#:>]{1}})\", value)\n # If there is no any context values in string,\n # return the string itself.\n if len(contexts) == 0:\n return value\n # If value is just a context value, \n # return the value of the context item instead of a string.\n if len(contexts) == 1 and value.strip() == contexts[0]:\n return ConduitStep._parse_context_tag(data, contexts[0])\n else:\n val = value\n for item in contexts:\n val = ConduitStep._parse_context_string(data, val.replace(item, ConduitStep._parse_context_tag(data, item)))\n return val",
"def tool_handler():\n yield keyword(\"on\")\n yield normalspaces()\n yield var_name()\n yield normalspaces()\n yield optional(var_name())\n yield normalspaces()\n yield keyword(\"do\")\n yield normalspaces()\n expr = yield expression\n return expr",
"def __init__(self, expr: str) -> None:\n self.expr = expr\n self.length = len(self.expr)\n self.idx = 0 # index used by next_token()\n self.current = self.next_token() # reads first token",
"def evaluate(expr,**bindings):\n expr = expr.replace(\" \", \"\")\n paren, lst, lst_op = 0, -1, None\n #finds the last operator to be evaluated.\n for i in range(len(expr)):\n if expr[i] == \"(\":\n paren = paren + 1\n elif expr[i] == ')':\n paren = paren - 1\n else:\n s = op_by_symbol(expr[i])\n if s is None or paren != 0:\n continue\n elif lst == -1:\n lst = i\n lst_op = s\n elif s < lst_op:\n lst = i\n lst_op = s\n\n if lst_op is None:\n #if there were no operators found, make sure the expr was not wrapped in ()\n if expr[0] == '(' and expr[len(expr) - 1] == \")\":\n return evaluate(expr[1: len(expr) - 1], **bindings)\n else:#if not in (), this must be a variable\n return bindings[expr]\n elif lst_op == Operators.NOT:#otherwise, evaluate the operator.\n return lst_op(evaluate(expr[lst + 1:], **bindings))\n else:\n return lst_op([evaluate(expr[:lst], **bindings),evaluate(expr[lst + 1:], **bindings)])",
"def evaluate_option_expr(self, expr):\n #x = FilterExpressions.WordExpression(expr)\n opL = self.attrs.get( 'option_list', [] )\n return expr.evaluate( opL.count )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse the at context expr
|
def at_context():
yield keyword("at")
yield normalspaces()
kw = yield keyword("level|time")
yield normalspaces()
v = yield operand
return s.Construct(s.CONTEXT_AT, kw, v)
|
[
"def context_expr():\n contexts = yield sepBy1(\n about_context ^\n incoordsys_context ^\n innode_context ^\n at_context ^\n with_context, listsep())\n yield normalspaces()\n expr = yield expression\n return s.Construct(s.CONTEXT_EXPR, contexts, expr)",
"def _parse_expr(cls, expr, template, lineno=-1, offset=-1):\r\n try:\r\n return expr and Expression(expr, template.filepath, lineno,\r\n lookup=template.lookup) or None\r\n except SyntaxError, err:\r\n err.msg += ' in expression \"%s\" of \"%s\" directive' % (expr,\r\n cls.tagname)\r\n raise TemplateSyntaxError(err, template.filepath, lineno,\r\n offset + (err.offset or 0))",
"def _parse_expr(cls, expr, template, lineno=-1, offset=-1):\r\n try:\r\n return expr and Expression(expr, template.filepath, lineno,\r\n lookup=template.lookup) or None\r\n except SyntaxError as err:\r\n err.msg += ' in expression \"%s\" of \"%s\" directive' % (expr,\r\n cls.tagname)\r\n raise TemplateSyntaxError(err, template.filepath, lineno,\r\n offset + (err.offset or 0))",
"def parse_primary_expression(self):\n if self.peek == \"ID\":\n identifier = self.consume(\"ID\")\n expr = self.semantics.on_variable_access(\n identifier.val, identifier.loc\n )\n elif self.peek == \"NUMBER\":\n number = self.consume()\n expr = self.semantics.on_number(number.val, number.loc)\n elif self.peek == \"FLOAT\":\n number = self.consume()\n expr = self.semantics.on_float(number.val, number.loc)\n elif self.peek == \"CHAR\":\n char = self.consume()\n expr = self.semantics.on_char(char.val, char.loc)\n elif self.peek == \"STRING\":\n txt = self.consume()\n expr = self.semantics.on_string(txt.val, txt.loc)\n elif self.peek in [\"!\", \"*\", \"+\", \"-\", \"~\", \"&\", \"--\", \"++\"]:\n op = self.consume()\n if op.val in [\"--\", \"++\"]:\n operator = op.val + \"x\"\n else:\n operator = op.val\n expr = self.parse_primary_expression()\n expr = self.semantics.on_unop(operator, expr, op.loc)\n elif self.peek == \"__builtin_va_start\":\n location = self.consume(\"__builtin_va_start\").loc\n self.consume(\"(\")\n ap = self.parse_assignment_expression()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_start(ap, location)\n elif self.peek == \"__builtin_va_arg\":\n location = self.consume(\"__builtin_va_arg\").loc\n self.consume(\"(\")\n ap = self.parse_assignment_expression()\n self.consume(\",\")\n typ = self.parse_typename()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_arg(ap, typ, location)\n elif self.peek == \"__builtin_va_copy\":\n location = self.consume(\"__builtin_va_copy\").loc\n self.consume(\"(\")\n dest = self.parse_assignment_expression()\n self.consume(\",\")\n src = self.parse_assignment_expression()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_copy(dest, src, location)\n elif self.peek == \"__builtin_offsetof\":\n location = self.consume(\"__builtin_offsetof\").loc\n self.consume(\"(\")\n typ = self.parse_typename()\n self.consume(\",\")\n member = self.consume(\"ID\").val\n self.consume(\")\")\n expr = self.semantics.on_builtin_offsetof(typ, member, location)\n elif self.peek == \"sizeof\":\n location = self.consume(\"sizeof\").loc\n if self.peek == \"(\":\n self.consume(\"(\")\n if self.is_declaration_statement():\n typ = self.parse_typename()\n else:\n typ = self.parse_expression()\n self.consume(\")\")\n expr = self.semantics.on_sizeof(typ, location)\n else:\n sizeof_expr = self.parse_primary_expression()\n expr = self.semantics.on_sizeof(sizeof_expr, location)\n elif self.peek == \"(\":\n loc = self.consume(\"(\").loc\n # Is this a type cast?\n if self.is_declaration_statement():\n # Cast or compound literal!\n to_typ = self.parse_typename()\n self.consume(\")\")\n if self.peek == \"{\":\n init = self.parse_initializer_list(to_typ)\n expr = self.semantics.on_compound_literal(\n to_typ, init, loc\n )\n else:\n casted_expr = self.parse_primary_expression()\n expr = self.semantics.on_cast(to_typ, casted_expr, loc)\n else:\n # Parenthized expression (reset precedence)\n expr = self.parse_expression()\n self.consume(\")\")\n else:\n self.error(\"Expected expression\")\n\n # Postfix operations (have the highest precedence):\n while self.peek in [\"--\", \"++\", \"[\", \".\", \"->\", \"(\"]:\n if self.peek in [\"--\", \"++\"]:\n op = self.consume()\n expr = self.semantics.on_unop(\"x\" + op.val, expr, op.loc)\n elif self.peek == \"[\":\n location = self.consume(\"[\").loc\n index = self.parse_expression()\n self.consume(\"]\")\n expr = self.semantics.on_array_index(expr, index, location)\n elif self.peek == \"(\":\n expr = 
self.parse_call(expr)\n elif self.peek == \".\":\n location = self.consume(\".\").loc\n field = self.consume(\"ID\").val\n expr = self.semantics.on_field_select(expr, field, location)\n elif self.peek == \"->\":\n location = self.consume(\"->\").loc\n field = self.consume(\"ID\").val\n # Dereference pointer:\n expr = self.semantics.on_unop(\"*\", expr, location)\n expr = self.semantics.on_field_select(expr, field, location)\n else: # pragma: no cover\n self.not_impl()\n return expr",
"def parse_expression(expr):\n child_expressions = []\n for child_expr in expr:\n if isinstance(child_expr, pyparsing.ParseResults):\n child_expressions.append(parse_expression(child_expr))\n else:\n child_expressions.append(child_expr)\n while len(child_expressions) > 2:\n res = eval(\"\".join(map(str, child_expressions[0:3])))\n child_expressions = [res] + child_expressions[3:]\n return int(child_expressions[0])",
"def with_context():\n # pylint: disable=line-too-long\n yield optional(keyword(\"with\"))\n yield normalspaces()\n kw = yield keyword(\"(animate|undo|redraw|quiet|printAllElements|defaultAction|MXSCallstackCaptureEnabled|dontRepeatMessages|macroRecorderEmitterEnabled)\")\n yield normalspaces()\n v = yield operand #expression\n return s.Construct(s.CONTEXT_WITH, kw, v)",
"def eval_par(expr: Expression, ptr: int, task: Task) -> Tuple[int, int]:\n counter = 0\n for idx, token in enumerate(expr[ptr:]):\n if token == \"(\":\n counter += 1\n if token == \")\":\n counter -= 1\n if counter == 0:\n start = ptr + 1\n end = ptr + idx\n return evaluate(expr[start:end], task), end\n raise Exception(\"Malformed expression: parenthesis doesn't match!\")",
"def parse(self) -> None:\n if self.current[0] == Token.CTE: # constant ?\n print(self.current[1])\n self.current = self.next_token() # reads next token\n return # recursion end\n elif self.current[0] == Token.PARL: # ( ?\n print('(')\n self.current = self.next_token() # reads next token\n self.parse() # recursion for ( expr )\n if self.current[0] == Token.PARR: # ) ?\n print(')')\n self.current = self.next_token() # reads next token\n return # recursion end\n if self.current[0] == Token.ADD:\n print('+') # operator?\n elif self.current[0] == Token.SUB:\n print('-')\n elif self.current[0] == Token.MUL:\n print('*')\n elif self.current[0] == Token.DIV:\n print('/')\n else:\n raise ParsingException(\"Wrong operator or left parenthesis expected\")\n self.current = self.next_token() # reads next token\n self.parse() # recursion for ( ... oper expr )\n if self.current[0] == Token.PARR: # ) ?\n print(')')\n self.current = self.next_token() # reads next token\n return # recursion end\n else:\n raise ParsingException(\"Right parenthesis expected\")\n else:\n raise ParsingException(\"Left parenthesis or constant expected\")",
"def tool_handler():\n yield keyword(\"on\")\n yield normalspaces()\n yield var_name()\n yield normalspaces()\n yield optional(var_name())\n yield normalspaces()\n yield keyword(\"do\")\n yield normalspaces()\n expr = yield expression\n return expr",
"def __init__(self, expr: str) -> None:\n self.expr = expr\n self.length = len(self.expr)\n self.idx = 0 # index used by next_token()\n self.current = self.next_token() # reads first token",
"def test_default_expression_context(self):\n\n analytics_tool = getToolByName(self.portal, 'portal_analytics', None)\n report = analytics_tool['site-visits-line']\n\n context = self.portal\n request = self.portal.REQUEST\n\n renderer = getMultiAdapter(\n (context, request, report),\n interface=IAnalyticsReportRenderer\n )\n\n expression = 'python:[context, request, today, date, timedelta, unique_list]'\n\n result = [\n self.portal,\n self.portal.REQUEST,\n datetime.date.today(),\n getDate,\n getTimeDelta,\n unique_list,\n ]\n\n exp_context = renderer._getExpressionContext()\n evaluated_exp = evaluateTALES(expression, exp_context)\n self.assertEqual(evaluated_exp, result)",
"def do_eval(expr, context):\n return eval(expr, context.vals)",
"def parse_expr(expr):\n global re_braces,re_dollar\n state_deps = []\n arg_deps = []\n def f(match):\n state_deps.append(match.group()[1:-1])\n return '_state_[\"'+match.group()[1:-1]+'\"]'\n def g(match):\n arg_deps.append(match.group()[1:])\n return '_args_[\"'+match.group()[1:]+'\"]'\n res = re_braces.sub(f,expr)\n res = re_dollar.sub(g,res)\n return res,state_deps,arg_deps",
"def parse(self, sentence):\n pcfg = self.pcfg\n\n alpha, backtrace = self._inside(sentence)\n beta = self._outside(sentence, alpha)\n\n return alpha, beta, backtrace",
"def parser(string): \n#1 we tokenize the expression, thanks to the lexer and the Token constructor\n# the names are mapped thanks to the token_map dictionary\n tokens = [Token(token_map.get(x, 'ATOM'), x) for x in lex(string)]\n try:\n (e, i) = parse_iff(tokens)\n if not i:\n return e\n else:\n raise Exception('Unparsed input')\n except:\n raise",
"def six_axt_parse(source_block, filename='<source_block>', compatible=True):\n pt = ast.parse(source_block, filename=filename)\n return pt",
"def interpretETREE(etree) :\r\n if isinstance(etree, str) and etree.isdigit() : # NUM -- string of digits\r\n ans = int(etree)\r\n elif etree[0] in (\"+\", \"-\") : # [OP, ETREE, ETREE]\r\n ans1 = interpretETREE(etree[1])\r\n ans2 = interpretETREE(etree[2])\r\n if isinstance(ans1,int) and isinstance(ans2, int) :\r\n if etree[0] == \"+\" :\r\n ans = ans1 + ans2\r\n elif etree[0] == \"-\" :\r\n ans = ans1 - ans2\r\n else : crash(etree, \"addition error --- nonint value used\")\r\n elif etree[0] == \"deref\" : # [\"deref\", LTREE]\r\n handle, field = interpretLTREE(etree[1])\r\n ans = lookup(handle,field)\r\n else : crash(etree, \"invalid expression form\")\r\n return ans",
"def parse(s):\n t = _Tokens(s)\n ret = t.parse_expr(True)\n if len(t) != 0:\n raise ValueError('extra stuff:' + str(t))\n return ret",
"def evaluate(expr, locals):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse the in coordsys context expr
|
def incoordsys_context():
@generate
def special_name():
name = yield keyword("world|local|parent|grid|screen")
        return s.Construct(s.NAME, name)

    yield optional(keyword("in"))
yield normalspaces()
yield keyword("coordsys")
yield normalspaces()
v = yield special_name | operand
return s.Construct(s.CONTEXT_IN_COORDSYS, v)
|
[
"def context_expr():\n contexts = yield sepBy1(\n about_context ^\n incoordsys_context ^\n innode_context ^\n at_context ^\n with_context, listsep())\n yield normalspaces()\n expr = yield expression\n return s.Construct(s.CONTEXT_EXPR, contexts, expr)",
"def _parse_expr(cls, expr, template, lineno=-1, offset=-1):\r\n try:\r\n return expr and Expression(expr, template.filepath, lineno,\r\n lookup=template.lookup) or None\r\n except SyntaxError as err:\r\n err.msg += ' in expression \"%s\" of \"%s\" directive' % (expr,\r\n cls.tagname)\r\n raise TemplateSyntaxError(err, template.filepath, lineno,\r\n offset + (err.offset or 0))",
"def _parse_expr(cls, expr, template, lineno=-1, offset=-1):\r\n try:\r\n return expr and Expression(expr, template.filepath, lineno,\r\n lookup=template.lookup) or None\r\n except SyntaxError, err:\r\n err.msg += ' in expression \"%s\" of \"%s\" directive' % (expr,\r\n cls.tagname)\r\n raise TemplateSyntaxError(err, template.filepath, lineno,\r\n offset + (err.offset or 0))",
"def parse_expr(expr):\n global re_braces,re_dollar\n state_deps = []\n arg_deps = []\n def f(match):\n state_deps.append(match.group()[1:-1])\n return '_state_[\"'+match.group()[1:-1]+'\"]'\n def g(match):\n arg_deps.append(match.group()[1:])\n return '_args_[\"'+match.group()[1:]+'\"]'\n res = re_braces.sub(f,expr)\n res = re_dollar.sub(g,res)\n return res,state_deps,arg_deps",
"def at_context():\n yield keyword(\"at\")\n yield normalspaces()\n kw = yield keyword(\"level|time\")\n yield normalspaces()\n v = yield operand\n return s.Construct(s.CONTEXT_AT, kw, v)",
"def parseCoords(ps):\n\tcs = None\n\tres=[]\n\tfor i in range(0,len(ps)):\n\t\tif (cs!=None):\n\t\t\tif ps[i]=='}':\n\t\t\t\tcs = cs+ps[i]\n\t\t\t\ttry:\n\t\t\t\t\tres.append(epq.StageCoordinate.fromString(cs))\n\t\t\t\tfinally:\n\t\t\t\t\tcs = None\n\t\t\telse:\n\t\t\t\tcs = cs + ps[i]\n\t\telif ps[i]=='{':\n\t\t\tcs=ps[i]\n\treturn tuple(res)",
"def parse_primary_expression(self):\n if self.peek == \"ID\":\n identifier = self.consume(\"ID\")\n expr = self.semantics.on_variable_access(\n identifier.val, identifier.loc\n )\n elif self.peek == \"NUMBER\":\n number = self.consume()\n expr = self.semantics.on_number(number.val, number.loc)\n elif self.peek == \"FLOAT\":\n number = self.consume()\n expr = self.semantics.on_float(number.val, number.loc)\n elif self.peek == \"CHAR\":\n char = self.consume()\n expr = self.semantics.on_char(char.val, char.loc)\n elif self.peek == \"STRING\":\n txt = self.consume()\n expr = self.semantics.on_string(txt.val, txt.loc)\n elif self.peek in [\"!\", \"*\", \"+\", \"-\", \"~\", \"&\", \"--\", \"++\"]:\n op = self.consume()\n if op.val in [\"--\", \"++\"]:\n operator = op.val + \"x\"\n else:\n operator = op.val\n expr = self.parse_primary_expression()\n expr = self.semantics.on_unop(operator, expr, op.loc)\n elif self.peek == \"__builtin_va_start\":\n location = self.consume(\"__builtin_va_start\").loc\n self.consume(\"(\")\n ap = self.parse_assignment_expression()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_start(ap, location)\n elif self.peek == \"__builtin_va_arg\":\n location = self.consume(\"__builtin_va_arg\").loc\n self.consume(\"(\")\n ap = self.parse_assignment_expression()\n self.consume(\",\")\n typ = self.parse_typename()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_arg(ap, typ, location)\n elif self.peek == \"__builtin_va_copy\":\n location = self.consume(\"__builtin_va_copy\").loc\n self.consume(\"(\")\n dest = self.parse_assignment_expression()\n self.consume(\",\")\n src = self.parse_assignment_expression()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_copy(dest, src, location)\n elif self.peek == \"__builtin_offsetof\":\n location = self.consume(\"__builtin_offsetof\").loc\n self.consume(\"(\")\n typ = self.parse_typename()\n self.consume(\",\")\n member = self.consume(\"ID\").val\n self.consume(\")\")\n expr = self.semantics.on_builtin_offsetof(typ, member, location)\n elif self.peek == \"sizeof\":\n location = self.consume(\"sizeof\").loc\n if self.peek == \"(\":\n self.consume(\"(\")\n if self.is_declaration_statement():\n typ = self.parse_typename()\n else:\n typ = self.parse_expression()\n self.consume(\")\")\n expr = self.semantics.on_sizeof(typ, location)\n else:\n sizeof_expr = self.parse_primary_expression()\n expr = self.semantics.on_sizeof(sizeof_expr, location)\n elif self.peek == \"(\":\n loc = self.consume(\"(\").loc\n # Is this a type cast?\n if self.is_declaration_statement():\n # Cast or compound literal!\n to_typ = self.parse_typename()\n self.consume(\")\")\n if self.peek == \"{\":\n init = self.parse_initializer_list(to_typ)\n expr = self.semantics.on_compound_literal(\n to_typ, init, loc\n )\n else:\n casted_expr = self.parse_primary_expression()\n expr = self.semantics.on_cast(to_typ, casted_expr, loc)\n else:\n # Parenthized expression (reset precedence)\n expr = self.parse_expression()\n self.consume(\")\")\n else:\n self.error(\"Expected expression\")\n\n # Postfix operations (have the highest precedence):\n while self.peek in [\"--\", \"++\", \"[\", \".\", \"->\", \"(\"]:\n if self.peek in [\"--\", \"++\"]:\n op = self.consume()\n expr = self.semantics.on_unop(\"x\" + op.val, expr, op.loc)\n elif self.peek == \"[\":\n location = self.consume(\"[\").loc\n index = self.parse_expression()\n self.consume(\"]\")\n expr = self.semantics.on_array_index(expr, index, location)\n elif self.peek == \"(\":\n expr = 
self.parse_call(expr)\n elif self.peek == \".\":\n location = self.consume(\".\").loc\n field = self.consume(\"ID\").val\n expr = self.semantics.on_field_select(expr, field, location)\n elif self.peek == \"->\":\n location = self.consume(\"->\").loc\n field = self.consume(\"ID\").val\n # Dereference pointer:\n expr = self.semantics.on_unop(\"*\", expr, location)\n expr = self.semantics.on_field_select(expr, field, location)\n else: # pragma: no cover\n self.not_impl()\n return expr",
"def parse_expression(expr):\n child_expressions = []\n for child_expr in expr:\n if isinstance(child_expr, pyparsing.ParseResults):\n child_expressions.append(parse_expression(child_expr))\n else:\n child_expressions.append(child_expr)\n while len(child_expressions) > 2:\n res = eval(\"\".join(map(str, child_expressions[0:3])))\n child_expressions = [res] + child_expressions[3:]\n return int(child_expressions[0])",
"def eval_par(expr: Expression, ptr: int, task: Task) -> Tuple[int, int]:\n counter = 0\n for idx, token in enumerate(expr[ptr:]):\n if token == \"(\":\n counter += 1\n if token == \")\":\n counter -= 1\n if counter == 0:\n start = ptr + 1\n end = ptr + idx\n return evaluate(expr[start:end], task), end\n raise Exception(\"Malformed expression: parenthesis doesn't match!\")",
"def do_eval(expr, context):\n return eval(expr, context.vals)",
"def _expressionWithinParentheses(expr, opos, cpos):\n pos = expr[cpos + 1:].find(')')\n if pos == -1:\n assertError(\"SYNTAX ERROR\")\n cpos += pos + 1\n\n pos = expr[opos + 1:cpos].find('(')\n if pos == -1:\n return expr[:cpos + 1]\n opos += pos + 1\n\n return _expressionWithinParentheses(expr, opos, cpos)",
"def parse_expression(self):\n\t\t# Check amount of operators\n\t\toperations = [re.findall(operation, self.expression) for operation in self.operations]\n\t\t# Clean empty matches\n\t\toperations = [operation[0] for operation in operations if operation]\n\t\tn_operations = len(operations)\n\n\t\t# Check if the formula is correct\n\t\tif n_operations > 1:\n\t\t\traise ValueError(\"Chaining different operators is not implemented.\")\n\t\telif n_operations == 0:\n\t\t\traise ValueError(\"Formula is not valid\")\n\t\telse:\n\t\t\t# Get operation\n\t\t\toperation = operations[0]\n\n\t\t\t# Find whichs cells do we need for the formulae\n\t\t\tinvolved_cells_str = re.search(r'\\((.*?)\\)', self.expression).group(1)\n\t\t\tinvolved_idxs = []\n\n\t\t\t# Iterate over groups (; is just separation)\n\t\t\tfor group in involved_cells_str.split(';'):\n\t\t\t\t# Check if it is an slice\n\t\t\t\tif ':' in group:\n\t\t\t\t\tinit, end = group.split(':')\n\t\t\t\t\tprint(init, end)\n\t\t\t\t\t# Extract column alias\n\t\t\t\t\tinit_column = \"\".join(re.findall(\"[a-zA-Z]+\", init))\n\t\t\t\t\tend_column = \"\".join(re.findall(\"[a-zA-Z]+\", end))\n\n\t\t\t\t\t# Extract row idx, as python array starts at poisition 0 we need to substract one to the row idxs\n\n\t\t\t\t\tinit_row = int(''.join([char for char in init if char not in init_column]))-1\n\t\t\t\t\tend_row = int(''.join([char for char in end if char not in end_column]))-1\n\t\t\t\t\tprint(\"Start: {}\\n End {}\\n\".format((init_column, init_row),(end_column, end_row)))\n\n\t\t\t\t\t# Convert alias to idx for columns\n\t\t\t\t\tinit_column = self.code_2_idx(init_column)\n\t\t\t\t\tend_column = self.code_2_idx(end_column)\n\n\t\t\t\t\tprint(\"Start: {}\\n End {}\\n\".format((init_column, init_row),(end_column, end_row)))\n\n\t\t\t\t\trange_rows = list(range(init_row, end_row+1, 1))\n\t\t\t\t\trange_cols = list(range(init_column, end_column+1, 1))\n\t\t\t\t\tprint(range_rows)\n\t\t\t\t\tfor position in itertools.product(range_cols, range_rows):\n\t\t\t\t\t\tinvolved_idxs.append(position)\n\n\t\t\t\telse:\n\t\t\t\t\tcol = \"\".join(re.findall(\"[a-zA-Z]+\", group))\n\t\t\t\t\trow = int(\"\".join([char for char in group if char not in col]))-1\n\t\t\t\t\tinvolved_idxs.append((self.alias_list.index(col), row))\n\n\t\t\treturn operation, involved_idxs",
"def expr(self):\n\n expr = Z3.parse(str(self))\n return expr",
"def evaluate(expr, locals):",
"def test_simple_parse(self):\n # pylint: disable=protected-access\n eqn = Equation(self.model, 'x = y')\n self.assertIsNotNone(eqn)\n\n self.assertEqual('x = y', eqn.equation)\n eqn.parse(self.model._local_context)\n\n self.assertEqual('y', str(eqn.expr))\n self.assertEqual(self.y, eqn.expr)\n self.assertEqual(eqn, self.x.equation)\n self.assertEqual(self.x, eqn.variable)",
"def interpretETREE(etree) :\r\n if isinstance(etree, str) and etree.isdigit() : # NUM -- string of digits\r\n ans = int(etree)\r\n elif etree[0] in (\"+\", \"-\") : # [OP, ETREE, ETREE]\r\n ans1 = interpretETREE(etree[1])\r\n ans2 = interpretETREE(etree[2])\r\n if isinstance(ans1,int) and isinstance(ans2, int) :\r\n if etree[0] == \"+\" :\r\n ans = ans1 + ans2\r\n elif etree[0] == \"-\" :\r\n ans = ans1 - ans2\r\n else : crash(etree, \"addition error --- nonint value used\")\r\n elif etree[0] == \"deref\" : # [\"deref\", LTREE]\r\n handle, field = interpretLTREE(etree[1])\r\n ans = lookup(handle,field)\r\n else : crash(etree, \"invalid expression form\")\r\n return ans",
"def parse(self) -> None:\n if self.current[0] == Token.CTE: # constant ?\n print(self.current[1])\n self.current = self.next_token() # reads next token\n return # recursion end\n elif self.current[0] == Token.PARL: # ( ?\n print('(')\n self.current = self.next_token() # reads next token\n self.parse() # recursion for ( expr )\n if self.current[0] == Token.PARR: # ) ?\n print(')')\n self.current = self.next_token() # reads next token\n return # recursion end\n if self.current[0] == Token.ADD:\n print('+') # operator?\n elif self.current[0] == Token.SUB:\n print('-')\n elif self.current[0] == Token.MUL:\n print('*')\n elif self.current[0] == Token.DIV:\n print('/')\n else:\n raise ParsingException(\"Wrong operator or left parenthesis expected\")\n self.current = self.next_token() # reads next token\n self.parse() # recursion for ( ... oper expr )\n if self.current[0] == Token.PARR: # ) ?\n print(')')\n self.current = self.next_token() # reads next token\n return # recursion end\n else:\n raise ParsingException(\"Right parenthesis expected\")\n else:\n raise ParsingException(\"Left parenthesis or constant expected\")",
"def _evalExpression(self):\n value = self.expressionVar.get().strip()\n if value:\n for point in self.data:\n if point.eval(value):\n point.setState(Point.DISCARDED)",
"def parse_expression(self):\n text_parts = []\n\n while self.pos < len(self.string):\n char = self.string[self.pos]\n\n if char not in self.special_chars:\n # A non-special character. Skip to the next special\n # character, treating the interstice as literal text.\n next_pos = (\n self.special_char_re.search(self.string[self.pos:]).start()\n + self.pos\n )\n text_parts.append(self.string[self.pos:next_pos])\n self.pos = next_pos\n continue\n\n if self.pos == len(self.string) - 1:\n # The last character can never begin a structure, so we\n # just interpret it as a literal character (unless it\n # terminates the expression, as with , and }).\n if char not in (GROUP_CLOSE, ARG_SEP):\n text_parts.append(char)\n self.pos += 1\n break\n\n next_char = self.string[self.pos + 1]\n if char == ESCAPE_CHAR and next_char in \\\n (SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP):\n # An escaped special character ($$, $}, etc.). Note that\n # ${ is not an escape sequence: this is ambiguous with\n # the start of a symbol and it's not necessary (just\n # using { suffices in all cases).\n text_parts.append(next_char)\n self.pos += 2 # Skip the next character.\n continue\n\n # Shift all characters collected so far into a single string.\n if text_parts:\n self.parts.append(u''.join(text_parts))\n text_parts = []\n\n if char == SYMBOL_DELIM:\n # Parse a symbol.\n self.parse_symbol()\n elif char == FUNC_DELIM:\n # Parse a function call.\n self.parse_call()\n elif char in (GROUP_CLOSE, ARG_SEP):\n # Template terminated.\n break\n elif char == GROUP_OPEN:\n # Start of a group has no meaning hear; just pass\n # through the character.\n text_parts.append(char)\n self.pos += 1\n else:\n assert False\n\n # If any parsed characters remain, shift them into a string.\n if text_parts:\n self.parts.append(u''.join(text_parts))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a context expr
|
def context_expr():
contexts = yield sepBy1(
about_context ^
incoordsys_context ^
innode_context ^
at_context ^
with_context, listsep())
yield normalspaces()
expr = yield expression
return s.Construct(s.CONTEXT_EXPR, contexts, expr)
|
[
"def parse_expression(expr):\n child_expressions = []\n for child_expr in expr:\n if isinstance(child_expr, pyparsing.ParseResults):\n child_expressions.append(parse_expression(child_expr))\n else:\n child_expressions.append(child_expr)\n while len(child_expressions) > 2:\n res = eval(\"\".join(map(str, child_expressions[0:3])))\n child_expressions = [res] + child_expressions[3:]\n return int(child_expressions[0])",
"def do_eval(expr, context):\n return eval(expr, context.vals)",
"def _parse_expr(cls, expr, template, lineno=-1, offset=-1):\r\n try:\r\n return expr and Expression(expr, template.filepath, lineno,\r\n lookup=template.lookup) or None\r\n except SyntaxError, err:\r\n err.msg += ' in expression \"%s\" of \"%s\" directive' % (expr,\r\n cls.tagname)\r\n raise TemplateSyntaxError(err, template.filepath, lineno,\r\n offset + (err.offset or 0))",
"def _parse_expr(cls, expr, template, lineno=-1, offset=-1):\r\n try:\r\n return expr and Expression(expr, template.filepath, lineno,\r\n lookup=template.lookup) or None\r\n except SyntaxError as err:\r\n err.msg += ' in expression \"%s\" of \"%s\" directive' % (expr,\r\n cls.tagname)\r\n raise TemplateSyntaxError(err, template.filepath, lineno,\r\n offset + (err.offset or 0))",
"def parse_primary_expression(self):\n if self.peek == \"ID\":\n identifier = self.consume(\"ID\")\n expr = self.semantics.on_variable_access(\n identifier.val, identifier.loc\n )\n elif self.peek == \"NUMBER\":\n number = self.consume()\n expr = self.semantics.on_number(number.val, number.loc)\n elif self.peek == \"FLOAT\":\n number = self.consume()\n expr = self.semantics.on_float(number.val, number.loc)\n elif self.peek == \"CHAR\":\n char = self.consume()\n expr = self.semantics.on_char(char.val, char.loc)\n elif self.peek == \"STRING\":\n txt = self.consume()\n expr = self.semantics.on_string(txt.val, txt.loc)\n elif self.peek in [\"!\", \"*\", \"+\", \"-\", \"~\", \"&\", \"--\", \"++\"]:\n op = self.consume()\n if op.val in [\"--\", \"++\"]:\n operator = op.val + \"x\"\n else:\n operator = op.val\n expr = self.parse_primary_expression()\n expr = self.semantics.on_unop(operator, expr, op.loc)\n elif self.peek == \"__builtin_va_start\":\n location = self.consume(\"__builtin_va_start\").loc\n self.consume(\"(\")\n ap = self.parse_assignment_expression()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_start(ap, location)\n elif self.peek == \"__builtin_va_arg\":\n location = self.consume(\"__builtin_va_arg\").loc\n self.consume(\"(\")\n ap = self.parse_assignment_expression()\n self.consume(\",\")\n typ = self.parse_typename()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_arg(ap, typ, location)\n elif self.peek == \"__builtin_va_copy\":\n location = self.consume(\"__builtin_va_copy\").loc\n self.consume(\"(\")\n dest = self.parse_assignment_expression()\n self.consume(\",\")\n src = self.parse_assignment_expression()\n self.consume(\")\")\n expr = self.semantics.on_builtin_va_copy(dest, src, location)\n elif self.peek == \"__builtin_offsetof\":\n location = self.consume(\"__builtin_offsetof\").loc\n self.consume(\"(\")\n typ = self.parse_typename()\n self.consume(\",\")\n member = self.consume(\"ID\").val\n self.consume(\")\")\n expr = self.semantics.on_builtin_offsetof(typ, member, location)\n elif self.peek == \"sizeof\":\n location = self.consume(\"sizeof\").loc\n if self.peek == \"(\":\n self.consume(\"(\")\n if self.is_declaration_statement():\n typ = self.parse_typename()\n else:\n typ = self.parse_expression()\n self.consume(\")\")\n expr = self.semantics.on_sizeof(typ, location)\n else:\n sizeof_expr = self.parse_primary_expression()\n expr = self.semantics.on_sizeof(sizeof_expr, location)\n elif self.peek == \"(\":\n loc = self.consume(\"(\").loc\n # Is this a type cast?\n if self.is_declaration_statement():\n # Cast or compound literal!\n to_typ = self.parse_typename()\n self.consume(\")\")\n if self.peek == \"{\":\n init = self.parse_initializer_list(to_typ)\n expr = self.semantics.on_compound_literal(\n to_typ, init, loc\n )\n else:\n casted_expr = self.parse_primary_expression()\n expr = self.semantics.on_cast(to_typ, casted_expr, loc)\n else:\n # Parenthized expression (reset precedence)\n expr = self.parse_expression()\n self.consume(\")\")\n else:\n self.error(\"Expected expression\")\n\n # Postfix operations (have the highest precedence):\n while self.peek in [\"--\", \"++\", \"[\", \".\", \"->\", \"(\"]:\n if self.peek in [\"--\", \"++\"]:\n op = self.consume()\n expr = self.semantics.on_unop(\"x\" + op.val, expr, op.loc)\n elif self.peek == \"[\":\n location = self.consume(\"[\").loc\n index = self.parse_expression()\n self.consume(\"]\")\n expr = self.semantics.on_array_index(expr, index, location)\n elif self.peek == \"(\":\n expr = 
self.parse_call(expr)\n elif self.peek == \".\":\n location = self.consume(\".\").loc\n field = self.consume(\"ID\").val\n expr = self.semantics.on_field_select(expr, field, location)\n elif self.peek == \"->\":\n location = self.consume(\"->\").loc\n field = self.consume(\"ID\").val\n # Dereference pointer:\n expr = self.semantics.on_unop(\"*\", expr, location)\n expr = self.semantics.on_field_select(expr, field, location)\n else: # pragma: no cover\n self.not_impl()\n return expr",
"def parse_expr(expr):\n global re_braces,re_dollar\n state_deps = []\n arg_deps = []\n def f(match):\n state_deps.append(match.group()[1:-1])\n return '_state_[\"'+match.group()[1:-1]+'\"]'\n def g(match):\n arg_deps.append(match.group()[1:])\n return '_args_[\"'+match.group()[1:]+'\"]'\n res = re_braces.sub(f,expr)\n res = re_dollar.sub(g,res)\n return res,state_deps,arg_deps",
"def at_context():\n yield keyword(\"at\")\n yield normalspaces()\n kw = yield keyword(\"level|time\")\n yield normalspaces()\n v = yield operand\n return s.Construct(s.CONTEXT_AT, kw, v)",
"def eval_par(expr: Expression, ptr: int, task: Task) -> Tuple[int, int]:\n counter = 0\n for idx, token in enumerate(expr[ptr:]):\n if token == \"(\":\n counter += 1\n if token == \")\":\n counter -= 1\n if counter == 0:\n start = ptr + 1\n end = ptr + idx\n return evaluate(expr[start:end], task), end\n raise Exception(\"Malformed expression: parenthesis doesn't match!\")",
"def parse(self) -> None:\n if self.current[0] == Token.CTE: # constant ?\n print(self.current[1])\n self.current = self.next_token() # reads next token\n return # recursion end\n elif self.current[0] == Token.PARL: # ( ?\n print('(')\n self.current = self.next_token() # reads next token\n self.parse() # recursion for ( expr )\n if self.current[0] == Token.PARR: # ) ?\n print(')')\n self.current = self.next_token() # reads next token\n return # recursion end\n if self.current[0] == Token.ADD:\n print('+') # operator?\n elif self.current[0] == Token.SUB:\n print('-')\n elif self.current[0] == Token.MUL:\n print('*')\n elif self.current[0] == Token.DIV:\n print('/')\n else:\n raise ParsingException(\"Wrong operator or left parenthesis expected\")\n self.current = self.next_token() # reads next token\n self.parse() # recursion for ( ... oper expr )\n if self.current[0] == Token.PARR: # ) ?\n print(')')\n self.current = self.next_token() # reads next token\n return # recursion end\n else:\n raise ParsingException(\"Right parenthesis expected\")\n else:\n raise ParsingException(\"Left parenthesis or constant expected\")",
"def with_context():\n # pylint: disable=line-too-long\n yield optional(keyword(\"with\"))\n yield normalspaces()\n kw = yield keyword(\"(animate|undo|redraw|quiet|printAllElements|defaultAction|MXSCallstackCaptureEnabled|dontRepeatMessages|macroRecorderEmitterEnabled)\")\n yield normalspaces()\n v = yield operand #expression\n return s.Construct(s.CONTEXT_WITH, kw, v)",
"def evaluate(expr, locals):",
"def parse(s):\n t = _Tokens(s)\n ret = t.parse_expr(True)\n if len(t) != 0:\n raise ValueError('extra stuff:' + str(t))\n return ret",
"def _parse_context_string(data : dict, value : str) -> Any:\n # Find all context values in string.\n contexts = re.findall(\"({[<%#:]{1} [\\S]+ [%#:>]{1}})\", value)\n # If there is no any context values in string,\n # return the string itself.\n if len(contexts) == 0:\n return value\n # If value is just a context value, \n # return the value of the context item instead of a string.\n if len(contexts) == 1 and value.strip() == contexts[0]:\n return ConduitStep._parse_context_tag(data, contexts[0])\n else:\n val = value\n for item in contexts:\n val = ConduitStep._parse_context_string(data, val.replace(item, ConduitStep._parse_context_tag(data, item)))\n return val",
"def interpretETREE(etree) :\r\n if isinstance(etree, str) and etree.isdigit() : # NUM -- string of digits\r\n ans = int(etree)\r\n elif etree[0] in (\"+\", \"-\") : # [OP, ETREE, ETREE]\r\n ans1 = interpretETREE(etree[1])\r\n ans2 = interpretETREE(etree[2])\r\n if isinstance(ans1,int) and isinstance(ans2, int) :\r\n if etree[0] == \"+\" :\r\n ans = ans1 + ans2\r\n elif etree[0] == \"-\" :\r\n ans = ans1 - ans2\r\n else : crash(etree, \"addition error --- nonint value used\")\r\n elif etree[0] == \"deref\" : # [\"deref\", LTREE]\r\n handle, field = interpretLTREE(etree[1])\r\n ans = lookup(handle,field)\r\n else : crash(etree, \"invalid expression form\")\r\n return ans",
"def get_cond_expr_tokens(self, attr_token):\n expression = ''\n child_amod = None\n child_quantmod = None\n child_nummod = None\n\n children = [child for child in attr_token.lefts] # consider only left children\n \n # find out if attr_token has nummod, if so, then amod and quantmod\n # will be relative to the nummod token - so modify children list\n # otherwise they are relative to attr_token - so leave the children list as is.\n # \n # Refer Eg #1 & #2 above\n\n for child in children:\n # get nummod\n if child.dep_ == 'nummod':\n child_nummod = child\n if child_nummod:\n children = [child for child in child_nummod.lefts] # consider only left children\n\n for child in children:\n # get amod\n if child.dep == amod:\n child_amod = child\n # get quantmod\n if child.dep == quantmod:\n child_quantmod = child\n \n #expression = ' '.join[child_amod.text, child_quantmod.text, child_nummod.text] \n return (child_amod, child_quantmod, child_nummod)",
"def evaluate(expr,**bindings):\n expr = expr.replace(\" \", \"\")\n paren, lst, lst_op = 0, -1, None\n #finds the last operator to be evaluated.\n for i in range(len(expr)):\n if expr[i] == \"(\":\n paren = paren + 1\n elif expr[i] == ')':\n paren = paren - 1\n else:\n s = op_by_symbol(expr[i])\n if s is None or paren != 0:\n continue\n elif lst == -1:\n lst = i\n lst_op = s\n elif s < lst_op:\n lst = i\n lst_op = s\n\n if lst_op is None:\n #if there were no operators found, make sure the expr was not wrapped in ()\n if expr[0] == '(' and expr[len(expr) - 1] == \")\":\n return evaluate(expr[1: len(expr) - 1], **bindings)\n else:#if not in (), this must be a variable\n return bindings[expr]\n elif lst_op == Operators.NOT:#otherwise, evaluate the operator.\n return lst_op(evaluate(expr[lst + 1:], **bindings))\n else:\n return lst_op([evaluate(expr[:lst], **bindings),evaluate(expr[lst + 1:], **bindings)])",
"def __init__(self, expr: str) -> None:\n self.expr = expr\n self.length = len(self.expr)\n self.idx = 0 # index used by next_token()\n self.current = self.next_token() # reads first token",
"def test_expressions_with_fame(self):\n c = Context()\n c[\"foo\"] = dict(a=1, b=2, bar=\"apples\")\n c[\"top\"] = 10\n c[\"r\"] = list(range(10))\n tests = [(\"a+b\", 3), (\".top\", 10), (\"a+.top\", 11), (\".r.4+.top\", 14)]\n with c.frame(\"foo\"):\n for expression, result in tests:\n self.assertEqual(c.eval(expression), result)",
"def parser(string): \n#1 we tokenize the expression, thanks to the lexer and the Token constructor\n# the names are mapped thanks to the token_map dictionary\n tokens = [Token(token_map.get(x, 'ATOM'), x) for x in lex(string)]\n try:\n (e, i) = parse_iff(tokens)\n if not i:\n return e\n else:\n raise Exception('Unparsed input')\n except:\n raise"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a set context
|
def set_context():
yield keyword("set")
yield normalspaces()
cxt = yield about_context ^ incoordsys_context ^ innode_context ^ at_context ^ with_context
return s.Construct(s.SET_CONTEXT, cxt)
|
[
"def parse_set_list(self,tokiter,scopes):\n peek=tokiter.peek()\n while peek.token_type=='varname':\n yield peek.token_value\n lvaltoken=peek\n tokiter.next() # discard varname\n peek=tokiter.peek()\n while peek.token_type==end_of_line_type:\n tokiter.next()\n peek=tokiter.peek()\n if peek.token_type=='==':\n # this is an \"if var==value\" condition\n tokiter.next() # consume ==\n peek=tokiter.peek()\n if peek.token_type!='varname':\n self.error('run setname @ ... var==',peek)\n rvaltoken=tokiter.next()\n lval=self.action_resolve(lvaltoken,scopes)\n rval=self.action_resolve(rvaltoken,scopes)\n yield lval == rval\n\n peek=tokiter.peek()\n if peek.token_type!=',':\n return # reached end of list.\n tokiter.next() # discard \",\"\n peek=tokiter.peek()",
"def _parse_context_string(data : dict, value : str) -> Any:\n # Find all context values in string.\n contexts = re.findall(\"({[<%#:]{1} [\\S]+ [%#:>]{1}})\", value)\n # If there is no any context values in string,\n # return the string itself.\n if len(contexts) == 0:\n return value\n # If value is just a context value, \n # return the value of the context item instead of a string.\n if len(contexts) == 1 and value.strip() == contexts[0]:\n return ConduitStep._parse_context_tag(data, contexts[0])\n else:\n val = value\n for item in contexts:\n val = ConduitStep._parse_context_string(data, val.replace(item, ConduitStep._parse_context_tag(data, item)))\n return val",
"def _parse_content_all(data : dict, value : Any) -> Any:\n if isinstance(value, str):\n return ConduitStep._parse_context_string(data, value)\n elif isinstance(value, list):\n return [ConduitStep._parse_content_all(data, x) for x in value]\n elif isinstance(value, dict):\n return { ConduitStep._parse_context_string(data, x) : ConduitStep._parse_content_all(data, y) for x, y in value.items() }\n return value",
"def prepare_context(pipeline, context_in_string, context):\n logger.debug(\"starting\")\n\n parsed_context = get_parsed_context(\n pipeline=pipeline,\n context_in_string=context_in_string)\n\n context.update(parsed_context)\n\n logger.debug(\"done\")",
"def questionParse(self):\n text = self.text\n text = text.lower()\n nlp = self.nlp\n doc = nlp(text)\n print(\"Finding entities set and relations set...\\n\")\n ents_set = set(str(ent) for ent in doc.ents)\n rels_list = self.get_relation(doc)\n rels_set = set(str(rel[-1]) for rel in rels_list)\n return ents_set, rels_set",
"def process_context(self, context: Iterable) -> Any:\n ns_map: Dict = {}\n for event, element in context:\n if event == EventType.START:\n self.parser.start(\n self.clazz,\n self.queue,\n self.objects,\n element.tag,\n element.attrib,\n self.merge_parent_namespaces(ns_map),\n )\n ns_map = {}\n elif event == EventType.END:\n self.parser.end(\n self.queue,\n self.objects,\n element.tag,\n element.text,\n element.tail,\n )\n element.clear()\n elif event == EventType.START_NS:\n prefix, uri = element\n ns_map[prefix or None] = uri\n else:\n raise XmlHandlerError(f\"Unhandled event: `{event}`.\")\n\n return self.objects[-1][1] if self.objects else None",
"def set(contextIn):\n global context\n context = contextIn",
"def test_parser_context_dict():\n parser = QueryParamParser(query_params={}, context={\"a\": \"b\"})\n assert parser.context.get(\"a\") == \"b\"",
"def _parse(self):\n logger.debug('Parsing file: %s', self.filename)\n self._context = []\n self._last_popped = None\n self.statement_pre_read = None\n self.sw = None\n while self.can_read():\n token = self.next_token()\n if token is None:\n continue\n if token.model is None:\n continue\n if self.find_context_top(cond=lambda x: x != token and x.isinstance(CodeBlock)) is None:\n # this token model has no parents, we must save it separately\n self._save_model(token.model)\n self.parsed = True",
"def _parse_set_response(self, response, prompt):\n\n log.debug(\"_parse_set_response RESPONSE = \" + str(response) + \"/PROMPT = \" + str(prompt))\n if ((prompt != Prompt.COMMAND) or ('Error' in response)):\n raise InstrumentProtocolException('Protocol._parse_set_response : Set command not recognized: %s' % response)",
"def _parse_selection_set(\n selection_set_ast: Optional[dict]\n) -> Optional[\"SelectionSetNode\"]:\n if selection_set_ast:\n return SelectionSetNode(\n selections=_parse_selections(selection_set_ast[\"selections\"]),\n location=_parse_location(selection_set_ast[\"loc\"]),\n )\n return None",
"def parse_sets(self, map22, character_names):\n character_lists = {x.path: x for x in map22.entries if x.type == \"MapCharacterList\"}\n set_collection = [x for x in map22.entries if x.type == 0x438850FF]\n\n if not set_collection:\n # backward compatibility\n longest_character_list = max(character_lists.values(), key=lambda v: len(v.fields[0].value))\n champion_list = longest_character_list.fields[0].value\n set_characters = [character_names.get(char) for char in champion_list]\n return [(1, \"Base\", set_characters)]\n\n sets = []\n for item in set_collection:\n char_list = item.getv(\"characterLists\")[0]\n set_info = item[0xD2538E5A].value\n set_number = set_info[\"SetNumber\"][\"mValue\"].value\n set_name = set_info[\"SetName\"][\"mValue\"].value\n\n if char_list not in character_lists:\n continue\n\n set_characters = [character_names[char] for char in character_lists[char_list].getv(\"Characters\")]\n sets.append((set_number, set_name, set_characters))\n return sets",
"def toolset_from_grammar():\n ### <toolset>\n def doMult(node):\n \t(a,b) = node\n \tnode.value = a.value * b.value\n \n def doAdd(node):\n \t(a,b) = node\n \tnode.value = a.value + b.value\n \n def formatResult(node):\n \tnode.value = \"%.3f\" % node.value\n \n return locals().copy()",
"def parse(self,tokenizer,scope=None,unique_id=None,morevars=None):\n if scope is None:\n scope=Scope()\n if unique_id is None:\n unique_id=os.getpid()\n if morevars is not None:\n for k,v in morevars.iteritems():\n scope.setlocal(str(k),String([scope],str(v),False))\n if not isinstance(unique_id,int):\n raise TypeError(\n 'The unique_id argument to Parser.parse() must be an '\n 'int, not a %s %s.'%( type(unique_id).__name__,\n elipses(repr(unique_id)) ))\n tokiter=peekable(tokenizer)\n scope.setlocal('ENV',Environ())\n assert(isinstance(unique_id,int))\n scope.setlocal('UNIQUE_ID',Numeric(unique_id))\n if self.run_mode==BASELINE:\n scope.setlocal('RUN_MODE',String([scope],'BASELINE',False))\n else:\n scope.setlocal('RUN_MODE',String([scope],'EXECUTION',False))\n result=self.parse_subscope(\n tokiter,[scope],[end_of_text_type],\n self.parse_between_assignments,\n allow_overwrite=False,\n allow_resolve=True,\n allow_run=True,\n allow_null=False,\n allow_use=False,\n allow_load=True,\n scope_name='global scope')\n self.resolve_deps()",
"def __contexts(self, triple):\n return (\n self.__context_obj_map.get(ctx_str, ctx_str)\n for ctx_str in self.__get_context_for_triple(triple, skipQuoted=True)\n if ctx_str is not None\n )",
"def _parse_set_response(self, response, prompt):\n\n log.debug(\"SET RESPONSE = %s\" % response)\n\n if response.startswith(\"ERROR\"):\n log.error(\"Instrument returned error in response to SET Command: %s\" % response)\n raise InstrumentProtocolException(\n 'Protocol._parse_set_response: Instrument returned: ' + response)\n\n return response",
"def __get_unused_context(self, parse_result, context):\n tags_keys = set([t['key'] for t in parse_result['tags'] if t['from_context']])\n result_context = [c for c in context if c['key'] not in tags_keys]\n return result_context",
"def parse_input(data: Iterator[str]) -> tuple[set[tuple[int, int]], Iterator[Fold]]:\n coords = (line.strip().split(',')\n for line in takewhile(lambda line: line != '\\n', data))\n instructs = (line.strip().split(' ')[-1].split('=') for line in data)\n return (set((int(x), int(y)) for x, y in coords),\n (Fold(Direction(d), int(v)) for d, v in instructs))",
"def lineParser( line, context, lineByLine=True ):\n global alreadyCaught\n global preFilterHandler\n global postFilterHandler\n\n linestack = []\n done = False\n \n while not done:\n\tlinestack.append(line)\n reo = rex.search(line)\n if reo:\n gotns=reo.group(4)\n gottag=reo.group(5)\n\t gotargv=reo.group(7)\n\n\t # In case the argument list contains tags as in: @foo(@arg@)@\n\t # Process the argument list before calling the replacement function.\n\t if gotargv:\n\t\tgotargv = lineParser(gotargv, context)\n\t \n\n\t # If we got a scoped tag, lookup the correct context first.\n\t if gotns:\n\t\tif ( context.name == 'gotns'):\n\t\t curctx = context\n\t\telse:\n\t\t tmpctx = context.getContext(gotns)\n if tmpctx:\n curctx = tmpctx\n else:\n print \"No such context [ %s ]\" % gotns\n gottag = \"UNDEFINED\"\n curctx = context\n\t else:\n\t\tcurctx = context\n\n try:\n newtext = curctx.replace(gottag, gotargv)\n # Only replace 1 occurrence at a time.\n # This is potentially fragile because it allows\n # the possibility that the wrong tag is replaced\n # if there are multiple matches on a line. I'm\n # assuming their is a well defined order on the\n # matches.\n\t\tstart = reo.start(2) - 1\n line = line[:start] + rex.sub( newtext, line[start:], 1 )\n except ValueError, (tag):\n\t\tif alreadyCaught:\n\t\t print \" from text: %s\" % linestack.pop()\n\t\telse:\n\t\t print \" in text: %s\" % linestack.pop()\n\t\t\talreadyCaught = True\n\t\tlinestack.reverse()\n\t\tremaining = len(linestack)\n\t\tfor l in linestack:\n\t\t\tremaining = remaining - 1\n\t\t\tif not remaining:\n\t\t \tprint \" from original text: %s\" % (l)\n\t\t\telse:\n\t\t\t print \" from previous text: %s\" % (l)\n raise ValueError, (tag, line)\n\t except InvalidReplacement, e:\n print \" Invalid replacement text: \", e\n raise Exception(\"\\n\\n###################################################################################################\\nCaught exception while parsing @%s@ in line:\\n%s\\n##################################################################################################\\n\\n\" % (gottag, line[:80]))\n\t except UnknownContext, e:\n\t\tprint e\n\t\traise Exception(\"\\n\\n###################################################################################################\\nGenerated UnknownContext Exception [ %s ] while parsing @%s@ in line:\\n##################################################################################################\\n\\n\" % (e, gottag)) \n\t except Exception, e:\n\t\tprint e\n raise Exception(\"\\n\\n###################################################################################################\\nCaught exception while parsing @%s@ in line:\\n%s\\n##################################################################################################\\n\\n\" % (gottag, line[:80]))\n else:\n done = True\n \n # Finally replace @@@ with @.\n reo = atx.search(line)\n if reo:\n line = atx.sub('@', line)\n\n\n if postFilterHandler != None:\n\tpostHandler(line)\n \n return line"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a global decl
|
def global_decl():
yield keyword("global")
yield normalspaces()
decls = yield sepBy1(decl, listsep())
return s.Construct(s.GLOBAL_DECL, decls)
|
[
"def get_decl():\n s=r'''\nvoid a(){\n float ret;\n int *aa;\n}\n'''\n parser = c_parser.CParser()\n a_ast = parser.parse(s)\n a_decl=a_ast.ext[0].body.block_items\n return a_decl",
"def variable_decl():\n @generate\n def persistent_global_scope():\n yield keyword(\"persistent\")\n yield normalspaces()\n yield keyword(\"global\")\n return s.Construct(s.PERSISTENTGLOBAL)\n\n @generate\n def global_scope():\n yield keyword(\"global\")\n return s.Construct(s.GLOBAL)\n\n @generate\n def local_scope():\n yield keyword(\"local\")\n return s.Construct(s.LOCAL)\n\n @generate\n def scope_def():\n sdef = yield (\n persistent_global_scope ^\n global_scope ^\n local_scope)\n return sdef\n\n # parsing (if there is no scope, it not a decl it an assignment)\n scope = yield scope_def\n yield normalspaces()\n assignments = yield sepBy1(\n decl, # optional_assignment if scope else assignment,\n listsep())\n\n return s.Construct(s.VARIABLE_DECL, scope, assignments)",
"def declvars(self, name: str):",
"def globaldecl(self):\n #We need to give preference to the test specification if it is available.\n vars = None\n if self.group.finder is not None and self.group.finder.test is not None:\n vars = self.group.finder.test.variables\n if vars is None:\n vars = self.group.variables\n\n if self.name.lower() in vars:\n return vars[self.name.lower()]\n else:\n return None",
"def parse_decl_group(self, decl_spec):\n declarator = self.parse_declarator()\n if decl_spec.storage_class == \"typedef\":\n self.parse_typedef(decl_spec, declarator)\n while self.has_consumed(\",\"):\n declarator = self.parse_declarator()\n self.parse_typedef(decl_spec, declarator)\n self.consume(\";\")\n elif self.peek == \"{\":\n # if function, parse implementation.\n # func_def = None\n self.parse_function_declaration(decl_spec, declarator)\n else:\n # We have variables here\n self.parse_variable_declaration(decl_spec, declarator)\n while self.has_consumed(\",\"):\n declarator = self.parse_declarator()\n self.parse_variable_declaration(decl_spec, declarator)\n self.consume(\";\")",
"def _parsefuncdecl(self, decl):\n i = decl.find(b'(')\n if i >= 0:\n return decl[:i]\n else:\n return decl",
"def parse_fileAST_exts(ast):\n global_ids = []\n global_funcs = []\n for ext in ast.ext:\n if type(ext) == c_ast.Decl:\n global_ids.append(ext)\n elif type(ext) == c_ast.FuncDef:\n global_funcs.append(ext)\n else:\n print(\"something else\")\n return global_ids, global_funcs",
"def is_declared_global(self, ):\n\t\tpass",
"def global_add(variables, order, name, doc):\n if name not in variables:\n variables[name] = GlobalDeclaration(doc)\n order.append(name)\n elif (hasattr(variables[name].element, \"doctype\") and\n variables[name].element.doctype == \"AUTOPARAM\"):\n #We can override the existing variable declaration because it was from a\n #parent test group and a global tag takes precedence. AUTOPARAMS are only\n #generated from regular=\"true\" tags, so the [default] <globals> would never\n #be handled in this section.\n variables[name] = GlobalDeclaration(doc)\n order.append(name) \n else:\n #We need to make sure that it is unique compared to the existing\n #one. If it isn't, stop the execution. If it is, we don't need\n #to add it again.\n existing = variables[name]\n if not existing.ignore and not existing.compare(doc):\n msg.err(\"variables in the calling namespace have the same name,\" + \\\n \" but different types: \\n{}{}\".format(existing.element, doc))\n exit(1)",
"def get_global_defs(self):\n if not self.valid8r:\n self.xsd_validate()\n\n def _name_in(name, gllist):\n return name in map(lambda g: g[0], gllist)\n def _first_item_in(name, gllist):\n matches = filter(lambda g: g[0] == name, gllist)\n return ((len(matches) > 0) and matches[0]) or None\n\n gltps = []\n glels = []\n incomplete = []\n self.abstypes = []\n\n # first collect the global element and type names, extracting typing\n # info\n for el in self.tree.getroot().iterchildren():\n\n if el.tag == self._fmt_qname(XSD_NS, \"element\"):\n # Global element\n tp = el.get(\"type\")\n if tp:\n tp = self._resolve_qname(tp, el, self.namespace)\n glels.append( (el.get(\"name\"), tp) )\n \n elif el.tag == self._fmt_qname(XSD_NS, \"complexType\") or \\\n el.tag == self._fmt_qname(XSD_NS, \"simpleType\"):\n # Global type\n name = el.get('name')\n if not name:\n line = \"\"\n if el.sourceline:\n line = \" at line \"+str(el.sourceline)\n raise SchemaValidationError(\"Empty or missing name for \"+\n \"global type definition\"+line)\n\n abstract = el.get('abstract', 'false')\n abstract = abstract == 'true'\n \n parent = self._get_super_type(el) \n if parent == self._resolve_qname(name, el, self.namespace):\n raise SchemaValidationError(\"Global type '\"+name+\"' derives\"+\n \" from itself!\")\n gltps.append( (name, parent) )\n if abstract:\n self.abstypes.append(name)\n\n # build the type ancestry lines\n gltps = self._trace_anscestors( gltps )\n self.global_types = dict(\n filter(lambda t: len(t[1]) == 0 or t[1][-1] != \"__missing__\",\n gltps.iteritems()) )\n incomplete.extend( map(lambda t: IncompleteType(t[0], t[1]),\n filter(lambda t: len(t[1]) > 0 and t[1][-1] == \"__missing__\",\n gltps.iteritems())) )\n\n # Now make sure our global elements are all defined\n self.global_elems = {}\n for elem, tp in glels:\n if elem in self.global_elems:\n raise SchemaValidationError(\"Multiple definitions found for \"+\n \"global element, \" + tp[0])\n elif self._global_type_defined(tp):\n self.global_elems[elem] = tp\n else:\n incomplete.append( IncompleteElement(elem, tp) )\n\n return incomplete",
"def parse(name):\n\n pass",
"def decltypes(self, name: str):",
"def forward_decl():\n\n @Parser\n def f(tokens, s):\n raise NotImplementedError('you must define() a forward_decl somewhere')\n\n return f",
"def print_declarations(self, decl=None, detailed=True, recursive=True, writer=sys.stdout.write):\r\n if None is decl:\r\n decl = self.global_ns\r\n decl_wrappers.print_declarations( decl, detailed, recursive, writer )",
"def obv_module_decls(self, stream, visitor):\n pass",
"def poa_module_decls(self, stream, visitor):\n pass",
"def find_modifiers_decl(tokens_inside_decl):\n modifiers = mods.UnitDeclarationModifiersRepr()\n\n i = 0\n if tokens_inside_decl[0] == CASE_GEN_SYM:\n modifiers.casegen = True\n i += 1\n\n expecting_variation = False\n expecting_argument = False\n while i < len(tokens_inside_decl):\n if tokens_inside_decl[i] == VARIATION_SYM:\n modifiers.variation_name = \"\"\n expecting_variation = True\n expecting_argument = False\n elif tokens_inside_decl[i] == ARG_SYM:\n modifiers.argument_name = \"\"\n expecting_variation = False\n expecting_argument = True\n elif expecting_variation:\n modifiers.variation_name += tokens_inside_decl[i]\n elif expecting_argument:\n modifiers.argument_name += tokens_inside_decl[i]\n i += 1\n\n modifiers.variation_name = remove_escapement(modifiers.variation_name)\n modifiers.argument_name = remove_escapement(modifiers.argument_name)\n\n return modifiers",
"def forward_decl():\n\n @Parser\n def f(_tokens, _s):\n raise NotImplementedError(\"you must define() a forward_decl somewhere\")\n\n f.name = \"forward_decl()\"\n return f",
"def visit_Decl(self, node): # pylint: disable=invalid-name\n if node.name is not None:\n self.ids.add(node.name)\n return node"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a rollout item
|
def rollout_item():
# pylint: disable=line-too-long
kw = yield keyword("dotnetcontrol|hyperlink|subrollout|multilistbox|imgtag|curvecontrol|angle|label|button|edittext|combobox|dropdownList|listbox|spinner|slider|pickbutton|radiobuttons|checkbox|checkbutton|colorPicker|mapbutton|materialbutton|progressbar|timer|bitmap|groupbox")
yield normalspaces()
var = yield var_name()
yield normalspaces()
label = yield optional(quoted)
yield normalspaces()
args = yield sepBy(named_argument, normalspaces())
return s.Construct(s.ROLLOUT_ITEM, kw, var, label, args)
|
[
"def parse_item(self):\n msg(\"parsing u.item\")\n lines = file('/'.join((self.datadir,\"u.item\"))).read().split(\"\\n\")\n infos = [line.replace('||','|').split('|') for line in lines if line]\n for info in infos:\n movie_id = int(info[0])\n if len(info[1].rstrip(')').rsplit(' (',1))==2:\n title, date1 = info[1].rstrip(')').rsplit(' (',1)\n else:\n title=info[1]\n date1 = ''\n release = 0\n if info[2]:\n release = time.strptime(info[2],\"%d-%b-%Y\")\n genres=info[-19:]\n self.item_info[movie_id]=(title,date1,release,info[3],genres)\n for i in xrange(len(genres)):\n if int(genres[i]) == 1:\n self.genre_by_item[i].append(movie_id)",
"def _ParseItem(cls, parser, data):\n value = parser[1].unpack(data)\n if len(parser) < 3:\n return parser[0]._make(value)\n result = []\n idx = 0\n for entry in parser[2]:\n offset, count = entry[:2]\n result.extend(value[idx:offset])\n if len(entry) == 2:\n result.append(tuple(value[offset:offset+count]))\n idx = offset + count\n continue\n subparser = entry[2]\n if not count:\n result.append(cls._ParseItem(subparser, value[offset]))\n else:\n val_list = value[offset:offset+count]\n result.append(tuple([cls._ParseItem(subparser, x) for x in val_list]))\n idx += offset + (count or 1)\n result.extend(value[idx:])\n return parser[0]._make(result)",
"def parse_item(self, item):\n if self.has_iattr(item.conf, 'hue2_id') and self.has_iattr(item.conf, 'hue2_function'):\n self.logger.debug(\"parse item: {}\".format(item))\n conf_data = {}\n conf_data['id'] = self.get_iattr_value(item.conf, 'hue2_id')\n conf_data['resource'] = self.get_iattr_value(item.conf, 'hue2_resource')\n conf_data['function'] = self.get_iattr_value(item.conf, 'hue2_function')\n conf_data['item'] = item\n self.plugin_items[item.path()] = conf_data\n if conf_data['resource'] == 'sensor':\n # ensure that the scheduler for sensors will be started if items use sensor data\n self.sensor_items_configured = True\n if conf_data['resource'] == 'light':\n # ensure that the scheduler for sensors will be started if items use sensor data\n self.light_items_configured = True\n\n if conf_data['resource'] == 'group':\n # bridge updates are allways scheduled\n self.logger.debug(\"parse_item: configured group item = {}\".format(conf_data))\n\n if conf_data['function'] != 'reachable':\n return self.update_item\n return",
"def processItem(self,entry):\n pass",
"def parse_item(self, response):\n self.shutdown_on_error()\n item = ItemLoader(ApartmentItem(), response=response)\n item.add_value('url', response.url)\n\n item.add_css('title', 'div.text_data > h2::text')\n item.add_css('availability', 'div.row > div.text_data > p::text')\n item.add_css('description', 'div.object_details div.col_left p::text')\n item.add_value('neighborhood',\n response.css('div.object_meta div.container div.text_data p strong::text').extract()[0])\n item.add_xpath('address', \"//li[@class='map']/a/@href\")\n\n keys = response.css('div.object_meta table.object_meta_data th::text').extract()\n values = response.css('div.object_meta table.object_meta_data td::text').extract()\n features = dict(zip(keys, values))\n item.add_value('warm_rent', features.get('Rent'))\n item.add_value('size', features.get('Size'))\n item.add_value('rooms', features.get('Room/s'))\n\n return item.load_item()",
"def _parse_golem_item(self):\n golem_item = None\n golem_header = int_from_bbytes(self._reader.read(2))\n has_golem = bool(int_from_lbytes(self._reader.read(1)))\n if has_golem and golem_header == self._GOLEM_ITEM_HEADER:\n golem_item = self._parse_items(skip_items_header=True)[0]\n return golem_item",
"def _parse_description(self, item):\n try:\n return item.get(\"Meeting Location\").split(\"\\n\")[1].split(\"--em--\")[1]\n except IndexError:\n return \"\"",
"def handleRIGITEM(self,token):\n\t\tif token == 'DATE': self.createDateObject(token); return # for all date at the start\n\t\tif token == 'RIGS': return\n\t\tif token == '/': self.state = self.previousState; return\n\t\tif token == '&RIG': \n\t\t\tself.lastRigItem = self.lastDate.addRig()\n\t\t\tself.state = self.ST_RIG_ITEM\n\t\t\tvalue = self.scanner.read() \t\t # Get the next item \n\t\t\tif value[1] == '/':\n\t\t\t\tself.state = self.previousState\n\t\t\t\treturn\n\t\t\tvalue = self.scanner.produce(value) # Get the actual name\n\t\t\treturn\n\t\tvalue = self.scanner.read() \t\t # Get the =\n\t\tvalue = self.scanner.read() \t\t # Get the value of the token..\n\t\t# ###################################################\n\t\t# TODO:\n\t\t# Actually, I only allow certain keywords for rigs. \n\t\t# The unrecognized keywords are ignored.\n\t\t# ###################################################\n\t\tif self.lastRigItem <> None: \n\t\t\ttoken = token.upper()\n\t\t\tif token in cRigAllowedKeywords: \n\t\t\t\tself.lastRigItem.setKeywordValue(token,value[1])\n\t\t\telse:\n\t\t\t\tself.addErrorMessage(token,value[1])",
"def _parse_title(self, item):\n title = item[\"subject\"]\n return title",
"def parse_event(self, event):",
"def parse_row(self, row):\n return LineItem(row)",
"def map_listjobs(item):\n fields = item.split()\n\n fields = [x.split(\":\", 1)[-1] for x in fields]\n\n return tuple( fields )",
"def parse_item(self, item_json):\n track_id, source = item_json['track_id'], item_json['source']\n return get_or_create(db.session, Item, track_id=track_id, source=source)",
"def _ParseFileEntry(self, mediator, file_entry):",
"def _parse(self):\n self.title = self._extract_title()\n self.creator_name = self._extract_creator_name()\n self.album_name = self._extract_album_name()\n self.track_number = self._extract_track_number()\n self.duration = self._extract_duration()\n self.isrcs = self._extract_isrcs()",
"def parse_move(s):",
"def parse_product(obj):",
"def _parse_entry(self,entry):\n item_meta={'title':entry.title,\n 'description':entry.description,\n 'category':entry.category,\n 'tags':entry.tags,\n 'page_url':entry.url,\n 'lq_url':None,\n 'hq_url':None,\n 'hd_url':None,\n 'search-id':self.search_id,\n 'source':'5',}\n self._logger.debug('Video Metadata: %s',item_meta)\n return item_meta",
"def _parse_start(self, item):\n date_line = \" \".join(item.split(\"\\n\")[1:])\n date_match = re.search(r\"[A-Z][a-z]{2,8} \\d{1,2}\", date_line)\n if not date_match:\n return\n year_str = str(datetime.now().year)\n year_match = re.search(r\"20\\d{2}\", date_line)\n if year_match:\n year_str = year_match.group()\n\n date_str = \" \".join([date_match.group().replace(\",\", \"\"), year_str])\n time_match = re.search(r\"\\d{1,2}:\\d{2} ?[apm\\.]{2,4}\", item, flags=re.I)\n time_str = \"12:00am\"\n if time_match:\n time_str = re.sub(r\"[ \\.]\", \"\", time_match.group())\n try:\n return datetime.strptime(\" \".join([date_str, time_str]), \"%B %d %Y %I:%M%p\")\n except ValueError:\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse an item group
|
def item_group():
yield keyword("group")
yield normalspaces()
qstring = yield quoted
yield normalspaces()
yield string("(")
yield normalspaces()
group = yield sepBy(rollout_item, normalspaces())
yield normalspaces()
yield string(")")
return s.Construct(s.ROLLOUT_GROUP, qstring, group)
|
[
"def Item(self) -> Group:",
"def parse_item(self):\n msg(\"parsing u.item\")\n lines = file('/'.join((self.datadir,\"u.item\"))).read().split(\"\\n\")\n infos = [line.replace('||','|').split('|') for line in lines if line]\n for info in infos:\n movie_id = int(info[0])\n if len(info[1].rstrip(')').rsplit(' (',1))==2:\n title, date1 = info[1].rstrip(')').rsplit(' (',1)\n else:\n title=info[1]\n date1 = ''\n release = 0\n if info[2]:\n release = time.strptime(info[2],\"%d-%b-%Y\")\n genres=info[-19:]\n self.item_info[movie_id]=(title,date1,release,info[3],genres)\n for i in xrange(len(genres)):\n if int(genres[i]) == 1:\n self.genre_by_item[i].append(movie_id)",
"def parse_group(self, element: etree.Element) -> Dict:\n\n if element is None:\n return {}\n\n group = {\n \"id\": int(self._eav(element=element, attribute=\"id\")),\n \"url\": self._eav(element=element, attribute=\"url\"),\n \"name\": self._et(element=element),\n }\n\n return group",
"def _itemGroup(separator, items):\n group = []\n\n for item in items:\n if separator(item):\n yield group\n group = [item]\n else:\n group.append(item)\n\n yield group",
"def _get_groups(self, item, field):\n return [group.strip() for group in item.get(field, '').split(',')]",
"def _ParseItem(cls, parser, data):\n value = parser[1].unpack(data)\n if len(parser) < 3:\n return parser[0]._make(value)\n result = []\n idx = 0\n for entry in parser[2]:\n offset, count = entry[:2]\n result.extend(value[idx:offset])\n if len(entry) == 2:\n result.append(tuple(value[offset:offset+count]))\n idx = offset + count\n continue\n subparser = entry[2]\n if not count:\n result.append(cls._ParseItem(subparser, value[offset]))\n else:\n val_list = value[offset:offset+count]\n result.append(tuple([cls._ParseItem(subparser, x) for x in val_list]))\n idx += offset + (count or 1)\n result.extend(value[idx:])\n return parser[0]._make(result)",
"def PopulateItem(self, item):\n item_output = {}\n self.CopyToDict(item,\n item_output,\n ['name', 'type', 'min_size', 'max_size', 'multiplier'])\n\n if item['type'] == 'group':\n children = []\n for child_item in item['items']:\n child_item_output = self.PopulateItem(child_item)\n children.append(child_item_output)\n item_output['items'] = children\n\n labeled_values = []\n for value, label in item.get('labels', []):\n labeled_value_output = {\n 'value': value,\n 'label': label,\n }\n labeled_values.append(labeled_value_output)\n if labeled_values:\n item_output['enums'] = labeled_values\n\n ranges = []\n for min_value, max_value in item.get('range', []):\n range_output = {\n 'min': min_value,\n 'max': max_value,\n }\n ranges.append(range_output)\n if ranges:\n item_output['ranges'] = ranges\n return item_output",
"def parse_groups(text):\n counter = 1\n scores = []\n garbage_scores = []\n i = 1\n while i < len(text):\n # Inside a group. First, check if we have an inner group or garbage.\n # Handle inner group.\n if text[i] == \"{\":\n i += 1\n counter += 1\n continue\n # Handle garbage - skip it.\n if text[i] == \"<\":\n (i, gscore) = parse_garbage(text, i+1)\n garbage_scores.append(gscore)\n continue\n # If we get here, we need to scan. Stop scanning at , or }\n m = re.match(r\"[^,}]*[,}]\", text[i:])\n if not m:\n raise ValueError(\"Bad syntax (after position {}) in group: Expecting , or }}\".format(i))\n i += m.end(0) - m.start(0)\n # If we ended on a }, process end-of-group. Save its score.\n if text[i-1] == \"}\":\n scores.append(counter)\n counter -= 1\n if counter < 0:\n raise ValueError(\"Bad syntax (position {}) in group: Too many }} closing braces\".format(i))\n # Special case - skip over commas after }'s - saves us one loop.\n if i < len(text) and text[i] == \",\":\n i += 1\n return (counter, scores, garbage_scores)",
"def parse_item(self, item):\n if self.has_iattr(item.conf, 'hue2_id') and self.has_iattr(item.conf, 'hue2_function'):\n self.logger.debug(\"parse item: {}\".format(item))\n conf_data = {}\n conf_data['id'] = self.get_iattr_value(item.conf, 'hue2_id')\n conf_data['resource'] = self.get_iattr_value(item.conf, 'hue2_resource')\n conf_data['function'] = self.get_iattr_value(item.conf, 'hue2_function')\n conf_data['item'] = item\n self.plugin_items[item.path()] = conf_data\n if conf_data['resource'] == 'sensor':\n # ensure that the scheduler for sensors will be started if items use sensor data\n self.sensor_items_configured = True\n if conf_data['resource'] == 'light':\n # ensure that the scheduler for sensors will be started if items use sensor data\n self.light_items_configured = True\n\n if conf_data['resource'] == 'group':\n # bridge updates are allways scheduled\n self.logger.debug(\"parse_item: configured group item = {}\".format(conf_data))\n\n if conf_data['function'] != 'reachable':\n return self.update_item\n return",
"def _process_target_group_tags(self, item, target_group):\n target_group['Properties']['Tags'] = []\n\n tag = {}\n tag['Key'] = \"Environment\"\n tag['Value'] = self.infos.environment\n target_group['Properties']['Tags'].append(tag)\n\n tag = {}\n tag['Key'] = \"Project\"\n tag['Value'] = self.infos.project\n target_group['Properties']['Tags'].append(tag)\n\n tag = {}\n tag['Key'] = \"Service\"\n tag['Value'] = self.infos.service_name\n target_group['Properties']['Tags'].append(tag)\n\n tag = {}\n tag['Key'] = \"Version\"\n tag['Value'] = self.infos.service_version\n target_group['Properties']['Tags'].append(tag)\n\n tag = {}\n tag['Key'] = \"Container\"\n tag['Value'] = item['ContainerName']\n target_group['Properties']['Tags'].append(tag)\n\n tag = {}\n tag['Key'] = \"ContainerPort\"\n tag['Value'] = str(item['ContainerPort'])\n target_group['Properties']['Tags'].append(tag)\n\n tag = {}\n tag['Key'] = \"CanaryRelease\"\n tag['Value'] = self.infos.green_infos.canary_release\n target_group['Properties']['Tags'].append(tag)",
"def parse_group_names_and_ids(response):\r\n soup = BeautifulSoup(response.text, features=\"html.parser\")\r\n group_options = soup.find(\"div\", {\"class\": \"select-group\"}).find_all(\"option\")\r\n groups = {option.text: option.attrs[\"value\"] for option in group_options}\r\n return groups",
"def _itemGroup2(separator, items):\n notSeparator = lambda x: False if separator(x) else True # negate separator\n group = []\n\n for item in dropwhile(notSeparator, items):\n if separator(item):\n if group != []:\n yield group\n group = [item]\n else:\n group.append(item)\n\n yield group",
"def _read_groups(self, node):\n for child in node:\n assert child.tag == 'group'\n gname = child.attrib['name']\n assert gname not in self.groups\n group = []\n self.groups[gname] = group\n for child2 in child:\n group.append(enum_name(child2.attrib['name']))",
"def group2entry(\n group: str, linesep: str\n) -> Tuple[str, Union[List[Union[str, int, float]], Dict[str, Union[str, int, float]]]]:\n # If there are no values for this section, return a default dict of\n # integers, so that any property a user requests will be 0\n if linesep not in group:\n # Remove the brackets from the key\n key = group[1:-1]\n return key, collections.defaultdict(int)\n\n raw_key, raw_value = group.split(linesep, 1)\n\n # Remove the brackets from the key\n key = raw_key[1:-1]\n\n values = parse_raw_value(raw_value, linesep)\n\n if not key.startswith(\"Distribution of variants on\"):\n valued = {k: convert(v) for k, v in values}\n return key, valued\n\n return key, [convert(v) for _, v in values]",
"def _parse_items(self, skip_items_header=False):\n if skip_items_header:\n items_count = 1\n else:\n items_header = int_from_bbytes(self._reader.read(2))\n if items_header != self._ITEMS_HEADER:\n raise ItemParseError(\n f'Invalid items header: 0x{items_header:04X}'\n )\n items_count = int_from_lbytes(self._reader.read(2))\n\n items = []\n\n while items_count:\n item = Item(self._reader)\n if item.location_id == Item.LOC_SOCKETED:\n socketed_item = items[-1]\n socket_attrs = None\n\n if socketed_item.itype == Item.T_WEAPON:\n socket_attrs = self._items_data.get_weapon_sock_attrs(\n item.code\n )\n elif socketed_item.itype == Item.T_ARMOR:\n socket_attrs = self._items_data.get_armor_sock_attrs(\n item.code\n )\n elif socketed_item.itype == Item.T_SHIELD:\n socket_attrs = self._items_data.get_shield_sock_attrs(\n item.code\n )\n\n if socket_attrs is None:\n # Item is a jewel.\n if item.code == 'jew':\n socketed_item.magic_attrs.extend(item.magic_attrs)\n socketed_item.socketed_items.append(item)\n continue\n raise ItemParseError(f'Unknown item: {item.code}')\n\n for attr in socket_attrs:\n attr_name = self._items_data.get_magic_attr(attr['id'])[\n 'name'\n ]\n socketed_item.magic_attrs.append(\n attr_name.format(*attr['values'])\n )\n socketed_item.socketed_items.append(item)\n\n else:\n if not item.is_simple and item.inserted_items_count:\n items_count += item.inserted_items_count\n items.append(item)\n items_count -= 1\n\n return items",
"def parse_item(self, item):\n # Create a dictionary from values for each field\n parsed_data = {}\n\n for field_name in self.fields:\n # A field-name may be mapped to another identifier on the source,\n # it could be a XML path or a CSV column name / position.\n # Defaults to the field-name itself.\n source_name = self.field_map.get(field_name, field_name)\n\n # Uses a custom method \"parse_%(field_name)\"\n # or get the value from the item\n parse = getattr(self, 'parse_%s' % field_name, None)\n if parse:\n value = parse(item, field_name, source_name)\n else:\n value = self.get_value(item, source_name)\n\n # Add the value to the parsed data\n parsed_data[field_name] = value\n return parsed_data",
"def test_groups(self):\n p = field_list_pattern\n \n m = re.search(p, '\\n\\n:A heading: \\tSome text\\n\\n', re.M)\n self.assertIsNotNone(m)\n self.assertEqual(m.group(1), 'A heading')\n self.assertEqual(m.group(2), 'Some text')\n \n m = re.search(p, ':heading::text : with : colons')\n self.assertIsNotNone(m)\n self.assertEqual(m.group(1), 'heading')\n self.assertEqual(m.group(2), ':text : with : colons')\n \n m = re.search(p, ':*: text')\n self.assertIsNotNone(m)\n self.assertEqual(m.group(1), '*')\n self.assertEqual(m.group(2), 'text')",
"def __route_segment_to_parser(self, segment):\n if segment.startswith(GroupHeader().id.name):\n self.__parse_group_header(segment)\n elif segment.startswith(GroupTrailer().id.name):\n self.__parse_group_trailer(segment)\n elif segment.startswith(EdiDocument().interchange.trailer.id.name):\n self.__parse_interchange_trailer(segment)\n elif segment.startswith(TransactionSetHeader().id.name):\n self.__parse_transaction_set_header(segment)\n elif segment.startswith(TransactionSetTrailer().id.name):\n self.__parse_transaction_set_trailer(segment)\n else:\n pass",
"def test_division_logistics_item_groups_get(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a rollout handler
|
def rollout_handler():
yield keyword("on")
yield normalspaces()
handlername = yield var_name()
yield normalspaces()
varn = yield var_name()
yield normalspaces()
varn2 = yield optional(var_name())
yield normalspaces()
varn3 = yield optional(var_name())
yield normalspaces()
yield keyword("do")
yield normalspaces()
expr = yield expression
return s.Construct(s.ROLLOUT_HANDLER, handlername, varn, varn2, varn3, expr)
|
[
"def parse_event(self, event):",
"def _parse(self):\n\t\t\n\t\tself.reply_msg = MessageHandler.fire_handlers(self)",
"def handle_raw_endpoint_event(self, msg):\n classname = msg.data.keys()[0]\n attr = msg.data[classname]\n parsed_msg = msg.wf.ept_epm_parser.parse(classname, attr, attr[\"_ts\"])\n # ensure we copy over msg.now and msg.wf from original msg to parsed_msg\n # (note these are added before handler is called and not in original eptMsgWorker event)\n setattr(parsed_msg, \"wf\", msg.wf)\n setattr(parsed_msg, \"now\", msg.now)\n parsed_msg.seq = msg.seq\n logger.debug(parsed_msg)\n self.handle_endpoint_event(parsed_msg)",
"def _ParseFileEntry(self, mediator, file_entry):",
"def __route_segment_to_parser(self, segment):\n if segment.startswith(GroupHeader().id.name):\n self.__parse_group_header(segment)\n elif segment.startswith(GroupTrailer().id.name):\n self.__parse_group_trailer(segment)\n elif segment.startswith(EdiDocument().interchange.trailer.id.name):\n self.__parse_interchange_trailer(segment)\n elif segment.startswith(TransactionSetHeader().id.name):\n self.__parse_transaction_set_header(segment)\n elif segment.startswith(TransactionSetTrailer().id.name):\n self.__parse_transaction_set_trailer(segment)\n else:\n pass",
"def handle(self, handler: Handler):\n pass",
"def handle(event={}, context={}):\n LoLNewsHandler().run()\n return 'ok'",
"def addHandler(identifier, handler): #@NoSelf",
"def svn_file_invoke_rev_handler_old(*args) -> \"svn_txdelta_window_handler_t *, void **\":\n return _delta.svn_file_invoke_rev_handler_old(*args)",
"def json_handler(cls, fn: Handler) -> MessageHandler:\n return lambda message: fn(**cls.parse_json(message))",
"def rollout_def():\n yield keyword(\"rollout\")\n yield normalspaces()\n vname = yield var_name()\n yield normalspaces()\n qstring = yield quoted\n yield normalspaces()\n vnop = yield sepBy(named_argument, normalspaces())\n yield normalspaces()\n yield string(\"(\")\n yield normalspaces()\n clauses = yield sepBy(rollout_clause, normalspaces())\n yield normalspaces()\n yield string(\")\")\n return s.Construct(s.ROLLOUT_DEF, vname, qstring, vnop, clauses)",
"def parse(payload: bytes):\n return parser.EventParser(payload).parse()",
"def _parse_line(\n self, line: str, handler_lookup: Dict[str, Callable[[str, Path], str]],\n path_file: Optional[Path] = None,\n ) -> List[str]:\n lines: List[str] = []\n if '{cte}' in line and self.state == self.state_auto: # end\n self.end()\n elif '{cts}' in line: # start\n self.start_auto()\n matches = [text_match for text_match in handler_lookup if text_match in line]\n if len(matches) == 1:\n lines.extend(handler_lookup[matches[0]](line, path_file))\n else:\n logger.error('Could not parse: {line}', line=line)\n lines.append(line)\n self.end()\n elif self.state == self.state_user:\n lines.append(line)\n # else: discard the lines in the auto-section\n return lines",
"def _try_pop_handler(self, indent):\n if indent in self.handlers:\n self.handlers.pop(indent)()",
"def handle_stream(events):\n events = events.decode().split('\\n')\n for event in events:\n if \":\" in event:\n (field, value) = event.split(\":\", 1)\n field = field.strip()\n if field == 'data':\n try:\n data = json.loads(value)\n except ValueError as error:\n raise error\n else:\n data['time'] = datetime.datetime.now(tz=pytz.UTC).isoformat()\n binder['events'].append(data)\n binder['phase'] = data.get('phase', '')\n binder['id'] = data.get('url', '')\n binder['token'] = data.get('token', '')",
"def travis_handler(request, response):\n payload = urllib.unquote_plus(request.body).split('payload=')[1]\n signature = request.headers.get('signature')\n message, code = webhook_handler(payload, signature)\n return code, [(\"Content-Type\", \"text/plain\")], message",
"def handler(self, sub_command, args):\n pass",
"def svn_file_invoke_rev_handler(*args) -> \"svn_txdelta_window_handler_t *, void **\":\n return _delta.svn_file_invoke_rev_handler(*args)",
"def my_event_handler(sender, event):\n print(\"Event:\")\n print(\" sender:\", sender)\n print(\" event.event:\", event.event)\n print(\" event.parsed:\", event.parsed)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a rollout clause
|
def rollout_clause():
clause = yield (local_decl ^
global_decl ^
function_def ^
struct_def ^
mousetool_def ^
item_group ^
rollout_item ^
rollout_handler)
# this is weird, why this clause?
# (in the macroscript thing we use it as a bundle of things...
# not sure I remember why either)
return s.Construct(s.ROLLOUT_CLAUSE, clause)
|
[
"def rollout_def():\n yield keyword(\"rollout\")\n yield normalspaces()\n vname = yield var_name()\n yield normalspaces()\n qstring = yield quoted\n yield normalspaces()\n vnop = yield sepBy(named_argument, normalspaces())\n yield normalspaces()\n yield string(\"(\")\n yield normalspaces()\n clauses = yield sepBy(rollout_clause, normalspaces())\n yield normalspaces()\n yield string(\")\")\n return s.Construct(s.ROLLOUT_DEF, vname, qstring, vnop, clauses)",
"def rollout_handler():\n yield keyword(\"on\")\n yield normalspaces()\n handlername = yield var_name()\n yield normalspaces()\n varn = yield var_name()\n yield normalspaces()\n varn2 = yield optional(var_name())\n yield normalspaces()\n varn3 = yield optional(var_name())\n yield normalspaces()\n yield keyword(\"do\")\n yield normalspaces()\n expr = yield expression\n return s.Construct(s.ROLLOUT_HANDLER, handlername, varn, varn2, varn3, expr)",
"def parse_move(s):",
"def _parse_recent_log_args(token):\r\n tokens = token.split_contents()\r\n if len(tokens) != 4:\r\n msg = \"Wrong number of arguments for %s.\"\r\n raise template.TemplateSyntaxError(msg % tokens[0])\r\n elif tokens[2] != 'as':\r\n msg = \"Wrong syntax for %s: third argument must be the keyword 'as'\"\r\n raise template.TemplateSyntaxError(msg % tokens[0])\r\n return (tokens[1], tokens[3])",
"def svn_txdelta_parse_svndiff(*args) -> \"svn_stream_t *\":\n return _delta.svn_txdelta_parse_svndiff(*args)",
"def _parse_s3_command(statement):\n statement = strip(statement)\n params = dict()\n\n # deleting 'unload'\n tokens = statement.split()[1:]\n\n # Fetching select statement\n select_statement = \"\"\n error_flag = False\n for index, token in enumerate(tokens):\n if token.lower() == \"to\":\n tokens = tokens[index:]\n break\n select_statement += \" \" + token\n params[\"select_statement\"] = select_statement\n if error_flag:\n raise ValueError(\n (\n \"Possibly malformed SELECT Statement. \"\n \"Statement = {statement}\"\n \"Redshift fixture only supports S3 Unload statements with the following syntax: \"\n \"UNLOAD ('select-statement') TO 's3://object-path/name-prefix'\"\n \"authorization 'aws_access_key_id=<aws_access_key_id>;\"\n \"aws_secret_access_key=<aws_secret_access_key>'\"\n \"[GZIP] [DELIMITER [ AS ] 'delimiter-char']\"\n ).format(statement=statement)\n )\n\n # Fetching s3_uri\n if tokens.pop(0).lower() != \"to\":\n raise ValueError(\n (\n \"Possibly malformed S3 URI Format. \"\n \"Statement = {statement}\"\n \"Redshift fixture only supports S3 Unload statements with the following syntax: \"\n \"UNLOAD ('select-statement') TO 's3://object-path/name-prefix'\"\n \"authorization 'aws_access_key_id=<aws_access_key_id>;\"\n \"aws_secret_access_key=<aws_secret_access_key>'\"\n \"[GZIP] [DELIMITER [ AS ] 'delimiter-char']\"\n ).format(statement=statement)\n )\n params[\"s3_uri\"] = strip(tokens.pop(0))\n\n # Fetching authorization\n for token in tokens:\n if \"aws_access_key_id\" in token.lower() or \"aws_secret_access_key\" in token.lower():\n # This is because of the following possibiliteis:\n # ... [with ]authorization[ AS] 'aws_access_key_id=x;aws_secret_access_key=y'\n # OR\n # ... [with ]authorization[ AS] 'aws_secret_access_key=y;aws_access_key_id=x'\n # OR\n # ... [with ]authorization[ AS] 'aws_secret_access_key=y;\\naws_access_key_id=x'\n # OR\n # ... [with ]authorization[ AS] 'aws_secret_access_key=y; aws_access_key_id=x'\n # Supportred AWS authorization format:\n # [with ]authorization[ AS] 'aws_secret_access_key=y; aws_access_key_id=x'\n # No Support for additional credential formats, eg IAM roles, etc, yet.\n credentials_list = token.split(\";\")\n for credentials in credentials_list:\n if \"aws_access_key_id\" in credentials:\n params[\"aws_access_key_id\"] = credentials.split(\"=\")[-1]\n elif \"aws_secret_access_key\" in credentials:\n params[\"aws_secret_access_key\"] = credentials.split(\"=\")[-1]\n else:\n raise ValueError(\n (\n \"Possibly malformed AWS Credentials Format. 
\"\n \"Statement = {statement}\"\n \"Redshift fixture only supports S3 Copy statements with the following \"\n \"syntax: COPY <table_name> FROM [(column 1, [column2, [..]])] '\"\n \"<file path on S3 bucket>' \"\n \"credentials 'aws_access_key_id=<aws_access_key_id>;\"\n \"aws_secret_access_key=<aws_secret_access_key>' \"\n \"Supportred AWS credentials format: \"\n \"[with ]credentials[ AS] 'aws_secret_access_key=y; aws_access_key_id=x'\"\n \" No Support for additional credential formats, eg IAM roles, etc, yet.\"\n ).format(statement=statement)\n )\n\n # Fetching GZIP Flag\n params[\"gzip\"] = False\n for token in tokens:\n if strip(token.lower()) == \"gzip\":\n params[\"gzip\"] = True\n\n # Fetching delimiter\n for index, token in enumerate(tokens):\n if token.lower() == \"delimiter\":\n try:\n if tokens[index + 1].lower() != \"as\":\n params[\"delimiter\"] = strip(tokens[index + 1])\n else:\n params[\"delimiter\"] = strip(tokens[index + 2])\n except IndexError:\n raise ValueError(\n (\n \"Possibly malformed Delimiter Format. \"\n \"Statement = {statement}\"\n \"Redshift fixture only supports S3 Unload statements with the following\"\n \"syntax: UNLOAD ('select-statement') TO 's3://object-path/name-prefix'\"\n \"authorization 'aws_access_key_id=<aws_access_key_id>;\"\n \"aws_secret_access_key=<aws_secret_access_key>'\"\n \"[GZIP] [DELIMITER [ AS ] 'delimiter-char']\"\n ).format(statement=statement)\n )\n return params",
"def rollout_item():\n # pylint: disable=line-too-long\n kw = yield keyword(\"dotnetcontrol|hyperlink|subrollout|multilistbox|imgtag|curvecontrol|angle|label|button|edittext|combobox|dropdownList|listbox|spinner|slider|pickbutton|radiobuttons|checkbox|checkbutton|colorPicker|mapbutton|materialbutton|progressbar|timer|bitmap|groupbox\")\n yield normalspaces()\n var = yield var_name()\n yield normalspaces()\n label = yield optional(quoted)\n yield normalspaces()\n args = yield sepBy(named_argument, normalspaces())\n return s.Construct(s.ROLLOUT_ITEM, kw, var, label, args)",
"def parse_select(stream):\n # first apply ()\n stream = group(stream, [Parenthesis])\n \n # then split in select from where for first one\n stream = group_select(stream)\n \n return stream",
"def parse_add_or_mod():\n first = to_py(sys.stdin.readline())\n second = sys.stdin.readline()\n if second == '':\n # There was only one line, i.e. this is an add operation.\n return ('add', None, first)\n # There were two lines, this is a mod operation.\n return ('mod', first, to_py(second))",
"def rollout_getAction(ast):\n def rollout_policy(s, tree):\n return ast.random_action()\n return rollout_policy",
"def parse_lsr(prod, text):\n lines = text.split(\"\\n\")\n if len(lines) < 2:\n prod.warnings.append(\n (\"LSR text is too short |%s|\\n%s\")\n % (text.replace(\"\\n\", \"<NL>\"), text)\n )\n return None\n lsr = LSR()\n lsr.product = prod\n lsr.text = text\n tokens = lines[0].split()\n h12 = tokens[0][:-2]\n mm = tokens[0][-2:]\n ampm = tokens[1]\n dstr = f\"{h12}:{mm} {ampm} {lines[1][:10]}\"\n lsr.valid = datetime.datetime.strptime(dstr, \"%I:%M %p %m/%d/%Y\")\n lsr.assign_timezone(prod.tz, prod.z)\n # Check that we are within bounds\n if lsr.utcvalid > (prod.valid + FUTURE_THRESHOLD) or lsr.utcvalid > (\n utc() + FUTURE_THRESHOLD\n ):\n prod.warnings.append(\n \"LSR is from the future!\\n\"\n f\"prod.valid: {prod.valid} lsr.valid: {lsr.valid}\\n\"\n f\"{text}\\n\"\n )\n return None\n\n lsr.wfo = prod.source[1:]\n\n lsr.typetext = lines[0][12:29].strip()\n if lsr.typetext.upper() not in reference.lsr_events:\n prod.warnings.append(f\"Unknown lsr.typetext |{lsr.typetext}|\\n{text}\")\n return None\n\n lsr.city = lines[0][29:53].strip()\n\n tokens = lines[0][53:].strip().split()\n lat = float(tokens[0][:-1])\n lon = 0 - float(tokens[1][:-1])\n if lon <= -180 or lon >= 180 or lat >= 90 or lat <= -90:\n prod.warnings.append(f\"Invalid Geometry Lat: {lat} Lon: {lon}\\n{text}\")\n return None\n lsr.geometry = ShapelyPoint((lon, lat))\n\n lsr.consume_magnitude(lines[1][12:29].strip())\n if lsr.magnitude_f is not None and math.isnan(lsr.magnitude_f):\n prod.warnings.append(f\"LSR has NAN magnitude\\n{text}\")\n return None\n lsr.county = lines[1][29:48].strip()\n if lsr.county == \"\":\n prod.warnings.append(f\"LSR has empty county\\n{text}\")\n lsr.state = lines[1][48:50].strip()\n if lsr.state == \"\":\n prod.warnings.append(f\"LSR has empty state\\n{text}\")\n lsr.source = lines[1][53:].strip()\n if lsr.source == \"\":\n prod.warnings.append(f\"LSR has empty source\\n{text}\")\n if len(lines) > 2:\n meat = \" \".join(lines[2:]).strip()\n if meat.strip() != \"\":\n lsr.remark = \" \".join(meat.split())\n if lsr.typetext.upper() == \"ICE STORM\" and lsr.magnitude_f is None:\n val = _icestorm_remark(lsr.remark)\n if val is not None:\n lsr.magnitude_f = val\n lsr.magnitude_qualifier = \"U\"\n lsr.magnitude_units = \"INCH\"\n return lsr",
"def some(pred):\n\n @Parser\n def _some(tokens, s):\n if s.pos >= len(tokens):\n raise NoParseError('no tokens left in the stream', s)\n else:\n t = tokens[s.pos]\n if pred(t):\n pos = s.pos + 1\n s2 = State(pos, max(pos, s.max))\n if debug:\n log.debug('*matched* \"%s\", new state = %s' % (t, s2))\n return t, s2\n else:\n if debug:\n log.debug('failed \"%s\", state = %s' % (t, s))\n raise NoParseError('got unexpected token', s)\n\n _some.name = '(some)'\n return _some",
"def parse_rule(self):\n p = self.parse_symbol()\n self.consume(\":\")\n symbols, action = self.parse_rhs()\n self.grammar.add_production(p, symbols, action)\n while self.has_consumed(\"|\"):\n symbols, action = self.parse_rhs()\n self.grammar.add_production(p, symbols, action)\n self.consume(\";\")",
"def parse_line(self, line: str) -> Operation:\n words = line.replace(',', ':')\\\n .replace(': ', ' ')\\\n .replace(':', ' ')\\\n .split(' ')\n\n operation, *args = words\n\n if Keyword(operation) not in LANGUAGE_OPTYPES:\n raise BadOperationIdentifier(operation)\n\n op_type = LANGUAGE_OPTYPES[Keyword(operation)]\n\n if op_type is OperationType.Nop:\n return Operation(\n op_type=op_type,\n op_word=operation,\n op_args=[NOP_ARG, NOP_ARG]\n )\n\n elif op_type is OperationType.Unary:\n argument = args[0]\n is_label_or_jump = operation in LABELS_OR_JUMPS\n arg1 = self.parse_argument(argument, is_label_or_jump)\n\n if operation == 'NOT':\n op_args = [arg1, arg1]\n else:\n op_args = [arg1, NOP_ARG]\n\n return Operation(\n op_type=op_type,\n op_word=operation,\n op_args=op_args\n )\n\n # Binary operation\n arguments = [args[0], args[1]]\n\n arg12 = [\n self.parse_argument(arg)\n for arg in arguments\n ]\n\n return Operation(\n op_type=op_type,\n op_word=operation,\n op_args=arg12\n )",
"def parse_layup(layup_input):\n\n\tlayup = layup_input\n\torientation=[]\n\tsymmetric = False\n\tif layup.endswith('s'):\n\t\tlayup=layup[:-1]\n\t\tif 's' in layup:\n\t\t\tprint \"s must be added at the very end. You wrote : %s\" % layup_input\n\t\t\tsys.exit(1)\n\t\tsymmetric = True\n\tlayup_slash = layup.split('/')\n\n\tfor i in layup_slash:\n\t\tcheck_p = False\n\t\t#check for repetition\n\t\trep = i.split('_')\n\t\tif len(rep)==2:\n\t\t\torientation.extend([int(rep[0])] * int(rep[1]))\n\t\t\tcontinue\n\t\telif len(rep)==1:\n\t\t\tpass\n\t\t\t# orientation.append(int(rep))\n\t\telse:\n\t\t\tprint \"%r is not a valid orientation\" % i\n\t\t\tsys.exit(1)\n\n\t\t#check for plus or minus. step not reached if i contains '_'\n\t\tif 'p' in i:\n\t\t\tval = int(i.split('p')[1])\n\t\t\torientation.extend([val,-val])\n\t\telif 'm' in i:\n\t\t\tval = int(i.split('m')[1])\n\t\t\torientation.extend([-val,val])\n\t\telse:\n\t\t\torientation.append(int(i))\n\n\tif symmetric:\n\t\torientation.extend(orientation[::-1])\n\n\treturn orientation, symmetric",
"def sched_switch_parser(event, text):\n if text.count('=') == 2: # old format\n regex = re.compile(\n r'(?P<prev_comm>\\S.*):(?P<prev_pid>\\d+) \\[(?P<prev_prio>\\d+)\\] (?P<status>\\S+)'\n r' ==> '\n r'(?P<next_comm>\\S.*):(?P<next_pid>\\d+) \\[(?P<next_prio>\\d+)\\]'\n )\n parser_func = regex_body_parser(regex)\n return parser_func(event, text)\n else: # there are more than two \"=\" -- new format\n return default_body_parser(event, text.replace('==>', ''))",
"def rollouts(self):\n pass",
"def multiline_parse(s, l, t):\n return \"\\n\".join(t)",
"def _parse_lanes(self, in_lanes, out_lanes):\n north_south = []\n east_west = []\n if self.junction.junction_id == '0':\n # do the incoming first\n for in_lane in in_lanes:\n marker = in_lane.split('_')[0]\n if marker == '1' or marker == '0l' or marker == '0r':\n east_west.append(in_lane)\n elif marker == '0u' or marker == '0d':\n north_south.append(in_lane)\n else:\n print(\"unrecognized lane :c\")\n exit(2)\n\n elif self.junction.junction_id == '1':\n # incoming\n for in_lane in in_lanes:\n marker = in_lane.split('_')[0]\n if marker == '0' or marker == '1l' or marker == '1r':\n east_west.append(in_lane)\n elif marker == '1u' or marker == '1d':\n north_south.append(in_lane)\n else:\n print(\"unrecognized lane :c\")\n exit(2)\n\n else:\n print(\"unrecognized agent :c\")\n exit()\n\n return north_south, east_west"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a rollout def
|
def rollout_def():
yield keyword("rollout")
yield normalspaces()
vname = yield var_name()
yield normalspaces()
qstring = yield quoted
yield normalspaces()
vnop = yield sepBy(named_argument, normalspaces())
yield normalspaces()
yield string("(")
yield normalspaces()
clauses = yield sepBy(rollout_clause, normalspaces())
yield normalspaces()
yield string(")")
return s.Construct(s.ROLLOUT_DEF, vname, qstring, vnop, clauses)
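# A minimal usage sketch, assuming rollout_def is wrapped in the parsec-style
# @generate decorator used by the nested parsers in this module and that the
# library exposes a .parse() entry point; the MAXScript source is hypothetical.
src = 'rollout testRoll "Test Rollout" width:200 ( button okBtn "OK" )'
ast = rollout_def.parse(src)
# ast should come back as s.Construct(s.ROLLOUT_DEF, <rollout name>, <caption>,
# [<named arguments such as width:200>], [<parsed rollout clauses>])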
|
[
"def parse_move(s):",
"def rollout_handler():\n yield keyword(\"on\")\n yield normalspaces()\n handlername = yield var_name()\n yield normalspaces()\n varn = yield var_name()\n yield normalspaces()\n varn2 = yield optional(var_name())\n yield normalspaces()\n varn3 = yield optional(var_name())\n yield normalspaces()\n yield keyword(\"do\")\n yield normalspaces()\n expr = yield expression\n return s.Construct(s.ROLLOUT_HANDLER, handlername, varn, varn2, varn3, expr)",
"def parse_revision(operations, revision): \n if not is_trusted(operations, revision):\n raise Exception(\"Revision %s is not trusted!\" % revision)\n\n # The order of certain operations, e.g rename matter so don't use a set\n revision_description = {}\n revision_description[\"revision\"] = revision\n revision_description[\"added_dirs\"] = []\n revision_description[\"added_files\"] = []\n revision_description[\"removed\"] = []\n revision_description[\"modified\"] = []\n revision_description[\"renamed\"] = []\n revision_description[\"set_attributes\"] = []\n revision_description[\"clear_attributes\"] = []\n\n old_rev = None\n\n for line in operations.get_revision(revision):\n if line[0] == \"format_version\":\n assert(line[1] == \"1\")\n elif line[0] == \"old_revision\":\n if not \"parent\" in revision_description:\n revision_description[\"parent\"] = []\n if len(line[1]) != 0:\n revision_description[\"parent\"].append(line[1])\n old_rev = line[1]\n elif line[0] == \"new_manifest\":\n revision_description[\"manifest\"] = line[1]\n elif line[0] == \"clear\":\n revision_description[\"clear_attributes\"].append((line[1], line[3], old_rev))\n elif line[0] == \"set\":\n revision_description[\"set_attributes\"].append((line[1], line[3], line[5], old_rev))\n elif line[0] in [\"rename\", \"patch\", \"delete\", \"add_dir\", \"add_file\"]:\n pass\n else:\n print >> sys.stderr, line\n assert(False)\n\n for cert in operations.certs(revision):\n # Known cert names used by mtn, we can ignore them as they can't be converted to git\n if cert[5] in [\"suspend\", \"testresult\", \"file-comment\", \"comment\", \"release-candidate\"]:\n pass\n elif cert[5] in [\"author\", \"changelog\", \"date\", \"branch\", \"tag\"]:\n revision_description[cert[5]] = cert[7]\n if cert[5] == \"author\":\n revision_description[\"committer\"] = cert[1]\n else:\n print >> sys.stderr, \"Unknown Cert: Ignoring\", cert[5], cert[7]\n #assert(False)\n\n return revision_description",
"def rollout_item():\n # pylint: disable=line-too-long\n kw = yield keyword(\"dotnetcontrol|hyperlink|subrollout|multilistbox|imgtag|curvecontrol|angle|label|button|edittext|combobox|dropdownList|listbox|spinner|slider|pickbutton|radiobuttons|checkbox|checkbutton|colorPicker|mapbutton|materialbutton|progressbar|timer|bitmap|groupbox\")\n yield normalspaces()\n var = yield var_name()\n yield normalspaces()\n label = yield optional(quoted)\n yield normalspaces()\n args = yield sepBy(named_argument, normalspaces())\n return s.Construct(s.ROLLOUT_ITEM, kw, var, label, args)",
"def parse_lsr(prod, text):\n lines = text.split(\"\\n\")\n if len(lines) < 2:\n prod.warnings.append(\n (\"LSR text is too short |%s|\\n%s\")\n % (text.replace(\"\\n\", \"<NL>\"), text)\n )\n return None\n lsr = LSR()\n lsr.product = prod\n lsr.text = text\n tokens = lines[0].split()\n h12 = tokens[0][:-2]\n mm = tokens[0][-2:]\n ampm = tokens[1]\n dstr = f\"{h12}:{mm} {ampm} {lines[1][:10]}\"\n lsr.valid = datetime.datetime.strptime(dstr, \"%I:%M %p %m/%d/%Y\")\n lsr.assign_timezone(prod.tz, prod.z)\n # Check that we are within bounds\n if lsr.utcvalid > (prod.valid + FUTURE_THRESHOLD) or lsr.utcvalid > (\n utc() + FUTURE_THRESHOLD\n ):\n prod.warnings.append(\n \"LSR is from the future!\\n\"\n f\"prod.valid: {prod.valid} lsr.valid: {lsr.valid}\\n\"\n f\"{text}\\n\"\n )\n return None\n\n lsr.wfo = prod.source[1:]\n\n lsr.typetext = lines[0][12:29].strip()\n if lsr.typetext.upper() not in reference.lsr_events:\n prod.warnings.append(f\"Unknown lsr.typetext |{lsr.typetext}|\\n{text}\")\n return None\n\n lsr.city = lines[0][29:53].strip()\n\n tokens = lines[0][53:].strip().split()\n lat = float(tokens[0][:-1])\n lon = 0 - float(tokens[1][:-1])\n if lon <= -180 or lon >= 180 or lat >= 90 or lat <= -90:\n prod.warnings.append(f\"Invalid Geometry Lat: {lat} Lon: {lon}\\n{text}\")\n return None\n lsr.geometry = ShapelyPoint((lon, lat))\n\n lsr.consume_magnitude(lines[1][12:29].strip())\n if lsr.magnitude_f is not None and math.isnan(lsr.magnitude_f):\n prod.warnings.append(f\"LSR has NAN magnitude\\n{text}\")\n return None\n lsr.county = lines[1][29:48].strip()\n if lsr.county == \"\":\n prod.warnings.append(f\"LSR has empty county\\n{text}\")\n lsr.state = lines[1][48:50].strip()\n if lsr.state == \"\":\n prod.warnings.append(f\"LSR has empty state\\n{text}\")\n lsr.source = lines[1][53:].strip()\n if lsr.source == \"\":\n prod.warnings.append(f\"LSR has empty source\\n{text}\")\n if len(lines) > 2:\n meat = \" \".join(lines[2:]).strip()\n if meat.strip() != \"\":\n lsr.remark = \" \".join(meat.split())\n if lsr.typetext.upper() == \"ICE STORM\" and lsr.magnitude_f is None:\n val = _icestorm_remark(lsr.remark)\n if val is not None:\n lsr.magnitude_f = val\n lsr.magnitude_qualifier = \"U\"\n lsr.magnitude_units = \"INCH\"\n return lsr",
"def parse(name):\n\n pass",
"def parse(self, hgvs_string):\n pass",
"def parse(cls, input):",
"def parser(data):\n\t\tds = data.split('\\n')\n\t\treturn Station(ds[1][20:], ds[2][20:], ds[3][20:], ds[4][20:], ds[5], ds[6][20:])",
"def parse_event(self, event):",
"def deserialize(transition_params):",
"def _ParseFileEntry(self, mediator, file_entry):",
"def _parse_recent_log_args(token):\r\n tokens = token.split_contents()\r\n if len(tokens) != 4:\r\n msg = \"Wrong number of arguments for %s.\"\r\n raise template.TemplateSyntaxError(msg % tokens[0])\r\n elif tokens[2] != 'as':\r\n msg = \"Wrong syntax for %s: third argument must be the keyword 'as'\"\r\n raise template.TemplateSyntaxError(msg % tokens[0])\r\n return (tokens[1], tokens[3])",
"def parse_source(src, opts={}):\n\n s = [x for x in src.split('/') if x]\n\n if len(s) == 3 and s[1] == 'lists':\n return lambda api: api.list_timeline(owner_screen_name=s[0], slug=s[2], **opts)\n if len(s) == 2 and s[0] == 'lists':\n return lambda api: api.list_timeline(owner_id=api.me().id, slug=s[1], **opts)\n elif len(s) == 1:\n return lambda api: api.user_timeline(id=s[0], **opts)\n elif len(s) == 0:\n return lambda api: api.home_timeline(**opts)\n else:\n raise Exception('invalid source string: %s' % src)",
"def parse_ptask(l) :\n task = {}\n l = l.strip()\n s = l.split(',')\n task['max_inst'] = 3\n for x in s :\n prop_extract(x, 'name', task)\n prop_extract(x, 'ctime', task)\n prop_extract(x, 'period', task)\n prop_extract(x, 'deadline', task)\n prop_extract(x, 'max_inst', task)\n return task",
"def __parse_locus_version_6(self,handle): \n self.ilmn_id = read_string(handle)\n self.source_strand = SourceStrand.from_string(self.ilmn_id.split(\"_\")[-2])\n self.name = read_string(handle)\n for idx in range(3):\n read_string(handle)\n handle.read(4)\n for idx in range(2):\n read_string(handle)\n self.snp = read_string(handle)\n self.chrom = read_string(handle)\n for idx in range(2):\n read_string(handle)\n self.map_info = int(read_string(handle))\n for idx in range(2):\n read_string(handle)\n self.address_a = read_int(handle)\n self.address_b = read_int(handle)\n for idx in range(7):\n read_string(handle)\n handle.read(3)\n self.assay_type = read_byte(handle)\n if self.assay_type not in [0,1,2]:\n raise Exception(\"Format error in reading assay type from locus entry\")\n if self.address_b == 0:\n if self.assay_type != 0:\n raise Exception(\"Manifest format error: Assay type is inconsistent with address B\")\n else:\n if self.assay_type == 0:\n raise Exception(\"Manifest format error: Assay type is inconsistent with address B\")",
"def parse_output(self, output):\n pass",
"def sched_stat_parser(event, text):\n return default_body_parser(event, text.replace(' [ns]', ''))",
"def parse(line):\n if line.startswith('turn on'):\n action = 'on'\n elif line.startswith('turn off'):\n action = 'off'\n elif line.startswith('toggle'):\n action = 'toggle'\n else:\n raise Exception('Unexpected input: \"{}\"'.format(line))\n start, end = map(parse_pair, re.findall(r'\\d+,\\d+', line))\n return action, start, end"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse an rcmenu item
|
def rcmenu_item():
yield keyword("menuitem|separator|submenu")
yield normalspaces()
varname = yield var_name()
yield normalspaces()
label = yield quoted
yield normalspaces()
vnarg = yield sepBy(named_argument, singlelinespaces())
return s.Construct(s.RCMENU_ITEM, varname, label, vnarg)
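# A sketch of the item shape this grammar accepts, assuming the @generate
# decorator and a parsec-style .parse() entry point; the input is a hypothetical
# MAXScript fragment spelled as in the keyword alternation above.
item = rcmenu_item.parse('menuitem mi_save "Save" enabled:false')
# item should be s.Construct(s.RCMENU_ITEM, <mi_save>, <"Save">, [<enabled:false>])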
|
[
"def rcmenu_def():\n yield keyword(\"rcmenu\")\n yield normalspaces()\n vname = yield var_name()\n yield normalspaces()\n yield string(\"(\")\n yield normalspaces()\n clauses = yield sepBy(rcmenu_clause, end_of_statement)\n yield normalspaces()\n yield string(\")\")\n return s.Construct(s.RCMENU_DEF, vname, clauses)",
"def build_menu(parser, token):\n return MyMenuObject()",
"def rcmenu_handler():\n yield keyword(\"on\")\n yield normalspaces()\n varname = yield var_name()\n yield normalspaces()\n vn2 = yield var_name()\n yield normalspaces()\n yield keyword(\"do\")\n yield normalspaces()\n expr = yield expression\n return s.Construct(s.RCMENU_HANDLER, varname, vn2, expr)",
"def menu_item(self):\n return dict(name=self.param, description=self.description)",
"def buildMenu(item):\n\n # fill the marking menu items\n name = item['name']\n subMenu = item['subMenu']\n position = item['position']\n # to be added to each item to correctly close the marking menu\n onCloseCommand = ';import dmptools.setup.markingMenu as markingMenu;markingMenu.deleteMarkingMenu()'\n # create item\n if position:\n command = item['command'].replace('python(\"', '').replace('\");', '')\n cmds.menuItem(\n label=name,\n subMenu=subMenu,\n command=command+onCloseCommand,\n enable=True,\n data=0,\n boldFont=False,\n radialPosition=position,\n enableCommandRepeat=True,\n image=\"commandButton.png\",\n echoCommand=1,\n sourceType=\"python\",\n )\n else:\n if name == 'separator':\n cmds.menuItem(divider=True)\n else:\n command = item['command'].replace('python(\"', '').replace('\");', '')\n cmds.menuItem(\n label=name,\n subMenu=subMenu,\n command=command+onCloseCommand,\n enable=True,\n data=0,\n boldFont=False,\n enableCommandRepeat=True,\n image=\"commandButton.png\",\n echoCommand=1,\n sourceType=\"python\",\n )",
"def menu(options):\r\n hashed_options = False\r\n if not options:\r\n return None\r\n menu = dict(menuItems=[item(thing) for thing in options])\r\n if all_are_instance(options, (tuple, NoneType)):\r\n hashed_options = True\r\n plist = to_plist(menu)\r\n proc = subprocess.Popen([dialog, '-u'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)\r\n proc.stdin.write(plist)\r\n output, _ = proc.communicate()\r\n result = from_plist(output)\r\n if not 'selectedIndex' in result:\r\n return None\r\n index = int(result['selectedIndex'])\r\n if hashed_options:\r\n return options[index][1]\r\n return options[index]",
"def parse_item(self, item):\n if self.has_iattr(item.conf, 'hue2_id') and self.has_iattr(item.conf, 'hue2_function'):\n self.logger.debug(\"parse item: {}\".format(item))\n conf_data = {}\n conf_data['id'] = self.get_iattr_value(item.conf, 'hue2_id')\n conf_data['resource'] = self.get_iattr_value(item.conf, 'hue2_resource')\n conf_data['function'] = self.get_iattr_value(item.conf, 'hue2_function')\n conf_data['item'] = item\n self.plugin_items[item.path()] = conf_data\n if conf_data['resource'] == 'sensor':\n # ensure that the scheduler for sensors will be started if items use sensor data\n self.sensor_items_configured = True\n if conf_data['resource'] == 'light':\n # ensure that the scheduler for sensors will be started if items use sensor data\n self.light_items_configured = True\n\n if conf_data['resource'] == 'group':\n # bridge updates are allways scheduled\n self.logger.debug(\"parse_item: configured group item = {}\".format(conf_data))\n\n if conf_data['function'] != 'reachable':\n return self.update_item\n return",
"def create_menu(subparsers: \"argparse._SubParsersAction\") -> None:\n dfetch.commands.command.Command.parser(subparsers, Import)",
"def processItem(self,entry):\n pass",
"def get_item(menu, item):\n for i in menu['items']:\n if i['id'] == item:\n return i",
"def PopupMenuItems(self,menu):\n pass",
"def rcmenu_clause():\n clause = yield (\n rcmenu_handler ^\n local_decl ^\n function_def ^\n struct_def ^\n rcmenu_item)\n return clause",
"def gedit2_menu(xml):\n return MENU_UI.format(xml) # Splice in the examples menu",
"def menu_process():\n data = selectAll_db(\"Menu\")\n lookup = {}\n for item in data:\n lookup[item[0]] = item[1]\n return lookup",
"def populate_menu(node, parm):\n try:\n return menu_callbacks[parm.name()](node)\n except Exception as e:\n hou.ui.displayMessage(title='Connection Error', text=str(e),\n severity=hou.severityType.Error)\n return []",
"def is_valid_menu_item(self, item_name: str) -> bool:",
"def menuFormat(self):\n \n pass",
"def attributeMenu(beginMenu=bool, inputs=bool, editor=\"string\", unregPulldownMenuCommand=int, regPulldownMenuCommand=\"string\", plug=\"string\", finishMenu=bool):\n pass",
"def attrEnumOptionMenu(string, docTag=\"string\", height=int, defineTemplate=\"string\", parent=\"string\", numberOfPopupMenus=bool, useTemplate=\"string\", manage=bool, label=\"string\", dragCallback=\"string\", highlightColor=float, annotation=\"string\", enable=bool, preventOverride=bool, popupMenuArray=bool, width=int, exists=bool, changeCommand=\"string\", enableBackground=bool, visibleChangeCommand=\"string\", visible=bool, fullPathName=bool, attribute=\"string\", dropCallback=\"string\", noBackground=bool, backgroundColor=float, enumeratedItem=int, isObscured=bool):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse an rcmenu handler

|
def rcmenu_handler():
yield keyword("on")
yield normalspaces()
varname = yield var_name()
yield normalspaces()
vn2 = yield var_name()
yield normalspaces()
yield keyword("do")
yield normalspaces()
expr = yield expression
return s.Construct(s.RCMENU_HANDLER, varname, vn2, expr)
|
[
"def rcmenu_def():\n yield keyword(\"rcmenu\")\n yield normalspaces()\n vname = yield var_name()\n yield normalspaces()\n yield string(\"(\")\n yield normalspaces()\n clauses = yield sepBy(rcmenu_clause, end_of_statement)\n yield normalspaces()\n yield string(\")\")\n return s.Construct(s.RCMENU_DEF, vname, clauses)",
"def _on_menubar(self, event: str, value: Any) -> None:\n print(\"_on_menubar\", event, value)\n if not value:\n return\n label, key = value.split(\"::__\", 1)\n tokens = key.split(\"_\")\n if tokens[0] == \"FILE\":\n if tokens[1] == \"OPEN\":\n if tokens[2] == \"GCODE\":\n # TODO\n self.publish(\"gui:select_file_gcode\", None)\n elif tokens[2] == \"CONTROLLER\":\n self.publish(\"%s:set_active\" % label, True)\n elif tokens[1] == \"NEW\":\n if tokens[2] == \"GCODE\":\n # TODO\n pass\n elif tokens[2] == \"CONTROLLER\":\n self.publish(\"##new_controller:picker\", \"##new_controller\")\n #self.publish(\"request_new_controller\", label)",
"def rcmenu_item():\n yield keyword(\"menuitem|separator|submenu\")\n yield normalspaces()\n varname = yield var_name()\n yield normalspaces()\n label = yield quoted\n yield normalspaces()\n vnarg = yield sepBy(named_argument, singlelinespaces())\n return s.Construct(s.RCMENU_ITEM, varname, label, vnarg)",
"def create_menu(subparsers: \"argparse._SubParsersAction\") -> None:\n dfetch.commands.command.Command.parser(subparsers, Import)",
"def populate_menu(node, parm):\n try:\n return menu_callbacks[parm.name()](node)\n except Exception as e:\n hou.ui.displayMessage(title='Connection Error', text=str(e),\n severity=hou.severityType.Error)\n return []",
"def build_menu(parser, token):\n return MyMenuObject()",
"def rcmenu_clause():\n clause = yield (\n rcmenu_handler ^\n local_decl ^\n function_def ^\n struct_def ^\n rcmenu_item)\n return clause",
"def handler(self, sub_command, args):\n pass",
"def main():\n menu()",
"def actionHighlighted(self, selectedLine, keypress):\n # #action_highlighted\n global householdID\n\n if (self.parent.myStatus == 'Main'):\n self.parent.wMain.values = ['Selection: ', selectedLine,\n '\\tM\\t\\t to return to the main menu']\n self.parent.wMain.display()\n global ActionKeys\n ActionKeys[selectedLine[1]]()\n\n elif (self.parent.myStatus == 'Contact'):\n dataArray = selectedLine.split('\\t')\n householdID = getHouseholdForContact(str(dataArray[0]))\n message(\"Waring:\\nThere may be other Household entries for this Contact\")\n # contactID = str(dataArray[0])\n self.parent.wStatus2.value =\\\n \"Contact changed to \" + str(dataArray[1])\n self.parent.setMainMenu()\n\n elif (self.parent.myStatus == 'Households'):\n # items are padded out with spaces to produce neat columns. These are removed with .strip()\n dataArray = selectedLine.split('\\t')\n householdID = str(dataArray[0]).strip()\n self.parent.wStatus2.value =\\\n \"Household changed to \" + householdID\n self.parent.wStatus2.display()\n self.parent.setMainMenu()\n\n elif (self.parent.myStatus == 'Meta'):\n dataArray = selectedLine.split('\\t\\t')\n global metaID\n metaID = str(dataArray[0])\n dataType = str(dataArray[3])\n self.parent.wStatus2.value =\\\n \"Meta \" + metaID + \", type \" + dataType\n self.parent.wStatus2.display()\n self.parent.setMainMenu()\n\n elif (self.parent.myStatus == 'Household'):\n dataArray = selectedLine.split('\\t')\n householdID = str(dataArray[0])\n self.parent.wStatus2.value =\\\n \"Household changed to \" + str(dataArray[0])\n self.parent.wStatus2.display()\n self.parent.setMainMenu()\n\n elif (self.parent.myStatus == 'Individual'):\n dataArray = selectedLine.split('\\t')\n self.parent.wStatus2.value =\\\n \"Individual changed to \" + str(dataArray[2]) + \" from household \" + str(dataArray[0])\n self.parent.wStatus2.display()\n self.parent.setMainMenu()\n\n elif (self.parent.myStatus == 'Tables'):\n self.parent.wStatus2.values = ['Table ', selectedLine, 'was selected!']\n self.parent.wStatus2.display()\n self.parent.display_selected_data(selectedLine)\n\n else:\n # check if command key is present in format [x]\n Key = selectedLine.split('[')[1]\n try:\n ActionKeys[Key[0]](ord(Key[0]))\n except:\n message(\"No action for %s defined\" % Key[0])",
"def menu_process():\n data = selectAll_db(\"Menu\")\n lookup = {}\n for item in data:\n lookup[item[0]] = item[1]\n return lookup",
"def _initMenu(self):\n #--- Menu Project ---#\n self.mi_newProject.setShortcut(\"Ctrl+Shift+N\")\n self.mi_newProject.triggered.connect(self.on_miNewProject)\n self.mi_loadProject.setShortcut(\"Ctrl+Shift+L\")\n self.mi_loadProject.triggered.connect(self.on_miLoadProject)\n #--- Menu Settings ---#\n self.mi_toolSettings.setShortcut(\"Ctrl+Shift+T\")\n self.mi_toolSettings.triggered.connect(self.on_miToolSettings)\n self.mi_projectSettings.setShortcut(\"Ctrl+Shift+P\")\n self.mi_projectSettings.triggered.connect(self.on_miProjectSettings)\n #--- Menu Help ---#\n #- Log Level\n for level in self.log.levels:\n menuItem = self.m_logLevel.addAction(level)\n menuItem.setCheckable(True)\n menuItem.triggered.connect(partial(self.on_miLogLevel, level))\n self.on_miLogLevel(self.log.level)\n #- Style\n for style in pQt.Style().styles:\n menuItem = self.m_style.addAction(style)\n menuItem.setCheckable(True)\n menuItem.triggered.connect(partial(self.on_miStyle, style))\n self.on_miStyle('darkGrey')",
"def getMenu(self,parent):\r\n self.menu = tk.Menu(parent)\r\n self.filemenu = tk.Menu(self.menu ,tearoff = 0)\r\n new_gameOption = tk.Menu(self.filemenu ,tearoff = 0)\r\n new_gameOption.add_command(label=\"Camera Input\", command = lambda: self.launchGame_CameraInput())\r\n new_gameOption.add_command(label=\"Manual Input\", command = lambda: self.launchGame_ManualInput())\r\n self.filemenu.add_cascade(label = \"New Game Solver\", menu= new_gameOption)\r\n self.filemenu.add_separator()\r\n self.filemenu.add_command(label=\"Return\", command = lambda: self.controller.show_frame(\"StartPage\",\"300x\"+str(210*len(self.controller.games)+100)))\r\n self.filemenu.add_command(label=\"Exit\", command = parent.destroy)\r\n self.menu.add_cascade(label=\"File\",menu=self.filemenu)\r\n self.helpmenu = tk.Menu(self.menu ,tearoff = 0)\r\n message = \"This is a Sudoku Solver, you add a new game either by typing the numbers or by importing an image\"\r\n self.helpmenu.add_command(label=\"About\", command = lambda: mb.showinfo(\"About!\",message))\r\n self.menu.add_cascade(label=\"Help\",menu=self.helpmenu)\r\n return(self.menu)",
"def handle_command():\n # Get the fully populated argparser\n parser = _construct_parser()\n # Parse the arguments\n args = parser.parse_args()\n # Execute the handler. Every subparser has handler so either it is set or the\n # ArgumentParser informs the user that the given command does not exist and this code\n # isn't reached.\n args.handler(args)",
"def connect(menu):\n #Load the controls file and set the current_menu, controls, and process values\n load_controls(menu)\n #Set the process_con value\n create_connection()",
"def get_menu_interaction():\n with open(constants.FILE_MENU_TEXT(), 'r') as fileobject:\n return input_check(fileobject.read(), 3)",
"def fillCommandMenuItems(self):\r\n self.addCommandMenuItem(label=\"ReplayFrames\", command=self.replayFramesCallback)",
"def PopupMenuItems(self,menu):\n pass",
"async def dispatcher(level: str) -> tuple[menu.ReplyKeyboardMarkup, str]:\n keyboard_cor, prev_level = await find_in_dict(level, menu_storage)\n return keyboard_cor, prev_level"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse an rcmenu clause
|
def rcmenu_clause():
clause = yield (
rcmenu_handler ^
local_decl ^
function_def ^
struct_def ^
rcmenu_item)
return clause
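# With parsec-style ^ (try-choice), the alternatives are attempted in the order
# written and the parser backtracks on failure, so a clause is dispatched by its
# leading token. Hypothetical MAXScript fragments and the branch expected to match:
clause_examples = {
    'on mi_open picked do print "open"': 'rcmenu_handler',
    'local click_count = 0':             'local_decl',
    'menuitem mi_open "Open..."':        'rcmenu_item',
}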
|
[
"def rcmenu_def():\n yield keyword(\"rcmenu\")\n yield normalspaces()\n vname = yield var_name()\n yield normalspaces()\n yield string(\"(\")\n yield normalspaces()\n clauses = yield sepBy(rcmenu_clause, end_of_statement)\n yield normalspaces()\n yield string(\")\")\n return s.Construct(s.RCMENU_DEF, vname, clauses)",
"def rcmenu_item():\n yield keyword(\"menuitem|separator|submenu\")\n yield normalspaces()\n varname = yield var_name()\n yield normalspaces()\n label = yield quoted\n yield normalspaces()\n vnarg = yield sepBy(named_argument, singlelinespaces())\n return s.Construct(s.RCMENU_ITEM, varname, label, vnarg)",
"def rcmenu_handler():\n yield keyword(\"on\")\n yield normalspaces()\n varname = yield var_name()\n yield normalspaces()\n vn2 = yield var_name()\n yield normalspaces()\n yield keyword(\"do\")\n yield normalspaces()\n expr = yield expression\n return s.Construct(s.RCMENU_HANDLER, varname, vn2, expr)",
"def build_menu(parser, token):\n return MyMenuObject()",
"def parse_line(line):\n label = opcode = operand = \"\"\n\n token_list = Util.get_token_list(line)\n\n token_length = len(token_list)\n\n mnemonics_list = list(Optab.as_dict().keys())\n\n if token_length == 1:\n if token_list[0] in mnemonics_list:\n # like RSUB\n opcode = token_list[0]\n else:\n # like END\n label = token_list[0]\n elif token_length == 2:\n if token_list[0] in mnemonics_list:\n # like ADD THREE\n opcode, operand = token_list\n elif token_list[1] in mnemonics_list:\n # like END RSUB\n label, opcode = token_list\n elif token_length == 3:\n if token_list[0] in mnemonics_list:\n # like LDA BUFFER, X\n opcode, operand, _ = token_list\n else:\n # like THREE WORD 3\n label, opcode, operand = token_list\n elif token_length == 4:\n # like LOOP LDA BUFFER, X\n # or EOF BYTE C'454F46'\n label = token_list[0]\n opcode = token_list[1]\n\n if opcode == OpCode.BYTE:\n # if opcode is BYTE then the 4th string\n # will be the actual value,(token_list[3]).\n # 3rd string will be 'C' or 'X'\n operand = token_list[3]\n else:\n operand = token_list[2]\n\n return label, opcode, operand",
"def arg_plist(self, line):\n if not line:\n raise IllegalClientResponse(\"Missing argument\")\n\n if line[:1] != b\"(\":\n raise IllegalClientResponse(\"Missing parenthesis\")\n\n i = line.find(b\")\")\n\n if i == -1:\n raise IllegalClientResponse(\"Mismatched parenthesis\")\n\n return (parseNestedParens(line[1:i],0), line[i+2:])",
"def parseCmd(self, arg):\n args = arg.split(';')\n if len(args) > 1:\n for a in args:\n self.parseCmd(a)\n args = arg.split()\n log.debug(str(len(args))+\" arguments: \"+str(args))\n\n # Skip one-word commands.\n # They would be commands for this app and they should've been\n # already handled.\n if len(args) <= 1:\n return\n\n action = args[0]\n target = \"clip\"\n\n if self.reader.isGroupCommand(args[1]):\n target = args[1]\n args.pop(1)\n\n # translate the words to commands\n # first argument is the command\n cmd = self.reader.getCommand(action, target)\n if cmd == None:\n return\n # the rest are parameters\n args.pop(0)\n args = \" \".join(args)\n\n osc.send(cmd, args)\n\n # execute translation as if it had been typed\n # self.onecmd(str(cmd))",
"def __init__(self, key, text=\"\", links=None, linktexts=None,\r\n keywords=None, cols=1, helptext=None,\r\n selectcmds=None, code=\"\", nodefaultcmds=False, separator=\"\"):\r\n self.key = key\r\n self.cmdset = None\r\n self.links = links\r\n self.linktexts = linktexts\r\n self.keywords = keywords\r\n self.cols = cols\r\n self.selectcmds = selectcmds\r\n self.code = code\r\n self.nodefaultcmds = nodefaultcmds\r\n self.separator = separator\r\n Nlinks = len(self.links)\r\n\r\n # validate the input\r\n if not self.links:\r\n self.links = []\r\n if not self.linktexts or (len(self.linktexts) != Nlinks):\r\n self.linktexts = [None for i in range(Nlinks)]\r\n if not self.keywords or (len(self.keywords) != Nlinks):\r\n self.keywords = [None for i in range(Nlinks)]\r\n if not selectcmds or (len(self.selectcmds) != Nlinks):\r\n self.selectcmds = [None for i in range(Nlinks)]\r\n\r\n # Format default text for the menu-help command\r\n if not helptext:\r\n helptext = \"Select one of the valid options (\"\r\n for i in range(Nlinks):\r\n if self.keywords[i]:\r\n if self.keywords[i] not in (CMD_NOMATCH, CMD_NOINPUT):\r\n helptext += \"%s, \" % self.keywords[i]\r\n else:\r\n helptext += \"%s, \" % (i + 1)\r\n helptext = helptext.rstrip(\", \") + \")\"\r\n self.helptext = helptext\r\n\r\n # Format text display\r\n string = \"\"\r\n if text:\r\n string += \"%s\\n\" % text\r\n\r\n # format the choices into as many collumns as specified\r\n choices = []\r\n for ilink, link in enumerate(self.links):\r\n choice = \"\"\r\n if self.keywords[ilink]:\r\n if self.keywords[ilink] not in (CMD_NOMATCH, CMD_NOINPUT):\r\n choice += \"{g%s{n\" % self.keywords[ilink]\r\n else:\r\n choice += \"{g %i{n\" % (ilink + 1)\r\n if self.linktexts[ilink]:\r\n choice += \" - %s\" % self.linktexts[ilink]\r\n choices.append(choice)\r\n cols = [[] for i in range(min(len(choices), cols))]\r\n while True:\r\n for i in range(len(cols)):\r\n if not choices:\r\n cols[i].append(\"\")\r\n else:\r\n cols[i].append(choices.pop(0))\r\n if not choices:\r\n break\r\n ftable = utils.format_table(cols)\r\n for row in ftable:\r\n string += \"\\n\" + \"\".join(row)\r\n # store text\r\n self.text = self.separator + \"\\n\" + string.rstrip()",
"def _parse_args(self):\n self._args = self.msg.strip().split()\n\n try:\n command_uc = self.args.pop(0)\n self._command = command_uc.lower()\n except IndexError:\n return\n\n # e.g. \"!command>user arg1 arg2\"\n if \">\" in self.command:\n command_uc, self._reply_nick = command_uc.split(\">\", 1)\n self._command = command_uc.lower()\n\n if self.command.startswith(\"!\") or self.command.startswith(\".\"):\n # e.g. \"!command arg1 arg2\"\n self._is_command = True\n self._trigger = self.command[0]\n self._command = self.command[1:] # Strip the \"!\" or \".\"\n elif re.match(r\"{0}\\W*?$\".format(re.escape(self.my_nick)),\n self.command, re.U):\n # e.g. \"EarwigBot, command arg1 arg2\"\n self._is_command = True\n self._trigger = self.my_nick\n try:\n self._command = self.args.pop(0).lower()\n except IndexError:\n self._command = \"\"\n else:\n try:\n if self.msg[-1] == \".\" and self.msg[-2] != \".\":\n if self.args:\n self.args[-1] = self.args[-1][:-1]\n else:\n self._command = self.command[:-1]\n except IndexError:\n pass\n\n # e.g. \"!command >user arg1 arg2\"\n if self.args and self.args[0].startswith(\">\"):\n self._reply_nick = self.args.pop(0)[1:]",
"def gedit2_menu(xml):\n return MENU_UI.format(xml) # Splice in the examples menu",
"def create_menu(subparsers: \"argparse._SubParsersAction\") -> None:\n dfetch.commands.command.Command.parser(subparsers, Import)",
"def get_menu_interaction():\n with open(constants.FILE_MENU_TEXT(), 'r') as fileobject:\n return input_check(fileobject.read(), 3)",
"def attributeMenu(beginMenu=bool, inputs=bool, editor=\"string\", unregPulldownMenuCommand=int, regPulldownMenuCommand=\"string\", plug=\"string\", finishMenu=bool):\n pass",
"def _parse_line(self):\r\n #if self.debug: print '\\t ' + str(self._current_node)\r\n\r\n # PyParser setParseAction's actually execute during parsing,\r\n # So we need closures in order to change the current scope\r\n\r\n \r\n def depth_from_indentation(function):\r\n \"\"\" Set the depth as the start of the match \"\"\"\r\n def wrap(start, values):\r\n #print 'Depth %d | %d %s' %(self._depth, start, values)\r\n #self._depth = start\r\n self._current_node = function(values)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap\r\n \r\n def depth_from_match(function):\r\n \"\"\" Set the depth as the start of the match \"\"\"\r\n def wrap(start, values):\r\n #print 'Depth %d | %d %s' %(self._depth, start, values)\r\n #print self._current_node\r\n self._depth = start\r\n self._current_node = function(values)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap \r\n\r\n def depth_from_nemo_tag(function):\r\n \"\"\" Start of the match is where the nemo tag is. Pass the other values to the wrapped function \"\"\"\r\n def wrap(start, values):\r\n # print 'Depth %d | %d %s' %(self._depth, start, values)\r\n self._depth = start\r\n tokens = values[1]\r\n self._current_node = function(tokens)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap\r\n\r\n\r\n\r\n # Match HTML\r\n from pyparsing import NotAny, MatchFirst\r\n html = restOfLine\r\n html.setParseAction(depth_from_indentation(self._add_html_node))\r\n\r\n # Match Mako control tags\r\n nemo_tag = Literal('%')\r\n\r\n begin = Keyword('for') | Keyword('if') | Keyword('while')\r\n middle = Keyword('else') | Keyword('elif')\r\n end = Keyword('endfor') | Keyword('endif') | Keyword('endwhile')\r\n control = nemo_tag + (begin | middle | end)\r\n\r\n begin.setParseAction(depth_from_indentation(self._add_nesting_mako_control_node) )\r\n middle.setParseAction(depth_from_indentation(self._add_mako_middle_node))\r\n end.setParseAction(depth_from_indentation(self._add_mako_control_leaf))\r\n\r\n # Match Nemo tags\r\n argument_name = Word(alphas,alphanums+\"_-:\")\r\n argument_value = quotedString\r\n regular_argument = argument_name + Literal('=') + argument_value\r\n\r\n class_name = Literal('.').setParseAction(lambda x: 'class=')\r\n id_name = Literal('#').setParseAction(lambda x: 'id=')\r\n special_argument = (class_name | id_name) + argument_value\r\n argument = Combine(special_argument) | Combine(regular_argument)\r\n\r\n # Match single Nemo statement (Part of a multi-line)\r\n inline_nemo_html = Word(alphas) + Group(ZeroOrMore(argument))\r\n inline_nemo_html.setParseAction(depth_from_match(self._add_nemo_node))\r\n\r\n # Match first nemo tag on the line (the one that may begin a multi-statement expression) \r\n nemo_html = nemo_tag + Group(Word(alphanums+\"_-:\") + Group(ZeroOrMore(argument)))\r\n nemo_html.setParseAction(depth_from_nemo_tag(self._add_nemo_node))\r\n\r\n # Match a multi-statement expression. Nemo statements are seperated by |. 
Anything after || is treated as html\r\n separator = Literal('|').suppress()\r\n html_separator = Literal('||') # | Literal('|>')\r\n nemo_list = nemo_html + ZeroOrMore( separator + inline_nemo_html )\r\n inline_html = html.copy()\r\n inline_html.setParseAction(depth_from_match(self._add_inline_html_node))\r\n nemo_multi = nemo_list + Optional(html_separator + inline_html)\r\n\r\n # Match empty Nemo statement\r\n empty = nemo_tag + Empty()\r\n empty.setParseAction(depth_from_indentation(self._add_blank_nemo_node))\r\n\r\n # Match unused Mako tags\r\n mako_tags = Literal('<%') | Literal('%>') | Literal('%CLOSETEXT') | Literal('</%')\r\n mako = mako_tags\r\n mako_tags.setParseAction(depth_from_indentation(self._add_html_node))\r\n\r\n # Matches General\r\n nemo = (control | nemo_multi | empty)\r\n line = mako_tags | nemo | html\r\n\r\n # Depth Calculation (deprecated?)\r\n self._depth = len(self._c) - len(self._c.strip())\r\n\r\n #try:\r\n line.parseString(self._c)\r\n\r\n #except ParseException:\r\n # Finally if we couldn't match, then handle it as HTML\r\n #add_html_node(self._c)\r",
"def parse_query(self, line):\n query = {'bool': [], 'phrase': [], 'wild': []}\n self.line = re.sub(r'[_]|[^\\w\\s\"*]', ' ', line.strip().lower())\n query = self.parse_wildcard(query)\n query = self.parse_phrase(query)\n query = self.parse_boolean(query)\n return query",
"def targetMenu()->None:\n print(\"\\nEscoja el rango de edad\")\n print(\"*******************************************\")\n print(\"0. 0-10 \")\n print(\"1. 11-20\")\n print(\"2. 21-30\")\n print(\"3. 31-40\")\n print(\"4. 41-50\")\n print(\"5. 51-60\")\n print(\"6. 60+\")\n print(\"*******************************************\")",
"def menuFormat(self):\n \n pass",
"def __init__(self, level, clause):\n self.clause = clause\n self.level = level",
"def attrEnumOptionMenu(string, docTag=\"string\", height=int, defineTemplate=\"string\", parent=\"string\", numberOfPopupMenus=bool, useTemplate=\"string\", manage=bool, label=\"string\", dragCallback=\"string\", highlightColor=float, annotation=\"string\", enable=bool, preventOverride=bool, popupMenuArray=bool, width=int, exists=bool, changeCommand=\"string\", enableBackground=bool, visibleChangeCommand=\"string\", visible=bool, fullPathName=bool, attribute=\"string\", dropCallback=\"string\", noBackground=bool, backgroundColor=float, enumeratedItem=int, isObscured=bool):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse an rcmenu def
|
def rcmenu_def():
yield keyword("rcmenu")
yield normalspaces()
vname = yield var_name()
yield normalspaces()
yield string("(")
yield normalspaces()
clauses = yield sepBy(rcmenu_clause, end_of_statement)
yield normalspaces()
yield string(")")
return s.Construct(s.RCMENU_DEF, vname, clauses)
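# A usage sketch, assuming the @generate decorator, a parsec-style .parse() entry
# point, and that normalspaces/end_of_statement tolerate the line breaks shown;
# the MAXScript source is hypothetical.
src = '''rcmenu fileMenu
(
    menuitem mi_open "Open..."
    menuitem mi_quit "Quit"
    on mi_open picked do print "open"
)'''
ast = rcmenu_def.parse(src)
# ast should be s.Construct(s.RCMENU_DEF, <fileMenu>, [item, item, handler])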
|
[
"def rcmenu_handler():\n yield keyword(\"on\")\n yield normalspaces()\n varname = yield var_name()\n yield normalspaces()\n vn2 = yield var_name()\n yield normalspaces()\n yield keyword(\"do\")\n yield normalspaces()\n expr = yield expression\n return s.Construct(s.RCMENU_HANDLER, varname, vn2, expr)",
"def rcmenu_item():\n yield keyword(\"menuitem|separator|submenu\")\n yield normalspaces()\n varname = yield var_name()\n yield normalspaces()\n label = yield quoted\n yield normalspaces()\n vnarg = yield sepBy(named_argument, singlelinespaces())\n return s.Construct(s.RCMENU_ITEM, varname, label, vnarg)",
"def create_menu(subparsers: \"argparse._SubParsersAction\") -> None:\n dfetch.commands.command.Command.parser(subparsers, Import)",
"def get_menu_interaction():\n with open(constants.FILE_MENU_TEXT(), 'r') as fileobject:\n return input_check(fileobject.read(), 3)",
"def build_menu(parser, token):\n return MyMenuObject()",
"def menuFormat(self):\n \n pass",
"def main():\n menu()",
"def gedit2_menu(xml):\n return MENU_UI.format(xml) # Splice in the examples menu",
"def targetMenu()->None:\n print(\"\\nEscoja el rango de edad\")\n print(\"*******************************************\")\n print(\"0. 0-10 \")\n print(\"1. 11-20\")\n print(\"2. 21-30\")\n print(\"3. 31-40\")\n print(\"4. 41-50\")\n print(\"5. 51-60\")\n print(\"6. 60+\")\n print(\"*******************************************\")",
"def attributeMenu(beginMenu=bool, inputs=bool, editor=\"string\", unregPulldownMenuCommand=int, regPulldownMenuCommand=\"string\", plug=\"string\", finishMenu=bool):\n pass",
"def __init__(self):\n self.CLI_COMMAND = os.path.basename(sys.argv[0])\n\n self.ctrl_parser = argparse.ArgumentParser(prog=self.CLI_COMMAND,\n description='Control Component Parser')\n\n self.ctrl_subparser = self.ctrl_parser.add_subparsers(\n title='Sub Commands',\n description='List of Valid Sub Commands', dest='subparser_name')\n\n self.add_simple_args()\n\n \"\"\"Sub Parser for all Cli Commands\"\"\"\n self.add_subparser('power', 'Power on/off/reset a device.',\n ['on', 'off', 'cycle', 'bios', 'efi', 'hdd', 'pxe', 'cdrom', 'removable'],\n 'Select an option: on/off/cycle/bios/efi/hdd/pxe/cdrom/removable.'\n ' Ex: {} power on node001'.format(self.CLI_COMMAND),\n [\n {\n 'name': '-f',\n 'name2': '--force',\n 'action': 'store_true',\n 'help': 'This option will allow user to force the Power On/Off/Reboot'\n },\n {\n 'name': '-o',\n 'name2': '--outlet',\n 'type': int,\n 'nargs': '?',\n 'help': 'Specify the outlet to edit (PDUs only)'\n }\n ])\n\n self.add_subparser('resource', 'Resource add/remove from a resource pool.', ['add', 'remove', 'check'],\n 'Select one of the following options: add/remove/check'\n ' Ex: {} resource add node001'.format(self.CLI_COMMAND))\n\n self.add_subparser('process', 'Process list/kill on a node in a cluster.', ['list', 'kill'],\n 'Select one of two options: list/kill.'\n ' Ex: {} process kill 1232 node001'.format(self.CLI_COMMAND),\n [\n {\n 'name': 'process_id',\n 'help': 'Please provide process id to list or kill a process'\n }\n ])\n\n self.add_subparser('get', 'Get powercap/freq value of a node.', ['freq', 'powercap'])\n\n self.add_subparser('set', 'Set powercap/freq value of a node.', ['freq', 'powercap'], 'Select an option to set',\n [\n {\n 'name': 'value',\n 'help': 'Please provide the value to be set'\n }\n ])\n\n self.add_subparser('service', 'Check, start or stop services specified in the configuration file',\n ['status', 'start', 'stop'], 'Select an action to perform')\n\n self.ctrl_subparser.add_parser('datastore', help=\"Raw access to the database and its contects\", add_help=False)\n self.ctrl_subparser.add_parser('cmm', help=\"Configuration Manifest Management (CMM) is a user friendly way to update your configuration.\", add_help=False)\n self.ctrl_subparser.add_parser('provision', help=\"Adding, setting and removing provisioning \"\n \"options for devices\", add_help=False)\n self.ctrl_subparser.add_parser('diag', help=\"Launching diagnostic tests on devices\", add_help=False)\n\n self.add_subparser('bios', 'Update or get version of bios on specified nodes/group of nodes',\n ['update', 'get-version'], 'Select an action to perform',\n [\n {\n 'name': '-i',\n 'name2': '--image',\n 'nargs': '?',\n 'help': 'Specify the bios image'\n }\n ])\n\n self.add_subparser('sensor', 'Get specified sensor value on specified nodes/group of nodes',\n ['get'], 'Select option to get sensor values'\n 'Ex: 1. {0} sensor-name temp 2. {1} sensor-name temp --get-overtime 2 3'.\n format(self.CLI_COMMAND, self.CLI_COMMAND),\n [\n {\n 'name': 'sensor_name',\n 'nargs': '?',\n 'help': 'Provide a specific sensor, a comma seperated list of multiple sensors '\n 'or \"*\" for all sensors'\n },\n {\n 'name': '--get-overtime',\n 'nargs': 2,\n 'type': int,\n 'metavar': ('<sample-rate>', '<duration>'),\n 'help': 'Provide a sample rate(per second) and a duration of time(seconds) to sample'\n ' over, both values must be integers greater than 1'\n }\n ])\n self.ctrl_subparser.add_parser('job', help='Launching, checking, '\n 'retrieving and canceling job', add_help=False)",
"def _on_menubar(self, event: str, value: Any) -> None:\n print(\"_on_menubar\", event, value)\n if not value:\n return\n label, key = value.split(\"::__\", 1)\n tokens = key.split(\"_\")\n if tokens[0] == \"FILE\":\n if tokens[1] == \"OPEN\":\n if tokens[2] == \"GCODE\":\n # TODO\n self.publish(\"gui:select_file_gcode\", None)\n elif tokens[2] == \"CONTROLLER\":\n self.publish(\"%s:set_active\" % label, True)\n elif tokens[1] == \"NEW\":\n if tokens[2] == \"GCODE\":\n # TODO\n pass\n elif tokens[2] == \"CONTROLLER\":\n self.publish(\"##new_controller:picker\", \"##new_controller\")\n #self.publish(\"request_new_controller\", label)",
"def menu():\n print(\"lines - counting lines \")\n print(\"words - number of words \")\n print(\"letters - amout of letters \")\n print(\"word_frequency - 7 most frequent words \")\n print(\"letter_frequency - 7 most used letters \")\n print(\"all - show all menu choices \")",
"def main_menu(self):\n self.clear()\n self.tree.print_info()\n self.print_current()\n self.print_end()\n choix = input(\"\"\" >>>> JS, (c'est correct si je t'appelle JS?), je suis pret a produire des\n diagrammes LaTeX vraiment sick pour toi!!\n\n Je peux produire plusieurs sortes de diagrammes pour toi. Choisis le\n type de diagramme que tu veux produire\n\n 1: Mainline partant du noeud courant\n 2: Trouver noeud de depart\n 3: Trouver noeud de fin\n\n Choix :\"\"\")\n if choix == '1':\n self.fileS = self.bm.mainline_from(self.current)\n self.state = 'validateFile'\n if choix == '2':\n self.state = 'findNode'\n if choix == '3':\n self.state = 'findEndNode'",
"def parse_commands():\n\n # Action classes\n class SetupAction(argparse.Action):\n \"\"\"The setup action class that is called when setup is found in the command line.\"\"\"\n\n def __call__(self, pars, namespace, values, option_string=None):\n choice = int(\n input(\n \"You can now edit config files using QUBEKit, choose an option to continue:\\n\"\n \"1) Edit a config file\\n\"\n \"2) Create a new master template\\n\"\n \"3) Make a normal config file\\n\"\n \"4) Cancel\\n>\"\n )\n )\n\n if choice == 1:\n inis = Configure().show_ini()\n name = input(\n f\"Enter the name or number of the config file to edit\\n\"\n f'{\"\".join(f\"{inis.index(ini)}:{ini} \" for ini in inis)}\\n>'\n )\n # make sure name is right\n if name in inis:\n Configure().ini_edit(name)\n else:\n Configure().ini_edit(inis[int(name)])\n\n elif choice == 2:\n Configure().ini_writer(\"master_config.ini\")\n Configure().ini_edit(\"master_config.ini\")\n\n elif choice == 3:\n name = input(\"Enter the name of the config file to create\\n>\")\n Configure().ini_writer(name)\n Configure().ini_edit(name)\n\n else:\n sys.exit(\n \"Cancelling setup; no changes made. \"\n \"If you accidentally entered the wrong key, restart with QUBEKit -setup\"\n )\n\n sys.exit()\n\n class CSVAction(argparse.Action):\n \"\"\"The csv creation class run when the csv option is used.\"\"\"\n\n def __call__(self, pars, namespace, values, option_string=None):\n generate_bulk_csv(*values)\n sys.exit()\n\n class ProgressAction(argparse.Action):\n \"\"\"Run the pretty progress function to get the progress of all running jobs.\"\"\"\n\n def __call__(self, pars, namespace, values, option_string=None):\n pretty_progress()\n sys.exit()\n\n class DisplayMolAction(argparse.Action):\n \"\"\"Display the molecule objects requested\"\"\"\n\n def __call__(self, pars, namespace, values, option_string=None):\n display_molecule_objects(*values)\n sys.exit()\n\n class TorsionMakerAction(argparse.Action):\n \"\"\"Help the user make a torsion scan file.\"\"\"\n\n def __call__(self, pars, namespace, values, option_string=None):\n # load in the ligand\n mol = Ligand(values)\n\n # Prompt the user for the scan order\n scanner = TorsionScan(mol)\n scanner.find_scan_order()\n\n # Write out the scan file\n with open(f\"{mol.name}.dihedrals\", \"w+\") as qube:\n qube.write(\n \"# dihedral definition by atom indices starting from 0\\n# i j k l\\n\"\n )\n for scan in mol.scan_order:\n scan_di = mol.dihedrals[scan][0]\n qube.write(\n f\" {scan_di[0]:2} {scan_di[1]:2} {scan_di[2]:2} {scan_di[3]:2}\\n\"\n )\n printf(f\"{mol.name}.dihedrals made.\")\n\n sys.exit()\n\n intro = (\n \"Welcome to QUBEKit! For a list of possible commands, use the help command: -h. \"\n \"Alternatively, take a look through our github page for commands, recipes and common problems: \"\n \"https://github.com/qubekit/QUBEKit\"\n )\n parser = argparse.ArgumentParser(\n prog=\"QUBEKit\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=intro,\n )\n\n # Add all of the command line options in the arg parser\n parser.add_argument(\n \"-c\",\n \"--charge\",\n type=int,\n help=\"Enter the charge of the molecule, default 0.\",\n )\n parser.add_argument(\n \"-m\",\n \"--multiplicity\",\n type=int,\n help=\"Enter the multiplicity of the molecule, default 1.\",\n )\n parser.add_argument(\n \"-threads\",\n \"--threads\",\n type=int,\n help=\"Number of threads used in various stages of analysis, especially for engines like \"\n \"PSI4, Gaussian09, etc. 
Value is given as an int.\",\n )\n parser.add_argument(\n \"-memory\",\n \"--memory\",\n type=int,\n help=\"Amount of memory used in various stages of analysis, especially for engines like \"\n \"PSI4, Gaussian09, etc. Value is given as an int, e.g. 6GB is simply 6.\",\n )\n parser.add_argument(\n \"-ddec\",\n \"--ddec_version\",\n choices=[3, 6],\n type=int,\n help=\"Enter the ddec version for charge partitioning, does not effect ONETEP partitioning.\",\n )\n parser.add_argument(\n \"-geo\",\n \"--geometric\",\n choices=[True, False],\n type=string_to_bool,\n help=\"Turn on geometric to use this during the qm optimisations, recommended.\",\n )\n parser.add_argument(\n \"-bonds\",\n \"--bonds_engine\",\n choices=[\"psi4\", \"g09\", \"g16\"],\n help=\"Choose the QM code to calculate the bonded terms.\",\n )\n parser.add_argument(\n \"-charges\",\n \"--charges_engine\",\n choices=[\"onetep\", \"chargemol\"],\n help=\"Choose the method to do the charge partitioning.\",\n )\n parser.add_argument(\n \"-density\",\n \"--density_engine\",\n choices=[\"onetep\", \"g09\", \"g16\", \"psi4\"],\n help=\"Enter the name of the QM code to calculate the electron density of the molecule.\",\n )\n parser.add_argument(\n \"-solvent\",\n \"--solvent\",\n choices=[True, False],\n type=string_to_bool,\n help=\"Enter whether or not you would like to use a solvent.\",\n )\n # Maybe separate into known solvents and IPCM constants?\n parser.add_argument(\n \"-convergence\",\n \"--convergence\",\n choices=[\"GAU\", \"GAU_TIGHT\", \"GAU_VERYTIGHT\"],\n type=str.upper,\n help=\"Enter the convergence criteria for the optimisation.\",\n )\n parser.add_argument(\n \"-param\",\n \"--parameter_engine\",\n choices=[\"xml\", \"antechamber\", \"openff\", \"none\"],\n help=\"Enter the method of where we should get the initial molecule parameters from, \"\n \"if xml make sure the xml has the same name as the pdb file.\",\n )\n parser.add_argument(\n \"-mm\",\n \"--mm_opt_method\",\n choices=[\"openmm\", \"rdkit_mff\", \"rdkit_uff\", \"none\"],\n help=\"Enter the mm optimisation method for pre qm optimisation.\",\n )\n parser.add_argument(\n \"-config\",\n \"--config_file\",\n choices=Configure().show_ini(),\n help=\"Enter the name of the configuration file you wish to use for this run from the list \"\n \"available, defaults to master.\",\n )\n parser.add_argument(\n \"-theory\",\n \"--theory\",\n help=\"Enter the name of the qm theory you would like to use.\",\n )\n parser.add_argument(\n \"-basis\", \"--basis\", help=\"Enter the basis set you would like to use.\"\n )\n parser.add_argument(\n \"-restart\",\n \"--restart\",\n choices=[\n \"parametrise\",\n \"mm_optimise\",\n \"qm_optimise\",\n \"hessian\",\n \"mod_sem\",\n \"density\",\n \"charges\",\n \"lennard_jones\",\n \"torsion_scan\",\n \"torsion_optimise\",\n ],\n help=\"Enter the restart point of a QUBEKit job.\",\n )\n parser.add_argument(\n \"-end\",\n \"-end\",\n choices=[\n \"parametrise\",\n \"mm_optimise\",\n \"qm_optimise\",\n \"hessian\",\n \"mod_sem\",\n \"density\",\n \"charges\",\n \"lennard_jones\",\n \"torsion_scan\",\n \"torsion_optimise\",\n \"finalise\",\n ],\n help=\"Enter the end point of the QUBEKit job.\",\n )\n parser.add_argument(\n \"-progress\",\n \"--progress\",\n nargs=\"?\",\n const=True,\n help=\"Get the current progress of a QUBEKit single or bulk job.\",\n action=ProgressAction,\n )\n parser.add_argument(\n \"-skip\",\n \"--skip\",\n nargs=\"+\",\n choices=[\n \"mm_optimise\",\n \"qm_optimise\",\n \"hessian\",\n \"mod_sem\",\n 
\"density\",\n \"charges\",\n \"lennard_jones\",\n \"torsion_scan\",\n \"torsion_optimise\",\n \"finalise\",\n ],\n help=\"Option to skip certain stages of the execution.\",\n )\n parser.add_argument(\n \"-tor_test\",\n \"--torsion_test\",\n action=\"store_true\",\n help=\"Enter True if you would like to run a torsion test on the chosen torsions.\",\n )\n parser.add_argument(\n \"-tor_make\",\n \"--torsion_maker\",\n action=TorsionMakerAction,\n help=\"Allow QUBEKit to help you make a torsion input file for the given molecule\",\n )\n parser.add_argument(\n \"-log\",\n \"--log\",\n type=str,\n help=\"Enter a name to tag working directories with. Can be any alphanumeric string.\"\n \"This helps differentiate (by more than just date) different analyses of the \"\n \"same molecule.\",\n )\n parser.add_argument(\n \"-vib\",\n \"--vib_scaling\",\n type=float,\n help=\"Enter the vibrational scaling to be used with the basis set.\",\n )\n parser.add_argument(\n \"-iters\",\n \"--iterations\",\n type=int,\n help=\"Max number of iterations for QM scan.\",\n )\n parser.add_argument(\n \"-constraints\",\n \"--constraints_file\",\n type=str,\n help=\"The name of the geometric constraints file.\",\n )\n parser.add_argument(\n \"-dihedrals\",\n \"--dihedral_file\",\n type=str,\n help=\"The name of the qubekit/tdrive torsion file.\",\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n choices=[True, False],\n type=string_to_bool,\n help=\"Decide whether the log file should contain all the input/output information\",\n )\n parser.add_argument(\n \"-display\",\n \"--display\",\n type=str,\n nargs=\"+\",\n action=DisplayMolAction,\n help=\"Get the molecule object with this name in the cwd\",\n )\n parser.add_argument(\n \"-symmetry\",\n \"--enable_symmetry\",\n choices=[True, False],\n type=string_to_bool,\n help=\"Enable or disable the use of symmetrisation for bond, angle, charge, and \"\n \"Lennard-Jones parameters\",\n )\n parser.add_argument(\n \"-sites\",\n \"--enable_virtual_sites\",\n choices=[True, False],\n type=string_to_bool,\n help=\"Enable or disable the use of virtual sites in the charge fitting.\",\n )\n parser.add_argument(\n \"-site_err\",\n \"--v_site_error_factor\",\n type=float,\n help=\"Maximum error factor from adding a site that means the site will be kept\",\n )\n\n # Add mutually exclusive groups to stop certain combinations of options,\n # e.g. setup should not be run with csv command\n groups = parser.add_mutually_exclusive_group()\n groups.add_argument(\n \"-setup\",\n \"--setup_config\",\n nargs=\"?\",\n const=True,\n help=\"Setup a new configuration or edit an existing one.\",\n action=SetupAction,\n )\n groups.add_argument(\n \"-sm\",\n \"--smiles\",\n nargs=\"+\",\n help=\"Enter the smiles string of a molecule as a starting point.\",\n )\n groups.add_argument(\n \"-bulk\",\n \"--bulk_run\",\n help=\"Enter the name of the csv file to run as bulk, bulk will use smiles unless it finds \"\n \"a molecule file with the same name.\",\n )\n groups.add_argument(\n \"-csv\",\n \"--csv_filename\",\n action=CSVAction,\n nargs=\"*\",\n help=\"Enter the name of the csv file you would like to create for bulk runs. 
\"\n \"Optionally, you may also add the maximum number of molecules per file.\",\n )\n groups.add_argument(\n \"-i\", \"--input\", help=\"Enter the molecule input pdb file (only pdb so far!)\"\n )\n groups.add_argument(\"-version\", \"--version\", action=\"version\", version=\"2.6.3\")\n\n # Ensures help is shown (rather than an error) if no arguments are provided.\n return parser.parse_args(args=None if sys.argv[1:] else [\"--help\"])",
"def menu(options):\r\n hashed_options = False\r\n if not options:\r\n return None\r\n menu = dict(menuItems=[item(thing) for thing in options])\r\n if all_are_instance(options, (tuple, NoneType)):\r\n hashed_options = True\r\n plist = to_plist(menu)\r\n proc = subprocess.Popen([dialog, '-u'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)\r\n proc.stdin.write(plist)\r\n output, _ = proc.communicate()\r\n result = from_plist(output)\r\n if not 'selectedIndex' in result:\r\n return None\r\n index = int(result['selectedIndex'])\r\n if hashed_options:\r\n return options[index][1]\r\n return options[index]",
"def parse_commands():\n\n # Action classes\n class SetupAction(argparse.Action):\n \"\"\"The setup action class that is called when setup is found in the command line.\"\"\"\n\n def __call__(self, pars, namespace, values, option_string=None):\n \"\"\"This function is executed when setup is called.\"\"\"\n\n choice = int(input('You can now edit config files using QUBEKit, choose an option to continue:\\n'\n '1) Edit a config file\\n'\n '2) Create a new master template\\n'\n '3) Make a normal config file\\n>'))\n\n if choice == 1:\n inis = Configure.show_ini()\n name = input(f'Enter the name or number of the config file to edit\\n'\n f'{\"\".join(f\"{inis.index(ini)}:{ini} \" for ini in inis)}\\n>')\n # make sure name is right\n if name in inis:\n Configure.ini_edit(name)\n else:\n Configure.ini_edit(inis[int(name)])\n\n elif choice == 2:\n Configure.ini_writer('master_config.ini')\n Configure.ini_edit('master_config.ini')\n\n elif choice == 3:\n name = input('Enter the name of the config file to create\\n>')\n Configure.ini_writer(name)\n Configure.ini_edit(name)\n\n else:\n raise KeyError('Invalid selection; please choose from 1, 2 or 3.')\n\n sys_exit()\n\n class CSVAction(argparse.Action):\n \"\"\"The csv creation class run when the csv option is used.\"\"\"\n\n def __call__(self, pars, namespace, values, option_string=None):\n \"\"\"This function is executed when csv is called.\"\"\"\n\n generate_bulk_csv(values)\n sys_exit()\n\n class ProgressAction(argparse.Action):\n \"\"\"Run the pretty progress function to get the progress of all running jobs.\"\"\"\n\n def __call__(self, pars, namespace, values, option_string=None):\n \"\"\"This function is executed when progress is called.\"\"\"\n\n pretty_progress()\n sys_exit()\n\n parser = argparse.ArgumentParser(prog='QUBEKit', formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"\"\"QUBEKit is a Python 3.6+ based force field derivation toolkit for Linux operating systems.\nOur aims are to allow users to quickly derive molecular mechanics parameters directly from quantum mechanical calculations.\nQUBEKit pulls together multiple pre-existing engines, as well as bespoke methods to produce accurate results with minimal user input.\nQUBEKit aims to use as few parameters as possible while also being highly customisable.\"\"\", epilog=\"\"\"QUBEKit should currently be considered a work in progress.\nWhile it is stable we are constantly working to improve the code and broaden its compatibility. 
\nWe use lots of software written by many different people;\nif reporting a bug please (to the best of your ability) make sure it is a bug with QUBEKit and not with a dependency.\nWe welcome any suggestions for additions or changes.\"\"\")\n\n # Add all of the command line options in the arg parser\n parser.add_argument('-c', '--charge', default=0, type=int, help='Enter the charge of the molecule, default 0.')\n parser.add_argument('-m', '--multiplicity', default=1, type=int, help='Enter the multiplicity of the '\n 'molecule, default 1.')\n parser.add_argument('-ddec', '--ddec_version', choices=[3, 6], type=int,\n help='Enter the ddec version for charge partitioning, does not effect ONETEP partitioning.')\n parser.add_argument('-geo', '--geometric', choices=[True, False], type=bool,\n help='Turn on geometric to use this during the qm optimisations, recommended.')\n parser.add_argument('-bonds', '--bonds_engine', choices=['psi4', 'g09'],\n help='Choose the QM code to calculate the bonded terms.')\n parser.add_argument('-charges', '--charges_engine', choices=['onetep', 'chargemol'],\n help='Choose the method to do the charge partioning.')\n parser.add_argument('-density', '--density_engine', choices=['onetep', 'g09', 'psi4'],\n help='Enter the name of the QM code to calculate the electron density of the molecule.')\n parser.add_argument('-solvent', '--solvent',\n help='Enter the dielectric constant or the name of the solvent you wish to use.')\n # maybe separate into known solvents and IPCM constants?\n parser.add_argument('-convergence', '--convergence', choices=['GAU', 'GAU_TIGHT', 'GAU_VERYTIGHT'],\n help='Enter the convergence criteria for the optimisation.')\n parser.add_argument('-param', '--parameter_engine', choices=['xml', 'gaff', 'gaff2', 'openff'],\n help='Enter the method of where we should get the initial molecule parameters from, '\n 'if xml make sure the xml has the same name as the pdb file.')\n parser.add_argument('-mm', '--mm_opt_method', default='openmm', choices=['openmm', 'rdkit_mff', 'rdkit_uff'],\n help='Enter the mm optimisation method for pre qm optimisation.')\n parser.add_argument('-config', '--config_file', default='default_config', choices=Configure.show_ini(),\n help='Enter the name of the configuration file you wish to use for this run from the list '\n 'available, defaults to master.')\n parser.add_argument('-theory', '--theory',\n help='Enter the name of the qm theory you would like to use.')\n parser.add_argument('-basis', '--basis',\n help='Enter the basis set you would like to use.')\n parser.add_argument('-restart', '--restart', choices=['parametrise', 'mm_optimise', 'qm_optimise', 'hessian',\n 'mod_sem', 'density', 'charges', 'lennard_jones',\n 'torsion_scan', 'torsion_optimise'],\n help='Enter the restart point of a QUBEKit job.')\n parser.add_argument('-end', '-end', choices=['mm_optimise', 'qm_optimise', 'hessian', 'mod_sem', 'density',\n 'charges', 'lennard_jones', 'torsion_scan', 'torsion_optimise',\n 'finalise'], help='Enter the end point of the QUBEKit job.')\n parser.add_argument('-progress', '--progress', nargs='?', const=True,\n help='Get the current progress of a QUBEKit single or bulk job.', action=ProgressAction)\n parser.add_argument('-combination', '--combination', default='opls', choices=['opls', 'amber'],\n help='Enter the combination rules that should be used.')\n parser.add_argument('-skip', '--skip', nargs='+', choices=['mm_optimise', 'qm_optimise', 'hessian', 'mod_sem',\n 'density', 'charges', 'lennard_jones',\n 'torsion_scan', 
'torsion_optimise', 'finalise'],\n help='Option to skip certain stages of the execution.')\n\n # Add mutually exclusive groups to stop wrong combinations of options,\n # e.g. setup should not be ran with another command\n groups = parser.add_mutually_exclusive_group()\n groups.add_argument('-setup', '--setup_config', nargs='?', const=True,\n help='Setup a new configuration or edit an existing one.', action=SetupAction)\n groups.add_argument('-sm', '--smiles', help='Enter the smiles string of a molecule as a starting point.')\n groups.add_argument('-bulk', '--bulk_run',\n help='Enter the name of the csv file to run as bulk, bulk will use smiles unless it finds '\n 'a molecule file with the same name.')\n groups.add_argument('-csv', '--csv_filename',\n help='Enter the name of the csv file you would like to create for bulk runs.',\n action=CSVAction)\n groups.add_argument('-i', '--input', help='Enter the molecule input pdb file (only pdb so far!)')\n\n return parser.parse_args()",
"def parse_cli():\n description = \"example: ./usb-watch.py [-d] [-p <pid file>] [-s]\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"-p\",\n \"--pid_file\",\n help=\"Location of PID file\",\n default=PID_FILE,\n required=False)\n parser.add_argument(\"-d\",\n \"--daemonize\",\n help=\"Daemonize/fork to background\",\n action=\"store_true\")\n parser.add_argument(\"-s\",\n \"--sms\",\n help=\"Disable SMS messaging\",\n action=\"store_false\")\n\n args = parser.parse_args()\n return args",
"def attrEnumOptionMenu(string, docTag=\"string\", height=int, defineTemplate=\"string\", parent=\"string\", numberOfPopupMenus=bool, useTemplate=\"string\", manage=bool, label=\"string\", dragCallback=\"string\", highlightColor=float, annotation=\"string\", enable=bool, preventOverride=bool, popupMenuArray=bool, width=int, exists=bool, changeCommand=\"string\", enableBackground=bool, visibleChangeCommand=\"string\", visible=bool, fullPathName=bool, attribute=\"string\", dropCallback=\"string\", noBackground=bool, backgroundColor=float, enumeratedItem=int, isObscured=bool):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse an on do handler
|
def on_do_handler():
    @generate
    def do_exprseq():
        yield keyword("do")
        yield normalspaces()
        handler = yield expression  # expr_seq
        return handler

    yield keyword("on")
    yield normalspaces()
    event = yield var_name()
    yield normalspaces()
    handler = yield function_return | do_exprseq
    return s.Construct(s.ON_DO_HANDLER, event, handler)
|
[
"def _parse(self):\n\t\t\n\t\tself.reply_msg = MessageHandler.fire_handlers(self)",
"def on_map_do_handler():\n @generate\n def do_exprseq():\n yield keyword(\"do\")\n yield normalspaces()\n handler = yield expression # expr_seq\n return handler\n\n yield keyword(\"on\")\n yield normalspaces()\n yield keyword(\"map\")\n yield normalspaces()\n event = yield var_name()\n yield normalspaces()\n varname = yield var_name() # pylint: disable=unused-variable\n yield normalspaces()\n handler = yield function_return | do_exprseq\n # this is definitely faulty, we ignore the varname\n return s.Construct(s.ON_MAP_DO_HANDLER, event, handler)",
"def parse_event(self, event):",
"def handle(self, handler: Handler):\n pass",
"def handle_request(self,req):\r\n self.process_request(req)",
"def process_request(self,req):\r\n pass",
"def handle_request(self, tpe, obj_dict):\n if tpe == 'DataRequest':\n return self._process_data_request(obj_dict)\n if tpe == 'ConfigRequest':\n return self._process_config_request(obj_dict)\n return warning(f'Unknown command type {tpe}')",
"def handle(self):\n\n while len(self.data) >= 2:\n self.cmdHeader = self.data[:2]\n self.cmdCode, self.cmdNum = self.cmdHeader\n self.ioLogger.debug(\"command %d, %d, %d bytes\",\n self.cmdCode, self.cmdNum, len(self.data))\n try:\n self.handlers[self.cmdCode]()\n except IncompleteDataError:\n self.ioLogger.info('not enough data for one command (%d bytes). Waiting.', len(self.data))\n return\n except KeyError:\n raise RuntimeError(f\"unknown call: {self.data[0]}\")\n self.ioLogger.debug('command %d,%d handled; %d bytes in buffer',\n self.cmdCode, self.cmdNum, len(self.data))",
"def tool_handler():\n yield keyword(\"on\")\n yield normalspaces()\n yield var_name()\n yield normalspaces()\n yield optional(var_name())\n yield normalspaces()\n yield keyword(\"do\")\n yield normalspaces()\n expr = yield expression\n return expr",
"def handle(event={}, context={}):\n LoLNewsHandler().run()\n return 'ok'",
"def rollout_handler():\n yield keyword(\"on\")\n yield normalspaces()\n handlername = yield var_name()\n yield normalspaces()\n varn = yield var_name()\n yield normalspaces()\n varn2 = yield optional(var_name())\n yield normalspaces()\n varn3 = yield optional(var_name())\n yield normalspaces()\n yield keyword(\"do\")\n yield normalspaces()\n expr = yield expression\n return s.Construct(s.ROLLOUT_HANDLER, handlername, varn, varn2, varn3, expr)",
"def parse_Element(self, node):\n name = node.tagName\n ignores = self.ignores\n if name in ignores:\n return\n attr = \"do_%s\" % name\n if hasattr(self, attr):\n handlerMethod = getattr(self, attr)\n handlerMethod(node)\n else:\n self.generic_parse(node)\n #if name not in self.generics: self.generics.append(name)",
"def my_event_handler(sender, event):\n print(\"Event:\")\n print(\" sender:\", sender)\n print(\" event.event:\", event.event)\n print(\" event.parsed:\", event.parsed)",
"def view_handler(self, item, event, data=None):\n # forward everything\n self.handler(item, event, data)",
"def process(context, boundTo, data):",
"def read_handler(host, port, handler):\n\n sock = socket.socket()\n sock.connect((host, port))\n\n f_hand = sock.makefile()\n line = f_hand.readline()\n\n if line != \"Click::ControlSocket/1.3\\n\":\n raise ValueError(\"Unexpected reply: %s\" % line)\n\n cmd = \"read %s\\n\" % handler\n sock.send(cmd.encode(\"utf-8\"))\n\n line = f_hand.readline()\n\n regexp = '([0-9]{3}) (.*)'\n match = re.match(regexp, line)\n\n while not match:\n line = f_hand.readline()\n match = re.match(regexp, line)\n\n groups = match.groups()\n\n if int(groups[0]) == 200:\n\n line = f_hand.readline()\n res = line.split(\" \")\n\n length = int(res[1])\n data = f_hand.read(length)\n\n return (int(groups[0]), data)\n\n return (int(groups[0]), line)",
"def handle(self):\n request_data = parse_request_json(self.request)\n response = None\n if request_data[SC.MSG_TITLE] == SC.MESSAGE_GET_ROLE:\n response = self.handle_get_role(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_BROADCAST_ROLES:\n response = self.handle_get_network_information(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_PRODUCE_VOTES:\n response = self.handle_produce_votes(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_DISTRIBUTE_VOTES:\n response = self.handle_distribute_votes(request_data)\n else:\n response = self.handle_unexpected_request()\n send_response_json(self.request, response, request_data[SC.MSG_ORIGIN])",
"def on_clone_do_handler():\n @generate\n def do_exprseq():\n yield keyword(\"do\")\n yield normalspaces()\n handler = yield expression # expr_seq\n return handler\n\n yield keyword(\"on\")\n yield normalspaces()\n yield keyword(\"clone\")\n yield normalspaces()\n thing = yield var_name()\n yield normalspaces()\n handler = yield function_return | do_exprseq\n return s.Construct(s.ON_CLONE_DO_HANDLER, thing, handler)",
"def handle_telegram(self, telegram):\n self.log.debug('got telegram: %s', telegram)\n\n try:\n parsed_telegram = self.telegram_parser.parse(telegram)\n except InvalidChecksumError as e:\n self.log.warning(str(e))\n except ParseError:\n self.log.exception(\"failed to parse telegram\")\n else:\n self.telegram_callback(parsed_telegram)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse an on map do handler
|
def on_map_do_handler():
    @generate
    def do_exprseq():
        yield keyword("do")
        yield normalspaces()
        handler = yield expression  # expr_seq
        return handler

    yield keyword("on")
    yield normalspaces()
    yield keyword("map")
    yield normalspaces()
    event = yield var_name()
    yield normalspaces()
    varname = yield var_name()  # pylint: disable=unused-variable
    yield normalspaces()
    handler = yield function_return | do_exprseq
    # this is definitely faulty, we ignore the varname
    return s.Construct(s.ON_MAP_DO_HANDLER, event, handler)
|
[
"def after_map(self, map):",
"def before_map(self, map):",
"def parse_event(self, event):",
"def serve_map(self, foo):\n if(self.phase == 1):\n rospy.loginfo(\"Requesting the map for map converter\")\n rospy.wait_for_service('dynamic_map')\n try:\n map_service = rospy.ServiceProxy('dynamic_map', GetMap)\n responce = map_service()\n return responce.map\n except rospy.ServiceException as e:\n rospy.loginfo(\"service call failed: %s\" %e)\n return None\n else:\n return self.static_map",
"def _parse(self):\n\t\t\n\t\tself.reply_msg = MessageHandler.fire_handlers(self)",
"def on_do_handler():\n @generate\n def do_exprseq():\n yield keyword(\"do\")\n yield normalspaces()\n handler = yield expression # expr_seq\n return handler\n\n yield keyword(\"on\")\n yield normalspaces()\n event = yield var_name()\n yield normalspaces()\n handler = yield function_return | do_exprseq\n return s.Construct(s.ON_DO_HANDLER, event, handler)",
"def after_map(self, dmrs, nodeid):\n pass",
"def handle(mapping, fvars=None):\n for url, ofno in utils.group(mapping, 2):\n if isinstance(ofno, tuple): \n ofn, fna = ofno[0], list(ofno[1:])\n else: \n ofn, fna = ofno, []\n fn, result = utils.re_subm('^' + url + '$', ofn, web.ctx.path)\n if result: # it's a match\n if fn.split(' ', 1)[0] == \"redirect\":\n url = fn.split(' ', 1)[1]\n if web.ctx.method == \"GET\":\n x = web.ctx.env.get('QUERY_STRING', '')\n if x: \n url += '?' + x\n return http.redirect(url)\n elif '.' in fn: \n x = fn.split('.')\n mod, cls = '.'.join(x[:-1]), x[-1]\n mod = __import__(mod, globals(), locals(), [\"\"])\n cls = getattr(mod, cls)\n else:\n cls = fn\n mod = fvars\n if isinstance(mod, types.ModuleType): \n mod = vars(mod)\n try: \n cls = mod[cls]\n except KeyError: \n return web.notfound()\n \n meth = web.ctx.method\n if meth == \"HEAD\":\n if not hasattr(cls, meth): \n meth = \"GET\"\n if not hasattr(cls, meth): \n return nomethod(cls)\n tocall = getattr(cls(), meth)\n args = list(result.groups())\n for d in re.findall(r'\\\\(\\d+)', ofn):\n args.pop(int(d) - 1)\n return tocall(*([urllib.unquote(x) for x in args] + fna))\n\n return web.notfound()",
"def process(context, boundTo, data):",
"def before_map(self, dmrs, nodeid):\n pass",
"def process_request(self,req):\r\n pass",
"def handle_request(self,req):\r\n self.process_request(req)",
"def handle_request(self, tpe, obj_dict):\n if tpe == 'DataRequest':\n return self._process_data_request(obj_dict)\n if tpe == 'ConfigRequest':\n return self._process_config_request(obj_dict)\n return warning(f'Unknown command type {tpe}')",
"def processItem(self,entry):\n pass",
"def json_handler(cls, fn: Handler) -> MessageHandler:\n return lambda message: fn(**cls.parse_json(message))",
"def map_subparser(parser):\n\n # Add the Toil options so the job store is the first argument\n Job.Runner.addToilOptions(parser)\n \n # General options\n \n parser.add_argument(\"sample_name\", type=str,\n help=\"sample name (ex NA12878)\")\n parser.add_argument(\"xg_index\", type=str,\n help=\"Path to xg index\") \n parser.add_argument(\"gcsa_index\", type=str,\n help=\"Path to GCSA index\")\n parser.add_argument(\"out_store\",\n help=\"output store. All output written here. Path specified using same syntax as toil jobStore\")\n parser.add_argument(\"--id_ranges\", type=str, default=None,\n help=\"Path to file with node id ranges for each chromosome in BED format.\")\n parser.add_argument(\"--kmer_size\", type=int,\n help=\"size of kmers to use in gcsa-kmer mapping mode\")\n\n # Add common options shared with everybody\n add_common_vg_parse_args(parser)\n\n # Add mapping options\n map_parse_args(parser)\n\n # Add common docker options\n add_container_tool_parse_args(parser)",
"def cmd_votenextmap(self, data, client, cmd=None):\n if not data:\n client.message('^7Invalid or missing data, try !help votenextmap')\n else:\n defaultvote = self.console.getCvar('g_allowvote').getInt()\n match = self.console.getMapsSoundingLike(data)\n if isinstance(match, basestring):\n mapname = match\n self.console.write('g_allowvote \"8\"')\n self.console.write('spoof %s callvote nextmap %s' % (client.cid, mapname))\n self.console.write('g_allowvote \"%s\"' % defaultvote)\n if client:\n client.message('^7Voted for ^2%s' % mapname)\n elif isinstance(match, list):\n client.message('do you mean : %s ?' % string.join(match,', '))\n else:\n client.message('^7cannot find any map like ^2%s^7.' % data)",
"def mapping_extract_value(mapping: Dict[str, Any], traverse: List[str]):\n return {\"value\": traverse_get(mapping, *traverse)}",
"def road_work_data_parse(self,response):\n\n print \"============================parse call back function==========================\"\n data = json.loads(response.body_as_unicode())\n pdata = data['rows']\n for i in range(0, len(pdata), 1):\n item = EventspiderItem()\n dl = pdata[i]\n item[\"spider_oid\"] = dl[u'id']\n item[\"spider_fststake\"] = dl[u\"startNumDis\"]\n item[\"spider_lststake\"] = dl[u\"endNumDis\"]\n item[\"spider_direction\"] = (dl[u'position'] + '_' + dl[u'directionTypeDis']).strip('\\r\\n\\t')\n item[\"description\"] = dl[u'describe'].strip().replace('\\n', '').replace('\\r', '')\n item[\"spider_dist\"] = self.calc_distance(fst_stake = item['spider_fststake'],\n lst_stake = item['spider_lststake'],\n desc = item[\"description\"])\n item[\"spider_postdate\"] = dl[u'detectionTimeDis']\n\n\n item[\"START_TIME\"] = dl[u'detectionTimeDis']\n item[\"END_TIME\"] = dl[u'planTimeDis']\n\n item['start_time'] =time.strptime(dl[u'detectionTimeDis'], '%Y-%m-%d %H:%M')\n item['end_time'] = time.strptime( dl[u'planTimeDis'], '%Y-%m-%d %H:%M') if dl[u'planTimeDis'] else None\n item['spider_status'] = self.check_status(postdate = item[\"START_TIME\"],\n plandate =item[\"END_TIME\"] )\n\n item[\"event_type\"] = 0\n item[\"reason\"] = 2\n item[\"event_source\"] = u\"1:上海市路政局\"\n item[\"loc_name\"] = dl[u'roadName']\n\n coord_array = dl[u'markingNumber']\n x0 = coord_array.split(\",\")[0]\n y0 = coord_array.split(\",\")[-1]\n item[\"ref_point\"] = self.convert_markingnum_coords(x=x0,y=y0) if coord_array else u\"NULL\"\n item[\"ref_point_type\"] = 1\n\n occupies = self.parse_occpuies(desc = item[\"description\"]) # return dict / array\n if occupies:\n item[\"occupy\"] = occupies['occupy']\n item[\"available\"] = occupies['available']\n item[\"city\"] = u\"上海\"\n\n \"\"\"\n +++++++++++++++++++++++DropItem cannot apply to all spider, drop item here ++++++++++++++++++++++++++++++\n\n \"\"\"\n if item['spider_status'] == \"overdue\":\n continue\n yield item"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse an on clone do handler
|
def on_clone_do_handler():
    @generate
    def do_exprseq():
        yield keyword("do")
        yield normalspaces()
        handler = yield expression  # expr_seq
        return handler

    yield keyword("on")
    yield normalspaces()
    yield keyword("clone")
    yield normalspaces()
    thing = yield var_name()
    yield normalspaces()
    handler = yield function_return | do_exprseq
    return s.Construct(s.ON_CLONE_DO_HANDLER, thing, handler)
|
[
"def on_handler_clone(self,\n nfa_name: str,\n run_id: str,\n state_name: str,\n event: BoboEvent):",
"def clone(self, data):",
"def _clone(context, obj, clone_id):\n return context.manage_clone(obj, clone_id)",
"def _parse(self):\n\t\t\n\t\tself.reply_msg = MessageHandler.fire_handlers(self)",
"def clone_run(current_run,main_script):\n \n #WRITE THIS",
"def _parse(self):\n self._chan = self.line[2]\n try:\n sender = re.findall(r\":(.*?)!(.*?)@(.*?)\\Z\", self.line[0])[0]\n except IndexError:\n self._host = self.line[0][1:]\n self._nick = self._ident = self._reply_nick = \"*\"\n return\n self._nick, self._ident, self._host = sender\n self._reply_nick = self._nick\n\n if self._msgtype in [\"PRIVMSG\", \"NOTICE\"]:\n if self.chan.lower() == self.my_nick:\n # This is a privmsg to us, so set 'chan' as the nick of the\n # sender instead of the 'channel', which is ourselves:\n self._chan = self._nick\n self._is_private = True\n self._msg = \" \".join(self.line[3:])[1:]\n if self._msgtype == \"PRIVMSG\":\n self._parse_args()\n self._parse_kwargs()",
"def SelectClone(self):\n\t\tif self.item.TemplateProcessing:\n\t\t\tself.item.TemplateProcessing.Run(self)",
"def git_clone(self, url, target):\n pass",
"def mutates(handler):\n\n @functools.wraps(handler)\n def inner(self, *args, **kwargs):\n self.debugger.save()\n name = handler.__name__[len(\"handle_\") :]\n self.command_history.append(name)\n return handler(self, *args, **kwargs)\n\n return inner",
"def _create_clone_tag_nodes(self, clones: list[Position]) -> Position:\n c, p = self.c, self.c.p\n # Create the found node.\n assert c.positionExists(c.lastTopLevel()), c.lastTopLevel()\n found = c.lastTopLevel().insertAfter()\n assert found\n assert c.positionExists(found), found\n found.h = f\"Found Tag: {self.find_text}\"\n # Clone nodes as children of the found node.\n for p in clones:\n # Create the clone directly as a child of found.\n p2 = p.copy()\n n = found.numberOfChildren()\n p2._linkCopiedAsNthChild(found, n)\n return found",
"def parse_event(self, event):",
"def parse_command(arg, ob):\n save = simulate.command_giver\n\n simulate.command_giver = ob\n res = simulate.player_parser(arg)\n simulate.command_giver = save\n return res",
"def on_created(self, e):\n def build_data(cmd, rel_new_path, new_md5, founded_path=None):\n \"\"\"\n Prepares the data from event handler to be delivered to connection_manager.\n \"\"\"\n data = {'cmd': cmd}\n if cmd == 'copy':\n data['file'] = {'src': founded_path,\n 'dst': rel_new_path,\n 'md5': new_md5}\n else:\n data['file'] = {'filepath': rel_new_path,\n 'md5': new_md5}\n return data\n\n new_md5 = self.hash_file(e.src_path)\n rel_new_path = self.relativize_path(e.src_path)\n founded_path = self.search_md5(new_md5)\n # with this check i found the copy events\n if founded_path:\n abs_founded_path = self.absolutize_path(founded_path)\n logger.info('Copy event from path : {}\\n to path: {}'.format(abs_founded_path, e.src_path))\n data = build_data('copy', rel_new_path, new_md5, founded_path)\n # this elif check that this create event aren't modify event.\n # Normally this never happen but sometimes watchdog fail to understand what has happened on file.\n # For example Gedit generate a create event instead modify event when a file is saved.\n elif rel_new_path in self.client_snapshot:\n logger.warning('WARNING this is modify event FROM CREATE EVENT!'\n 'Path of file already existent: {}'.format(e.src_path))\n data = build_data('modify', rel_new_path, new_md5)\n\n else: # Finally we find a real create event!\n logger.info('Create event on path: {}'.format(e.src_path))\n data = build_data('upload', rel_new_path, new_md5)\n\n # Send data to connection manager dispatcher and check return value.\n # If all go right update client_snapshot and local_dir_state\n if self._is_shared_file(rel_new_path):\n logger.warning('You are writing file in path: {}\\n'\n 'This is a read-only folder, so it will not be synchronized with server'\n .format(rel_new_path))\n else:\n response = self.conn_mng.dispatch_request(data['cmd'], data['file'])\n if response['successful']:\n event_timestamp = response['content']['server_timestamp']\n self.client_snapshot[rel_new_path] = [event_timestamp, new_md5]\n self.update_local_dir_state(event_timestamp)\n logger.debug('{} event completed.'.format(data['cmd']))\n else:\n self.stop(1, response['content'])",
"def copied(object, original):",
"def dup_object(self): # real signature unknown; restored from __doc__\n pass",
"def clone_to(parent, widget):\n try:\n if isinstance(widget, Widget):\n clone = widget.clone(parent)\n else:\n clone = widget.__class__(parent)\n Widget.copy_config(widget, clone)\n [Widget.clone_to(clone, i) for i in widget.winfo_children()]\n return clone\n except TypeError:\n logging.debug(f\"{widget.__class__} requires special clone handling\")",
"def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides):\n params = dict(self.get_param_values())\n if new_type is None:\n clone_type = self.__class__\n else:\n clone_type = new_type\n new_params = new_type.params()\n params = {k: v for k, v in params.items()\n if k in new_params}\n if params.get('group') == self.params()['group'].default:\n params.pop('group')\n settings = dict(params, **overrides)\n if 'id' not in settings:\n settings['id'] = self.id\n\n if data is None and shared_data:\n data = self.data\n # Apply name mangling for __ attribute\n pos_args = getattr(self, '_' + type(self).__name__ + '__pos_params', [])\n return clone_type(data, *args, **{k:v for k,v in settings.items()\n if k not in pos_args})",
"def copy(self):\n parser_copy = self.__class__(self.schema_class, self.argument_class, self.result_class)\n parser_copy.args = deepcopy(self.args)\n parser_copy.trim = self.trim\n parser_copy.bundel_errors = self.bundle_errors\n return parser_copy",
"def parse_incoming(self, bot, incoming):\n self.bots[str(incoming.message.chat_id)] = bot\n self.parser(incoming.message.chat_id, incoming.message.text)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a macroscript clause
|
def macroscript_clause():
    @generate
    def handler_block_item():
        ret = yield on_do_handler ^ local_decl ^ function_def
        return ret

    yield lparen
    yield normalspaces()
    handlers = yield sepBy1(handler_block_item, normalspaces())
    yield normalspaces()
    yield rparen
    return s.Construct(s.MACROSCRIPT_CLAUSE, handlers)
|
[
"def macroscript_def():\n yield keyword(\"macroscript\")\n yield normalspaces()\n vname = yield var_name()\n yield normalspaces()\n vnop = yield sepBy(named_argument, normalspaces())\n yield normalspaces()\n handlers = yield expr_seq ^ macroscript_clause\n\n return s.Construct(s.MACROSCRIPT_DEF, vname, vnop, handlers)",
"def code_insert_macro_block(text: str, selection: str):",
"def code_insert_macro(text: str, selection: str):",
"def parse_embed_script(self,tokiter,scopes,ends,parse_between=None):\n token=tokiter.next()\n if token.token_type != 'varname':\n self.error('embed',token)\n if token.token_value != 'bash':\n self.error('embed',token,'unknown language \"%s\"'%(\n token.token_value,))\n nametoken=tokiter.next()\n if token.token_type != 'varname':\n self.error('embed script name',token)\n scope=EmbedBash(scopes)\n token=tokiter.next()\n\n while token.token_type==end_of_line_type: token=tokiter.next()\n if token.token_type=='(':\n self.parse_subscope(tokiter,[scope]+scopes,[')'],\n self.parse_between_arguments,\n allow_overwrite=False,\n allow_resolve=False,\n allow_null=True,\n only_scalars=True,\n scope_name='embed script parameters')\n scope=scope.as_parameters(self.con(token,scopes))\n token=tokiter.next()\n while token.token_type==end_of_line_type: token=tokiter.next()\n\n if token.token_type=='{':\n self.parse_subscope(tokiter,[scope]+scopes,['}'],\n self.parse_between_assignments,\n allow_overwrite=True,\n allow_resolve=True,\n allow_null=False,\n allow_use=True,\n only_scalars=True,\n scope_name='embed script variables')\n token=tokiter.next()\n while token.token_type==end_of_line_type: token=tokiter.next()\n\n if token.token_type in [ 'qstring', 'dqstring', 'bracestring' ]:\n scope.settemplate(self.action_string([scope]+scopes,token))\n else:\n self.error('embed script contents',token)\n if parse_between: \n parse_between(tokiter)\n return (nametoken.token_value,scope)",
"def expand_macro_clause(clause, settings):\n macros = settings.get('macros', {})\n relation, arguments = clause\n if relation in macros:\n m_terms, m_positive_clauses, m_negative_clause_lists, _ = \\\n parse_concept(macros[relation], settings=settings)\n\n # Go through +ve/-ve clauses and rename vars.\n substitution_map = dict(zip(m_terms, arguments))\n substitution_map['self'] = 'self'\n positive_clauses = []\n negative_clause_lists = []\n for clause in m_positive_clauses:\n rel, args = clause\n args = tuple([substitution_map[arg] for arg in args])\n positive_clauses.append((rel, args))\n\n for negative_clause_list in m_negative_clause_lists:\n new_negative_clause_list = []\n for clause in negative_clause_list:\n rel, args = clause\n args = tuple([substitution_map[arg] for arg in args])\n new_negative_clause_list.append((rel, args))\n negative_clause_lists.append(new_negative_clause_list)\n else:\n positive_clauses = [clause]\n negative_clause_lists = []\n\n return positive_clauses, negative_clause_lists",
"def code_insert_macro_array(text: str, selection: str):",
"def test_parse_define_outside_subroutine(f2003_parser):\n code = \"#define MACRO\\nSUBROUTINE FOO\\n CALL sub\\nEND SUBROUTINE FOO\"\n reader = get_reader(code)\n result = f2003_parser(reader)\n assert str(result) == code",
"def parse_py_statement(line):\n state = 0\n cur_token = \"\"\n spaces = \" \\t\\n\"\n ops = \".,;:+-*/%&!=|(){}[]^<>\"\n i = 0\n\n def _escape_char(_c):\n if _c == \"n\":\n return \"\\n\"\n elif _c == \"t\":\n return \"\\t\"\n else:\n return _c\n\n while i < len(line):\n c = line[i]\n i += 1\n if state == 0:\n if c in spaces:\n pass\n elif c in ops:\n yield \"op\", c\n elif c == \"#\":\n state = 6\n elif c == '\"':\n state = 1\n elif c == \"'\":\n state = 2\n else:\n cur_token = c\n state = 3\n elif state == 1: # string via \"\n if c == \"\\\\\":\n state = 4\n elif c == '\"':\n yield \"str\", cur_token\n cur_token = \"\"\n state = 0\n else:\n cur_token += c\n elif state == 2: # string via '\n if c == \"\\\\\":\n state = 5\n elif c == \"'\":\n yield \"str\", cur_token\n cur_token = \"\"\n state = 0\n else:\n cur_token += c\n elif state == 3: # identifier\n if c in spaces + ops + \"#\":\n yield \"id\", cur_token\n cur_token = \"\"\n state = 0\n i -= 1\n elif c == '\"': # identifier is string prefix\n cur_token = \"\"\n state = 1\n elif c == \"'\": # identifier is string prefix\n cur_token = \"\"\n state = 2\n else:\n cur_token += c\n elif state == 4: # escape in \"\n cur_token += _escape_char(c)\n state = 1\n elif state == 5: # escape in '\n cur_token += _escape_char(c)\n state = 2\n elif state == 6: # comment\n cur_token += c\n if state == 3:\n yield \"id\", cur_token\n elif state == 6:\n yield \"comment\", cur_token",
"def extract_macro(parser):\n mac_ele_list = []\n mac_num = parser[\"mac_num\"]\n if parser[\"mac_use\"] == \"all\": #抓取出所有被macro納入之參數\n mac_num = int(parser[\"mac_num\"])\n for i in range(1, mac_num+1):\n ele = set_macro_ele(parser, i)\n mac_ele_list.append(ele)\n else:\n \"\"\"\n assign macro number not all\n 僅抓取指定納入之參數\n \"\"\"\n mac_no_list = parser[\"mac_use\"].split()\n for no in mac_no_list:\n ele = set_macro_ele(parser, int(no))\n mac_ele_list.append(ele)\n return mac_ele_list",
"def parse_asm_operand(self):\n constraint = self.parse_string()\n self.consume(\"(\")\n variable = self.parse_expression()\n self.consume(\")\")\n return (constraint, variable)",
"def _process_syntax(self, param_name, item):\n a_syntax = nodes.Syntax()\n logger = logging.getLogger(self.__class__.__name__)\n\n if \"@hidden\" in item:\n a_syntax.set_hidden(item[\"@hidden\"])\n\n if \"@command\" in item:\n a_syntax.set_command(item[\"@command\"])\n\n # In a Full CWMP-DM XML, Parameters always have a @name\n logger.debug(\n \"- Processing Syntax: hidden=\\\"{}\\\", command=\\\"{}\\\"\"\n .format(a_syntax.get_hidden(), a_syntax.get_command()))\n\n if \"list\" in item:\n a_syntax.set_list_element(self._process_list_facet(item[\"list\"]))\n\n # A Syntax Element will either have a DataType or a Type Element (string, int, etc.)\n if \"dataType\" in item:\n data_type_ref_name = item[\"dataType\"][\"@ref\"]\n a_syntax.set_data_type_ref(data_type_ref_name)\n\n if not self.doc.has_data_type(data_type_ref_name):\n logger.warning(\n \"Parameter {} References {} Data Type that can't be found\"\n .format(param_name, data_type_ref_name))\n else:\n a_syntax.set_type_element(self._process_type_element(item))\n\n if \"default\" in item:\n a_syntax.set_default(self._process_default(item[\"default\"]))\n\n return a_syntax",
"def compileStatement(self):\n if self.token() == 'do':\n self.compileDo()\n elif self.token() == 'let':\n self.compileLet()\n elif self.token() == 'if':\n self.compileIf()\n elif self.token() == 'while':\n self.compileWhile()\n elif self.token() == 'return':\n self.compileReturn()",
"def hnd_magic(line,mo):\n var = mo.group('varname')\n cmd = mo.group('cmd')\n expr = make_quoted_expr(cmd)\n return itpl('$var = get_ipython().magic($expr)')",
"def tool_handler():\n yield keyword(\"on\")\n yield normalspaces()\n yield var_name()\n yield normalspaces()\n yield optional(var_name())\n yield normalspaces()\n yield keyword(\"do\")\n yield normalspaces()\n expr = yield expression\n return expr",
"def get_cmake_macros_args(s):\n words = capture_util.split_line(s)\n if len(words) < 1:\n raise capture_util.ParserError(\"CMake macro/function define command args error! '%s'\" % s)\n\n macro_name = words[0]\n args = words[1:]\n return (macro_name, args)",
"def scanamfile(amfile):\n amfile = \"\\n\" + amfile #Add \\n so you can guess vars\n tokens = (\n \"END\",\n \"COL\",\n \"EQ\",\n \"PEQ\",\n \"CVAR\",\n \"MVAR\",\n \"TEXT\",\n \"ENDTAB\",\n \"SPACE\",\n \"IF\",\n \"ELSE\",\n \"ENDIF\",\n )\n\n states = (\n (\"com\", \"exclusive\"), #comment\n (\"var\", \"inclusive\"),\n (\"if\", \"exclusive\"),\n )\n\n def t_begin_com(t):\n r\"[ \\t]*\\#\"\n t.lexer.begin(\"com\")\n\n def t_com_other(t):\n r\"[^\\\\\\n]+\"\n pass\n\n def t_com_lit(t):\n r\"\\\\.\"\n pass\n\n def t_com_newline(t):\n r\".*\\\\\\n\"\n t.lexer.lineno += 1\n pass\n\n def t_ifbegin(t):\n #ugly hack to ensure that this is at the begining of the line and keep the newline token.\n #PLY doesn't support the \"^\" beginning of line regexp :,(\n r\"\\nif\"\n t.type = \"END\"\n t.lexer.push_state(\"if\")\n return t\n\n def t_if_IF(t):\n #http://www.gnu.org/s/hello/manual/automake/Usage-of-Conditionals.html#Usage-of-Conditionals\n r\"[ \\t]+[^ \\n\\t]*\"\n t.value = t.value.strip() #take the variable to test\n t.lexer.pop_state()\n return t\n\n def t_ELSE(t):\n r\"\\nelse\"\n return t\n\n def t_ENDIF(t):\n r\"\\nendif\"\n return t\n\n def t_CVAR(t): #configure variable\n r\"@.*?@\" #not greedy\n return t\n\n def t_MVAR(t): #makefile variable\n r\"\\$\\(.*?\\)\"\n return t\n\n def t_com_END(t):\n r\"\\n\"\n t.lexer.begin(\"INITIAL\")\n t.lexer.lineno += 1\n return t\n\n def t_EQ(t):\n r\"[ \\t]*=[ \\t]*\"\n t.lexer.begin(\"var\")\n t.value = t.value.strip()\n return t\n\n def t_PEQ(t):\n r\"[ \\t]*\\+=[ \\t]*\"\n t.lexer.begin(\"var\")\n t.value = t.value.strip()\n return t\n\n def t_contline(t):\n r\"\\\\\\n\"\n t.lexer.lineno += 1\n pass\n\n def t_litteral(t):\n r\"\\\\.\"\n t.value = t.value[1] #take the literal char\n t.type = \"TEXT\"\n return t\n\n def t_COL(t):\n r\"[ \\t]*:[ \\t]*\"\n t.lexer.begin(\"var\")\n return t\n\n def t_var_ENDTAB(t):\n r\"[ \\t]*;[ \\t]*\"\n return t\n\n def t_ENDTAB(t):\n r\"[ \\t]*\\n\\t[ \\t]*\"\n t.lexer.lineno += 1\n return t\n\n def t_var_TEXT(t):\n r\"[^ #\\n\\t,\\$@\\\\]+\"\n return t\n\n def t_TEXT(t):\n r\"[^ \\n\\t:=\\$@\\\\]+\"\n return t\n\n def t_END(t):\n r\"[ \\t]*\\n\"\n t.lexer.lineno += t.value.count('\\n')\n t.lexer.begin('INITIAL')\n return t\n\n def t_var_SPACE(t):\n r\"[ \\t]+\"\n return t\n\n def t_space(t):\n r\"[ \\t]\"\n pass\n\n def t_var_special(t):\n r\"\\$[^({]\"\n t.type = \"TEXT\"\n return t\n\n def t_ANY_error(t):\n print(\"Illegal character '%s'\" % t.value[0])\n t.lexer.skip(1)\n\n lexer = lex.lex()\n\n #lexer.input(amfile)\n #for tok in lexer:\n # print(tok)\n\n #YACC stuff begins here\n\n def p_done(p):\n \"done : vars end\"\n p[0] = p[1]\n\n def p_vars(p):\n \"\"\"\n vars : vars end var\n | end var\n \"\"\"\n if len(p) == 4:\n p[1][0].update(p[3][0])\n p[1][2].update(p[3][2])\n p[0] = [p[1][0], p[1][1] + p[3][1], p[1][2]]\n\n else:\n p[0] = p[2]\n\n def p_if(p):\n \"\"\"\n var : IF vars ENDIF\n | IF vars ELSE vars ENDIF\n \"\"\"\n if len(p) == 4:\n p[0] = [{},[],{p[1]:p[2]}]\n\n else:\n p[0] = [{},[],{p[1]:p[2],\"!\"+p[1]:p[4]}]\n\n def p_var(p):\n \"\"\"\n var : textstr EQ textlst\n | textstr EQ\n | textstr PEQ textlst\n \"\"\"\n if p[2] == \"=\":\n if len(p) == 4:\n p[0] = [{p[1]: p[3]},[],{}]\n else:\n p[0] = [{p[1]: []},[],{}]\n else:\n p[0] = [{},[[p[1], p[3]]],{}]\n\n def p_textlst(p):\n \"\"\"\n textlst : textlst spacestr textstr\n | textstr\n \"\"\"\n if len(p) == 4:\n p[0] = p[1] + [p[3]]\n else:\n p[0] = [p[1]]\n\n def p_teststr(p):\n \"\"\"\n textstr : textstr TEXT\n | textstr CVAR\n | textstr MVAR\n | 
TEXT\n | CVAR\n | MVAR\n \"\"\"\n if len(p) == 3:\n p[0] = p[1] + p[2]\n else:\n p[0] = p[1]\n\n def p_space(p):\n \"\"\"\n spacestr : spacestr SPACE\n | SPACE\n \"\"\"\n if len(p) == 3:\n p[0] = p[1] + p[2]\n else:\n p[0] = p[1]\n\n def p_end(p):\n \"\"\"\n end : end END\n | END\n \"\"\"\n\n def p_error(p):\n print(\"syntax error at '%s'\" % p.type,p.value)\n pass\n\n yacc.yacc()\n\n variables = yacc.parse(amfile)\n return variables",
"def in_game_parse(line):\n tokens = line.split()\n if not tokens:\n raise SyntaxError('No Command Given')\n \"*** YOUR CODE HERE ***\"\n\n exp = ['Place', ['Put piece here', 'Put space here']]\n symbols = ['X', 'O']\n places = [str(i) for i in range(9)]\n\n while tokens:\n token = tokens.pop(0)\n \"*** YOUR CODE HERE ***\"\n raise SyntaxError(\"Symbol or Place is missing\")",
"def parse_formula_id_wiki(document_id, mathml):\n index = mathml.index('id=\"')\n formula_id = \"?\"\n if(index != -1):\n # find document id\n start = index + len('id=\"')\n end = start\n while (not mathml[start:end+1].endswith('\"')):\n end += 1\n doc_id = mathml[start:end]\n formula_id = doc_id.split(\":\")[-1]\n else:\n print(\"FAILED PARSING FORMULA ID: \" + document_id)\n print(mathml)\n return formula_id",
"def parse_macros_values(macros: dict):\n values_changed = True\n arithmetic_parser = MacrosArithmeticParser()\n while values_changed:\n values_changed = False\n for name, value in macros.items():\n new_value = None\n if value and not isinstance(value, (int, float)):\n try:\n result = arithmetic_parser.parse(value)\n new_value = result.evaluate()\n except NameError:\n continue\n if new_value is not None and new_value != value:\n macros[name] = new_value\n values_changed = True\n arithmetic_parser.add_variables({name: new_value, })\n return macros"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a macroscript def
|
def macroscript_def():
    yield keyword("macroscript")
    yield normalspaces()
    vname = yield var_name()
    yield normalspaces()
    vnop = yield sepBy(named_argument, normalspaces())
    yield normalspaces()
    handlers = yield expr_seq ^ macroscript_clause

    return s.Construct(s.MACROSCRIPT_DEF, vname, vnop, handlers)
|
[
"def macroscript_clause():\n @generate\n def handler_block_item():\n ret = yield on_do_handler ^ local_decl ^ function_def\n return ret\n yield lparen\n yield normalspaces()\n handlers = yield sepBy1(handler_block_item, normalspaces())\n yield normalspaces()\n yield rparen\n return s.Construct(s.MACROSCRIPT_CLAUSE, handlers)",
"def extract_macro(parser):\n mac_ele_list = []\n mac_num = parser[\"mac_num\"]\n if parser[\"mac_use\"] == \"all\": #抓取出所有被macro納入之參數\n mac_num = int(parser[\"mac_num\"])\n for i in range(1, mac_num+1):\n ele = set_macro_ele(parser, i)\n mac_ele_list.append(ele)\n else:\n \"\"\"\n assign macro number not all\n 僅抓取指定納入之參數\n \"\"\"\n mac_no_list = parser[\"mac_use\"].split()\n for no in mac_no_list:\n ele = set_macro_ele(parser, int(no))\n mac_ele_list.append(ele)\n return mac_ele_list",
"def parseInput(input):\n # parse=bash(\"sh ../bitpar/parse '\"+input+\"'\") # ouput: [.VP [.V draw][.NP [.D a][.N-bar [.N square]]]]\n bash(\"java -jar ../lambda/lambda-auto.jar ../lambda/input.txt > ../lambda/input.tex\")\n fml=bash(\"make -C ../lambda input.fml\")\n print fml\n cmd=`fml`.split('true ')[1]\n \n # TEST CASES\n # cmd=\"draw(Gy[red(y) & square(y)])\" \n cmd=\"draw(\\gamma y(red(y) & square(y))).\"\n\n print cmd\n parse(cmd)",
"def test_parse_define_outside_subroutine(f2003_parser):\n code = \"#define MACRO\\nSUBROUTINE FOO\\n CALL sub\\nEND SUBROUTINE FOO\"\n reader = get_reader(code)\n result = f2003_parser(reader)\n assert str(result) == code",
"def parse(name):\n\n pass",
"def code_insert_macro(text: str, selection: str):",
"def scanamfile(amfile):\n amfile = \"\\n\" + amfile #Add \\n so you can guess vars\n tokens = (\n \"END\",\n \"COL\",\n \"EQ\",\n \"PEQ\",\n \"CVAR\",\n \"MVAR\",\n \"TEXT\",\n \"ENDTAB\",\n \"SPACE\",\n \"IF\",\n \"ELSE\",\n \"ENDIF\",\n )\n\n states = (\n (\"com\", \"exclusive\"), #comment\n (\"var\", \"inclusive\"),\n (\"if\", \"exclusive\"),\n )\n\n def t_begin_com(t):\n r\"[ \\t]*\\#\"\n t.lexer.begin(\"com\")\n\n def t_com_other(t):\n r\"[^\\\\\\n]+\"\n pass\n\n def t_com_lit(t):\n r\"\\\\.\"\n pass\n\n def t_com_newline(t):\n r\".*\\\\\\n\"\n t.lexer.lineno += 1\n pass\n\n def t_ifbegin(t):\n #ugly hack to ensure that this is at the begining of the line and keep the newline token.\n #PLY doesn't support the \"^\" beginning of line regexp :,(\n r\"\\nif\"\n t.type = \"END\"\n t.lexer.push_state(\"if\")\n return t\n\n def t_if_IF(t):\n #http://www.gnu.org/s/hello/manual/automake/Usage-of-Conditionals.html#Usage-of-Conditionals\n r\"[ \\t]+[^ \\n\\t]*\"\n t.value = t.value.strip() #take the variable to test\n t.lexer.pop_state()\n return t\n\n def t_ELSE(t):\n r\"\\nelse\"\n return t\n\n def t_ENDIF(t):\n r\"\\nendif\"\n return t\n\n def t_CVAR(t): #configure variable\n r\"@.*?@\" #not greedy\n return t\n\n def t_MVAR(t): #makefile variable\n r\"\\$\\(.*?\\)\"\n return t\n\n def t_com_END(t):\n r\"\\n\"\n t.lexer.begin(\"INITIAL\")\n t.lexer.lineno += 1\n return t\n\n def t_EQ(t):\n r\"[ \\t]*=[ \\t]*\"\n t.lexer.begin(\"var\")\n t.value = t.value.strip()\n return t\n\n def t_PEQ(t):\n r\"[ \\t]*\\+=[ \\t]*\"\n t.lexer.begin(\"var\")\n t.value = t.value.strip()\n return t\n\n def t_contline(t):\n r\"\\\\\\n\"\n t.lexer.lineno += 1\n pass\n\n def t_litteral(t):\n r\"\\\\.\"\n t.value = t.value[1] #take the literal char\n t.type = \"TEXT\"\n return t\n\n def t_COL(t):\n r\"[ \\t]*:[ \\t]*\"\n t.lexer.begin(\"var\")\n return t\n\n def t_var_ENDTAB(t):\n r\"[ \\t]*;[ \\t]*\"\n return t\n\n def t_ENDTAB(t):\n r\"[ \\t]*\\n\\t[ \\t]*\"\n t.lexer.lineno += 1\n return t\n\n def t_var_TEXT(t):\n r\"[^ #\\n\\t,\\$@\\\\]+\"\n return t\n\n def t_TEXT(t):\n r\"[^ \\n\\t:=\\$@\\\\]+\"\n return t\n\n def t_END(t):\n r\"[ \\t]*\\n\"\n t.lexer.lineno += t.value.count('\\n')\n t.lexer.begin('INITIAL')\n return t\n\n def t_var_SPACE(t):\n r\"[ \\t]+\"\n return t\n\n def t_space(t):\n r\"[ \\t]\"\n pass\n\n def t_var_special(t):\n r\"\\$[^({]\"\n t.type = \"TEXT\"\n return t\n\n def t_ANY_error(t):\n print(\"Illegal character '%s'\" % t.value[0])\n t.lexer.skip(1)\n\n lexer = lex.lex()\n\n #lexer.input(amfile)\n #for tok in lexer:\n # print(tok)\n\n #YACC stuff begins here\n\n def p_done(p):\n \"done : vars end\"\n p[0] = p[1]\n\n def p_vars(p):\n \"\"\"\n vars : vars end var\n | end var\n \"\"\"\n if len(p) == 4:\n p[1][0].update(p[3][0])\n p[1][2].update(p[3][2])\n p[0] = [p[1][0], p[1][1] + p[3][1], p[1][2]]\n\n else:\n p[0] = p[2]\n\n def p_if(p):\n \"\"\"\n var : IF vars ENDIF\n | IF vars ELSE vars ENDIF\n \"\"\"\n if len(p) == 4:\n p[0] = [{},[],{p[1]:p[2]}]\n\n else:\n p[0] = [{},[],{p[1]:p[2],\"!\"+p[1]:p[4]}]\n\n def p_var(p):\n \"\"\"\n var : textstr EQ textlst\n | textstr EQ\n | textstr PEQ textlst\n \"\"\"\n if p[2] == \"=\":\n if len(p) == 4:\n p[0] = [{p[1]: p[3]},[],{}]\n else:\n p[0] = [{p[1]: []},[],{}]\n else:\n p[0] = [{},[[p[1], p[3]]],{}]\n\n def p_textlst(p):\n \"\"\"\n textlst : textlst spacestr textstr\n | textstr\n \"\"\"\n if len(p) == 4:\n p[0] = p[1] + [p[3]]\n else:\n p[0] = [p[1]]\n\n def p_teststr(p):\n \"\"\"\n textstr : textstr TEXT\n | textstr CVAR\n | textstr MVAR\n | 
TEXT\n | CVAR\n | MVAR\n \"\"\"\n if len(p) == 3:\n p[0] = p[1] + p[2]\n else:\n p[0] = p[1]\n\n def p_space(p):\n \"\"\"\n spacestr : spacestr SPACE\n | SPACE\n \"\"\"\n if len(p) == 3:\n p[0] = p[1] + p[2]\n else:\n p[0] = p[1]\n\n def p_end(p):\n \"\"\"\n end : end END\n | END\n \"\"\"\n\n def p_error(p):\n print(\"syntax error at '%s'\" % p.type,p.value)\n pass\n\n yacc.yacc()\n\n variables = yacc.parse(amfile)\n return variables",
"def test_variables_macro():\n lines = \"\"\"\n [ macros ]\n int_macro 1\n quote_macro \"quoted\"\n unquoted_macro unquoted\n int_macro2 2\n\n [ variables ]\n int_variable $int_macro\n quote_variable $quote_macro\n unquoted_variable $unquoted_macro\n multiple $int_macro$int_macro2\n \"\"\"\n lines = textwrap.dedent(lines).splitlines()\n ff = vermouth.forcefield.ForceField(name='test_ff')\n vermouth.ffinput.read_ff(lines, ff)\n assert ff.variables == {'int_variable': 1, 'quote_variable': 'quoted',\n 'unquoted_variable': 'unquoted', 'multiple': 12}",
"def code_insert_macro_block(text: str, selection: str):",
"def _macro_list_change(self):\n self.macro_no += 1\n self._update_parser()\n self._update_macro_assert()\n self.update_test_run_name()\n self.run()",
"def parse(file):\n sections = _parse(file)\n pre_commands = sections.get('pre', [])\n post_commands = sections.get('post', [])\n params = Parameters(_parse_parameters(sections.get('params', [])))\n cmds = sections.get('jobs', [])\n fmt = _get_name_fmt(len(cmds))\n commands = collections.OrderedDict([(fmt.format(i), c.replace('${LINE}', fmt.format(i))) for i, c in enumerate(cmds)])\n return pre_commands, commands, post_commands, params",
"def get_cmake_macros_args(s):\n words = capture_util.split_line(s)\n if len(words) < 1:\n raise capture_util.ParserError(\"CMake macro/function define command args error! '%s'\" % s)\n\n macro_name = words[0]\n args = words[1:]\n return (macro_name, args)",
"def parse_embed_script(self,tokiter,scopes,ends,parse_between=None):\n token=tokiter.next()\n if token.token_type != 'varname':\n self.error('embed',token)\n if token.token_value != 'bash':\n self.error('embed',token,'unknown language \"%s\"'%(\n token.token_value,))\n nametoken=tokiter.next()\n if token.token_type != 'varname':\n self.error('embed script name',token)\n scope=EmbedBash(scopes)\n token=tokiter.next()\n\n while token.token_type==end_of_line_type: token=tokiter.next()\n if token.token_type=='(':\n self.parse_subscope(tokiter,[scope]+scopes,[')'],\n self.parse_between_arguments,\n allow_overwrite=False,\n allow_resolve=False,\n allow_null=True,\n only_scalars=True,\n scope_name='embed script parameters')\n scope=scope.as_parameters(self.con(token,scopes))\n token=tokiter.next()\n while token.token_type==end_of_line_type: token=tokiter.next()\n\n if token.token_type=='{':\n self.parse_subscope(tokiter,[scope]+scopes,['}'],\n self.parse_between_assignments,\n allow_overwrite=True,\n allow_resolve=True,\n allow_null=False,\n allow_use=True,\n only_scalars=True,\n scope_name='embed script variables')\n token=tokiter.next()\n while token.token_type==end_of_line_type: token=tokiter.next()\n\n if token.token_type in [ 'qstring', 'dqstring', 'bracestring' ]:\n scope.settemplate(self.action_string([scope]+scopes,token))\n else:\n self.error('embed script contents',token)\n if parse_between: \n parse_between(tokiter)\n return (nametoken.token_value,scope)",
"def parse_formula_id_wiki(document_id, mathml):\n index = mathml.index('id=\"')\n formula_id = \"?\"\n if(index != -1):\n # find document id\n start = index + len('id=\"')\n end = start\n while (not mathml[start:end+1].endswith('\"')):\n end += 1\n doc_id = mathml[start:end]\n formula_id = doc_id.split(\":\")[-1]\n else:\n print(\"FAILED PARSING FORMULA ID: \" + document_id)\n print(mathml)\n return formula_id",
"def parseSubscripts(subscr):\n ...",
"def process_macros(key, value, defines, iflags):\n\t\n\t# $def\n\tm = _def_re.match(value)\n\tif m:\n\t\tif not m[1] in defines:\n\t\t\traise RuntimeError(\"Unknown define {}\".format(m[1]))\n\t\treturn defines[m[1]]\n\t# $iflags\n\tm = _iflags_re.match(value)\n\tif m:\n\t\tlst = m[1].split()\n\t\tval = 0\n\t\tfor df in lst:\n\t\t\tdefname = \"ITEM_\" + df\n\t\t\tif not defname in defines:\n\t\t\t\traise RuntimeError(\"Unknown item flag {}\".format(defname))\n\t\t\tval = val | defines[defname]\n\t\treturn str(val)\n\treturn None",
"def _store ( self , defs , nowarn ):\n\n while True:\n l = defs.readline() # next macro rule\n# print \"rule input=\" , l\n if len(l) == 0: break # EOF check\n dl = definitionLine.DefinitionLine(l,False)\n left = dl.left # pattern to be matched\n tail = dl.tail # transformation to apply to match\n if left == None or tail == None:\n self._err(l=l)\n continue\n mp = ellyWildcard.convert(left)\n if mp == None:\n self._err('bad wildcards',l)\n continue\n pe = mp[-1]\n if pe != ellyWildcard.cALL and pe != ellyWildcard.cEND:\n mp += ellyWildcard.cEND # pattern must end in $ if it does not end in *\n if not _checkBindings(mp,tail):\n self._err('bad bindings in substitution',l)\n continue\n if not nowarn and not _checkExpansion(mp,tail):\n self._err('substitution longer than original string',l,0)\n r = [ mp , tail ]\n# print \"rule =\" , [ left , tail ]\n pat = r[0] # get coded pattern\n if pat == None:\n self._err('no pattern',l)\n continue\n c = pat[0] # first char of pattern\n # check type to see how to index rule\n# print 'c=' , ord(c)\n p = pat\n while c == ellyWildcard.cSOS: # optional sequence?\n k = p.find(ellyWildcard.cEOS) # if so, find the end of sequence\n if k < 0 or k == 1: break # if no end or empty sequence, stop\n k += 1\n if k == len(pat): break # should be something after sequence\n m = ellyChar.toIndex(pat[1]) # index by first char of optional sequence\n self.index[m].append(r) # (must be non-wildcard)\n p = p[k:] # move up in pattern\n c = p[0] # but check for another optional sequence\n\n if c == ellyWildcard.cSOS:\n self._err(l=l)\n continue # bad sequence, skip this rule\n\n# print 'c=' , ord(c)\n if ellyChar.isLetterOrDigit(c): # check effective first char of pattern\n m = ellyChar.toIndex(c)\n self.index[m].append(r) # add to index under alphanumeric char\n elif ellyChar.isText(c):\n self.index[0].append(r) # add to index under punctuation\n elif not c in ellyWildcard.Matching:\n if c == ellyWildcard.cEND:\n print >> sys.stderr , '** macro warning: pattern can have empty match'\n print >> sys.stderr , '* at [' , l , ']'\n else:\n dc = '=' + str(ord(c) - ellyWildcard.X)\n self._err('bad wildcard code' , dc)\n continue\n elif c == ellyWildcard.cANY or c == ellyWildcard.cALL:\n self.anyWx.append(r) # under general wildcards\n elif c == ellyWildcard.cCAN:\n self.index[0].append(r) # under punctuation\n elif c == ellyWildcard.cDIG or c == ellyWildcard.cSDG:\n self.digWx.append(r) # under digit wildcards\n elif c == ellyWildcard.cSAN:\n self.digWx.append(r) # under both digit and\n self.letWx.append(r) # letter wildcards\n elif c == ellyWildcard.cSPC or c == ellyWildcard.cEND:\n self._err('bad wildcard in context',l)\n continue # wildcards unacceptable here\n else:\n self.letWx.append(r) # everything else under letter wildcard\n\n self.count += 1 # count up macro substitution\n\n if self._errcount > 0:\n print >> sys.stderr , '**' , self._errcount , 'macro errors in all'\n print >> sys.stderr , 'macro table definition FAILed'\n raise ellyException.TableFailure",
"def _get_macro_def(self, macro):\n prims_def = ''\n for primitive in macro.primitives:\n shape = primitive.shape\n exposure = primitive.is_additive\n rotation = shape.rotation #or primitive.rotation\n rotation = int((2 - rotation) * 180 or 0)\n\n if isinstance(shape, Circle):\n mods = [SHAPE_TAGS['circle']['int'],\n exposure,\n self._convert_units_str(shape.radius * 2),\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y)]\n elif isinstance(shape, Rectangle) and shape.is_centered:\n mods = [SHAPE_TAGS['center_rectangle']['int'],\n exposure,\n self._convert_units_str(shape.width),\n self._convert_units_str(shape.height),\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y),\n rotation if not shape.flip_horizontal else -rotation]\n elif isinstance(shape, Rectangle) and not shape.is_centered:\n mods = [SHAPE_TAGS['rectangle']['int'],\n exposure,\n self._convert_units_str(shape.width),\n self._convert_units_str(shape.height),\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y),\n rotation]\n elif isinstance(shape, Polygon):\n vertices = [(self._convert_units_str(p.x), self._convert_units_str(p.y)) for p in shape.points]\n v_args = [vertices[i / 2][i % 2]\n for i in range(len(vertices) * 2)]\n mods = [SHAPE_TAGS['polygon']['int'],\n exposure] + v_args + [rotation]\n elif isinstance(shape, RegularPolygon):\n vertices = [(self._convert_units_str(p.x), self._convert_units_str(p.y)) for p in shape.vertices]\n mods = [SHAPE_TAGS['reg_polygon']['int'],\n exposure,\n vertices,\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y),\n self._convert_units_str(shape.outer_diameter),\n rotation]\n elif isinstance(shape, Moire):\n mods = [SHAPE_TAGS['moire']['int'],\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y),\n self._convert_units_str(shape.outer_diameter),\n self._convert_units_str(shape.ring_thickness),\n self._convert_units_str(shape.gap_thickness),\n self._convert_units_str(shape.max_rings),\n self._convert_units_str(shape.hair_thickness),\n self._convert_units_str(shape.hair_length),\n rotation]\n elif isinstance(shape, Thermal):\n mods = [SHAPE_TAGS['thermal']['int'],\n self._convert_units_str(shape.x),\n self._convert_units_str(shape.y),\n self._convert_units_str(shape.outer_diameter),\n self._convert_units_str(shape.inner_diameter),\n self._convert_units_str(shape.gap_thickness),\n rotation]\n mods = ','.join(str(m) for m in mods)\n prim_def = PRIMITIVE.format(mods=mods)\n prims_def += LINE.format(prim_def)\n macro_def = MACRO.format(name=macro.name,\n primitives=prims_def.strip())\n return LINE.format(macro_def)",
"def code_insert_macro_array(text: str, selection: str):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a tool clause
|
def tool_clause():
    yield (local_decl ^
           function_def ^
           struct_def ^
           tool_handler)
|
[
"def parse( self, root ):\n # Get the (user visible) name of the tool\n self.name = root.get( \"name\" )\n if not self.name: \n raise Exception, \"Missing tool 'name'\"\n # Get the UNIQUE id for the tool \n # TODO: can this be generated automatically?\n self.id = root.get( \"id\" )\n if not self.id: \n raise Exception, \"Missing tool 'id'\" \n self.version = root.get( \"version\" )\n if not self.version: \n # For backward compatibility, some tools may not have versions yet.\n self.version = \"1.0.0\"\n # Support multi-byte tools\n self.is_multi_byte = util.string_as_bool( root.get( \"is_multi_byte\", False ) )\n #Force history to fully refresh after job execution for this tool. Useful i.e. when an indeterminate number of outputs are created by a tool.\n self.force_history_refresh = util.string_as_bool( root.get( 'force_history_refresh', 'False' ) )\n #load input translator, used by datasource tools to change names/values of incoming parameters\n self.input_translator = root.find( \"request_param_translation\" )\n if self.input_translator:\n self.input_translator = ToolInputTranslator.from_element( self.input_translator )\n # Command line (template). Optional for tools that do not invoke a local program \n command = root.find(\"command\")\n if command is not None and command.text is not None:\n self.command = command.text.lstrip() # get rid of leading whitespace\n interpreter = command.get(\"interpreter\")\n if interpreter:\n # TODO: path munging for cluster/dataset server relocatability\n executable = self.command.split()[0]\n abs_executable = os.path.abspath(os.path.join(self.tool_dir, executable))\n self.command = self.command.replace(executable, abs_executable, 1)\n self.command = interpreter + \" \" + self.command\n else:\n self.command = ''\n # Parameters used to build URL for redirection to external app\n redirect_url_params = root.find( \"redirect_url_params\" )\n if redirect_url_params is not None and redirect_url_params.text is not None:\n # get rid of leading / trailing white space\n redirect_url_params = redirect_url_params.text.strip()\n # Replace remaining white space with something we can safely split on later\n # when we are building the params\n self.redirect_url_params = redirect_url_params.replace( ' ', '**^**' )\n else:\n self.redirect_url_params = ''\n # Short description of the tool\n self.description = util.xml_text(root, \"description\")\n # Job runner\n if self.app.config.start_job_runners is None:\n # Jobs are always local regardless of tool config if no additional\n # runners are started\n self.job_runner = \"local:///\"\n else:\n # Set job runner to the cluster default\n self.job_runner = self.app.config.default_cluster_job_runner\n for tup in self.app.config.tool_runners:\n if tup[0] == self.id.lower():\n self.job_runner = tup[1]\n break\n # Is this a 'hidden' tool (hidden in tool menu)\n self.hidden = util.xml_text(root, \"hidden\")\n if self.hidden: self.hidden = util.string_as_bool(self.hidden)\n # Load any tool specific code (optional) Edit: INS 5/29/2007,\n # allow code files to have access to the individual tool's\n # \"module\" if it has one. 
Allows us to reuse code files, etc.\n self.code_namespace = dict()\n self.hook_map = {}\n for code_elem in root.findall(\"code\"):\n for hook_elem in code_elem.findall(\"hook\"):\n for key, value in hook_elem.items():\n # map hook to function\n self.hook_map[key]=value\n file_name = code_elem.get(\"file\")\n code_path = os.path.join( self.tool_dir, file_name )\n execfile( code_path, self.code_namespace )\n # Load any tool specific options (optional)\n self.options = dict( sanitize=True, refresh=False )\n for option_elem in root.findall(\"options\"):\n for option, value in self.options.copy().items():\n if isinstance(value, type(False)):\n self.options[option] = util.string_as_bool(option_elem.get(option, str(value)))\n else:\n self.options[option] = option_elem.get(option, str(value))\n self.options = Bunch(** self.options)\n # Parse tool inputs (if there are any required)\n self.parse_inputs( root )\n # Parse tool help\n self.parse_help( root )\n # Description of outputs produced by an invocation of the tool\n self.outputs = {}\n out_elem = root.find(\"outputs\")\n if out_elem:\n for data_elem in out_elem.findall(\"data\"):\n output = ToolOutput( data_elem.get(\"name\") )\n output.format = data_elem.get(\"format\", \"data\")\n output.change_format = data_elem.findall(\"change_format\")\n output.metadata_source = data_elem.get(\"metadata_source\", \"\")\n output.parent = data_elem.get(\"parent\", None)\n output.label = util.xml_text( data_elem, \"label\" )\n output.count = int( data_elem.get(\"count\", 1) )\n output.filters = data_elem.findall( 'filter' )\n self.outputs[ output.name ] = output\n # Any extra generated config files for the tool\n self.config_files = []\n conf_parent_elem = root.find(\"configfiles\")\n if conf_parent_elem:\n for conf_elem in conf_parent_elem.findall( \"configfile\" ):\n name = conf_elem.get( \"name\" )\n filename = conf_elem.get( \"filename\", None )\n text = conf_elem.text\n self.config_files.append( ( name, filename, text ) )\n # Action\n action_elem = root.find( \"action\" )\n if action_elem is None:\n self.tool_action = DefaultToolAction()\n else:\n module = action_elem.get( 'module' )\n cls = action_elem.get( 'class' )\n mod = __import__( module, globals(), locals(), [cls])\n self.tool_action = getattr( mod, cls )()\n # User interface hints\n self.uihints = {}\n uihints_elem = root.find( \"uihints\" )\n if uihints_elem is not None:\n for key, value in uihints_elem.attrib.iteritems():\n self.uihints[ key ] = value\n # Tests\n tests_elem = root.find( \"tests\" )\n if tests_elem:\n try:\n self.parse_tests( tests_elem )\n except:\n log.exception( \"Failed to parse tool tests\" )\n else:\n self.tests = None\n # Determine if this tool can be used in workflows\n self.is_workflow_compatible = self.check_workflow_compatible()",
"def tool_handler():\n yield keyword(\"on\")\n yield normalspaces()\n yield var_name()\n yield normalspaces()\n yield optional(var_name())\n yield normalspaces()\n yield keyword(\"do\")\n yield normalspaces()\n expr = yield expression\n return expr",
"def create_tool_from_suggestion():\n pass",
"def toolset_from_grammar():\n ### <toolset>\n def doMult(node):\n \t(a,b) = node\n \tnode.value = a.value * b.value\n \n def doAdd(node):\n \t(a,b) = node\n \tnode.value = a.value + b.value\n \n def formatResult(node):\n \tnode.value = \"%.3f\" % node.value\n \n return locals().copy()",
"def mousetool_def():\n yield keyword(\"tool\")\n yield normalspaces()\n vname = yield var_name()\n yield normalspaces()\n vnop = yield sepBy(named_argument, normalspaces())\n yield normalspaces()\n yield string(\"(\")\n yield normalspaces()\n toolclauses = yield sepBy(tool_clause, normalspaces())\n yield normalspaces()\n yield string(\")\")\n return s.Construct(s.MOUSETOOL_DEF, vname, vnop, toolclauses)",
"def is_Tool(input_list):",
"def parse_with_bindops(sentence, grammar: Optional[Any] = ..., trace=...):\n ...",
"def parse_line(line):\n label = opcode = operand = \"\"\n\n token_list = Util.get_token_list(line)\n\n token_length = len(token_list)\n\n mnemonics_list = list(Optab.as_dict().keys())\n\n if token_length == 1:\n if token_list[0] in mnemonics_list:\n # like RSUB\n opcode = token_list[0]\n else:\n # like END\n label = token_list[0]\n elif token_length == 2:\n if token_list[0] in mnemonics_list:\n # like ADD THREE\n opcode, operand = token_list\n elif token_list[1] in mnemonics_list:\n # like END RSUB\n label, opcode = token_list\n elif token_length == 3:\n if token_list[0] in mnemonics_list:\n # like LDA BUFFER, X\n opcode, operand, _ = token_list\n else:\n # like THREE WORD 3\n label, opcode, operand = token_list\n elif token_length == 4:\n # like LOOP LDA BUFFER, X\n # or EOF BYTE C'454F46'\n label = token_list[0]\n opcode = token_list[1]\n\n if opcode == OpCode.BYTE:\n # if opcode is BYTE then the 4th string\n # will be the actual value,(token_list[3]).\n # 3rd string will be 'C' or 'X'\n operand = token_list[3]\n else:\n operand = token_list[2]\n\n return label, opcode, operand",
"def build_parser(self, parser: ArgumentParser):",
"def vtr_command_argparser(prog=None):\n\n description = textwrap.dedent(\n \"\"\"\n Parses one or more VTR tasks.\n \"\"\"\n )\n epilog = textwrap.dedent(\n \"\"\"\n Examples\n --------\n\n Parse the task named 'timing_chain':\n\n %(prog)s timing_chain\n\n Parse all the tasks listed in the file 'task_list.txt':\n\n %(prog)s -l task_list.txt\n\n\n Exit Code\n ---------\n The exit code equals the number failures\n (i.e. exit code 0 indicates no failures).\n \"\"\"\n )\n\n parser = argparse.ArgumentParser(\n prog=prog,\n description=description,\n epilog=epilog,\n formatter_class=RawDefaultHelpFormatter,\n )\n\n #\n # Major arguments\n #\n parser.add_argument(\"task\", nargs=\"*\", help=\"Tasks to be run\")\n\n parser.add_argument(\n \"-l\",\n nargs=\"*\",\n default=[],\n metavar=\"TASK_LIST_FILE\",\n dest=\"list_file\",\n help=\"A file listing tasks to be run\",\n )\n\n parser.add_argument(\n \"-temp_dir\",\n default=None,\n metavar=\"TEMP_DIR\",\n dest=\"alt_tasks_dir\",\n help=\"Alternate directory to run the tasks in (will be created if non-existant)\",\n )\n\n parser.add_argument(\n \"-parse_qor\",\n default=False,\n action=\"store_true\",\n help=\"Perform only parsing on the latest task run\",\n )\n\n parser.add_argument(\n \"-create_golden\",\n default=False,\n action=\"store_true\",\n help=\"Update or create golden results for the specified task\",\n )\n\n parser.add_argument(\n \"-check_golden\",\n default=False,\n action=\"store_true\",\n help=\"Check the latest task run against golden results\",\n )\n\n parser.add_argument(\n \"-calc_geomean\",\n default=False,\n action=\"store_true\",\n help=\"QoR geomeans are not computed by default\",\n )\n\n parser.add_argument(\"-run\", default=None, type=str, help=\"\")\n\n parser.add_argument(\"-revision\", default=\"\", help=\"Revision number\")\n\n return parser",
"def parse_clang_arguments(opt=''):\n command='llvm-as </dev/null | opt '+opt+' -disable-output -debug-pass=Arguments'\n output=commands.getoutput(command)\n first_mods=output.split('Pass Arguments: ')[1].split()\n second_mods=output.split('Pass Arguments: ')[2].split()\n return (first_mods, second_mods)",
"def extract_compiler(pstree):\n result = \"unknown\"\n ignoreT = {\n 'pstree' : True,\n 'ld' : True,\n 'collect2' : True,\n }\n \n if (pstree == \"unknown\"):\n return result\n\n a = pstree.split(\"---\")\n n = len(a)\n\n for cmd in reversed(a):\n if (not (cmd in ignoreT)):\n result = cmd\n break\n\n return cmd",
"def dependency_parse(self):\n parsed_sentence = self.parsed\n word_type = None\n multiplier = None\n sentence_deps = set([w.dep_ for w in parsed_sentence])\n for word in parsed_sentence:\n #If there's a word prefaced by a number\n #Add a special case for am, treat it as AM.\n #To somewhat mitigate the special case being incorrectly triggered by the actual word 'am',\n #only do this if there are entities in the sentence\n if list(word.lefts) and \"nummod\" in sentence_deps:\n first_left = list(word.lefts)[0]\n log.debug(\"Found nummod type pair {0} and {1}\".format(word.orth_, first_left.orth_))\n if first_left.is_digit:\n word_type = self.check_time_word(word, \"nummod\")\n multiplier = int(first_left.orth_)\n break\n elif word.dep_ == \"pobj\":\n #Check for a cardinal time\n if cardinal_time_pattern.match(word.orth_):\n word_type, multiplier = self.parse_cardinal_time(word)\n break\n elif date_pattern.match(word.orth_):\n log.debug(\"Found date {0}\".format(word.orth_))\n if \"/\" in word.orth_:\n word_type = \"slashdate\"\n else:\n word_type = \"dotdate\"\n break\n else:\n self.relative == True\n word_type = self.check_time_word(word, \"pobj\")\n break\n #Only use a number for a parse if there's not a significant dependency in the sentence\n elif word.is_digit and \"pobj\" not in sentence_deps:\n word_type, multiplier = self.parse_cardinal_time(word)\n break\n self.word_type = word_type\n self.multiplier = multiplier",
"def parse_command(self, cmd, opts):\n cmd = cmd.upper().strip()\n if cmd == '':\n return None\n elif cmd == 'LOAD':\n return self.parse_load(opts)\n elif cmd == 'PRINT':\n return self.parse_print(opts)\n elif cmd == 'INC':\n return self.parse_inc(opts)\n elif cmd == 'DEC':\n return self.parse_dec(opts)\n elif cmd == 'JMP':\n return self.parse_jmp(opts)\n elif cmd == 'MOV':\n return self.parse_mov(opts)\n elif cmd == 'ADD':\n return self.parse_add(opts)\n elif cmd == 'SUB':\n return self.parse_sub(opts)\n elif cmd == 'MUL':\n return self.parse_mul(opts)\n elif cmd == 'DIV':\n return self.parse_div(opts)\n elif cmd == 'MOD':\n return self.parse_mod(opts)\n elif cmd == 'DB':\n return self.parse_db(opts)\n elif cmd == 'HEAP':\n return self.parse_heap(opts)\n elif cmd == 'INFO':\n return self.parse_info(opts)\n elif cmd == 'STOP':\n self.code += chr(opcodes.STOP)\n else:\n raise ParseError('Unsupported command: %s @%s' % (cmd, self.line))\n #print (cmd, opts)\n return self.code",
"def in_game_parse(line):\n tokens = line.split()\n if not tokens:\n raise SyntaxError('No Command Given')\n \"*** YOUR CODE HERE ***\"\n\n exp = ['Place', ['Put piece here', 'Put space here']]\n symbols = ['X', 'O']\n places = [str(i) for i in range(9)]\n\n while tokens:\n token = tokens.pop(0)\n \"*** YOUR CODE HERE ***\"\n raise SyntaxError(\"Symbol or Place is missing\")",
"def parse(self, hgvs_string):\n pass",
"def test_tests_one_tool(self):\n tool = random.choice(tool_list)\n args = parse_args(\"tests {}\".format(tool), use_shlex=True)\n self.assertCountEqual(args.tool, [tool])",
"def __init__( self, config_file ):\n # Determine the full path of the directory where the tool config is\n self.config_file = config_file\n self.tool_dir = os.path.dirname( config_file )\n # Parse XML configuration file and get the root element\n tree = util.parse_xml( self.config_file )\n root = tree.getroot()\n # Get the (user visible) name of the tool\n self.name = root.get(\"name\")\n if not self.name: raise Exception, \"Missing tool 'name'\"\n # Get the UNIQUE id for the tool \n # TODO: can this be generated automatically?\n self.id = root.get(\"id\")\n if not self.id: raise Exception, \"Missing tool 'id'\" \n # Command line (template). Optional for tools that do not invoke a \n # local program \n command = root.find(\"command\")\n if command is not None:\n self.command = util.xml_text(root, \"command\") # get rid of whitespace\n interpreter = command.get(\"interpreter\")\n if interpreter:\n self.command = interpreter + \" \" + os.path.join(self.tool_dir, self.command)\n else:\n self.command = ''\n # Short description of the tool\n self.description = util.xml_text(root, \"description\")\n # Load any tool specific code (optional)\n self.code_namespace = dict()\n for code_elem in root.findall(\"code\"):\n file_name = code_elem.get(\"file\")\n code_path = os.path.join( self.tool_dir, file_name )\n execfile( code_path, self.code_namespace )\n # Load parameters (optional)\n input_elem = root.find(\"inputs\")\n if input_elem:\n # Handle properties of the input form\n self.check_values = util.string_as_bool( input_elem.get(\"check_values\", \"true\") )\n self.action = input_elem.get( \"action\", \"/tool_runner/index\")\n self.target = input_elem.get( \"target\", \"galaxy_main\" )\n self.method = input_elem.get( \"method\", \"post\" )\n # Parse the actual parameters\n self.param_map = odict()\n self.param_map_by_page = list()\n self.display_by_page = list()\n enctypes = set()\n # Handle multiple page case\n pages = input_elem.findall( \"page\" )\n for page in ( pages or [ input_elem ] ):\n display, param_map = self.parse_page( page, enctypes )\n self.param_map_by_page.append( param_map )\n self.param_map.update( param_map )\n self.display_by_page.append( display )\n self.display = self.display_by_page[0]\n self.npages = len( self.param_map_by_page )\n self.last_page = len( self.param_map_by_page ) - 1\n self.has_multiple_pages = bool( self.last_page )\n # Determine the needed enctype for the form\n if len( enctypes ) == 0:\n self.enctype = \"application/x-www-form-urlencoded\"\n elif len( enctypes ) == 1:\n self.enctype = enctypes.pop()\n else:\n raise Exception, \"Conflicting required enctypes: %s\" % str( enctypes )\n # Check if the tool either has no parameters or only hidden (and\n # thus hardcoded) parameters. FIXME: hidden parameters aren't\n # parameters at all really, and should be passed in a different\n # way, making this check easier.\n self.input_required = False\n for param in self.param_map.values():\n if not isinstance( param, ( HiddenToolParameter, BaseURLToolParameter ) ):\n self.input_required = True\n break\n # Longer help text for the tool. 
Formatted in RST\n # TODO: Allow raw HTML or an external link.\n self.help = root.find(\"help\")\n self.help_by_page = list()\n help_header = \"\"\n help_footer = \"\"\n if self.help is not None:\n help_pages = self.help.findall( \"page\" )\n help_header = self.help.text\n try:\n self.help = util.rst_to_html(self.help.text)\n except:\n log.exception( \"error in help for tool %s\" % self.name )\n # Multiple help page case\n if help_pages:\n for help_page in help_pages:\n self.help_by_page.append( help_page.text )\n help_footer = help_footer + help_page.tail\n # Each page has to rendered all-together because of backreferences allowed by rst\n try:\n self.help_by_page = [ \\\n util.rst_to_html(help_header + x + help_footer) for x in self.help_by_page \\\n ]\n except:\n log.exception( \"error in multi-page help for tool %s\" % self.name )\n # Pad out help pages to match npages ... could this be done better?\n while len(self.help_by_page) < self.npages: self.help_by_page.append( self.help )\n # FIXME: This is not used anywhere, what does it do?\n # url redirection to ougoings\n self.redir_url = root.find(\"url\")\n # Description of outputs produced by an invocation of the tool\n self.outputs = {}\n out_elem = root.find(\"outputs\")\n if out_elem:\n for data_elem in out_elem.findall(\"data\"):\n name = data_elem.get(\"name\")\n format = data_elem.get(\"format\", \"data\")\n metadata_source = data_elem.get(\"metadata_source\", \"\")\n parent = data_elem.get(\"parent\", None)\n self.outputs[name] = (format, metadata_source, parent) \n # Action\n action_elem = root.find( \"action\" )\n if action_elem is None:\n self.tool_action = DefaultToolAction()\n else:\n module = action_elem.get( 'module' )\n cls = action_elem.get( 'class' )\n mod = __import__( module, globals(), locals(), [cls])\n self.tool_action = getattr( mod, cls )()\n # Tests\n tests_elem = root.find( \"tests\" )\n if tests_elem:\n try:\n self.parse_tests( tests_elem )\n except:\n log.exception( \"Failed to parse tool tests\" )\n else:\n self.tests = None",
"def read_command_line():\n global advanced\n global add_all_variable_names\n\n try:\n options, arguments = getopt.getopt(sys.argv[1:], 'hd:')\n except getopt.GetoptError:\n print_usage()\n print('ERROR: Syntax Error with command!')\n raise SystemExit(22)\n\n command_info = {'source': '', 'model': '', 'location': '', \\\n 'start_time': '', 'variable_names': []}\n for option, argument in options:\n if option == '-h':\n print_usage()\n raise SystemExit(0)\n elif option == '-d':\n add_all_variable_names = False\n advanced = True\n command_info['variable_names'] = argument.split(',')\n\n read_command_info(arguments, command_info)\n\n return command_info"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a tool handler
|
def tool_handler():
yield keyword("on")
yield normalspaces()
yield var_name()
yield normalspaces()
yield optional(var_name())
yield normalspaces()
yield keyword("do")
yield normalspaces()
expr = yield expression
return expr
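
For reference, a minimal stand-alone sketch of the "on <name> [<arg>] do <expr>" shape this handler parser accepts. The regex, helper name, and sample input below are illustrative assumptions, not part of the original grammar module:

import re

# Hypothetical regex approximation of the handler grammar: on <name> [<name>] do <expr>
HANDLER_RE = re.compile(
    r"on\s+(?P<name>\w+)(?:\s+(?P<arg>\w+))?\s+do\s+(?P<expr>.+)", re.S
)

def parse_tool_handler_sketch(text):
    # Return the pieces the combinator version yields one by one.
    m = HANDLER_RE.match(text.strip())
    if m is None:
        raise SyntaxError("not a tool handler")
    return m.group("name"), m.group("arg"), m.group("expr")

# e.g. parse_tool_handler_sketch("on mousePoint clickNo do print clickNo")
# -> ('mousePoint', 'clickNo', 'print clickNo')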
|
[
"def parse( self, root ):\n # Get the (user visible) name of the tool\n self.name = root.get( \"name\" )\n if not self.name: \n raise Exception, \"Missing tool 'name'\"\n # Get the UNIQUE id for the tool \n # TODO: can this be generated automatically?\n self.id = root.get( \"id\" )\n if not self.id: \n raise Exception, \"Missing tool 'id'\" \n self.version = root.get( \"version\" )\n if not self.version: \n # For backward compatibility, some tools may not have versions yet.\n self.version = \"1.0.0\"\n # Support multi-byte tools\n self.is_multi_byte = util.string_as_bool( root.get( \"is_multi_byte\", False ) )\n #Force history to fully refresh after job execution for this tool. Useful i.e. when an indeterminate number of outputs are created by a tool.\n self.force_history_refresh = util.string_as_bool( root.get( 'force_history_refresh', 'False' ) )\n #load input translator, used by datasource tools to change names/values of incoming parameters\n self.input_translator = root.find( \"request_param_translation\" )\n if self.input_translator:\n self.input_translator = ToolInputTranslator.from_element( self.input_translator )\n # Command line (template). Optional for tools that do not invoke a local program \n command = root.find(\"command\")\n if command is not None and command.text is not None:\n self.command = command.text.lstrip() # get rid of leading whitespace\n interpreter = command.get(\"interpreter\")\n if interpreter:\n # TODO: path munging for cluster/dataset server relocatability\n executable = self.command.split()[0]\n abs_executable = os.path.abspath(os.path.join(self.tool_dir, executable))\n self.command = self.command.replace(executable, abs_executable, 1)\n self.command = interpreter + \" \" + self.command\n else:\n self.command = ''\n # Parameters used to build URL for redirection to external app\n redirect_url_params = root.find( \"redirect_url_params\" )\n if redirect_url_params is not None and redirect_url_params.text is not None:\n # get rid of leading / trailing white space\n redirect_url_params = redirect_url_params.text.strip()\n # Replace remaining white space with something we can safely split on later\n # when we are building the params\n self.redirect_url_params = redirect_url_params.replace( ' ', '**^**' )\n else:\n self.redirect_url_params = ''\n # Short description of the tool\n self.description = util.xml_text(root, \"description\")\n # Job runner\n if self.app.config.start_job_runners is None:\n # Jobs are always local regardless of tool config if no additional\n # runners are started\n self.job_runner = \"local:///\"\n else:\n # Set job runner to the cluster default\n self.job_runner = self.app.config.default_cluster_job_runner\n for tup in self.app.config.tool_runners:\n if tup[0] == self.id.lower():\n self.job_runner = tup[1]\n break\n # Is this a 'hidden' tool (hidden in tool menu)\n self.hidden = util.xml_text(root, \"hidden\")\n if self.hidden: self.hidden = util.string_as_bool(self.hidden)\n # Load any tool specific code (optional) Edit: INS 5/29/2007,\n # allow code files to have access to the individual tool's\n # \"module\" if it has one. 
Allows us to reuse code files, etc.\n self.code_namespace = dict()\n self.hook_map = {}\n for code_elem in root.findall(\"code\"):\n for hook_elem in code_elem.findall(\"hook\"):\n for key, value in hook_elem.items():\n # map hook to function\n self.hook_map[key]=value\n file_name = code_elem.get(\"file\")\n code_path = os.path.join( self.tool_dir, file_name )\n execfile( code_path, self.code_namespace )\n # Load any tool specific options (optional)\n self.options = dict( sanitize=True, refresh=False )\n for option_elem in root.findall(\"options\"):\n for option, value in self.options.copy().items():\n if isinstance(value, type(False)):\n self.options[option] = util.string_as_bool(option_elem.get(option, str(value)))\n else:\n self.options[option] = option_elem.get(option, str(value))\n self.options = Bunch(** self.options)\n # Parse tool inputs (if there are any required)\n self.parse_inputs( root )\n # Parse tool help\n self.parse_help( root )\n # Description of outputs produced by an invocation of the tool\n self.outputs = {}\n out_elem = root.find(\"outputs\")\n if out_elem:\n for data_elem in out_elem.findall(\"data\"):\n output = ToolOutput( data_elem.get(\"name\") )\n output.format = data_elem.get(\"format\", \"data\")\n output.change_format = data_elem.findall(\"change_format\")\n output.metadata_source = data_elem.get(\"metadata_source\", \"\")\n output.parent = data_elem.get(\"parent\", None)\n output.label = util.xml_text( data_elem, \"label\" )\n output.count = int( data_elem.get(\"count\", 1) )\n output.filters = data_elem.findall( 'filter' )\n self.outputs[ output.name ] = output\n # Any extra generated config files for the tool\n self.config_files = []\n conf_parent_elem = root.find(\"configfiles\")\n if conf_parent_elem:\n for conf_elem in conf_parent_elem.findall( \"configfile\" ):\n name = conf_elem.get( \"name\" )\n filename = conf_elem.get( \"filename\", None )\n text = conf_elem.text\n self.config_files.append( ( name, filename, text ) )\n # Action\n action_elem = root.find( \"action\" )\n if action_elem is None:\n self.tool_action = DefaultToolAction()\n else:\n module = action_elem.get( 'module' )\n cls = action_elem.get( 'class' )\n mod = __import__( module, globals(), locals(), [cls])\n self.tool_action = getattr( mod, cls )()\n # User interface hints\n self.uihints = {}\n uihints_elem = root.find( \"uihints\" )\n if uihints_elem is not None:\n for key, value in uihints_elem.attrib.iteritems():\n self.uihints[ key ] = value\n # Tests\n tests_elem = root.find( \"tests\" )\n if tests_elem:\n try:\n self.parse_tests( tests_elem )\n except:\n log.exception( \"Failed to parse tool tests\" )\n else:\n self.tests = None\n # Determine if this tool can be used in workflows\n self.is_workflow_compatible = self.check_workflow_compatible()",
"def run_parser(self, parser: ArgumentParser):",
"def __init__( self, config_file ):\n # Determine the full path of the directory where the tool config is\n self.config_file = config_file\n self.tool_dir = os.path.dirname( config_file )\n # Parse XML configuration file and get the root element\n tree = util.parse_xml( self.config_file )\n root = tree.getroot()\n # Get the (user visible) name of the tool\n self.name = root.get(\"name\")\n if not self.name: raise Exception, \"Missing tool 'name'\"\n # Get the UNIQUE id for the tool \n # TODO: can this be generated automatically?\n self.id = root.get(\"id\")\n if not self.id: raise Exception, \"Missing tool 'id'\" \n # Command line (template). Optional for tools that do not invoke a \n # local program \n command = root.find(\"command\")\n if command is not None:\n self.command = util.xml_text(root, \"command\") # get rid of whitespace\n interpreter = command.get(\"interpreter\")\n if interpreter:\n self.command = interpreter + \" \" + os.path.join(self.tool_dir, self.command)\n else:\n self.command = ''\n # Short description of the tool\n self.description = util.xml_text(root, \"description\")\n # Load any tool specific code (optional)\n self.code_namespace = dict()\n for code_elem in root.findall(\"code\"):\n file_name = code_elem.get(\"file\")\n code_path = os.path.join( self.tool_dir, file_name )\n execfile( code_path, self.code_namespace )\n # Load parameters (optional)\n input_elem = root.find(\"inputs\")\n if input_elem:\n # Handle properties of the input form\n self.check_values = util.string_as_bool( input_elem.get(\"check_values\", \"true\") )\n self.action = input_elem.get( \"action\", \"/tool_runner/index\")\n self.target = input_elem.get( \"target\", \"galaxy_main\" )\n self.method = input_elem.get( \"method\", \"post\" )\n # Parse the actual parameters\n self.param_map = odict()\n self.param_map_by_page = list()\n self.display_by_page = list()\n enctypes = set()\n # Handle multiple page case\n pages = input_elem.findall( \"page\" )\n for page in ( pages or [ input_elem ] ):\n display, param_map = self.parse_page( page, enctypes )\n self.param_map_by_page.append( param_map )\n self.param_map.update( param_map )\n self.display_by_page.append( display )\n self.display = self.display_by_page[0]\n self.npages = len( self.param_map_by_page )\n self.last_page = len( self.param_map_by_page ) - 1\n self.has_multiple_pages = bool( self.last_page )\n # Determine the needed enctype for the form\n if len( enctypes ) == 0:\n self.enctype = \"application/x-www-form-urlencoded\"\n elif len( enctypes ) == 1:\n self.enctype = enctypes.pop()\n else:\n raise Exception, \"Conflicting required enctypes: %s\" % str( enctypes )\n # Check if the tool either has no parameters or only hidden (and\n # thus hardcoded) parameters. FIXME: hidden parameters aren't\n # parameters at all really, and should be passed in a different\n # way, making this check easier.\n self.input_required = False\n for param in self.param_map.values():\n if not isinstance( param, ( HiddenToolParameter, BaseURLToolParameter ) ):\n self.input_required = True\n break\n # Longer help text for the tool. 
Formatted in RST\n # TODO: Allow raw HTML or an external link.\n self.help = root.find(\"help\")\n self.help_by_page = list()\n help_header = \"\"\n help_footer = \"\"\n if self.help is not None:\n help_pages = self.help.findall( \"page\" )\n help_header = self.help.text\n try:\n self.help = util.rst_to_html(self.help.text)\n except:\n log.exception( \"error in help for tool %s\" % self.name )\n # Multiple help page case\n if help_pages:\n for help_page in help_pages:\n self.help_by_page.append( help_page.text )\n help_footer = help_footer + help_page.tail\n # Each page has to rendered all-together because of backreferences allowed by rst\n try:\n self.help_by_page = [ \\\n util.rst_to_html(help_header + x + help_footer) for x in self.help_by_page \\\n ]\n except:\n log.exception( \"error in multi-page help for tool %s\" % self.name )\n # Pad out help pages to match npages ... could this be done better?\n while len(self.help_by_page) < self.npages: self.help_by_page.append( self.help )\n # FIXME: This is not used anywhere, what does it do?\n # url redirection to ougoings\n self.redir_url = root.find(\"url\")\n # Description of outputs produced by an invocation of the tool\n self.outputs = {}\n out_elem = root.find(\"outputs\")\n if out_elem:\n for data_elem in out_elem.findall(\"data\"):\n name = data_elem.get(\"name\")\n format = data_elem.get(\"format\", \"data\")\n metadata_source = data_elem.get(\"metadata_source\", \"\")\n parent = data_elem.get(\"parent\", None)\n self.outputs[name] = (format, metadata_source, parent) \n # Action\n action_elem = root.find( \"action\" )\n if action_elem is None:\n self.tool_action = DefaultToolAction()\n else:\n module = action_elem.get( 'module' )\n cls = action_elem.get( 'class' )\n mod = __import__( module, globals(), locals(), [cls])\n self.tool_action = getattr( mod, cls )()\n # Tests\n tests_elem = root.find( \"tests\" )\n if tests_elem:\n try:\n self.parse_tests( tests_elem )\n except:\n log.exception( \"Failed to parse tool tests\" )\n else:\n self.tests = None",
"def build_parser(self, parser: ArgumentParser):",
"def tool_clause():\n yield (local_decl ^\n function_def ^\n struct_def ^\n tool_handler)",
"def create_tool_from_suggestion():\n pass",
"def parseApplication(app):\n ...",
"def create_parser_impl(self, common, handler: ParserHandler) -> BaseParser:",
"def full_parser():\n return util.cmd.make_parser(__commands__, __doc__)",
"def parse_tools(url):\n\tdoc = urllib2.urlopen(url)\n\tlines = doc.readlines()\n\n\t# Find the indicies in the list of HTML lines that separate hardware / software sections\n\tsection_regex = re.compile(\"What hardware do you use?|And what software?|What would be your dream setup?\")\n\theader_inds = [i for i, l in enumerate(lines) if re.search(section_regex, l)]\n\thardware_inds =[header_inds[0], header_inds[1]-1]\n\tsoftware_inds =[header_inds[1], header_inds[2]-1]\n\n\t# Extract the tools, and clean out random None types from malformed HTML\n\thardware_tools = get_links(lines[hardware_inds[0]:hardware_inds[1]])\n\thardware_tools = [(a) for (a) in hardware_tools if a is not None]\n\t\n\tsoftware_tools = get_links(lines[software_inds[0]:software_inds[1]])\n\tsoftware_tools = [(a) for (a) in software_tools if a is not None]\n\t\n\treturn {'hardware' : map(lambda s: s.lower(), hardware_tools), 'software' : map(lambda s: s.lower(), software_tools)}",
"def parseCommandLine(cls): \n win32serviceutil.HandleCommandLine(cls)",
"def parse(self,cmds):\n # When the program runs\n # the first instruction will push the \"main\" scope which is the\n # outer-most scope.\n subrout.subroutparse_newdef(\"main\",self)\n while cmds:\n matched = False\n for cmdp in cmd_parsers:\n # match the instruction token(s)\n m=cmdp.regex.match(cmds)\n if m:\n print(m)\n print(m.re.pattern)\n print(m.groups())\n newinstr = None\n if cmdp.instr_constr:\n newinstr = cmdp.instr_constr(m.groups(),self)\n if not self.last_instr:\n # This means that parsing was prematurely terminated, so\n # we quit\n return\n if newinstr:\n # check if newinstr because some don't return\n # instructions, like NOP\n self.last_instr = append(self.last_instr,newinstr)\n cmds=cmds[m.end(0):]\n matched = True\n break\n if not matched:\n print(\"Error: no match for %s\" % (cmds,))\n break\n # Finish main which should be the outer-most scope and so there won't be\n # any other first_instr\n first_instr = subrout.subroutdefinstr_enddef(self)\n self.last_instr = first_instr\n if len(self.cur_subrout_def) > 0:\n raise Exception('Some routines have not been completed (missing closing \"}\" ?)')\n # The last instruction executes the main routine\n self.last_instr = append(self.last_instr,subrout.subroutexecinstr_create(\"main\"))\n return first_instr",
"def init_tools( self, config_filename ):\n def load_tool( elem, panel_dict ):\n try:\n path = elem.get( \"file\" )\n tool = self.load_tool( os.path.join( self.tool_root_dir, path ) )\n self.tools_by_id[ tool.id ] = tool\n key = 'tool_' + tool.id\n panel_dict[ key ] = tool\n log.debug( \"Loaded tool: %s %s\" % ( tool.id, tool.version ) )\n except:\n log.exception( \"error reading tool from path: %s\" % path )\n def load_workflow( elem, panel_dict ):\n try:\n # TODO: should id be encoded?\n workflow_id = elem.get( 'id' )\n workflow = self.load_workflow( workflow_id )\n self.workflows_by_id[ workflow_id ] = workflow\n key = 'workflow_' + workflow_id\n panel_dict[ key ] = workflow\n log.debug( \"Loaded workflow: %s %s\" % ( workflow_id, workflow.name ) )\n except:\n log.exception( \"error loading workflow: %s\" % workflow_id )\n def load_label( elem, panel_dict ):\n label = ToolSectionLabel( elem )\n key = 'label_' + label.id\n panel_dict[ key ] = label\n def load_section( elem, panel_dict ):\n section = ToolSection( elem )\n log.debug( \"Loading section: %s\" % section.name )\n for section_elem in elem:\n if section_elem.tag == 'tool':\n load_tool( section_elem, section.elems )\n elif section_elem.tag == 'workflow':\n load_workflow( section_elem, section.elems )\n elif section_elem.tag == 'label':\n load_label( section_elem, section.elems )\n key = 'section_' + section.id\n panel_dict[ key ] = section\n \n log.info(\"parsing the tool configuration\")\n tree = util.parse_xml( config_filename )\n root = tree.getroot()\n for elem in root:\n if elem.tag == 'tool':\n load_tool( elem, self.tool_panel )\n elif elem.tag == 'workflow':\n load_workflow( elem, self.tool_panel )\n elif elem.tag == 'section' :\n load_section( elem, self.tool_panel )\n elif elem.tag == 'label':\n load_label( elem, self.tool_panel )",
"def parse(self, command_line) -> dict:\n raise NotImplementedError",
"def __init__(self):\n self.CLI_COMMAND = os.path.basename(sys.argv[0])\n\n self.ctrl_parser = argparse.ArgumentParser(prog=self.CLI_COMMAND,\n description='Control Component Parser')\n\n self.ctrl_subparser = self.ctrl_parser.add_subparsers(\n title='Sub Commands',\n description='List of Valid Sub Commands', dest='subparser_name')\n\n self.add_simple_args()\n\n \"\"\"Sub Parser for all Cli Commands\"\"\"\n self.add_subparser('power', 'Power on/off/reset a device.',\n ['on', 'off', 'cycle', 'bios', 'efi', 'hdd', 'pxe', 'cdrom', 'removable'],\n 'Select an option: on/off/cycle/bios/efi/hdd/pxe/cdrom/removable.'\n ' Ex: {} power on node001'.format(self.CLI_COMMAND),\n [\n {\n 'name': '-f',\n 'name2': '--force',\n 'action': 'store_true',\n 'help': 'This option will allow user to force the Power On/Off/Reboot'\n },\n {\n 'name': '-o',\n 'name2': '--outlet',\n 'type': int,\n 'nargs': '?',\n 'help': 'Specify the outlet to edit (PDUs only)'\n }\n ])\n\n self.add_subparser('resource', 'Resource add/remove from a resource pool.', ['add', 'remove', 'check'],\n 'Select one of the following options: add/remove/check'\n ' Ex: {} resource add node001'.format(self.CLI_COMMAND))\n\n self.add_subparser('process', 'Process list/kill on a node in a cluster.', ['list', 'kill'],\n 'Select one of two options: list/kill.'\n ' Ex: {} process kill 1232 node001'.format(self.CLI_COMMAND),\n [\n {\n 'name': 'process_id',\n 'help': 'Please provide process id to list or kill a process'\n }\n ])\n\n self.add_subparser('get', 'Get powercap/freq value of a node.', ['freq', 'powercap'])\n\n self.add_subparser('set', 'Set powercap/freq value of a node.', ['freq', 'powercap'], 'Select an option to set',\n [\n {\n 'name': 'value',\n 'help': 'Please provide the value to be set'\n }\n ])\n\n self.add_subparser('service', 'Check, start or stop services specified in the configuration file',\n ['status', 'start', 'stop'], 'Select an action to perform')\n\n self.ctrl_subparser.add_parser('datastore', help=\"Raw access to the database and its contects\", add_help=False)\n self.ctrl_subparser.add_parser('cmm', help=\"Configuration Manifest Management (CMM) is a user friendly way to update your configuration.\", add_help=False)\n self.ctrl_subparser.add_parser('provision', help=\"Adding, setting and removing provisioning \"\n \"options for devices\", add_help=False)\n self.ctrl_subparser.add_parser('diag', help=\"Launching diagnostic tests on devices\", add_help=False)\n\n self.add_subparser('bios', 'Update or get version of bios on specified nodes/group of nodes',\n ['update', 'get-version'], 'Select an action to perform',\n [\n {\n 'name': '-i',\n 'name2': '--image',\n 'nargs': '?',\n 'help': 'Specify the bios image'\n }\n ])\n\n self.add_subparser('sensor', 'Get specified sensor value on specified nodes/group of nodes',\n ['get'], 'Select option to get sensor values'\n 'Ex: 1. {0} sensor-name temp 2. {1} sensor-name temp --get-overtime 2 3'.\n format(self.CLI_COMMAND, self.CLI_COMMAND),\n [\n {\n 'name': 'sensor_name',\n 'nargs': '?',\n 'help': 'Provide a specific sensor, a comma seperated list of multiple sensors '\n 'or \"*\" for all sensors'\n },\n {\n 'name': '--get-overtime',\n 'nargs': 2,\n 'type': int,\n 'metavar': ('<sample-rate>', '<duration>'),\n 'help': 'Provide a sample rate(per second) and a duration of time(seconds) to sample'\n ' over, both values must be integers greater than 1'\n }\n ])\n self.ctrl_subparser.add_parser('job', help='Launching, checking, '\n 'retrieving and canceling job', add_help=False)",
"def main():\r\n index(parserCmdLine())",
"def getparser():\n prs = ap.ArgumentParser(\n description=\"Format conversion for \"\n \"and introspection of \"\n \"intersphinx \"\n \"'objects.inv' files.\"\n )\n prs.add_argument(\n \"-\" + PrsConst.VERSION[0],\n \"--\" + PrsConst.VERSION,\n help=\"Print package version & other info\",\n action=\"store_true\",\n )\n\n sprs = prs.add_subparsers(\n title=\"Subcommands\",\n dest=PrsConst.SUBPARSER_NAME,\n metavar=f\"{{{PrsConst.CONVERT},{PrsConst.SUGGEST}}}\",\n help=\"Execution mode. Type \"\n \"'sphobjinv [mode] -h' \"\n \"for more information \"\n \"on available options. \"\n \"Mode names can be abbreviated \"\n \"to their first two letters.\",\n )\n\n # Enforce subparser as optional. No effect for 3.4 to 3.7;\n # briefly required a/o 3.7.0b4 due to change in default behavior, per:\n # https://bugs.python.org/issue33109. 3.6 behavior restored for\n # 3.7 release.\n sprs.required = False\n\n spr_convert = sprs.add_parser(\n PrsConst.CONVERT,\n aliases=[PrsConst.CONVERT[:2]],\n help=PrsConst.HELP_CO_PARSER,\n description=PrsConst.HELP_CO_PARSER,\n )\n spr_suggest = sprs.add_parser(\n PrsConst.SUGGEST,\n aliases=[PrsConst.SUGGEST[:2]],\n help=PrsConst.HELP_SU_PARSER,\n description=PrsConst.HELP_SU_PARSER,\n )\n\n # ### Args for conversion subparser\n spr_convert.add_argument(\n PrsConst.MODE,\n help=\"Conversion output format\",\n choices=(PrsConst.ZLIB, PrsConst.PLAIN, PrsConst.JSON),\n )\n\n spr_convert.add_argument(\n PrsConst.INFILE,\n help=(\n \"Path to file to be converted. Passing '-' indicates to read from stdin \"\n \"(plaintext/JSON only).\"\n ),\n )\n\n spr_convert.add_argument(\n PrsConst.OUTFILE,\n help=(\n \"Path to desired output file. \"\n \"Defaults to same directory and main \"\n \"file name as input file but with extension \"\n + PrsConst.HELP_CONV_EXTS\n + \", as appropriate for the output format. \"\n \"A path to a directory is accepted here, \"\n \"in which case the default output file name will be used. \"\n \"Passing '-' indicates to write to stdout. If \"\n + PrsConst.INFILE\n + \" is passed as '-', \"\n + PrsConst.OUTFILE\n + \" can be omitted and both stdin and stdout will be used.\"\n ),\n nargs=\"?\",\n default=None,\n )\n\n # Mutually exclusive group for --expand/--contract\n gp_expcont = spr_convert.add_argument_group(title=\"URI/display name conversions\")\n meg_expcont = gp_expcont.add_mutually_exclusive_group()\n meg_expcont.add_argument(\n \"-\" + PrsConst.EXPAND[0],\n \"--\" + PrsConst.EXPAND,\n help=\"Expand all URI and display name abbreviations\",\n action=\"store_true\",\n )\n\n meg_expcont.add_argument(\n \"-\" + PrsConst.CONTRACT[0],\n \"--\" + PrsConst.CONTRACT,\n help=\"Contract all URI and display name abbreviations\",\n action=\"store_true\",\n )\n\n # Clobber argument\n spr_convert.add_argument(\n \"-\" + PrsConst.OVERWRITE[0],\n \"--\" + PrsConst.OVERWRITE,\n help=\"Overwrite output files without prompting\",\n action=\"store_true\",\n )\n\n # stdout suppressor option (e.g., for scripting)\n spr_convert.add_argument(\n \"-\" + PrsConst.QUIET[0],\n \"--\" + PrsConst.QUIET,\n help=\"Suppress printing of status messages and \"\n \"overwrite output files without prompting\",\n action=\"store_true\",\n )\n\n # Flag to treat infile as a URL\n spr_convert.add_argument(\n \"-\" + PrsConst.URL[0],\n \"--\" + PrsConst.URL,\n help=(\n \"Treat 'infile' as a URL for download. 
\"\n \"Cannot be used with an infile of '-'.\"\n ),\n action=\"store_true\",\n )\n\n # ### Args for suggest subparser\n spr_suggest.add_argument(\n PrsConst.INFILE,\n help=(\n \"Path to inventory file to be searched. \"\n \"Passing '-' indicates to read from stdin (plaintext/JSON only).\"\n ),\n )\n spr_suggest.add_argument(PrsConst.SEARCH, help=\"Search term for object suggestions\")\n spr_suggest.add_argument(\n \"-\" + PrsConst.ALL[0],\n \"--\" + PrsConst.ALL,\n help=\"Display all results \"\n \"regardless of the number returned \"\n \"without prompting for confirmation.\",\n action=\"store_true\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.PAGINATE[0],\n \"--\" + PrsConst.PAGINATE,\n help=\"Paginate long search results\",\n action=\"store_true\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.INDEX[0],\n \"--\" + PrsConst.INDEX,\n help=\"Include Inventory.objects list indices with the search results\",\n action=\"store_true\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.SCORE[0],\n \"--\" + PrsConst.SCORE,\n help=\"Include fuzzywuzzy scores with the search results\",\n action=\"store_true\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.THRESH[0],\n \"--\" + PrsConst.THRESH,\n help=\"Match quality threshold, integer 0-100, \"\n \"default 75. Default is suitable when \"\n \"'search' is exactly a known object name. \"\n \"A value of 30-50 gives better results \"\n \"for approximate matches.\",\n default=PrsConst.DEF_THRESH,\n type=int,\n choices=range(101),\n metavar=\"{0-100}\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.URL[0],\n \"--\" + PrsConst.URL,\n help=(\n \"Treat 'infile' as a URL for download. \"\n f\"Cannot be used with --{PrsConst.URL}.\"\n ),\n action=\"store_true\",\n )\n\n return prs",
"def _ParseFileEntry(self, mediator, file_entry):",
"def toolset_from_grammar():\n ### <toolset>\n def doMult(node):\n \t(a,b) = node\n \tnode.value = a.value * b.value\n \n def doAdd(node):\n \t(a,b) = node\n \tnode.value = a.value + b.value\n \n def formatResult(node):\n \tnode.value = \"%.3f\" % node.value\n \n return locals().copy()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a mousetool def
|
def mousetool_def():
yield keyword("tool")
yield normalspaces()
vname = yield var_name()
yield normalspaces()
vnop = yield sepBy(named_argument, normalspaces())
yield normalspaces()
yield string("(")
yield normalspaces()
toolclauses = yield sepBy(tool_clause, normalspaces())
yield normalspaces()
yield string(")")
return s.Construct(s.MOUSETOOL_DEF, vname, vnop, toolclauses)
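
As a rough illustration of the surface form being parsed here, "tool <name> <named-args> ( <clauses> )", a hedged regex-based sketch; the named-argument format, function names, and sample input are assumptions and far looser than the real combinators:

import re

# Hypothetical sketch: split a mousetool definition into name, named arguments,
# and the parenthesised clause body that tool_clause would then handle.
TOOL_RE = re.compile(
    r"tool\s+(?P<name>\w+)\s*(?P<args>[^(]*)\((?P<body>.*)\)\s*$", re.S
)

def parse_mousetool_sketch(text):
    m = TOOL_RE.match(text.strip())
    if m is None:
        raise SyntaxError("not a mousetool definition")
    # Very loose stand-in for named_argument: word:value pairs without spaces.
    named_args = re.findall(r"(\w+):(\S+)", m.group("args"))
    return m.group("name"), named_args, m.group("body").strip()

# e.g. parse_mousetool_sketch('tool myTool prompt:"pick" ( on mousePoint clickNo do print clickNo )')
# -> ('myTool', [('prompt', '"pick"')], 'on mousePoint clickNo do print clickNo')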
|
[
"def get_cmd_parser(buf):\n buf = buf.strip('\\n')\n try:\n value = json.loads(buf)\n return value\n except ValueError:\n # Handles in the following.\n pass\n\n value = buf # Default value\n if is_valid_uuid(buf):\n # UUID type\n pass # Uses the default\n elif buf.startswith('['):\n # Set type (might be containing UUIDs)\n # e.g.)\n # [<UUID>, <UUID>]\n buf = buf.replace('[', '[\"').replace(', ', '\", \"').replace(']', '\"]')\n value = json.loads(buf)\n elif buf.startswith('{'):\n # Map type\n # e.g.)\n # {stp-enable=\"true\", stp-priority=\"100\"}\n buf = buf.replace('{', '{\"').replace('=', '\": ').replace(', ', ', \"')\n value = json.loads(buf)\n\n return value",
"def parse(name):\n\n pass",
"def build_parser(self, parser: ArgumentParser):",
"def load_info_from_docstring(docstring, *, delimiter=...):\n ...",
"def parse( self, root ):\n # Get the (user visible) name of the tool\n self.name = root.get( \"name\" )\n if not self.name: \n raise Exception, \"Missing tool 'name'\"\n # Get the UNIQUE id for the tool \n # TODO: can this be generated automatically?\n self.id = root.get( \"id\" )\n if not self.id: \n raise Exception, \"Missing tool 'id'\" \n self.version = root.get( \"version\" )\n if not self.version: \n # For backward compatibility, some tools may not have versions yet.\n self.version = \"1.0.0\"\n # Support multi-byte tools\n self.is_multi_byte = util.string_as_bool( root.get( \"is_multi_byte\", False ) )\n #Force history to fully refresh after job execution for this tool. Useful i.e. when an indeterminate number of outputs are created by a tool.\n self.force_history_refresh = util.string_as_bool( root.get( 'force_history_refresh', 'False' ) )\n #load input translator, used by datasource tools to change names/values of incoming parameters\n self.input_translator = root.find( \"request_param_translation\" )\n if self.input_translator:\n self.input_translator = ToolInputTranslator.from_element( self.input_translator )\n # Command line (template). Optional for tools that do not invoke a local program \n command = root.find(\"command\")\n if command is not None and command.text is not None:\n self.command = command.text.lstrip() # get rid of leading whitespace\n interpreter = command.get(\"interpreter\")\n if interpreter:\n # TODO: path munging for cluster/dataset server relocatability\n executable = self.command.split()[0]\n abs_executable = os.path.abspath(os.path.join(self.tool_dir, executable))\n self.command = self.command.replace(executable, abs_executable, 1)\n self.command = interpreter + \" \" + self.command\n else:\n self.command = ''\n # Parameters used to build URL for redirection to external app\n redirect_url_params = root.find( \"redirect_url_params\" )\n if redirect_url_params is not None and redirect_url_params.text is not None:\n # get rid of leading / trailing white space\n redirect_url_params = redirect_url_params.text.strip()\n # Replace remaining white space with something we can safely split on later\n # when we are building the params\n self.redirect_url_params = redirect_url_params.replace( ' ', '**^**' )\n else:\n self.redirect_url_params = ''\n # Short description of the tool\n self.description = util.xml_text(root, \"description\")\n # Job runner\n if self.app.config.start_job_runners is None:\n # Jobs are always local regardless of tool config if no additional\n # runners are started\n self.job_runner = \"local:///\"\n else:\n # Set job runner to the cluster default\n self.job_runner = self.app.config.default_cluster_job_runner\n for tup in self.app.config.tool_runners:\n if tup[0] == self.id.lower():\n self.job_runner = tup[1]\n break\n # Is this a 'hidden' tool (hidden in tool menu)\n self.hidden = util.xml_text(root, \"hidden\")\n if self.hidden: self.hidden = util.string_as_bool(self.hidden)\n # Load any tool specific code (optional) Edit: INS 5/29/2007,\n # allow code files to have access to the individual tool's\n # \"module\" if it has one. 
Allows us to reuse code files, etc.\n self.code_namespace = dict()\n self.hook_map = {}\n for code_elem in root.findall(\"code\"):\n for hook_elem in code_elem.findall(\"hook\"):\n for key, value in hook_elem.items():\n # map hook to function\n self.hook_map[key]=value\n file_name = code_elem.get(\"file\")\n code_path = os.path.join( self.tool_dir, file_name )\n execfile( code_path, self.code_namespace )\n # Load any tool specific options (optional)\n self.options = dict( sanitize=True, refresh=False )\n for option_elem in root.findall(\"options\"):\n for option, value in self.options.copy().items():\n if isinstance(value, type(False)):\n self.options[option] = util.string_as_bool(option_elem.get(option, str(value)))\n else:\n self.options[option] = option_elem.get(option, str(value))\n self.options = Bunch(** self.options)\n # Parse tool inputs (if there are any required)\n self.parse_inputs( root )\n # Parse tool help\n self.parse_help( root )\n # Description of outputs produced by an invocation of the tool\n self.outputs = {}\n out_elem = root.find(\"outputs\")\n if out_elem:\n for data_elem in out_elem.findall(\"data\"):\n output = ToolOutput( data_elem.get(\"name\") )\n output.format = data_elem.get(\"format\", \"data\")\n output.change_format = data_elem.findall(\"change_format\")\n output.metadata_source = data_elem.get(\"metadata_source\", \"\")\n output.parent = data_elem.get(\"parent\", None)\n output.label = util.xml_text( data_elem, \"label\" )\n output.count = int( data_elem.get(\"count\", 1) )\n output.filters = data_elem.findall( 'filter' )\n self.outputs[ output.name ] = output\n # Any extra generated config files for the tool\n self.config_files = []\n conf_parent_elem = root.find(\"configfiles\")\n if conf_parent_elem:\n for conf_elem in conf_parent_elem.findall( \"configfile\" ):\n name = conf_elem.get( \"name\" )\n filename = conf_elem.get( \"filename\", None )\n text = conf_elem.text\n self.config_files.append( ( name, filename, text ) )\n # Action\n action_elem = root.find( \"action\" )\n if action_elem is None:\n self.tool_action = DefaultToolAction()\n else:\n module = action_elem.get( 'module' )\n cls = action_elem.get( 'class' )\n mod = __import__( module, globals(), locals(), [cls])\n self.tool_action = getattr( mod, cls )()\n # User interface hints\n self.uihints = {}\n uihints_elem = root.find( \"uihints\" )\n if uihints_elem is not None:\n for key, value in uihints_elem.attrib.iteritems():\n self.uihints[ key ] = value\n # Tests\n tests_elem = root.find( \"tests\" )\n if tests_elem:\n try:\n self.parse_tests( tests_elem )\n except:\n log.exception( \"Failed to parse tool tests\" )\n else:\n self.tests = None\n # Determine if this tool can be used in workflows\n self.is_workflow_compatible = self.check_workflow_compatible()",
"def toolset_from_grammar():\n ### <toolset>\n def doMult(node):\n \t(a,b) = node\n \tnode.value = a.value * b.value\n \n def doAdd(node):\n \t(a,b) = node\n \tnode.value = a.value + b.value\n \n def formatResult(node):\n \tnode.value = \"%.3f\" % node.value\n \n return locals().copy()",
"def _parse_definition(self, definition):\n split = definition.split()\n self._verify(split)\n\n self.name = split[0]\n self.nodes = [Node(x) for x in split[1:1+self.num_nodes]]\n\n # Construct the remaining value and param=value pair string\n value_str = ' '.join(split[1+self.num_nodes:])\n\n # Isolate single values\n values = re.findall(self.__class__.value_regex, value_str)\n if values:\n self.value = self._parse_values(values)\n\n # Isolate key=value pairs\n pairs = re.findall(self.__class__.pair_regex, value_str)\n if pairs:\n self.kwargs = {p[0].strip(): p[1].strip() for p in\n [pair.split('=') for pair in pairs]}\n self.kwargs = self._parse_pairs(self.kwargs)\n # for key, value in self.kwargs.items():\n # setattr(self, key, value)",
"def parse(cls, input):",
"def parseInput(input):\n # parse=bash(\"sh ../bitpar/parse '\"+input+\"'\") # ouput: [.VP [.V draw][.NP [.D a][.N-bar [.N square]]]]\n bash(\"java -jar ../lambda/lambda-auto.jar ../lambda/input.txt > ../lambda/input.tex\")\n fml=bash(\"make -C ../lambda input.fml\")\n print fml\n cmd=`fml`.split('true ')[1]\n \n # TEST CASES\n # cmd=\"draw(Gy[red(y) & square(y)])\" \n cmd=\"draw(\\gamma y(red(y) & square(y))).\"\n\n print cmd\n parse(cmd)",
"def test_arguments_string_parsing_with_long_and_short_names_in_root_parsing_manager(root_config_parsing_manager):\n root_config_parsing_manager.add_argument('c', 'coco')\n root_config_parsing_manager.add_argument('d', 'xx', argument_type=int)\n\n check_parse_cli_result(root_config_parsing_manager, '-c 1', {'coco': '1'})\n\n check_parse_cli_result(root_config_parsing_manager, '-d 555', {'xx': 555})",
"def _parse(description):\n args, kw = [], {}\n def add(sofar):\n if len(sofar) == 1:\n args.append(sofar[0])\n else:\n kw[sofar[0]] = sofar[1]\n sofar = ()\n for (type, value) in _tokenize(description):\n if type is _STRING:\n sofar += (value,)\n elif value == ':':\n add(sofar)\n sofar = ()\n add(sofar)\n return args, kw",
"def getparser():\n prs = ap.ArgumentParser(\n description=\"Format conversion for \"\n \"and introspection of \"\n \"intersphinx \"\n \"'objects.inv' files.\"\n )\n prs.add_argument(\n \"-\" + PrsConst.VERSION[0],\n \"--\" + PrsConst.VERSION,\n help=\"Print package version & other info\",\n action=\"store_true\",\n )\n\n sprs = prs.add_subparsers(\n title=\"Subcommands\",\n dest=PrsConst.SUBPARSER_NAME,\n metavar=f\"{{{PrsConst.CONVERT},{PrsConst.SUGGEST}}}\",\n help=\"Execution mode. Type \"\n \"'sphobjinv [mode] -h' \"\n \"for more information \"\n \"on available options. \"\n \"Mode names can be abbreviated \"\n \"to their first two letters.\",\n )\n\n # Enforce subparser as optional. No effect for 3.4 to 3.7;\n # briefly required a/o 3.7.0b4 due to change in default behavior, per:\n # https://bugs.python.org/issue33109. 3.6 behavior restored for\n # 3.7 release.\n sprs.required = False\n\n spr_convert = sprs.add_parser(\n PrsConst.CONVERT,\n aliases=[PrsConst.CONVERT[:2]],\n help=PrsConst.HELP_CO_PARSER,\n description=PrsConst.HELP_CO_PARSER,\n )\n spr_suggest = sprs.add_parser(\n PrsConst.SUGGEST,\n aliases=[PrsConst.SUGGEST[:2]],\n help=PrsConst.HELP_SU_PARSER,\n description=PrsConst.HELP_SU_PARSER,\n )\n\n # ### Args for conversion subparser\n spr_convert.add_argument(\n PrsConst.MODE,\n help=\"Conversion output format\",\n choices=(PrsConst.ZLIB, PrsConst.PLAIN, PrsConst.JSON),\n )\n\n spr_convert.add_argument(\n PrsConst.INFILE,\n help=(\n \"Path to file to be converted. Passing '-' indicates to read from stdin \"\n \"(plaintext/JSON only).\"\n ),\n )\n\n spr_convert.add_argument(\n PrsConst.OUTFILE,\n help=(\n \"Path to desired output file. \"\n \"Defaults to same directory and main \"\n \"file name as input file but with extension \"\n + PrsConst.HELP_CONV_EXTS\n + \", as appropriate for the output format. \"\n \"A path to a directory is accepted here, \"\n \"in which case the default output file name will be used. \"\n \"Passing '-' indicates to write to stdout. If \"\n + PrsConst.INFILE\n + \" is passed as '-', \"\n + PrsConst.OUTFILE\n + \" can be omitted and both stdin and stdout will be used.\"\n ),\n nargs=\"?\",\n default=None,\n )\n\n # Mutually exclusive group for --expand/--contract\n gp_expcont = spr_convert.add_argument_group(title=\"URI/display name conversions\")\n meg_expcont = gp_expcont.add_mutually_exclusive_group()\n meg_expcont.add_argument(\n \"-\" + PrsConst.EXPAND[0],\n \"--\" + PrsConst.EXPAND,\n help=\"Expand all URI and display name abbreviations\",\n action=\"store_true\",\n )\n\n meg_expcont.add_argument(\n \"-\" + PrsConst.CONTRACT[0],\n \"--\" + PrsConst.CONTRACT,\n help=\"Contract all URI and display name abbreviations\",\n action=\"store_true\",\n )\n\n # Clobber argument\n spr_convert.add_argument(\n \"-\" + PrsConst.OVERWRITE[0],\n \"--\" + PrsConst.OVERWRITE,\n help=\"Overwrite output files without prompting\",\n action=\"store_true\",\n )\n\n # stdout suppressor option (e.g., for scripting)\n spr_convert.add_argument(\n \"-\" + PrsConst.QUIET[0],\n \"--\" + PrsConst.QUIET,\n help=\"Suppress printing of status messages and \"\n \"overwrite output files without prompting\",\n action=\"store_true\",\n )\n\n # Flag to treat infile as a URL\n spr_convert.add_argument(\n \"-\" + PrsConst.URL[0],\n \"--\" + PrsConst.URL,\n help=(\n \"Treat 'infile' as a URL for download. 
\"\n \"Cannot be used with an infile of '-'.\"\n ),\n action=\"store_true\",\n )\n\n # ### Args for suggest subparser\n spr_suggest.add_argument(\n PrsConst.INFILE,\n help=(\n \"Path to inventory file to be searched. \"\n \"Passing '-' indicates to read from stdin (plaintext/JSON only).\"\n ),\n )\n spr_suggest.add_argument(PrsConst.SEARCH, help=\"Search term for object suggestions\")\n spr_suggest.add_argument(\n \"-\" + PrsConst.ALL[0],\n \"--\" + PrsConst.ALL,\n help=\"Display all results \"\n \"regardless of the number returned \"\n \"without prompting for confirmation.\",\n action=\"store_true\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.PAGINATE[0],\n \"--\" + PrsConst.PAGINATE,\n help=\"Paginate long search results\",\n action=\"store_true\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.INDEX[0],\n \"--\" + PrsConst.INDEX,\n help=\"Include Inventory.objects list indices with the search results\",\n action=\"store_true\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.SCORE[0],\n \"--\" + PrsConst.SCORE,\n help=\"Include fuzzywuzzy scores with the search results\",\n action=\"store_true\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.THRESH[0],\n \"--\" + PrsConst.THRESH,\n help=\"Match quality threshold, integer 0-100, \"\n \"default 75. Default is suitable when \"\n \"'search' is exactly a known object name. \"\n \"A value of 30-50 gives better results \"\n \"for approximate matches.\",\n default=PrsConst.DEF_THRESH,\n type=int,\n choices=range(101),\n metavar=\"{0-100}\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.URL[0],\n \"--\" + PrsConst.URL,\n help=(\n \"Treat 'infile' as a URL for download. \"\n f\"Cannot be used with --{PrsConst.URL}.\"\n ),\n action=\"store_true\",\n )\n\n return prs",
"def _parse_definition(self, line):\n op_pos = line.find('=')\n op_end = op_pos + 1\n if op_pos < 0:\n self._error('not a variable definition')\n\n if op_pos > 0 and line[op_pos - 1] in [':', '+']:\n op_pos -= 1\n else:\n self._error('only := and += are supported')\n\n # set op, sym, and val\n op = line[op_pos:op_end]\n sym = line[:op_pos].strip()\n val = self._expand_value(line[op_end:].lstrip())\n\n if op == ':=':\n self.symbol_table[sym] = val\n elif op == '+=':\n self.symbol_table[sym] += ' ' + val",
"def show_cmd_parser(buf):\n outputs = {}\n for line in line_parser(buf):\n if line.startswith('ovs_version'):\n # e.g.)\n # ovs_version: \"2.5.0\"\n outputs['ovs_version'] = line.split('\"')[1]\n\n return outputs",
"def parse_line(line):\n label = opcode = operand = \"\"\n\n token_list = Util.get_token_list(line)\n\n token_length = len(token_list)\n\n mnemonics_list = list(Optab.as_dict().keys())\n\n if token_length == 1:\n if token_list[0] in mnemonics_list:\n # like RSUB\n opcode = token_list[0]\n else:\n # like END\n label = token_list[0]\n elif token_length == 2:\n if token_list[0] in mnemonics_list:\n # like ADD THREE\n opcode, operand = token_list\n elif token_list[1] in mnemonics_list:\n # like END RSUB\n label, opcode = token_list\n elif token_length == 3:\n if token_list[0] in mnemonics_list:\n # like LDA BUFFER, X\n opcode, operand, _ = token_list\n else:\n # like THREE WORD 3\n label, opcode, operand = token_list\n elif token_length == 4:\n # like LOOP LDA BUFFER, X\n # or EOF BYTE C'454F46'\n label = token_list[0]\n opcode = token_list[1]\n\n if opcode == OpCode.BYTE:\n # if opcode is BYTE then the 4th string\n # will be the actual value,(token_list[3]).\n # 3rd string will be 'C' or 'X'\n operand = token_list[3]\n else:\n operand = token_list[2]\n\n return label, opcode, operand",
"def parse_help_string(key, excutable=\"castep.serial\"):\n\n out = sbp.check_output([excutable, \"-h\", key], universal_newlines=True)\n lines = out.split(\"\\n\")\n value_type = None\n key_level = None\n\n for i, line in enumerate(lines):\n if \"Help information on PARAMETERS keywords\" in line:\n param_start = i\n\n match = type_re.search(line)\n if match and not value_type:\n value_type = match.group(1).lower()\n\n match = level_re.search(line)\n if match and not key_level:\n key_level = match.group(1).lower()\n\n cell_lines = lines[2:param_start]\n param_lines = lines[param_start + 2 :]\n\n if len(cell_lines) > len(param_lines):\n help_lines = cell_lines\n key_type = \"CELL\"\n else:\n help_lines = param_lines\n key_type = \"PARAM\"\n\n return help_lines, key_type, key_level, value_type",
"def create_tool_from_suggestion():\n pass",
"def parse_command(self, cmd, opts):\n cmd = cmd.upper().strip()\n if cmd == '':\n return None\n elif cmd == 'LOAD':\n return self.parse_load(opts)\n elif cmd == 'PRINT':\n return self.parse_print(opts)\n elif cmd == 'INC':\n return self.parse_inc(opts)\n elif cmd == 'DEC':\n return self.parse_dec(opts)\n elif cmd == 'JMP':\n return self.parse_jmp(opts)\n elif cmd == 'MOV':\n return self.parse_mov(opts)\n elif cmd == 'ADD':\n return self.parse_add(opts)\n elif cmd == 'SUB':\n return self.parse_sub(opts)\n elif cmd == 'MUL':\n return self.parse_mul(opts)\n elif cmd == 'DIV':\n return self.parse_div(opts)\n elif cmd == 'MOD':\n return self.parse_mod(opts)\n elif cmd == 'DB':\n return self.parse_db(opts)\n elif cmd == 'HEAP':\n return self.parse_heap(opts)\n elif cmd == 'INFO':\n return self.parse_info(opts)\n elif cmd == 'STOP':\n self.code += chr(opcodes.STOP)\n else:\n raise ParseError('Unsupported command: %s @%s' % (cmd, self.line))\n #print (cmd, opts)\n return self.code",
"def parse_global_mcmc(desc, return_parser=False):\n parser = argparse.ArgumentParser(description=desc)\n \n h = 'the name of the mcmc parameter file to load'\n parser.add_argument('param_file', type=str, help=h)\n \n h = 'the minimum k value to include in the fit'\n parser.add_argument('--kmin', type=float, default=0.01, help=h)\n \n h = 'the maximum k value to include in the fit'\n parser.add_argument('--kmax', type=float, default=0.6, help=h)\n\n h = 'the name of the output file to save the results to'\n parser.add_argument('-o', '--output', type=str, required=True, help=h)\n \n if not return_parser:\n return parser.parse_args()\n else:\n return parser"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a param handler
|
def param_handler():
yield keyword("on")
yield normalspaces()
hname = yield var_name()
yield normalspaces()
action = yield keyword("set|get|preset|postset")
yield normalspaces()
other = yield var_name()
yield normalspaces()
yield keyword("do")
yield normalspaces()
expr = yield expression
return s.Construct(s.PARAMETERS_HANDLER, hname, action, other, expr)
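
The combinator above follows the generator style of the Python parsec package (the @generate, sepBy and optional usage elsewhere in this grammar points that way). Below is a self-contained sketch of the same pattern against a toy "on <name> set <other> do <expr>" input; the spaces and ident parsers are stand-ins invented for the example, not the project's keyword()/var_name() helpers.

from parsec import generate, regex, string

spaces = regex(r"\s+")                    # stand-in for normalspaces()
ident = regex(r"[A-Za-z_][A-Za-z0-9_]*")  # stand-in for var_name()

@generate
def tiny_param_handler():
    yield string("on")
    yield spaces
    hname = yield ident
    yield spaces
    action = yield regex(r"set|get|preset|postset")
    yield spaces
    other = yield ident
    yield spaces
    yield string("do")
    yield spaces
    expr = yield regex(r".+")             # stand-in for the real expression parser
    return (hname, action, other, expr)

print(tiny_param_handler.parse("on radius set val do radius_changed val"))
# -> ('radius', 'set', 'val', 'radius_changed val')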
|
[
"def _handleInput(self, paramInput):\n pass",
"def parse_param_elem( self, input_elem, enctypes, context ):\n param = ToolParameter.build( self, input_elem )\n param_enctype = param.get_required_enctype()\n if param_enctype:\n enctypes.add( param_enctype )\n # If parameter depends on any other paramters, we must refresh the\n # form when it changes\n for name in param.get_dependencies():\n context[ name ].refresh_on_change = True\n return param",
"def config(self, param: str, /) -> Any:",
"def default_body_parser(event, text):\n parts = [e.rsplit(' ', 1) for e in text.strip().split('=')]\n parts = [p.strip() for p in chain.from_iterable(parts)]\n if not len(parts) % 2:\n i = iter(parts)\n for k, v in zip(i, i):\n try:\n v = int(v)\n except ValueError:\n pass\n event.fields[k] = v",
"def callback_param_client(self,command,result):\n if result is not None:\n result=json.loads(result)\n param_value = result[\"values\"][\"value\"]\n rosparam.set_param(command.wrapper.name,param_value)",
"def parse_params(params):\n global scintillator_material\n scintillator_material = check_material(params.scintillator_material)\n global scintillator_thickness\n scintillator_thickness = params.scintillator_thickness\n add_filter(params.filter_1_material, params.filter_1_thickness)\n add_filter(params.filter_2_material, params.filter_2_thickness)\n add_filter(params.filter_3_material, params.filter_3_thickness)\n global d_source\n d_source = params.source_distance\n global sample_material\n sample_material = check_material(params.sample_material)\n global pixel_size\n pixel_size = params.pixel_size",
"def parse_destination_params(self, params):\n raise NotImplementedError()",
"def decompose_parameter(par):\n parts = par.split('__')\n\n pname, func, phase = None, None, None\n\n if len(parts) == 1:\n pname = parts[0]\n func = avg_\n\n elif len(parts) == 2:\n pname = parts[0]\n if parts[-1] in known_functions.keys():\n func = known_functions[parts[1]]\n else:\n phase = parts[1]\n func = avg_\n\n elif len(parts) == 3:\n pname = parts[0]\n phase = parts[1]\n func = known_functions[parts[2]]\n\n return pname, phase, func",
"def _parse_parameters(self):\n model_dump = self._get_model_dump()\n trees = self._split_trees(model_dump)\n for i, tree in enumerate(trees):\n self._parse_tree(tree, i)",
"def _handle_params(self):\n #Client\n for param in self.parser.client_params_list:\n command = Command(param.get_command(),\n self.command_handler.callback_param_client,\n wrapper=param,protocol=self.protocol) \n self.commands_list.append(command)",
"def process_epidemic_parameters(self):",
"def _match_param(self, arg: str) -> Tuple[\"Param\", str, str, str]:\n param_name, param_type, param_value = parse_potential_argument(\n arg, self.prefix\n )\n # parse -arg as -a rg only applicable with prefix auto and -\n # When we didn't match any argument-like\n # with allow_attached=False\n # Or we matched but it is not defined\n name_with_attached: str = None\n if not param_type and self.prefix == \"auto\":\n # then -a1 will be put in param_value, as if `a1` is a name,\n # it should be --a1\n name_with_attached = (\n param_value\n if (\n param_name is None\n and param_value\n and param_value[:1] == \"-\"\n and param_value[1:2] != \"-\"\n )\n else None\n )\n\n elif not param_type and len(self.prefix) <= 1:\n # say prefix = '+'\n # then `a1` for `+a1` will be put as param_name, since\n # there is no restriction on name length\n name_with_attached = (\n self.prefix + param_name\n if param_name and param_name[:1] != self.prefix\n else None\n )\n\n # we cannot find a parameter with param_name\n # check if there is any value attached\n if name_with_attached and not self.get_param(param_name):\n param_name2, param_type2, param_value2 = parse_potential_argument(\n name_with_attached, self.prefix, allow_attached=True\n )\n # Use them only if we found a param_name2 and\n # arbitrary: not previous param_name found\n # otherwise: parameter with param_name2 exists\n if param_name2 is not None and (\n (self.arbitrary and param_name is None)\n or self.get_param(param_name2)\n ):\n param_name, param_type, param_value = (\n param_name2,\n param_type2,\n param_value2,\n )\n\n # create the parameter for arbitrary\n if (\n self.arbitrary\n and param_name is not None\n and not self.get_param(param_name)\n ):\n self.add_param(param_name, type=param_type)\n\n param: \"Param\" = self.get_param(param_name)\n if not param:\n return None, param_name, param_type, param_value\n\n param_maybe_overwritten: \"Param\" = param.overwrite_type(param_type)\n if param_maybe_overwritten is not param:\n self._set_param(param_maybe_overwritten)\n param = param_maybe_overwritten\n\n param.hit = True\n if param_value is not None:\n param.push(param_value)\n return param, param_name, param_type, param_value",
"def _handler_get(self, *args, **kwargs):\n next_state = None\n\n # Retrieve the required parameter, raise if not present.\n try:\n params = args[0]\n\n except IndexError:\n raise InstrumentParameterException('Get command requires a parameter list or tuple.')\n # If all params requested, retrieve config.\n if (params == DriverParameter.ALL) or (params == [DriverParameter.ALL]):\n result = self._param_dict.get_config()\n\n # If not all params, confirm a list or tuple of params to retrieve.\n # Raise if not a list or tuple.\n # Retrieve each key in the list, raise if any are invalid.\n else:\n if not isinstance(params, (list, tuple)):\n raise InstrumentParameterException('Get argument not a list or tuple.')\n result = {}\n for key in params:\n try:\n val = self._param_dict.get(key)\n result[key] = val\n\n except KeyError:\n raise InstrumentParameterException(('%s is not a valid parameter.' % key))\n\n return next_state, result\n # return next_state, (next_state, result)",
"def _split_args_line(line):\n lexer = lap.Lexer(line)\n scanner = lap.Parser(lexer)\n tree = scanner.input_line()\n\n extractor = lap.QueryParamsExtractor()\n params_option_value, rest_of_args = extractor.visit(tree)\n\n return params_option_value, rest_of_args",
"def _parse_procedure(self, procedure_dict):\r\n raise NotImplementedError()",
"def _process_parameter(self, item):\n a_param = nodes.Parameter()\n logger = logging.getLogger(self.__class__.__name__)\n\n # In a Full CWMP-DM XML, Parameters always have a @name, @access, and syntax\n a_param.set_name(item[\"@name\"])\n a_param.set_access(item[\"@access\"])\n\n # In a Full CWMP-DM XML, Parameters never have a @base\n if \"@base\" in item:\n a_param.set_base(item[\"@base\"])\n\n if \"@activeNotify\" in item:\n a_param.set_active_notify(item[\"@activeNotify\"])\n\n if \"@forcedInform\" in item:\n a_param.set_forced_inform(item[\"@forcedInform\"])\n\n if \"description\" in item:\n a_param.set_description(item[\"description\"])\n\n # In a Full CWMP-DM XML, Parameters always have a @name and @access\n logger.debug(\n \"Processing Parameter: \\\"{}\\\" with \\\"{}\\\" Access\"\n .format(a_param.get_name(), a_param.get_access))\n\n a_param.set_syntax(self._process_syntax(a_param.get_name(), item[\"syntax\"]))\n\n return a_param",
"def sched_switch_parser(event, text):\n if text.count('=') == 2: # old format\n regex = re.compile(\n r'(?P<prev_comm>\\S.*):(?P<prev_pid>\\d+) \\[(?P<prev_prio>\\d+)\\] (?P<status>\\S+)'\n r' ==> '\n r'(?P<next_comm>\\S.*):(?P<next_pid>\\d+) \\[(?P<next_prio>\\d+)\\]'\n )\n parser_func = regex_body_parser(regex)\n return parser_func(event, text)\n else: # there are more than two \"=\" -- new format\n return default_body_parser(event, text.replace('==>', ''))",
"def parse_via_params(via_params_str):\n if not via_params_str or not via_params_str.startswith(';'):\n return HParams(), via_params_str\n else:\n hparams = HParams()\n try:\n hparams.parse_raw(via_params_str[1:])\n except Exception as e:\n raise ViaHeaderError(f'Cannot parse Via params from {via_params_str}: {e}')\n hparams.parse_known(known_function=ViaHeader.parse_known_param_fun)\n rest = via_params_str.lstrip(hparams.assemble())\n return hparams, rest",
"def _extract_pipeline_param(param: str) -> dsl.PipelineParam:\n matches = re.findall(r\"{{pipelineparam:op=([\\w\\s_-]*);name=([\\w\\s_-]+)}}\",\n param)\n op_dependency_name = matches[0][0]\n output_file_name = matches[0][1]\n return dsl.PipelineParam(output_file_name, op_dependency_name)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a plugin clause
|
def plugin_clause():
clause = yield (local_decl ^
function_def ^
struct_def ^
parameters_def ^
mousetool_def ^
rollout_def ^
on_map_do_handler ^
on_clone_do_handler ^
on_do_handler)
return clause
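
A note on the operator chaining these alternatives: assuming the combinators are parsec parsers, ^ is try_choice, which backtracks to the start of a failed branch before trying the next one, while | gives up as soon as the failed branch has consumed input. That matters here because the three on ... do handlers all begin with the same keyword. A minimal demonstration (the literals are invented):

from parsec import ParseError, string

committed = string("local") | string("locked")      # '|' does not backtrack
backtracking = string("local") ^ string("locked")   # '^' retries from the branch start

print(backtracking.parse("locked"))                 # -> 'locked'
try:
    committed.parse("locked")                       # 'local' consumed "loc", so '|' stops here
except ParseError as exc:
    print("non-backtracking choice failed:", exc)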
|
[
"def plugin_def():\n yield keyword(\"plugin\")\n yield normalspaces()\n vname = yield var_name()\n yield normalspaces()\n vname = yield var_name()\n yield normalspaces()\n vnop = yield sepBy(named_argument, normalspaces())\n yield normalspaces()\n yield string(\"(\")\n yield normalspaces()\n pluginclauses = yield sepBy(plugin_clause, normalspaces())\n yield normalspaces()\n yield string(\")\")\n return s.Construct(s.PLUGIN_DEF, vname, vnop, pluginclauses)",
"def ParseWithPlugin(self, line):\n if not IsLoaded():\n print u'No hive loaded, unable to parse.'\n return\n\n current_hive = PregCache.hive_storage.loaded_hive\n if not current_hive:\n return\n\n if not line:\n print u'No plugin name added.'\n return\n\n plugin_name = line\n if '-h' in line:\n items = line.split()\n if len(items) != 2:\n print u'Wrong usage: plugin [-h] PluginName'\n return\n if items[0] == '-h':\n plugin_name = items[1]\n else:\n plugin_name = items[0]\n\n if not plugin_name.startswith('winreg'):\n plugin_name = u'winreg_{0:s}'.format(plugin_name)\n\n hive_type = current_hive.type\n plugins_list = parsers_manager.ParsersManager.GetWindowsRegistryPlugins()\n plugin_found = False\n for plugin_cls in plugins_list.GetKeyPlugins(hive_type):\n plugin = plugin_cls(reg_cache=current_hive.reg_cache)\n if plugin.plugin_name == plugin_name:\n # If we found the correct plugin.\n plugin_found = True\n break\n\n if not plugin_found:\n print u'No plugin named: {0:s} available for Registry type {1:s}'.format(\n plugin_name, hive_type)\n return\n\n if not hasattr(plugin, 'REG_KEYS'):\n print u'Plugin: {0:s} has no key information.'.format(line)\n return\n\n if '-h' in line:\n print frontend_utils.FormatHeader(plugin_name)\n print frontend_utils.FormatOutputString('Description', plugin.__doc__)\n print u''\n for registry_key in plugin.expanded_keys:\n print frontend_utils.FormatOutputString('Registry Key', registry_key)\n return\n\n if not plugin.expanded_keys:\n plugin.ExpandKeys(PregCache.parser_context)\n\n # Clear the last results from parse key.\n PregCache.events_from_last_parse = []\n\n # Defining outside of for loop for optimization.\n get_key_by_path = current_hive.GetKeyByPath\n for registry_key in plugin.expanded_keys:\n key = get_key_by_path(registry_key)\n if not key:\n print u'Key: {0:s} not found'.format(registry_key)\n continue\n\n # Move the current location to the key to be parsed.\n self.ChangeDirectory(registry_key)\n # Parse the key.\n print_strings = ParseKey(\n key=current_hive.GetCurrentRegistryKey(), hive_helper=current_hive,\n shell_helper=PregCache.shell_helper, verbose=False,\n use_plugins=[plugin_name])\n self.output_writer.write(u'\\n'.join(print_strings))\n self.output_writer.flush()",
"def _ParseAnalysisPluginOptions(self, options):\n # Get a list of all available plugins.\n analysis_plugin_info = self._analysis_manager.GetAllPluginInformation()\n # Use set-comprehension to create a set of the analysis plugin names.\n analysis_plugin_names = {\n name.lower() for name, _, _ in analysis_plugin_info}\n\n analysis_plugins = self.ParseStringOption(options, 'analysis_plugins')\n if not analysis_plugins:\n return\n\n # Use set-comprehension to create a set of the requested plugin names.\n requested_plugin_names = {\n name.strip().lower() for name in analysis_plugins.split(',')}\n\n # Check to see if we are trying to load plugins that do not exist.\n difference = requested_plugin_names.difference(analysis_plugin_names)\n if difference:\n difference_string = ' '.join(difference)\n raise errors.BadConfigOption(\n f'Non-existent analysis plugins specified: {difference_string:s}')\n\n self._analysis_plugins = self._GetAnalysisPlugins(analysis_plugins)\n\n for analysis_plugin in self._analysis_plugins:\n helpers_manager.ArgumentHelperManager.ParseOptions(\n options, analysis_plugin)",
"def _assemble_plugin_macro(self, plugin: str, args=None, ij1_style=True):\n if args is None:\n macro = \"run(\\\"{}\\\");\".format(plugin)\n return macro\n macro = \"\"\"run(\"{0}\", \\\"\"\"\".format(plugin)\n for key, value in args.items():\n argument = self._format_argument(key, value, ij1_style)\n if argument is not None:\n macro = macro + ' {}'.format(argument)\n macro = macro + \"\"\"\\\");\"\"\"\n return macro",
"def test_plugin_sets_analysis_driver():\n plugin = \"hydrotrend\"\n e = Experiment(plugin=plugin)\n assert_equal(e.interface.analysis_driver, \"dakota_run_plugin\")",
"def _get_plugin(plugin_name, plugin_list):\r\n plugin = None\r\n for plug in plugin_list:\r\n if plug[\"name\"] == plugin_name:\r\n plugin = plug\r\n break\r\n return plugin",
"def define_sub_options(self):\n self.plugin_parser = self.parser.add_argument_group(\"Plugin Options\",\n \"Options for all plugins.\")\n self.plugin_parser.add_argument(\"-H\", \"--host\",\n default='127.0.0.1',\n required=True,\n help=\"Host IP address or DNS\",\n dest=\"host\")\n self.plugin_parser.add_argument(\"-u\", \"--user\",\n default=None,\n required=False,\n help=\"User name\",\n dest=\"user\")\n self.plugin_parser.add_argument(\"-p\", \"--password\",\n default=None,\n required=False,\n help=\"User password\",\n dest=\"password\")",
"def parseManagerText(self, text):\n\n # Regular expressions for scanning the file\n find_active = re.compile(r\"^\\s*?(\\w+)\\.py\", re.MULTILINE)\n find_inactive = re.compile(r\"^\\s*?#\\s*(\\w+)\\.py\", re.MULTILINE)\n find_manager = re.compile(r\"^\\s*plugin_manager\\.py\", re.MULTILINE)\n\n if 1: # Put the first match in the starts dict.\n starts = OrderedDict()\n for kind,iter in (\n ('on',find_active.finditer(text)),\n ('off',find_inactive.finditer(text)),\n ):\n for match in iter:\n name = match.groups()[0]\n start = match.start()\n if start != -1:\n bunch = starts.get(name)\n if not bunch or bunch.start > start:\n starts[name] = g.Bunch(\n kind=kind,name=name,start=start,match=match)\n\n self.actives = OrderedDict(\n [(bunch.name,bunch.match) for bunch in starts.values() if bunch.kind=='on'])\n\n self.inactives = OrderedDict(\n [(bunch.name,bunch.match) for bunch in starts.values() if bunch.kind=='off'])\n\n if 0: # debugging.\n starts2 = [(bunch.start,bunch.name,bunch.kind) for bunch in starts.values()]\n starts2.sort()\n g.trace(g.listToString(starts2,tag='starts2 list'))\n g.trace(g.dictToString(self.actives,tag='Active Plugins'))\n\n else: # Original code.\n # Get active plugin defintions\n self.actives = dict([(match.groups()[0], match) \n for match in find_active.finditer(text)])\n\n # Get inactive plugin definitions\n self.inactives = dict([(match.groups()[0], match) \n for match in find_inactive.finditer(text)])\n\n # List of all plugins\n self.all = {}\n self.all.update(self.actives)\n self.all.update(self.inactives)\n\n # Locaction of the plugin_manager.py plugin - this is where\n # we add additional files\n self.manager = find_manager.search(text)",
"def parse_phrase(self, query):\n regex = r'\\w*\"([^\"]*)\"'\n query['phrase'] = re.findall(regex, self.line)\n if query['phrase']:\n self.line = re.sub(regex, '', self.line)\n return query",
"def parse_plugins_config(self, params_dict):\n self.analysis_plugins = list()\n json_parse_helper = eodatadown.eodatadownutils.EDDJSONParseHelper()\n if json_parse_helper.doesPathExist(params_dict, [\"analysis\"]):\n for plugin_config in params_dict[\"analysis\"]:\n plugin_path = json_parse_helper.getStrValue(plugin_config, [\"path\"])\n plugin_path = os.path.abspath(plugin_path)\n plugin_module_name = json_parse_helper.getStrValue(plugin_config, [\"module\"])\n plugin_cls_name = json_parse_helper.getStrValue(plugin_config, [\"class\"])\n # Check if plugin path input is already in system path.\n already_in_path = False\n for c_path in sys.path:\n c_path = os.path.abspath(c_path)\n if c_path == plugin_path:\n already_in_path = True\n break\n # Add plugin path to system path\n if not already_in_path:\n sys.path.insert(0, plugin_path)\n logger.debug(\"Add plugin path ('{}') to the system path.\".format(plugin_path))\n # Try to import the module.\n logger.debug(\"Try to import the plugin module: '{}'\".format(plugin_module_name))\n plugin_mod_inst = importlib.import_module(plugin_module_name)\n logger.debug(\"Imported the plugin module: '{}'\".format(plugin_module_name))\n if plugin_mod_inst is None:\n raise Exception(\"Could not load the module: '{}'\".format(plugin_module_name))\n # Try to make instance of class.\n logger.debug(\"Try to create instance of class: '{}'\".format(plugin_cls_name))\n plugin_cls_inst = getattr(plugin_mod_inst, plugin_cls_name)()\n logger.debug(\"Created instance of class: '{}'\".format(plugin_cls_name))\n if plugin_cls_inst is None:\n raise Exception(\"Could not create instance of '{}'\".format(plugin_cls_name))\n if json_parse_helper.doesPathExist(plugin_config, [\"params\"]):\n logger.debug(\"User params are present for plugin so will test the required keys are present.\")\n plugin_cls_inst.set_users_param(plugin_config[\"params\"])\n plugin_cls_inst.check_param_keys(raise_except=True)\n logger.debug(\"User params for the plugin have the correct keys.\")\n self.analysis_plugins.append(plugin_config)",
"def pluginInfo(string, animCurveInterp=\"string\", autoload=bool, controlCommand=\"string\", listPlugins=bool, changedCommand=\"string\", loadPluginPrefs=bool, unloadOk=bool, apiVersion=bool, device=bool, writeRequires=bool, vendor=\"string\", path=\"string\", version=bool, activeFile=bool, tool=\"string\", pluginsInUse=bool, userNamed=bool, cacheFormat=bool, dragAndDropBehavior=bool, dependNode=bool, dependNodeId=\"string\", renderer=bool, translator=bool, name=\"string\", serviceDescriptions=bool, dependNodeByType=\"string\", data=\"string\", listPluginsPath=bool, loaded=bool, remove=bool, modelEditorCommand=\"string\", settings=bool, command=\"string\", constraintCommand=\"string\", savePluginPrefs=bool, registered=bool, iksolver=bool):\n pass",
"def __plugin_parameter(self, strip_index, stack_index):\n assert (self._ChannelStripController__assignment_mode == CSM_PLUGINS)\n if (self._ChannelStripController__plugin_mode == PCM_DEVICES):\n return (None,\n None)\n elif (self._ChannelStripController__plugin_mode == PCM_PARAMETERS):\n assert self._ChannelStripController__chosen_plugin\n parameters = self._ChannelStripController__ordered_plugin_parameters\n parameter_index = ((strip_index + stack_index) + self._ChannelStripController__plugin_mode_offsets[PCM_PARAMETERS])\n if ((parameter_index >= 0) and (parameter_index < len(parameters))):\n return parameters[parameter_index]\n else:\n return (None,\n None)\n else:\n assert 0",
"def parse_file(self, path):\n if \"__init__.py\" in path:\n return None\n try:\n return Plugin(path)\n except Exception as e:\n log_message(\n logging_callback=logging.exception,\n msg=e,\n extra={\"oname\": self.__class__.__name__},\n )\n messagebox.showerror(\n \"Error loading plugin...\",\n \"There was an error loading the script:\\n\\n{}.\"\n \"\\n\\nSee log for details\\n\\n{}.\".format(path, e),\n )\n return None",
"def parse(name):\n\n pass",
"def run_plugin(self, plugin, args=None, ij1_style=True):\n macro = self._assemble_plugin_macro(plugin, args=args, ij1_style=ij1_style)\n return self.run_macro(macro)",
"def launch_plugin(self, hivefile, plugin, output_format=\"text\", args=[]):\n reg = Registry.Registry(hivefile)\n plug = self.plugins[plugin]()\n res = plug.run(reg, self.verbose, args)\n plug.display(res, output_format, self.verbose)",
"def loadPlugin(self, plugin):\r\n return imp.load_module('plugin', *plugin[\"info\"])",
"def call(package, plugin, *args, **kwargs):\r\n plugin_func = get(package, plugin)\r\n return plugin_func(*args, **kwargs)",
"def build_auth_plugins_option_parser(parser):\n available_plugins = [plugin.name for plugin in get_plugin_list()]\n # parser.add_argument(\n # '--os-auth-type',\n # metavar='<auth-type>',\n # dest='auth_type',\n # default=utils.env('OS_AUTH_TYPE'),\n # help='Select an authentication type. Available types: ' +\n # ', '.join(available_plugins) +\n # '. Default: selected based on --os-username/--os-token' +\n # ' (Env: OS_AUTH_TYPE)',\n # choices=available_plugins\n # )\n # Maintain compatibility with old tenant env vars\n envs = {\n 'OS_PROJECT_NAME': utils.env(\n 'OS_PROJECT_NAME',\n default=utils.env('OS_TENANT_NAME')\n ),\n 'OS_PROJECT_ID': utils.env(\n 'OS_PROJECT_ID',\n default=utils.env('OS_TENANT_ID')\n ),\n }\n # for o in get_options_list():\n # # Remove tenant options from KSC plugins and replace them below\n # if 'tenant' not in o:\n # parser.add_argument(\n # '--os-' + o,\n # metavar='<auth-%s>' % o,\n # dest=o.replace('-', '_'),\n # default=envs.get(\n # OPTIONS_LIST[o]['env'],\n # utils.env(OPTIONS_LIST[o]['env']),\n # ),\n # help='%s\\n(Env: %s)' % (\n # OPTIONS_LIST[o]['help'],\n # OPTIONS_LIST[o]['env'],\n # ),\n # )\n get_options_list()\n\n # add tenant-related options for compatibility\n # this is deprecated but still used in some tempest tests...\n # parser.add_argument(\n # '--os-tenant-name',\n # metavar='<auth-tenant-name>',\n # dest='os_project_name',\n # help=argparse.SUPPRESS,\n # )\n parser.add_argument(\n '--os-tenant-id',\n metavar='<auth-tenant-id>',\n dest='os_project_id',\n default=utils.env(OPTIONS_LIST['tenant-id']['env']),\n help='%s\\n(Env: %s)' % (\n OPTIONS_LIST['tenant-id']['help'],\n OPTIONS_LIST['tenant-id']['env'],\n ),\n )\n\n parser.add_argument(\n '--os-username',\n metavar='<auth-username>',\n dest='os_username',\n default=utils.env(OPTIONS_LIST['username']['env']),\n help='%s\\n(Env: %s)' % (\n OPTIONS_LIST['username']['help'],\n OPTIONS_LIST['username']['env'],\n ),\n )\n parser.add_argument(\n '--os-password',\n metavar='<auth-password>',\n dest='os_password',\n default=utils.env(OPTIONS_LIST['password']['env']),\n help='%s\\n(Env: %s)' % (\n OPTIONS_LIST['password']['help'],\n OPTIONS_LIST['password']['env'],\n ),\n )\n parser.add_argument(\n '--os-auth-url',\n metavar='<auth-auth-url>',\n dest='os_auth_url',\n default=utils.env(OPTIONS_LIST['auth-url']['env']),\n help='%s\\n(Env: %s)' % (\n OPTIONS_LIST['auth-url']['help'],\n OPTIONS_LIST['auth-url']['env'],\n ),\n )\n parser.add_argument(\n '--os-user-domain-id',\n metavar='<auth-user-domain-id>',\n dest='os_user_domain_id',\n default=utils.env(OPTIONS_LIST['user-domain-id']['env']),\n help='%s\\n(Env: %s)' % (\n OPTIONS_LIST['user-domain-id']['help'],\n OPTIONS_LIST['user-domain-id']['env'],\n ),\n )\n parser.add_argument(\n '--os-project-domain-id',\n metavar='<auth-project-domain-id>',\n dest='os_project_domain_id',\n default=utils.env(OPTIONS_LIST['project-domain-id']['env']),\n help='%s\\n(Env: %s)' % (\n OPTIONS_LIST['project-domain-id']['help'],\n OPTIONS_LIST['project-domain-id']['env'],\n ),\n )\n\n return parser"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a plugin def
|
def plugin_def():
yield keyword("plugin")
yield normalspaces()
    vname = yield var_name()  # superclass name (rebound just below, so not kept)
    yield normalspaces()
    vname = yield var_name()  # plugin class name; only this value reaches the Construct
yield normalspaces()
vnop = yield sepBy(named_argument, normalspaces())
yield normalspaces()
yield string("(")
yield normalspaces()
pluginclauses = yield sepBy(plugin_clause, normalspaces())
yield normalspaces()
yield string(")")
return s.Construct(s.PLUGIN_DEF, vname, vnop, pluginclauses)
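
For orientation, this grammar corresponds to MAXScript scripted-plugin definitions: the plugin keyword, a superclass name, the plugin class name, optional named arguments, then a parenthesised block of clauses. The fragment below is invented purely to illustrate the expected shape (none of the names or arguments come from the source):

example_plugin_source = '''
plugin simpleObject hypotheticalBox
    name:"HypotheticalBox"
    category:"Examples"
(
    parameters main
    (
        length type:#float default:10.0
    )
)
'''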
|
[
"def parse(name):\n\n pass",
"def parseManagerText(self, text):\n\n # Regular expressions for scanning the file\n find_active = re.compile(r\"^\\s*?(\\w+)\\.py\", re.MULTILINE)\n find_inactive = re.compile(r\"^\\s*?#\\s*(\\w+)\\.py\", re.MULTILINE)\n find_manager = re.compile(r\"^\\s*plugin_manager\\.py\", re.MULTILINE)\n\n if 1: # Put the first match in the starts dict.\n starts = OrderedDict()\n for kind,iter in (\n ('on',find_active.finditer(text)),\n ('off',find_inactive.finditer(text)),\n ):\n for match in iter:\n name = match.groups()[0]\n start = match.start()\n if start != -1:\n bunch = starts.get(name)\n if not bunch or bunch.start > start:\n starts[name] = g.Bunch(\n kind=kind,name=name,start=start,match=match)\n\n self.actives = OrderedDict(\n [(bunch.name,bunch.match) for bunch in starts.values() if bunch.kind=='on'])\n\n self.inactives = OrderedDict(\n [(bunch.name,bunch.match) for bunch in starts.values() if bunch.kind=='off'])\n\n if 0: # debugging.\n starts2 = [(bunch.start,bunch.name,bunch.kind) for bunch in starts.values()]\n starts2.sort()\n g.trace(g.listToString(starts2,tag='starts2 list'))\n g.trace(g.dictToString(self.actives,tag='Active Plugins'))\n\n else: # Original code.\n # Get active plugin defintions\n self.actives = dict([(match.groups()[0], match) \n for match in find_active.finditer(text)])\n\n # Get inactive plugin definitions\n self.inactives = dict([(match.groups()[0], match) \n for match in find_inactive.finditer(text)])\n\n # List of all plugins\n self.all = {}\n self.all.update(self.actives)\n self.all.update(self.inactives)\n\n # Locaction of the plugin_manager.py plugin - this is where\n # we add additional files\n self.manager = find_manager.search(text)",
"def parse_file(self, path):\n if \"__init__.py\" in path:\n return None\n try:\n return Plugin(path)\n except Exception as e:\n log_message(\n logging_callback=logging.exception,\n msg=e,\n extra={\"oname\": self.__class__.__name__},\n )\n messagebox.showerror(\n \"Error loading plugin...\",\n \"There was an error loading the script:\\n\\n{}.\"\n \"\\n\\nSee log for details\\n\\n{}.\".format(path, e),\n )\n return None",
"def build_parser(self, parser: ArgumentParser):",
"def getparser():\n prs = ap.ArgumentParser(\n description=\"Format conversion for \"\n \"and introspection of \"\n \"intersphinx \"\n \"'objects.inv' files.\"\n )\n prs.add_argument(\n \"-\" + PrsConst.VERSION[0],\n \"--\" + PrsConst.VERSION,\n help=\"Print package version & other info\",\n action=\"store_true\",\n )\n\n sprs = prs.add_subparsers(\n title=\"Subcommands\",\n dest=PrsConst.SUBPARSER_NAME,\n metavar=f\"{{{PrsConst.CONVERT},{PrsConst.SUGGEST}}}\",\n help=\"Execution mode. Type \"\n \"'sphobjinv [mode] -h' \"\n \"for more information \"\n \"on available options. \"\n \"Mode names can be abbreviated \"\n \"to their first two letters.\",\n )\n\n # Enforce subparser as optional. No effect for 3.4 to 3.7;\n # briefly required a/o 3.7.0b4 due to change in default behavior, per:\n # https://bugs.python.org/issue33109. 3.6 behavior restored for\n # 3.7 release.\n sprs.required = False\n\n spr_convert = sprs.add_parser(\n PrsConst.CONVERT,\n aliases=[PrsConst.CONVERT[:2]],\n help=PrsConst.HELP_CO_PARSER,\n description=PrsConst.HELP_CO_PARSER,\n )\n spr_suggest = sprs.add_parser(\n PrsConst.SUGGEST,\n aliases=[PrsConst.SUGGEST[:2]],\n help=PrsConst.HELP_SU_PARSER,\n description=PrsConst.HELP_SU_PARSER,\n )\n\n # ### Args for conversion subparser\n spr_convert.add_argument(\n PrsConst.MODE,\n help=\"Conversion output format\",\n choices=(PrsConst.ZLIB, PrsConst.PLAIN, PrsConst.JSON),\n )\n\n spr_convert.add_argument(\n PrsConst.INFILE,\n help=(\n \"Path to file to be converted. Passing '-' indicates to read from stdin \"\n \"(plaintext/JSON only).\"\n ),\n )\n\n spr_convert.add_argument(\n PrsConst.OUTFILE,\n help=(\n \"Path to desired output file. \"\n \"Defaults to same directory and main \"\n \"file name as input file but with extension \"\n + PrsConst.HELP_CONV_EXTS\n + \", as appropriate for the output format. \"\n \"A path to a directory is accepted here, \"\n \"in which case the default output file name will be used. \"\n \"Passing '-' indicates to write to stdout. If \"\n + PrsConst.INFILE\n + \" is passed as '-', \"\n + PrsConst.OUTFILE\n + \" can be omitted and both stdin and stdout will be used.\"\n ),\n nargs=\"?\",\n default=None,\n )\n\n # Mutually exclusive group for --expand/--contract\n gp_expcont = spr_convert.add_argument_group(title=\"URI/display name conversions\")\n meg_expcont = gp_expcont.add_mutually_exclusive_group()\n meg_expcont.add_argument(\n \"-\" + PrsConst.EXPAND[0],\n \"--\" + PrsConst.EXPAND,\n help=\"Expand all URI and display name abbreviations\",\n action=\"store_true\",\n )\n\n meg_expcont.add_argument(\n \"-\" + PrsConst.CONTRACT[0],\n \"--\" + PrsConst.CONTRACT,\n help=\"Contract all URI and display name abbreviations\",\n action=\"store_true\",\n )\n\n # Clobber argument\n spr_convert.add_argument(\n \"-\" + PrsConst.OVERWRITE[0],\n \"--\" + PrsConst.OVERWRITE,\n help=\"Overwrite output files without prompting\",\n action=\"store_true\",\n )\n\n # stdout suppressor option (e.g., for scripting)\n spr_convert.add_argument(\n \"-\" + PrsConst.QUIET[0],\n \"--\" + PrsConst.QUIET,\n help=\"Suppress printing of status messages and \"\n \"overwrite output files without prompting\",\n action=\"store_true\",\n )\n\n # Flag to treat infile as a URL\n spr_convert.add_argument(\n \"-\" + PrsConst.URL[0],\n \"--\" + PrsConst.URL,\n help=(\n \"Treat 'infile' as a URL for download. 
\"\n \"Cannot be used with an infile of '-'.\"\n ),\n action=\"store_true\",\n )\n\n # ### Args for suggest subparser\n spr_suggest.add_argument(\n PrsConst.INFILE,\n help=(\n \"Path to inventory file to be searched. \"\n \"Passing '-' indicates to read from stdin (plaintext/JSON only).\"\n ),\n )\n spr_suggest.add_argument(PrsConst.SEARCH, help=\"Search term for object suggestions\")\n spr_suggest.add_argument(\n \"-\" + PrsConst.ALL[0],\n \"--\" + PrsConst.ALL,\n help=\"Display all results \"\n \"regardless of the number returned \"\n \"without prompting for confirmation.\",\n action=\"store_true\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.PAGINATE[0],\n \"--\" + PrsConst.PAGINATE,\n help=\"Paginate long search results\",\n action=\"store_true\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.INDEX[0],\n \"--\" + PrsConst.INDEX,\n help=\"Include Inventory.objects list indices with the search results\",\n action=\"store_true\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.SCORE[0],\n \"--\" + PrsConst.SCORE,\n help=\"Include fuzzywuzzy scores with the search results\",\n action=\"store_true\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.THRESH[0],\n \"--\" + PrsConst.THRESH,\n help=\"Match quality threshold, integer 0-100, \"\n \"default 75. Default is suitable when \"\n \"'search' is exactly a known object name. \"\n \"A value of 30-50 gives better results \"\n \"for approximate matches.\",\n default=PrsConst.DEF_THRESH,\n type=int,\n choices=range(101),\n metavar=\"{0-100}\",\n )\n spr_suggest.add_argument(\n \"-\" + PrsConst.URL[0],\n \"--\" + PrsConst.URL,\n help=(\n \"Treat 'infile' as a URL for download. \"\n f\"Cannot be used with --{PrsConst.URL}.\"\n ),\n action=\"store_true\",\n )\n\n return prs",
"def ParseWithPlugin(self, line):\n if not IsLoaded():\n print u'No hive loaded, unable to parse.'\n return\n\n current_hive = PregCache.hive_storage.loaded_hive\n if not current_hive:\n return\n\n if not line:\n print u'No plugin name added.'\n return\n\n plugin_name = line\n if '-h' in line:\n items = line.split()\n if len(items) != 2:\n print u'Wrong usage: plugin [-h] PluginName'\n return\n if items[0] == '-h':\n plugin_name = items[1]\n else:\n plugin_name = items[0]\n\n if not plugin_name.startswith('winreg'):\n plugin_name = u'winreg_{0:s}'.format(plugin_name)\n\n hive_type = current_hive.type\n plugins_list = parsers_manager.ParsersManager.GetWindowsRegistryPlugins()\n plugin_found = False\n for plugin_cls in plugins_list.GetKeyPlugins(hive_type):\n plugin = plugin_cls(reg_cache=current_hive.reg_cache)\n if plugin.plugin_name == plugin_name:\n # If we found the correct plugin.\n plugin_found = True\n break\n\n if not plugin_found:\n print u'No plugin named: {0:s} available for Registry type {1:s}'.format(\n plugin_name, hive_type)\n return\n\n if not hasattr(plugin, 'REG_KEYS'):\n print u'Plugin: {0:s} has no key information.'.format(line)\n return\n\n if '-h' in line:\n print frontend_utils.FormatHeader(plugin_name)\n print frontend_utils.FormatOutputString('Description', plugin.__doc__)\n print u''\n for registry_key in plugin.expanded_keys:\n print frontend_utils.FormatOutputString('Registry Key', registry_key)\n return\n\n if not plugin.expanded_keys:\n plugin.ExpandKeys(PregCache.parser_context)\n\n # Clear the last results from parse key.\n PregCache.events_from_last_parse = []\n\n # Defining outside of for loop for optimization.\n get_key_by_path = current_hive.GetKeyByPath\n for registry_key in plugin.expanded_keys:\n key = get_key_by_path(registry_key)\n if not key:\n print u'Key: {0:s} not found'.format(registry_key)\n continue\n\n # Move the current location to the key to be parsed.\n self.ChangeDirectory(registry_key)\n # Parse the key.\n print_strings = ParseKey(\n key=current_hive.GetCurrentRegistryKey(), hive_helper=current_hive,\n shell_helper=PregCache.shell_helper, verbose=False,\n use_plugins=[plugin_name])\n self.output_writer.write(u'\\n'.join(print_strings))\n self.output_writer.flush()",
"def pluginInfo(string, animCurveInterp=\"string\", autoload=bool, controlCommand=\"string\", listPlugins=bool, changedCommand=\"string\", loadPluginPrefs=bool, unloadOk=bool, apiVersion=bool, device=bool, writeRequires=bool, vendor=\"string\", path=\"string\", version=bool, activeFile=bool, tool=\"string\", pluginsInUse=bool, userNamed=bool, cacheFormat=bool, dragAndDropBehavior=bool, dependNode=bool, dependNodeId=\"string\", renderer=bool, translator=bool, name=\"string\", serviceDescriptions=bool, dependNodeByType=\"string\", data=\"string\", listPluginsPath=bool, loaded=bool, remove=bool, modelEditorCommand=\"string\", settings=bool, command=\"string\", constraintCommand=\"string\", savePluginPrefs=bool, registered=bool, iksolver=bool):\n pass",
"def parse_plugins_config(self, params_dict):\n self.analysis_plugins = list()\n json_parse_helper = eodatadown.eodatadownutils.EDDJSONParseHelper()\n if json_parse_helper.doesPathExist(params_dict, [\"analysis\"]):\n for plugin_config in params_dict[\"analysis\"]:\n plugin_path = json_parse_helper.getStrValue(plugin_config, [\"path\"])\n plugin_path = os.path.abspath(plugin_path)\n plugin_module_name = json_parse_helper.getStrValue(plugin_config, [\"module\"])\n plugin_cls_name = json_parse_helper.getStrValue(plugin_config, [\"class\"])\n # Check if plugin path input is already in system path.\n already_in_path = False\n for c_path in sys.path:\n c_path = os.path.abspath(c_path)\n if c_path == plugin_path:\n already_in_path = True\n break\n # Add plugin path to system path\n if not already_in_path:\n sys.path.insert(0, plugin_path)\n logger.debug(\"Add plugin path ('{}') to the system path.\".format(plugin_path))\n # Try to import the module.\n logger.debug(\"Try to import the plugin module: '{}'\".format(plugin_module_name))\n plugin_mod_inst = importlib.import_module(plugin_module_name)\n logger.debug(\"Imported the plugin module: '{}'\".format(plugin_module_name))\n if plugin_mod_inst is None:\n raise Exception(\"Could not load the module: '{}'\".format(plugin_module_name))\n # Try to make instance of class.\n logger.debug(\"Try to create instance of class: '{}'\".format(plugin_cls_name))\n plugin_cls_inst = getattr(plugin_mod_inst, plugin_cls_name)()\n logger.debug(\"Created instance of class: '{}'\".format(plugin_cls_name))\n if plugin_cls_inst is None:\n raise Exception(\"Could not create instance of '{}'\".format(plugin_cls_name))\n if json_parse_helper.doesPathExist(plugin_config, [\"params\"]):\n logger.debug(\"User params are present for plugin so will test the required keys are present.\")\n plugin_cls_inst.set_users_param(plugin_config[\"params\"])\n plugin_cls_inst.check_param_keys(raise_except=True)\n logger.debug(\"User params for the plugin have the correct keys.\")\n self.analysis_plugins.append(plugin_config)",
"def run_parser(self, parser: ArgumentParser):",
"def parse(cls, input):",
"def imfPlugins(string, keyword=\"string\", multiFrameSupport=\"string\", pluginName=\"string\", extension=\"string\", writeSupport=\"string\", readSupport=\"string\"):\n pass",
"def asPluginData(*args, **kwargs):\n \n pass",
"def launch_plugin(self, hivefile, plugin, output_format=\"text\", args=[]):\n reg = Registry.Registry(hivefile)\n plug = self.plugins[plugin]()\n res = plug.run(reg, self.verbose, args)\n plug.display(res, output_format, self.verbose)",
"def _get_plugin(plugin_name, plugin_list):\r\n plugin = None\r\n for plug in plugin_list:\r\n if plug[\"name\"] == plugin_name:\r\n plugin = plug\r\n break\r\n return plugin",
"def define_sub_options(self):\n self.plugin_parser = self.parser.add_argument_group(\"Plugin Options\",\n \"Options for all plugins.\")\n self.plugin_parser.add_argument(\"-H\", \"--host\",\n default='127.0.0.1',\n required=True,\n help=\"Host IP address or DNS\",\n dest=\"host\")\n self.plugin_parser.add_argument(\"-u\", \"--user\",\n default=None,\n required=False,\n help=\"User name\",\n dest=\"user\")\n self.plugin_parser.add_argument(\"-p\", \"--password\",\n default=None,\n required=False,\n help=\"User password\",\n dest=\"password\")",
"def parse(self, hgvs_string):\n pass",
"def subgroup_parser():\n parser = SubgroupConfigParser('test')\n parser.add_argument('a', is_flag=True)\n return parser",
"def parse(self):\n for line in self.template_string.split('\\n'):\n split_line = tag_re.split(line)\n if len(split_line) > 1:\n for matched in split_line:\n mat = tag_re.search(matched)\n if mat:\n full_command = mat.group(0)\n cmd = mat.group(2).split()[0].strip() #get_comment_form etc\n if cmd == 'load':\n self.loaded_classes.append(full_command)\n else:\n if cmd not in DEFAULT_TAGS and cmd not in 'end'.join(DEFAULT_TAGS):\n self.template_calls.append(full_command)",
"def loadPlugin(self, plugin):\r\n return imp.load_module('plugin', *plugin[\"info\"])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
parse a when handler
|
def when_handler():
@generate
def when_attribute():
# pylint: disable=line-too-long
yield keyword("when")
yield normalspaces()
kw = yield keyword("topology|geometry|names?|transform|select|parameters|subAnimStructure|controller|children|any")
yield normalspaces()
objects = yield factor
yield normalspaces()
yield keyword("changes?")
yield normalspaces()
vnop = yield sepBy(named_argument, normalspaces())
yield normalspaces()
objparam = yield optional(factor)
yield normalspaces()
yield keyword("do")
yield normalspaces()
expr = yield expression
return s.Construct(s.WHEN_ATTRIBUTE, kw, objects, vnop, objparam, expr)
@generate
def when_objects():
yield keyword("when")
yield normalspaces()
obj = yield factor
yield normalspaces()
yield keyword("deleted")
yield normalspaces()
vnop = yield sepBy(named_argument, normalspaces())
yield normalspaces()
objparam = yield optional(factor)
yield normalspaces()
yield keyword("do")
yield normalspaces()
expr = yield expression
return s.Construct(s.WHEN_OBJECTS, obj, vnop, objparam, expr)
when_thing = yield when_attribute ^ when_objects
return when_thing
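
Purely as an illustration of the two branches, here are invented MAXScript change-handler fragments: when_attribute matches "when <attribute> <objects> changes ... do <expr>" and when_objects matches "when <objects> deleted ... do <expr>".

when_attribute_example = 'when transform $box01 changes do print "box moved"'
when_objects_example = 'when $box01 deleted do print "box deleted"'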
|
[
"def parse_event(self, event):",
"def _parse(self):\n\t\t\n\t\tself.reply_msg = MessageHandler.fire_handlers(self)",
"def handle(self, handler: Handler):\n pass",
"def sched_switch_parser(event, text):\n if text.count('=') == 2: # old format\n regex = re.compile(\n r'(?P<prev_comm>\\S.*):(?P<prev_pid>\\d+) \\[(?P<prev_prio>\\d+)\\] (?P<status>\\S+)'\n r' ==> '\n r'(?P<next_comm>\\S.*):(?P<next_pid>\\d+) \\[(?P<next_prio>\\d+)\\]'\n )\n parser_func = regex_body_parser(regex)\n return parser_func(event, text)\n else: # there are more than two \"=\" -- new format\n return default_body_parser(event, text.replace('==>', ''))",
"def on_do_handler():\n @generate\n def do_exprseq():\n yield keyword(\"do\")\n yield normalspaces()\n handler = yield expression # expr_seq\n return handler\n\n yield keyword(\"on\")\n yield normalspaces()\n event = yield var_name()\n yield normalspaces()\n handler = yield function_return | do_exprseq\n return s.Construct(s.ON_DO_HANDLER, event, handler)",
"def read_handler(host, port, handler):\n\n sock = socket.socket()\n sock.connect((host, port))\n\n f_hand = sock.makefile()\n line = f_hand.readline()\n\n if line != \"Click::ControlSocket/1.3\\n\":\n raise ValueError(\"Unexpected reply: %s\" % line)\n\n cmd = \"read %s\\n\" % handler\n sock.send(cmd.encode(\"utf-8\"))\n\n line = f_hand.readline()\n\n regexp = '([0-9]{3}) (.*)'\n match = re.match(regexp, line)\n\n while not match:\n line = f_hand.readline()\n match = re.match(regexp, line)\n\n groups = match.groups()\n\n if int(groups[0]) == 200:\n\n line = f_hand.readline()\n res = line.split(\" \")\n\n length = int(res[1])\n data = f_hand.read(length)\n\n return (int(groups[0]), data)\n\n return (int(groups[0]), line)",
"def json_handler(cls, fn: Handler) -> MessageHandler:\n return lambda message: fn(**cls.parse_json(message))",
"def handle(event={}, context={}):\n LoLNewsHandler().run()\n return 'ok'",
"def my_event_handler(sender, event):\n print(\"Event:\")\n print(\" sender:\", sender)\n print(\" event.event:\", event.event)\n print(\" event.parsed:\", event.parsed)",
"def handle(self):\n request_data = parse_request_json(self.request)\n response = None\n if request_data[SC.MSG_TITLE] == SC.MESSAGE_GET_ROLE:\n response = self.handle_get_role(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_BROADCAST_ROLES:\n response = self.handle_get_network_information(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_PRODUCE_VOTES:\n response = self.handle_produce_votes(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_DISTRIBUTE_VOTES:\n response = self.handle_distribute_votes(request_data)\n else:\n response = self.handle_unexpected_request()\n send_response_json(self.request, response, request_data[SC.MSG_ORIGIN])",
"def param_handler():\n yield keyword(\"on\")\n yield normalspaces()\n hname = yield var_name()\n yield normalspaces()\n action = yield keyword(\"set|get|preset|postset\")\n yield normalspaces()\n other = yield var_name()\n yield normalspaces()\n yield keyword(\"do\")\n yield normalspaces()\n expr = yield expression\n return s.Construct(s.PARAMETERS_HANDLER, hname, action, other, expr)",
"def catchall_handler(*args, **kwargs): # pragma: no cover\n # unpack tuple to variables ( Taken from Tasker )\n for i, v in enumerate(args):\n if isinstance(v, tuple):\n tuple_args = len(v)\n if tuple_args == 1:\n msg = v\n elif tuple_args == 2:\n msg, scarlett_sound = v\n elif tuple_args == 3:\n msg, scarlett_sound, command = v\n\n recieved_signals.append(v)\n\n print(\"---- Caught signal ----\")\n\n print(\"--- [args] ---\")\n for arg in args:\n print(\"another arg through *arg : {}\".format(arg))\n\n print(\"--- [kargs] ---\")\n if kwargs is not None:\n for key, value in kwargs.items():\n print(\"{} = {}\".format(key, value))\n\n print(\"\\n\")",
"def rollout_handler():\n yield keyword(\"on\")\n yield normalspaces()\n handlername = yield var_name()\n yield normalspaces()\n varn = yield var_name()\n yield normalspaces()\n varn2 = yield optional(var_name())\n yield normalspaces()\n varn3 = yield optional(var_name())\n yield normalspaces()\n yield keyword(\"do\")\n yield normalspaces()\n expr = yield expression\n return s.Construct(s.ROLLOUT_HANDLER, handlername, varn, varn2, varn3, expr)",
"def __handle(event, context) -> Tuple[Optional[Dict[Any, Any]], Optional[str]]:\n serialized_event = json.dumps(event, default=lambda o: \"<not serializable>\")\n logger.info(f\"Got new request. Event: {serialized_event}.\")\n\n action = Action(event)\n\n if event[\"RequestType\"] == \"Create\":\n return action.create()\n\n if event[\"RequestType\"] == \"Update\":\n return action.update()\n\n if event[\"RequestType\"] == \"Delete\":\n return action.delete()\n\n raise KeyError(\"Unsupported request type! Type: {}\".format(event[\"RequestType\"]))",
"def rcmenu_handler():\n yield keyword(\"on\")\n yield normalspaces()\n varname = yield var_name()\n yield normalspaces()\n vn2 = yield var_name()\n yield normalspaces()\n yield keyword(\"do\")\n yield normalspaces()\n expr = yield expression\n return s.Construct(s.RCMENU_HANDLER, varname, vn2, expr)",
"def handle_raw_endpoint_event(self, msg):\n classname = msg.data.keys()[0]\n attr = msg.data[classname]\n parsed_msg = msg.wf.ept_epm_parser.parse(classname, attr, attr[\"_ts\"])\n # ensure we copy over msg.now and msg.wf from original msg to parsed_msg\n # (note these are added before handler is called and not in original eptMsgWorker event)\n setattr(parsed_msg, \"wf\", msg.wf)\n setattr(parsed_msg, \"now\", msg.now)\n parsed_msg.seq = msg.seq\n logger.debug(parsed_msg)\n self.handle_endpoint_event(parsed_msg)",
"def add_handler(self, predicate, handler):\n pass",
"def test_should_parse_handler_class(self):\n from tests.fixtures.handlers.foo import Foo\n ret = self.robot.parse_handler_methods(Foo())\n self.assertEqual(len(ret), 7)",
"def _handle_alarm(self, timestamp: datetime, alarm: str, state: bool):\n _LOGGER.debug(\"Handle alarm: %s; State: %s\", alarm, state)\n\n self.last_activity = timestamp\n self.alarm_timestamp[alarm] = timestamp\n self.alarm_state[alarm] = state\n\n for handler in self._alarm_handlers:\n handler(self, timestamp, alarm, state)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the setup string (pacman setup or os setup script) for the copy command used by the mover
|
def getSetup(self):
_setup_str = ""
self._setup = self._setup.strip()
tolog("self setup: %s" % self._setup)
if self._setup and self._setup != "" and self._setup.strip() != "":
if not self._setup.endswith(";"):
self._setup += ";"
if not "alias" in self._setup:
if "atlasLocalSetup.sh" in self._setup and "--quiet" not in self._setup:
self._setup = self._setup.replace("atlasLocalSetup.sh", "atlasLocalSetup.sh --quiet")
if self._setup.startswith("export") or self._setup.startswith("source"):
_setup_str = "%s" % self._setup
else:
_setup_str = "source %s" % self._setup
else:
_setup_str = self._setup
if _setup_str != "":
tolog("Using setup: %s" % (_setup_str))
return _setup_str
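
As a quick sanity check of the branching above, a few representative _setup values and the string getSetup() would hand back (traced by hand from the code; the paths are invented and tolog() is assumed to only log):

# _setup value                            -> returned _setup_str
# ""                                      -> ""
# "/cvmfs/setup.sh"                       -> "source /cvmfs/setup.sh;"
# "export COPYTOOL_HOME=/opt/tool"        -> "export COPYTOOL_HOME=/opt/tool;"
# "source /x/atlasLocalSetup.sh"          -> "source /x/atlasLocalSetup.sh --quiet;"
# "alias asetup='...'; asetup 21.0"       -> "alias asetup='...'; asetup 21.0;" (used verbatim)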
|
[
"def _setup_body(setup_conf: SETUP_CONFIG) -> str:\n return os.linesep.join([\n 'import sys',\n 'from setuptools import setup',\n '',\n \"args = ' '.join(sys.argv).strip()\",\n 'if not any(args.endswith(suffix) for suffix in [{allowed_suffixes}]):',\n ' raise {error}',\n '',\n 'setup(',\n ' {config}',\n ')',\n ''\n ]).format(\n error=repr(ImportError(setup_conf['description'])),\n config=',{linesep} '.join([\n '{}={}'.format(key, repr(value))\n for key, value\n in sorted(setup_conf.items(), key=lambda item: item[0])\n ]).format(linesep=os.linesep),\n allowed_suffixes=', '.join(repr(each) for each in sorted(ALLOWED_SETUP_SUFFIXES))\n )",
"def _create_cmd(self):\n comment = (\"#-------------------\\n\"\n \"# Install ANTs {}\\n\"\n \"#-------------------\".format(self.version))\n if self.use_binaries:\n chunks = [comment, self.install_binaries()]\n else:\n chunks = [comment, self.build_from_source_github()]\n return \"\\n\".join(chunks)",
"def generate_commands(code_name,target_ip):\n\tcommands = list()\n\t#commands.append(\"sudo su \\n\")\n\tcommands.append(\"sudo apt update -y && sudo apt install -y docker.io && sudo service docker start && sudo apt-get install -y unzip && sudo apt-get install -y docker-compose \\n \")\n\tcommands.append(\"unzip \"+code_name+\" \\n \")\n\tcommands.append(\"cd cp-all-in-one/; sudo echo 'TARGET_IP=\" + target_ip + \"' > .env \\n \")\n\tcommands.append(\"cd cp-all-in-one/; sudo docker-compose up -d --build \\n\")\n\treturn commands",
"def setup_name(self):\n return self._setup_name",
"def provision_tool(connection, tool_path):\n bin_path = get_nsb_option(\"bin_path\")\n exit_status, stdout = connection.execute(\"which %s\" % tool_path)[:2]\n if exit_status == 0:\n return encodeutils.safe_decode(stdout, incoming='utf-8').rstrip()\n\n logging.warning(\"%s not found on %s, will try to copy from localhost\",\n tool_path, connection.host)\n connection.execute('mkdir -p \"%s\"' % bin_path)\n connection.put(tool_path, tool_path)\n return tool_path",
"def get_cmd(self) -> str:\n return f\"aws s3 cp {self.input} {self.output}\"",
"def generate_command(package, jailpath, additional_args=None):\n if additional_args is None: additional_args = ''\n command = \"pkg -c %s install --yes %s\" % (jailpath, package)\n return command.rstrip()",
"def write_setup_local(self, setup_local: str = None):\n if not any([setup_local, self.setup_local]):\n return\n if not setup_local:\n setup_local = self.setup_local\n self.copyfile(\n self.project.patch / self.ver / setup_local,\n self.src_path / \"Modules\" / \"Setup.local\",\n )",
"def get_cli():\n location = os.path.join(CLI_PATH, \"nativescript.tgz\")\n shutil.copy2(location.strip(), os.path.join(os.getcwd(), SUT_ROOT_FOLDER, \"nativescript.tgz\"))",
"def command_prefix():\n cp = config_get('defaults.submit.command-prefix')\n return decode(cp.stdout).rstrip('\\n') if cp.returncode == 0 else ''",
"def test_install_prefix(mm_script, mm_conf):\n ret = subprocess.run(\n [\"powershell\", str(mm_script).replace(\" \", \"' '\"), \"-p\", \"squarepants\"],\n capture_output=True,\n check=False,\n text=True,\n )\n assert ret.returncode == 0, ret.stderr\n conf_file = mm_conf / \"minion\"\n assert conf_file.exists()\n assert conf_file.read_text().find(\"id: squarepants\") > -1",
"def copy_poap_files():\n stream = open(\"/bootflash/poap_device_recipe.yaml\", 'r')\n dictionary = yaml.load(stream)\n os.system(\"mkdir -p /bootflash/poap_files\")\n timeout = options[\"timeout_copy_system\"]\n\n if (\"License\" in dictionary):\n for lic in dictionary[\"License\"]:\n serial_path = os.path.join(options[\"install_path\"], lic.strip())\n\n dst = \"poap_files/\" + lic.split('/')[-1]\n\n do_copy(serial_path, dst, timeout, dst, False)\n\n if (\"RPM\" in dictionary):\n rpm_error = False\n for rpm in dictionary[\"RPM\"]:\n rpm = rpm.strip()\n serial_path = os.path.join(options[\"install_path\"], rpm)\n\n dst = \"poap_files/\" + rpm.split('/')[-1]\n\n do_copy(serial_path, dst, timeout, dst, False)\n for rpm in dictionary[\"RPM\"]:\n rpm = rpm.strip()\n name_str = \"rpm -qp --qf '%{NAME}-%{VERSION}-%{RELEASE}.%{ARCH}.rpm' /bootflash/poap_files/\"+ rpm.split('/')[-1]\n orig_name = subprocess.check_output(name_str, shell=True)\n orig_name = byte2str(orig_name)\n if (orig_name != rpm.split('/')[-1]):\n poap_log (\"ERROR : RPM file %s does not match RPM package naming convention. Expected name: %s\" %(rpm.split('/')[-1],orig_name))\n rpm_error = True\n if rpm_error:\n abort(\"Please correct the above rpm files in rpm source location and update YAML file accordingly.\")\n \n if (\"Certificate\" in dictionary):\n for cert in dictionary[\"Certificate\"]:\n cert = cert.strip()\n serial_path = os.path.join(options[\"install_path\"], cert)\n\n dst = \"poap_files/\" + cert.split('/')[-1]\n\n do_copy(serial_path, dst, timeout, dst, False)\n if (\"Trustpoint\" in dictionary):\n for ca in dictionary[\"Trustpoint\"].keys():\n tmp_cmd = \"mkdir -p /bootflash/poap_files/\" + ca\n os.system(tmp_cmd)\n dst = \"poap_files/\" + ca + \"/\"\n for tp_cert, crypto_pass in dictionary[\"Trustpoint\"][ca].items():\n tp_cert = tp_cert.strip()\n dst = dst + tp_cert.split('/')[-1]\n serial_path = os.path.join(options[\"install_path\"], tp_cert)\n do_copy(serial_path, dst, timeout, dst, False)",
"def copy_code():\n with lcd(local_app_dir):\n local('git push production master')",
"def get_cmd(self):\n\t\tif self.cmd is not None:\n\t\t\treturn self.cmd\n\t\tcmd = \"/system/bin/sh /system/bin/am \"\n\t\tif self.prefix:\n\t\t\tcmd += self.prefix\n\t\tif self.action is not None:\n\t\t\tcmd += \" -a \" + self.action\n\t\tif self.data_uri is not None:\n\t\t\tcmd += \" -d \" + self.data_uri\n\t\tif self.mime_type is not None:\n\t\t\tcmd += \" -t \" + self.mime_type\n\t\tif self.category is not None:\n\t\t\tcmd += \" -c \" + self.category\n\t\tif self.component is not None:\n\t\t\tcmd += \" -n \" + self.component\n\t\tif self.flag is not None:\n\t\t\tcmd += \" -f \" + self.flag\n\t\tif self.extra_keys:\n\t\t\tfor key in self.extra_keys:\n\t\t\t\tcmd += \" --esn '%s'\" % key\n\t\tif self.extra_string:\n\t\t\tfor key in self.extra_string.keys():\n\t\t\t\tcmd += \" -e '%s' '%s'\" % (key, self.extra_string[key])\n\t\tif self.extra_boolean:\n\t\t\tfor key in self.extra_boolean.keys():\n\t\t\t\tcmd += \" -ez '%s' %s\" % (key, self.extra_boolean[key])\n\t\tif self.extra_int:\n\t\t\tfor key in self.extra_int.keys():\n\t\t\t\tcmd += \" -ei '%s' %s\" % (key, self.extra_int[key])\n\t\tif self.extra_long:\n\t\t\tfor key in self.extra_long.keys():\n\t\t\t\tcmd += \" -el '%s' %s\" % (key, self.extra_long[key])\n\t\tif self.extra_float:\n\t\t\tfor key in self.extra_float.keys():\n\t\t\t\tcmd += \" -ef '%s' %s\" % (key, self.extra_float[key])\n\t\tif self.extra_uri:\n\t\t\tfor key in self.extra_uri.keys():\n\t\t\t\tcmd += \" -eu '%s' '%s'\" % (key, self.extra_uri[key])\n\t\tif self.extra_component:\n\t\t\tfor key in self.extra_component.keys():\n\t\t\t\tcmd += \" -ecn '%s' %s\" % (key, self.extra_component[key])\n\t\tif self.extra_array_int:\n\t\t\tfor key in self.extra_array_int.keys():\n\t\t\t\tcmd += \" -eia '%s' %s\" % (key, \",\".join(self.extra_array_int[key]))\n\t\tif self.extra_array_long:\n\t\t\tfor key in self.extra_array_long.keys():\n\t\t\t\tcmd += \" -ela '%s' %s\" % (key, \",\".join(self.extra_array_long[key]))\n\t\tif self.extra_array_float:\n\t\t\tfor key in self.extra_array_float.keys():\n\t\t\t\tcmd += \" -efa '%s' %s\" % (key, \",\".join(self.extra_array_float[key]))\n\t\tif self.flags:\n\t\t\tcmd += \" \" + \" \".join(self.flags)\n\t\tif self.suffix:\n\t\t\tcmd += \" \" + self.suffix\n\t\tself.cmd = cmd\n\t\treturn self.cmd",
"def setup(command, version=env.default_version):\n local('%s/%s-py%s/bin/python setup.py %s' %\n (env.virtualenv_dir, env.project, version, command))",
"def talon_add_context_clipboard():\n friendly_name = actions.app.name()\n executable = actions.app.executable().split(os.path.sep)[-1]\n if app.platform != \"windows\":\n result = \"os: {}\\napp: {}\\ntitle: {}\".format(app.platform, friendly_name, actions.win.title())\n\n #on windows, it's best to include both the friendly name and executable name in case the muicache breaks....\n else:\n result = \"os: {}\\napp: {}\\napp: {}\\ntitle: {}\".format(app.platform, friendly_name, executable, actions.win.title())\n\n clip.set(result)",
"def makePkgInfo(dmg_path, info):\n\t# Info from PKG-INFO\n\tname = info['Name']\n\tversion = info['Version']\n\tdescription = info['Summary']\n\t# Local path to dmg\n\tdmg = dmg_path.split('/')[-1]\n\t# Filename of dmg with file extension removed\n\tdmg_name = dmg.split('.dmg')[0]\n\t# Path to temp location of install files\n\ttmp_path = \"/tmp\"\n\t# Path to directory for install log needed for uninstallation\n\tlog_dir = \"/Library/Application Support/Managed Python/\" + dmg_name\n\t# Get path to directory holding files for this tool\n\ttool_dir = os.path.dirname(os.path.abspath(sys.argv[0]))\n\t# Path to plist file pkginfo keys are written to\n\tpkginfo_path = os.getcwd() + \"/\" + dmg_name + \".pkginfo\"\n\t# Path to setup.py within module tmp directory\n\tsetup_path = tmp_path + \"/\" + dmg_name\n\tpkginfo = dict(\n\t\t_metadata=dict(\n\t\t\tcreated_by=NSUserName(),\n\t\t\tcreation_date=datetime.datetime.utcnow(),\n\t\t\tos_version=subprocess.check_output(['sw_vers', '-productVersion']).rstrip('\\n'),\n\t\t),\n\t\tautoremove=False,\n\t\tcatalogs=list(['testing']),\n\t\tdescription=description,\n\t\tinstallcheck_script=installcheck_script.replace(\"MODULE\", name).replace(\"VERS\", version),\n\t\tinstaller_item_hash=hashlib.sha256(open(dmg_path, 'rb').read()).hexdigest(),\n\t\tinstaller_item_location=dmg,\n\t\tinstaller_item_size=int(os.path.getsize(dmg_path) / 1024),\n\t\tinstaller_type='copy_from_dmg',\n\t\titems_to_copy=list((\n\t\t\tdict(\n\t\t\t\tdestination_path=tmp_path,\n\t\t\t\tsource_item=dmg_name,\n\t\t\t),\n\t\t)),\n\t\tminimum_os_version='10.4.0',\n\t\tname=name,\n\t\tpostinstall_script=postinstall_script.replace(\"LOGDIR\", log_dir).replace(\"SETUP_DIR\", setup_path),\n\t\trequires=list(['XcodeTools']),\n\t\tunattended_install=True,\n\t\tunattended_uninstall=True,\n\t\tuninstall_method='uninstall_script',\n\t\tuninstall_script=uninstall_script.replace(\"LOGDIR\", log_dir),\n\t\tuninstallable=True,\n\t\tversion=version,\n\t)\n\tplistlib.writePlist(pkginfo, pkginfo_path)\n\treturn pkginfo_path",
"def copy_setup(setup, to=None):\n if to is None:\n to = os.path.join(os.getcwd(), setup)\n\n parent = os.path.dirname(os.path.realpath(to))\n\n if not os.path.exists(parent):\n os.makedirs(parent)\n\n ignore = shutil.ignore_patterns(*IGNORE_PATTERNS)\n shutil.copytree(\n os.path.join(SETUPDIR, setup), to, ignore=ignore\n )",
"def write_setup(self):\r\n self.write_cmd(self.commands.SETUP, 1)\r\n self.write_cmd(self.commands.ACK)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return exit code (0 if OK), file size and checksum of a local file, as well as a date string if requested
|
def getLocalFileInfo(self, fileName, checksumType="default", date=None):
# note that date is mutable
statusRet = 0
outputRet = {}
outputRet["errorLog"] = ""
outputRet["report"] = {}
outputRet["report"]["clientState"] = None
outputRet["size"] = 0
outputRet["checksum"] = ""
outputRet["checksumType"] = checksumType
self.log("Getting local File(%s) info." % fileName)
# does the file exist?
if not os.path.isfile(fileName):
if fileName.find("DBRelease") >= 0 and os.path.exists(os.path.dirname(fileName)):
outputRet["errorLog"] = errorLog = "DBRelease file missing: %s" % (fileNameame)
self.log("!!WARNING!!2999!! %s" % (errorLog))
return PilotErrors.ERR_MISSDBREL, outputRet
else:
outputRet["errorLog"] = errorLog = "No such file or directory: %s" % (fileName)
self.log("!!WARNING!!2999!! %s" % (errorLog))
return PilotErrors.ERR_MISSINGLOCALFILE, outputRet
# get the modification time if needed and store it in the mutable object
if date:
date = SiteMover.getModTime(os.path.dirname(fileName), os.path.basename(fileName))
# get the file size
try:
self.log("Executing getsize() for file: %s" % (fileName))
outputRet["size"] = fsize = str(os.path.getsize(fileName))
except OSError, e:
outputRet["errorLog"] = errorLog = "Could not get file size: %s" % str(e)
tolog("!!WARNING!!2999!! %s" % (errorLog))
return PilotErrors.ERR_FAILEDSIZELOCAL, outputRet
else:
if fsize == "0":
outputRet["errorLog"] = errorLog = "Encountered zero file size for file %s" % (fileName)
self.log("!!WARNING!!2999!! %s" % (errorLog))
return PilotErrors.ERR_ZEROFILESIZE, outputRet
else:
self.log("Got file size: %s" % (fsize))
# get the checksum
if checksumType == "adler32" or checksumType == "default":
self.log("Executing adler32() for file: %s" % (fileName))
outputRet["checksum"] = fchecksum = SiteMover.SiteMover.adler32(fileName)
if fchecksum == '00000001': # "%08x" % 1L
outputRet["errorLog"] = errorLog = "Adler32 failed (returned 1)"
self.log("!!WARNING!!2999!! %s" % (errorLog))
return PilotErrors.ERR_FAILEDADLOCAL, outputRet
else:
self.log("Got adler32 checksum: %s" % (fchecksum))
else:
_cmd = '%s %s' % (CMD_CHECKSUM, fileName)
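        # CMD_CHECKSUM is assumed to be an md5sum-style command whose output starts with "<checksum> <filename>" (hence o.split()[0] below)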
self.log("Executing command: %s" % (_cmd))
try:
s, o = commands.getstatusoutput(_cmd)
except Exception, e:
s = -1
o = str(e)
self.log("!!WARNING!!2999!! Exception caught in getstatusoutput: %s" % (o))
if s != 0:
o = o.replace('\n', ' ')
check_syserr(s, o)
outputRet["errorLog"] = errorLog = "Error running checksum command (%s): %s" % (CMD_CHECKSUM, o)
self.log("!!WARNING!!2999!! %s" % (errorLog))
return PilotErrors.ERR_FAILEDMD5LOCAL, outputRet
outputRet["checksum"] = fchecksum = o.split()[0]
self.log("Got checksum: %s" % (fchecksum))
return 0, outputRet
|
[
"def process_file(filename):\n\n try:\n checksum_errors = verify_checksums(filename)\n if OPTIONS.compliance:\n compliance_errors = verify_compliance(filename)\n else:\n compliance_errors = 0\n if OPTIONS.write_file and checksum_errors == 0 or OPTIONS.force:\n update(filename)\n return checksum_errors + compliance_errors\n except Exception as e:\n log.error('EXCEPTION %r .. %s' % (filename, e))\n return 1",
"def check_files(self,fhdr):\n try:\n size_data = os.stat(fhdr.replace(\".vhdr\",\".eeg\")).st_size\n except:\n size_data = 0\n try:\n size_mrk = os.stat(fhdr.replace(\".vhdr\",\".vmrk\")).st_size\n except:\n size_mrk = 0\n \n return size_data,size_mrk",
"def _checksum_local_file(self, source_path):\n with open(source_path, 'r') as img_file:\n hasher = hashlib.md5()\n block_size = 0x10000\n buf = img_file.read(block_size)\n while len(buf) > 0:\n hasher.update(buf)\n buf = img_file.read(block_size)\n source_cksum = hasher.hexdigest()\n return source_cksum",
"def get_checksum(input_file):\n _, _, stderr = run_cmd(['ffprobe', input_file])\n checksum = re.findall('checksum == (.*)', stderr)[0]\n return checksum",
"def get_local_file_size(filename):\n\n file_size = None\n\n if os.path.exists(filename):\n try:\n file_size = os.path.getsize(filename)\n except Exception as exc:\n logger.warning(\"failed to get file size: %s\", exc)\n else:\n logger.warning(\"local file does not exist: %s\", filename)\n\n return file_size",
"def verify_checksum(filepath):\n file_obj = file_factory(filepath)\n return file_obj.verify_checksum()",
"def checksum(self, url):\n _, path = self._parse_url(url)\n file_checksum = self._hdfs_client.checksum(path)\n return '%s-%d-%s' % (\n file_checksum[_FILE_CHECKSUM_ALGORITHM],\n file_checksum[_FILE_CHECKSUM_LENGTH],\n file_checksum[_FILE_CHECKSUM_BYTES],\n )",
"def checksum(self, fileName):\n\n tar = tarfile.open(fileName, mode='r')\n lsl = [(x.name, int(x.size), int(x.mtime), x.uname) for x in tar.getmembers()]\n hasher = hashlib.sha256(str(lsl))\n checksum = hasher.hexdigest()\n\n return checksum",
"def file_information(self, logger, f, reg=None):\n try:\n _md5 = hashlib.md5(open(f, \"rb\").read()).hexdigest()\n _size = os.path.getsize(f)\n _date = datetime.fromtimestamp(os.stat(f).st_mtime)\n _extract = self.file_path_extract(logger, f, reg) if reg else reg\n\n return (_md5, _size, _date, _extract)\n except Exception as e:\n logger.warning(\"Something went wrong trying to get file information for: {0}, e: {1}\".format(f, e))\n return (None, None, None, None)",
"def checksum(self, filepath):\n command = f\"sha1sum {filepath}\"\n _, stdout, _ = self.exec_command(command)\n lines = stdout.readlines()\n return lines[0].strip()",
"def svn_fs_file_checksum(*args) -> \"svn_checksum_t **\":\n return _fs.svn_fs_file_checksum(*args)",
"def filesize(self):\n if self.downloaded_filesize < 0:\n self.retrieveHeaders()\n # If it is still invalid, see what the os can give us\n if self.downloaded_filesize < 0:\n try:\n self.downloaded_filesize = os.path.getsize(self.filepath)\n except (IOError, WindowsError), e:\n msg = \"Error while processing %s (file stats for file at %s): %s\"\n self.logger.log(msg % (self.url, self.filepath, e))\n raise IncompleteScanError\n return self.downloaded_filesize",
"def checksumChecker(localFilename, checksums):\n try:\n adler32 = readAdler32(localFilename)\n if adler32 == checksums['adler32']:\n return True\n else:\n return False\n except:\n cksum = readCksum(localFilename)\n if cksum == checksums['cksum']:\n return True\n else:\n return False\n\n return False",
"def getRemoteFileChecksum(self, full_surl, checksumType):\n remote_checksum = None\n output = None\n\n cmd = \"%s xrdadler32 %s\" % (self._setup, full_surl)\n tolog(\"Executing command: %s\" % (cmd))\n try:\n ec, output = commands.getstatusoutput(cmd)\n except Exception, e:\n tolog(\"Warning: (Exception caught) xrdadler32 failed: %s\" % (e))\n output = None\n else:\n if ec != 0 or \"[fail]\" in output:\n tolog(\"Warning: xrdadler32 failed: %d, %s\" % (ec, output))\n else:\n tolog(\"output: %s\" % output)\n try:\n remote_checksum = output.split()[-2]\n except:\n tolog(\"!!WARNING!!1998!! Cannot extract checksum from output: %s\" % (output))\n if not remote_checksum.isalnum():\n tolog(\"!!WARNING!!1998!! Failed to extract alphanumeric checksum string from output: %s\" % (output))\n remote_checksum = None\n return remote_checksum",
"def __init__ (self, filename, size, date, mdate, checksum) :\n self.filename = filename\n self.size = size\n self.date = date\n self.mdate = mdate # modification date\n self.checksum = checksum\n if date is not None and not isinstance (self.date, datetime.datetime) :\n raise ValueError(\"mismatch for date (%s) and file %s\" % (str(type(date)), filename))\n if mdate is not None and not isinstance (self.mdate, datetime.datetime) :\n raise ValueError(\"mismatch for mdate (%s) and file %s\" % (str(type(mdate)), filename))\n if not isinstance (size, int) :\n raise ValueError(\"mismatch for size (%s) and file %s\" % (str(type(size)), filename))\n if checksum is not None and not isinstance (checksum, str) :\n raise ValueError(\"mismatch for checksum (%s) and file %s\" % (str(type(checksum)), filename))\n if date is not None and mdate is not None :\n if mdate > date :\n raise ValueError(\"expecting mdate <= date for file \" + filename)",
"def get_file_lastModifiedDate(file_url):\n url = file_url.replace('json.zip','meta')\n metadata = requests.get(url).text\n metadata = metadata.split('\\n')\n lastModifiedDate = metadata[0].lstrip('lastModifiedDate:').rstrip('\\r')\n size = metadata[1].lstrip('size:').rstrip('\\r')\n zipSize = metadata[2].lstrip('zipSize:').rstrip('\\r')\n gzSize = metadata[3].lstrip('gzSize:').rstrip('\\r')\n sha256 = metadata[4].lstrip('sha256:').rstrip('\\r')\n return (lastModifiedDate,size,zipSize, gzSize,sha256)",
"def get_remote_file_size(host, file_name):\n cmd = \"ssh\" \" {}@{}\" \" stat -c%s {}\".format(\n getuser(), host, file_name)\n result = run_command(cmd)\n\n return int(result.stdout_text)",
"def get_ctime(local_path):\n\n try:\n return os.stat(local_path).st_ctime\n except FileNotFoundError:\n return -1.0",
"def process_file_command( self, filename ):\n\n command = \"file %s\" % posixpath.join(self.absolute_path,filename)\n fileinfo = os.popen( command ).readlines()[0]\n\n fileinfo = fileinfo.split(\":\",1)[1]\n if fileinfo.find(\"ERROR\") != -1:\n # Ersatz für >\"ERROR\" in fileinfo< ;)\n return \"\"\n else:\n return fileinfo"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
get checksum with xrdadler32 command
|
def getRemoteFileChecksum(self, full_surl, checksumType):
remote_checksum = None
output = None
cmd = "%s xrdadler32 %s" % (self._setup, full_surl)
tolog("Executing command: %s" % (cmd))
try:
ec, output = commands.getstatusoutput(cmd)
except Exception, e:
tolog("Warning: (Exception caught) xrdadler32 failed: %s" % (e))
output = None
else:
if ec != 0 or "[fail]" in output:
tolog("Warning: xrdadler32 failed: %d, %s" % (ec, output))
else:
tolog("output: %s" % output)
try:
remote_checksum = output.split()[-2]
except:
tolog("!!WARNING!!1998!! Cannot extract checksum from output: %s" % (output))
if not remote_checksum.isalnum():
tolog("!!WARNING!!1998!! Failed to extract alphanumeric checksum string from output: %s" % (output))
remote_checksum = None
return remote_checksum
|
[
"def calculate_checksum(self):\n return binascii.crc32(self.unpack_binary(0, 0x78)) & 0xFFFFFFFF",
"def bin_checksum(s):\n return bin_sha256(bin_sha256(s))[:4]",
"def getRemoteFileChecksumFromOutput(self, output):\n remote_checksum = None\n # get remote checksum from the command output\n if \"xrootd\" in output or \"XRootD\" in output:\n status = False\n # define the search patterns\n if \"md5:\" in output:\n checksum_pstr = r\"md5: ([a-zA-Z0-9]+)\"\n checksum_pattern = re.compile(checksum_pstr)\n status = True\n elif \"adler32:\" in output:\n checksum_pstr = r\"adler32: ([a-zA-Z0-9]+)\"\n checksum_pattern = re.compile(checksum_pstr)\n status = True\n else:\n tolog(\"!!WARNING!!2999!! Checksum info not found in xrdcp output: %s\" % (output))\n\n if status:\n # grab the checksum from the output\n _checksum = re.findall(checksum_pattern, output)\n if len(_checksum) > 0:\n remote_checksum = _checksum[0]\n\n # note: there's a bug in xrdcp which will generate non-fixed length adler checksums; checksums can be\n # of length 7. In that case add a \"0\" to the beginning of the string\n if \"adler32:\" in output:\n # verify string size length\n if len(remote_checksum) == 7:\n tolog(\"!!WARNING!!1111!! Adding 0 to beginning of checksum (xrdcp returned a length 7 checksum): %s\" % (remote_checksum))\n remote_checksum = \"0\" + remote_checksum\n elif len(remote_checksum) == 6:\n tolog(\"!!WARNING!!1111!! Adding 00 to beginning of checksum (xrdcp returned a length 6 checksum): %s\" % (remote_checksum))\n remote_checksum = \"00\" + remote_checksum\n elif len(remote_checksum) == 5:\n tolog(\"!!WARNING!!1111!! Adding 000 to beginning of checksum (xrdcp returned a length 5 checksum): %s\" % (remote_checksum))\n remote_checksum = \"000\" + remote_checksum\n elif len(remote_checksum) == 4:\n tolog(\"!!WARNING!!1111!! Adding 0000 to beginning of checksum (xrdcp returned a length 4 checksum): %s\" % (remote_checksum))\n remote_checksum = \"0000\" + remote_checksum\n\n tolog(\"Copy command returned checksum: %s\" % (remote_checksum))\n else:\n tolog(\"!!WARNING!!2999!! checksum search failed: pattern (%s) not found in: %s\" % (checksum_pstr, output))\n remote_checksum = None\n else:\n tolog(\"!!WARNING!!2999!! Unexpected xrdcp output: %s\" % (output))\n\n return remote_checksum",
"def get_checksum(data):\n return hashlib.sha1(data).hexdigest()",
"def calculate_adler32_checksum(filename):\n\n asum = 1 # default adler32 starting value\n blocksize = 64 * 1024 * 1024 # read buffer size, 64 Mb\n\n with open(filename, 'rb') as f:\n while True:\n data = f.read(blocksize)\n if not data:\n break\n asum = adler32(data, asum)\n if asum < 0:\n asum += 2**32\n\n # convert to hex\n return \"{0:08x}\".format(asum)",
"def Checksum(cls, string):\n # Get the last 10 bits\n c = crc32(string.encode('utf-8')) & (2 ** 10 - 1)\n return (cls.BASE32_ALPHABET[c >> cls.BASE32_BIT_WIDTH] +\n cls.BASE32_ALPHABET[c & (2 ** cls.BASE32_BIT_WIDTH - 1)])",
"def calculate_checksum(self, text):\n\t\tchecksum = 0\n\t\tfor i in range(len(text)):\n\t\t\tchecksum ^= ord(text[i])\n\n\t\treturn \"%x\" % (checksum % 256)",
"def test__checksum(self):\n # Test\n result = converter._checksum(1, 2, 3)\n expected = ('''\\\n3c9909afec25354d551dae21590bb26e38d53f2173b8d3dc3eee4c047e7ab1c1eb8b85103e3be7\\\nba613b31bb5c9c36214dc9f14a42fd7a2fdb84856bca5c44c2''')\n self.assertEqual(result, expected)",
"def get_checksum(input_file):\n _, _, stderr = run_cmd(['ffprobe', input_file])\n checksum = re.findall('checksum == (.*)', stderr)[0]\n return checksum",
"def test_checksum(self):",
"def calculate_data_checksum(self):\n data = self.unpack_binary(0x200, self.next_record_offset() - 0x200)\n return binascii.crc32(data) & 0xFFFFFFFF",
"def calculate_header_checksum(self):\n data = self.unpack_binary(0x0, 0x78)\n data += self.unpack_binary(0x80, 0x180)\n return binascii.crc32(data) & 0xFFFFFFFF",
"def crc32_hash(value: str) -> int:\n return crc32(bytes(value.encode(\"utf-8\")))",
"def parse_nmea_checksum(nmea_line):\n return int(nmea_line[-2:], 16) # checksum hex digits as int",
"def calculate_checksum(source_bytes):\n total_byte_sum = 0\n for byte in source_bytes:\n total_byte_sum += byte\n \n return 128 - (total_byte_sum % 128)",
"def get_checksum(self):\n if self.checksum is None:\n r = get(f'{self.link}?$format=json&$select=Checksum',\n auth=Product.AUTH).json()\n self.checksum = r['d']['Checksum']['Value']\n return self.checksum",
"def _get_checksum(cls, raw_message):\n return int(raw_message[-cls._digest_bytes:])",
"def checksum(sentence):\n crc = 0\n for c in sentence:\n crc = crc ^ ord(c)\n crc = crc & 0xFF\n return crc",
"def bech32_create_checksum( hrp, data ):\n values = bech32_hrp_expand( hrp ) + data\n polymod = bech32_polymod( values + [ 0, 0, 0, 0, 0, 0 ] ) ^ 1\n return [ ( polymod >> 5 * ( 5 - i ) ) & 31 for i in range( 6 ) ]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
get checksum from xrdcp chksum command output
|
def getRemoteFileChecksumFromOutput(self, output):
remote_checksum = None
# get remote checksum from the command output
if "xrootd" in output or "XRootD" in output:
status = False
# define the search patterns
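        # the copy command output is assumed to contain a line like "md5: <hex digest>" or "adler32: <hex digest>"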
if "md5:" in output:
checksum_pstr = r"md5: ([a-zA-Z0-9]+)"
checksum_pattern = re.compile(checksum_pstr)
status = True
elif "adler32:" in output:
checksum_pstr = r"adler32: ([a-zA-Z0-9]+)"
checksum_pattern = re.compile(checksum_pstr)
status = True
else:
tolog("!!WARNING!!2999!! Checksum info not found in xrdcp output: %s" % (output))
if status:
# grab the checksum from the output
_checksum = re.findall(checksum_pattern, output)
if len(_checksum) > 0:
remote_checksum = _checksum[0]
# note: there's a bug in xrdcp which will generate non-fixed length adler checksums; checksums can be
# of length 7. In that case add a "0" to the beginning of the string
if "adler32:" in output:
# verify string size length
if len(remote_checksum) == 7:
tolog("!!WARNING!!1111!! Adding 0 to beginning of checksum (xrdcp returned a length 7 checksum): %s" % (remote_checksum))
remote_checksum = "0" + remote_checksum
elif len(remote_checksum) == 6:
tolog("!!WARNING!!1111!! Adding 00 to beginning of checksum (xrdcp returned a length 6 checksum): %s" % (remote_checksum))
remote_checksum = "00" + remote_checksum
elif len(remote_checksum) == 5:
tolog("!!WARNING!!1111!! Adding 000 to beginning of checksum (xrdcp returned a length 5 checksum): %s" % (remote_checksum))
remote_checksum = "000" + remote_checksum
elif len(remote_checksum) == 4:
tolog("!!WARNING!!1111!! Adding 0000 to beginning of checksum (xrdcp returned a length 4 checksum): %s" % (remote_checksum))
remote_checksum = "0000" + remote_checksum
tolog("Copy command returned checksum: %s" % (remote_checksum))
else:
tolog("!!WARNING!!2999!! checksum search failed: pattern (%s) not found in: %s" % (checksum_pstr, output))
remote_checksum = None
else:
tolog("!!WARNING!!2999!! Unexpected xrdcp output: %s" % (output))
return remote_checksum
|
[
"def get_checksum(input_file):\n _, _, stderr = run_cmd(['ffprobe', input_file])\n checksum = re.findall('checksum == (.*)', stderr)[0]\n return checksum",
"def find_checksum(chksum): \n for record in capture_metadata2:\n if record[1]==chksum:\n return 1\n return 0",
"def get_checksum(self):\n if self.checksum is None:\n r = get(f'{self.link}?$format=json&$select=Checksum',\n auth=Product.AUTH).json()\n self.checksum = r['d']['Checksum']['Value']\n return self.checksum",
"def get_checksum(data):\n return hashlib.sha1(data).hexdigest()",
"def bin_checksum(s):\n return bin_sha256(bin_sha256(s))[:4]",
"def checksum(self, filepath):\n command = f\"sha1sum {filepath}\"\n _, stdout, _ = self.exec_command(command)\n lines = stdout.readlines()\n return lines[0].strip()",
"def svn_fs_file_md5_checksum(*args) -> \"unsigned char [ANY]\":\n return _fs.svn_fs_file_md5_checksum(*args)",
"def _get_checksum(cls, raw_message):\n return int(raw_message[-cls._digest_bytes:])",
"def test_checksum(self):",
"def calculate_checksum(self):\n return binascii.crc32(self.unpack_binary(0, 0x78)) & 0xFFFFFFFF",
"def parse_nmea_checksum(nmea_line):\n return int(nmea_line[-2:], 16) # checksum hex digits as int",
"def compute_checksum(bin_msg):\n assert len(bin_msg) > 0\n cksum = 0\n for b in bin_msg:\n cksum += b\n return cksum % 256",
"def checksum(self):\n sum1 = 0xff\n sum2 = 0xff\n\n checksummed_data = [self.frame_id, self.get_control()] + self.payload\n\n for b in checksummed_data:\n sum1 += b\n sum1 &= 0xffff # Results wrapped at 16 bits\n sum2 += sum1\n sum2 &= 0xffff\n\n sum1 = (sum1 & 0x00ff) + (sum1 >> 8)\n sum2 = (sum2 & 0x00ff) + (sum2 >> 8)\n\n checksum = ((sum2 << 8) & 0xffff) | sum1\n\n high_byte = (checksum & 0xff00) >> 8\n low_byte = checksum & 0x00ff\n\n return [high_byte, low_byte]",
"def test__checksum(self):\n # Test\n result = converter._checksum(1, 2, 3)\n expected = ('''\\\n3c9909afec25354d551dae21590bb26e38d53f2173b8d3dc3eee4c047e7ab1c1eb8b85103e3be7\\\nba613b31bb5c9c36214dc9f14a42fd7a2fdb84856bca5c44c2''')\n self.assertEqual(result, expected)",
"def getRemoteFileChecksum(self, full_surl, checksumType):\n remote_checksum = None\n output = None\n\n cmd = \"%s xrdadler32 %s\" % (self._setup, full_surl)\n tolog(\"Executing command: %s\" % (cmd))\n try:\n ec, output = commands.getstatusoutput(cmd)\n except Exception, e:\n tolog(\"Warning: (Exception caught) xrdadler32 failed: %s\" % (e))\n output = None\n else:\n if ec != 0 or \"[fail]\" in output:\n tolog(\"Warning: xrdadler32 failed: %d, %s\" % (ec, output))\n else:\n tolog(\"output: %s\" % output)\n try:\n remote_checksum = output.split()[-2]\n except:\n tolog(\"!!WARNING!!1998!! Cannot extract checksum from output: %s\" % (output))\n if not remote_checksum.isalnum():\n tolog(\"!!WARNING!!1998!! Failed to extract alphanumeric checksum string from output: %s\" % (output))\n remote_checksum = None\n return remote_checksum",
"def checksum(path, client):\n\n if not os.path.exists(path):\n raise ESGPublishError(\"No such file: %s\"%path)\n\n command = \"%s %s\"%(client, path)\n info(\"Running: %s\"%command)\n\n try:\n f = subprocess.Popen([client, path], stdout=subprocess.PIPE).stdout\n except:\n error(\"Error running command '%s %s', check configuration option 'checksum'.\"%command)\n lines = f.readlines()\n csum = lines[0].split()[0]\n\n return csum",
"def _calcCrc(self, package, data):\n msg = bytearray()\n\n msg.extend(struct.pack(\"B\", self.START_BYTE))\n msg.extend(struct.pack(\"B\", self.messageType))\n msg.extend(struct.pack(\"H\", self.commandType))\n self._packData(msg, data)\n\n crc = crc8()\n return crc.crc(msg)",
"def _get_error_bad_checksum(self):\n return self.__error_bad_checksum",
"def get_data_checksum(checksum, data):\n return checksum + np.sum(np.sum(data))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the global file paths using to_native_lfn() [dsname needed] or the Rucio naming convention [surl needed to extract the scope]
|
def getGlobalFilePaths(self, surl, dsname, computingSite, sourceSite, jobId=None):
# this method will in fact only ever return a single path, but keep 'paths' as a list for consistency with getGlobalFilePathsDQ2()
paths = []
# get the global redirectors (several, since the lib file might not be at the same place for overflow jobs)
fax_redirectors_dictionary = self.getFAXRedirectors(computingSite, sourceSite, jobId)
# select the proper fax redirector
if ".lib." in surl:
redirector = fax_redirectors_dictionary['computingsite']
else:
redirector = fax_redirectors_dictionary['sourcesite']
# correct the redirector in case the protocol and/or trailing slash are missing
redirector = self.updateRedirector(redirector)
# use the proper Rucio method to generate the path if possible (if scope is present in the SURL)
scope = extractPattern(surl, r'\/rucio\/(.+)\/[a-zA-Z0-9]{2}\/[a-zA-Z0-9]{2}\/')
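    # e.g. a SURL containing ".../rucio/mc15_13TeV/ab/cd/<lfn>" would yield the scope "mc15_13TeV" (illustrative value)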
if scope != "":
        # for Rucio naming convention details see https://twiki.cern.ch/twiki/bin/view/AtlasComputing/MovingToRucio
native_path = "/atlas/rucio/" + scope + ":"
else:
# get the pre-path
native_path = self.to_native_lfn(dsname, 'DUMMYLFN')
native_path = native_path.replace('DUMMYLFN', '') # the real lfn will be added by the caller
# remove the /grid substring
native_path = native_path.replace('/grid', '')
# construct the global path
paths.append(redirector + native_path)
tolog("Will use global path: %s" % (paths[0]))
return paths
|
[
"def findGlobalFilePath(self, surl, dsname, computingSite, sourceSite, jobId=None):\n\n global_path = \"\"\n filename = os.path.basename(surl)\n\n # should dq2-list-files be used? If not, use to_native_lfn() directly to guess the path\n useDQ2 = False\n\n if useDQ2:\n # get the global file paths from file/DQ2\n paths = self.getGlobalFilePathsDQ2(dsname)\n\n if paths != []:\n # locate the global path\n for path in paths:\n if filename in path:\n # does the file path begin with 'root://'?\n if self.verifyGlobalPath(path, verbose=True):\n global_path = path\n break\n else:\n # abort\n tolog(\"!!WARNING!!3333!! Failed to get global file path\")\n else:\n # get the global file paths from file/DQ2\n paths = self.getGlobalFilePaths(surl, dsname, computingSite, sourceSite, jobId=jobId)\n\n if paths[0][-1] == \":\": # this is necessary to prevent rucio paths having \":/\" as will be the case if os.path.join is used\n global_path = paths[0] + filename\n else: # for old style paths not using the \":\" separator\n global_path = os.path.join(paths[0], filename)\n\n return global_path",
"def getRucioPath(self, scope, lfn, prefix='rucio'):\n\n # <prefix=rucio>/<scope>/md5(<scope>:<lfn>)[0:2]/md5(<scope:lfn>)[2:4]/<lfn>\n\n hash_hex = hashlib.md5('%s:%s' % (scope, lfn)).hexdigest()\n\n paths = [prefix] + scope.split('.') + [hash_hex[0:2], hash_hex[2:4], lfn]\n paths = filter(None, paths) # remove empty parts to avoid double /-chars\n return '/'.join(paths)\n\n #scope = os.path.join(*scope.split('.')) # correct scope\n #return os.path.join(prefix, scope, hash_hex[0:2], hash_hex[2:4], lfn)",
"def get_fs_url(self):",
"def scope_files():\n return sorted(list(Path(\"data\").glob(\"scope*\")))",
"def lc_files(self):\n base = dirname(self.fnam) # Paths are relative to config.\n return [abspath(join(base, c.lc_path)) for c in self.conversions]",
"def _get_lsp_path_name(self):\n return self.__lsp_path_name",
"def find_GDAL_DATA():\n pathnames = []\n roots = site.getsitepackages()\n for root in roots:\n pathnames+=glob.glob(root+\"/osgeo/**/gt_datum.csv\",recursive=True)\n if len(pathnames):\n break\n return justpath(pathnames[0]) if len(pathnames) else \"\"",
"def get_named_referent(f):\n nsplit = os.path.splitext(os.path.basename(f))\n if nsplit[1] not in REFERENT_FILE_TYPES:\n return None, None\n parent_path = os.path.dirname(f)\n parent = os.path.basename(parent_path)\n psplit = os.path.splitext(parent)\n\n pbase = psplit[0].lower().replace(' ','').lstrip('0123456789_-.')\n nbase = nsplit[0].lower().replace(' ','').lstrip('0123456789_-.')\n\n if nbase in ['readme', 'changelog', 'history']:\n return None, None # Ignore these files\n elif (len(nbase)>=3 and pbase==nbase) or \\\n (len(nbase)>=6 and pbase.endswith(nbase)):\n return nbase, parent_path # Return folder reference\n elif nsplit[0][0:1]=='_':\n return nbase, None # Return file reference\n else:\n return None, None # Return no reference at all",
"def get_local_dotrecipyrc():\n return os.path.join(os.getcwd(), DOTRECIPYRC)",
"def _get_prefixes_for_pathless_file_name(model_name, grid_id=None):\n\n nwp_model_utils.check_grid_id(model_name, grid_id)\n if model_name == nwp_model_utils.NARR_MODEL_NAME:\n return [NARR_ID_FOR_FILE_NAMES]\n\n if model_name == nwp_model_utils.RAP_MODEL_NAME:\n return ['{0:s}_{1:s}'.format(model_name, grid_id)]\n\n return ['ruc2_{0:s}'.format(grid_id), 'ruc2anl_{0:s}'.format(grid_id)]",
"def get_grism_path(root):\n PATH = './'\n if root.startswith('COSMOS'):\n PATH = unicorn.GRISM_HOME+'COSMOS/'\n if root.startswith('AEGIS'):\n PATH = unicorn.GRISM_HOME+'AEGIS/'\n if root.startswith('GOODS-N'):\n PATH = unicorn.GRISM_HOME+'GOODS-N/'\n if root.startswith('UDS'):\n PATH = unicorn.GRISM_HOME+'UDS/'\n if root.startswith('GN20'):\n PATH = unicorn.GRISM_HOME+'GOODS-N/'\n if root.startswith('G850.1'):\n PATH = unicorn.GRISM_HOME+'GOODS-N/'\n if root.startswith('GOODS-S'):\n PATH = unicorn.GRISM_HOME+'GOODS-S/'\n if root.startswith('UDF'):\n PATH = unicorn.GRISM_HOME+'UDF/'\n if root.startswith('WFC3-ERS'):\n PATH = unicorn.GRISM_HOME+'ERS/'\n if root.startswith('MARSHALL'):\n PATH = unicorn.GRISM_HOME+'SN-MARSHALL/'\n if root.startswith('PRIMO'):\n PATH = unicorn.GRISM_HOME+'SN-PRIMO/'\n if root.startswith('GEORGE'):\n PATH = unicorn.GRISM_HOME+'SN-GEORGE/'\n if root.startswith('TILE41'):\n PATH = unicorn.GRISM_HOME+'SN-TILE41/'\n if root.startswith('EGS1'):\n PATH = unicorn.GRISM_HOME+'COOPER/'\n if root.startswith('COLFAX'):\n PATH = unicorn.GRISM_HOME+'SN-COLFAX/'\n for fi in ['HS0105', 'HS1603', 'Q0100', 'Q0142', 'Q0207', 'Q0449', 'Q0821', 'Q1009', 'Q1217', 'Q1442', 'Q1549', 'Q1623', 'Q1700', 'Q2206', 'Q2343']:\n if fi in root:\n PATH = unicorn.GRISM_HOME+'Erb/'\n \n return PATH",
"def get_crds_actual_paths(observatory):\n return {\n \"mapping root\" : get_crds_mappath(observatory),\n \"reference root\" : get_crds_refpath(observatory),\n \"config root\" : get_crds_cfgpath(observatory),\n \"pickle root\" : get_crds_picklepath(observatory),\n }",
"def _get_local_files(self, raw_log_path = raw_log_path):\n self.logger.info(\"get_local_files starts\")\n filepathes = glob.glob(\"%s/*/*\" % (raw_log_path)) # e.g, #/data/mixs_log/raw/uid/filename\n local_files = {}\n for filepath in filepathes:\n filename = filepath.split(\"/\")[-1]\n local_files[filename] = 1\n pass\n self.logger.info(\"get_local_files finished\")\n return local_files",
"def idl_file_to_global_names(idl_filename):\n interface_name = idl_filename_to_interface_name(idl_filename)\n full_path = os.path.realpath(idl_filename)\n idl_file_contents = get_file_contents(full_path)\n extended_attributes = get_interface_extended_attributes_from_idl(idl_file_contents)\n\n global_keys = GLOBAL_EXTENDED_ATTRIBUTES.intersection(\n extended_attributes.keys())\n if not global_keys:\n return\n if len(global_keys) > 1:\n raise ValueError('The [Global] and [PrimaryGlobal] extended attributes '\n 'MUST NOT be declared on the same interface.')\n global_key = next(iter(global_keys))\n\n global_value = extended_attributes[global_key]\n if global_value:\n return global_value.strip('()').split(',')\n return [interface_name]",
"def get_stellaPaths(self):\n \n # Get the standard output files for stella \n self.path.finalphi_stella = self.input_file.with_suffix(\".final_fields\")\n self.path.geometry_stella = self.input_file.with_suffix(\".geometry\")\n self.path.fluxes_stella = self.input_file.with_suffix(\".fluxes\")\n self.path.omega_stella = self.input_file.with_suffix(\".omega\")\n self.path.output_stella = self.input_file.with_suffix(\".out.nc\")\n return",
"def get_datasets_paths(self):\n global dir_covid\n global dir_normal\n\n #Loop through directories, subdirs and files for dir, subdir, file in os.walk(self.path)L\n\n for dir, subdir, file in os.walk(self.path):\n\n #Register last folder\n last_folder = os.path.basename(os.path.normpath(dir))\n\n #Check if last folder is covid\n if last_folder == 'covid':\n dir_covid = dir\n\n #Check if last folder is normal\n elif last_folder == 'normal':\n dir_normal = dir\n\n elif last_folder == 'saved':\n dir_saved = dir\n\n return dir_covid, dir_normal, dir_saved",
"def get_booknlp_fpath(self, sid):\n\n\t\tif sid == 'a-tale-of-two-cities':\n\t\t\treturn self.atotcm.booknlp_fpath\n\t\telif sid == 'peregrine-pickle':\n\t\t\treturn self.ppm.booknlp_fpath\n\t\telif sid == 'pride-and-prejudice':\n\t\t\treturn self.pnpm.booknlp_fpath\n\t\telif sid == 'to-the-lighthouse':\n\t\t\treturn self.ttlm.booknlp_fpath\n\t\telif sid == 'tristram-shandy':\n\t\t\treturn self.ttm.booknlp_fpath\n\t\telif self.btmsm.belongs(sid):\n\t\t\treturn self.btmsm.get_booknlp_fpath(sid)\n\t\telif self.contcm.belongs(sid):\n\t\t\treturn self.contcm.get_booknlp_fpath(sid)\n\t\telif self.mfacm.belongs(sid):\n\t\t\treturn self.mfacm.get_booknlp_fpath(sid)\n\t\telif self.nf19Cm.belongs(sid):\n\t\t\treturn self.nf19Cm.get_booknlp_fpath(sid)\n\t\telif self.nf21Cm.belongs(sid):\n\t\t\treturn self.nf21Cm.get_booknlp_fpath(sid)\n\t\telif self.nycm.belongs(sid):\n\t\t\treturn self.nycm.get_booknlp_fpath(sid)\n\t\telif self.pipcm.belongs(sid):\n\t\t\treturn self.pipcm.get_booknlp_fpath(sid)\n\t\telif self.percm.belongs(sid):\n\t\t\treturn self.percm.get_booknlp_fpath(sid)\n\t\telif self.stancm.belongs(sid):\n\t\t\treturn self.stancm.get_booknlp_fpath(sid)\n\t\telif self.wilkcm.belongs(sid):\n\t\t\treturn self.wilkcm.get_booknlp_fpath(sid)\n\t\telse:\n\t\t\traise ValueError(\"Unrecognized story id, \" + sid + \".\")",
"def processedFileNamesLocations(samplingType):\n\tdirName = '{}/../../data/processed/currency_exchange/'.format(currentFileDir)\n\ttraining = '{}{}_training.dat'.format(dirName, samplingType)\n\ttesting = '{}{}_testing.dat'.format(dirName, samplingType)\n\treturn training, testing",
"def name_from_path( local_path):\n result = local_path.rstrip( os.sep).split( os.sep)\n result = result[-1].split('_')\n return next((a for a in result if 'DFN' in a), 'DFNUNKNOWN')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Correct the redirector in case the protocol and/or trailing slash are missing
|
def updateRedirector(self, redirector):
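    # e.g. "glrd.usatlas.org" would become "root://glrd.usatlas.org/" (illustrative redirector)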
if not redirector.startswith("root://"):
redirector = "root://" + redirector
tolog("Updated redirector for missing protocol: %s" % (redirector))
if not redirector.endswith("/"):
redirector = redirector + "/"
tolog("Updated redirector for missing trailing /: %s" % (redirector))
# Protect against triple slashes
redirector = redirector.replace('///','//')
return redirector
|
[
"def test_subdomain_redirect(self):\n self.init('/', 'japan.personfinder.appspot.com')\n legacy_redirect.redirect(self.handler)\n self.assertEquals(301, self.handler.response.status_int)\n self.assertEquals('http://google.org/personfinder/japan/',\n self.handler.response.headers['Location'])",
"def before_request():\n scheme = request.headers.get('X-Forwarded-Proto')\n if scheme and scheme == 'http' and request.url.startswith('http://'):\n url = request.url.replace('http://', 'https://', 1)\n code = 301\n return redirect(url, code=code)",
"def http_error_302(self, req, fp, code, msg, headers):\n self.location = headers.get('Location', '')\n uprint(\"headers['Location']=\" + self.location)\n def squote(s):\n return urllib.parse.quote(s, ';/?:&=+,$[]%^')\n try:\n self.location.encode('ascii')\n except UnicodeEncodeError:\n scheme, netloc, path, params, query, fragment = \\\n urllib.parse.urlparse(self.location)\n self.location = urllib.parse.urlunparse((\n scheme, netloc, urllib.parse.quote(path), squote(params), squote(query),\n fragment))\n headers.replace_header('Location', self.location)\n uprint(\"pquoted headers['Location']=\" + self.location)\n return urllib.request.HTTPRedirectHandler.http_error_302(\n self, req, fp, code, msg, headers)",
"def process_request(self, request):\n if ('HTTP_HOST' not in request.META):\n return None #if we can't determine HOST we will proceed as usual\n (domain, sep, port) = request.META['HTTP_HOST'].partition(':')\n scheme = 'https' if request.is_secure() else 'http' \n if (domain in self.redirect_dict):\n return HttpResponseRedirect(scheme + '://' + self.redirect_dict[domain] + ':' + port + request.get_full_path())\n return None #fallthrough",
"def test_dotorg_redirect(self):\n self.init('/view?given_name=&id=turkey-2011.person-finder.appspot.com'\n '%2Fperson.1141073&family_name=&query=ahmet&role=seek',\n host='turkey-2011.personfinder.google.org')\n legacy_redirect.redirect(self.handler)\n self.assertEquals(301, self.handler.response.status_int)\n self.assertEquals(\n 'http://google.org/personfinder/turkey-2011/view?'\n 'id=turkey-2011.person-finder.appspot.com'\n '%2Fperson.1141073&query=ahmet&role=seek',\n self.handler.response.headers['Location'])",
"def testRootAPITrailingSlash(self):\n response = self.client.get(self.url().rstrip('/'),\n data={'format': 'api'})\n self.assertEqual(response.status_code, 301)\n self.assertEqual(response.url.replace('http://testserver', ''), self.url())",
"def test_certificate_redirect(self):\n without_slash = \"https://localhost:%s/certificate/\" % settings.HTTPS_FRONTEND_PORT\n response = requests.get(without_slash, verify=False, allow_redirects=False)\n self.assertEqual(response.status_code, 301)",
"def redirect_to_referral_url(self):\n referer = self.request.META.get('HTTP_REFERER')\n if referer:\n referer = unquote(referer) # HTTP_REFERER may be encoded.\n\n if not is_safe_url(\n url=referer,\n allowed_hosts={self.request.get_host()},\n require_https=self.request.is_secure(),\n ):\n referer = '/'\n return redirect(referer)",
"def redirect_nonwww():\n DOMAIN_NAME = \"cyclerouteforecast.com\"\n url = request.url\n urlparts = urlparse(url)\n if urlparts.netloc == DOMAIN_NAME:\n urlparts_list = list(urlparts)\n\n urlparts_list[1] = 'www.' + DOMAIN_NAME\n new_url = urlunparse(urlparts_list)\n logging.debug(\"redirecting from {} to {}\".format(url, new_url))\n return redirect(new_url, code=301)",
"def _http_check_url_rec_handle_redir(r, redirects):\n\n # If Location is in the headers\n if \"Location\" in r.headers:\n url_redir = r.headers[\"Location\"]\n redirects.append(url_redir)\n\n # Loop back in the recursion\n return FME_utils._http_check_url_rec(url_redir, redirects)\n\n return False",
"def safe_redirect(target, endpoint=\"home.index\"):\r\n if not target or not is_safe_url(target):\r\n target = url_for(endpoint)\r\n return redirect(target)",
"def redirect_after_login(request):\n url_path = request.GET.get(\"next\", None) \n if url_path is None:\n return redirect(settings.LOGIN_REDIRECT_URL)\n elif not is_safe_url(\n url=resolve_url(url_path),\n allowed_hosts={request.get_host()},\n require_https=request.is_secure()\n ):\n return redirect(settings.LOGIN_REDIRECT_URL)\n else:\n return redirect(resolve_url(url_path))",
"def warn_trailing_slash(self, dest, uri):\n if uri == '%s/' % self.get_uri(dest):\n self.log.warning(\n 'It seems that the url given do not need the trailing slash (%s). '\n 'You would have better not to keep trailing slash in your urls '\n 'if you don\\'t have to.' % uri)\n return True\n return False",
"def _normalize_base_url(_base_url):\n _base_url = _base_url[:-1] if _base_url.endswith('/') else _base_url\n _base_url = f\"https://{_base_url}\" if not _base_url.startswith('http') else _base_url\n return _base_url",
"def test_redirect_suppression(self):\n redirect, _ = doc_rev('REDIRECT <a class=\"redirect\" href=\"http://smoo/\">smoo</a>')\n response = self.client.get(\n redirect.get_absolute_url() + '?redirect=no',\n follow=True)\n self.assertContains(response, 'REDIRECT ')",
"def get_redirect_url(self):\n redirect_to = self.request.POST.get(\n self.redirect_field_name,\n self.request.GET.get(self.redirect_field_name, '')\n )\n url_is_safe = is_safe_url(\n url=redirect_to,\n allowed_hosts=self.get_success_url_allowed_hosts(),\n require_https=self.request.is_secure(),\n )\n return redirect_to if url_is_safe else ''",
"def addslash(method):\r\n @functools.wraps(method)\r\n def wrapper(self, *args, **kwargs):\r\n if not self.request.path.endswith(\"/\"):\r\n if self.request.method in (\"GET\", \"HEAD\"):\r\n uri = self.request.path + \"/\"\r\n if self.request.query:\r\n uri += \"?\" + self.request.query\r\n self.redirect(uri, permanent=True)\r\n return\r\n raise HTTPError(404)\r\n return method(self, *args, **kwargs)\r\n return wrapper",
"def EnableAutoRedirect(self):\r\n self.__opener.add_handler(urllib2.HTTPRedirectHandler())",
"def site_redirect_engine(shorturl):\n longurl = get_longurl(shorturl)\n if longurl is None:\n abort(404)\n if longurl.find(\"http://\") != 0 and longurl.find(\"https://\") != 0:\n longurl = \"http://\" + longurl\n if longurl:\n update_visits(shorturl)\n return redirect(longurl)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the global file paths using dq2-list-files
|
def getGlobalFilePathsDQ2(self, dsname):
paths = []
if dsname == "":
tolog("!!WARNING!!3333!! Dataset not defined")
return paths
filename = self.getGlobalPathsFileName(dsname)
if os.path.exists(filename):
try:
f = open(filename, 'r')
        except (IOError, OSError), e:
tolog("!!WARNING!!3333!! Could not open global paths file: %s (will attempt to recreate it)" % (e))
else:
p = f.read()
if p != "":
tolog("Cache detected (reading global paths from file)")
paths = p.split("\n")
f.close()
# if a proper file did not exist already, create and populate it
if paths == []:
redirector = readpar('faxredirector') # 'root://glrd.usatlas.org/'
if redirector != "":
# correct the redirector in case the protocol and/or trailing slash are missing
redirector = self.updateRedirector(redirector)
cmd = 'export STORAGEPREFIX=%s; ' % (redirector)
cmd += 'dq2-list-files -p %s' % (dsname)
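            # with STORAGEPREFIX exported, dq2-list-files is expected to print one global root:// path per line (verified below)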
try:
tolog("Executing command: %s" % (cmd))
s, telapsed, cout, cerr = timed_command(cmd, self.timeout)
except Exception, e:
tolog("!!WARNING!!3333!! timed_command() threw an exception: %s" % str(e))
s = 1
output = str(e)
telapsed = self.timeout
else:
output = cout + cerr
tolog("Elapsed time: %d" % (telapsed))
# a lot of output: tolog("Command output: %s" % (output))
if self.verifyGlobalPaths(output):
paths = output.split("\n")
# save the paths for later use (for the next file if necessary)
try:
f = open(filename, "w")
                except (IOError, OSError), e:
tolog("!!WARNING!!3333!! Could not open global paths file: %s (will attempt to recreate it)" % (e))
else:
f.write(output)
f.close()
else:
tolog("!!WARNING!!3334!! Could not verify global paths")
else:
tolog("!!WARNING!!3332!! Can not get global paths without a FAX redirector (set schedconfig.faxredirector)")
return paths
|
[
"def llist(self):\n\n filenames = os.listdir(self.SHARED_FOLDER)\n\n print('Local filepaths:')\n for filename in filenames:\n print(f'\\t{filename}')",
"def define_files():\n paths = []\n files = os.listdir(PATH_TO_INPUT_DIR)\n for file in files:\n path = os.path.join(PATH_TO_INPUT_DIR, file)\n paths.append(path)\n return paths",
"def _get_file_list(self):\n files = [f for f in os.listdir(FILESTORE_PATH) if os.path.isfile(os.path.join(FILESTORE_PATH, f))]\n return files",
"def getLibraryFilePathList(self, *args):\r\n return _osgDB.Registry_getLibraryFilePathList(self, *args)",
"def getFilesPaths(self):\n nextIsFileName = False\n filePaths = []\n for arg in self.args[1]:\n if (nextIsFileName):\n filePaths.append(arg)\n nextIsFileName = False\n else:\n if (arg == '-df'):\n nextIsFileName = True\n return filePaths",
"def _getRepositoryListPaths():\r\n _repositoryListPaths = []\r\n _repositoryListPaths.append(os.path.join(home,\".subuser\",\"repositories.json\"))\r\n _repositoryListPaths.append(\"/etc/subuser/repositories.json\") # TODO how does this work on windows?\r\n _repositoryListPaths.append(os.path.join(_getSubuserDir(),\"repositories.json\"))\r\n repositoryListPaths = []\r\n for path in _repositoryListPaths:\r\n if os.path.exists(path):\r\n repositoryListPaths.append(path)\r\n return repositoryListPaths",
"def getGlobalFilePaths(self, surl, dsname, computingSite, sourceSite, jobId=None):\n\n # this method will in fact only ever return a single path, but keep 'paths' as a list for consistency with getGlobalFilePathsDQ2()\n paths = []\n\n # get the global redirectors (several, since the lib file might not be at the same place for overflow jobs)\n fax_redirectors_dictionary = self.getFAXRedirectors(computingSite, sourceSite, jobId)\n\n # select the proper fax redirector\n if \".lib.\" in surl:\n redirector = fax_redirectors_dictionary['computingsite']\n else:\n redirector = fax_redirectors_dictionary['sourcesite']\n\n # correct the redirector in case the protocol and/or trailing slash are missing\n redirector = self.updateRedirector(redirector)\n\n # use the proper Rucio method to generate the path if possible (if scope is present in the SURL)\n scope = extractPattern(surl, r'\\/rucio\\/(.+)\\/[a-zA-Z0-9]{2}\\/[a-zA-Z0-9]{2}\\/')\n if scope != \"\":\n # for Rucio convension details see https://twiki.cern.ch/twiki/bin/view/AtlasComputing/MovingToRucio\n native_path = \"/atlas/rucio/\" + scope + \":\"\n else:\n # get the pre-path\n native_path = self.to_native_lfn(dsname, 'DUMMYLFN')\n native_path = native_path.replace('DUMMYLFN', '') # the real lfn will be added by the caller\n\n # remove the /grid substring\n native_path = native_path.replace('/grid', '')\n\n # construct the global path\n paths.append(redirector + native_path)\n\n tolog(\"Will use global path: %s\" % (paths[0]))\n\n return paths",
"def _get_local_files(self, raw_log_path = raw_log_path):\n self.logger.info(\"get_local_files starts\")\n filepathes = glob.glob(\"%s/*/*\" % (raw_log_path)) # e.g, #/data/mixs_log/raw/uid/filename\n local_files = {}\n for filepath in filepathes:\n filename = filepath.split(\"/\")[-1]\n local_files[filename] = 1\n pass\n self.logger.info(\"get_local_files finished\")\n return local_files",
"def GetFilepaths(self):\n filenames = self.GetFilenames()\n path = self.dirpicker.GetPath()\n\n filepaths = [] \n for name in filenames:\n filepath = path+os.sep+name\n filepaths.append(filepath)\n return filepaths",
"def global_resources_files(config):\n # type: (dict) -> list\n try:\n files = config['global_resources']['files']\n if util.is_none_or_empty(files):\n raise KeyError()\n except KeyError:\n files = []\n return files",
"def gethomepaths(self):\n cwd = os.getcwd()\n home_dir = os.path.expanduser('~')\n os.chdir(home_dir)\n fs_dir = os.path.abspath('.')\n\tos.chdir(cwd) # I hope this will always get you back to the original place...\n if home_dir!= fs_dir:\n return [home_dir, fs_dir]\n else:\n return [home_dir]",
"def _get_files(self):\n\n glob_path = os.path.join(self.path, self.mask)\n return glob.glob(glob_path)",
"def getDataFilePathList(self, *args):\r\n return _osgDB.Registry_getDataFilePathList(self, *args)",
"def inputFiles(self, globalSandbox):\n\n filelist = ''\n if globalSandbox is not None:\n for sbFile in globalSandbox.split(','):\n if sbFile == '' :\n continue\n filename = os.path.abspath(sbFile)\n filename.strip()\n filelist += filename + ','\n return filelist[:-1] # Strip off last \",\"",
"def filelist(pwd):\n fully_qualified_list = []\n for root, dirs, files in sorted(os.walk(pwd)):\n for filename in files:\n filepath = os.path.join(root, filename)\n fully_qualified_list.append(filepath)\n return fully_qualified_list",
"def listdir(self):\r\n\t\treturn []",
"def get_all_list_files():\n d = sjb.common.config.get_user_app_data_dir(_APP, suite_name=_SUITE)\n files = os.listdir(d)\n matching = []\n for f in files:\n if not os.path.isfile(os.path.join(d, f)):\n continue\n # check that it has correct extension.\n if not f.endswith(_LIST_FILE_EXTENSION):\n continue\n matching.append(f[0:(len(f)-len(_LIST_FILE_EXTENSION))])\n return matching",
"def _get_file_list(file_filter, dat_path=DAT_PATH):\n file_list = []\n for (root, dirs, files,) in os.walk(dat_path, followlinks=True):\n for file_ in files:\n if file_.endswith('.{}'.format(file_filter)):\n file_list.append(os.path.join(root, file_))\n return file_list",
"def _glob_files(DATA_PATH):\n FILE_LIST = glob.glob(DATA_PATH + \"/*\")\n return FILE_LIST",
"def scope_files():\n return sorted(list(Path(\"data\").glob(\"scope*\")))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verify a global path (make sure the path begins with the root file protocol)
|
def verifyGlobalPath(self, path, verbose=True):
# NOTE: per file check
status = False
protocol = 'root://'
if path != "":
if len(path) > len(protocol):
if path[:len(protocol)] == protocol:
# path verified
status = True
if verbose:
tolog("Global path verified: %s" % (path))
else:
tolog("!!WARNING!!3335!! Junk path detected in dq2-list-files output: %s (cannot use path)" % (path))
else:
tolog("!!WARNING!!3336!! Unexpected command output: %s" % (path))
else:
tolog("!!WARNING!!3337!! No global path found")
return status
|
[
"def check_path():\n root = os.path.abspath(os.path.curdir)\n assert os.path.basename(root) == \"treelite\", \"Must be run on project root.\"",
"def check_path(path, curr_dir):\n if not os.path.isabs(path):\n path = os.path.join(curr_dir, path)\n\n return path",
"def is_root(path):\n return path == \"\"",
"def validate_path(self, path: str) -> bool:\n pass",
"def _validate_module_path(path: str):\n assert isinstance(path, str), ValueError(\"Path must be a string\")\n path = os.path.abspath(path)\n assert os.path.isfile(path), ValueError(f\"Path {path} dose not exist or is not a file.\")\n return path",
"def is_unc_path(path: Path):\n\n return PureWindowsPath(path).anchor.startswith(r\"\\\\\")",
"def is_remote_path(path: str) -> bool:\n return path.startswith(\"\\\\\\\\\") or path.startswith(\"//\")",
"def _check_env_path_valid(path):\n if path.endswith(\"/\") or path.startswith(\"/\"):\n raise InvalidEnvironmentPathError(\n \"'{0}' is an invalid path string. Environment paths should \"\n \"not have leading or trailing slashes.\".format(path)\n )",
"def _validate_file_path(self):\n ends_in_slash = self.file_path[-1] == '/'\n _, file_name = self._split_file_from_path(self.file_path)\n if not ends_in_slash:\n raise ValueError('Invalid file path: should end in \"/\".')\n if len(file_name) > 0:\n raise ValueError('Invalid file path: should not contain \".\".')",
"def check_path(self,path) :\n return self.path == path",
"def absPathAndVerify(path):\n absolutePath = absPath(path)\n if os.path.exists(absolutePath):\n return absolutePath\n else:\n print \"%s is not an absolute path that exists. Exiting controller.py.\" % absolutePath\n exit(3)",
"def __checkBase():\n if FIRED_BASE_FOLDER is None: sys.exit(\"\\033[91mNeed to set FIRED basefolder Folder\\033[0m\")",
"def is_valid_path(path, urlconf=None):\n try:\n return resolve(path, urlconf)\n except Resolver404:\n return False",
"def _check_input_path(self, input_path):",
"def test_default_route_relative_path(self):\n path = '.'\n def_route = DefaultRoute(path)\n #assert_regexp_matches(def_route.default_handler_args['path'], '.')\n assert path in def_route.default_handler_args['path']",
"def verify_path(path):\n if not exists(path):\n try:\n os.mkdir(path)\n except:\n pass\n\n return path",
"def CheckPath(Path):\n\n NormalInput = \":\\\\\"\n ExitCall = \"e\"\n ExitCall01 = \"E\"\n\n if NormalInput not in Path and ExitCall not in Path and ExitCall01 not in Path:\n return False\n else:\n return True",
"def __good_path(path):\n # type: (str) -> str\n path = path.replace(\"\\\\\", \"/\")\n path = path.replace(\"//\", \"/\")\n return path",
"def is_global_prefix_set() -> bool:\n return os.path.isfile(BaseTestingManager.__GLOBAL_PREFIX_PATH)",
"def is_match_path(input_path, smb_share_details):\n input_path = input_path[:-1] if input_path[-1] == \"/\" else input_path\n if smb_share_details['path'] != input_path:\n return False\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verify the global paths (make sure the output begins with the root file protocol)
|
def verifyGlobalPaths(self, output):
    # NOTE: this is not a per-file check but an overall check to make sure the output is not garbage
# individual files will be verified as they are needed
return self.verifyGlobalPath(output, verbose=False)
|
[
"def testPaths():\n for path in config.main.paths:\n assert(os.path.exists(config.main.paths[path]))",
"def check_path():\n root = os.path.abspath(os.path.curdir)\n assert os.path.basename(root) == \"treelite\", \"Must be run on project root.\"",
"def __checkBase():\n if FIRED_BASE_FOLDER is None: sys.exit(\"\\033[91mNeed to set FIRED basefolder Folder\\033[0m\")",
"def check_paths(self):\n self.settings.fileStore = os.path.expandvars(self.settings.fileStore) # to allow things like $HOME or $RMGpy\n self.settings.scratchDirectory = os.path.expandvars(self.settings.scratchDirectory)\n for path in [self.settings.fileStore, self.settings.scratchDirectory]:\n if not os.path.exists(path):\n logging.info(\"Creating directory %s for QM files.\" % os.path.abspath(path))\n # This try/except should be redundant, but some networked file systems\n # seem to be slow or buggy or respond strangely causing problems\n # between checking the path exists and trying to create it.\n try:\n os.makedirs(path)\n except OSError as e:\n logging.warning(\"Error creating directory {0}: {1!r}\".format(path, e))\n logging.warning(\"Checking it already exists...\")\n assert os.path.exists(path), \"Path {0} still doesn't exist?\".format(path)",
"def testFilePath(self):\n files = list(File().find())\n for file in files:\n adapter = File().getAssetstoreAdapter(file)\n filesystempath = adapter.fullPath(file)\n filepath = File().getLocalFilePath(file)\n fusepath = File().getGirderMountFilePath(file)\n self.assertTrue(os.path.exists(filesystempath))\n self.assertTrue(os.path.exists(filepath))\n self.assertTrue(os.path.exists(fusepath))\n self.assertEqual(filesystempath, filepath)\n self.assertNotEqual(filesystempath, fusepath)\n self.assertEqual(fusepath[:len(self.mountPath)], self.mountPath)\n with open(filepath) as file1:\n with open(fusepath) as file2:\n self.assertEqual(file1.read(), file2.read())\n subpath = fusepath[len(self.mountPath):].lstrip('/')\n if self.knownPaths.get(subpath):\n with open(fusepath) as file1:\n self.assertEqual(file1.read().strip(), self.knownPaths[subpath])",
"def check_file_paths(self):\n if self.version != OUTDATED_WACZ:\n package_files = [item[\"path\"] for item in self.datapackage[\"resources\"]]\n for filepath in pathlib.Path(self.dir.name).glob(\"**/*.*\"):\n filename = os.path.basename(filepath)\n if (\n filename != \"datapackage.json\"\n and filename != \"datapackage-digest.json\"\n ):\n file = str(filepath).split(\"/\")[-2:]\n file = \"/\".join(file)\n if file not in package_files:\n print(\"file %s is not listed in the datapackage\" % file)\n return False\n return True",
"def test_root_path(self, copier, cwp):\n assert cwp.root_path == C_ROOT_PATH\n assert copier.root_path == \"\"",
"def _check_input_path(self, input_path):",
"def is_root(path):\n return path == \"\"",
"def verify_paths(config=None, output_collection=None, return_missing=False):\n paths = get_paths(config=config, output_collection=output_collection)\n missing = list(filter(lambda p: p and not os.path.exists(p), paths))\n return missing if return_missing else not bool(missing)",
"def checkSysPath(self):\n coreDir = natlinkcorefunctions.getBaseFolder()\n if coreDir.lower().endswith('core'):\n # check the registry setting:\n try:\n regDict, sectionName = self.getHKLMPythonPathDict()\n except pywintypes.error:\n print \"\"\"PythonPath setting not found in registry\\n\nPlease try to correct this by running the NatLink Config Program (with administration rights)\"\"\"\n return\n except ValueError:\n print \"\"\"NatLink setting not found or wrong in PythonPath setting in registry\\n\nPlease try to correct this by running the NatLink Config Program (with administration rights)\"\"\"\n return\n\n if regDict is None:\n print \"\"\"NatLink setting not found or wrong in PythonPath setting in registry\\n\nPlease try to correct this by running the NatLink Config Program (with administration rights)\"\"\"\n return\n \n section = regDict['NatLink']\n if not section:\n print \"\"\"PythonPath/Natlink setting in registry does exist.\\n\nPlease try to correct this by running the NatLink Config Program (with administration rights)\"\"\"\n return\n setting = section['']\n if setting.lower() == coreDir.lower():\n baseDir = os.path.normpath(os.path.join(coreDir, \"..\"))\n self.InsertToSysPath(coreDir)\n self.InsertToSysPath(baseDir)\n else:\n print \"\"\"PythonPath/Natlink setting in registry does not match this core directory\\n\nregistry: %s\\ncoreDir: %s\\n\nPlease try to correct this by running the NatLink Config Program (with administration rights)\"\"\"% (\n setting, coreDir)\n return\n else:\n baseDir = None\n print 'non expected core directory %s, cannot find baseDirectory\\nTry to run the Config Program with administrator rights'% coreDir\n userDir = self.getUserDirectory()\n # special for other user directories, insert also unimacro for actions etc.\n if userDir: \n self.InsertToSysPath(userDir)\n\n \n includeUnimacro = self.getIncludeUnimacroInPythonPath()\n if includeUnimacro:\n if not baseDir:\n print 'no baseDir found, cannot \"IncludeUnimacroInPythonPath\"'\n return\n unimacroDir = os.path.join(baseDir, '..', '..', 'unimacro')\n unimacroDir = os.path.normpath(unimacroDir)\n if os.path.isdir(unimacroDir):\n self.InsertToSysPath(unimacroDir)\n else:\n print 'no valid UnimacroDir found(%s), cannot \"IncludeUnimacroInPythonPath\"'% \\\n unimacroDir\n return 1",
"def is_global_prefix_set() -> bool:\n return os.path.isfile(BaseTestingManager.__GLOBAL_PREFIX_PATH)",
"def sanity_check_step(self):\n custom_paths = {\n 'files': [os.path.join('SASFoundation', self.version, 'sas')],\n 'dirs': ['licenses', os.path.join('SASFoundation', self.version, 'bin')],\n }\n super(EB_SAS, self).sanity_check_step(custom_paths=custom_paths)",
"def test_default_output_dir_exists():\n\n assert os.path.exists(\"corems_output\")",
"def testMainMount(self):\n mountpath = self.mountPath\n # Check that the mount lists users and collections\n self.assertEqual(sorted(os.listdir(mountpath)), sorted(['user', 'collection']))\n # Check that all known paths exist and that arbitrary other paths don't\n for testpath, contents in self.knownPaths.items():\n localpath = os.path.join(mountpath, testpath)\n # The path must exist\n self.assertTrue(os.path.exists(localpath))\n # The path plus an arbitrary string must not exist\n self.assertFalse(os.path.exists(localpath + '.other'))\n # If the path is a file, check that it equals the expected value\n # and reports a non-zero size\n if contents:\n size = os.path.getsize(localpath)\n with open(localpath) as file1:\n self.assertEqual(file1.read().strip(), contents)\n self.assertGreater(size, 0)\n # The mtime should be recent\n stat = os.stat(localpath)\n self.assertGreater(stat.st_mtime, time.time() - 1e5)\n # All parents should be folders and have zero size.\n subpath = testpath\n while '/' in subpath:\n subpath = subpath.rsplit('/')[0]\n localpath = os.path.join(mountpath, subpath)\n self.assertTrue(os.path.isdir(localpath))\n self.assertEqual(os.path.getsize(localpath), 0)\n # An arbitrary alternate file should not exist\n self.assertFalse(os.path.exists(localpath + '.other'))",
"def test_windows_paths(self):\n\n current_path = os.path\n import ntpath\n\n os.path = ntpath\n try:\n\n class NoCompileTemplate(Template):\n def _compile_from_file(self, path, filename):\n self.path = path\n return Template(\"foo bar\").module\n\n t1 = NoCompileTemplate(\n filename=\"c:\\\\foo\\\\template.html\",\n module_directory=\"c:\\\\modules\\\\\",\n )\n\n eq_(t1.uri, \"/foo/template.html\")\n eq_(t1.path, \"c:\\\\modules\\\\foo\\\\template.html.py\")\n\n t1 = NoCompileTemplate(\n filename=\"c:\\\\path\\\\to\\\\templates\\\\template.html\",\n uri=\"/bar/template.html\",\n module_directory=\"c:\\\\modules\\\\\",\n )\n\n eq_(t1.uri, \"/bar/template.html\")\n eq_(t1.path, \"c:\\\\modules\\\\bar\\\\template.html.py\")\n\n finally:\n os.path = current_path",
"def gethomepaths(self):\n cwd = os.getcwd()\n home_dir = os.path.expanduser('~')\n os.chdir(home_dir)\n fs_dir = os.path.abspath('.')\n\tos.chdir(cwd) # I hope this will always get you back to the original place...\n if home_dir!= fs_dir:\n return [home_dir, fs_dir]\n else:\n return [home_dir]",
"def check_execution_path():\n file_name = \"LICENSE\"\n if not os.path.exists(file_name):\n logging.error(\n \"Don't execute the script from a sub-directory. \"\n \"Switch to the root of the project folder\")\n return False\n return True",
"def test_multi_proto_dirs(self):\n\n def _check_all_res(res):\n ds, es, ms = res\n if es != []:\n raise RuntimeError(\"Got errors in results:\" +\n \"\\n\".join([str(s) for s in es]))\n self.assertEqual(ms, {})\n self.assertEqual(len(ds), 1)\n d = ds[0]\n self.assert_(d.is_error())\n self.assert_(d.dep_vars.is_satisfied())\n self.assertEqual(d.base_names[0], \"libc.so.1\")\n self.assertEqual(set(d.run_paths),\n set([\"lib\", \"usr/lib\"]))\n self.assertEqual(d.dep_key(),\n self.__path_to_key(self.paths[\"libc_path\"]))\n self.assertEqual(d.action.attrs[\"path\"],\n self.paths[\"curses_path\"])\n self.assert_(dependencies.is_file_dependency(d))\n\n t_path = self.make_manifest(self.int_elf_manf)\n self.make_elf(os.path.join(\"foo\", self.paths[\"curses_path\"]))\n self.make_elf(self.paths[\"libc_path\"], static=True)\n\n # This should fail because the \"foo\" directory is not given\n # as a proto_dir.\n d_map, es, ms, pkg_attrs = dependencies.list_implicit_deps(\n t_path, [self.proto_dir], {}, [], convert=False)\n if len(es) != 1:\n raise RuntimeError(\"Got errors in results:\" +\n \"\\n\".join([str(s) for s in es]))\n if es[0].file_path != self.paths[\"curses_path\"]:\n raise RuntimeError(\"Wrong file was found missing:\\n{0}\".format(\n es[0]))\n self.assertEqual(es[0].dirs, [self.proto_dir])\n self.assertEqual(ms, {})\n self.assert_(len(d_map) == 0)\n\n # This should work since the \"foo\" directory has been added to\n # the list of proto_dirs to use.\n d_map, es, ms, pkg_attrs = dependencies.list_implicit_deps(\n t_path, [self.proto_dir,\n os.path.join(self.proto_dir, \"foo\")], {}, [], convert=False)\n if es:\n raise RuntimeError(\"Got errors in results:\" +\n \"\\n\".join([str(s) for s in es]))\n self.assertEqual(ms, {})\n self.assert_(len(d_map) == 0)\n\n # This should be different because the empty text file\n # is found before the binary file.\n self.make_proto_text_file(self.paths[\"curses_path\"])\n d_map, es, ms, pkg_attrs = dependencies.list_implicit_deps(\n t_path, [self.proto_dir,\n os.path.join(self.proto_dir, \"foo\")], {}, [],\n remove_internal_deps=False, convert=False)\n if es:\n raise RuntimeError(\"Got errors in results:\" +\n \"\\n\".join([str(s) for s in es]))\n if len(ms) != 1:\n raise RuntimeError(\"Didn't get expected types of \"\n \"missing files:\\n{0}\".format(ms))\n self.assertEqual(ms.keys()[0], \"empty file\")\n self.assert_(len(d_map) == 0)\n\n # This should find the binary file first and thus produce\n # a depend action.\n d_map, es, ms, pkg_attrs = dependencies.list_implicit_deps(\n t_path, [os.path.join(self.proto_dir, \"foo\"),\n self.proto_dir], {}, [], remove_internal_deps=False,\n convert=False)\n if es:\n raise RuntimeError(\"Got errors in results:\" +\n \"\\n\".join([str(s) for s in es]))\n self.assertEqual(ms, {})\n self.assert_(len(d_map) == 1)\n\n # Check alternative proto_dirs with hardlinks.\n t_path = self.make_manifest(self.int_hardlink_manf)\n self.make_proto_text_file(os.path.join(\"foo\",\n self.paths[\"syslog_path\"]))\n # This test should fail because \"foo\" is not included in the\n # list of proto_dirs.\n ds, es, ms, pkg_attrs = \\\n dependencies.list_implicit_deps(t_path, [self.proto_dir],\n {}, [], convert=False)\n if len(es) != 1:\n raise RuntimeError(\"Got errors in results:\" +\n \"\\n\".join([str(s) for s in es]))\n if es[0].file_path != self.paths[\"syslog_path\"]:\n raise RuntimeError(\"Wrong file was found missing:\\n{0}\".format(\n es[0]))\n self.assertEqual(es[0].dirs, [self.proto_dir])\n self.assert_(len(ms) == 0)\n 
self.assert_(len(ds) == 1)\n\n # This test should pass because the needed directory has been\n # added to the list of proto_dirs.\n ds, es, ms, pkg_attrs = \\\n dependencies.list_implicit_deps(t_path,\n [self.proto_dir, os.path.join(self.proto_dir, \"foo\")],\n {}, [], convert=False)\n if es != []:\n raise RuntimeError(\"Got errors in results:\" +\n \"\\n\".join([str(s) for s in es]))\n self.assert_(len(ms) == 1)\n self.assert_(len(ds) == 0)\n\n # Check alternative proto_dirs work with python files and\n # scripts.\n\n def _py_check_all_res(res):\n ds, es, ms, pkg_attrs = res\n mod_pats = [\n \"{0}/__init__.py\", \"{0}.py\", \"{0}.pyc\", \"{0}.pyo\",\n \"{0}.so\", \"{0}module.so\",\n \"64/{0}.so\", \"64/{0}module.so\"\n ]\n mod_names = [\"foobar\", \"misc_test\", \"os\",\n \"search_storage\", \"minidom\"]\n pkg_names = [\"indexer_test\", \"pkg\", \"pkg_test\",\n \"xml\", \"dom\"]\n expected_deps = set([(\"python\",)] +\n [tuple(sorted([\n pat.format(n) for pat in mod_pats\n ]))\n for n in mod_names] +\n [(\"{0}/__init__.py\".format(n),) for n in pkg_names])\n if es != []:\n raise RuntimeError(\"Got errors in results:\" +\n \"\\n\".join([str(s) for s in es]))\n\n self.assertEqual(ms, {})\n for d in ds:\n self.assert_(d.is_error())\n if d.dep_vars is None:\n raise RuntimeError(\"This dep had \"\n \"depvars of None:{0}\".format(d))\n self.assert_(d.dep_vars.is_satisfied())\n if not d.dep_key()[0] in expected_deps:\n raise RuntimeError(\"Got this \"\n \"unexpected dep:{0}\\n\\nd:{1}\".format(\n d.dep_key()[0], d))\n expected_deps.remove(d.dep_key()[0])\n self.assertEqual(d.action.attrs[\"path\"],\n self.paths[\"indexer_path\"])\n if expected_deps:\n raise RuntimeError(\"Couldn't find these \"\n \"dependencies:\\n\" + \"\\n\".join(\n [str(s) for s in sorted(expected_deps)]))\n self.__debug = True\n t_path = self.make_manifest(self.ext_python_manf)\n\n self.make_proto_text_file(\n os.path.join(\"d5\", self.paths[\"indexer_path\"]),\n self.python_text)\n # This should have an error because it cannot find the file\n # needed.\n ds, es, ms, pkg_attrs = dependencies.list_implicit_deps(t_path,\n [self.proto_dir], {}, [], convert=False)\n if len(es) != 1:\n raise RuntimeError(\"Got errors in results:\" +\n \"\\n\".join([str(s) for s in es]))\n if es[0].file_path != self.paths[\"indexer_path\"]:\n raise RuntimeError(\"Wrong file was found missing:\\n{0}\".format(\n es[0]))\n self.assertEqual(es[0].dirs, [self.proto_dir])\n self.assertEqual(len(ds), 0)\n self.assertEqual(len(ms), 0)\n\n # Because d5 is in the list of proto dirs, this test should work\n # normally.\n _py_check_all_res(dependencies.list_implicit_deps(t_path,\n [self.proto_dir, os.path.join(self.proto_dir, \"d5\")], {},\n [], convert=False))",
"def test_output_exists():\n global out_dir\n assert_true(path.exists(path.join(out_dir, 'run.log')))\n assert_true(path.exists(path.join(out_dir, 'info.pickle')))\n assert_true(path.exists(path.join(out_dir, 'articles.pickle')))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the FAX redirectors via curl, or from a cached JSON file
|
def _getFAXRedirectors(self, computingSite, sourceSite, pandaID, url='http://waniotest.appspot.com/SiteToFaxEndpointTranslator'):
fax_redirectors_dictionary = {}
file_name = "fax_redirectors.json"
if os.path.exists(file_name):
# Read back the FAX redirectors from file
fax_redirectors_dictionary = readJSON(file_name)
if fax_redirectors_dictionary == {}:
# Attempt to get fax redirectors from Ilija Vukotic's google server
cmd = "curl --silent --connect-timeout 100 --max-time 120 -X POST --data \'computingsite=%s&sourcesite=%s&pandaID=%s\' %s" % (computingSite, sourceSite, pandaID, url)
tolog("Trying to get FAX redirectors: %s" % (cmd))
dictionary_string = commands.getoutput(cmd)
if dictionary_string != "":
# try to convert to a python dictionary
from json import loads
try:
fax_redirectors_dictionary = loads(dictionary_string)
except Exception, e:
tolog("!!WARNING!!4444!! Failed to parse fax redirector json: %s" % (e))
else:
tolog("Backing up dictionary")
status = writeJSON("fax_redirectors.json", fax_redirectors_dictionary)
if not status:
tolog("Failed to backup the FAX redirectors")
return fax_redirectors_dictionary
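A minimal Python 3 sketch of the same cache-then-curl pattern (illustrative only, not the pilot's implementation): it swaps the legacy commands module for subprocess and replaces the readJSON/writeJSON helpers with plain json calls, while the service URL and cache file name are taken from the code above.
import json
import os
import subprocess

def get_fax_redirectors(computing_site, source_site, panda_id,
                        url='http://waniotest.appspot.com/SiteToFaxEndpointTranslator',
                        cache_file='fax_redirectors.json'):
    # Reuse a cached copy when one exists and parses cleanly
    if os.path.exists(cache_file):
        try:
            with open(cache_file) as cached:
                return json.load(cached)
        except ValueError:
            pass  # cache is unreadable, fall through and re-fetch
    # Ask the translation service for the redirectors
    data = 'computingsite=%s&sourcesite=%s&pandaID=%s' % (computing_site, source_site, panda_id)
    cmd = ['curl', '--silent', '--connect-timeout', '100', '--max-time', '120',
           '-X', 'POST', '--data', data, url]
    output = subprocess.run(cmd, capture_output=True, text=True).stdout
    redirectors = {}
    if output:
        try:
            redirectors = json.loads(output)
        except ValueError as exc:
            print("Failed to parse fax redirector json: %s" % exc)
        else:
            # Back the dictionary up for the next call
            with open(cache_file, 'w') as backup:
                json.dump(redirectors, backup)
    return redirectors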
|
[
"def follow_redirect(self, response):\n new_response = response\n while new_response.status_code in (301, 302, 303, 307):\n scheme, netloc, path, query, fragment = urlparse.urlsplit(new_response['location'])\n new_response = self.client.get(path, QueryDict(query))\n return new_response",
"def find_callback_redirect_url(self):\n results = APIRequests.get_request(\n \"find_callback_redirect_url\",\n self.instance_key,\n dev_mode=self.dev_mode,\n url_override=self.url_override,\n )\n return {\n k: results.get(k, None)\n for k in [\"url\", \"key\"]\n if results.get(k, None) is not None\n }",
"def test_get_redirect(ini, count):\n logging.info(\"Count: {}\".format(count))\n resp = requests.get(str(get_url(ini) + '/redirect/' + str(count)))\n logging.info(\"Response: {}\".format(resp.text))\n assert resp.status_code == 200, \"Wrong status code of response.\"\n assert len(resp.history) == int(count), \"Wrong redirection number.\"",
"def _get_legend_gitlab_redirect_uris(self):\n raise NotImplementedError(\"No GitLab redirect URIs defined.\")",
"def existing_fwd(ctx):\n existing_forwarders = get_existing_forwarders(ctx.obj[FORWARDERS_HTML])\n\n for fwd in existing_forwarders:\n print(fwd)",
"def redirects(self, limit='max', namespace=None, getinfo=None, **evil):\n self.info() #needed to get pageid\n params = {\n 'action': 'query',\n 'prop': 'redirects',\n 'titles': self.title,\n 'rdlimit': limit,\n 'rdnamespace': namespace,\n }\n params.update(evil)\n return self._generate(\n params,\n Page,\n ('query', 'pages', '__page', 'redirects'),\n getinfo\n )",
"def get_all_requests():",
"def getFAXRedirectors(self, computingSite, sourceSite, jobId):\n\n fax_redirectors_dictionary = {}\n\n # Is the sourceSite set?\n if sourceSite and sourceSite.lower() != 'null':\n # Get the FAX redirectors (if the method returns an empty dictionary, the keys and values will be set below)\n fax_redirectors_dictionary = self._getFAXRedirectors(computingSite, sourceSite, jobId)\n\n # Verify the dictionary\n if fax_redirectors_dictionary.has_key('computingsite') and fax_redirectors_dictionary['computingsite'] != None:\n if fax_redirectors_dictionary['computingsite'] == \"\" or fax_redirectors_dictionary['computingsite'].lower() == \"null\":\n fax_redirectors_dictionary['computingsite'] = readpar('faxredirector')\n tolog(\"!!WARNING!!5555!! FAX computingsite is unknown, using default AGIS value (%s)\" % fax_redirectors_dictionary['computingsite'])\n else:\n fax_redirectors_dictionary['computingsite'] = readpar('faxredirector')\n tolog(\"!!WARNING!!5556!! FAX computingsite is unknown, using default AGIS value (%s)\" % fax_redirectors_dictionary['computingsite'])\n if fax_redirectors_dictionary.has_key('sourcesite') and fax_redirectors_dictionary['sourcesite'] != None:\n if fax_redirectors_dictionary['sourcesite'] == \"\" or fax_redirectors_dictionary['sourcesite'].lower() == \"null\":\n fax_redirectors_dictionary['sourcesite'] = readpar('faxredirector')\n tolog(\"!!WARNING!!5555!! FAX sourcesite is unknown, using default AGIS value (%s)\" % fax_redirectors_dictionary['sourcesite'])\n else:\n fax_redirectors_dictionary['sourcesite'] = readpar('faxredirector')\n tolog(\"!!WARNING!!5556!! FAX aourcesite is unknown, using default AGIS value (%s)\" % fax_redirectors_dictionary['sourcesite'])\n\n else:\n tolog(\"sourceSite is not set, use faxredirector value from AGIS\")\n fax_redirectors_dictionary['computingsite'] = readpar('faxredirector')\n fax_redirectors_dictionary['sourcesite'] = readpar('faxredirector')\n\n return fax_redirectors_dictionary",
"def list_URLs(app):\n with app.application.app_context():\n links = []\n for rule in app.application.url_map.iter_rules():\n # Filter out rules we can't navigate to in a browser\n # and rules that require parameters\n if 'GET' in rule.methods and has_no_empty_params(rule):\n url = get_url_for(rule.endpoint)\n links.append(url)\n return links",
"def get_all_statement_forwarders():\n resp = connect('api/v2/statementforwarding/', 200, 'get')\n return resp.json()",
"def authorized():\n MSGRAPH.redirect_uri_handler()",
"def work_from_viaf(viaf):\n print \"Looking up\", viaf\n\n url= \"%s/justlinks.json\" % viaf\n try:\n data = loadjsonurl(url)\n except ValueError as e:\n print \"error: unable to load json from '%s'\" % (url)\n return []\n oclcurls = []\n\n # Map Library of Congress IDs to worldcat URLs\n if 'LC' in data:\n for oclc in data['LC']:\n if \"no\" == oclc[0:2]:\n # Not quite sure how to find works for such IDS\n oclcurl = \"http://id.loc.gov/authorities/names/%s.html\" % oclc\n print \"error: Not following %s\" %oclcurl\n else:\n oclcurl = \"http://www.worldcat.org/identities/lccn-%s-%s/\" % \\\n (oclc[0:3], oclc[3:])\n# print oclcurl\n oclcurls.append(oclcurl)\n workurls = []\n for u in oclcurls:\n print \" loading\", u\n try:\n html = urllib2.urlopen(u).read()\n# print html\n root = lxml.html.fromstring(html)\n for div in root.cssselect(\"oclcnum\"):\n oclcnum = div.text\n oclcurl = \"http://www.worldcat.org/oclc/%s\" % oclcnum[3:]\n workurls.append(oclcurl)\n except urllib2.HTTPError as e:\n print \"error: unable to fetch %s\" % u\n# print workurls\n return workurls",
"def getUrls(domain):\n wayback_urls = set()\n history = requests.get(API_URL + domain).text.splitlines()\n for line in history:\n record = parse_wayback_record(line)\n if record.mimetype == \"text/html\":\n url = domain + record.path\n wayback_url = BASE_URL + record.timestamp + \"/\" + url\n wayback_urls.add(wayback_url)\n return wayback_urls",
"def redirect_response(self, redirect):\n assert isinstance(redirect, core.Redirect)\n web.header('Location', redirect.url)\n web.ctx.status = '303 See Other'\n content_type = 'text/uri-list'\n web.header('Content-Type', content_type)\n web.ctx.hatrac_content_type = content_type\n body = redirect.url + '\\n'\n nbytes = len(body)\n web.header('Content-Length', nbytes)\n web.ctx.hatrac_request_content_range = '*/%d' % nbytes\n return body",
"def _http_check_url_rec_handle_redir(r, redirects):\n\n # If Location is in the headers\n if \"Location\" in r.headers:\n url_redir = r.headers[\"Location\"]\n redirects.append(url_redir)\n\n # Loop back in the recursion\n return FME_utils._http_check_url_rec(url_redir, redirects)\n\n return False",
"def test_api_v1_authenticate_identity_redirect_url_get(self):\n pass",
"def redirect(self) -> 'outputs.GoogleCloudRecaptchaenterpriseV1FirewallActionRedirectActionResponse':\n return pulumi.get(self, \"redirect\")",
"def film_links():\n data = parse(\"https://swapi.dev/api/films/\")\n film_links=[]\n for i in data['results']:\n for key,value in i.items():\n if key == \"characters\":\n film_links.extend(value)\n return film_links",
"def getProxyHistory(self):\n # type: () -> List[IHttpRequestResponse]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the FAX redirectors, primarily from the Google server, falling back to the schedconfig.faxredirector value
|
def getFAXRedirectors(self, computingSite, sourceSite, jobId):
fax_redirectors_dictionary = {}
# Is the sourceSite set?
if sourceSite and sourceSite.lower() != 'null':
# Get the FAX redirectors (if the method returns an empty dictionary, the keys and values will be set below)
fax_redirectors_dictionary = self._getFAXRedirectors(computingSite, sourceSite, jobId)
# Verify the dictionary
if fax_redirectors_dictionary.has_key('computingsite') and fax_redirectors_dictionary['computingsite'] != None:
if fax_redirectors_dictionary['computingsite'] == "" or fax_redirectors_dictionary['computingsite'].lower() == "null":
fax_redirectors_dictionary['computingsite'] = readpar('faxredirector')
tolog("!!WARNING!!5555!! FAX computingsite is unknown, using default AGIS value (%s)" % fax_redirectors_dictionary['computingsite'])
else:
fax_redirectors_dictionary['computingsite'] = readpar('faxredirector')
tolog("!!WARNING!!5556!! FAX computingsite is unknown, using default AGIS value (%s)" % fax_redirectors_dictionary['computingsite'])
if fax_redirectors_dictionary.has_key('sourcesite') and fax_redirectors_dictionary['sourcesite'] != None:
if fax_redirectors_dictionary['sourcesite'] == "" or fax_redirectors_dictionary['sourcesite'].lower() == "null":
fax_redirectors_dictionary['sourcesite'] = readpar('faxredirector')
tolog("!!WARNING!!5555!! FAX sourcesite is unknown, using default AGIS value (%s)" % fax_redirectors_dictionary['sourcesite'])
else:
fax_redirectors_dictionary['sourcesite'] = readpar('faxredirector')
tolog("!!WARNING!!5556!! FAX aourcesite is unknown, using default AGIS value (%s)" % fax_redirectors_dictionary['sourcesite'])
else:
tolog("sourceSite is not set, use faxredirector value from AGIS")
fax_redirectors_dictionary['computingsite'] = readpar('faxredirector')
fax_redirectors_dictionary['sourcesite'] = readpar('faxredirector')
return fax_redirectors_dictionary
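The per-key verification above repeats the same fallback twice; a compact sketch of that logic as a helper (an assumption, not part of the pilot), where `default` would be `readpar('faxredirector')` in the surrounding code:
def apply_fax_fallback(redirectors, default, keys=('computingsite', 'sourcesite')):
    # Replace missing, empty or 'null' entries with the default AGIS value
    for key in keys:
        value = redirectors.get(key)
        if not value or str(value).lower() == 'null':
            redirectors[key] = default
    return redirectors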
|
[
"def _getFAXRedirectors(self, computingSite, sourceSite, pandaID, url='http://waniotest.appspot.com/SiteToFaxEndpointTranslator'):\n\n fax_redirectors_dictionary = {}\n file_name = \"fax_redirectors.json\" \n if os.path.exists(file_name):\n # Read back the FAX redirectors from file\n fax_redirectors_dictionary = readJSON(file_name)\n\n if fax_redirectors_dictionary == {}:\n # Attempt to get fax redirectors from Ilija Vukotic's google server\n cmd = \"curl --silent --connect-timeout 100 --max-time 120 -X POST --data \\'computingsite=%s&sourcesite=%s&pandaID=%s\\' %s\" % (computingSite, sourceSite, pandaID, url)\n tolog(\"Trying to get FAX redirectors: %s\" % (cmd))\n dictionary_string = commands.getoutput(cmd)\n if dictionary_string != \"\":\n # try to convert to a python dictionary\n from json import loads\n try:\n fax_redirectors_dictionary = loads(dictionary_string)\n except Exception, e:\n tolog(\"!!WARNING!!4444!! Failed to parse fax redirector json: %s\" % (e))\n else:\n tolog(\"Backing up dictionary\")\n status = writeJSON(\"fax_redirectors.json\", fax_redirectors_dictionary)\n if not status:\n tolog(\"Failed to backup the FAX redirectors\")\n\n return fax_redirectors_dictionary",
"def getFax(self):\n return self._fax",
"def existing_fwd(ctx):\n existing_forwarders = get_existing_forwarders(ctx.obj[FORWARDERS_HTML])\n\n for fwd in existing_forwarders:\n print(fwd)",
"def axfr_next(self):\n return _ldns.ldns_axfr_next(self)\n #parameters: ldns_resolver *,\n #retvals: ldns_rr *",
"def _get_legend_gitlab_redirect_uris(self):\n raise NotImplementedError(\"No GitLab redirect URIs defined.\")",
"def list_forwarders(self, email_domain):\n r = self.make_call('/execute/Email/list_forwarders?domain={}'.format(email_domain))\n if r is None:\n return None\n\n forwards = {}\n for d in r['data']:\n forwards[d['dest']] = d['forward']\n\n return forwards",
"def _get_default_gws():\n\n result = []\n dr_list = _ipr.get_default_routes(family=socket.AF_INET)\n for dr in dr_list:\n ip = dr.get_attr(\"RTA_GATEWAY\")\n oif = dr.get_attr(\"RTA_OIF\")\n met = dr.get_attr(\"RTA_PRIORITY\")\n ifname = _ipr.get_links(oif)[0].get_attr(\"IFLA_IFNAME\")\n result.append((ip, ifname, met))\n return result",
"def referer(cls, extension):\n\n from PyFunceble import Lookup\n\n manual_server = {\n 'aaa': 'whois.nic.aaa',\n 'abb': 'whois.nic.abb',\n 'able': 'whois.nic.able',\n 'accenture': 'whois.nic.accenture',\n 'aetna': 'whois.nic.aetna',\n 'aig': 'whois.nic.aig',\n 'americanexpress': 'whois.nic.americanexpress',\n 'amex': 'whois.nic.amex',\n 'amica': 'whois.nic.amica',\n 'amsterdam': 'whois.nic.amsterdam',\n 'analytics': 'whois.nic.analytics',\n 'aramco': 'whois.nic.aramco',\n 'athleta': 'whois.nic.athleta',\n 'audible': 'whois.nic.audible',\n 'author': 'whois.nic.author',\n 'aws': 'whois.nic.aws',\n 'axa': 'whois.nic.axa',\n 'azure': 'whois.nic.azure',\n 'baby': 'whois.nic.baby',\n 'banamex': 'whois.nic.banamex',\n 'bananarepublic': 'whois.nic.bananarepublic',\n 'baseball': 'whois.nic.baseball',\n 'bharti': 'whois.nic.bharti',\n 'bing': 'whois.nic.bing',\n 'bloomberg': 'whois.nic.bloomberg',\n 'bm': 'whois.afilias-srs.net',\n 'book': 'whois.nic.book',\n 'booking': 'whois.nic.booking',\n 'bot': 'whois.nic.bot',\n 'bz': 'whois.afilias-grs.net',\n 'buzz': 'whois.nic.buzz',\n 'call': 'whois.nic.call',\n 'calvinklein': 'whois.nic.calvinklein',\n 'caravan': 'whois.nic.caravan',\n 'cartier': 'whois.nic.cartier',\n 'cbn': 'whois.nic.cbn',\n 'cbre': 'whois.nic.cbre',\n 'cd': 'chois.nic.cd',\n 'chase': 'whois.nic.chase',\n 'circle': 'whois.nic.circle',\n 'cisco': 'whois.nic.cisco',\n 'citadel': 'whois.nic.citadel',\n 'citi': 'whois.nic.citi',\n 'citic': 'whois.nic.citic',\n 'cm': 'whois.netcom.cm',\n 'coupon': 'whois.nic.coupon',\n 'crown': 'whois.nic.crown',\n 'crs': 'whois.nic.crs',\n 'fj': 'whois.usp.ac.fj',\n 'ga': 'whois.my.ga',\n 'gh': 'whois.nic.gh',\n 'int': 'whois.iana.org',\n 'kw': 'whois.nic.kw',\n 'lc': 'whois2.afilias-grs.net',\n 'lk': 'whois.nic.lk',\n 'microsoft': 'whois.nic.microsoft',\n 'nagoya': 'whois.nic.nagoya',\n 'nyc': 'whois.nic.nyc',\n 'ps': 'whois.pnina.ps',\n 'ren': 'whois.nic.ren',\n 'rw': 'whois.ricta.org.rw',\n 'shop': 'whois.nic.shop',\n 'sl': 'whois.nic.sl',\n 'stream': 'whois.nic.stream',\n 'tokyo': 'whois.nic.tokyo',\n 'uno': 'whois.nic.uno',\n 'za': 'whois.registry.net.za'\n }\n\n if extension in manual_server:\n return manual_server[extension]\n else:\n whois_record = Lookup().whois(Settings.iana_server, 'hello.' + extension, 10)\n\n if whois_record is not None:\n regex_referer = r'(refer:)\\s+(.*)'\n\n if Helpers.Regex(\n whois_record,\n regex_referer,\n return_data=False).match():\n return Helpers.Regex(\n whois_record,\n regex_referer,\n return_data=True,\n group=2).match()\n return None",
"def get_urls_for_pac_files():\n pacFiles = []\n if sys.platform == 'win32':\n try:\n import _winreg as winreg # used from python 2.0-2.6\n except:\n import winreg # used from python 2.7 onwards\n net = winreg.OpenKey(\n winreg.HKEY_CURRENT_USER,\n \"Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet Settings\"\n )\n nSubs, nVals, lastMod = winreg.QueryInfoKey(net)\n subkeys = {}\n for i in range(nVals):\n thisName, thisVal, thisType = winreg.EnumValue(net, i)\n subkeys[thisName] = thisVal\n if 'AutoConfigURL' in subkeys.keys() and len(subkeys['AutoConfigURL']) > 0:\n pacFiles.append(subkeys['AutoConfigURL'])\n elif sys.platform == 'darwin':\n import plistlib\n sysPrefs = plistlib.readPlist(\n '/Library/Preferences/SystemConfiguration/preferences.plist')\n networks = sysPrefs['NetworkServices']\n # loop through each possible network (e.g. Ethernet, Airport...)\n for network in networks.items():\n netKey, network = network # the first part is a long identifier\n if 'ProxyAutoConfigURLString' in network['Proxies'].keys():\n pacFiles.append(network['Proxies']['ProxyAutoConfigURLString'])\n return list(set(pacFiles)) # remove redundant ones",
"def get_caller_locations(self,\r\n account_number,\r\n page=None,\r\n limit=None):\r\n # The base uri for api requests\r\n query_builder = Configuration.BASE_URI\r\n \r\n # Prepare query string for API call\r\n query_builder += \"/accounts/{account_number}/caller_locations\"\r\n\r\n # Process optional template parameters\r\n query_builder = APIHelper.append_url_with_template_parameters(query_builder, { \r\n \"account_number\": account_number\r\n })\r\n\r\n # Process optional query parameters\r\n query_parameters = {\r\n \"page\": page,\r\n \"limit\": limit\r\n }\r\n \r\n # Validate and preprocess url\r\n query_url = APIHelper.clean_url(query_builder)\r\n\r\n # Prepare headers\r\n headers = {\r\n \"user-agent\": \"APIMATIC 2.0\",\r\n \"accept\": \"application/json\",\r\n \"X-Auth-Token\": Configuration.x_auth_token,\r\n \"X-Auth-Token\": Configuration.x_auth_token\r\n }\r\n\r\n # Prepare the API call.\r\n http_request = self.http_client.get(query_url, headers=headers, query_parameters=query_parameters)\r\n\r\n # Invoke the API call to fetch the response.\r\n response = self.http_client.execute_as_string(http_request);\r\n\r\n # Endpoint error handling using HTTP status codes.\r\n if response.status_code == 401:\r\n raise APIException(\"You are not authenticated\", 401, response.raw_body)\r\n elif response.status_code == 403:\r\n raise APIException(\"This action needs a valid WSSE header\", 403, response.raw_body)\r\n elif response.status_code == 404:\r\n raise APIException(\"Resource not found\", 404, response.raw_body)\r\n\r\n # Global error handling using HTTP status codes.\r\n self.validate_response(response) \r\n\r\n return response.raw_body",
"def get_site_scheme_and_netloc():\n parse_result = urlparse(get_service_url())\n return \"%s://%s\" % (parse_result.scheme, parse_result.netloc)",
"def get_federal_cyhy_requests(db):\n fed_orgs = get_all_descendants(db, 'FEDERAL')\n try:\n requests = db.requests.find({'retired': {'$ne': True}, 'report_types': 'CYHY', '_id': {'$in': fed_orgs}}, {'_id': True, 'agency.acronym': True, 'agency.contacts.email': True, 'agency.contacts.type': True})\n except TypeError:\n logging.critical('There was an error with the MongoDB query that retrieves the list of agencies', exc_info=True)\n raise\n\n return requests",
"def find_referral_server(self):\n s = self.find_first_section(('ReferralServer',))\n if s:\n server = (s[0][2]).lstrip('/')\n port = int(s[0][3])\n return server, port\n else:\n return None",
"def getUrls(domain):\n wayback_urls = set()\n history = requests.get(API_URL + domain).text.splitlines()\n for line in history:\n record = parse_wayback_record(line)\n if record.mimetype == \"text/html\":\n url = domain + record.path\n wayback_url = BASE_URL + record.timestamp + \"/\" + url\n wayback_urls.add(wayback_url)\n return wayback_urls",
"def get_webhook_whitelist():\n default_whitelist = ['0.0.0.0/0']\n return default_whitelist",
"def find_forward_slink(self, fsa_reltype_groups):\n\n logger.debug(\"len(fsa_reltype_groups) = %s\" % len(fsa_reltype_groups))\n fsa_lists = fsa_reltype_groups[0]\n reltypes_list = fsa_reltype_groups[1]\n event_context = self.parent[self.position + 1:]\n return self._find_slink(event_context, fsa_lists, reltypes_list)",
"def getFingerConfig(self, qrobot : Vector) -> Vector:\n return [qrobot[i] for i in self.fingerLinks]",
"def get_url(family):\n\n url = f'{BASE_URL}?token={TOKEN}'\n url += '&filter_not[common_name]=null&filter_not[image_url]=null'\n if family != 'general':\n url += f'&filter[family_common_name]={family}'\n return url",
"def get_project_forwarding_rules(self, project):\n forwarding_rules = []\n\n def rules_pager(page_token):\n return api.CLIENTS.compute.forwardingRules().aggregatedList(\n project=project, pageToken=page_token)\n\n for _, forwarding_rules_list in api.resource_iterator(rules_pager):\n for rule in forwarding_rules_list.get('forwardingRules', []):\n forwarding_rules.append(rule)\n return forwarding_rules"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find the global path for the given file
|
def findGlobalFilePath(self, surl, dsname, computingSite, sourceSite, jobId=None):
global_path = ""
filename = os.path.basename(surl)
# should dq2-list-files be used? If not, use to_native_lfn() directly to guess the path
useDQ2 = False
if useDQ2:
# get the global file paths from file/DQ2
paths = self.getGlobalFilePathsDQ2(dsname)
if paths != []:
# locate the global path
for path in paths:
if filename in path:
# does the file path begin with 'root://'?
if self.verifyGlobalPath(path, verbose=True):
global_path = path
break
else:
# abort
tolog("!!WARNING!!3333!! Failed to get global file path")
else:
# get the global file paths from file/DQ2
paths = self.getGlobalFilePaths(surl, dsname, computingSite, sourceSite, jobId=jobId)
if paths[0][-1] == ":": # this is necessary to prevent rucio paths having ":/" as will be the case if os.path.join is used
global_path = paths[0] + filename
else: # for old style paths not using the ":" separator
global_path = os.path.join(paths[0], filename)
return global_path
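A small illustration of the path-joining branch above, with hypothetical prefix and file values: rucio-style prefixes already end in ':' and only need the filename appended, while old-style prefixes need a '/' inserted.
import os

filename = "EVNT.01234._000001.pool.root.1"                           # hypothetical file name
rucio_prefix = "root://redirector.example.org:1094//atlas/rucio:"     # hypothetical rucio-style prefix
old_prefix = "root://redirector.example.org:1094//atlas/dq2"          # hypothetical old-style prefix

print(rucio_prefix + filename)             # ...rucio:EVNT.01234._000001.pool.root.1
print(os.path.join(old_prefix, filename))  # ...dq2/EVNT.01234._000001.pool.root.1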
|
[
"def _find_global_config_file():\n # Check test-imposed environment var P4GF_LOG_CONFIG_FILE.\n if p4gf_const.P4GF_TEST_LOG_CONFIG_PATH in os.environ:\n path = os.environ[p4gf_const.P4GF_TEST_LOG_CONFIG_PATH]\n if os.path.exists(path):\n return path\n\n # Check /etc/git-fusion.log.conf .\n if os.path.exists(_config_filename_default):\n return _config_filename_default\n\n return None",
"def get_relative_path(self, file):\n pass",
"def get_file_path(environ, req_file_desc):\n return __get_path(environ, __ISFILE, req_file_desc)",
"def find_file_path(root_directory, base_file, default_dir):\n path_verbatim = os.path.join(root_directory, base_file)\n if os.path.isfile(path_verbatim):\n return path_verbatim\n\n path_inferred = os.path.join(root_directory, default_dir)\n path_inferred = os.path.join(path_inferred, base_file)\n return path_inferred",
"def get_path() -> str:\n places = os.walk(os.path.abspath(os.path.join(__file__, \"../..\")))\n\n def condition(files):\n return all(file in files for file in needed_files)\n\n return next((path for path, dirs, files in places if condition(files)), None)",
"def search_file(filename, search_path):\n file_found = 0\n paths = string.split(search_path, ':')\n for path in paths:\n if os.path.exists(os.path.join(path, filename)):\n file_found = 1\n break\n if file_found:\n return os.path.abspath(os.path.join(path, filename))\n else:\n return None",
"def find_filepath(\n filename,\n basepaths=(os.path.curdir, DATA_PATH, BASE_DIR, '~', '~/Downloads', os.path.join('/', 'tmp'), '..')):\n if os.path.isfile(filename):\n return filename\n for basedir in basepaths:\n fullpath = expand_filepath(os.path.join(basedir, filename))\n if os.path.isfile(fullpath):\n return fullpath\n return False",
"def file_path(self) -> str:\n return self.files[self.__main['location']['file']]",
"def getResolvedFileName(filename, pathenv=\"\"):\n if os.access(filename,os.R_OK):\n return filename\n pathlist = os.getenv(pathenv,'').split(os.pathsep)\n for path in pathlist:\n f = os.path.join( path, filename )\n if os.access( f, os.R_OK ):\n return f\n raise RuntimeError(\"Can't read file %s, neither locally nor in %s\" % (filename, pathenv) )",
"def findConfigFile(cls, filename):\n\n paths = cls.getConfigPaths()\n for p in paths:\n testPath = os.path.join(p, filename)\n if os.path.isfile(testPath):\n return os.path.join(p, filename)",
"def search_file(filename, search_path):\n for path in string.split(search_path, \":\"):\n candidate = os.path.join(path, filename)\n if os.path.exists(candidate):\n return os.path.abspath(candidate)\n return None",
"def _find_word_set_path(self, file_name):\n return os.path.split(os.path.abspath(__file__))[0] + os.sep + file_name",
"def which(searchFile) :\n for searchPath in os.environ[\"PATH\"].split(os.pathsep):\n test=os.path.join(searchPath,searchFile)\n if os.path.isfile(test): return test\n\n return None",
"def get_absolute_path(self, file):\n pass",
"def resolve_file_path(file_path):\n if not os.path.isfile(file_path):\n # Allow loading config files relative to rltime/configs directory\n base_path = os.path.dirname(rltime.__file__)\n rel_file_path = os.path.join(base_path, \"configs\", file_path)\n if os.path.isfile(rel_file_path):\n return rel_file_path\n return file_path",
"def get_filepath_from_pythonpath(filename):\n for path in sys.path:\n real_path = os.path.join(path, filename)\n if exists(real_path):\n return real_path\n \n return None",
"def _file_path_from_env(self):\n path = os.getenv(self.FILE_ENV_VAR, default=None)\n\n if path and os.path.isfile(path):\n return path",
"def _getFileLocalOrPath(filename, pathenv):\n if os.path.exists(filename):\n log.info( \"Using local file %s\" % filename)\n return filename\n\n pathlist = os.getenv(pathenv,'').split(os.pathsep)\n resolvedfilename = FindFile(filename, pathlist, os.R_OK)\n if resolvedfilename:\n return resolvedfilename\n\n log.fatal(\"No file %s found locally nor in %s\" % (filename, os.getenv('CORAL_DBLOOKUP_PATH')) )\n return None",
"def get_file_path(f):\n return os.path.split(f)[0]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parse a storage-specific write concern. Optional; uses Monty WriteConcern by default. Receives MontyClient kwargs, which should be parsed into an instance of the `montydb.base.WriteConcern` class.
|
def wconcern_parser(self, client_kwargs):
pass
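A hedged sketch of what an override of this hook might look like; it uses pymongo's WriteConcern as a stand-in, on the assumption that montydb's WriteConcern mirrors the same w/wtimeout/j/fsync keyword arguments (not verified against montydb's API):
from pymongo.write_concern import WriteConcern  # stand-in for montydb.base.WriteConcern

def wconcern_parser(self, client_kwargs):
    # Keep only the write-concern related keyword arguments, if any were passed
    keys = ("w", "wtimeout", "j", "fsync")
    kwargs = {key: client_kwargs[key] for key in keys if key in client_kwargs}
    # Fall back to the storage default (None) when nothing was specified
    return WriteConcern(**kwargs) if kwargs else None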
|
[
"def set_write_disposition(write_disposition):\n if write_disposition == 'WRITE_APPEND':\n return bigquery.WriteDisposition.WRITE_APPEND\n elif write_disposition == 'WRITE_EMPTY':\n return bigquery.WriteDisposition.WRITE_EMPTY\n elif write_disposition == 'WRITE_TRUNCATE':\n return bigquery.WriteDisposition.WRITE_TRUNCATE\n else:\n raise KeyError(\"{} is not a valid write_disposition key\".format(write_disposition))",
"def db_for_write(self, model, **hints):\n if model._meta.model_name.startswith('postgres'):\n return 'postgres'\n return None",
"def get_db_for_write(env, db=None):\n if db:\n return (db, False)\n else:\n return (env.get_db_cnx(), True)",
"def cli_cosmosdb_mongodb_database_throughput_migrate(client,\n resource_group_name,\n account_name,\n database_name,\n throughput_type):\n if throughput_type == \"autoscale\":\n return client.begin_migrate_mongo_db_database_to_autoscale(resource_group_name, account_name, database_name)\n return client.begin_migrate_mongo_db_database_to_manual_throughput(resource_group_name, account_name, database_name)",
"def db_for_write(self, model, **hints):\n return self.use_analytics_db_or_default(model.__name__)",
"def getConnection(self, **kwargs):\n connectionParams = _getDefaults(kwargs)\n kwargs = {\n 'cache_module_name':'memcache',\n 'keep_history': kwargs.get('zodb_keep_history', False)\n }\n adapter = relstorage.adapters.postgresql.PostgreSQLAdapter(\n dsn=\"dbname=%(dbname)s port=%(port)s user=%(user)s password=%(password)s\" % connectionParams,\n options=relstorage.options.Options(**kwargs))\n\n # rename the cache_servers option to not have the zodb prefix.\n if 'zodb_cacheservers' in kwargs:\n kwargs['cache_servers'] = kwargs['zodb_cacheservers']\n\n storage = relstorage.storage.RelStorage(adapter, **kwargs)\n cache_size = kwargs.get('zodb_cachesize', 1000)\n db = ZODB.DB(storage, cache_size=cache_size)\n return db, storage",
"def open ( filename ,\n mode = 'c' ,\n writeback = False ,\n root_only = False , *args ) :\n db_type = RootOnlyShelf if root_only else RootShelf\n return db_type ( filename ,\n mode ,\n writeback , * args )",
"def __init__(self,\n logger,\n job_num,\n mongos_executable=None,\n mongos_options=None,\n mongod_executable=None,\n mongod_options=None,\n dbpath_prefix=None,\n preserve_dbpath=False,\n num_shards=1,\n separate_configsvr=True,\n enable_sharding=None,\n auth_options=None):\n\n interface.Fixture.__init__(self, logger, job_num)\n\n if \"dbpath\" in mongod_options:\n raise ValueError(\"Cannot specify mongod_options.dbpath\")\n\n self.mongos_executable = mongos_executable\n self.mongos_options = utils.default_if_none(mongos_options, {})\n self.mongod_executable = mongod_executable\n self.mongod_options = utils.default_if_none(mongod_options, {})\n self.preserve_dbpath = preserve_dbpath\n self.num_shards = num_shards\n self.separate_configsvr = separate_configsvr\n self.enable_sharding = utils.default_if_none(enable_sharding, [])\n self.auth_options = auth_options\n\n # Command line options override the YAML configuration.\n dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)\n dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)\n self._dbpath_prefix = os.path.join(dbpath_prefix,\n \"job%d\" % (self.job_num),\n config.FIXTURE_SUBDIR)\n\n self.configsvr = None\n self.mongos = None\n self.shards = []",
"def test_both_prefixed_and_not_in_extra_field(self, conn_type):\n extra_config = ConnectionExtraConfig(\n conn_type=conn_type,\n conn_id=\"test-conn-id\",\n extra={\"arg1\": \"foo\", f\"extra__{conn_type}__arg1\": \"bar\"},\n )\n assert extra_config.get(\"arg1\") == \"bar\"",
"def instance():\n # create the default access\n login=os.getenv(\"MGDBLOGIN\",\"NONE\")\n if (login != \"NONE\"):\n \n userinfo=login.split(\"@\")[0]\n hostinfo=login.split(\"@\")[1]\n dbname=login.split(\"@\")[2]\n user=userinfo.split(\"/\")[0]\n pwd=userinfo.split(\"/\")[1]\n host=hostinfo.split(\":\")[0]\n port=int(hostinfo.split(\":\")[1])\n #print(\"MGROC::INSTANCE() \",host,port,dbname,user,pwd)\n _wdd=MongoRoc(host,port,dbname,user,pwd)\n return _wdd\n else:\n if os.path.isfile(\"/etc/.mongoroc.json\"):\n f=open(\"/etc/.mongoroc.json\")\n s=json.loads(f.read())\n _wdd=MongoRoc(s[\"host\"],s[\"port\"],s[\"db\"],s[\"user\"],s[\"pwd\"])\n f.close()\n return _wdd\n else:\n return None",
"def build_connection_mock(cls, *args, **kwargs) -> Connection:\n raise NotImplementedError",
"def __prepare_quota_limit_object(limit_type, **kwargs):\n object_name = kwargs.pop(\"obj_name\", None)\n obj = None\n if object_name:\n obj = QUOTA_LIMITS[limit_type][OBJ_API].find(\n object_name\n )\n try:\n quota_limit_obj = prepare_ds_object(\n QUOTA_LIMITS[limit_type][CLASS_NAME], **kwargs\n )\n except exceptions.RHEVMEntityException as e:\n util.logger.error(\n \"Failed to prepare quota %s limit object: %s\", limit_type, e\n )\n return None\n if hasattr(quota_limit_obj, limit_type):\n setattr(quota_limit_obj, limit_type, obj)\n return quota_limit_obj",
"def db_for_write(self, model, **hints):\n if model._meta.app_label in self.route_app_labels:\n return 'sakiladb'\n return None",
"def _apply_extra_strategy_args_to_conf(strategy_def_config, extra_strategy_args):\n if 'is_template_strategy' not in strategy_def_config or not strategy_def_config['is_template_strategy']:\n strategy_def_config = copy(strategy_def_config)\n strategy_def_config.update(extra_strategy_args)\n else:\n strategy_def_config = apply_extra_args_to_conf(strategy_def_config, extra_strategy_args)\n return strategy_def_config",
"def _get_write_only(\n artifacts: artifacts_types.TAnyPropertyArtifacts,\n) -> typing.Optional[bool]:\n if artifacts.type == oa_types.PropertyType.SIMPLE:\n return artifacts.open_api.write_only\n if artifacts.type == oa_types.PropertyType.JSON:\n return artifacts.open_api.write_only\n if artifacts.type == oa_types.PropertyType.RELATIONSHIP:\n return artifacts.write_only\n return None",
"def db_for_write(self, model, **hints): \n if model._meta.app_label in USER_APP:\n return DB_USER\n \n return False",
"def shard_config (self, *args, **kwargs):\n payload = args[0]\n body = args[1]\n start_response = args[2]\n\n if self.is_config:\n # somebody contact security...\n start_response('403 Forbidden', [('Content-Type', 'text/plain')])\n body.put(\"Forbidden, executor already in a configured state\\r\\n\")\n body.put(StopIteration)\n\n logging.warning(\"denied configuring shard %s prefix %s\", self.shard_id, self.prefix)\n else:\n self.is_config = True\n self.prefix = payload[\"prefix\"]\n self.shard_id = payload[\"shard_id\"]\n\n start_response('200 OK', [('Content-Type', 'text/plain')])\n body.put(\"Bokay\\r\\n\")\n body.put(StopIteration)\n\n logging.info(\"configuring shard %s prefix %s\", self.shard_id, self.prefix)",
"def opp_comm_kwargs(self):\n kwargs = {'commtype': self._commtype, 'use_async': self.is_async,\n 'allow_multiple_comms': self.allow_multiple_comms}\n kwargs['address'] = self.opp_address\n kwargs['serializer'] = self.serializer\n # TODO: Pass copies/partner_copies in kwargs?\n if self.direction == 'send':\n kwargs['direction'] = 'recv'\n else:\n kwargs['direction'] = 'send'\n kwargs.update(self.serializer.input_kwargs)\n return kwargs",
"def _new_configsvr(self):\n\n logger_name = \"%s:configsvr\" % (self.logger.name)\n mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)\n\n mongod_options = copy.deepcopy(self.mongod_options)\n mongod_options[\"configsvr\"] = \"\"\n mongod_options[\"dbpath\"] = os.path.join(self._dbpath_prefix, \"config\")\n mongod_options[\"replSet\"] = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME\n mongod_options[\"storageEngine\"] = \"wiredTiger\"\n\n return replicaset.ReplicaSetFixture(mongod_logger,\n self.job_num,\n mongod_executable=self.mongod_executable,\n mongod_options=mongod_options,\n preserve_dbpath=self.preserve_dbpath,\n num_nodes=3,\n auth_options=self.auth_options,\n replset_config_options={\"configsvr\": True})"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Trains a tokenizer from a dataframe and saves it to disk. Uses a minimal alphabet of ASCII lowercase plus up to 30 additional characters.
|
def train_tokenizer_from_df(
df,
directory,
filename,
vocab_size,
min_frequency,
max_caption_length,
special_tokens,
use_bert_wordpiece=True,
):
if use_bert_wordpiece:
tokenizer = BertWordPieceTokenizer(lowercase=True)
tokenizer.enable_padding(length=max_caption_length, pad_id=0, pad_token=PAD)
tokenizer.enable_truncation(
max_length=max_caption_length, stride=0, strategy="longest_first"
)
else:
tokenizer = WordTokenizer()
tokenizer.enable_truncation(max_caption_length)
tokenizer.enable_padding()
strings = df.iloc[:, 1:].stack(-1).reset_index(drop=True)
strings.to_csv(os.path.join(directory, filename), header=False, index=False)
tokenizer.train(
os.path.join(directory, filename),
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
initial_alphabet=ascii_lowercase,
limit_alphabet=len(ascii_lowercase) + 30,
)
tokenizer.save_model(directory, filename + "tokenizer")
return tokenizer
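A minimal standalone sketch of the same BertWordPieceTokenizer calls on a toy caption file, assuming the Hugging Face tokenizers package; the corpus, lengths and vocabulary size here are illustrative only.
from string import ascii_lowercase
from tokenizers import BertWordPieceTokenizer

# Write a tiny caption corpus, one caption per line
corpus_path = "toy_captions.txt"
with open(corpus_path, "w") as f:
    f.write("a dog runs on the grass\na cat sleeps on the couch\n")

tokenizer = BertWordPieceTokenizer(lowercase=True)
tokenizer.enable_padding(length=16, pad_id=0, pad_token="[PAD]")
tokenizer.enable_truncation(max_length=16, stride=0, strategy="longest_first")
tokenizer.train(
    corpus_path,
    vocab_size=100,
    min_frequency=1,
    special_tokens=["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"],
    initial_alphabet=list(ascii_lowercase),
    limit_alphabet=len(ascii_lowercase) + 30,
)
print(tokenizer.encode("a dog runs").tokens)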
|
[
"def _tokenize_df(df, target=\"sentence\"):\n tqdm.pandas()\n assert type(target) is str, \"target must be a string\"\n assert target in df.columns, \"dataframe must have a {} column (user specified) to tokenize\".format(target)\n df[\"tokenized_text\"] = df[target].progress_apply(ReviewApp._tokenize_lematize)\n return df",
"def _add_sentence_tokens(self, data: pd.DataFrame) -> pd.DataFrame:\n tokenizer = nltk.data.load(\"tokenizers/punkt/english.pickle\")\n data[_Column.TOKENS.name] = data[_Column.MESSAGE.name].apply(tokenizer.tokenize)\n return data",
"def main(filepath):\n\n df = load_label(filepath)\n\n df['cleaned_no_stem'] = df[\"comment_text\"].apply(tokenizer,args=(stops,None,False),)\n\n rebalance_dict = {0: 35, 1: 50, 2: 60, 3: 65, 4: .75, 5: 'random'}\n\n data_proportions = [0.2, 0.3, 0.4, 0.5, 0.6, 0.75]\n\n test_ratio = 0.2\n\n for p, proportion in enumerate(data_proportions):\n\n train_sample, val_set, test_set = get_samples(df, proportion=proportion, train_test_ratio=(1-test_ratio))\n\n prepared_35, prepared_50, prepared_60, prepared_65, prepared_75, random_df = rebalance_data(train_sample)\n\n for i, p_df in enumerate([prepared_35, prepared_50, prepared_60, prepared_65, prepared_75, random_df]):\n model_name= f'{int(data_proportions[p]*100)}pct_model_{rebalance_dict[i]}toxic'\n\n # Optional pickled, previously rebalanced df functionality\n # val_set.to_pickle(\"jigsaw_toxic/\" + model_name + \"_val.pkl\")\n # test_set.to_pickle(\"jigsaw_toxic/\" + model_name + \"_test.pkl\")\n # p_df.to_pickle(\"jigsaw_toxic/\" + model_name + \"_train.pkl\")\n\n # filelist = []\n # for file in os.listdir(filepath):\n # if file.endswith(\".pkl\"):\n # if \"_test\" not in file:\n # filelist.append(file)\n\n # filelist.sort()\n\n # train_list, val_list = [], []\n # for x in filelist:\n # (train_list if \"_train\" in x else val_list).append(x)\n\n for p_df, val_set in zip(train_list, val_list):\n # model_name = os.path.splitext(p_df)[0].replace(\"_train\", \"\")\n p_df = pd.read_pickle(filepath + p_df)\n val_set = pd.read_pickle(filepath + val_set)\n\n print(f\"{model_name}:\")\n X_train = p_df.drop('label', axis=1)\n y_train = p_df['label']\n test_sample = val_set.sample( n=math.ceil(len(X_train)*test_ratio), random_state=1008 )\n # test_sample = val_set.sample(frac=test_ratio, replace=True)\n X_test = test_sample.drop('label', axis=1)\n y_test = test_sample['label']\n\n lstm_model = LSTMModel(X_train, y_train,\n X_test, y_test, hidden_dim=50,\n num_layers=1, embed_dim=50, batch_size=1,\n dropout=0, num_classes=2)\n if USE_CUDA:\n torch.cuda.init()\n lstm_model = lstm_model.cuda()\n\n lstm_model.train()\n\n NUM_EPOCHS = 6\n hist_lstm = np.zeros(NUM_EPOCHS)\n\n _, model_state_dict = lstm_model.run_model(\n y_train, X_test, y_test, NUM_EPOCHS, hist_lstm, text_col='cleaned_no_stem',\n savestate=model_name)\n\n print(model_state_dict)",
"def train_tokenizer(corpus: Union[str, List[str]],\n vocab_size: int = 30519,\n overwrite: bool = True,\n lowercase: bool = True,\n save_vocab: bool = False,\n dst: Optional[str] = None,\n in_domain_vocab: str = VOCAB_CACHE_PREFIX,\n tokenizer_type: str = 'bert',\n tokenizer_kwargs: dict = None\n ) -> Union[BertWordPieceTokenizer, ByteLevelBPETokenizer]:\n if tokenizer_type == 'bert':\n tokenizer_class = BertWordPieceTokenizer\n special_tokens = [\"[PAD]\", \"[UNK]\", \"[CLS]\", \"[SEP]\", \"[MASK]\"]\n elif tokenizer_type == 'roberta' or tokenizer_type == 'bpe_from_scratch':\n tokenizer_class = ByteLevelBPETokenizer\n special_tokens = [\"<s>\", \"<pad>\", \"</s>\", \"<unk>\", \"<mask>\"]\n elif tokenizer_type == 'bpe_from_scratch':\n tokenizer_class = ByteLevelBPETokenizer\n vocab_size = 50000\n special_tokens = [\"<s>\", \"<pad>\", \"</s>\", \"<unk>\", \"<mask>\"]\n else:\n raise Exception(\"unsupported tokenizer type\")\n\n if isinstance(corpus, list):\n if isdir(corpus[0]):\n old_corpus = corpus\n corpus = []\n for corp in old_corpus:\n corpus.extend(get_text_files(corp))\n else:\n corpus = get_text_files(corpus)\n\n # Load cached vocab if possible\n if not overwrite:\n cached_vocab = Path(dst) / (VOCAB_CACHE_PREFIX + '-vocab.txt')\n\n if cached_vocab.exists():\n logger.info(f'Loading cached vocabulary at {cached_vocab}')\n return tokenizer_class(str(cached_vocab))\n else:\n logger.info(f'Cached vocabulary not found at {cached_vocab}')\n\n # Train tokenizer\n logger.info('Training new WordPiece tokenizer on in-domain corpora')\n tokenizer = tokenizer_class(**tokenizer_kwargs)\n tokenizer.train(corpus, vocab_size=vocab_size, special_tokens=special_tokens)\n\n if save_vocab:\n tokenizer.save('.' if dst is None else dst, in_domain_vocab)\n logger.info('Saved in-domain vocabulary to '\n f'{Path(dst) / (in_domain_vocab + \"-vocab.txt\")}')\n return tokenizer",
"def test_tokenize_train_generate():\n run_tokenize_train_generate()",
"def tokenize(args):\n if args.profile and not Path(args.profile).exists(): # pragma: no cover\n raise ParserError('--profile must be a path for an existing file')\n _write(args, Tokenizer(profile=args.profile)(_read(args), column=args.mapping))",
"def load_tokenizer(names):\n tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=43)\n for name in names:\n tokenizer.fit_on_texts(list(name))\n return tokenizer",
"def tokenizer():\n fileManager = session_functions.loadFileManager()\n\n if request.method == \"GET\":\n # \"GET\" request occurs when the page is first loaded.\n if 'csvoptions' not in session:\n session['csvoptions'] = constants.DEFAULT_CSV_OPTIONS\n\n labels = fileManager.getActiveLabels()\n return render_template('tokenizer.html', labels=labels)\n\n if 'gen-csv' in request.form:\n #The 'Generate and Visualize Matrix' button is clicked on tokenizer.html.\n DocTermSparseMatrix, countMatrix = fileManager.generateCSVMatrix(roundDecimal=True)\n countMatrix = zip(*countMatrix)\n\n dtm = []\n for row in xrange(1,len(countMatrix)):\n dtm.append(list(countMatrix[row]))\n matrixTitle = list(countMatrix[0])\n matrixTitle[0] = \"Token\"\n matrixTitle[0] = matrixTitle[0].encode(\"utf-8\")\n\n labels = fileManager.getActiveLabels()\n session_functions.saveFileManager(fileManager)\n\n return render_template('tokenizer.html', labels=labels, matrixData=dtm, matrixTitle=matrixTitle, matrixExist=True)\n\n if 'get-csv' in request.form:\n #The 'Download Matrix' button is clicked on tokenizer.html.\n session_functions.cacheCSVOptions()\n savePath, fileExtension = fileManager.generateCSV()\n session_functions.saveFileManager(fileManager)\n\n\n return send_file(savePath, attachment_filename=\"frequency_matrix\"+fileExtension, as_attachment=True)",
"def tokenize(self):",
"def prep(self, text_column=\"text\", rm_stopwords=True, save=False):\n # define operations\n pipeline = (\n [str.lower, self.tokenize, self.remove_stopwords]\n if rm_stopwords is True\n else [str.lower, self.tokenize]\n )\n\n def prepare(text):\n # reverses the pipeline and calls it in reverse-order on the text\n return compose(*pipeline[::-1])(text)\n\n result = self.df.assign(tokens=self.df[text_column].apply(prepare))\n if save:\n self.df = result\n return self\n return result",
"def set_tokenizer(self, tokenizer_model: str, do_lower_case: bool):\n self.tokenizer_model = XLNetTokenizer.from_pretrained(tokenizer_model, do_lower_case=do_lower_case)",
"def tokenize_flow(df: pd.DataFrame, **tf_params) -> Tuple[TfidfVectorizer, csr_matrix]:\n if not 'stop_words' in tf_params:\n tf_params['stop_words'] = stopwords.words('english') + OUR_STOP_WORDS\n\n vectorizer = TfidfVectorizer(**tf_params)\n corpus = df['body']\n X = vectorizer.fit_transform(corpus)\n\n return vectorizer, X",
"def test_tokenizes_pandas_text_column_into_new_column(self):\n \n df = pd.DataFrame({'id': [1], 'title': ['My test'], 'text_length': 10, 'text': ['This is a functional test. It Should return this sentece tokenized.']})\n self.datacleaner.data = df\n data = self.datacleaner.tokenize_pandas_column('text')\n self.assertEqual(data['tokenized'][0], 'This functional test Should return sentece tokenized')",
"def wtsv3_to_token_df(file: str)-> pd.DataFrame:\r\n list_par = []\r\n list_line_tsv3 = []\r\n dict_features = {}\r\n dict_features_by_type = {\"span_variables\": {}, \r\n \"relation_variables\": {}}\r\n\r\n list_df = []\r\n with open(file, encoding = \"utf-8\") as tsvfile:\r\n tsvreader = csv.reader(tsvfile, delimiter=\"\\t\")\r\n for line in tsvreader:\r\n list_line_tsv3.append(line)\r\n if len(line)==1:\r\n if line[0][:6] == \"#Text=\":\r\n list_par.append(line[0][6:])\r\n if (len(line)==1) and (line[0][5:20]==\"=webanno.custom\"):\r\n list_per_feature = line[0].split(\"|\")[1:]\r\n layer_name = line[0].split(\"|\")[0][21:]\r\n \r\n if line[0][0:5]==\"#T_SP\": \r\n dict_features_by_type[\"span_variables\"][layer_name] = [(layer_name, x) for x in list_per_feature]\r\n if line[0][0:5]==\"#T_RL\": \r\n #list_per_feature = [list_per_feature[ind-1] + \"_id\" if \"BT_webanno.custom.\" in x else x for ind, x in enumerate(list_per_feature)]\r\n dict_features_by_type[\"relation_variables\"][layer_name] = [(layer_name, x) for x in list_per_feature]\r\n dict_features[layer_name] = list_per_feature\r\n if (len(line) not in [0, 1]):\r\n list_df.append(line[:-1])\r\n list_layers = [\"\".join([x[0].upper() for x in mot.split(\"_\")]) for mot in list(dict_features.keys())]\r\n\r\n \r\n tuples_index_features = [[(layer, x) for x in dict_features[layer]] for layer in dict_features.keys()]\r\n tuples_index_features = list(itertools.chain.from_iterable(tuples_index_features))\r\n \r\n id_features = [(\"id_features\", \"token_par_id\"), (\"id_features\", 'id_characters'), (\"id_features\", \"token\")]\r\n tuples_index_features = id_features + tuples_index_features\r\n multi_index = pd.MultiIndex.from_tuples(tuples_index_features, names=['layer', 'feature'])\r\n \r\n \r\n df= pd.DataFrame.from_records(list_df, columns = multi_index)\r\n df = df.replace(to_replace=r'\\*\\[\\d+\\]', value='_', regex=True)\r\n df = df.replace(to_replace=r'\\*', value='_', regex=True)\r\n df[[x for x in df.columns if x not in [\"token\"]]] = df[[x for x in df.columns if x not in [\"token\"]]].replace(to_replace=r'\\\\', value='', regex=True)\r\n df[(\"id_features\", \"par_id\")] = df[(\"id_features\", \"token_par_id\")].apply(lambda x: int(x.split(\"-\")[0])-1)\r\n df[(\"id_features\", \"token_id\")] = df[(\"id_features\", \"token_par_id\")].apply(lambda x: int(x.split(\"-\")[1])-1)\r\n \r\n df[(\"id_features\", \"start_index\")] = df[(\"id_features\", \"id_characters\")].apply(lambda x: int(x.split(\"-\")[0]))\r\n df[(\"id_features\", \"end_index\")] = df[(\"id_features\", \"id_characters\")].apply(lambda x: int(x.split(\"-\")[1]))\r\n \r\n return df, dict_features_by_type, list_layers, dict_features, list_line_tsv3, list_par",
"def _custom_tokenizer(self, text):\n normalized_string = self._pre_tokenizer.pre_tokenize_str(text)\n words = [string[0] for string in normalized_string]\n offsets = [string[1] for string in normalized_string]\n spaces = []\n for i in range(len(words)):\n if i == len(words) - 1:\n spaces.append(False)\n break\n spaces.append(True if offsets[i][1] != offsets[i+1][0] else False)\n # default is None\n spaces = None if not spaces else spaces\n return Doc(self.spacy_tokenizer.vocab, words=words, spaces=spaces)",
"def main() -> None:\n configure_pandas()\n\n df = pd.read_csv(f\"{DATAPATH}ton.csv\", low_memory=False)\n\n # Remove outlier text lengths\n df = df[df[\"text\"].str.len() < 3000]\n\n # Only keep full records of party_name and text\n df = df[[\"party_name\", \"text\"]].dropna(how=\"any\")\n print(f\"{len(df)} full records.\")\n\n # Limit dataset for faster iterations during testing\n N = 10000\n df = df.loc[:N, :]\n\n # Split train/test\n X_train, X_test, y_train, y_test = train_test_split(\n df[\"text\"],\n df[\"party_name\"],\n test_size=0.3,\n random_state=1,\n stratify=df[\"party_name\"])\n\n # Encode targets based on train\n target_encoder = LabelEncoder()\n target_encoder.fit(y_train)\n y_train = target_encoder.transform(y_train)\n y_test = target_encoder.transform(y_test)\n\n # Features + model pipeline\n pipeline = Pipeline(\n [\n (\"vect\", TfidfVectorizer(\n tokenizer=spacy_tokenizer,\n analyzer=\"word\",\n max_df=0.9,\n min_df=5,\n ngram_range=(1, 2)\n )),\n (\"clf\", XGBClassifier()),\n ]\n )\n\n # Train\n pipeline.fit(X_train, y_train)\n\n # Measure on test set\n y_pred = pipeline.predict(X_test)\n print(metrics.classification_report(y_test, y_pred, target_names=target_encoder.classes_))\n\n # TODO grid search, hyperparameters, etc.",
"def tokenize(self, sentence):\n ...",
"def tag(model, filename, device=\"cpu\", batch_size=64):\n print(\"Loading the model.\")\n model = BoudamsTagger.load(model, device=device)\n print(\"Model loaded.\")\n remove_line = True\n spaces = re.compile(\"\\s+\")\n apos = re.compile(\"['’]\")\n for file in tqdm.tqdm(filename):\n out_name = file.name.replace(\".txt\", \".tokenized.txt\")\n content = file.read() # Could definitely be done a better way...\n if remove_line:\n content = spaces.sub(\"\", content)\n\n # Now, extract apostrophes, remove them, and reinject them\n apos_positions = [ i for i in range(len(content)) if content[i] in [\"'\", \"’\"] ]\n content = apos.sub(\"\", content)\n\n with open(out_name, \"w\") as out_io:\n out = ''\n for tokenized_string in model.annotate_text(content, batch_size=batch_size):\n out = out + tokenized_string+\" \"\n\n # Reinject apostrophes\n #out = 'Sainz Tiebauz fu nez en l evesché de Troies ; ses peres ot non Ernous et sa mere, Gile et furent fra'\n true_index = 0\n for i in range(len(out) + len(apos_positions)):\n if true_index in apos_positions:\n out = out[:i] + \"'\" + out[i:]\n true_index = true_index + 1\n else:\n if not out[i] == ' ':\n true_index = true_index + 1\n\n out_io.write(out)\n # print(\"--- File \" + file.name + \" has been tokenized\")",
"def tokenize(column):\r\n return fn.base64(column)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add the start and end tokens to the strings in columns 1 through the last column of a pandas dataframe. Returns the modified dataframe and a list of the special tokens.
|
def add_special_tokens(df, pad=PAD, start=BOS, end=EOS, unk=UNK):
for col in df.iloc[:, 1:].columns:
if not df.loc[0, col].startswith(start):
df[col] = start + " " + df[col] + " " + end
return df, [pad, start, end, unk]
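# Minimal usage sketch, assuming PAD, BOS, EOS and UNK are module-level string
# constants defined elsewhere; explicit literals are passed here so the call is
# self-contained.
def _demo_add_special_tokens():
    import pandas as pd
    toy = pd.DataFrame({"id": [0, 1], "src": ["hello world", "good day"]})
    toy, specials = add_special_tokens(toy, pad="<pad>", start="<s>", end="</s>", unk="<unk>")
    # toy["src"][0] == "<s> hello world </s>"; specials == ["<pad>", "<s>", "</s>", "<unk>"]
    return toy, specials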
|
[
"def avail(df):\r\n avail = DataFrame({\r\n 'start' : df.apply(lambda col: col.first_valid_index()),\r\n 'end' : df.apply(lambda col: col.last_valid_index())\r\n })\r\n return avail[['start', 'end']]",
"def add_tokenized_column(self, df, column_name_to_tokenize):\n COL = column_name_to_tokenize\n df_with_tokens = df.assign(**{f'tokens_{COL}': lambda df: df[COL].apply(lambda x: str(x).split())})\n return df_with_tokens",
"def _concat_start_and_endpoints(flatline_starts, flatline_ends):\n all_flatlines = pd.concat([flatline_starts, flatline_ends]).set_index('pos_in_ts')\n all_flatlines = all_flatlines.sort_index()\n return all_flatlines",
"def test_explode_dataframe_in_snippets(self):\n\n df = pd.DataFrame({'id': [1], \n 'title': ['My test'], \n 'text': ['This is the first paragraph.\\n\\n\\nThis is the second paragraph.']\n })\n self.datacleaner.data = df\n column = 'text'\n re_paragraph_splitter = '\\n\\n+'\n result = self.datacleaner.explode_dataframe_in_snippets(column, re_paragraph_splitter)\n self.assertEqual(result['text'][0], 'This is the first paragraph.')\n self.assertEqual(result['text'][1], 'This is the second paragraph.')",
"def _tokenize_df(df, target=\"sentence\"):\n tqdm.pandas()\n assert type(target) is str, \"target must be a string\"\n assert target in df.columns, \"dataframe must have a {} column (user specified) to tokenize\".format(target)\n df[\"tokenized_text\"] = df[target].progress_apply(ReviewApp._tokenize_lematize)\n return df",
"def from_data_frame_time_intervals(data_frame):\n ans = ''\n for column in data_frame:\n ans += from_values_to_time_intervals(data_frame[column].values.tolist())\n return ans",
"def _trim_start_end(data: pd.DataFrame, start: int, end: int):\n start_idx = data.loc[:, \"start_locus\"].searchsorted(start)\n end_idx = data.loc[:, \"start_locus\"].searchsorted(end, side=\"left\")\n return data.iloc[start_idx:end_idx, :]",
"def add_start_end_token_idx(vec: list, start_token_idx: int = None, end_token_idx: int = None):\n res = copy(vec)\n if start_token_idx:\n res.insert(0, start_token_idx)\n if end_token_idx:\n res.append(end_token_idx)\n return res",
"def rule_split_column_to_another(self, data, **kwargs):\n if data.empty:\n return data\n\n from_column = kwargs.pop('from_column')\n to_column = kwargs.pop('to_column')\n delimiter = kwargs.pop('delimiter')\n\n data.loc[:, to_column] = [\n val.split(delimiter)[-1]\n if len(val.split(delimiter)) >= 2\n else np.NaN for val in data[from_column]\n ]\n\n data.loc[:, from_column] = [\n val.split(delimiter)[0]\n if len(val.split(delimiter)) <= 2\n else delimiter.join(val.split(delimiter)[:-1])\n for val in data[from_column]\n ]\n\n return data",
"def add_lat_longs(df):\n df['lats'], df['longs'] = separate_coords(df)",
"def construct_annotated_text(text_dataframe):\n content = ' '.join(text_dataframe['tokens'].to_list())\n compt = 0\n compt2 = 0\n string = ''\n for i in content:\n if i == '[':\n if compt == 0:\n compt += 1\n string += i\n elif compt >= 1:\n compt += 1\n elif i == ']':\n if compt - 1 != compt2:\n compt2 += 1\n else:\n string += i\n compt = 0\n compt2 = 0\n else:\n string += i\n string = string.replace('] [', ' ')\n string = string.replace(' .', '.')\n string = string.replace(' ’', '’')\n string = string.replace(' ,', ',')\n string = string.replace(' - ', '-')\n string = string.replace('( ', '(')\n string = string.replace(' )', ')')\n string = string.replace(']-[', '-')\n string = string.replace('.]', '].')\n string = string.replace('] / [', ' / ')\n string = string.replace('\\n ', '\\n')\n return string",
"def test_tokenizes_pandas_text_column_into_new_column(self):\n \n df = pd.DataFrame({'id': [1], 'title': ['My test'], 'text_length': 10, 'text': ['This is a functional test. It Should return this sentece tokenized.']})\n self.datacleaner.data = df\n data = self.datacleaner.tokenize_pandas_column('text')\n self.assertEqual(data['tokenized'][0], 'This functional test Should return sentece tokenized')",
"def _expand_range_addr(df):\n address = df['address']\n regex = r\"^[0-9]+-[0-9]+$\"\n ind = address.str.split(pat=' ', n=1).str[0].str.contains(regex)\n df_range = df[ind]\n list_expanded_df = []\n for (i, row) in df_range.iterrows():\n list_expanded_df.append(_expand_range_addr_single(row))\n return pd.concat(list_expanded_df, axis=0, ignore_index=True)",
"def make_linelist_from_dataframe(df):\n lst = []\n for values in df.head().values:\n lst.append('\\t'.join([str(v) for v in values]))\n return lst",
"def all_indexer(df):\n columns = [df.columns.str.endswith(xyz) for xyz in _veclist]\n vector_columns = columns[0]\n for column in columns:\n vector_columns |= column\n return df.columns[vector_columns]",
"def strand_specific_end_site(self) -> \"Bed\":\n if set(self[\"strand\"]) != set([\"+\", \"-\"]):\n raise ValueError(\"Not all features are strand specific!\")\n df = pd.DataFrame(self, copy=True)\n pos_strand = df.query(\"strand == '+'\").index\n neg_strand = df.query(\"strand == '-'\").index\n df.loc[pos_strand, \"chromStart\"] = df.loc[pos_strand, \"chromEnd\"] - 1\n df.loc[neg_strand, \"chromEnd\"] = df.loc[neg_strand, \"chromStart\"] + 1\n return type(self)(df)",
"def columns_to_index(self) -> pd.DataFrame:\n if self._obj.index.name == \"locus\":\n return self._obj\n obj = self._obj.copy()\n obj.loc[:, \"locus\"] = pd.IntervalIndex.from_arrays(\n obj.loc[:, \"start_locus\"],\n obj.loc[:, \"end_locus\"],\n closed=\"left\",\n name=\"locus\",\n )\n obj = obj.set_index(\"locus\").drop([\"start_locus\", \"end_locus\"], axis=1)\n return obj",
"def MergeStringColumns(df,strs):\n\n if type(df[strs[0]]) == float(1.0):\n df[strs[0]] = ''\n\n if len(strs) == 1:\n return np.array(df[strs[0]]) \n \n if type(df[strs[1]]) == float(1.0):\n df[strs[1]] = ''\n \n tmp = df[strs[0]] + ' ' + df[strs[1]]\n \n for i in range(len(strs)-1,len(strs)):\n if type(df[strs[i]]) == float(1.0):\n df[strs[i]] = ''\n\n tmp = tmp + ' ' + df[strs[i]]\n \n return np.array(tmp)",
"def prep(self, text_column=\"text\", rm_stopwords=True, save=False):\n # define operations\n pipeline = (\n [str.lower, self.tokenize, self.remove_stopwords]\n if rm_stopwords is True\n else [str.lower, self.tokenize]\n )\n\n def prepare(text):\n # reverses the pipeline and calls it in reverse-order on the text\n return compose(*pipeline[::-1])(text)\n\n result = self.df.assign(tokens=self.df[text_column].apply(prepare))\n if save:\n self.df = result\n return self\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a single caption or a group of captions from a rank-1 or rank-2 tensor of ids using a tokenizer.
|
def ids_to_captions(ids_tensor, tokenizer, skip_special_tokens=False):
if ids_tensor.dim() == 1:
ids_tensor = ids_tensor.reshape(1, -1)
ids_tensor = ids_tensor.cpu()
strings = tokenizer.decode_batch(ids_tensor.tolist(), skip_special_tokens=False)
if skip_special_tokens:
strings = list(map(lambda s: s.lstrip(BOS).partition(EOS)[0], strings))
return strings
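# Minimal usage sketch, assuming `tokenizer` is an already-trained Hugging Face
# `tokenizers.Tokenizer` (only its `decode_batch` method is used) and that BOS/EOS are
# the module-level special-token strings referenced above.
def _demo_ids_to_captions(tokenizer):
    import torch
    ids = torch.tensor([2, 45, 67, 3])  # rank-1 input; reshaped internally to a batch of one
    return ids_to_captions(ids, tokenizer, skip_special_tokens=True)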
|
[
"def get_random_caption_tokens(idx):\n \n # Initialize an empty list for the results.\n result = []\n\n # For each of the indices.\n for i in idx:\n # The index i points to an image in the training-set.\n # Each image in the training-set has at least 5 captions\n # which have been converted to tokens in tokens_train.\n # We want to select one of these token-sequences at random.\n\n # Get a random index for a token-sequence.\n j = np.random.choice(len(tokens_train[i]))\n\n # Get the j'th token-sequence for image i.\n tokens = tokens_train[i][j]\n\n # Add this token-sequence to the list of results.\n result.append(tokens)\n\n return result",
"def tokenize(\n image_id_to_captions: Dict[ImageID, List[Caption]]\n) -> Dict[ImageID, List[Caption]]:\n # Path to the Stanford CoreNLP JAR file.\n CORENLP_JAR = \"stanford-corenlp-3.4.1.jar\"\n\n # Prepare data for Tokenizer: write captions to a text file, one per line.\n image_ids = [k for k, v in image_id_to_captions.items() for _ in range(len(v))]\n sentences = \"\\n\".join(\n [c.replace(\"\\n\", \" \") for k, v in image_id_to_captions.items() for c in v]\n )\n tmp_file = tempfile.NamedTemporaryFile(delete=False)\n tmp_file.write(sentences.encode())\n tmp_file.close()\n\n # fmt: off\n # Tokenize sentences. We use the JAR file for tokenization.\n command = [\n \"java\", \"-cp\", CORENLP_JAR, \"edu.stanford.nlp.process.PTBTokenizer\",\n \"-preserveLines\", \"-lowerCase\", tmp_file.name\n ]\n tokenized_captions = (\n Popen(command, cwd=os.path.dirname(os.path.abspath(__file__)), stdout=PIPE)\n .communicate(input=sentences.rstrip())[0]\n .decode()\n .split(\"\\n\")\n )\n # fmt: on\n os.remove(tmp_file.name)\n\n # Map tokenized captions back to their image IDs.\n image_id_to_tokenized_captions: Dict[ImageID, List[Caption]] = defaultdict(list)\n for image_id, caption in zip(image_ids, tokenized_captions):\n image_id_to_tokenized_captions[image_id].append(\n \" \".join([w for w in caption.rstrip().split(\" \") if w not in PUNCTS])\n )\n\n return image_id_to_tokenized_captions",
"def ids2tokens(vocab, tokids):\n return [Doc(vocab, words=[vocab[t].orth_ for t in ids]) for ids in tokids]",
"def tf_idf_captions(df):\n df = transform_caption(df)\n captions = df[\"caption\"].tolist()\n captions = [x if isinstance(x, str) else \"\" for x in captions]\n tfidf_vect = TfidfVectorizer(stop_words='english')\n tfidf_captions = tfidf_vect.fit_transform(captions)\n return tfidf_captions",
"def build_vocab_from_file(captions_file, tokenizer, min_df=7):\n\n captions = []\n with open(captions_file) as f:\n for line in f:\n _, _, caption = line.strip().split('\\t')\n captions.append(caption)\n\n return build_vocab(captions, tokenizer, min_df=min_df)",
"def _text_to_ids(text, vocab_to_int, add_eos):\n # Check if id of '<EOS>' needs to add at the end of each sentence\n if add_eos:\n eos = [vocab_to_int['<EOS>']]\n else:\n eos = []\n \n # Get the id of each word in the text\n id_text = []\n for sentence in text.split('\\n'):\n sentence_id_text = [vocab_to_int[word] for word in sentence.split()] + eos\n id_text.append(sentence_id_text)\n \n return id_text",
"def preprocess_bert_input(text):\n input_word_ids = tokenize_text(text)\n input_mask = tf.cast(input_word_ids > 0, tf.int64)\n input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])\n\n zeros_dims = tf.stack(tf.shape(input_mask))\n input_type_ids = tf.fill(zeros_dims, 0)\n input_type_ids = tf.cast(input_type_ids, tf.int64)\n\n return (tf.squeeze(input_word_ids,\n axis=0), tf.squeeze(input_mask, axis=0),\n tf.squeeze(input_type_ids, axis=0))",
"def select_captions(annotations, image_ids):\n\n # for fast lookup\n image_ids = set(image_ids)\n\n captions = []\n caption_image_ids = []\n\n for annotation in annotations:\n image_id = annotation['image_id']\n if image_id in image_ids:\n captions.append(annotation['caption'].replace('\\n', ''))\n caption_image_ids.append(image_id)\n\n return captions, caption_image_ids",
"def load_tokens(input_id_path, token_type_id_path, attention_mask_path, label_path):\n input_ids = torch.load(input_id_path)\n token_type_ids = torch.load(token_type_id_path)\n attention_mask = torch.load(attention_mask_path)\n labels = torch.load(label_path)\n\n return TensorDataset(input_ids, token_type_ids, attention_mask, labels), labels",
"def process_input(text):\n global tokenizer\n\n inputs = tokenizer(text, return_tensors=\"pt\")\n labels = torch.tensor([1]).unsqueeze(0)\n\n return inputs, labels",
"def idf_select(self, texts: Union[str, List[str]], pool_size: int = 200):\n candidate_pool = defaultdict(lambda: 0)\n # D := number of \"documents\", i.e., number of \"keys\" in the original index\n D = len(self.original_index)\n for token in self.tokenizer(texts):\n # each token is associated with some classes\n potential_candidates = self.constructed_index[token]\n if not potential_candidates:\n continue\n # We use idf instead of tf because the text for each class is of different length, tf is not a fair measure\n # inverse document frequency: with more classes to have the current token tk, the score decreases\n idf = math.log10(D / len(potential_candidates))\n for candidate in potential_candidates:\n # each candidate class is scored by sum(idf)\n candidate_pool[candidate] += idf\n candidate_pool = list(sorted(candidate_pool.items(), key=lambda item: item[1], reverse=True))\n # print(f\"Select {min(len(candidate_pool), pool_size)} candidates.\")\n # select the first K ranked\n return candidate_pool[:pool_size]",
"def _convert_id_to_token(self, index):\n if index < self.vocab_size:\n return self.sp_model.id_to_piece(index)",
"def preprocess_sentences(sentences, vocab):\n # Add sentence boundaries, canonicalize, and handle unknowns\n words = flatten([\"<s>\"] + s + [\"</s>\"] for s in sentences)\n words = [canonicalize_word(w, wordset=vocab.word_to_id)\n for w in words]\n return np.array(vocab.words_to_ids(words))",
"def dictionary(raw_captions,threshold):\n caps = []\n for im in raw_captions:\n for s in raw_captions[im]:\n caps.append(s.split())\n\n word_freq = nltk.FreqDist(itertools.chain(*caps))\n id_to_word = ['<pad>'] + [word for word, cnt in word_freq.items() if cnt >= threshold] + ['<unk>']\n word_to_id = {word:idx for idx, word in enumerate(id_to_word)}\n \n return id_to_word, word_to_id",
"def _to_ids(self, data: List[str], batch_size: int) -> torch.LongTensor:\n\n ids = torch.LongTensor(len(data))\n for i, token in enumerate(data):\n ids[i] = self.vocab[token]\n\n n_batches = ids.size(0) // batch_size\n ids = ids[:n_batches * batch_size]\n ids = ids.view(batch_size, -1)\n\n return ids",
"def torch_indices_to_tokens(self, indices: torch.LongTensor) -> np.array:\n return self.itos[indices.numpy()]",
"def get_bert_tokens(orig_tokens, tokenizer):\n bert_tokens = []\n orig_to_bert_map = []\n bert_to_orig_map = []\n for i, sentence in enumerate(orig_tokens):\n sentence_bert_tokens = []\n sentence_map_otb = []\n sentence_map_bto = []\n sentence_bert_tokens.append(\"[CLS]\")\n for orig_idx, orig_token in enumerate(sentence):\n sentence_map_otb.append(len(sentence_bert_tokens))\n tokenized = tokenizer.tokenize(orig_token)\n for bert_token in tokenized:\n sentence_map_bto.append(orig_idx)\n sentence_bert_tokens.extend(tokenizer.tokenize(orig_token))\n sentence_bert_tokens = sentence_bert_tokens[:511]\n sentence_bert_tokens.append(\"[SEP]\")\n bert_tokens.append(sentence_bert_tokens)\n orig_to_bert_map.append(sentence_map_otb)\n bert_to_orig_map.append(sentence_map_bto)\n bert_ids = [tokenizer.convert_tokens_to_ids(b) for b in bert_tokens]\n return bert_tokens, bert_ids, orig_to_bert_map, bert_to_orig_map",
"def preprocess(text):\n text = text.lower()\n text = text.replace(\".\", \" .\") # ピリオドを単語から切り離す\n words = text.split(\" \")\n\n word_to_id = {}\n id_to_word = {}\n for word in words:\n # 内包表記だと重複する単語を除きにくいのでこの書き方になると思われる\n if word not in word_to_id:\n new_id = len(word_to_id)\n word_to_id[word] = new_id\n id_to_word[new_id] = word\n\n corpus = np.array([word_to_id[w] for w in words])\n\n return corpus, word_to_id, id_to_word",
"def sentence_to_seq(sentence, vocab_to_int):\n # TODO: Implement Function\n word_ids = []\n unk = vocab_to_int['<UNK>']\n \n for word in sentence.split():\n try:\n word_id = vocab_to_int[word.lower()]\n except:\n word_id = unk\n finally:\n word_ids.append(word_id)\n \n return word_ids"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a possibly weighted average of 1-, 2-, 3-, and 4-gram corpus BLEU scores for a batch of predictions and ground truths. The tokenizer is used to strip special tokens and padding and (if relevant) reconstruct words from subwords.
|
def corpus_bleu_score(
preds: torch.Tensor, gt: torch.Tensor, tokenizer, weights=(0.25, 0.25, 0.25, 0.25)
):
preds = [s.strip().split(" ") for s in ids_to_captions(preds, tokenizer, True)]
gt = [
[s.strip().split(" ") for s in ids_to_captions(lst, tokenizer, True)]
for lst in gt
]
return corpus_bleu(gt, preds, weights=weights)
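# Minimal usage sketch, assuming `preds` is a rank-2 tensor of predicted token ids
# (one row per sample), `gt` is an iterable of rank-2 tensors holding the reference
# captions for each sample, and `tokenizer` is the same tokenizer used above.
def _demo_corpus_bleu(preds, gt, tokenizer):
    # Uniform weights give the usual BLEU-4; (1.0, 0, 0, 0) would score unigrams only.
    return corpus_bleu_score(preds, gt, tokenizer, weights=(0.25, 0.25, 0.25, 0.25))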
|
[
"def get_bleu_score_mean(dataset,sentence_bleu,weights,smoothing_function):\n data_set_bleu_score = 0 #bleu score of all dataset\n\n for k,v in dataset.items():\n reference = [k.lower().split(\" \")]\n utterance_bleu_score = 0 # current utterance bleu_Score = average_paraphrase_bleu_score\n for cand in v:\n candidate = cand.lower().split(\" \")\n \n paraphrase_bleu_score = get_smooth(sentence_bleu,candidate,reference,weights,smoothing_function)\n \n utterance_bleu_score += paraphrase_bleu_score\n \n if utterance_bleu_score > 0:\n utterance_bleu_score = utterance_bleu_score / len(v)\n data_set_bleu_score += utterance_bleu_score\n\n bleu = data_set_bleu_score / len(dataset)\n return bleu",
"def build_score(self):\n for bigrams in self.bigram_dic_.keys():\n i, j = bigrams.split(self.parsing_char_)\n score = (\n self.bigram_dic_[bigrams] - self.params[\"phrases_delta\"]\n ) / (self.unigram_dic_[i] * self.unigram_dic_[j])\n self.bigram_dic_[bigrams] = (self.bigram_dic_[bigrams], score)",
"def _blurr_predict_tokens(\n # The function to do the base predictions (default: self.blurr_predict)\n predict_func:Callable,\n # The str (or list of strings) you want to get token classification predictions for\n items:Union[str, List[str]],\n # The Blurr Transform with information about the Hugging Face objects used in your training\n tfm:Transform\n):\n # grab the Hugging Face tokenizer from the learner's dls.tfms\n hf_tokenizer = tfm.hf_tokenizer\n tok_kwargs = tfm.tok_kwargs\n\n if (isinstance(items[0], str)): items = [items]\n\n outs = []\n for inp, res in zip(items, predict_func(items)):\n # blurr_predict returns a list for each, we only doing one at a time so git first element of each\n pred_lbls, pred_lbl_ids, probs = res[0][0], res[1][0], res[2][0]\n\n # calculate the number of subtokens per raw/input token so that we can determine what predictions to\n # return\n subtoks_per_raw_tok = [ (entity, len(hf_tokenizer.tokenize(str(entity)))) for entity in inp ]\n\n # very similar to what HF_BatchTransform does with the exception that we are also grabbing\n # the `special_tokens_mask` to help with getting rid or irelevant predicts for any special tokens\n # (e.g., [CLS], [SEP], etc...)\n res = hf_tokenizer(inp, None,\n max_length=tfm.max_length,\n padding=tfm.padding,\n truncation=tfm.truncation,\n is_split_into_words=tfm.is_split_into_words,\n **tok_kwargs)\n\n special_toks_msk = L(res['special_tokens_mask'])\n actual_tok_idxs = special_toks_msk.argwhere(lambda el: el != 1)\n\n # using the indexes to the actual tokens, get that info from the results returned above\n pred_lbls_list = ast.literal_eval(pred_lbls)\n actual_pred_lbls = L(pred_lbls_list)[actual_tok_idxs]\n actual_pred_lbl_ids = pred_lbl_ids[actual_tok_idxs]\n actual_probs = probs[actual_tok_idxs]\n\n # now, because a raw token can be mapped to multiple subtokens, we need to build a list of indexes composed\n # of the *first* subtoken used to represent each raw token (that is where the prediction is)\n offset = 0\n raw_trg_idxs = []\n for idx, (raw_tok, sub_tok_count) in enumerate(subtoks_per_raw_tok):\n raw_trg_idxs.append(idx+offset)\n offset += sub_tok_count-1 if (sub_tok_count > 1) else 0\n\n outs.append((inp,\n actual_pred_lbls[raw_trg_idxs],\n actual_pred_lbl_ids[raw_trg_idxs],\n actual_probs[raw_trg_idxs]))\n\n return outs",
"def calc_accuracy(self):\n sentence = self.output()\n user_input = input('> ')\n self.total_words += len(sentence)/5\n aligned = align_strings(sentence, user_input)\n self.print_alignment(aligned)\n accuracy = self.hamming_score(aligned[0], aligned[1])\n accuracy /= len(aligned[0])\n self.avg_accuracy += accuracy",
"def score(self, lst):\n words = []\n score = 0\n for each in lst:\n words.append(each[1])\n\n for word in words:\n if word in UNIGRAM_COUNTS:\n score = score + log10((UNIGRAM_COUNTS[word] / 1024908267229.0))\n else:\n score = score + log10((10.0 / (1024908267229.0 * 10 ** len(word))))\n\n return score",
"def sentence_to_avg(word_list, word_to_vec_map):\n \n # Initialize the average word vector, should have the same shape as your word vectors.\n shape = np.shape(50,)\n \n avg = np.zeros(shape)\n\n \n total = 0\n unknown_counter = 0\n for w in word_list:\n try:\n total += word_to_vec_map[w]\n except:\n unknown_counter += 1\n \n avg = total / len(word_list) - unknown_counter\n \n \n return avg",
"def build_dictionary_ngrams(training_datasets): \n word_counter_unigrams = collections.Counter()\n word_counter_bigrams = collections.Counter()\n word_counter_trigrams = collections.Counter()\n for i, dataset in enumerate(training_datasets):\n for example in dataset:\n sent1_tokenized = tokenize(example['sentence1_binary_parse'])\n sent2_tokenized = tokenize(example['sentence2_binary_parse'])\n bigrams1 = nltk.bigrams(sent1_tokenized)\n bigrams2 = nltk.bigrams(sent2_tokenized)\n trigrams1 = nltk.trigrams(sent1_tokenized)\n trigrams2 = nltk.trigrams(sent2_tokenized)\n word_counter_bigrams.update(bigrams1)\n word_counter_bigrams.update(bigrams2)\n word_counter_trigrams.update(trigrams1)\n word_counter_trigrams.update(trigrams2)\n word_counter_unigrams.update(sent1_tokenized)\n word_counter_unigrams.update(sent2_tokenized)\n \n vocabulary_uni = set([word for word in word_counter_unigrams])\n vocabulary_uni = list(vocabulary_uni)\n vocabulary_uni = [PADDING, UNKNOWN] + vocabulary_uni \n word_indices_uni = dict(zip(vocabulary_uni, range(len(vocabulary_uni))))\n \n vocabulary_bi = set([word for word in word_counter_bigrams])\n vocabulary_bi = list(vocabulary_bi)\n vocabulary_bi = [PADDING, UNKNOWN] + vocabulary_bi \n word_indices_bi = dict(zip(vocabulary_bi, range(len(vocabulary_bi))))\n \n vocabulary_tri = set([word for word in word_counter_trigrams])\n vocabulary_tri = list(vocabulary_tri)\n vocabulary_tri = [PADDING, UNKNOWN] + vocabulary_tri \n word_indices_tri = dict(zip(vocabulary_tri, range(len(vocabulary_tri))))\n\n return word_indices_uni, word_indices_bi, word_indices_tri",
"def word_averaging_list(wv, text_list):\n return np.vstack([word_averaging(wv, review) for review in text_list ])",
"def recognize(models: dict, test_set: SinglesData):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n probabilities = []\n guesses = []\n\n all_sequences = test_set.get_all_sequences()\n all_Xlenghts = test_set.get_all_Xlengths()\n\n print('Started recognizing ...')\n\n for i, test_word in zip( range(0,len(all_sequences) ), test_set.wordlist):\n\n bestLogL = float(\"-inf\")\n bestWord = ''\n\n myProbs = {}\n \n for word in models.keys():\n\n model = models[word]\n\n try: \n\n logL = model.score(all_sequences[i][0],all_Xlenghts[i][1] )\n\n if logL > bestLogL:\n bestLogL = logL\n bestWord = word\n\n except Exception:\n pass\n \n myProbs[word] = logL \n \n \n guesses.append(bestWord)\n probabilities.append(myProbs)\n \n print('Finished analyzing {} words '.format(len(all_sequences)))\n\n return probabilities, guesses",
"def calc_accuracy(model_dict, test_dict):\n\n \"\"\" Calculate the result \"\"\"\n\n all_prob = []\n result_dict = {}\n test_label = []\n predict_label = []\n\n for t_name, t in test_dict.items():\n result = []\n index = []\n hype_dict = {}\n sum = len(t)\n counter = 0\n letter = t_name\n for p in t:\n test_label.append(t_name)\n high_score = -100000\n for m_name, m in model_dict.items():\n score = m.score([p])\n if score > high_score:\n high_score = score\n hypo = m_name\n result.append(hypo)\n predict_label.append(hypo)\n if hypo == letter:\n counter += 1\n all_letters = list(set(result))\n for l in all_letters:\n hype_dict[l] = result.count(l)\n\n sorted_hype_dict = sorted(hype_dict.iteritems(), key=operator.itemgetter(1))\n sorted_hype_dict.reverse()\n\n if sum != 0:\n prob = float(counter)/sum\n print str(letter) + \"(\"+ str(counter) + \"/\" + str(sum) + \")\" + \" ==> Accuracy: \" + str(prob),\n print sorted_hype_dict\n all_prob.append(prob)\n result_dict[letter] = np.array([counter, sum])\n\n \"\"\" Print the average accuracy\"\"\"\n\n all_prob = np.array(all_prob)\n print \"Average accuracy is: \" + str(all_prob.mean())\n print \"=================================\"\n\n return all_prob, result_dict, test_label, predict_label",
"def preprocess_data(self):\n # Read the Input data \n self.read_data()\n\n # Initial preprocessing\n self.tweets = map(lambda x: x.split(\"\\r\\n\"), self.tweets) # Splitting the tweets into individual tweets\n self.tweets = self.tweets[0][1:]\n self.tweets = map(lambda x: x.split(\";\")[2], self.tweets) # Splitting each tweet with colon and extract the tweet message\n \n # Removing punctuation \n exclude = set(string.punctuation) # Make a set of punctuation to remove it from tweet\n self.tweets = map(lambda x:''.join(ch for ch in x if ch not in exclude).lower(), self.tweets) # Remove the punctuation from each tweet \n self.tweets = map(lambda x:' '.join(x.split()), self.tweets) # Remove extra spaces \n \n # Removing Stop Words \n stopWords = set(stopwords.words(\"english\")) # Removing Stop words \n self.tweets = map(lambda x:' '.join(word for word in x.split() if word not in stopWords), self.tweets)\n\n # Applying Stemming \n stemmer = PorterStemmer()\n self.tweets = map(lambda x: self.stem_message(x), self.tweets) # Convert each message to its base form after stemming \n\n print \"\\nIntermediate Data After Initial Preprocessing\"\n print self.tweets[:5]\n\n # Convert the tweets to bag of words representation\n self.countVectorizer = CountVectorizer(decode_error='ignore', \\\n stop_words='english', \\\n min_df=5, ngram_range=(1, 2)) # Extract uni-gram, bi-grams and tri-grams from the tweets \n self.bag_of_words = self.countVectorizer.fit_transform(self.tweets) # Convert each tweet into a vector \n\n print \"\\nTop 20 uni grams in the vocabulary_\"\n print sorted(dict((key,value) for key, value in self.countVectorizer.vocabulary_.iteritems() if key.count(' ')==0).items(), key=operator.itemgetter(1), reverse=True)[:20]\n\n print \"\\nTop 20 bi-grams in vocbulary \"\n print sorted(dict((key,value) for key, value in self.countVectorizer.vocabulary_.iteritems() if key.count(' ')==1).items(), key=operator.itemgetter(1), reverse=True)[:20]\n\n # Convert the Tweets to TF - IDF Representation for understanding importance of individual words \n self.tfidf_vectorizer = TfidfVectorizer(decode_error='ignore',\\\n stop_words='english', \\\n min_df=10, ngram_range=(1, 3)) # Convert the tweets message to tf idf representation \n self.tf_idf_scores = self.tfidf_vectorizer.fit_transform(self.tweets) \n \n # Convert the tf - idf to pandas dataframe \n print \"\\nTf - Idf for each tweet in the dataset\"\n self.df = pd.DataFrame(self.tf_idf_scores.toarray(), columns=self.tfidf_vectorizer.get_feature_names()) # Convert the td idf values for each tweet into a DataFrame\n self.df[\"Input Tweets\"] = self.tweets\n print self.df.sample(n=5) \n\n # Adding Proportion of positive words as a feature\n self.df['Positive Words'] = map(lambda x: self.positive_word(x), self.df['Input Tweets']) # Adding proportion of positive words as a feature\n print self.df.sample(n=5)\n \n # Adding Proportion of negative words as a feature \n self.df['Negative Words'] = map(lambda x: self.negative_word(x), self.df['Input Tweets']) # Adding proportion of negative words as a feature\n print self.df.sample(n=5)\n\n # Adding part of speech tag features to the dataframe \n pos_feat_ = map(lambda x: self.pos_features(x), self.df['Input Tweets']) # Adding number of parts of speech like Noun, Pronoun, Adjective as a feature\n self.df['Nouns'] = map(lambda x: x['NN'], pos_feat_)\n self.df['Verbs'] = map(lambda x: x['VBP'], pos_feat_)\n self.df['Pronoun'] = map(lambda x: x['PRP'], pos_feat_)\n self.df['Adverb'] = map(lambda x: x['RB'], 
pos_feat_)\n self.df['Adjective'] = map(lambda x: x['JJ'], pos_feat_)\n print self.df.sample(n=5)\n\n # Let's build a brown classifier \n self.build_brown_classifier()\n \n # Adding another features which classifies the tweet into four categories news, reviews, humor and government. \n self.df['Category'] = map(lambda x: self.classifying_with_brown(x), self.df['Input Tweets'])\n print self.df.sample(n=100)\n print self.df['Category'].value_counts()",
"def avg_word_vectors(wordlist,size): \n sumvec=np.zeros(shape=(1,size))\n wordcnt=0\n for w in wordlist:\n if w in model_w2v:\n sumvec += model_w2v[w]\n wordcnt +=1\n \n if wordcnt ==0:\n return sumvec\n else:\n return sumvec / wordcnt",
"def _get_best_words(self):\n words_frequencies = FreqDist()\n label_words_frequencies = ConditionalFreqDist()\n\n for word in movie_reviews.words(categories=['pos']):\n words_frequencies[word.lower()] += 1\n label_words_frequencies['pos'][word.lower()] += 1\n\n for word in movie_reviews.words(categories=['neg']):\n words_frequencies[word.lower()] += 1\n label_words_frequencies['neg'][word.lower()] += 1\n\n pos_words_count = label_words_frequencies['pos'].N()\n neg_words_count = label_words_frequencies['neg'].N()\n total_words_count = pos_words_count + neg_words_count\n\n words_scores = {}\n\n for word, frequency in words_frequencies.items():\n pos_score = BigramAssocMeasures.chi_sq(label_words_frequencies['pos'][word],\n (frequency, pos_words_count), total_words_count)\n neg_score = BigramAssocMeasures.chi_sq(label_words_frequencies['neg'][word],\n (frequency, neg_words_count), total_words_count)\n words_scores[word] = pos_score + neg_score\n\n best_words = sorted(words_scores.items(), key=lambda x: x[1], reverse=True)[:10000]\n self.best_words_set = set([w for w, s in best_words if w not in self.stopset])",
"def get_corpus_bleu_score(self) -> float:\n # Calculate corpus-level brevity penalty.\n bp = self.get_brevity_penalty()\n\n # Returns 0 if there's no matching 1-gram\n if self.no_of_correct_predicted[1] == 0:\n return 0\n\n n_gram_precision = self.get_smoothened_modified_precision()\n\n geometric_average_precision = math.exp(\n math.fsum((w_i * math.log(p_i) for w_i, p_i in zip(self.weights, n_gram_precision) if p_i > 0)))\n bleu_score = bp * geometric_average_precision\n\n return bleu_score",
"def calc_accuracy(self, path_test_file):\n total_words = 0\n total_sentences = 0\n correct_words = 0\n correct_sentences = 0\n num_samples = 0\n for _ in dep_sample_generator(path_test_file):\n num_samples += 1\n progress = ProgressBar(num_samples, fmt=ProgressBar.FULL)\n samp_gen = dep_sample_generator(path_test_file)\n for sample in samp_gen:\n total_sentences += 1\n total_words += sample[-1].idx\n infered_sample = self.infer(sample)\n correct_parse = True\n for i in range(len(sample)):\n if not i:\n # skip ROOT\n continue\n if sample[i].head == infered_sample[i].head:\n correct_words += 1\n else:\n correct_parse = False\n if correct_parse:\n correct_sentences += 1\n progress.current += 1\n progress()\n progress.done()\n print('\\n')\n sentence_accuracy = 1.0 * correct_sentences / total_sentences\n word_accuracy = 1.0 * correct_words / total_words\n return sentence_accuracy, word_accuracy",
"def predict(model, lb, tweets_df):\n tokenizer = Tokenizer(num_words = 5000)\n\n # Predict each tweet class\n for i in range(tweets_df.shape[0]):\n tokenizer.fit_on_texts(tweets_df.iloc[i][\"text\"])\n sequences = tokenizer.texts_to_sequences(tweets_df.iloc[i][\"text\"])\n data = pad_sequences(sequences, maxlen = 200)\n\n if data.size != 0:\n # Predict the class\n pred = model.predict(data)\n\n # Find the class with the highest probability\n Y_pred = pred.argmax(axis = 1)[0]\n tweets_df.loc[i, \"class\"] = lb.classes_[Y_pred]\n #print(\"[{}]{}\\n [{} with {:.2f}%]\\n\".format(i, tweets_df.iloc[i][\"text\"], lb.classes_[Y_pred], pred[0][Y_pred] * 100))",
"def ari_per_word_weighted(df: pd.DataFrame):\n\n df = gold_predict(df)\n\n words = {word: (adjusted_rand_score(df_word.gold, df_word.predict), len(df_word))\n for word in df.word.unique()\n for df_word in (df.loc[df['word'] == word],)}\n\n cumsum = sum(ari * count for ari, count in words.values())\n total = sum(count for _, count in words.values())\n\n assert total == len(df), 'please double-check the format of your data'\n\n return cumsum / total, words",
"def estimate_exact_fscore_wikiner(y_true, y_pred):\n\n pairs = []\n start = False\n end = False\n\n tp = 0\n tn = 0\n fp = 0\n fn = 0\n total_seq = 0\n\n for i in range(len(y_true)):\n if y_true[i] == \"O\":\n if y_pred[i] == \"O\":\n tn += 1\n else:\n fp += 1\n else:\n assert \"-\" in y_true[i], \"true label \" + y_true[i] + \" should contains '-'\"\n true_b_i, true_class = y_true[i].split(\"-\")\n\n if true_b_i == \"I\":\n # next label is the same=> start a sequence or continue one\n if i < len(y_true) - 1 and y_true[i] == y_true[i + 1]:\n if start:\n continue\n else:\n start_index = i\n start = True\n total_seq += 1\n else:\n # next token begin with I but other class => end of the sequence or just one token\n if i == len(y_true) - 1:\n end_index = i + 1\n else:\n # not at the end, next token is I but other class\n end_index = i\n if start:\n start = False\n pairs.append((start_index, end_index + 1))\n else:\n start_index = i\n pairs.append((start_index,))\n\n elif true_b_i == \"B\":\n if i == len(y_true) - 1: # end of the sentence\n if start:\n start = False\n pairs.append((start_index, i + 1))\n else:\n start_index = i\n pairs.append((start_index,))\n continue\n\n else:\n if y_true[i + 1] == \"O\":\n continue\n next_true_b_i, next_true_class = y_true[i + 1].split(\"-\")\n if next_true_b_i == \"B\": # this is not a sequence B-PER B-PER or B-PER B-LOC => append start_index\n start_index = i\n pairs.append((start_index,))\n elif next_true_class == true_class: # sequence\n if start:\n continue\n else:\n start_index = i\n start = True\n total_seq += 1\n else: # not a sequence\n if start:\n start = False\n pairs.append((start_index, i))\n else:\n start_index = i\n pairs.append((start_index,))\n for pair in pairs:\n if len(pair) == 1:\n try:\n y_true[pair[0]]\n except:\n print(\"Pair\", pair)\n print(\"Pair\", len(pair))\n print(y_true)\n print(y_pred)\n\n try:\n y_pred[pair[0]]\n except:\n print(\"Pair\", pair)\n print(\"Pair\", len(pair))\n print(y_true)\n print(y_pred)\n\n if y_true[pair[0]] == y_pred[pair[0]]:\n tp += 1\n else:\n fn += 1\n if len(pair) == 2:\n if y_true[pair[0]:pair[1]] == y_pred[pair[0]:pair[1]]:\n tp += 1\n else:\n fn += 1\n return tp, tn, fp, fn, total_seq",
"def word_averaging(wv, words):\n all_words, mean = set(), []\n \n for word in words:\n if isinstance(word, np.ndarray):\n mean.append(word)\n elif word in wv.vocab:\n mean.append(wv.syn0norm[wv.vocab[word].index])\n all_words.add(wv.vocab[word].index)\n\n if not mean:\n logging.warning(\"cannot compute similarity with no input %s\", words)\n # FIXME: remove these examples in pre-processing\n return np.zeros(wv.layer1_size,)\n\n mean = gensim.matutils.unitvec(np.array(mean).mean(axis=0)).astype(np.float32)\n return mean"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sample a minibatch and show the images and captions.
|
def sample_minibatch(minibatch, tokenizer, remove_special_tokens=True):
inv_normalize = NormalizeInverse()
sample_images = inv_normalize(minibatch["image"])
sample_captions = minibatch["captions"]
for i in range(sample_images.shape[0]):
plt.imshow(sample_images[i].permute(1, 2, 0).clip(0, 1).cpu())
plt.axis("off")
caption_strs = ids_to_captions(
sample_captions[i], tokenizer, remove_special_tokens
)
plt.title("\n".join(caption_strs))
plt.show()
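# Minimal usage sketch, assuming `loader` is a torch DataLoader whose batches are dicts
# with an "image" tensor and a "captions" tensor of token ids, matching what
# sample_minibatch expects.
def _demo_sample_minibatch(loader, tokenizer):
    batch = next(iter(loader))
    sample_minibatch(batch, tokenizer, remove_special_tokens=True)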
|
[
"def display_samples(folders):\n if not do_plotting:\n return\n for folder in folders:\n print(folder)\n image_files = os.listdir(folder)\n image = random.choice(image_files)\n image_file = os.path.join(folder, image)\n i = Image(filename=image_file)\n display(i)",
"def show_samples():\n files = os.listdir(FLAGS.directory)\n for file in files:\n image, label = read_and_decode(tf.train.string_input_producer([os.path.join(FLAGS.directory, file)]),\n (256, 256, 3))\n sess = tf.Session()\n init = tf.initialize_all_variables()\n sess.run(init)\n tf.train.start_queue_runners(sess=sess)\n\n label_val_1, image_val_1 = sess.run([label, image])\n\n cv2.imshow('s', (image_val_1 + 0.5))\n print(label_val_1)\n cv2.waitKey(1000)",
"def _render_mini_batch(\n self,\n catalog_list: List[Table],\n psf: List[galsim.GSObject],\n wcs: WCS,\n survey: Survey,\n seedseq_minibatch: np.random.SeedSequence,\n ) -> list:\n outputs = []\n index = 0\n\n # prepare progress bar description\n process_id = get_current_process()\n main_desc = f\"Generating blends for {survey.name} survey\"\n desc = main_desc if process_id == \"main\" else f\"{main_desc} in process id {process_id}\"\n disable = not self.use_bar or process_id != \"main\"\n for blend in tqdm(catalog_list, total=len(catalog_list), desc=desc, disable=disable):\n # All bands in same survey have same pixel scale, WCS\n slen = self._get_pix_stamp_size(survey)\n\n x_peak, y_peak = _get_center_in_pixels(blend, wcs)\n blend.add_column(x_peak)\n blend.add_column(y_peak)\n\n # add rotation, if requested\n if self.augment_data:\n rng = np.random.default_rng(seedseq_minibatch.generate_state(1))\n theta = rng.uniform(0, 360, size=len(blend))\n blend.add_column(Column(theta), name=\"btk_rotation\")\n else:\n blend.add_column(Column(np.zeros(len(blend))), name=\"btk_rotation\")\n\n n_bands = len(survey.available_filters)\n iso_image_multi = np.zeros((self.max_number, n_bands, slen, slen))\n blend_image_multi = np.zeros((n_bands, slen, slen))\n seedseq_blend = seedseq_minibatch.spawn(n_bands)\n for jj, filter_name in enumerate(survey.available_filters):\n filt = survey.get_filter(filter_name)\n single_band_output = self.render_blend(\n blend, psf[jj], filt, survey, seedseq_blend[jj]\n )\n blend_image_multi[jj, :, :] = single_band_output[0]\n iso_image_multi[:, jj, :, :] = single_band_output[1]\n\n outputs.append([blend_image_multi, iso_image_multi, blend])\n index += len(blend)\n return outputs",
"def visualize(sample_id: str, backend: Callable[[str], None] = None) -> None:\n self.image_samples.raw(sample_id)\n image_path = cache._build_cache_path(\"samples/{}/images/raw/\".format(self.id), sample_id)\n image_path = cache._add_file_extension(image_path)\n\n if backend is None:\n system = platform.system()\n\n if system == \"Darwin\":\n subprocess.call((\"open\", image_path))\n elif system == \"Windows\":\n os.startfile(image_path)\n else:\n subprocess.call((\"xdg-open\", image_path))\n else:\n backend(image_path)",
"def show_samples(fig, samples, labels=None):\r\n\r\n # Squeeze gray scale images\r\n if samples.shape[3] == 1:\r\n samples = samples.squeeze()\r\n\r\n # Compute optimal grid size\r\n n = samples.shape[0]\r\n grid_size = int(numpy.ceil(numpy.sqrt(n)))\r\n\r\n from mpl_toolkits.axes_grid1 import ImageGrid\r\n grid = ImageGrid(fig, 111, nrows_ncols=(grid_size, grid_size), axes_pad=0)\r\n\r\n for i in range(grid_size * grid_size):\r\n if i < n:\r\n grid[i].imshow(samples[i], interpolation='nearest', cmap='gray')\r\n\r\n if labels is not None:\r\n grid[i].text(3,\r\n 3,\r\n str(labels[i]),\r\n horizontalalignment='center',\r\n verticalalignment='center',\r\n color='red')\r\n\r\n grid[i].axis('off')",
"def test_raft_image_mosaic(self):\n infiles = sorted(glob.glob(os.path.join(_root_dir, 'S??',\n '*_lambda_flat_1000_*.fits')))\n infiles = OrderedDict([(filename.split('/')[-2], filename)\n for filename in infiles])\n test_files = dict()\n step = 100\n level = step\n for slot, infile in list(infiles.items()):\n outfile = '%s_test_image_%05i.fits' % (slot, level)\n with fits.open(infile) as hdu_list:\n for hdu in hdu_list[1:17]:\n hdu.data = np.ones(hdu.data.shape, dtype=np.float32)*level\n level += step\n fitsWriteto(hdu_list, outfile, overwrite=True)\n test_files[slot] = outfile\n\n raft_mosaic = raftTest.RaftMosaic(test_files, bias_subtract=False)\n raft_mosaic.plot(title='Test pattern')\n plt.savefig(self.outfile)",
"def show_random_images_from_full_dataset(dset, num_rows=4, num_cols=8):\n \n ### get random sample of images and labels\n indices = np.random.randint(0, high=len(dset)+1, size=num_rows*num_cols)\n im_arrays = np.take(dset.data, indices, axis=0)\n labels = map(dset.classes.__getitem__, np.take(dset.targets, indices))\n\n ### plot sample\n fig = plt.figure(figsize=(20, 20))\n grid = ImageGrid(\n fig, \n 111,\n nrows_ncols=(num_rows, num_cols),\n axes_pad=0.3)\n for ax, im_array, label in zip(grid, im_arrays, labels):\n ax.imshow(im_array)\n ax.set_title(label)\n ax.axis(\"off\")",
"def _vis_minibatch(im_blob, rois_blob, labels_blob, sublabels_blob):\n import matplotlib.pyplot as plt\n for i in xrange(rois_blob.shape[0]):\n rois = rois_blob[i, :]\n im_ind = rois[0]\n roi = rois[2:]\n im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()\n im += cfg.PIXEL_MEANS\n im = im[:, :, (2, 1, 0)]\n im = im.astype(np.uint8)\n cls = labels_blob[i]\n subcls = sublabels_blob[i]\n plt.imshow(im)\n print 'class: ', cls, ' subclass: ', subcls\n plt.gca().add_patch(\n plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],\n roi[3] - roi[1], fill=False,\n edgecolor='r', linewidth=3)\n )\n plt.show()",
"def test_napari_sample_data(make_napari_viewer):\n viewer = make_napari_viewer()\n\n assert len(viewer.layers) == 0\n viewer.open_sample(\"brainreg-napari\", \"sample\")\n assert len(viewer.layers) == 1\n new_layer = viewer.layers[0]\n assert isinstance(new_layer, napari.layers.Image)\n assert new_layer.data.shape == (270, 193, 271)",
"def visualize_sample(X_train, y_train, classes, samples_per_class = 7):\n num_classes = len(classes)\n for y, cls in enumerate(classes):\n idxs = np.flatnonzero(y_train == y) # get all the indexes of cls\n idxs = np.random.choice(idxs, samples_per_class, replace = False)\n for i, idx in enumerate(idxs): # plot the image one by one\n plt_idx = i * num_classes + y + 1 # i*num_classes and y+1 determine the row and column respectively\n plt.subplot(samples_per_class, num_classes, plt_idx)\n plt.imshow(X_train[idx].astype('uint8'))\n plt.axis('off')\n if i == 0:\n plt.title(cls)\n plt.show()",
"def visualise_peeks(self, images):\n images, (_, C, H, W) = ensure_batched(images)\n image = images[0:1]\n grids = [grid + self.pos for grid in self._make_centred_grids()]\n for grid in grids:\n yield F.grid_sample(image, grid,\n padding_mode='zeros').squeeze(dim=0)",
"def plot_sample_images(X, y,target_names,n=10):\n \n for label in target_names.keys():\n # grab the first n images with the corresponding y values equal to label\n images = X[np.argwhere(y == label)]\n n_images = images[:n]\n \n columns_n = 10\n rows_n = int(n/ columns_n)\n\n plt.figure(figsize=(10, 1))\n \n i = 1 # current plot \n for image in n_images:\n plt.subplot(rows_n, columns_n, i)\n plt.imshow(image[0])\n plt.xticks([])\n plt.yticks([])\n i += 1\n \n label_to_str = lambda label: target_names[label]\n plt.suptitle(f\"Brain inffract: {label_to_str(label)}\")\n plt.show()",
"def sample_patches(exam, parameters):\n all_patches = []\n all_cases = []\n for view in VIEWS.LIST:\n for short_file_path in exam[view]:\n image_path = get_image_path(short_file_path, parameters)\n patches, case = sample_patches_single(\n image_path=image_path,\n view=view,\n horizontal_flip=exam['horizontal_flip'],\n parameters=parameters,\n )\n\n all_patches += patches\n all_cases.append([short_file_path] + case)\n #print('sample_patches')\n return all_patches, all_cases",
"def __call__(self, sample):\n torch_img = np_to_torch(tmp := pil_to_np(sample)).to(self.device)\n plot_image_grid([np.clip(tmp, 0, 1)], factor=4, nrow=1, show=False, save_path='results_dip/imgs/true.png') # TODO remove\n num_iters = self.sample_iters()\n transformed = self.run(torch_img, num_iters)\n return np_to_pil(torch_to_np(transformed))",
"def show_example_images(datagen):\n fnames = [os.path.join(train_cats_dir, fname) for\n fname in os.listdir(train_cats_dir)]\n print(len(fnames))\n img_path = fnames[3] # Chooses one image to augment\n img = image.load_img(img_path, target_size=(150, 150))\n x = image.img_to_array(img) # Converts it to a Numpy array with shape (150, 150, 3) \n x = x.reshape((1,) + x.shape) # Reshapes it to (1, 150, 150, 3)\n i = 0\n for batch in datagen.flow(x, batch_size=1):\n plt.figure(i)\n imgplot = plt.imshow(image.array_to_img(batch[0]))\n i += 1\n if i % 4 == 0:\n break\n plt.show()",
"def _show_examples(self):\n labels, label_indices, label_counts = np.unique(self.y_train, return_index=True, return_counts=True)\n plt.figure(figsize=(15, 20))\n for idx in range(len(labels)):\n ax = plt.subplot(9, 5, idx + 1)\n ax.imshow(self.X_train[label_indices[idx]])\n ax.axis('off')\n ax.set_title(f\"label {labels[idx]}: {label_counts[idx]} images\")\n\n plt.show()",
"def present_batch(self, memory, minibatch_size):",
"def show_batch(dataLoader, rows):\n for images, labels in dataLoader:\n _, ax = plt.subplots(figsize=(12, 12))\n ax.set_xticks([])\n ax.set_yticks([])\n ax.imshow(make_grid(images[:64], nrow=rows).permute(1, 2, 0))\n print(labels)\n break",
"def show_random_images(train_dataset):\n\n # Randomize the order of the train dataset ids\n ids = [x for x in range(len(train_dataset))]\n random.shuffle(ids)\n\n # Show images\n for id in ids:\n img, label = train_dataset.__getitem__(id)\n img = img.cpu().permute(1, 2, 0)\n img = img.numpy()\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n cv2.imshow(\"Image\", img)\n cv2.waitKey(0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Similar to `check_value()`, but accepts many candidate shapes and checks each of them before raising an error.
|
def _check_value_any(a, *shapes, **kwargs):
if len(shapes) == 0:
raise ValueError("At least one shape is required")
name = kwargs.get("name")
for shape in shapes:
try:
return check_value(a, shape, name=name)
except ValueError:
pass
if name is None:
preamble = "Expected an array"
else:
preamble = "Expected {} to be an array".format(name)
    if len(shapes) == 1:
        shape_choices = str(shapes[0])
    else:
        # str() each candidate so the join works for any number of shapes
        shape_choices = ", ".join(
            [str(s) for s in shapes[:-2]]
            + [" or ".join([str(shapes[-2]), str(shapes[-1])])]
        )
if a is None:
raise ValueError("{} with shape {}; got None".format(preamble, shape_choices))
else:
raise ValueError(
"{} with shape {}; got {}".format(preamble, shape_choices, a.shape)
)
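# Minimal usage sketch, assuming `check_value()` is the single-shape helper that exists
# alongside this function: it raises ValueError on a shape mismatch and returns the
# validated array otherwise.
def _demo_check_value_any():
    import numpy as np
    a = np.zeros((3, 4))
    _check_value_any(a, (2, 2), (3, 4), name="weights")  # passes: the second shape matches
    _check_value_any(np.zeros(5), (2, 2), (3, 4), name="weights")  # raises ValueError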
|
[
"def is_valid_shape(value):\n if is_int_positive(value):\n return True, value\n elif isinstance(value, tuple) or isinstance(value, list):\n for v in value:\n if not is_int_positive(v):\n return False, value\n return True, value\n else:\n return False, value",
"def checkShape(value, expectedShape, name=''):\n if value.shape != tuple(expectedShape):\n raise ValueError('Shape mismatch %s: Expected %s, got %s' %\n (name, str(expectedShape), str(value.shape)))",
"def _validate_inputs(data, error):\n data = np.asanyarray(data)\n if data.ndim != 2:\n raise ValueError('data must be a 2D array.')\n\n if error is not None:\n error = np.asanyarray(error)\n if error.shape != data.shape:\n raise ValueError('error and data must have the same shape.')\n\n return data, error",
"def _check_shape(self, shape: Tuple[int], name: str) -> Union[Tuple[bool, Optional[str]], bool]:\n ok = shape[-1] == self.dims and shape[-2] == self.dims\n if not ok:\n reason = \"'{}' on the {} requires more than {} dim\".format(\n name, self, self.dims\n )\n else:\n reason = None\n return ok, reason",
"def _validate_scalarization_parameter_shape(\n multi_objectives: tf.Tensor,\n params: Dict[str, Union[Sequence[ScalarFloat], tf.Tensor]],\n):\n for param_name, param_value in params.items():\n param_shape = tf.convert_to_tensor(param_value).shape\n if param_shape.rank != 1 and not multi_objectives.shape.is_compatible_with(\n param_shape\n ):\n raise ValueError(\n 'The shape of multi_objectives: {} does not match the shape of '\n 'scalarization parameter: {}, which is {}'.format(\n multi_objectives.shape, param_name, param_shape\n )\n )",
"def validate(self, model_output_shape: Tuple, target_shape: Tuple) -> None:\n raise NotImplementedError",
"def _check_shapes(parameters, names):\n\n\tn = len(parameters)\n\n\tfor i in range(n):\n\t\tfor j in range(n):\n\t\t\tif parameters[i] is None:\n\t\t\t\tcontinue\n\n\t\t\tif parameters[j] is None:\n\t\t\t\tcontinue\n\n\t\t\tn1, n2 = names[i], names[j]\n\t\t\tif len(parameters[i]) != len(parameters[j]):\n\t\t\t\traise ValueError(\"Parameters {} and {} must be the same \"\n\t\t\t\t\t\"shape.\".format(names[i], names[j]))",
"def validate(self, value):\n if value in self.empty_values:\n return\n\n if self.required_columns and not value.first_column:\n raise forms.ValidationError(\n self.error_messages['required_columns']\n )\n if self.required_rows and not value.first_row:\n raise forms.ValidationError(\n self.error_messages['required_rows']\n )\n\n # make sure the first rangeconfig coordinate is top-left\n for i, j in zip(value[:2], value[2:]):\n if i and j and i > j:\n raise forms.ValidationError(self.error_messages['values'])\n\n if self.max_columns and value.columns > self.max_columns:\n raise forms.ValidationError(\n self.error_messages['max_columns'].format(self.max_columns)\n )\n\n if self.max_rows and value.rows > self.max_rows:\n raise forms.ValidationError(\n self.error_messages['max_rows'].format(self.max_rows)\n )\n\n if self.max_either:\n if all([value.columns > self.max_either,\n value.rows > self.max_either]):\n raise forms.ValidationError(\n self.error_messages['max_either'].format(self.max_either)\n )",
"def valid_shape(variable, correct_shape):\n if qtrader.framework.VALID_SHAPE:\n if not hasattr(variable, 'shape'):\n raise AttributeError(\n '`%s` has no attribute `shape`.' % (variable)\n )\n if hasattr(correct_shape, 'shape'):\n if correct_shape.shape != variable.shape:\n raise ValueError(\n 'invalid `%s` shape; passed shape: %s; expected shape: %s' % (\n variable, variable.shape, correct_shape.shape)\n )\n else:\n if variable.shape != correct_shape:\n raise ValueError(\n 'invalid `%s` shape; passed shape: %s; expected shape: %s' % (\n variable, variable.shape, correct_shape)\n )\n qtrader.framework.logger.debug(\n 'successful valid_shape(variable, correct_shape) call')",
"def test_check_shape_exception(self, inpt, target_shape, bound):\n with pytest.raises(ValueError, match=\"XXX\"):\n check_shape(inpt, target_shape, bound=bound, msg=\"XXX\")",
"def _verify_features_nonscalar_labels(\n pair_of_paths, *, volume_shape, check_shape, check_labels_int, check_labels_gte_zero\n):\n x = nib.load(pair_of_paths[0])\n y = nib.load(pair_of_paths[1])\n if check_shape:\n if not volume_shape:\n raise ValueError(\n \"`volume_shape` must be specified if `check_shape` is true.\"\n )\n if x.shape != volume_shape:\n return False\n if x.shape != y.shape:\n return False\n if check_labels_int:\n # Quick check of integer type.\n if not np.issubdtype(y.dataobj.dtype, np.integer):\n return False\n y = y.get_fdata(caching=\"unchanged\", dtype=np.float32)\n # Longer check that all values in labels can be cast to int.\n if not np.all(np.mod(y, 1) == 0):\n return False\n if check_labels_gte_zero:\n if not np.all(y >= 0):\n return False\n return True",
"def check_input(X):\n if isinstance(X, (pd.DataFrame, pd.Series)):\n err_message = \"Pandas DataFrame are not supported: apply X.values when calling fit\"\n raise TypeError(err_message)\n check_array(X, accept_sparse=True)",
"def check_valid_size(value, name):\n if value is None:\n return\n check_type(integer_types + (float,), value)\n if value < 0:\n raise InvalidArgument(u'Invalid size %s %r < 0' % (value, name))\n if isinstance(value, float) and math.isnan(value):\n raise InvalidArgument(u'Invalid size %s %r' % (value, name))",
"def test_check_shape_list_of_inputs_exception(self, inpt, target_shape, bound):\n with pytest.raises(ValueError, match=\"XXX\"):\n check_shapes(inpt, target_shape, bounds=[bound] * len(inpt), msg=\"XXX\")",
"def _raise_validation_error(params, validate):\n logging.error('Data failed type checking')\n failed_constraints = validate.get('failed_constraints')\n error_msg = ['Object {} failed type checking:'.format(params.get('obj_name'))]\n if failed_constraints.get('unique'):\n unique_values = failed_constraints.get('unique')\n error_msg.append('Object should have unique field: {}'.format(unique_values))\n if failed_constraints.get('contains'):\n contained_values = failed_constraints.get('contains')\n for contained_value in contained_values:\n subset_value = contained_value.split(' ')[0]\n super_value = ' '.join(contained_value.split(' ')[1:])\n if 'col_mapping' in super_value:\n error_msg.append('Column attribute mapping instances should contain all '\n 'column index from original data')\n\n if 'row_mapping' in super_value:\n error_msg.append('Row attribute mapping instances should contain all row '\n 'index from original data')\n\n error_msg.append('Object field [{}] should contain field [{}]'.format(\n super_value,\n subset_value))\n for failure in failed_constraints.get('conditionally_required', []):\n error_msg.append('If object field \"{}\" is present than object field(s) {} should '\n 'also be present. Object is missing {}'.format(*failure))\n raise ValueError('\\n'.join(error_msg))",
"def is_valid_shape(self, x):\n a = [i % j == 0 for i, j in zip(x.shape[2:], self.shape_factor())]\n return all(a)",
"def test_error_on_different_shape(metric_class=RelativeSquaredError):\n metric = metric_class()\n with pytest.raises(RuntimeError, match=\"Predictions and targets are expected to have the same shape\"):\n metric(torch.randn(100), torch.randn(50))",
"def validate(self, attr, value):\n if type(value) is not int:\n raise TypeError(\"{} must be an integer\".format(attr))\n if attr is \"x\" or attr is \"y\":\n if value < 0:\n raise ValueError(\"{} must be >= 0\".format(attr))\n elif attr is \"width\" or attr is \"height\":\n if value <= 0:\n raise ValueError(\"{} must be > 0\".format(attr))",
"def is_valid_shape(obj) -> bool:\n return isinstance(obj, _get_union_type_unsubscripted_args(ShapeLike))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Start listening for messages until one comes or the timeout is reached. Use 'limit' for tests when you want to run the worker for a given number of loops before it stops without having to tell it to. Limit should be an integer representing the number of loops. This is mainly used for testing purposes and defaults to -1, which means no limit.
|
def main_loop(self, timeout=1, limit=-1):
self.run = True
self.on_main_loop()
try:
while self.run and limit != 0:
try:
self.connection.drain_events(timeout=timeout)
except socket.timeout:
# this happens when timeout is reached and no message is
# in the queue
limit -= 1
            except self.connection.connection_errors as e:
self.logger.error("Error while connecting with Kombu: %s" % e)
raise
            except socket.error as e:
self.logger.error("Socket error: %s" % e)
raise
except (KeyboardInterrupt, SystemExit) as e:
self.logger.info("\nStopping %s" % self.name)
try:
self.connection.release()
except AssertionError:
# todo: find why there is this assertion error about state
pass
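# Minimal usage sketch, assuming `worker` is an instance of this worker class that has
# already connected (e.g. via its start() method); limit=3 bounds the loop to three
# timeout cycles, which is handy in tests.
def _demo_bounded_loop(worker):
    worker.main_loop(timeout=1, limit=3)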
|
[
"def start(self, timeout=1, limit=-1, force_purge=False):\n\n self.on_worker_starts()\n\n self.connect()\n\n self.logger.info('%s is starting' % self.name)\n\n if force_purge:\n self.purge()\n\n self.main_loop(timeout, limit)\n\n self.on_worker_stopped()\n\n self.logger.info('%s stopped' % self.name)",
"def try_until_limit(func):\n def wrapper(*args, **kwargs):\n limit = kwargs.get('limit', 10)\n sleep = kwargs.get('sleep', 2)\n step = 0\n while step < limit:\n print('[*]Waiting {}.. {}/{}'.format(kwargs['msg'], step, limit))\n if not func(*args, **kwargs):\n time.sleep(sleep)\n step += 1\n else:\n return True\n return False\n return wrapper",
"def listen(self):\r\n\t\tself.sock.setblocking(1) # set socket functions to be blocking\r\n\t\tself.sock.listen(10) # Create a backlog queue for up to 10 connections\r\n\t\tself.sock.settimeout(0.1) # set to block 0.1 seconds, for instance for reading from the socket\r",
"def wait_for_next_step(self):\n response = Convert.listen(self.socket)\n if len(response) == CHOOSE_LEN:\n return self.start(response)\n elif len(response) == FEED_LEN:\n return self.feed(response)",
"def mainloop(self):\n\n while self.is_running:\n self.timer = Timer(self.until_timeout, self.timeout)\n self.timer.start()\n try:\n self.bot.polling()\n except Exception or KeyboardInterrupt as e:\n print(e)\n raise SystemExit",
"async def limit(self, ctx: commands.Context, limit: int = 0):\n await self.config.limit.set(limit)\n await ctx.send(\n f\"The server limit has been set to {limit}.\"\n if limit\n else \"The server limit has been disabled.\"\n )\n await self.build_cache()",
"def __init__(self, limit):\r\n self.limit = limit\r\n self.clock = 0",
"def queue_listener(self):\n while True:\n # listen until smth appears\n job = ast.literal_eval(self._connection.brpop(self._name, 0)[1])\n self.process(job)",
"def listen(self, n):\n try:\n assert n + self.listen_time <= self.time\n self.listen_time += n\n left_time = self.time - self.listen_time\n\n self.progress = (self.listen_time/ self.time) * 100\n\n except AssertionError:\n print(\"You can not listen more than audio's time.\")\n\n else:\n if self.listen_time == self.time:\n print(\"Completed\")\n else:\n print(f\"You have listen {self.listen_time} minute(s) from {self.title}. \\n\"\n f\"There are {left_time} minute(s) left.\")",
"def test_start_criteria_limit_flag(self):\n\n flags = {'limit': 1}\n assert not phout.stop_criteria(0, None,\n flags), \"limit flag should not hit\"\n assert phout.stop_criteria(1, None, flags), \"limit flag should hit\"",
"def test_client_limit(self):\n listening_thread = Thread(target=self.mock_server.listen, daemon=True)\n listening_thread.start()\n\n self.mock_server.clientLimit = 5\n num_of_clients = 5\n\n for i in range(num_of_clients):\n new_thread = Thread(target=self.connect_to_server, daemon=True)\n new_thread.start()\n # give the clients some time to actually connect:\n sleep(0.01)\n\n mock_client = socket.socket()\n mock_client.connect((self.mock_server.host, self.mock_server.port))\n\n assert self.mock_server.clientCount == self.mock_server.clientLimit",
"def _make_limit_message_counter(self):\n limit_counter_key = \"limit:{}:{}\".format(\n self.traptor_type, self.traptor_id\n )\n collection_window = int(os.getenv('LIMIT_COUNT_COLLECTION_WINDOW', 900))\n\n self.limit_counter = TraptorLimitCounter(\n key=limit_counter_key,\n window=collection_window\n )\n self.limit_counter.setup(redis_conn=self.redis_conn)",
"def wait_for_next_step(self):\n response = Convert.listen(self.socket, False)\n if len(response) == 3:\n return self.start(response)\n if len(response) == 5:\n return self.feed(response)\n else:\n print response[0]",
"def test_job_limit_timeout(self):\n job_limit = 5\n self.fake_api_backend._api_client = JobTimeoutClient(\n job_limit=job_limit, max_fail_count=1)\n self.fake_api_provider._api_client = self.fake_api_backend._api_client\n\n job_set = None\n try:\n job_set = self._jm.run([self._qc]*(job_limit+2),\n backend=self.fake_api_backend, max_experiments_per_job=1)\n last_mjobs = job_set._managed_jobs[-2:]\n for _ in range(10):\n if all(mjob.job for mjob in last_mjobs):\n break\n self.assertTrue(all(job.job_id() for job in job_set.jobs()))\n finally:\n # Cancel all jobs.\n for mjob in job_set.managed_jobs():\n if mjob.job is not None:\n mjob.cancel()\n elif job_set._job_submit_lock.locked():\n job_set._job_submit_lock.release()\n wait([mjob.future for mjob in job_set.managed_jobs()], timeout=5)",
"def _get_filename_queue(self, epoch_limit):\n epoch_limiter = variable_scope.variable(\n initial_value=constant_op.constant(0, dtype=dtypes.int64),\n name=\"epoch_limiter\",\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES])\n filenames_tensor = array_ops.reshape(\n ops.convert_to_tensor(self._filenames), [-1])\n # We can't rely on epoch_limiter being initialized, since queue runners are\n # started before local variables are initialized. Instead, we ignore epoch\n # limits before variable initialization. This means that prior to variable\n # initialization, a QueueRunner may cause a reader to enter an un-checked\n # infinite loop. However, as soon as local variables are initialized, we\n # will start incrementing and checking epoch_limiter, which will interrupt\n # any in-progress loops.\n conditional_count_up_to = control_flow_ops.cond(\n state_ops.is_variable_initialized(epoch_limiter),\n lambda: epoch_limiter.count_up_to(epoch_limit),\n lambda: constant_op.constant(0, dtype=dtypes.int64))\n with ops.control_dependencies([conditional_count_up_to]):\n filenames_tensor = array_ops.identity(filenames_tensor)\n filename_queue = input_lib.string_input_producer(\n filenames_tensor, shuffle=False, capacity=1)\n return filename_queue, epoch_limiter",
"async def _server_limit(self, ctx: commands.Context, num_servers: int):\n if num_servers < 1:\n return await ctx.send(\"Please enter a number greater than 0!\")\n await self.config.limit.set(num_servers)\n return await ctx.tick()",
"def check_timer(self, limit=1):\n self.log.notset(__name__ + '::check_timer:')\n timer_now = dt.datetime.now()\n change = (timer_now - self.timer_start).total_seconds()\n if change > limit: # if time limit exceeded\n self.log.error(__name__ + '::check_timer: request_data failed after ' + str(limit) + ' seconds')\n self.log.error(__name__ + '::check_timer: notDone items in self.end_check_list')\n tp = self.end_check_list[self.end_check_list['status'] != 'Done']\n self.log.error(str(tp))\n return True\n else:\n return None",
"def test_job_limit(self):\n job_limit = 5\n self.fake_api_backend._api_client = BaseFakeAccountClient(\n job_limit=job_limit, job_class=CancelableFakeJob)\n self.fake_api_provider._api_client = self.fake_api_backend._api_client\n\n job_set = None\n try:\n with self.assertLogs(managedjob.logger, 'WARNING'):\n job_set = self._jm.run([self._qc]*(job_limit+2),\n backend=self.fake_api_backend, max_experiments_per_job=1)\n time.sleep(1)\n\n # There should be 5 done and 2 running futures.\n running_futures = [mjob.future for mjob in job_set.managed_jobs()\n if mjob.future.running()]\n max_wait = 6\n while len(running_futures) > 2 and max_wait > 0:\n running_futures = [f for f in running_futures if f.running()]\n time.sleep(0.5)\n self.assertEqual(len(running_futures), 2)\n\n for mjob in job_set.managed_jobs():\n if mjob.job is not None:\n mjob.cancel()\n self.assertEqual(len(job_set.jobs()), job_limit+2)\n self.assertTrue(all(job_set.jobs()))\n finally:\n # Cancel all submitted jobs first.\n for mjob in job_set.managed_jobs():\n if mjob.job is not None:\n mjob.cancel()\n elif job_set._job_submit_lock.locked():\n job_set._job_submit_lock.release()\n wait([mjob.future for mjob in job_set.managed_jobs()], timeout=5)",
"def add_frequency_limiter(self, limit: int = 1) -> None:\n template = jinja_env().get_template(\"clamp_add_frequency.json.j2\")\n return template.render(name=self.extract_operational_policy_name(\"FrequencyLimiter\"),\n LOOP_name=self.name,\n limit=limit)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Connect the worker to the message broker, purge the queues if required, then start the main loop to listen for and react to messages. Callbacks are provided to perform actions before and after the main loop runs. Use 'limit' in tests when you want to run the worker for a given number of loops before it stops, without having to tell it to stop explicitly. 'limit' should be an integer representing the number of loops. It is mainly used for testing purposes and defaults to -1, which means no limit.
|
def start(self, timeout=1, limit=-1, force_purge=False):
self.on_worker_starts()
self.connect()
self.logger.info('%s is starting' % self.name)
if force_purge:
self.purge()
self.main_loop(timeout, limit)
self.on_worker_stopped()
self.logger.info('%s stopped' % self.name)
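# Hedged usage sketch: `EchoWorker` is a hypothetical concrete subclass of this
# worker; only the start() signature above comes from the document. limit=5
# makes the run return after five idle timeouts, which keeps tests from hanging.
#
#     worker = EchoWorker()
#     worker.start(timeout=1, limit=5, force_purge=True)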
|
[
"def main_loop(self, timeout=1, limit=-1):\n\n self.run = True\n\n self.on_main_loop()\n\n try:\n while self.run and limit != 0:\n try:\n self.connection.drain_events(timeout=timeout)\n except socket.timeout: \n # this happens when timeout is reached and no message is\n # in the queue\n limit -= 1\n\n except self.connection.connection_errors, e:\n self.logger.error(\"Error while connecting with Kombu: %s\" % e)\n raise\n except socket.error, e:\n self.logger.error(\"Socket error: %s\" % e)\n raise\n except (KeyboardInterrupt, SystemExit) as e:\n self.logger.info(\"\\nStopping %s\" % self.name)\n\n try:\n self.connection.release()\n except AssertionError:\n # todo: find why there is this assertion error about state\n pass",
"def run(self):\n\n \n try:\n # Spawn the tasks to run concurrently\n self.loop.create_task(self.listen()) # Listen to subscribed topics\n self.loop.create_task(self.run_tk()) # Run GUI\n self.loop.run_forever()\n except:\n pass",
"def _worker_loop(self) -> None:\n msgq = self.msg_q\n while self._do_main_loop:\n if self._isactive:\n msg = self.generate_msg()\n if msg is not None:\n # print(\"enqueueing {}\".format(msg))\n msgq.put(msg)\n # --\n gevent.sleep(self._sec_sleep)",
"def run(self):\n\n loop = asyncio.get_event_loop()\n connection = loop.run_until_complete(self.connect_broker())\n try:\n loop.run_until_complete(self.start_loop(connection))\n if self.run_forever:\n loop.run_forever()\n except KeyboardInterrupt:\n return\n finally:\n if self.run_forever:\n loop.run_until_complete(connection.close())\n loop.run_until_complete(loop.shutdown_asyncgens())\n loop.stop()",
"def process_messages_loop_internal(self):\n while self.receiving_messages:\n # connect to AMQP server and listen for 1 message then disconnect\n self.work_request = None\n self.connection.receive_loop_with_callback(self.queue_name, self.save_work_request_and_close)\n if self.work_request:\n self.process_work_request()",
"def run_while(cb):\n if not callable(cb):\n raise Exception(\"cb is not callable\")\n if not cb():\n return\n while cb():\n if _quit_requested:\n raise QuitException()\n time.sleep(0.001)\n MessageLoop._run_until_empty()\n MessageLoop._run_pending_messages()",
"def run(self):\n\t\tself.client.loop_start()\n\t\tself.discover_and_notify()\n\t\tself.publish()",
"async def message_loop(self):\n raise NotImplementedError",
"def run():\n global connection, channel\n while _isRunning:\n try:\n if channel:\n if len(_subscriptions) > 0: # don't start conuming when there are no subscriptions, this doesn't work\n channel.start_consuming()\n else:\n channel.connection.process_data_events() # make certain that hearbeat is processed\n sleep(2) # no need to loop continuosly if there are no subscriptions, give the cpu some rest until we have something to monitor.\n except:\n logger.exception(\"broker communication failure\")\n channel = None\n if _isRunning and len(_subscriptions) > 0: # don't try to reconnect if there are no subscriptions, no need for this part of the code.\n logger.error(\"reconnecting from main loop\")\n reconnect()",
"def main_loop(self):\n \n while self.running:\n # handle_network() will block for at most 1 second during\n # the select() syscall\n self.handle_network()\n self.check_queue()\n self.handle_cron()",
"def run(self):\n if self.test:\n self.set_test_env()\n try:\n consumer = AsyncConsumer(\n cfg=self.cfg, cbk=self._callback, worker=self)\n rabbitmq_conn = consumer.connect()\n rabbitmq_conn.ioloop.start()\n except (KeyboardInterrupt, SystemExit):\n consumer.stop()",
"def run(self):\n response_socket = self._ZMQ_CONTEXT.socket(zmq.DEALER)\n if self._SOCKET_IDENTITY:\n response_socket.setsockopt(zmq.IDENTITY, self._SOCKET_IDENTITY)\n response_socket.connect(self._ZMQ_ENDPOINT)\n poller = zmq.Poller()\n # pylint: disable=E1101\n poller.register(response_socket, zmq.POLLIN)\n # pylint: enable=E1101\n self._is_running.set()\n while self._is_running.is_set():\n socks = dict(poller.poll(self._POLLING_TIMEOUT_MILLI))\n if socks.get(response_socket) == zmq.POLLIN:\n message = response_socket.recv()\n self._handle_binary_omega_message(message)\n time.sleep(2.)\n response_socket.close()",
"def run(self):\n self.__started = True\n\n while not self.stopEvent.isSet():\n self.lock.acquire()\n try:\n connection_dispatcher_keys = self.connectionDispatchers.keys()\n for k in connection_dispatcher_keys:\n connection = self.connectionDispatchers.get(k)\n if connection is not None:\n connection.makeConnection()\n\n if self.stopEvent.isSet():\n break\n finally:\n self.lock.release()\n\n if len(self.connectionDispatchers) > 0:\n asyncore.loop(0.01, False, None, 1)\n if self.doEventsDispatching:\n SpecEventsDispatcher.dispatch()\n else:\n time.sleep(0.01)\n\n asyncore.loop(0.01, False, None, 1)",
"def exec(self) -> None:\n self.start_message_loop()",
"async def run(self, workerAction: WorkerAction) -> None:\n for worker in self.workers:\n await workerAction(worker)",
"def start_messege_handler(\n self, subscription: str, callback: MessageHandler, max_workers: int = 10\n ) -> StreamingPullFuture:\n return self.subscriber.subscribe(\n subscription, callback, scheduler=self._make_scheduler(max_workers)\n )",
"def worker(self):\n while True:\n address, msg = self.messenger.receive(return_payload=False)\n if msg is None:\n time.sleep(2)\n continue\n deserialized_msg = self.messenger.deserialize_message_payload(msg)\n if msg.msg_type == message.Message.MSG_TASKUNIT:\n tu = deserialized_msg\n # TODO MA Make this run in a new thread instead of directly here.\n tu.run()\n self.messenger.send_taskunit_result(tu, address)",
"def listen(self):\n try:\n # Send messages from shoutbox every few seconds\n self.start_message_loop()\n self.start_ping_loop()\n # Start the reactor\n reactor.run()\n except KeyboardInterrupt:\n self.close_connection()\n self.logprint(\"Exiting...\")",
"def callback_serial(self):\r\n while self.queue_serial.qsize():\r\n try:\r\n queuer = self.queue_serial.get(0)\r\n queuer.daemon=True\r\n queuer.start()\r\n except Empty:\r\n pass\r\n self.callback_obj1=self.after(1000, self.callback_serial)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Override this if you want to perform an action when the worker starts.
|
def on_worker_starts(self):
pass
|
[
"def start_worker(self):\n self._thread_worker = _start_thread(self._start)",
"def on_start(self, event):\n pass",
"def launch(self):\n Worker.time += 1",
"def start_processing(self):\n pass",
"def __call__(self, *args, **kwargs):\n self.start(*args, **kwargs)",
"def on_worker_connected(self):\n pass",
"def didExecutePreJobs(self):\n pass",
"def start(self):\n self.action_server.start()",
"def start_event_dispatch(self, params):\n raise NotImplementedError() #pragma: no cover",
"def start(self):\r\n from ubcs_auxiliary.threading import new_thread\r\n new_thread(self.run)",
"def task_started(self, task):\n pass",
"def start(self):\n\n # start the pumps\n for pump in self.pumps.values():\n pump.start()\n\n # call the start of the thread\n super().start()",
"def start(self):\n self.threadpool.callInThread(self.run)",
"def notify_started(self):\n self._send_notification('Airplayer started')",
"def start_mw(self):\n self.status = Status.RUNNING\n self.maintenance_event('start')",
"def start(self):\n if self.step_id is not None:\n self.start_local()\n else:\n super(SlurmJob, self).start()",
"def willExecutePreJobs(self):\n pass",
"def start(self):\n self.running = True",
"def DaemonStarting(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Override this if you want to perform an action when the worker has stopped.
|
def on_worker_stopped(self):
pass
|
[
"def at_stop(self):\r\n pass",
"def callback_stopping(self, myrun):\n pass # pragma: no cover",
"def _worker_emu_stop(self):\n self._worker_queue.put(UnicornWorkerUpdateStateMessage(TargetStates.STOPPED))\n self.uc.emu_stop()",
"def request_stop(self):\n self._stop_requested = True",
"def stop(self):\n self.dispatch(Request.stop())",
"def _stopping(self):\n \n self.__state = runlevel.STATE_STOPPING",
"def _on_stop(self):\n self._pool.join()",
"def stop(self):\n\n self.active = False\n self.join()",
"def stop(self, callback=None):\r\n\r\n if not self.started:\r\n return\r\n\r\n if self.status == 1:\r\n # someone already requested to stop the manager\r\n return\r\n\r\n # set the callback\r\n self.stop_cb = callback\r\n\r\n # update the status to stop and wake up the loop\r\n self.status = 1\r\n self._waker.send()",
"def stop(self):\n # self.stop_watches()\n logging.debug(\"Sending stop signal to etcd watcher thread\")\n self.keep_running = False\n self.observer_thread.join()\n logging.info(\"Romana watcher plugin: Stopped\")",
"def stop(self):\n self._stop.set()\n self.enqueue_stop_sign()\n self._thread.join()\n self._thread = None",
"def on_stop(self):\n executed_command = LeapGui.StoredArgs().load().stored_args[EXECUTED_COMMAND]\n if executed_command == ACTION_RECORD:\n return self.clientRunner.stop_leap()\n self.clientRunner.stop()",
"def serviceStopped(self):",
"def stop(self):\n logger.info(\"Stopping Follw\")\n self.terminate = True\n self.location.stop()",
"def __exit__(self, exc_type, exc_value, traceback):\n self.stop()",
"def endwork(self):\n \n self.log.warning('Factory stopped.')\n if (self.shutdownrestart):\n self.restartfunc(self.shutdownrestartdelay)",
"def stop(self):\n Multipass.stop(self.name)",
"def stop(self):\n\t\tself.unscheduleCallback(self._updateDisplayText)",
"def _stopped(self):\n\n self.active = False\n self.stopped = True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Override this if you want to perform an action when the worker has connected to the message broker.
|
def on_worker_connected(self):
pass
|
[
"def on_connected(self):\n log.debug('on_connected called.')",
"def kasaya_connection_started(self, addr):\n LOG.debug(\"Connected to %s\", addr)\n self.SYNC.notify_worker_live(self.status)",
"def on_message_handled(self, event):\n self.accept(event.delivery)\n print('job accepted: ' + str(event.subject))",
"def ready(self, component):\n\n self.fire(Connect(self.host, self.port))",
"def on_connect(self, client, userdata, rc):\n print(\"Connected with result code: \" + str(rc))\n self.subscribe(\"orchestra/glock\")",
"def __publish_connect_message(self):\n logger.debug(\"Connect successfull\")\n self.publish(\"Greetings from Discord Bot\", topic=self.bot_topic)",
"def connect(self):\n self.signal.connect(\n self.on_signal_received,\n sender=self.sender,\n dispatch_uid=self._dispatch_uid,\n )\n self.is_connected = True",
"def connect(self):\n if not self._jobboard.connected:\n self._jobboard.connect()",
"def on_connect():\n\n print('User connected')",
"def pusher_connected(self, data):\n # Inform user that pusher is done connecting\n self.logger.info(\"Pusherclient connected\")\n\n # Bind the events we want to listen to\n self.callback_client.bind(\"payment_authorized\",\n self.payment_authorized)\n self.callback_client.bind(\"shortlink_scanned\",\n self.shortlink_scanned)",
"def on_worker_starts(self):\n pass",
"def _on_protocol_built(self):\n log_msg(\"I guess we connected successfully...\", 2)\n self._on_reachable()",
"def run(self):\n\t\tself.client.loop_start()\n\t\tself.discover_and_notify()\n\t\tself.publish()",
"def channelJoined(self, channel):",
"def handle(self):\n #Send connection confirmation to client\n msg = \"CONNECTED {}\".format(self.client_address[0])\n self.queue.put(msg)\n while True:\n msg = self.queue.get(block=True)\n self.request.sendall(msg)",
"def notify_started(self):\n self._send_notification('Airplayer started')",
"def on_connect(self, client, userdata, flags, rc):\n try:\n for topic in self.topic_list:\n self.clientMqtt.subscribe(topic)\n self.logger.d(\"subscribe on \" + topic)\n for topic in self.topic_list_unsubscribe:\n self.clientMqtt.unsubscribe(topic)\n self.logger.d(\"unsubscribe on \" + topic)\n self.mqtt_connect_event.set()\n except Exception as e:\n import traceback\n exc_type, exc_obj, exc_tb = sys.exc_info()\n exceptionStr = (\n os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n + \", line \"\n + str(exc_tb.tb_lineno)\n + \" : \"\n + str(e) +\n \"\".join(traceback.format_tb(e.__traceback__))\n )\n self.logger.e(exceptionStr)",
"def connect(self):\n self.connected = True",
"def _onconnect(self):\n# print('DEBUG: enter daq._onconnect',file=sys.stderr)\n handshake_tries = 0\n while True:\n try:\n hs = self.comm.command(b'H')\n except RuntimeError:\n handshake_tries += 1\n if handshake_tries>=3:\n self._conncall('Handshake timed out. Check if PteroDAQ firmware is installed.')\n return\n continue\n break\n if hs != b'DAQ':\n self._conncall('Handshake failed. Check if PteroDAQ firmware is installed.')\n return\n version = self.comm.command(b'V')\n if version != firmware_version:\n self._conncall('Incorrect version: {0} present, {1} needed.'.format(tostr(version), tostr(firmware_version)))\n return\n model = self.comm.command(b'M')\n self.board = getboardinfo(model)\n self._conncall(None)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Override this to return the exchanges you are going to use for your worker. It should return a mapping of exchange names to exchange objects.
|
def get_exchanges(self):
pass
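# Hedged sketch of a possible override -- the 'psms' exchange name matches the
# get_producers() example later in this file; the exchange type and durability
# are assumptions:
from kombu import Exchange

def get_exchanges(self):
    return {'psms': Exchange('psms', type='direct', durable=True)}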
|
[
"def list_exchanges(self):\n endpoint = self.build_url(\"/exchanges\")\n return self.request('get', endpoint)",
"def bind_exchanges(self):\n\n for name, exchange in self.exchanges.items():\n self.exchanges[name] = exchange(self.channel)",
"def createexchanges(self):\r\n exchangeString = \"\"\r\n if type(self.exchanges) is not list:\r\n raise Exception(\"Exchanges need to be structured as a list\")\r\n if len(self.exchanges) == 1:\r\n exchangeString += '(' + str(self.EXCHANGE_STRING) + ' == \"' + self.exchanges[0] + '\")'\r\n elif len(self.exchanges) > 1:\r\n exchangeString += '('\r\n for exchange in self.exchanges[:-1]:\r\n exchangeString += '(' + str(self.EXCHANGE_STRING) + ' == \"' + exchange + '\")'\r\n exchangeString += ' | '\r\n exchangeString += '(' + str(self.EXCHANGE_STRING) + ' == \"' + self.exchanges[-1] + '\")'\r\n exchangeString += ')'\r\n return exchangeString",
"def exchange_wrapper():\n return query_exchange_server(wf,start_outlook, end_outlook, date_offset)",
"def get_all_exchanges(self, fsym, tsym, base_url='https://www.cryptocompare.com/api/data/'):\n res = self.get_coin_snapshot(fsym, tsym, base_url=base_url)\n try:\n exchanges = res['Data']['Exchanges']\n markets = [x['MARKET'] for x in exchanges]\n return sorted(markets)\n except KeyError:\n return res",
"async def ensure_exchanges(self, channel):\n\n # TODO: Perhaps instead make self.exchanges a property to better guard\n # against its use before ensure_exchanges has been called\n if self.exchanges is not None:\n return\n\n self.exchanges = {}\n\n for exchange_name in self.get_exchanges():\n exchange = self.config.broker.exchanges[exchange_name]\n self.exchanges[exchange_name] = \\\n await channel.declare_exchange(exchange.name, exchange.type)",
"def getExames(self):\n catalog = getToolByName(self, 'portal_catalog')\n path_exames = '/'.join(self.context.aq_parent.getPhysicalPath())\n exames = catalog(object_provides=IExameSangue.__identifier__,\n path=path_exames,\n sort_on='Date',\n sort_order='reverse',)\n return exames",
"def getExchangeVersions(self):\n return self.session.request('replicationcomms/slave/exchange')",
"def agent_addr_to_exchange_params(self) -> Dict[Address, ExchangeParams]:\n return self._agent_addr_to_exchange_params",
"def workers(self):\n # type: () -> Dict\n return self.__workers",
"def get_ehosts(self):\n return self.execution_host_manager.get_objects()",
"def create_exchanges():\n coinbasepro = ccxt.coinbasepro({\n 'apiKey': api_keys.coinbasepro['apiKey'],\n 'secret': api_keys.coinbasepro['secret'],\n 'enableRateLimit': True,\n })\n\n cex = ccxt.cex({\n 'apiKey': api_keys.cex['apiKey'],\n 'secret': api_keys.cex['secret'],\n 'enableRateLimit': True,\n })\n\n poloniex = ccxt.poloniex({\n 'apiKey': api_keys.poloniex['apiKey'],\n 'secret': api_keys.poloniex['secret'],\n 'enableRateLimit': True,\n })\n\n bittrex = ccxt.bittrex({\n 'apiKey': api_keys.bittrex['apiKey'],\n 'secret': api_keys.bittrex['secret'],\n 'enableRateLimit': True,\n })\n\n binance = ccxt.binance({\n 'apiKey': api_keys.binance['apiKey'],\n 'secret': api_keys.binance['secret'],\n 'enableRateLimit': True,\n })\n\n bitfinex = ccxt.bitfinex({\n 'apiKey': api_keys.bitfinex['apiKey'],\n 'secret': api_keys.bitfinex['secret'],\n 'enableRateLimit': True,\n })\n\n kucoin = ccxt.kucoin({\n 'apiKey': api_keys.kucoin['apiKey'],\n 'secret': api_keys.kucoin['secret'],\n 'enableRateLimit': True,\n })\n\n kraken = ccxt.kraken({\n 'apiKey': api_keys.kraken['apiKey'],\n 'secret': api_keys.kraken['secret'],\n 'enableRateLimit': True,\n 'options': { # ←--------------------- inside 'options' subkey\n 'fetchMinOrderAmounts': False, # ←---------- set to False \n }\n })\n\n bitmex = ccxt.bitmex({\n 'apiKey': api_keys.bitmex['apiKey'],\n 'secret': api_keys.bitmex['secret'],\n 'enableRateLimit': True,\n })\n\n okex = ccxt.okex({\n 'apiKey': api_keys.okex['apiKey'],\n 'secret': api_keys.okex['secret'],\n 'enableRateLimit': True,\n })\n\n exchanges =[coinbasepro, cex, poloniex, bittrex, binance, bitfinex, kucoin, kraken, okex, bitmex]\n\n return exchanges",
"def get_producers(self):\n return {'psms': Producer(self.channel, exchange=self.exchanges['psms'])}",
"def get_queues(self):\n pass",
"def get_rabbitmq_admin_instances():\n instances = {}\n\n for instance in LIFEGUARD_RABBITMQ_INSTANCES:\n key = instance.upper()\n instances[instance] = {\n \"base_url\": SETTINGS_MANAGER.read_value(\n \"LIFEGUARD_RABBITMQ_{}_ADMIN_BASE_URL\".format(key)\n ),\n \"user\": SETTINGS_MANAGER.read_value(\n \"LIFEGUARD_RABBITMQ_{}_ADMIN_USER\".format(key)\n ),\n \"passwd\": SETTINGS_MANAGER.read_value(\n \"LIFEGUARD_RABBITMQ_{}_ADMIN_PASSWD\".format(key)\n ),\n \"vhost\": SETTINGS_MANAGER.read_value(\n \"LIFEGUARD_RABBITMQ_{}_ADMIN_VHOST\".format(key)\n ),\n }\n\n return instances",
"def stocks(self):\n if self._stocks == {}:\n # Init empty Stocks\n for symbol in self.symbols:\n self._stocks[symbol] = Stock(symbol)\n\n # Apply trades to Stocks\n for trade in self.trades:\n self._stocks[trade.symbol].apply_trade(trade)\n return self._stocks",
"def list_vhost_exchanges(self, *, vhost: str = None):\n vhost = vhost if vhost is not None else self.vhost\n endpoint = self.build_url(\"/exchanges/{vhost}\", vhost=vhost)\n return self.request('get', endpoint)",
"def get_all_ee_signals(self):\n info = dict()\n for ept in self.list_endpoint_names():\n info[ept] = self.get_ee_signals(ept)\n return info",
"def get_exchange_by_id(self, id):\n\n url = self.url + \"/exchanges/\" + id\n\n resp = requests.get(url)\n\n if resp.status_code < 300:\n return resp.json()[\"data\"]\n\n return ExchangeStructure"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Loop over all exchanges in the self.exchanges dictionary and bind them to the current channel. Called in self.connect() right after the connection with the message broker has been established. Assumes there is only one channel and one connection.
|
def bind_exchanges(self):
for name, exchange in self.exchanges.items():
self.exchanges[name] = exchange(self.channel)
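# Hedged detail behind the loop above: calling a Kombu Exchange with a channel
# returns a bound copy, equivalent to .bind(channel). The broker URL and the
# 'psms' name are illustrative assumptions.
from kombu import Connection, Exchange

with Connection('amqp://guest:guest@localhost//') as conn:
    channel = conn.channel()
    bound = Exchange('psms', type='direct')(channel)   # same as .bind(channel)
    bound.declare()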
|
[
"async def ensure_exchanges(self, channel):\n\n # TODO: Perhaps instead make self.exchanges a property to better guard\n # against its use before ensure_exchanges has been called\n if self.exchanges is not None:\n return\n\n self.exchanges = {}\n\n for exchange_name in self.get_exchanges():\n exchange = self.config.broker.exchanges[exchange_name]\n self.exchanges[exchange_name] = \\\n await channel.declare_exchange(exchange.name, exchange.type)",
"def bind_queues(self):\n\n for name, queue in self.queues.items():\n self.queues[name] = queue(self.channel)\n self.queues[name].declare()",
"def run(self):\n\n # Continue as long as there is no stop signal\n while self.running:\n\n # Initial variables, specific to each connection\n connection_tries = 0\n connection_delay = 0\n\n # Initialise the data structure\n for currency_pair in self.markets:\n self.data_store[currency_pair['base_currency'], currency_pair['quote_currency']] = {\n 'order_book_ask': sortedcontainers.SortedListWithKey(key=lambda val: val[0]),\n 'order_book_bid': sortedcontainers.SortedListWithKey(key=lambda val: -val[0]),\n 'last_sequence': None,\n 'status': 'inactive',\n }\n\n # Connect to the order book. Continue trying in case of issues or a temporary downtime\n while self.socket_handle is None:\n\n # Delay connecting if required, to prevent flooding the remote server with connection tries\n time.sleep(connection_delay)\n\n # Call the connect function, implemented by the child class\n try:\n self.socket_handle = self.connect()\n except OrderBookError as e:\n logger.warning(\"Could not connect with the websocket API: %s\" % e)\n\n connection_tries += 1\n\n # Delay the next connection if connecting failed more than 3 times. 1 second for the 4th try,\n # up until 5 seconds for the 8th try and over\n if connection_tries > 3:\n connection_delay = min(connection_tries - 3, 5)\n\n # Give up after 2000 failed tries to connect\n if connection_tries > 2000:\n raise OrderBookError(\"Failed to connect with the websocket after 2000 tries\")\n\n logger.info(\"Order book connection established\")\n\n # Subscribe to all specified markets\n for pair, _ in self.data_store.items():\n\n # Send subscription message\n self.subscribe(pair[0], pair[1])\n\n # Update status of market\n self.data_store[pair]['status'] = 'initialising'\n\n # Run in a loop to process messages until we want to stop, encounter an error or timeout\n while self.running and not self.restart:\n\n # Call the update method of the child. Each call returns a list with 0 or more update messages\n try:\n updates = self.receive()\n except OrderBookError as e:\n logger.warning(\"Error while receiving data: %s\" % e)\n self.restart = True\n\n else:\n # Process all updates\n if len(updates) > 0:\n for item in updates[:-1]:\n self.update(item)\n self.update(updates[-1], True)\n\n # Initialise a restart if requested\n if self.restart and self.running:\n logger.info(\"Order book restart initiated\")\n\n # Try to cleanly disconnect\n self.disconnect()\n\n # Reset data structures\n self.data_store = {}\n self.socket_handle = None\n self.restart = False\n\n # Instruct child class to reset its exchange specific data structures, if implemented\n self.reset_data_structures()\n\n # Disconnect when shutting down\n self.disconnect()",
"def _refresh_broker_registry(self):\n while True:\n # Wait for a connect notification from the DXL client or the update interval\n with self.app.dxl_service_client._connected_lock:\n self.app.dxl_service_client._connected_wait_condition.wait(\n self.BROKER_UPDATE_INTERVAL)\n if self.app.dxl_service_client.connected:\n logger.info(\"Refreshing broker registry...\")\n self.update_broker_registry()",
"def join_all(self):\n for channel in self.config['channels']:\n self.join(channel)",
"async def connected_callback(self):\n symbols = []\n for s in self._symbols:\n t = s.replace(\"/\", \"\")\n symbols.append(t)\n self._symbols_map[t] = s\n\n if not symbols:\n logger.warn(\"symbols not found in config file.\", caller=self)\n return\n if not self._channels:\n logger.warn(\"channels not found in config file.\", caller=self)\n return\n\n subscriptions = []\n for ch in self._channels:\n if ch == \"orderbook\":\n sub = {\"name\": \"l2\", \"symbols\": symbols}\n subscriptions.append(sub)\n else:\n logger.error(\"channel error! channel:\", ch, caller=self)\n if subscriptions:\n msg = {\n \"type\": \"subscribe\",\n \"subscriptions\": subscriptions\n }\n await self._ws.send(msg)\n logger.info(\"subscribe orderbook success.\", caller=self)",
"def connect_all(event=None):\n\n # Notify all, who want to register callbacks for connections\n notify(BeforeBrokerConnectEvent())\n\n # Gather all producer and consumer utility registrations\n from collective.zamqp.interfaces import IProducer, IConsumer\n regs = list(getUtilitiesFor(IProducer)) + list(getUtilitiesFor(IConsumer))\n\n # Connect all connections, which have related utilities registered\n for connection_id, connection in getUtilitiesFor(IBrokerConnection):\n if filter(lambda reg: reg[1].connection_id == connection_id, regs):\n connection.connect()",
"def run(self):\n with open('conf/ibclients.json', 'r') as cf:\n ib_conf = json.loads(cf.read())\n\n for ib_host in ib_conf:\n ib_thread = threading.Thread(target=self.ib_thread,\n kwargs=dict(ib_host=ib_host))\n ib_thread.daemon = True\n ib_thread.start()\n\n asyncore.loop()",
"def subscribe_to_objects(self):\n # define subscription interests\n interests = {\n \"epmMacEp\":{\"callback\": self.handle_epmMacEp}, \n \"epmIpEp\":{\"callback\": self.handle_epmIpEp},\n \"epmRsMacEpToIpEpAtt\":{\"callback\":self.handle_epmRsMacEpToIpEpAtt},\n \"fabricProtPol\":{\"callback\":self.handle_fabricProtPol},\n \"fabricExplicitGEp\":{\"callback\":self.handle_fabricExplicitGEp},\n \"vpcRsVpcConf\":{\"callback\":self.handle_vpcRsVpcConf},\n \"fabricNode\":{\"callback\": self.handle_fabricNode},\n \"fvCtx\": {\"callback\": self.handle_name_event},\n \"fvBD\": {\"callback\": self.handle_name_event},\n \"fvSvcBD\": {\"callback\": self.handle_name_event},\n \"fvEPg\": {\"callback\": self.handle_name_event},\n \"fvRsBd\": {\"callback\": self.handle_name_event},\n \"vnsRsEPpInfoToBD\": {\"callback\": self.handle_name_event},\n \"l3extExtEncapAllocator\": {\"callback\": self.handle_name_event},\n \"fvSubnet\": {\"callback\": self.handle_subnet_event},\n \"fvIpAttr\": {\"callback\": self.handle_subnet_event},\n }\n try:\n while 1:\n # start worker processes\n self.start_workers()\n \n # enqueue initial rebuild jobs created from stage_ep_history_db\n while len(self.rebuild_jobs)>0:\n self.enqueue_job(self.rebuild_jobs.pop(0))\n\n # override max_key_count if trust_subscription is disabled\n if not self.trust_subscription:\n self.max_key_count = 64\n\n # start subscriptions\n ept_utils.add_fabric_event(self.fabric, \"Running\", \"\")\n rc = ept_utils.subscribe(self.fabric, interests=interests, \n checker=check_apic_health, \n controller=self.control_subscription,\n controller_interval=self.controller_interval)\n # restart subscription if we see a stateful subscription close\n if rc == ept_utils.RC_SUBSCRIPTION_CLOSE:\n self.stop_workers(delay=0.1)\n logger.warn(\"received subscripton close, re-subscribe\")\n ept_utils.add_fabric_event(self.fabric, \"Re-initializing\",\n \"Restarting subscription\")\n continue\n elif rc == ept_utils.RC_SUBSCRIPTION_FAIL:\n logger.warn(\"received subscription fail\")\n ept_utils.add_fabric_event(self.fabric, \"Restarting\",\n \"APIC subscription failed\")\n else:\n logger.warn(\"unexpected subscription rc: %s\" % rc)\n break\n finally:\n # if subscriptions unexpectedly close, stop workers\n logger.debug(\"subscription unexpectedly ended\")\n self.stop_workers(delay=0.1)",
"def register_binding(self, binding_parameters):\n\n for consumer in self._amqp_consumers:\n consumer.register_binding(binding_parameters)",
"def _connect_accounts(self):\n\n for account in self.accounts.keys():\n self._connect(account)",
"def bind(self, exchange, routing_key=None, arguments=None, nowait=False):\n if isinstance(exchange, Exchange):\n exchange = exchange.name\n\n return self.channel.queue_bind(\n queue=self.name,\n exchange=exchange,\n routing_key=routing_key or '',\n arguments=arguments,\n nowait=nowait\n )",
"def test_connection(self):\n for sender in self.senders:\n sender.test_connection()",
"async def _track_and_propagate_available_endpoints(self) -> None:\n async for ev in self.wait_iter(self._endpoint.stream(EventBusConnected)):\n self._available_endpoints = self._available_endpoints + (ev.connection_config,)\n self.logger.debug(\"New EventBus Endpoint connected %s\", ev.connection_config.name)\n # Broadcast available endpoints to all connected endpoints, giving them\n # a chance to cross connect\n await self._endpoint.broadcast(AvailableEndpointsUpdated(self._available_endpoints))\n self.logger.debug(\"Connected EventBus Endpoints %s\", self._available_endpoints)",
"def _initialise_bindings(self):\n\n for enemy in self.game.enemies:\n self.bind(powered_up=enemy.switch_frightened_state)\n Character._initialise_bindings(self)",
"def run():\n global connection, channel\n while _isRunning:\n try:\n if channel:\n if len(_subscriptions) > 0: # don't start conuming when there are no subscriptions, this doesn't work\n channel.start_consuming()\n else:\n channel.connection.process_data_events() # make certain that hearbeat is processed\n sleep(2) # no need to loop continuosly if there are no subscriptions, give the cpu some rest until we have something to monitor.\n except:\n logger.exception(\"broker communication failure\")\n channel = None\n if _isRunning and len(_subscriptions) > 0: # don't try to reconnect if there are no subscriptions, no need for this part of the code.\n logger.error(\"reconnecting from main loop\")\n reconnect()",
"def _setup_subscriptions(self):\n for handler in self.handlers:\n exchange, topics = handler.get_exchange_topics()\n\n for topic in topics:\n queue_name = \"designate.notifications.%s.%s.%s\" % (\n handler.get_canonical_name(), exchange, topic)\n\n self.rpc_conn.join_consumer_pool(\n self._process_notification,\n queue_name,\n topic,\n exchange_name=exchange)",
"def get_exchanges(self):\n pass",
"def dispatch(self):\n servers = self.settingsInstance.settings['servers']\n for name, info in servers.items():\n self.botObjects[name] = src.irc.botObject.BotObject(\n self.settingsInstance.settings,\n info\n )\n thread = threading.Thread(\n target=self.botObjects[name].connectToServer\n )\n thread.start()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Override this to return the queues you are going to use for your worker. It should return a mapping of queue names to queue objects.
|
def get_queues(self):
pass
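# Hedged sketch of a possible override -- the queue is bound to the 'psms'
# exchange from the get_exchanges() sketch above; the queue name and routing
# key are assumptions:
from kombu import Exchange, Queue

def get_queues(self):
    return {'psms': Queue('psms',
                          exchange=Exchange('psms', type='direct'),
                          routing_key='psms')}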
|
[
"def get_spider_queues(config):\n queues = {}\n for project in get_project_list(config):\n table = 'scrapy_%s_queue' % project\n queues[project] = PgQueue(config, table=table)\n return queues",
"def get_queues():\n queues = []\n for name, display_name in tasks.TASK_QUEUE_DISPLAY_NAMES.items():\n queue = {\n 'name': name,\n 'display_name': display_name,\n }\n queues.append(queue)\n\n queues.sort(key=lambda q: q['display_name'])\n return queues",
"def get_queue_dict(all_queues, thread=True):\n queue_dict = dict()\n for q in all_queues:\n if thread:\n queue_dict[q] = Queue.Queue()\n else:\n queue_dict[q] = multiprocessing.Queue()\n return queue_dict",
"def list_queues(self):\n endpoint = self.build_url(\"/queues\")\n return self.request('get', endpoint)",
"def list_queues():\n queues = _list_queues()\n return queues",
"def get_spider_queues(config):\r\n dbsdir = config.get('dbs_dir', 'dbs')\r\n if not os.path.exists(dbsdir):\r\n os.makedirs(dbsdir)\r\n d = {}\r\n for project in get_project_list(config):\r\n dbpath = os.path.join(dbsdir, '%s.db' % project)\r\n d[project] = SqliteSpiderQueue(dbpath)\r\n return d",
"def describe_job_queues(self, jobQueues: List = None, maxResults: int = None, nextToken: str = None) -> Dict:\n pass",
"def _list_queues():\n queue_dir = __opts__[\"sqlite_queue_dir\"]\n files = os.path.join(queue_dir, \"*.db\")\n paths = glob.glob(files)\n queues = [os.path.splitext(os.path.basename(item))[0] for item in paths]\n\n return queues",
"def get_dead_letter_queues(self):\n raise NotImplementedError",
"def list_queues(self, name=None, return_columns=None, use_ems=True):\n raw_queues = self._list_queues(return_columns=return_columns, use_ems=use_ems)\n\n nl = lambda x: (name is None) or (name is not None and name in x)\n\n if return_columns is None:\n queues = [x['name'] for x in raw_queues if nl(x['name'])]\n else:\n queues = [x for x in raw_queues if nl(x['name'])]\n\n return queues",
"def _list_queues(self, return_columns=None, use_ems=True):\n feats = \"%2f\"\n if isinstance(return_columns, list):\n feats += \"?columns=\" + ','.join(return_columns)\n url = self._get_management_url(\"queues\", feats)\n raw_queues = self._call_management(url, use_ems=use_ems)\n\n return raw_queues",
"def test_queues_get_queues_v1(self):\n pass",
"def setup_job_queues(self):\n self.conn = Redis('localhost', 6379)\n self.generate_queue = Queue('generate', connection=self.conn, default_timeout=\"1h\")\n self.email_queue = Queue('notify_email', connection=self.conn)",
"def workers(self):\n # type: () -> Dict\n return self.__workers",
"def dump_queue(self, *names):\n conn = redis.StrictRedis(connection_pool=self.pool)\n for name in names:\n if name == 'worker':\n logger.debug('last worker: ' + conn.get(self._key_worker()))\n elif name == 'available':\n logger.debug('available: ' +\n str(conn.zrevrange(self._key_available(), 0, -1,\n withscores=True)))\n elif name == 'priorities':\n logger.debug('priorities: ' +\n str(conn.hgetall(self._key_priorities())))\n elif name == 'expiration':\n logger.debug('expiration: ' +\n str(conn.zrevrange(self._key_expiration(), 0, -1,\n withscores=True)))\n elif name == 'workers':\n logger.debug('workers: ' +\n str(conn.hgetall(self._key_workers())))\n elif name.startswith('reservations_'):\n item = name[len('reservations_'):]\n logger.debug('reservations for ' + item + ': ' +\n str(conn.smembers(self._key_reservations(item))))",
"def list_queues(backend=\"sqlite\"):\n queue_funcs = salt.loader.queues(__opts__)\n cmd = \"{}.list_queues\".format(backend)\n if cmd not in queue_funcs:\n raise SaltInvocationError('Function \"{}\" is not available'.format(cmd))\n ret = queue_funcs[cmd]()\n return ret",
"def get_queue_names(self, headers=HEADERS):\n result = self.get_queues()\n queue_ls = result['ls']\n queuename_ls = []\n for queue in queue_ls:\n queuename_ls.append(queue['name'])\n\n return queuename_ls",
"def get_agent_queues(self, project=None, queue_name=None, action_filter=None):\n route_values = {}\n if project is not None:\n route_values['project'] = self._serialize.url('project', project, 'str')\n query_parameters = {}\n if queue_name is not None:\n query_parameters['queueName'] = self._serialize.query('queue_name', queue_name, 'str')\n if action_filter is not None:\n query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')\n response = self._send(http_method='GET',\n location_id='900fa995-c559-4923-aae7-f8424fe4fbea',\n version='6.0-preview.1',\n route_values=route_values,\n query_parameters=query_parameters)\n return self._deserialize('[TaskAgentQueue]', self._unwrap_collection(response))",
"def get_agent_queues_by_names(self, queue_names, project=None, action_filter=None):\n route_values = {}\n if project is not None:\n route_values['project'] = self._serialize.url('project', project, 'str')\n query_parameters = {}\n if queue_names is not None:\n queue_names = \",\".join(queue_names)\n query_parameters['queueNames'] = self._serialize.query('queue_names', queue_names, 'str')\n if action_filter is not None:\n query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')\n response = self._send(http_method='GET',\n location_id='900fa995-c559-4923-aae7-f8424fe4fbea',\n version='6.0-preview.1',\n route_values=route_values,\n query_parameters=query_parameters)\n return self._deserialize('[TaskAgentQueue]', self._unwrap_collection(response))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Loop over all queues in the self.queues dictionary and bind them to the current channel. Called in self.connect() right after the connection with the message broker has been established. Assumes there is only one channel and one connection.
|
def bind_queues(self):
for name, queue in self.queues.items():
self.queues[name] = queue(self.channel)
self.queues[name].declare()
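# Hedged detail behind the loop above: a Kombu Queue bound to a channel can
# declare itself on the broker; declare() covers the exchange, the queue and
# the binding. URL and names are illustrative assumptions.
from kombu import Connection, Exchange, Queue

with Connection('amqp://guest:guest@localhost//') as conn:
    channel = conn.channel()
    queue = Queue('psms',
                  exchange=Exchange('psms', type='direct'),
                  routing_key='psms')(channel)   # bound copy
    queue.declare()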
|
[
"def on_bindok(self, frame):\n logger.debug('Queue bound')\n self.start_consuming()",
"def start_handling_messages(self):\n for amqp_consumer in self._amqp_consumers:\n amqp_consumer.start_handling_messages()",
"def bind(self, queue, routing_key=None, arguments=None, nowait=False):\n if isinstance(queue, Queue):\n queue = queue.name\n\n # [to update: return object?]\n return self.channel.queue_bind(\n queue=queue,\n exchange=self.name,\n routing_key=routing_key or '',\n arguments=arguments,\n nowait=nowait\n )",
"def populate_queue(ports):\n for port in ports:\n PORT_QUEUE.put(port)",
"def bind_exchanges(self):\n\n for name, exchange in self.exchanges.items():\n self.exchanges[name] = exchange(self.channel)",
"def _loop_through_queues(self):\n if 'unload' in self:\n self._unload_plugins()\n del self['unload']\n if 'load' in self:\n self._load_plugins()\n del self['load']\n if 'reload' not in self:\n return\n self['load'] = set(self['reload'])\n del self['reload']",
"def get_queues(self):\n pass",
"def process_queue():\r\n while True:\r\n current_target = targets_queue.get()\r\n for user in users:\r\n for passwd in passwds:\r\n ssh_connect(current_target, user, passwd, port_num)\r\n targets_queue.task_done()",
"def join_all(self):\n for channel in self.config['channels']:\n self.join(channel)",
"def __setup_kombu_queue(self, config):\n configs = config[u'config']\n for item in configs:\n if item[u'group'] == u'queue':\n value = item[u'value']\n queue = value[u'queue']\n uri = value[u'uri']\n manager = RedisManager(uri)\n manager.server.set(u'_kombu.binding.%s' % queue, value)",
"def bind(self, exchange, routing_key=None, arguments=None, nowait=False):\n if isinstance(exchange, Exchange):\n exchange = exchange.name\n\n return self.channel.queue_bind(\n queue=self.name,\n exchange=exchange,\n routing_key=routing_key or '',\n arguments=arguments,\n nowait=nowait\n )",
"def register_binding(self, binding_parameters):\n\n for consumer in self._amqp_consumers:\n consumer.register_binding(binding_parameters)",
"def __on_queue_declareok(self, _):\n LOGGER.info('Binding %s to %s with %s',\n self.exchange, self.queue, self.routing_key)\n self._channel.queue_bind(self.__on_bindok,\n self.queue,\n self.exchange,\n self.routing_key)",
"def process_queue(GWs):\n # Create a dict gsmmodems of modem connections to gw's of type gsmmodem\n gsmmodems = {}\n for (gw, gw_params) in GWs['gw'].items():\n if gw_params['type'] == 'gsmmodem':\n gsmmodems[gw] = connect_to_gsmmodem(gw_params['tty'], gw_params['baud'], gw_params['pin'])\n\n print(\"Info: Ready to process queue\")\n while True:\n time.sleep(1) # Less CPU use + Idle betwen getting semaphone - needed?, not tried without\n with queue_semaphore:\n if len(queue):\n qprocess = get_element_with_highest_priority(queue)\n res = exec_queue_job(GWs, gsmmodems, qprocess)\n qprocess['clientsocket'].send(json.dumps(res).encode())\n qprocess['clientsocket'].close()",
"async def reader_worker(self):\n try:\n while True:\n data = await self.reader.readline()\n print('SOCKET <', data)\n for queue in self.outbound_queues:\n await queue.put(data.decode())\n finally:\n self.reader = None",
"def connect_queue(self, queue):\n queue.consume(self.handle_complaint)",
"def send_messages(self, queues):\n for q in queues:\n queue = q['queue']\n try:\n m = queue.get(block=False)\n org, flow = q['dest_channel'].split('|')\n url = '{server}/flows/{org}/{flow}/messages'.format(\n server=self.server,\n org=org,\n flow=flow,\n )\n auth = (self.token, '')\n payload = {\n 'event': 'message',\n 'content': self.format_message(m),\n }\n headers = {\n 'Content-Type': 'application/json'\n }\n r = requests.post(url,\n data=json.dumps(payload),\n auth=auth,\n headers=headers)\n if not r.status_code == 201:\n raise Exception(r.text)\n sent_msg_counter.labels('flowdock', q['dest_channel']).inc()\n queue.task_done()\n except Queue.Empty:\n pass",
"def _setup_subscriptions(self):\n for handler in self.handlers:\n exchange, topics = handler.get_exchange_topics()\n\n for topic in topics:\n queue_name = \"designate.notifications.%s.%s.%s\" % (\n handler.get_canonical_name(), exchange, topic)\n\n self.rpc_conn.join_consumer_pool(\n self._process_notification,\n queue_name,\n topic,\n exchange_name=exchange)",
"def on_queue_declareok(self, method_frame):\n logger.debug('Binding %s to %s with %s', self.exchange, self.queue, self.routing_key)\n self._channel.queue_bind(self.queue, self.exchange,\n routing_key=self.routing_key, callback=self.on_bindok)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Override this to return the consumers you are going to use for your worker. It should return a mapping of consumer names to consumer objects. There is no 'bind_consumers' method because Kombu forces you to instantiate consumers already bound to a channel.
|
def get_consumers(self):
pass
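# Hedged sketch of a possible override -- `handle_message` is a hypothetical
# callback on the worker; Kombu consumers are created already bound to a
# channel, which is why there is no bind_consumers() step:
from kombu import Consumer

def get_consumers(self):
    return {'psms': Consumer(self.channel,
                             queues=[self.queues['psms']],
                             callbacks=[self.handle_message])}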
|
[
"def get_all_consumers(self):\n return self.consumers",
"def get_queues(self):\n pass",
"def get_producers(self):\n return {'psms': Producer(self.channel, exchange=self.exchanges['psms'])}",
"def list_consumers(self):\n endpoint = self.build_url(\"/consumers\")\n return self.request('get', endpoint)",
"def get_task_consumer(self, connection, queues=None, **kwargs):\n return self.ConsumerSet(connection, from_dict=queues or self.queues,\n **kwargs)",
"def workers(self):\n # type: () -> Dict\n return self.__workers",
"def start_consumers(self, msg_runner):\n topic_base = CONF.cells.rpc_driver_queue_base\n proxy_manager = InterCellRPCDispatcher(msg_runner)\n # NOTE(comstud): We do not need to use the object serializer\n # on this because object serialization is taken care for us in\n # the messaging module.\n dispatcher = rpc_dispatcher.RpcDispatcher([proxy_manager])\n for msg_type in msg_runner.get_message_types():\n topic = '%s.%s' % (topic_base, msg_type)\n self._start_consumer(dispatcher, topic)",
"def get_producers(self):\n # generators, ofc how could I forget!\n if not self._producers:\n return None\n\n for producer in self._producers:\n yield producer_factory(producer)",
"def get_producers(self):\n pass",
"def bind_exchanges(self):\n\n for name, exchange in self.exchanges.items():\n self.exchanges[name] = exchange(self.channel)",
"def get_dead_letter_queues(self):\n raise NotImplementedError",
"def _RestoreConsumers(self):\n data = TryLoadJSON(self.consumers_list_path, self.logger.name)\n if data:\n for name in data:\n self.consumers[name] = self._CreateConsumer(name)",
"def get_spider_queues(config):\n queues = {}\n for project in get_project_list(config):\n table = 'scrapy_%s_queue' % project\n queues[project] = PgQueue(config, table=table)\n return queues",
"def list_consumers(self, publisher_id=None):\n query_parameters = {}\n if publisher_id is not None:\n query_parameters['publisherId'] = self._serialize.query('publisher_id', publisher_id, 'str')\n response = self._send(http_method='GET',\n location_id='4301c514-5f34-4f5d-a145-f0ea7b5b7d19',\n version='5.1',\n query_parameters=query_parameters)\n return self._deserialize('[Consumer]', self._unwrap_collection(response))",
"def get_exchanges(self):\n pass",
"def create(self):\n\n dependencies = self.create_dependencies()\n\n # Create the consumer.\n consumer = messaging.consuming.consumers.Simple(\n receiver=dependencies['receiver'],\n handler=dependencies['handler'],\n filters=dependencies['filters'])\n\n # Include blocking.\n consumer = messaging.consuming.consumers.Blocking(\n consumer=consumer,\n interval=self._properties['consumer']['interval'])\n\n # Include orchestration.\n logger_factory = Logger(properties=self._properties)\n logger = logger_factory.create()\n consumer = consuming.consumers.Orchestrating(consumer=consumer,\n logger=logger)\n\n return consumer",
"def workers(self) -> WorkerManager:\n return self.app.workers",
"def workers(self):\n return self.instance.get_task_workers(self.name)",
"def bind_queues(self):\n\n for name, queue in self.queues.items():\n self.queues[name] = queue(self.channel)\n self.queues[name].declare()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Override this to return the producers you are going to use for your worker. It should return a mapping of producer names to producer objects. There is no 'bind_producers' method because Kombu forces you to instantiate producers already bound to a channel.
|
def get_producers(self):
pass
|
[
"def get_producers(self):\n return {'psms': Producer(self.channel, exchange=self.exchanges['psms'])}",
"def get_producers(self):\n # generators, ofc how could I forget!\n if not self._producers:\n return None\n\n for producer in self._producers:\n yield producer_factory(producer)",
"def workers(self):\n # type: () -> Dict\n return self.__workers",
"def get_consumers(self):\n pass",
"def get_queues(self):\n pass",
"def _initProducers(self, actor, args):\n\n # for every producer model in the producers, we get the user set services `argKey` to be consumed in the blueprint itself.\n # calculate the difference of the available services and the user set\n # calculate the min required services and see if we should create new ones if auto is set\n # create the services required till the minServices is reached.\n # set add each to our producers and add ourself the their consumers list.\n # maintain the parent relationship (parent is always a producer and we are always a consumer of the parent.)\n\n for producer_model in actor.model.dbobj.producers:\n producer_role = producer_model.actorRole\n usersetservices = []\n passedservicesnames = args.get(producer_model.argKey, args.get(producer_role, \"\"))\n if not j.data.types.list.check(passedservicesnames):\n passedservicesnames = [passedservicesnames]\n for svname in passedservicesnames:\n if svname:\n foundservices = self.aysrepo.servicesFind(name=svname, actor=\"%s(\\..*)?\" % producer_model.actorRole)\n usersetservices.extend(foundservices)\n\n available_services = self.aysrepo.servicesFind(actor=producer_role)\n available_services = list(set(available_services)-set(usersetservices))\n\n extraservices = len(usersetservices) - producer_model.maxServices\n if extraservices > 0:\n raise j.exceptions.Input(message=\"Specified services [%s] are more than maximum services: [%s]\"%(str(usersetservices), str(producer_model.maxServices)),\n level=1, source=\"\", tags=\"\", msgpub=\"\")\n\n tocreate = producer_model.minServices-len(available_services)-len(usersetservices)\n if tocreate > 0:\n if producer_model.auto:\n for idx in range(tocreate):\n auto_actor = self.aysrepo.actorGet(producer_role)\n available_services.append(auto_actor.serviceCreate(instance=\"auto_%s\" % idx, args={}))\n else:\n raise j.exceptions.Input(message=\"Minimum number of services required is %s and only %s are provided. [Hint: Maybe you want to set auto to auto create the missing services?]\" % (producer_model.minServices, len(usersetservices)),\n level=1, source=\"\", tags=\"\", msgpub=\"\")\n\n for idx, producer_obj in enumerate(usersetservices + available_services):\n if idx >= len(usersetservices) and idx >= producer_model.minServices:\n break\n self.model.producerAdd(\n actorName=producer_obj.model.dbobj.actorName,\n serviceName=producer_obj.model.dbobj.name,\n key=producer_obj.model.key)\n # add ourself to the consumers list of the producer\n producer_obj.model.consumerAdd(\n actorName=self.model.dbobj.actorName,\n serviceName=self.model.dbobj.name,\n key=self.model.key)\n\n if self.parent is not None:\n # add parent to the producers list.\n self.model.producerAdd(\n actorName=self.parent.model.dbobj.actorName,\n serviceName=self.parent.model.dbobj.name,\n key=self.parent.model.key)\n\n # add ourself to the consumers list of the parent\n self.parent.model.consumerAdd(\n actorName=self.model.dbobj.actorName,\n serviceName=self.model.dbobj.name,\n key=self.model.key)",
"def __setup_processors():\n processors = {}\n for processor_id in ['standalone-processor-1', 'standalone-processor-2', 'standalone-processor-3']:\n processors[processor_id] = StreamProcessor(host_name='localhost', processor_id=processor_id)\n processors[processor_id].start()\n return processors",
"def workers(self) -> WorkerManager:\n return self.app.workers",
"def workers(self):\n return self.instance.get_task_workers(self.name)",
"def get_rabbitmq_admin_instances():\n instances = {}\n\n for instance in LIFEGUARD_RABBITMQ_INSTANCES:\n key = instance.upper()\n instances[instance] = {\n \"base_url\": SETTINGS_MANAGER.read_value(\n \"LIFEGUARD_RABBITMQ_{}_ADMIN_BASE_URL\".format(key)\n ),\n \"user\": SETTINGS_MANAGER.read_value(\n \"LIFEGUARD_RABBITMQ_{}_ADMIN_USER\".format(key)\n ),\n \"passwd\": SETTINGS_MANAGER.read_value(\n \"LIFEGUARD_RABBITMQ_{}_ADMIN_PASSWD\".format(key)\n ),\n \"vhost\": SETTINGS_MANAGER.read_value(\n \"LIFEGUARD_RABBITMQ_{}_ADMIN_VHOST\".format(key)\n ),\n }\n\n return instances",
"def get_workers(self):\n with self._engine.begin() as conn:\n worker_rows = conn.execute(\n select([cl_worker, cl_worker_dependency.c.dependencies]).select_from(\n cl_worker.outerjoin(\n cl_worker_dependency,\n cl_worker.c.worker_id == cl_worker_dependency.c.worker_id,\n )\n )\n ).fetchall()\n worker_run_rows = conn.execute(cl_worker_run.select()).fetchall()\n\n worker_dict = {\n (row.user_id, row.worker_id): {\n 'user_id': row.user_id,\n 'worker_id': row.worker_id,\n 'group_uuid': row.group_uuid,\n 'tag': row.tag,\n 'cpus': row.cpus,\n 'gpus': row.gpus,\n 'memory_bytes': row.memory_bytes,\n 'free_disk_bytes': row.free_disk_bytes,\n 'checkin_time': row.checkin_time,\n 'socket_id': row.socket_id,\n # run_uuids will be set later\n 'run_uuids': [],\n 'dependencies': row.dependencies\n and self._deserialize_dependencies(row.dependencies),\n 'shared_file_system': row.shared_file_system,\n 'tag_exclusive': row.tag_exclusive,\n 'exit_after_num_runs': row.exit_after_num_runs,\n 'is_terminating': row.is_terminating,\n 'preemptible': row.preemptible,\n }\n for row in worker_rows\n }\n for row in worker_run_rows:\n worker_dict[(row.user_id, row.worker_id)]['run_uuids'].append(row.run_uuid)\n return list(worker_dict.values())",
"def _create_workers(self):\n for worker_config in self.__config.worker_configs:\n worker = CopyingManagerWorker(self.__config, worker_config)\n self.__workers[worker_config[\"id\"]] = worker",
"def workers(self):\n return self._wrap_get('/workers')",
"def get_spider_queues(config):\n queues = {}\n for project in get_project_list(config):\n table = 'scrapy_%s_queue' % project\n queues[project] = PgQueue(config, table=table)\n return queues",
"def bind_exchanges(self):\n\n for name, exchange in self.exchanges.items():\n self.exchanges[name] = exchange(self.channel)",
"def start_consumers(self, msg_runner):\n topic_base = CONF.cells.rpc_driver_queue_base\n proxy_manager = InterCellRPCDispatcher(msg_runner)\n # NOTE(comstud): We do not need to use the object serializer\n # on this because object serialization is taken care for us in\n # the messaging module.\n dispatcher = rpc_dispatcher.RpcDispatcher([proxy_manager])\n for msg_type in msg_runner.get_message_types():\n topic = '%s.%s' % (topic_base, msg_type)\n self._start_consumer(dispatcher, topic)",
"def get_dead_letter_queues(self):\n raise NotImplementedError",
"def get_all_consumers(self):\n return self.consumers",
"def get_exchanges(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Only one producer for all messages, since we have only one exchange.
|
def get_producers(self):
return {'psms': Producer(self.channel, exchange=self.exchanges['psms'])}
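A minimal end-to-end sketch of the same one-producer pattern, assuming kombu and a running broker; the connection URL, exchange name and routing key below are illustrative, not taken from the source.

# Minimal sketch of the one-producer pattern (assumption: kombu and a running
# broker are available; URL, exchange name and routing key are illustrative).
from kombu import Connection, Exchange, Producer

with Connection("amqp://guest:guest@localhost//") as conn:
    channel = conn.channel()
    # Bind the exchange to the channel, then reuse a single producer for it.
    psms_exchange = Exchange("psms", type="direct")(channel)
    producer = Producer(channel, exchange=psms_exchange)
    producer.publish({"body": "hello"}, routing_key="psms")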
|
[
"def worker(self, producer):\r\n while self.redis.get_status('producer') == 'run':\r\n for table in self.producer_table:\r\n logger.info(f'start processing : {table}')\r\n producer.send(table)",
"async def producer():\n MQStatus.mqp_connected = False\n while True:\n # If not connected to kafka, attempt to connect...\n if not MQStatus.mqp_connected:\n try:\n logger.info(\"Producer client not connected, attempting to connect...\")\n await mqp.start()\n logger.info(\"Producer client connected!\")\n MQStatus.mqp_connected = True\n except KafkaError:\n logger.exception('Producer client hit error, triggering re-connect...')\n await asyncio.sleep(RETRY_INTERVAL)\n continue\n\n # Pull items off our queue to produce\n if not produce_queue:\n await asyncio.sleep(0.1)\n continue\n\n for _ in range(0, len(produce_queue)):\n item = produce_queue.popleft()\n topic = item['topic']\n msg = item['msg']\n logger.info(\n \"Popped item from produce queue (qsize: %d): topic %s: %s\",\n len(produce_queue), topic, msg\n )\n try:\n await mqp.send_and_wait(topic, json.dumps(msg).encode('utf-8'))\n logger.info(\"Produced on topic %s: %s\", topic, msg)\n except KafkaError:\n logger.exception('Producer client hit error, triggering re-connect...')\n MQStatus.mqp_connected = False\n # Put the item back on the queue so we can push it when we reconnect\n produce_queue.appendleft(item)",
"def get_producer():\n from ... import __producer__\n return __producer__",
"def queue_producer(self, producer):\n try:\n for item in producer:\n self.semaphore.acquire()\n yield item\n except:\n logger.exception(\"Error in producer parallel task\")",
"def mq_send_once(exchange, routing_key, message):\n connection = mq_connect()\n confirmation = mq_send(connection.channel(), exchange, routing_key, message)\n connection.close()\n return confirmation",
"def test_user_specified_service_producer(self):\n task_queue = kombu.Queue(\"tasks\", kombu.Exchange(\"tasks\"), routing_key=\"tasks\")\n to_publish = {\"hello\": \"world\"}\n\n def process_message(body, message):\n message.ack()\n\n with self.tracer.trace(\"parent\", service=\"parentsvc\"):\n self.producer.publish(\n to_publish, exchange=task_queue.exchange, routing_key=task_queue.routing_key, declare=[task_queue]\n )\n\n with kombu.Consumer(self.conn, [task_queue], accept=[\"json\"], callbacks=[process_message]) as consumer:\n Pin.override(consumer, tracer=self.tracer)\n self.conn.drain_events(timeout=2)\n\n spans = self.get_spans()\n self.assertEqual(len(spans), 3)\n # Parent and producer spans should have parent service\n assert spans[0].service == \"parentsvc\"\n assert spans[1].service == \"parentsvc\"\n # Consumer span should have global service\n assert spans[2].service == \"mysvc\"",
"def produce(self, topic, msg):\n pass",
"def get_producer(self):\n if not self._producer:\n self.set_producer()\n return self._producer",
"def _produce(self, topic, key, value):\n return self.producer.produce(topic=topic, key=key, value=value,\n callback=self._delivery_report)",
"def kafka_payment_producer_worker(mq: queue.Queue):\n global app_config\n\n # Client\n producer = KafkaProducer(bootstrap_servers=bootstrap_servers,\n value_serializer=lambda item: json.dumps(item).encode('utf-8'))\n\n while not t_stop_event.is_set():\n try:\n if mq.qsize() > 0:\n # Topic + Message\n msg = mq.get()\n logging.info(\"GET %s FROM QUEUE AND SENDING TO %s\" % (msg, 'payment'))\n producer.send('payment', msg)\n # Force buffer flush in order to send the message\n logging.info(\"MESSAGE SENT !\")\n producer.flush()\n except Exception as e:\n logging.fatal(e, exc_info=True)\n\n producer.close()\n return",
"def publish_message(self, message, queue):",
"def produce_kafka_messages(cfg):\n setup_logging(cfg['logging'])\n logger = getLogger('producer')\n\n if cfg['producer']['message_frequency_hz']:\n rate = 1. / cfg['producer']['message_frequency_hz']\n else:\n rate = None\n\n p = Producer({'bootstrap.servers': cfg['bootstrap.servers']})\n\n while True:\n if not cfg['producer']['num_messages']:\n break\n p.poll(0)\n payload = uuid4().__str__()\n logger.info(payload)\n p.produce(cfg['topic'], payload, partition=0, callback=delivery_report)\n p.flush()\n cfg['producer']['num_messages'] -= 1\n if rate:\n sleep(rate)\n else:\n sleep(lognormvariate(1, 1))",
"def send_messages_to_consumer(producer, topic_name: str = \"sample_customer_profile\"):\n data = get_fake_data()\n for message in data:\n print(f\"Sending message from producer: {message}\")\n producer.send(topic_name, dumps(message).encode(\"utf-8\"))\n\n # Wait for all messages to be sent\n print(f\"All producermessages sent to consumer for topic {topic_name}\")\n producer.flush()",
"def produce_consume():\n logger = logging.getLogger(__name__)\n\n even_consumer = actors.Printer.start(\"Even Printer\")\n odd_consumer = actors.Printer.start(\"Odd Printer\")\n producer = NumberGenerator.start(\"RNG\")\n producer.proxy().register(even_consumer, 'even number')\n producer.proxy().register(odd_consumer, 'odd number')\n\n logger.info(\"Producing for 2 seconds at an interval of 0.1 seconds...\")\n producer.tell({'command': 'start producing', 'interval': 0.1})\n time.sleep(2)\n producer.tell({'command': 'stop producing'})\n time.sleep(2)\n logger.info(\"Producing for 2 seconds at an interval of 0.5 seconds...\")\n producer.tell({'command': 'start producing', 'interval': 0.5})\n time.sleep(2)\n producer.tell({'command': 'stop producing'})\n time.sleep(2)\n logger.info(\"Producing for 2 seconds...\")\n producer.tell({'command': 'start producing'})\n time.sleep(2)\n producer.tell({'command': 'stop producing'})\n logger.info(\"Quitting\")\n\n pykka.ActorRegistry.stop_all() # stop actors in LIFO order",
"def get_producers(self):\n # generators, ofc how could I forget!\n if not self._producers:\n return None\n\n for producer in self._producers:\n yield producer_factory(producer)",
"def start_handling_messages(self):\n for amqp_consumer in self._amqp_consumers:\n amqp_consumer.start_handling_messages()",
"def kafka_restaurant_producer_worker(mq: queue.Queue):\n global app_config\n\n # Client\n producer = KafkaProducer(bootstrap_servers=bootstrap_servers,\n value_serializer=lambda item: json.dumps(item).encode('utf-8'))\n\n while not t_stop_event.is_set():\n try:\n if mq.qsize() > 0:\n # Topic + Message\n msg = mq.get()\n logging.info(\"GET %s FROM QUEUE AND SENDING TO %s\" % (msg, 'restaurant'))\n producer.send('restaurant', msg)\n # Force buffer flush in order to send the message\n logging.info(\"MESSAGE SENT !\")\n producer.flush()\n except Exception as e:\n logging.fatal(e, exc_info=True)\n\n producer.close()\n return",
"def kafka_delivery_producer_worker(mq: queue.Queue):\n global app_config\n\n # Client\n producer = KafkaProducer(bootstrap_servers=bootstrap_servers,\n value_serializer=lambda item: json.dumps(item).encode('utf-8'))\n\n while not t_stop_event.is_set():\n try:\n if mq.qsize() > 0:\n # Topic + Message\n msg = mq.get()\n logging.info(\"GET %s FROM QUEUE AND SENDING TO %s\" % (msg, 'delivery'))\n producer.send('delivery', msg)\n # Force buffer flush in order to send the message\n logging.info(\"MESSAGE SENT !\")\n producer.flush()\n except Exception as e:\n logging.fatal(e, exc_info=True)\n\n producer.close()\n return",
"def producer(self, enricher):\r\n while True:\r\n data: ETLProducerTable = (yield)\r\n lasttime = self.redis.get_lasttime(data.table) or self.pgbase.get_first_object_time(data.table)\r\n idlist = self.pgbase.get_updated_object_id(lasttime, data.table, self.limit)\r\n logger.info(f'get new or modifed data from postgress \"{data.table}\" table')\r\n try:\r\n lasttime = self.redis.set_lasttime(data.table, idlist[-1].modified)\r\n except IndexError:\r\n logger.warning(f'No more new data in {data.table}')\r\n some_sleep(min_sleep_time=1, max_sleep_time=10)\r\n idlist = [filmid.id for filmid in idlist]\r\n enricher.send(ETLEnricherData(data, idlist))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a dictionary with all countries
|
def get_countries(self):
if self.db_connected:
data = {}
countries = self.cur.execute("SELECT id, key, name FROM countries ORDER BY name")
for country in countries.fetchall():
data[country[0]] = {
"iso_id" : country[1],
"name" : country[2]
}
return data
else:
return False
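For illustration, the shape of the dictionary this returns when the database is connected; the ids, ISO keys and names below are made up.

# Illustrative only: example of the structure returned by get_countries()
# (ids, ISO keys and names are made up).
countries = {
    1: {"iso_id": "AT", "name": "Austria"},
    2: {"iso_id": "DE", "name": "Germany"},
}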
|
[
"def allCountries():",
"def getCountriesInfos():\n countries = getCountries()\n return [(country['code'], country['name'], country['resource_uri'])\n for country in countries]",
"def get_countries(self):\n return self._make_transferto_request(action=\"pricelist\", info_type=\"countries\")",
"def select(self, country_list):\n countries = dict()\n for c in country_list:\n tmp = self.get_country_data(c)\n if tmp is not None:\n countries[c] = tmp\n return countries",
"def list_products_countries(self, country_list):\r\n products_countries_dic = {}\r\n for country in country_list:\r\n products_countries_dic[country] = self.list_products_country(country)\r\n return products_countries_dic",
"def getCountries():\r\n url = \"https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/apiservices/reference/v1.0/{}\"\r\n countries = requests.request(\"GET\", url.format(\"countries/en-US\"), headers = headers).json()['Countries']\r\n\r\n return countries",
"def getCountries():\n return loadJson(BASE_URL_COUNTRY, limit=0)['objects']",
"def countries():\n # Query all countries from the Invoices table\n results = session.query(Invoices.BillingCountry).\\\n group_by(Invoices.BillingCountry).all()\n\n # Convert list of tuples into normal list\n countries_list = list(np.ravel(results))\n\n return jsonify(countries_list)",
"def countries(self) -> List[str]:\n return self.get_type_values(registry.country)",
"def get_covid_countries_report():\n data = db.session.query(CovidWiki).filter(CovidWiki.state.is_(None)).all()\n return [v.to_dict() for v in data]",
"def countries(self):\n return [get_country_name(country_code, 'en_US').upper()\n for country_code in self.country_codes]",
"def get_etf_countries():\n\n return etf_countries_as_list()",
"def load_countries(self):\n countries = self.data['country head office']\n countries = countries.dropna()\n countries = countries.drop_duplicates().to_list()\n\n # Some names were written wrong, so some hard coded solution to rename those countries.\n countries[countries.index('Afganistan')] = 'Afghanistan'\n countries.remove('Netherlands Antilles')\n countries[countries.index('Gibralter')] = 'Gibraltar'\n countries.sort()\n\n return countries",
"def country_sites(countries):\n def country_sites(country):\n return [site.id for site in SITE_MODEL.objects.filter(country=country)]\n\n return {str(country.code): country_sites(country)\n for country in countries}",
"def _get_alternative_names_countries(self):\n names = dd(dict)\n pg.cur.execute(\"\"\"SELECT geonameid FROM countries\"\"\")\n for geonameid, in pg.cur.fetchall():\n pg.cur.execute(f\"\"\"SELECT name, full_name, population, country_geonameid, adm1_geonameid FROM geonames WHERE geonameid = {geonameid}\"\"\")\n res = pg.cur.fetchone()\n if res is None:\n continue\n name, full_name, population, country_geonameid, adm1_geonameid = res\n if name not in names:\n names[name] = {}\n\n geonameid_info = {\n 'type': 'country',\n 'abbreviations': [],\n \"toponym\": name,\n \"geonameid\": geonameid,\n \"population\": population,\n \"country_geonameid\": country_geonameid,\n \"adm1_geonameid\": adm1_geonameid\n }\n names[name][geonameid] = geonameid_info\n\n pg.cur.execute(f\"\"\"SELECT alternate_name, isolanguage, full_name FROM alternate_names WHERE geonameid = {geonameid}\"\"\")\n for name, isolanguage, full_name in pg.cur.fetchall():\n if name not in names:\n names[name] = {}\n if geonameid not in names[name]:\n names[name][geonameid] = geonameid_info\n if isolanguage == 'abbr':\n names[name][geonameid]['abbreviations'].append(full_name)\n return names",
"def allCountries(self):\n utility = zapi.getUtility(ICountriesStates)\n results = TitledVocabulary.fromTitles(utility.countries)\n\n return results._terms",
"def getCountries(self):\n\n owl_list = self.readTextFile(\n os.path.join(self.owl_path, self.owl_filename)\n )\n\n countries = []\n for i in range(len(owl_list)):\n if (\n owl_list[i].find('#Country\"') != -1 and \n owl_list[i - 1].find('owl:NamedIndividual') != -1\n ):\n start = owl_list[i - 1].find('#') + 1\n end = owl_list[i - 1].find('\"', start)\n individual = owl_list[i - 1][start : end]\n\n i += 1\n while (owl_list[i].find('owl:NamedIndividual') == -1):\n start = owl_list[i].find('>') + 1\n end = owl_list[i].find('<', start)\n field = owl_list[i][start : end]\n\n if (owl_list[i].find('alpha2Code') != -1):\n alpha2 = field\n elif (owl_list[i].find('alpha3Code') != -1):\n alpha3 = field\n elif (owl_list[i].find('countryID') != -1):\n id = int(field)\n elif (owl_list[i].find('countryName') != -1):\n name = field\n\n i += 1\n countries.append([id, individual, name, alpha2, alpha3])\n return pd.DataFrame(data=countries, columns=['id', 'individual', 'name', 'alpha2', 'alpha3'])",
"def load_country_codes():\n\n url = '/'.join([NAGER_API_BASE, 'AvailableCountries'])\n try:\n response = requests.get(url)\n response.raise_for_status()\n except requests.exceptions.RequestException as e:\n raise SystemExit(e)\n\n country_codes = response.json()\n country_codes_only = [pair[\"key\"] for pair in country_codes]\n\n return country_codes_only",
"def get_country_codes():\n country_codes = dataCache.get(\"country_codes\")\n\n if country_codes == None:\n country_codes = {}\n for loc in Location.objects.filter(level=Location.LEVEL_COUNTRY):\n country_codes[loc.code.upper()] = loc.id\n dataCache.set(\"country_codes\", country_codes)\n\n return country_codes"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a dictionary with all info for one country
|
def get_country_information(self, country):
    if self.db_connected:
        data = {}
        # Country-specific top-level format entries, falling back to the
        # defaults table for every key the country does not override.
        infos = self.cur.execute("""SELECT format.key, format.value, format.id FROM format
                        INNER JOIN countries ON countries.id=format.country_id
                        WHERE countries.key = :isoid AND format.parent_id IS NULL
                        UNION
                        SELECT key, value, 0 FROM defaults
                        WHERE NOT EXISTS(
                            SELECT format.key FROM format
                            INNER JOIN countries
                            ON countries.id=format.country_id
                            WHERE countries.key = :isoid AND format.parent_id IS NULL AND defaults.key = format.key
                        );""", {"isoid": country})
        for info in infos.fetchall():
            if info[0] != "sub_keys":
                data[info[0]] = info[1]
            elif info[0] == "sub_keys":
                # "sub_keys" rows point at nested administrative areas (states);
                # each state may in turn carry its own child format entries.
                states = {}
                sub_el = self.cur.execute("SELECT format.key, format.value, format.id FROM format WHERE parent_id=:parent_id", {"parent_id": info[2]})
                for sub in sub_el.fetchall():
                    states[sub[0]] = {}
                    state_el = self.cur.execute("SELECT format.key, format.value FROM format WHERE parent_id=:parent_id", {"parent_id": sub[2]})
                    for state in state_el.fetchall():
                        states[sub[0]][state[0]] = state[1]
                data["administrative_areas"] = states
        return data
    else:
        return False
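For illustration, a possible shape of the dictionary returned for one country; the keys and values depend entirely on the format and defaults tables and are made up here.

# Illustrative only: possible shape of get_country_information("AT");
# all keys and values depend on the format/defaults tables and are made up.
info = {
    "name": "EXAMPLE COUNTRY",                 # country-level format/default entries
    "zip": "1234",
    "administrative_areas": {                  # built from the "sub_keys" rows
        "sub_key_1": {"name": "Example state"},
    },
}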
|
[
"def get_countries(self):\n if self.db_connected:\n data = {}\n countries = self.cur.execute(\"SELECT id, key, name FROM countries ORDER BY name\")\n for country in countries.fetchall():\n data[country[0]] = {\n \"iso_id\" : country[1],\n \"name\" : country[2]\n }\n\n return data\n else:\n return False",
"def _build_country_info(self):\n if not self.users_by_country:\n return {}\n\n country_data = {}\n for country in pycountry.countries:\n country_info = self.users_by_country.get(country.alpha_2)\n number_of_users = 0\n percentage_of_users = 0\n color_rgb = [247, 247, 247]\n if country_info is not None:\n if self.private:\n number_of_users = country_info[\"number_of_users\"] or 0\n percentage_of_users = country_info[\"percentage_of_users\"] or 0\n color_rgb = country_info[\"color_rgb\"] or [247, 247, 247]\n\n # Use common_name if available to be less political\n # offending (#310)\n try:\n country_name = country.common_name\n except AttributeError:\n country_name = country.name\n\n country_data[country.numeric] = {\n \"name\": country_name,\n \"code\": country.alpha_2,\n \"percentage_of_users\": percentage_of_users,\n \"color_rgb\": color_rgb,\n }\n\n if self.private:\n country_data[country.numeric][\n \"number_of_users\"\n ] = number_of_users\n\n return country_data",
"def getCountriesInfos():\n countries = getCountries()\n return [(country['code'], country['name'], country['resource_uri'])\n for country in countries]",
"def allCountries():",
"def select(self, country_list):\n countries = dict()\n for c in country_list:\n tmp = self.get_country_data(c)\n if tmp is not None:\n countries[c] = tmp\n return countries",
"def get_country_gnp_per_capita(country_iso):\r\n url = 'http://api.worldbank.org/countries/%s/indicators/NY.GNP.PCAP.CD/' % country_iso\r\n # Query some more years, in case we lack data\r\n date_str = '2000:2020'\r\n r = requests.get(url=url, params={'format': 'json', 'date': date_str})\r\n if len(r.json()) == 1:\r\n return {}\r\n header, records = r.json()\r\n result = {rec['date']: rec['value'] for rec in records}\r\n return result",
"def list_products_countries(self, country_list):\r\n products_countries_dic = {}\r\n for country in country_list:\r\n products_countries_dic[country] = self.list_products_country(country)\r\n return products_countries_dic",
"def country(self, ip_address):\n try:\n response = self._country_reader.country(ip_address)\n except geoip2.errors.AddressNotFoundError:\n return {\n 'country_code': None,\n 'country_name': None,\n }\n else:\n country = response.country\n return {\n 'country_code': country.iso_code,\n 'country_name': country.name,\n }",
"def tag_country_basic(dict):\n\n from geotext import GeoText\n import pycountry\n\n places = GeoText(dict['full_text'])\n dict['cities'] = places.cities\n dict['nationalities'] = places.nationalities\n dict['countries_iso2'] = places.country_mentions\n\n dict['primary_country'] = \"\"\n if len(places.country_mentions) > 0:\n country = pycountry.countries.get(alpha_2=list(places.country_mentions)[0])\n dict['primary_country'] = [country.name, list(places.country_mentions)[0]]\n\n dict['countries'] = []\n while len(places.country_mentions) > 0:\n c = places.country_mentions.popitem(last=False)\n country = pycountry.countries.get(alpha_2=c[0])\n dict['countries'].append((country.name, c[0], c[1]))",
"def _load_country_2_continent(self):\n pg.cur.execute(\"SELECT geonameid, continents FROM countries\")\n return {\n country: [int(c) for c in continent.split(',')]\n for country, continent in pg.cur.fetchall()\n }",
"def get_country_data(self, country):\n temp = self.ts[self.ts[\"Country/Region\"] == country]\n if temp.shape[0] == 0:\n print(country + \" is not in the dataset.\")\n data = None\n else:\n data = temp.drop(columns=[\"Lat\", \"Long\"]).pivot_table(columns=\"Province/State\")\n data.index = pd.to_datetime(data.index)\n return CountryData(data.sort_index())",
"def get_covid_states_report_by_country(country):\n data = db.session.query(CovidWiki).filter(and_(\n CovidWiki.state.isnot(None),\n func.lower(CovidWiki.country) == country.lower(),\n )).all()\n return [v.to_dict() for v in data]",
"def _get_alternative_names_countries(self):\n names = dd(dict)\n pg.cur.execute(\"\"\"SELECT geonameid FROM countries\"\"\")\n for geonameid, in pg.cur.fetchall():\n pg.cur.execute(f\"\"\"SELECT name, full_name, population, country_geonameid, adm1_geonameid FROM geonames WHERE geonameid = {geonameid}\"\"\")\n res = pg.cur.fetchone()\n if res is None:\n continue\n name, full_name, population, country_geonameid, adm1_geonameid = res\n if name not in names:\n names[name] = {}\n\n geonameid_info = {\n 'type': 'country',\n 'abbreviations': [],\n \"toponym\": name,\n \"geonameid\": geonameid,\n \"population\": population,\n \"country_geonameid\": country_geonameid,\n \"adm1_geonameid\": adm1_geonameid\n }\n names[name][geonameid] = geonameid_info\n\n pg.cur.execute(f\"\"\"SELECT alternate_name, isolanguage, full_name FROM alternate_names WHERE geonameid = {geonameid}\"\"\")\n for name, isolanguage, full_name in pg.cur.fetchall():\n if name not in names:\n names[name] = {}\n if geonameid not in names[name]:\n names[name][geonameid] = geonameid_info\n if isolanguage == 'abbr':\n names[name][geonameid]['abbreviations'].append(full_name)\n return names",
"def prevalence_G_dict(self):\n ret = {}\n for finding in self.findings:\n if(self.isCountry(finding[0])):\n ret[finding[0]] = finding[1]\n return ret",
"def get_covid_countries_report():\n data = db.session.query(CovidWiki).filter(CovidWiki.state.is_(None)).all()\n return [v.to_dict() for v in data]",
"def by_country(self, country:str):\n\n if not isinstance(country, str):\n logger.error(\"The country is not the string.\")\n return []\n _search = self.search\n _search[self.sc] = {\n \"country\": country,\n # \"data\"\n }\n return _search.find()",
"def get_data_from_country(country_name: str) -> list:\n\n url = f\"https://api.covid19api.com/total/dayone/country/{country_name}\"\n\n payload = {}\n headers = {}\n\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n\n data = response.json()\n\n return data",
"def getCountries():\r\n url = \"https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/apiservices/reference/v1.0/{}\"\r\n countries = requests.request(\"GET\", url.format(\"countries/en-US\"), headers = headers).json()['Countries']\r\n\r\n return countries",
"def load_uk():\r\n with open('city.list.json', 'r', encoding='utf8') as json_file:\r\n all_places = load(json_file)\r\n return {place['name'].lower(): place['id'] for place in all_places\r\n if place['country'] == 'GB'}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Deletes the given country entries from the country list
|
def delete_country_entry(self, country_keys):
    if self.db_connected:
        # Remove the dependent format data first, then the country rows themselves.
        self.delete_country_data(country_keys)
        for country in country_keys:
            self.cur.execute("DELETE FROM countries WHERE key = :key", {"key": country})
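An illustrative call, assuming db is an instance of the class these methods belong to; the ISO keys are made up.

# Illustrative only: db is assumed to be an instance of the surrounding class.
db.delete_country_entry(["AT", "DE"])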
|
[
"def remove_country():\n\n name=request.get_data().decode().split('=')[1]\n result = json.loads(dumps(db.getInstance().delete_country_by_name(name)))\n return result",
"def delete(self, country_id):\n le_country = get_a_country(country_id)\n if not le_country:\n return {'success': False, 'msg': 'country does not exist'}\n else:\n delete_a_country(country_id)\n return {'success': True, 'message': 'country deleted successfully'}",
"def delete_destination(payload, country_id):\n try:\n country = Country.query.filter(Country.id == country_id).first()\n body = request.get_json()\n destination_id = body.get('destinationId')\n\n dest = Country.query.filter(Country.id == destination_id).first()\n if not country or not dest:\n abort(404)\n country.destinations.remove(dest)\n country.update()\n\n dests = [destination.short() for destination in country.destinations]\n return jsonify({\n 'destinations': dests,\n })\n except Exception:\n abort(422)",
"def render(self, session, country, force_non_empty, **arguments):\n\n dblocation = Location.get_unique(\n session, name=country, location_type='country', compel=True)\n\n if not force_non_empty and dblocation.contains_any_location_of_class(\n City, session):\n # noinspection PyStringFormat\n raise ArgumentError(\n 'Could not delete {0:l}, at least one city found in this '\n 'country.'.format(dblocation))\n\n return CommandDelLocation.render(\n self, session=session, name=country, type='country', **arguments)",
"def record_export_country_history_delete(sender, instance, by, **kwargs):\n action = CompanyExportCountryHistory.HistoryType.DELETE\n _record_export_country_history(instance, action, by)",
"def delete_language(self,iSurveyID,sLanguage):",
"def delete(self):\n del contactlist[self.get_selection_index()]\n self.update_contactlist()",
"def remove_customer(self, index):\n self.customer_list.pop(index)",
"def _remove_list_item(self, beacon_config, label):\n\n index = self._get_index(beacon_config, label)\n del beacon_config[index]",
"def delete_one_place(place):\n conn = sqlite3.connect('places.db')\n cursor = conn.cursor()\n\n param = '%' + place + '%'\n sql = 'DELETE FROM places WHERE local_name LIKE ?'\n cursor.execute(sql, (param,))\n conn.commit()\n\n conn.close()",
"def clear_identity(self, country_id):\n with self.db.get_cursor() as cursor:\n identity_id = cursor.one(\"\"\"\n\n DELETE\n FROM participant_identities\n WHERE participant_id=%(participant_id)s\n AND country_id=%(country_id)s\n RETURNING id\n\n \"\"\", dict(locals(), participant_id=self.id))\n payload = dict( id=self.id\n , identity_id=identity_id\n , country_id=country_id\n , action='clear identity'\n )\n self.app.add_event(cursor, 'participant', payload)\n self._update_has_verified_identity(cursor)",
"def delete(self):\n Customer.data.remove(self)",
"def remove(self, language: str):\n self.pop(language)",
"def delete_strain(self, selector):\n\n self.cases.pop(selector)",
"def delete_location(city):\n\n if current_user.is_admin is False:\n flash(\"Not Allowed.\", \"danger\")\n return redirect(url_for('suggestion_list', city=city))\n\n CITIES.delete_one({\"location\": city})\n flash(\"Location deleted.\", \"success\")\n return redirect(url_for(\"index\"))",
"def delete(self):\n self.tournament.withdraw_entry(self.get_dao())",
"def delete_item(self):\n\t\tch_name = self.__session_model.selected_channel_var.get()\n\t\tidx, item_obj = self.__model.get_item_from_list(ch_name=ch_name)\t\n\n\t\tsize = int(item_obj.get_bits())\n\t\tself.__model.update_space_data(-size)\n\t\tself.__model.delete_item(idx)\n\t\tself.__view.refresh_item_list(jump=False)",
"def delete_entry(self, *args):\n if len(self.value) > 1 and self.recycle_view_class_pool.selected:\n label = self.recycle_view_class_pool.selected[\"text\"]\n idx = self.imagenet_labels[label]\n self.value.remove(idx)\n self.set_value()",
"def _delete(self, index):\n # check\n if isinstance(index,str):\n index = int(index)\n try:\n self._list_box.delete(index)\n except:\n print(\"Index out of boundary\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
From a BO optimization, extract the X giving the best seen Y and the best expected Y (model prediction) among the X already visited
|
def get_bests_from_BO(bo):
    # Best observed point: minimum over the objective values actually evaluated.
    y_seen = np.min(bo.Y)
    x_seen = bo.X[np.argmin(bo.Y)]
    # Best expected point: minimum of the model's predicted mean over the visited X.
    Y_pred = bo.model.predict(bo.X)
    y_exp = np.min(Y_pred[0])
    x_exp = bo.X[np.argmin(Y_pred[0])]
    return (x_seen, y_seen), (x_exp, y_exp)
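A usage sketch, assuming a GPyOpt-style optimizer (an object exposing .X, .Y and a .model.predict() returning mean and variance, which is what the function above relies on); the objective and domain below are placeholders, not from the source.

# Usage sketch (assumption: a GPyOpt BayesianOptimization object; the
# objective and domain below are placeholders).
import numpy as np
import GPyOpt

def objective(x):
    # Simple quadratic with its minimum at x = 0.3
    return np.sum(np.square(x - 0.3), axis=1, keepdims=True)

domain = [{"name": "x", "type": "continuous", "domain": (0, 1)}]
bo = GPyOpt.methods.BayesianOptimization(f=objective, domain=domain)
bo.run_optimization(max_iter=10)

(x_seen, y_seen), (x_exp, y_exp) = get_bests_from_BO(bo)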
|
[
"def best_neighborhood_improvement (self, x):\n\n best = []\n best_avg = self.avg_f_neighborhood(x)\n for i in range(self.n):\n for j in range(i+1, self.n):\n if x[i] != x[j]:\n if self.num_evals >= self.max_evals:\n break\n y = x.copy()\n y[i], y[j] = y[j], y[i]\n avg_fz = self.avg_f_neighborhood(y)\n if avg_fz < best_avg:\n best = y\n best_avg = avg_fz\n return best",
"def find_best_overall():\n imgs, gts = gi.load_all_images('data/training/')\n X, Y = gi.produce_XY(imgs, gts)\n\n find_best_LogisticRegression(X, Y)\n find_best_BayesianRidge(X, Y)\n find_best_Ridge(X, Y)",
"def getBestSolutionValue(self) -> float:",
"def _get_optimal_point_from_history(self) -> ResultDict:\n result = {}\n\n # get indices of admissible trace entries\n # shape (n_sample, n_x)\n xs = np.asarray(self.history.get_x_trace())\n ixs_admit = [ix for ix, x in enumerate(xs) if self._admissible(x)]\n\n if len(ixs_admit) == 0:\n # no admittable indices\n return {key: None for key in OptimizerHistory.MIN_KEYS}\n\n # index of minimum of fval values\n ix_min = np.nanargmin(self.history.get_fval_trace(ixs_admit))\n # np.argmin returns ndarray when multiple minimal values are found,\n # we want the first occurrence\n if isinstance(ix_min, np.ndarray):\n ix_min = ix_min[0]\n # select index in original array\n ix_min = ixs_admit[ix_min]\n\n # fill in parameter and function value from that index\n for var in (X, FVAL, RES):\n val = getattr(self.history, f'get_{var}_trace')(ix_min)\n if val is not None and not np.all(np.isnan(val)):\n result[var] = val\n # convert to float if var is FVAL to be sure\n if var == FVAL:\n result[var] = float(result[var])\n\n # derivatives may be evaluated at different indices, therefore\n # iterate over all and check whether any has the same parameter\n # and the desired field filled\n for var in (GRAD, HESS, SRES):\n for ix in range(len(self.history)):\n if not allclose(result[X], self.history.get_x_trace(ix)):\n # different parameter\n continue\n val = getattr(self.history, f'get_{var}_trace')(ix)\n if not is_none_or_nan_array(val):\n result[var] = val\n # successfuly found\n break\n\n # fill remaining keys with None\n for key in OptimizerHistory.MIN_KEYS:\n if key not in result:\n result[key] = None\n\n return result",
"def __best_in_queue(self):\n #return graph, score\n graph = max(self._queue,key=self._queue.get)\n score = self._queue[graph]\n return graph, score",
"def first_neighborhood_improvement (self, x):\n \n current_avg= self.avg_f_neighborhood(x)\n for i in range(self.n):\n for j in range(i+1, self.n):\n if x[i] != x[j]:\n if self.num_evals >= self.max_evals:\n break\n y = x.copy()\n y[i], y[j] = y[j], y[i]\n if self.avg_f_neighborhood(y) < current_avg:\n return y\n return []",
"def find_most_violated_constraint(x, y, sm, sparm):\n global LOSS_METHOD\n # Similar, but include the loss.\n #print\n #print \"MOST VIOLATED Constraint\"\n # l1 = lp_training(x,y,sm,sparm)\n if(LOSS_METHOD== \"micro\"):\n l2 = lp_training_qpbo(x,y,sm,sparm)\n else:\n l2 = lp_training_qpbo_macro(x,y,sm,sparm)\n #print \"l1:\"\n #for i in xrange(l1[1]*sm.num_classes):\n #print l1[0][i,0],l2[0][i,0]\n # assert l1[0][i,0] == l2[0][i,0]\n #print \"l2\"\n #print l2[0]\n\n #print\n #assert (l1[0] == l2[0])\n #l = lp_training_opt(x,y,sm,sparm)\n #l = lp_training(x,y,sm,sparm)\n\n ##print l.T\n return l2",
"def _recommendSolution(self, bayesianOptimizer):\n # Pulling input data from BO instance\n trainingInputs = copy.copy(bayesianOptimizer._trainingInputs[0])\n for varName, array in trainingInputs.items():\n trainingInputs[varName] = np.asarray(array)\n # Evaluating the model at all training points\n modelEvaluation = bayesianOptimizer._evaluateRegressionModel(trainingInputs)\n # Evaluating constraints at all training points\n invalidIndices = []\n if self._constraints is not None:\n arrayTrainingInputs = bayesianOptimizer.featurePointToArray(trainingInputs)\n for constraint in self._constraints:\n constraintArray = constraint.fun(arrayTrainingInputs)\n invalidArray = np.less(constraintArray, np.zeros(constraintArray.shape))\n invalidWhere = np.where(invalidArray[0])\n for index in invalidWhere[0]:\n invalidIndices.append(index)\n # Pulling mean and std out of evaluation to operate on array structure\n muVec = modelEvaluation[0]\n stdVec = modelEvaluation[1]\n # Removing values at locations where constraint violation has occurred\n muVec = np.delete(muVec, invalidIndices)\n stdVec = np.delete(stdVec, invalidIndices)\n for varName in list(trainingInputs):\n trainingInputs[varName] = np.delete(trainingInputs[varName], invalidIndices)\n # Retrieving best mean value within training set locations, need index for retrieving other values\n muStar = np.min(muVec)\n minDex = np.argmin(muVec)\n stdStar = stdVec[minDex]\n # Retrieving location of recommended solution\n xStar = {}\n for varName in list(trainingInputs):\n xStar[varName] = trainingInputs[varName][minDex]\n return muStar, xStar, stdStar",
"def find_best_way(self):\n max_count = 0\n max_element = None\n collectables = self.find_collectables()\n for element in collectables:\n check_fun = self.current_way(self.current_pos.getX(),self.current_pos.getY(),element.getX(),element.getY())\n if not check_fun:\n continue\n k = 1\n for n_element in collectables:\n if check_fun(n_element.getX(),n_element.getY()):\n k += 1\n if k > max_count:\n max_count = k\n max_element = n_element\n print('collected :',max_count)\n return max_element",
"def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n largest_BIC = float(\"inf\") # return value of highest average C.V\n best_model= self.base_model(self.n_constant) # the corrosponding model woth top_score\n\n\n for n_components in range(self.min_n_components, self.max_n_components + 1):\n\n try:\n\n #-------------------------------------------\n n = n_components\n d = len(self.X[0]) # number of features\n p = (n **2) + (2*d*n) - 1 #n*(n-1) + (2*d*n)\n N = len(self.X)\n #-------------------------------------------\n\n model = self.base_model(n_components)\n logL = model.score(self.X, self.lengths)\n #-------------------------------------------\n logN = np.log(N)\n\n current_BIC = -2*logL + p*logN\n\n if current_BIC < largest_BIC:\n largest_BIC, best_model = current_BIC, model\n\n except:\n #print(\"Exception inside SelectorBIC\")\n continue\n\n return best_model",
"def __choose_best_feature(self, x, y, m):\n total_ent = self.__cal_entropy(y)\n samples_num = x.shape[0]\n best_feature = 0\n if m == 'infogain': # method is infogain\n max_gain = 0.0\n for i in range(x.shape[1]): # for every feature\n x_unique = set(x[:, i]) # unique value of every feature\n split_ent = 0.0\n for val in x_unique:\n vals_unique, y_val = self.__split_data(x, y, i, val)\n count = vals_unique.shape[0]\n split_ent += (count / samples_num) * self.__cal_entropy(y_val)\n if (total_ent - split_ent) >= max_gain: # compare the information gain to the total entropy\n max_gain = (total_ent - split_ent)\n best_feature = i\n elif m == 'gini':\n min_gini = 9999\n for i in range(x.shape[1]):\n x_unique = set(x[:, i])\n feat_gini = 0.0\n for val in x_unique:\n vals_unique, y_val = self.__split_data(x, y, i, val)\n count = vals_unique.shape[0]\n feat_gini += (count / samples_num) * self.__cal_gini(y_val)\n if feat_gini <= min_gini:\n min_gini = feat_gini\n best_feature = i\n elif m == 'logistic':\n # TODO: implement logistic function\n pass\n return best_feature",
"def findBestModel(self):\n self.reggridSearch()",
"def best_partial_neighborhood_improvement (self, x):\n\n best = []\n best_avg = self.avg_f_partial_neighborhood(x)\n vnc = self.vertices_no_cut(x)\n for i in range(self.n):\n for j in range(i+1, self.n):\n if x[i] != x[j]:\n # if they are in the partial neighborhood\n if all(i not in l for l in vnc) or all(j not in l for l in vnc):\n if self.num_evals >= self.max_evals:\n break\n y = x.copy()\n y[i], y[j] = y[j], y[i]\n avg_fz = self.avg_f_partial_neighborhood(y)\n if avg_fz < best_avg:\n best = y\n best_avg = avg_fz\n return best",
"def _get_best_parameters(self):\n res = [x.best_parameters for x in self.results]\n return res",
"def find_best(self):\n return min(self.population, key=lambda x: x.fitness)",
"def best_x(self):\n found_x_pos = [x for x in self.recent_x_pos if x is not None]\n return np.mean(found_x_pos)",
"def select_best(self):\n return max(self.population, key=lambda x: self.fitness.eval(x))",
"def best_monitor(self):\n seeable = 0\n best_x = best_y = None\n for x, y in self.asteroids:\n count = self.count_seeable(x, y)\n if count > seeable:\n seeable = count\n best_x, best_y = x, y\n return best_x, best_y",
"def xgboost_heuristic_2(board, model):\n\n return model.predict(get_rep_2(board).reshape(1,288))",
"def bayesian_optimization(self, objective: evalset.test_funcs.TestFunction) -> Tuple[np.ndarray, np.ndarray]:\n \n # SET UP THE GP MODEL #\n bounds = objective.bounds\n dim = len(bounds)\n \n lik = GPy.likelihoods.Gaussian()\n lik.variance.constrain_fixed(self.noise**2, warning=False)\n noise = self.noise\n\n X0 = np.empty((0,dim))\n y = []\n yc = []\n \n def objective_modifier(x, f=None, batch_size=1):\n return np.concatenate(tuple( f(x[:,i*batch_size:(i+1)*batch_size]).reshape((-1,1)) for i in range(x.shape[1]//batch_size)), axis=1)\n \n \n # Initial observations:\n if self.use_comparative_observations_in_init:\n if self.random:\n X0 = util.random_sample(bounds, 2**dim)\n else:\n X0 = util.grid_sample(dim)\n yc = util.give_comparisons(objective.f, X0)\n if self.use_direct_observations_in_init:\n if self.random:\n Xn = util.random_sample(bounds, 2**dim)\n else:\n Xn = util.grid_sample(dim)\n yn = objective.f(Xn).reshape((-1,1))\n y = [(X0.shape[0] + i, yi) for i,yi in enumerate(yn)]\n X0 = np.concatenate((X0, Xn), axis=0)\n \n \n if not self.use_comparative_observations_in_init and not self.use_direct_observations_in_init:\n m = self.inference(util.static_sample(bounds), [(i, yi) for i,yi in enumerate(np.array([[0], [0]]))], yc, self.kernel.copy(), lik, get_logger=self.get_logger)\n else:\n m = self.inference(X0, y, yc, self.kernel.copy(), lik, get_logger=self.get_logger)\n\n # CREATE BO LOOP #\n bo_loop = create_bayesian_optimization_loop(m, bounds, self.batch_size, self.acquisition)\n \n # RUN THE LOOP #\n bo_loop.run_loop( partial(objective_modifier, f=objective.f, batch_size=self.batch_size), self.iterations)\n return m.X, m.yc"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Distribute the test kits to the counties for this agent
|
def distribute_test_kits(self):
raise NotImplementedError("Subclass must implement abstract method")
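A minimal subclass sketch showing how the abstract method might be implemented; the base class name, attributes and county API here are hypothetical, not taken from the source.

# Hypothetical subclass sketch: the base class name and the attribute/county API
# (test_kits, counties, receive_test_kits) are assumptions, not from the source.
class StateAgent(DistributingAgent):
    def distribute_test_kits(self):
        # Split this agent's stock evenly across its counties.
        share = self.test_kits // max(len(self.counties), 1)
        for county in self.counties:
            county.receive_test_kits(share)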
|
[
"def create_installers_bunch(installer, train, test, coverages, xgrid, ygrid):\n\n \n bunch = Bunch(name=installer)\n\n points = dict(test=test, train=train)\n\n\n\n for label, pts in points.iteritems():\n\n\n # choose points associated with the desired species\n pts = pts[pts['installer'] == installer]\n\n pts = pts[pts['x'] > min(xgrid)]\n pts = pts[pts['x'] < max(xgrid)]\n pts = pts[pts['y'] > min(ygrid)]\n pts = pts[pts['y'] < max(ygrid)]\n\n bunch['pts_%s' % label] = pts\n\n # determine coverage values for each of the training & testing points\n ix = np.searchsorted(xgrid, pts['x'])\n iy = np.searchsorted(ygrid, pts['y'])\n\n bunch['cov_%s' % label] = coverages[:, -iy, ix].T\n\n\n return bunch",
"def _run_tests(build_tree, test_labels, conda_env_files, output_folder):\n test_results = {}\n # Run test commands for each conda environment that was generated\n for variant_string, conda_env_file in conda_env_files.items():\n test_feedstocks = build_tree.get_test_feedstocks(variant_string)\n if test_feedstocks:\n log.info(\"\\n*** Running tests within the %s conda environment ***\\n\", os.path.basename(conda_env_file))\n for feedstock in test_feedstocks:\n log.info(\"Running tests for %s\", feedstock)\n test_result = test_feedstock.test_feedstock(conda_env_file,\n test_labels=test_labels,\n working_directory=feedstock)\n if feedstock not in test_results.keys():\n test_results[feedstock] = test_result\n else:\n test_results[feedstock] += test_result\n test_feedstock.process_test_results(test_results, output_folder, test_labels)",
"def discover_suites_and_tests(self):\n self.root_suite = DiscoveredRobotTestSuite(discovered_robot_app=self, suite_test_data=self.robot_test_data)\n self.test_suites.append(self.root_suite)\n self.root_suite.discover_child_suites_and_tests()",
"def register_tests(cls):\n\n foundry_url = \"http://www.obofoundry.org/registry/ontologies.yml\"\n foundry_rq = six.moves.urllib.request.urlopen(foundry_url)\n foundry_yaml = yaml.load(foundry_rq)\n\n # products = ( o['products'] for o in foundry_yaml['ontologies'] if 'products' in o )\n # for product in products:\n # cls.add_test(product)\n\n for o in foundry_yaml['ontologies']:\n if 'products' in o:\n for product in o['products']:\n cls.add_test(product)",
"def testMakeAggregator(self):\n\n aggs = [self.P6_batch.make_aggregator(),\n self.P7_batch.make_aggregator()]\n options = gw.DataGeneratorOptions(random_seed = 0,\n num_processes = 1,\n num_rows = self.num_rows,\n verbose = False,\n aggregators = aggs,\n batch_size = 5)\n pums_files = \\\n [(\"mock pums\", \n stringio.StringIO(mock_data_files.mock_pums_data))]\n pums_dict = \\\n learn_distributions.learn_pums_dists(self.learner_options,\n self.dummy_logger,\n pums_files) \n names_files = \\\n [('male_first_names.txt', \n stringio.StringIO(mock_data_files.mock_male_first_names)),\n ('female_first_names.txt', \n stringio.StringIO(mock_data_files.mock_female_first_names)),\n ('last_names.txt',\n stringio.StringIO(mock_data_files.mock_last_names))]\n names_dict = \\\n learn_distributions.learn_name_dists(self.learner_options,\n self.dummy_logger,\n names_files)\n zipcode_files = \\\n [('mock_zipcodes', \n stringio.StringIO(mock_data_files.mock_zipcodes))]\n zipcode_dict = \\\n learn_distributions.learn_zipcode_dists(self.learner_options,\n self.dummy_logger,\n zipcode_files)\n \n text_files = \\\n [('mock_text', \n stringio.StringIO(mock_data_files.mock_text_files))]\n text_engine = \\\n learn_distributions.train_text_engine(self.learner_options, \n self.dummy_logger, \n text_files)\n streets_files = \\\n [('mock street file', \n stringio.StringIO(mock_data_files.mock_street_names))]\n address_dict = \\\n learn_distributions.learn_street_address_dists(self.learner_options, \n self.dummy_logger, \n streets_files)\n dist_holder = \\\n learn_distributions.make_distribution_holder(self.learner_options,\n self.dummy_logger,\n pums_dict,\n names_dict,\n zipcode_dict,\n address_dict,\n text_engine)\n \n worker = gw.Worker(options, self.dummy_logger, dist_holder)\n self.aggregator_results = worker.start()",
"def get_distribution_tests():\n tests = {}\n\n for config in available_distributions():\n config = config.copy()\n name_array = bytearray()\n for key in sorted(list(config.keys())):\n if key in (\"info\", \"pkgsys\", \"url\"):\n continue\n\n name_array += bytes(key[0].upper().encode() +\n key[1:].encode() +\n config[key][0].upper().encode() +\n config[key][1:].encode())\n name = \"Test{0}\".format(name_array.decode(\"ascii\"))\n\n distro = config[\"distro\"]\n repositories_to_add = _DISTRO_INFO[distro].repo\n packages_to_install = [_DISTRO_INFO[distro].package]\n files_to_test_for = _DISTRO_INFO[distro].files\n kwargs = dict()\n\n try:\n kwargs[\"arch\"] = Alias.universal(config[\"arch\"])\n except KeyError: # suppress(pointless-except)\n pass\n\n # Set the --local switch if the installation type is local. This is\n # because we pass the keyword arguments to the main function of\n # psq-travis-container-create\n kwargs[\"local\"] = (config.get(\"installation\", None) == \"local\")\n tests[name] = _create_distro_test(name,\n config,\n repositories_to_add,\n packages_to_install,\n files_to_test_for,\n **kwargs)\n\n return tests",
"def partition_suite(suite, classes, bins):\n for test in suite:\n if isinstance(test, unittest.TestSuite):\n partition_suite(test, classes, bins)\n else:\n for i in range(len(classes)):\n if isinstance(test, classes[i]):\n bins[i].addTest(test)\n break\n else:\n bins[-1].addTest(test)",
"def testMakeAggregator(self):\n\n aggs = [self.batch.make_aggregator()]\n options = gw.DataGeneratorOptions(random_seed = 0,\n num_processes = 1,\n num_rows = self.num_rows,\n verbose = False,\n aggregators = aggs,\n batch_size = 5)\n pums_files = \\\n [(\"mock pums\", \n stringio.StringIO(mock_data_files.mock_pums_data))]\n pums_dict = \\\n learn_distributions.learn_pums_dists(self.learner_options,\n self.dummy_logger,\n pums_files) \n names_files = \\\n [('male_first_names.txt', \n stringio.StringIO(mock_data_files.mock_male_first_names)),\n ('female_first_names.txt', \n stringio.StringIO(mock_data_files.mock_female_first_names)),\n ('last_names.txt',\n stringio.StringIO(mock_data_files.mock_last_names))]\n names_dict = \\\n learn_distributions.learn_name_dists(self.learner_options,\n self.dummy_logger,\n names_files)\n zipcode_files = \\\n [('mock_zipcodes', \n stringio.StringIO(mock_data_files.mock_zipcodes))]\n zipcode_dict = \\\n learn_distributions.learn_zipcode_dists(self.learner_options,\n self.dummy_logger,\n zipcode_files)\n \n text_files = \\\n [('mock_text', \n stringio.StringIO(mock_data_files.mock_text_files))]\n text_engine = \\\n learn_distributions.train_text_engine(self.learner_options, \n self.dummy_logger, \n text_files)\n streets_files = \\\n [('mock street file', \n stringio.StringIO(mock_data_files.mock_street_names))]\n address_dict = \\\n learn_distributions.learn_street_address_dists(self.learner_options, \n self.dummy_logger, \n streets_files)\n dist_holder = \\\n learn_distributions.make_distribution_holder(self.learner_options,\n self.dummy_logger,\n pums_dict,\n names_dict,\n zipcode_dict,\n address_dict,\n text_engine)\n \n worker = gw.Worker(options, self.dummy_logger, dist_holder)\n self.aggregator_results = worker.start()\n self.assertEqual(len(self.aggregator_results),1)\n self.assertEqual(self.aggregator_results[0].keys(),['subresults'])\n self.assertEqual(len(self.aggregator_results[0]['subresults']),2)\n self.assertEqual(set(self.aggregator_results[0]['subresults'][0].keys()),\n set(['qid','alarmword_matching_row_id_and_distances',\n 'matching_record_ids','valid']))",
"def __init_settled_shop_agents(self, num_settled_shop_agents):\n for i in range(num_settled_shop_agents):\n unique_id = \"settled_shop_\" + i\n subsidy_cost = 10\n rental_cost = 40 # 入驻平台电商成本\n # 随机选取一个平台电商,作为Settled Shop所依赖的电商平台\n platform_e_commerce_agent = choice(self.platform_e_commerce_schedule.agents)\n settled_shop_agent = SettledShopAgent(unique_id, self, rental_cost, subsidy_cost, platform_e_commerce_agent)\n self.settled_shop_schedule.add(settled_shop_agent)",
"def test_multi(**kwargs):\n # flags\n FLAGS = FlagHolder()\n FLAGS.initialize(**kwargs)\n FLAGS.summary()\n\n # paths\n run_dir = '../scripts'\n target_path = os.path.join(FLAGS.target_dir, '**/weight_final*.pth')\n weight_paths = sorted(glob.glob(target_path, recursive=True), key=lambda x: os.path.basename(x))\n log_path = os.path.join(FLAGS.target_dir, 'test.csv')\n\n # logging\n logger = Logger(path=log_path, mode='test')\n\n for weight_path in weight_paths:\n # get coverage\n # name should be like, '~_coverage_{}.pth'\n basename = os.path.basename(weight_path)\n basename, ext = os.path.splitext(basename)\n coverage = float(basename.split('_')[-1])\n\n # keyword args for test function\n # variable args\n kw_args = {}\n kw_args['weight'] = weight_path\n kw_args['dataset'] = FLAGS.dataset\n kw_args['dataroot'] = FLAGS.dataroot\n kw_args['coverage'] = coverage\n # default args\n kw_args['dim_features'] = 512\n kw_args['dropout_prob'] = 0.3\n kw_args['num_workers'] = 8\n kw_args['batch_size'] = 128\n kw_args['normalize'] = True\n kw_args['alpha'] = 0.5\n \n # run test\n out_dict = test(**kw_args)\n\n metric_dict = OrderedDict()\n metric_dict['coverage'] = coverage\n metric_dict['path'] = weight_path\n metric_dict.update(out_dict)\n\n # log\n logger.log(metric_dict)",
"def main():\n\t\n\t# start running trials\n\t# save outcomes of trials\n\t\n\tsuccesses = 0\n\t\n\tfor trial in range(1000):\n\t\tavailable_seats = list(range(0, 100))\n\t\tsuccesses += simulate(available_seats)\n\t\n\tprint(successes/(1000))",
"def gather(key, dist='b', targets='all'):",
"def run_tests(self, scen, header):\n scen = \"flat\"\n for dirpath, dnames, fnames in os.walk(self.test_dir):\n if dirpath != self.test_dir:\n #TODO: using subdirs for scenarios\n scen = dirname.split(\"/\")[-1]\n break\n for fname in fnames:\n\t\tif not fname.endswith('.test'):\n continue\n print \"### Within %s\" % fname\n fpath = \"%s/%s\" % (dirpath, fname)\n self.run_test(header, fpath)",
"def run(self):\n # Create benchmark directory: the desired name plus the current date\n # and time.\n try:\n os.makedirs(self.benchmarkDir)\n except OSError as e:\n if not \"File exists\" in str(e):\n raise e\n\n for name, agent_klass in self.agents:\n todo = range(self.loops)\n if not self.overwrite:\n # If overwrite is set to false, we will only do the experiments\n # that have not been done.\n\n # index gets the index of a benchmark file out of the filename.\n index = lambda x: int(x[x.rfind(\"-\") + 1:])\n done = set(index(i) for i in os.listdir(self.benchmarkDir)\n if i.startswith(\"%s-\" % name))\n todo = (i for i in todo if i not in done)\n for j in todo:\n logging.info(\"Starting agent %s's loop #%i\" % (name, j + 1))\n # Make a clean copy of the agent for every run\n # Start subprocess that gives us the experiment\n agent = agent_klass()\n stats = self.testAgent(agent)\n # Dump stats to the given directory\n self.saveStats(name + \"-%i\" % j, stats)",
"def all_test_cases(self) -> Iterator[test_case.TestCase]:\n for alg in self.algorithms:\n yield from self.test_cases_for_hash(alg)",
"def discover_child_suites_and_tests(self):\n self._discover_tests()\n for child_suite in self.suite_test_data.children:\n discovered_child = DiscoveredRobotTestSuite(self.discovered_app, child_suite, _parent=self)\n self.discovered_app.test_suites.append(discovered_child)\n self.child_suites.append(discovered_child)\n for child in self.child_suites:\n child.discover_child_suites_and_tests()",
"def AddTestThreadsShmoo():\n AddTest(testname, 'Preparation', '', '', '', '', '', '', '',\n lambda o: {AppendFile(o['name'], testcsv)})\n for threads in threadslist:\n desc = testname + \", Threads=\" + str(threads)\n DoAddTest(testname, seqrand, wmix, bs, threads, iodepth, desc,\n iops_log, runtime)",
"def update_tests(self):\n # Get the test directories from the file.\n test_directories, harness_tld = self.get_test_dirs_from_rgt()\n\n # For each test directory, add any new UIDs from that test.\n for test_dir in test_directories:\n self.update_test_instances(test_dir, harness_tld)",
"def test_example_4():\n import pwseqdist as pw\n import pandas as pd\n from tcrdist.repertoire import TCRrep\n import multiprocessing\n\n df = pd.read_csv(\"dash.csv\")\n df = df.head(100) # for faster testing\n tr = TCRrep(cell_df = df, \n organism = 'mouse', \n chains = ['alpha','beta'], \n use_defaults=False,\n compute_distances = False,\n cpus = 1,\n db_file = 'alphabeta_gammadelta_db.tsv')\n\n metrics_a = {\n \"cdr3_a_aa\" : pw.metrics.nw_hamming_metric ,\n \"pmhc_a_aa\" : pw.metrics.nw_hamming_metric ,\n \"cdr2_a_aa\" : pw.metrics.nw_hamming_metric ,\n \"cdr1_a_aa\" : pw.metrics.nw_hamming_metric }\n\n metrics_b = {\n \"cdr3_b_aa\" : pw.metrics.nw_hamming_metric ,\n \"pmhc_b_aa\" : pw.metrics.nw_hamming_metric ,\n \"cdr2_b_aa\" : pw.metrics.nw_hamming_metric ,\n \"cdr1_b_aa\" : pw.metrics.nw_hamming_metric }\n\n weights_a = { \n \"cdr3_a_aa\" : 1,\n \"pmhc_a_aa\" : 1,\n \"cdr2_a_aa\" : 1,\n \"cdr1_a_aa\" : 1}\n\n weights_b = { \n \"cdr3_b_aa\" : 1,\n \"pmhc_b_aa\" : 1,\n \"cdr2_b_aa\" : 1,\n \"cdr1_b_aa\" : 1}\n\n kargs_a = { \n 'cdr3_a_aa' : \n {'use_numba': False},\n 'pmhc_a_aa' : {\n 'use_numba': False},\n 'cdr2_a_aa' : {\n 'use_numba': False},\n 'cdr1_a_aa' : {\n 'use_numba': False}\n }\n kargs_b = { \n 'cdr3_b_aa' : \n {'use_numba': False},\n 'pmhc_b_aa' : {\n 'use_numba': False},\n 'cdr2_b_aa' : {\n 'use_numba': False},\n 'cdr1_b_aa' : {\n 'use_numba': False}\n }\n\n tr.metrics_a = metrics_a\n tr.metrics_b = metrics_b\n\n tr.weights_a = weights_a\n tr.weights_b = weights_b\n\n tr.kargs_a = kargs_a \n tr.kargs_b = kargs_b\n\n tr.compute_distances()\n\n tr.pw_cdr3_b_aa\n tr.pw_beta"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Plots the model's coefficient values, sorted by absolute magnitude
|
def plotCoef(model, train_x):
    # Collect the model coefficients and order them by absolute magnitude.
    coefs = pd.DataFrame(model.coef_, train_x.columns)
    coefs.columns = ["coef"]
    coefs["abs"] = coefs.coef.apply(np.abs)
    coefs = coefs.sort_values(by="abs", ascending=False).drop(["abs"], axis=1)
    # Bar plot of the sorted coefficients with a dashed zero reference line.
    plt.figure(figsize=(15, 7))
    coefs.coef.plot(kind='bar')
    plt.grid(True, axis='y')
    plt.hlines(y=0, xmin=0, xmax=len(coefs), linestyles='dashed')
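A usage sketch with synthetic data, assuming pandas, numpy and matplotlib.pyplot are imported as pd, np and plt (which the function above expects); the model and data here are made up.

# Usage sketch with synthetic data (assumption: pd, np and plt are imported
# as the function above expects; model and data are made up here).
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import Lasso

rng = np.random.default_rng(0)
train_x = pd.DataFrame(rng.normal(size=(200, 5)), columns=list("abcde"))
train_y = 3 * train_x["a"] - 2 * train_x["c"] + rng.normal(scale=0.1, size=200)

model = Lasso(alpha=0.01).fit(train_x, train_y)
plotCoef(model, train_x)
plt.show()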
|
[
"def vis_coef(estimator, feature_names, topn = 10):\n fig = plt.figure()\n n_classes = estimator.coef_.shape[0]\n feature_names = np.asarray(feature_names)\n for idx, coefs in enumerate(estimator.coef_, 1):\n sorted_coefs = np.argsort(coefs)\n positive_coefs = sorted_coefs[-topn:]\n negative_coefs = sorted_coefs[:topn]\n top_coefs = np.hstack([negative_coefs, positive_coefs])\n\n colors = ['#A60628' if c < 0 else '#348ABD' for c in coefs[top_coefs]]\n y_pos = np.arange(2 * topn)\n fig.add_subplot(n_classes, 1, idx)\n plt.barh(y_pos, coefs[top_coefs], color = colors, align = 'center')\n plt.yticks(y_pos, feature_names[top_coefs])\n plt.title('top {} positive/negative coefficient'.format(topn))\n\n plt.tight_layout()",
"def plot_model_coefficients(model_object, predictor_names):\n\n coefficients = model_object.coef_\n num_dimensions = len(coefficients.shape)\n if num_dimensions > 1:\n coefficients = coefficients[0, ...]\n\n num_predictors = len(predictor_names)\n y_coords = numpy.linspace(\n 0, num_predictors - 1, num=num_predictors, dtype=float\n )\n\n _, axes_object = pyplot.subplots(\n 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)\n )\n\n axes_object.barh(\n y_coords, coefficients, color=BAR_GRAPH_COLOUR,\n edgecolor=BAR_GRAPH_COLOUR, linewidth=BAR_GRAPH_EDGE_WIDTH\n )\n\n pyplot.xlabel('Coefficient')\n pyplot.ylabel('Predictor variable')\n\n pyplot.yticks([], [])\n x_tick_values, _ = pyplot.xticks()\n pyplot.xticks(x_tick_values, rotation=90)\n\n x_min = numpy.percentile(coefficients, 1.)\n x_max = numpy.percentile(coefficients, 99.)\n pyplot.xlim([x_min, x_max])\n\n for j in range(num_predictors):\n axes_object.text(\n 0, y_coords[j], predictor_names[j], color=BAR_GRAPH_FONT_COLOUR,\n horizontalalignment='center', verticalalignment='center',\n fontsize=BAR_GRAPH_FONT_SIZE\n )",
"def plotOrder(self):\n #-------------------\n return [ p[0] for p in self._plotlist ]",
"def display_coefficients(intercept, coefficients, df):\n names = ['intercept']\n\n for column in df.columns[:len(df.columns)-1]:\n names.append(column)\n \n parameters = np.concatenate((intercept, coefficients.flatten()))\n \n fig = plt.figure(figsize = (10, 6))\n plt.barh(width = parameters[::-1], y = names[::-1])\n plt.show()",
"def plotCostVsIterations(JVals):\n plt.figure()\n # plt.xkcd()\n plt.plot(JVals)\n plt.xlabel('iterations')\n plt.ylabel('cost')\n plt.title('gradient descent performance')\n plt.show()",
"def _sort_plots(self):\n pass",
"def rocplot(models, X, y, **kwargs):\n viz = ROCAUC(models, **kwargs)\n viz.fit(X, y)\n\n return viz.render(X, y)",
"def plot_coefficients(self, figsize=(12, 8), label_zeros=False):\n if not hasattr(self.model, 'best_estimator_'):\n raise NotFittedError(\n (\"The model {} has not yet been fitted. call \"\n \"'fit_to_entire_dataset' first.\").format(self.model_name))\n\n fitted_model = self.model.best_estimator_\n\n def wrap_parens_if_needed(expression):\n if ' ' in expression:\n return '({})'.format(expression.replace(' ', ''))\n else:\n return expression\n input_features = sym.var(\n [wrap_parens_if_needed(x)\n for x in self.panel.minor_axis.values])\n feature_names = (\n fitted_model.steps[0][1].get_feature_names(\n symbolic=True, input_features=input_features))\n\n reg = fitted_model.named_steps['regressor']\n\n fig, ax = plt.subplots(figsize=figsize)\n cmap = shifted_color_map(mpl.cm.PuOr_r, data=np.vstack(reg.coef_))\n mat_plot = ax.matshow(np.vstack(reg.coef_), cmap=cmap)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes('right', size='4%', pad=0.04)\n cbar = fig.colorbar(mappable=mat_plot, cax=cax, orientation='vertical')\n cbar.ax.set_ylabel('coefficient')\n\n prefix = 'change in ' if self.model_predicts_change else 'next '\n ax.set_yticklabels(\n [''] + [prefix + l for l in self.panel.minor_axis.values])\n ax.set_xticklabels([''] + feature_names, rotation=90)\n ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(1))\n ax.yaxis.set_major_locator(mpl.ticker.MultipleLocator(1))\n\n if label_zeros:\n for i in range(len(reg.coef_)):\n for j in range(len(reg.coef_[i])):\n if reg.coef_[i, j] == 0:\n ax.text(j, i, 0, ha='center', va='center')\n return fig, ax",
"def plot_sorted_opt_steps(self, obj_ratio):\n\n f, ax = plt.subplots(figsize=(15, 8))\n\n obj_speed = (self.opt_steps[:,-2]).astype(np.float)\n obj_count = (self.opt_steps[:,-1]).astype(np.float)\n #print obj_speed\n #print obj_count\n obj_val = obj_ratio[0]*obj_speed + obj_ratio[1]* obj_count\n\n # the current optimal\n optimum = np.inf\n opt_val = []\n for val in obj_val:\n if val <= optimum:\n optimum = val\n opt_val.append(optimum)\n\n # sort the objective function value\n sorted_obj_val = sorted(obj_val, reverse=True)\n\n # iter_num = self.opt_steps[:,0]\n iter_num = np.arange(0, len(obj_val))\n\n # plot the unsorted values\n ax.plot(iter_num, obj_val, label = 'unsorted', linewidth=2)\n # ax.plot(iter_num, obj_count, label = 'count')\n ax.plot(iter_num, sorted_obj_val, label = 'sorted', linewidth=2)\n ax.plot(iter_num, opt_val, label='optimum', linewidth=2)\n ax.grid(True)\n ax.set_title('Optimization steps', fontsize=24)\n ax.set_xlabel('Iteration step', fontsize=20)\n ax.set_ylabel('Objective function value', fontsize=20)\n ax.set_xlim([0, len(obj_val)])\n\n plt.legend( loc='upper right' )\n\n # plt.savefig('opt_steps.png')\n\n plt.draw()",
"def plot_liposome_titration_insertion_kinetics(model):\n lipo_concs = np.logspace(-2, 2, 40)\n t = np.linspace(0, 12000, 100)\n\n fmax_list = []\n k_list = []\n\n for lipo_conc in lipo_concs:\n model.parameters['Vesicles_0'].value = lipo_conc\n\n # Get the SS mBax value\n #b_trans.model.parameters['Vesicles_0'].value = lipo_conc\n #s = Solver(b_trans.model, t)\n #s.run()\n #max_mBax = s.yobs['mBax'][-1]\n\n # Get the iBax curve\n s = Solver(model, t)\n s.run()\n iBax = s.yobs['iBax'] / model.parameters['Bax_0'].value\n plt.plot(t, iBax, 'r')\n\n # Fit to single exponential\n fmax = fitting.Parameter(0.9)\n k = fitting.Parameter(0.01)\n def single_exp(t):\n return (fmax() * (1 - np.exp(-k()*t)))\n fitting.fit(single_exp, [fmax, k], iBax, t)\n plt.plot(t, single_exp(t), 'b')\n\n fmax_list.append(fmax())\n k_list.append(k())\n\n plt.title('Inserted Bax with liposome titration')\n plt.xlabel('Time')\n plt.ylabel('Fraction of inserted Bax')\n plt.show()\n\n # Make plots of k and fmax as a function of lipo concentration\n fmax_list = np.array(fmax_list)\n k_list = np.array(k_list)\n\n # k\n plt.figure()\n plt.plot(lipo_concs, k_list, 'ro')\n plt.xlabel('Liposomes (nM)')\n plt.ylabel('$k$')\n plt.title(\"$k$ vs. Liposome conc\")\n plt.ylim([0, 0.0020])\n plt.show()\n\n # Fmax \n plt.figure()\n plt.plot(lipo_concs, fmax_list, 'ro')\n plt.xlabel('Liposomes (nM)')\n plt.ylabel('$F_{max}$')\n plt.title(\"$F_{max}$ vs. Liposome conc\")\n plt.show()",
"def coeff_path(x,y):\n alpha_range = np.linspace(10,100.2,300)\n coeffs = []\n for alpha in alpha_range:\n model = Ridge(normalize=True,alpha=alpha)\n model.fit(x,y)\n coeffs.append(model.coef_)\n \n plt.close('all')\n plt.figure(1)\n plt.xlabel(\"alpha value\")\n plt.ylabel(\"Coeff weights\")\n plt.title(\"Coeff Weight path\")\n plt.plot(alpha_range,coeffs)\n plt.show()",
"def plotResultOrder(endNum = 100, frac = 1):\n\n outputDic, numFound = artikel42(endNum, frac)\n\n plt.plot(numFound, 'o')\n plt.plot(numFound)\n plt.title('Order in which the first %i results below %i are found' %(frac*endNum, endNum))\n plt.xlabel('order')\n plt.ylabel('result')\n plt.show()",
"def plotBehaviorOrderedNeurons(dataSets, keyList, behaviors):\n print 'plot neurons ordered by behavior'\n nWorms = len(keyList)\n fig = plt.figure('BehaviorOrdered Neurons',(12, nWorms*3.4))\n gs = gridspec.GridSpec(nWorms,1)\n #gs = gridspec.GridSpec(nWorms, 1)\n for dindex, key in enumerate(keyList):\n inner_grid = gridspec.GridSpecFromSubplotSpec(2, len(behaviors),\n subplot_spec=gs[dindex], hspace=0.5, wspace=0.35, height_ratios=[0.5,1])\n for bindex, beh in enumerate(behaviors):\n x = dataSets[key]['Behavior'][beh]\n Y = dataSets[key]['Neurons']['Activity']\n xOrder = np.argsort(x)\n #plot sorted behavior\n ax = plt.Subplot(fig, inner_grid[0, bindex])\n ax.plot(x[xOrder], color=colorBeh[beh])\n ax.set_xlim([0, len(xOrder)])\n ax.set_ylabel(beh)\n fig.add_subplot(ax)\n # find interesting locations:\n ax.axvline(np.where(x[xOrder]>0)[0][0], color='k', lw=1, linestyle='--')\n# if beh == 'AngleVelocity':\n# ax.axvline(np.where(x[xOrder]>0)[0][0])\n# if beh=='Eigenworm3':\n# ax.axvline(np.where(np.sort(x)<-10)[0][-1])\n# ax.axvline(np.where(np.sort(x)>10)[0][0])\n ax.set_xticks([])\n #plot neural signal sorted\n ax2 = plt.Subplot(fig, inner_grid[1, bindex], sharex=ax)\n plotHeatmap(np.arange(len(Y[0])), gaussian_filter(Y[:,xOrder], (1,5)), ax =ax2,vmin=-0.5, vmax=1)\n ax2.set_xlabel('Neural activity ordered by behavior')\n # find interesting locations:\n ax2.axvline(np.where(x[xOrder]>0)[0][0], color='w', lw=1)\n# if beh == 'AngleVelocity':\n# ax2.axvline(np.where(x[xOrder]>0)[0][0], color='w', lw=0.5)\n# if beh=='Eigenworm3':\n# ax2.axvline(np.where(np.sort(x)<-10)[0][-1], color='w', lw=0.5)\n# ax2.axvline(np.where(np.sort(x)>10)[0][0], color='w', lw=0.5)\n #\n fig.add_subplot(ax2)\n gs.tight_layout(fig)",
"def plot_regression_coefficients(reg_coef_df, top_n=None, coef_col_name='coef',\n **kwargs):\n\n # Sort coefficients\n reg_coef_df = reg_coef_df.sort_values(coef_col_name)\n\n if top_n is not None:\n # Most negative coefficients\n neg_coef_df = reg_coef_df\\\n .query(f'{coef_col_name} < 0')\\\n .head(top_n)\n\n # Most positive coefficients\n pos_coef_df = reg_coef_df\\\n .query(f'{coef_col_name} > 0')\\\n .tail(top_n)\n else:\n # All negative coefficients\n neg_coef_df = reg_coef_df.query(f'{coef_col_name} < 0')\n\n # All positive coefficients\n pos_coef_df = reg_coef_df.query(f'{coef_col_name} > 0')\n\n\n # Captures the amount of positive and negative coefficients to plot\n n_neg_coef = len(neg_coef_df)\n n_pos_coef = len(pos_coef_df)\n\n # Get yticks\n plot_feat_names = neg_coef_df.index.tolist()\\\n + [' ']\\\n + pos_coef_df.index.tolist()\n\n # Plot bar charts for positive and negative\n plt.barh(np.arange(n_pos_coef) + 1,\n pos_coef_df[coef_col_name],\n color=green)\n\n plt.barh(np.arange(-n_neg_coef, 0),\n neg_coef_df[coef_col_name],\n color=red)\n\n # Plot centre dotted line\n plt.plot([0, 0],\n [-n_neg_coef - 0.5, n_pos_coef + 0.5],\n '--', color='black')\n\n plt.xlabel('Coefficient Value')\n plt.ylabel('Feature Name')\n\n plt.yticks(np.arange(-n_neg_coef, n_pos_coef + 1), plot_feat_names)",
"def plot_bax_titration_insertion_kinetics(model):\n bax_concs = np.logspace(-1, 3, 40)\n t = np.linspace(0, 12000, 100)\n\n fmax_list = []\n k_list = []\n\n for bax_conc in bax_concs:\n model.parameters['Bax_0'].value = bax_conc\n\n # Get the iBax curve\n s = Solver(model, t)\n s.run()\n iBax = s.yobs['iBax'] / model.parameters['Bax_0'].value\n plt.plot(t, iBax, 'r')\n\n # Fit to single exponential\n fmax = fitting.Parameter(0.9)\n k = fitting.Parameter(0.01)\n def single_exp(t):\n return (fmax() * (1 - np.exp(-k()*t)))\n fitting.fit(single_exp, [fmax, k], iBax, t)\n plt.plot(t, single_exp(t), 'b')\n\n fmax_list.append(fmax())\n k_list.append(k())\n\n plt.title('Inserted Bax with Bax titration')\n plt.xlabel('Time')\n plt.ylabel('Fraction of inserted Bax')\n plt.show()\n\n # Make plots of k and fmax as a function of lipo concentration\n fmax_list = np.array(fmax_list)\n k_list = np.array(k_list)\n\n # k\n plt.figure()\n plt.plot(bax_concs, k_list, 'ro')\n plt.xlabel('Bax (nM)')\n plt.ylabel('$k$')\n plt.title(\"$k$ vs. Bax conc\")\n plt.show()\n\n # Fmax \n plt.figure()\n plt.plot(bax_concs, fmax_list, 'ro')\n plt.xlabel('Bax (nM)')\n plt.ylabel('$F_{max}$')\n plt.title(\"$F_{max}$ vs. Bax conc\")\n plt.show()",
"def plot(self):\n import matplotlib.pyplot as plt\n plt.plot(self.lambdas, self.result['beta'] )\n plt.ylabel('Coefficient')\n plt.xlabel('Regularization Parameter')\n plt.suptitle('Regularization Path')\n plt.show()",
"def scree_plot(self, x, total_comps=5, cv_method=KFold(7, shuffle=True)):\n plt.figure()\n models = list()\n\n for ncomps in range(1, total_comps + 1):\n currmodel = deepcopy(self)\n currmodel.ncomps = ncomps\n currmodel.fit(x)\n currmodel.cross_validation(x, outputdist=False, cv_method=cv_method)\n models.append(currmodel)\n\n q2 = np.array([x.cvParameters['Q2X'] for x in models])\n r2 = np.array([x.modelParameters['R2X'] for x in models])\n\n plt.bar([x - 0.1 for x in range(1, total_comps + 1)], height=r2, width=0.2)\n plt.bar([x + 0.1 for x in range(1, total_comps + 1)], height=q2, width=0.2)\n plt.legend(['R2', 'Q2'])\n plt.xlabel(\"Number of components\")\n plt.ylabel(\"R2/Q2X\")\n\n # Specific case where n comps = 2 # TODO check this edge case\n if len(q2) == 2:\n plateau = np.min(np.where(np.diff(q2)/q2[0] < 0.05)[0])\n else:\n percent_cutoff = np.where(np.diff(q2) / q2[0:-1] < 0.05)[0]\n if percent_cutoff.size == 0:\n print(\"Consider exploring a higher level of components\")\n else:\n plateau = np.min(percent_cutoff)\n plt.vlines(x= (plateau + 1), ymin=0, ymax=1, colors='red', linestyles ='dashed')\n print(\"Q2X measure stabilizes (increase of less than 5% of previous value or decrease) \"\n \"at component {0}\".format(plateau + 1))\n plt.show()\n\n return None",
"def updatePlot(self):\n self.displayInfo()\n if self.curvelist:\n if self.dcursor is not None:\n self.dcursor.updateCurve()\n else:\n blkno = self.curvelist[self.activcurv].xvinfo.blkpos\n xpos = self.curvelist[self.activcurv].xvinfo.vidx\n X = self.blklst[blkno][xpos]\n self.xascending = (X[X.size - 1] > X[0])\n self.plotCurves(self.curvelist)",
"def plot_pz(self, ax, method):\n self.method = method\n for c in CMR_combination:\n ax.plot(self.pz[method][c][:, 0], self.pz[method][c][:, 1], '.', label=c)\n ax.set_title(method)\n ax.legend()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check whether the prediction is of str type; if it is not, raise an error.
|
def _check_datatype_to_string(prediction):
if isinstance(prediction, str):
return True
raise TypeError('Prediction is not in string type.')
|
[
"def input_type_check(data: object) -> None:\n if not isinstance(data, str):\n raise TypeError(\"Input data must be a 'str' object.\")",
"def test_response_column_not_str_error(self):\n\n with pytest.raises(TypeError, match=\"response_column must be a str\"):\n\n NearestMeanResponseImputer(response_column=0)",
"def _check_predictor_name(predictor_name):\n\n error_checking.assert_is_string(predictor_name)\n\n if predictor_name not in VALID_PREDICTOR_NAMES:\n error_string = (\n '\\n\\n{0:s}\\nValid predictor names (listed above) do not include '\n '\"{1:s}\".'\n ).format(str(VALID_PREDICTOR_NAMES), predictor_name)\n raise ValueError(error_string)",
"def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.dtype.string",
"def __expectString(val):\n if type(val) != str:\n raise Exception('Expected string, received {}'.format(type(val)))",
"def test_ex_type_str():\n assert putil.exh._ex_type_str(RuntimeError) == 'RuntimeError'\n assert putil.exh._ex_type_str(OSError) == 'OSError'",
"def check_str(cls, **kwargs):\r\n for value in kwargs:\r\n if not isinstance(kwargs[value], str):\r\n raise TypeError(value+' must be of type string')",
"def is_it_a_string(sample):\n\n if sys.version_info < (3, 0):\n return True if isinstance(sample, basestring) else False # NOQA: F821\n else:\n return True if isinstance(sample, (str, bytes)) else False",
"def is_string(atype):\n return atype == str",
"def _validate_strings(self):\n pass",
"def is_str(x):\n return type(x) == str",
"def check_string(seq):\n if not isinstance(seq, str):\n assert False, \"Input is not a string.\"\n else:\n pass\n return None",
"def can_to_str(_type):\n return isinstance(_type, String)",
"def is_string(obj):\n return isinstance(obj, basestring)",
"def check_string(string):\n encoding = encode(string)\n output = model(\n encoding['input_ids'],\n encoding['attention_mask']\n ).logits.cpu().detach().numpy()\n detokens = tokenizer.convert_ids_to_tokens(encoding['input_ids'].numpy()[0])\n predictions = np.argmax(output, axis=2)[0]\n return detokens, predictions",
"def check_input_type_dataset(self, _input: str) -> str:\n try:\n result = \"\"\n\n if os.path.isdir(_input):\n result = INPUT_FORMAT_DATASET[0] # path_dataset\n elif type(_input) == ObjectId:\n self.ID_DATASET = _input\n result = INPUT_FORMAT_DATASET[1] # id_dataset\n elif type(_input) is str:\n result = INPUT_FORMAT_DATASET[2] # raw_text\n else:\n print(\n \"Enter a parameter from a valid dataset [path_dataset|id_dataset|raw_text] .\"\n )\n sys.exit(0)\n\n log.logger.info(\"Input data type: {}\".format(result))\n return result\n except Exception as err:\n log.logger.error(err)\n sys.exit(0)",
"def is_string(self):\n return type(self.value) == str",
"def is_string(self):\n return self.type == py_tokenize.STRING",
"def is_string_like ( v ) :\n return isinstance ( v , string_types )",
"def _validate_lookup(lookup):\n if not isinstance(lookup, str):\n raise TypeError(\"Lookup value must be string. Given type {0}.\".format(type(lookup)))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
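As a usage sketch for the check above (the evaluate caller is hypothetical and only shows where the guard fits):

def _check_datatype_to_string(prediction):
    if isinstance(prediction, str):
        return True
    raise TypeError('Prediction is not in string type.')

def evaluate(prediction):
    # 'evaluate' is a made-up caller; the guard rejects non-string input before any scoring.
    _check_datatype_to_string(prediction)
    return len(prediction.split())

evaluate("the quick brown fox")   # returns 4
# evaluate(42) raises TypeError('Prediction is not in string type.')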
MainVision class's constructor. Initializes, notably, the various ROS callbacks and starts the puck detection
|
def __init__(self):
rospy.init_node(ROS_VISION_NODE_NAME)
rospy.on_shutdown(self.stopCurrentPuckDetector)
self.puckDetector = None
self.MODE = PuckDetectorBuilder.ROS
self.m_reconfigureSubscriber = rospy.Subscriber(ROS_SUBSCRIBER_CONFIG_START_TOPIC_NAME, Bool, self.reconfigureCallBack)
self.startPuckDetector(False) #starts a PuckDetector without a reconfigure request
|
[
"def __init__(self):\n self.bridge = CvBridge()\n rospy.init_node('occlusion_renderer')\n\n # Instantiate OcclusionRenderer object\n self.pkg_path = rospkg.RosPack().get_path('occlusion_render')\n sawyer_dae = '%s/models/sawyer.dae' % self.pkg_path\n self.renderer = OcclusionRenderer(sawyer_dae)\n self.renderer.setup_sensor()\n\n # Publish renders onto topic\n self.publisher = rospy.Publisher(\n '/pose_image/occlusion_render', Image, queue_size=1)\n\n # Register callback subscribing to image and camera info\n image_sub = message_filters.Subscriber(\n '/pose_image/image', Image)\n info_sub = message_filters.Subscriber(\n '/pose_image/camera_info', CameraInfo)\n image_sync = message_filters.TimeSynchronizer(\n [image_sub, info_sub], 1)\n image_sync.registerCallback(self.image_callback)\n\n # Register callback subscribing to joint angles\n rospy.Subscriber('/robot/joint_states', JointState,\n self.joints_callback)",
"def __init__(self):\n # create ROS subscribers and publishers.\n self.speed = 0\n self.brake_pub = rospy.Publisher('brake', AckermannDriveStamped, queue_size=100)\n self.brake_bool_pub = rospy.Publisher('brake_bool', Bool, queue_size=100)\n self.odom_sub = rospy.Subscriber('odom', Odometry, self.odom_callback)\n self.scan_sub = rospy.Subscriber('scan', LaserScan, self.scan_callback)",
"def __init__(self):\n self.speed = 0\n # TODO: create ROS subscribers and publishers.\n self.sub_odom = rospy.Subscriber('/odom', Odometry, self.odom_callback)\n self.sub_scan = rospy.Subscriber('/scan', LaserScan, self.scan_callback)\n self.pub_brake = rospy.Publisher('/brake', AckermannDriveStamped, queue_size=10)\n self.pub_brake_bool = rospy.Publisher('/brake_bool', Bool, queue_size=10)\n\n self.brake_msg = AckermannDriveStamped()\n self.brake_bool_msg = Bool()",
"def __init__(self):\n self.publisher = rospy.Publisher(\n '/thorvald_001/teleop_joy/cmd_vel',\n Twist, queue_size=1)\n rospy.Subscriber(\"/thorvald_001/front_scan\", LaserScan, self.callback)",
"def __init__(self, input_rgb_image_topic=\"image_raw\", output_rgb_image_topic=\"/opendr/image_face_reco_annotated\",\n detections_topic=\"/opendr/face_recognition\", detections_id_topic=\"/opendr/face_recognition_id\",\n database_path=\"./database\", device=\"cuda\", backbone=\"mobilefacenet\"):\n super().__init__('opendr_face_recognition_node')\n\n self.image_subscriber = self.create_subscription(ROS_Image, input_rgb_image_topic, self.callback, 1)\n\n if output_rgb_image_topic is not None:\n self.image_publisher = self.create_publisher(ROS_Image, output_rgb_image_topic, 1)\n else:\n self.image_publisher = None\n\n if detections_topic is not None:\n self.face_publisher = self.create_publisher(ObjectHypothesisWithPose, detections_topic, 1)\n else:\n self.face_publisher = None\n\n if detections_id_topic is not None:\n\n self.face_id_publisher = self.create_publisher(String, detections_id_topic, 1)\n else:\n self.face_id_publisher = None\n\n self.bridge = ROS2Bridge()\n\n # Initialize the face recognizer\n self.recognizer = FaceRecognitionLearner(device=device, mode='backbone_only', backbone=backbone)\n self.recognizer.download(path=\".\")\n self.recognizer.load(\".\")\n self.recognizer.fit_reference(database_path, save_path=\".\", create_new=True)\n\n # Initialize the face detector\n self.face_detector = RetinaFaceLearner(backbone='mnet', device=device)\n self.face_detector.download(path=\".\", verbose=True)\n self.face_detector.load(\"retinaface_{}\".format('mnet'))\n self.class_names = [\"face\", \"masked_face\"]\n\n self.get_logger().info(\"Face recognition node initialized.\")",
"def __init__(self):\n\n # set up subscriber\n self.bridge = CvBridge()\n self.subscriber = rospy.Subscriber(\"/camera/rgb/image_raw\", sensor_msgs.msg.Image, self.callback, queue_size = 1)\n\n # load reference image\n refImg_path = \"/home/pfaion/catkin_ws/src/ggp_robot/src/board_low2.jpg\"\n self.refImg = cv2.imread(refImg_path, cv2.CV_LOAD_IMAGE_COLOR)\n \n # compute features of reference image\n (self.refGray,\n self.refRegions,\n self.refPoints,\n self.refDes) = self.computeFeatures(self.refImg, more=True)\n print len(self.refPoints)",
"def __init__(self, X=350, Y=350, Z=350, Rx=180, Ry=0, Rz=135, debug=False, gripper_open=True, use_killswitch=True, use_subproc=False):\n self.debug_flag = debug # if set to true, robot won't actually move\n self.__set_coords(X, Y, Z, Rx, Ry, Rz)\n self.__safety_check()\n self.__use_camera = False # if True, take photos in between moves, (See toggle_camera)\n self.__wait_time = 0 # wait time if use_camera set to True, (See toggle_camera)\n self.__previous_script = \"\" # previous script sent to robot, (See __move)\n self.__gripper_status = gripper_open # True: open, False: closed (See grab and release)\n self.__img_tag = 0\n self.use_killswitch = use_killswitch\n self.__joint_mode = False\n self.__joint1 = 45\n self.__joint2 = 0\n self.__joint3 = 90\n self.__joint4 = 0\n self.__joint5 = 90\n self.__joint6 = 0\n if use_subproc:\n self.start_tm_driver()\n self.use_subprocess_to_run_driver = use_subproc",
"def robotInit(self):\n\n # Here we create a function for the command class to return the robot\n # instance, so that we don't have to import the robot module for each\n # command.\n Command.getRobot = lambda _: self\n\n # This launches the camera server between the robot and the computer\n wpilib.CameraServer.launch()\n\n self.joystick = wpilib.Joystick(0)\n\n self.lr_motor = ctre.WPI_TalonSRX(1)\n self.lf_motor = ctre.WPI_TalonSRX(2)\n\n self.rr_motor = ctre.WPI_TalonSRX(5)\n self.rf_motor = ctre.WPI_TalonSRX(6)\n\n self.left = wpilib.SpeedControllerGroup(self.lf_motor, self.lr_motor)\n self.right = wpilib.SpeedControllerGroup(self.rf_motor, self.rr_motor)\n\n self.drivetrain_solenoid = wpilib.DoubleSolenoid(2, 3)\n\n self.drivetrain_gyro = wpilib.AnalogGyro(1)\n\n # Here we create the drivetrain as a whole, combining all the different\n # robot drivetrain compontents.\n self.drivetrain = drivetrain.Drivetrain(self.left, self.right,\n self.drivetrain_solenoid,\n self.drivetrain_gyro,\n self.rf_motor)\n\n self.l_gripper = wpilib.VictorSP(0)\n self.r_gripper = wpilib.VictorSP(1)\n\n self.grippers = grippers.Grippers(self.l_gripper, self.r_gripper)\n\n self.elevator_motor = wpilib.VictorSP(2)\n\n self.elevator_top_switch = wpilib.DigitalInput(4)\n self.elevator_bot_switch = wpilib.DigitalInput(5)\n\n self.elevator = elevator.Elevator(self.elevator_motor,\n self.elevator_top_switch,\n self.elevator_bot_switch)\n\n self.handles_solenoid = wpilib.DoubleSolenoid(0, 1)\n\n self.handles = handles.Handles(self.handles_solenoid)\n\n # This creates the instance of the autonomous program that will run\n # once the autonomous period begins.\n self.autonomous = AutonomousProgram()\n\n # This gets the instance of the joystick with the button function\n # programmed in.\n self.josytick = oi.getJoystick()",
"def __init__(self):\n super(DrivingNode, self).__init__(\"driving\", True, 24)\n rospy.Subscriber('joystick', JoystickState, self.joystick_callback) # Joystick subscriber\n rospy.Subscriber('gyroscope_robot', GyroState, self.gyro_callback) # Gyroscope subscriber\n\n # Controls\n self.pwm = 0\n self.current_direction = \"up\"\n self.left_joy = Joy() # Joy1, speed\n self.right_joy = Joy() # Joy2, direction and speed\n self.gyroscope = Gyroscope()\n self.last_message_time = rospy.get_time()\n\n while not rospy.is_shutdown() and self.is_running:\n self.update()",
"def loadInit(self):\n # Read video\n self.video = cv2.VideoCapture(self.path)\n # Exit if video not opened.\n if not self.video.isOpened():\n print(\"Error - Could not open video\")\n sys.exit(-1)\n\n # store video width/height to variables\n self.video_width = int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH))\n self.video_height = int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n # Read and parse existing groundtruth file\n if not(os.path.exists(self.groundtruth_path)):\n print(\"Error - Could not read a groundtruth file\")\n sys.exit(-1)\n\n # Read and parse existing tracking result file\n if not(os.path.exists(self.result_path)):\n print(\"Error - Could not read a tracking result file\")\n sys.exit(-1)\n\n # list of annotated bounding box objects\n self.gt_bounding_boxes = []\n # list of tracking result bounding box objects\n self.result_bounding_boxes = []\n\n # parsing groundtruth and result files\n self.gt_bounding_boxes = self.parser.parseGivenDataFile(self.groundtruth_path, self.video_width)\n self.result_bounding_boxes = self.parser.parseGivenDataFile(self.result_path, self.video_width)",
"def _initLoad(self):\n ########## 1) Video Checking ##########\n # Read video\n self.video = cv2.VideoCapture(self.video_path)\n # Exit if video not opened.\n if not self.video.isOpened():\n print(\"Could not open video\")\n print(help)\n sys.exit(-1)\n\n # Read first frame.\n ok, self.frame = self.video.read()\n if not ok:\n print(\"Error - Could not read a video file\")\n sys.exit(-1)\n\n # save video width/height\n self.video_width = int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH))\n self.video_height = int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n ########## 2) Tracker Checking ##########\n # handle selection of tracker type\n if not(self.tracker_name in self.TRACKER_TYPES):\n print(\"Invalid tracker name: '\" + self.tracker_name + \"'\")\n print(\"Supported tracker names: BOOSTING, MIL, KCF, TLD, MEDIANFLOW, GOTURN, MOSSE, CSRT\")\n sys.exit(-1)\n\n # Set up tracker\n if self.tracker_name == 'BOOSTING':\n self.tracker = cv2.TrackerBoosting_create()\n elif self.tracker_name == 'MIL':\n self.tracker = cv2.TrackerMIL_create()\n elif self.tracker_name == 'KCF':\n self.tracker = cv2.TrackerKCF_create()\n elif self.tracker_name == 'TLD':\n self.tracker = cv2.TrackerTLD_create()\n elif self.tracker_name == 'MEDIANFLOW':\n self.tracker = cv2.TrackerMedianFlow_create()\n elif self.tracker_name == 'GOTURN':\n self.tracker = cv2.TrackerGOTURN_create()\n elif self.tracker_name == 'MOSSE':\n self.tracker = cv2.TrackerMOSSE_create()\n elif self.tracker_name == 'CSRT':\n self.tracker = cv2.TrackerCSRT_create()\n \n ########## 3) Setup opencv window ##########\n # resize window (lets define max width is 1600px)\n if self.video_width < 1600:\n cv2.namedWindow(self.WINDOW_NAME)\n else:\n cv2.namedWindow(self.WINDOW_NAME, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)\n whRatio = self.video_width / self.video_height\n if whRatio == 2:\n # pure equirectangular 2:1\n cv2.resizeWindow(self.WINDOW_NAME, 1600, 800)\n else:\n # default 16:9\n cv2.resizeWindow(self.WINDOW_NAME, 1600, 900)\n\n scaleFactor = self.video_width / 1600\n self.RECTANGLE_BORDER_PX = int(self.RECTANGLE_BORDER_PX * scaleFactor)\n self.FONT_SCALE = self.FONT_SCALE * scaleFactor\n self.FONT_WEIGHT = int(self.FONT_WEIGHT * scaleFactor) + 1\n self.TEXT_ROW1_POS = (int(self.TEXT_ROW1_POS[0] * scaleFactor), int(self.TEXT_ROW1_POS[1] * scaleFactor))\n self.TEXT_ROW2_POS = (int(self.TEXT_ROW2_POS[0] * scaleFactor), int(self.TEXT_ROW2_POS[1] * scaleFactor))\n self.TEXT_ROW3_POS = (int(self.TEXT_ROW3_POS[0] * scaleFactor), int(self.TEXT_ROW3_POS[1] * scaleFactor))\n self.TEXT_ROW4_POS = (int(self.TEXT_ROW4_POS[0] * scaleFactor), int(self.TEXT_ROW4_POS[1] * scaleFactor))\n\n # use copy of frame to be shown in window\n frame_disp = self.frame.copy()\n\n ########## 4) Initialation of bounding box ##########\n # Set up initial bounding box\n self.bbox = None\n self.gt_bounding_boxes = []\n self.result_bounding_boxes = []\n if self.groundtruth_path:\n # use first bounding box from given groundtruth\n self.gt_bounding_boxes = self.parser.parseGivenDataFile(self.groundtruth_path, self.video_width)\n \n if len(self.gt_bounding_boxes) > 0:\n bb1 = self.gt_bounding_boxes[0]\n if bb1.is_annotated:\n self.bbox = (bb1.get_point1_x(), bb1.get_point1_y(), bb1.get_width(), bb1.get_height())\n self.result_bounding_boxes.append(bb1)\n else:\n print(\"Error - Invalid first frame annotation from file: '\" + self.groundtruth_path + \"'\")\n sys.exit(-1)\n else:\n # using opencv select ROI\n cv2.putText(frame_disp, 'Select target ROI and press ENTER', 
self.TEXT_ROW1_POS, cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE, (0, 200, 250), self.FONT_WEIGHT)\n self.bbox = cv2.selectROI(self.WINDOW_NAME, frame_disp, False)\n\n # save it to result list\n p1 = (int(self.bbox[0]), int(self.bbox[1]))\n p2 = (int(self.bbox[0] + self.bbox[2]), int(self.bbox[1] + self.bbox[3]))\n # new instance of bounding box\n bb1 = BoundingBox(p1, p2, self.video_width)\n bb1.is_annotated = True\n self.result_bounding_boxes.append(bb1)\n\n if not(self.bbox) or self.bbox == (0,0,0,0):\n print(\"Error - Invalid first frame annotation\")\n sys.exit(-1)",
"def __init__(self):\n super(MetatoneClassifierController, self).__init__()\n self.classifying = False\n self.last_gesture_classes = \"No performance started yet.\"\n self.last_performance_state = \"No performance started yet.\"\n self.last_performance_time = \"\"\n self.current_active_devices = \"None.\"\n self.current_performance_state = (False,False,False)\n self.classification_thread = threading.Thread(target=self.classify_forever, name=\"Classification-Thread\")",
"def start(self):\n # * Initialize parameters and flags\n delay = 10 # ms; only used in GUI mode, needed to process window events\n delayS = delay / 1000.0 # sec; only used in non-GUI mode, so this can be set to 0\n showInput = self.context.options.gui and True\n showOutput = self.context.options.gui and True\n showFPS = False\n showKeys = False\n isFrozen = False\n \n # * Set signal handler before starting vision loop (NOTE must be done in the main thread of this process)\n signal.signal(signal.SIGTERM, self.handleSignal)\n signal.signal(signal.SIGINT, self.handleSignal)\n \n # * Vision loop\n self.logger.info(\"Starting vision loop...\")\n self.isOkay = True\n frameCount = 0 # TODO get frameCount directly from inputDevice\n fresh = True\n self.context.resetTime() # start afresh\n timeLast = self.context.timeNow\n while self.isOkay:\n # ** [timing] Obtain relative timestamp for this loop iteration\n self.context.update()\n \n # ** Print any pre-frame messages\n if not self.context.options.gui:\n self.logger.info(\"[LOOP] Frame: {0:05d}, time: {1:07.3f}\".format(frameCount, self.context.timeNow)) # if no GUI, print something to show we are running\n if showFPS:\n timeDiff = (self.context.timeNow - timeLast)\n fps = (1.0 / timeDiff) if (timeDiff > 0.0) else 0.0\n self.logger.info(\"[LOOP] {0:5.2f} fps\".format(fps))\n #self.logger.debug(\"Pipeline: \" + str(self.pipeline)) # current state of pipeline (preceding ~ means processor is inactive)\n \n # ** Read frame from input device\n if not isFrozen:\n if not self.inputDevice.read():\n break # camera disconnected or reached end of video\n \n if showInput:\n cv2.imshow(\"Input\", self.inputDevice.image)\n \n # ** Initialize FrameProcessors, if required\n if(fresh):\n self.pipeline.initialize(self.inputDevice.image, self.context.timeNow)\n fresh = False\n \n # ** TODO Activate/deactivate processors as desired\n \n # ** Process frame\n keepRunning, imageOut = self.pipeline.process(self.inputDevice.image, self.context.timeNow)\n \n # ** TODO Perform post-process functions\n \n # ** Show output image\n if showOutput and imageOut is not None:\n cv2.imshow(\"Output\", imageOut) # output image from last processor\n if not keepRunning:\n self.stop()\n \n # ** Check if GUI is available\n if self.context.options.gui:\n # *** If so, wait for inter-frame delay and process keyboard events using OpenCV\n key = cv2.waitKey(delay)\n if key != -1:\n keyCode = key & 0x00007f\n keyChar = chr(keyCode) if not (key & KeyCode.SPECIAL) else None\n \n if showKeys:\n self.logger.info(\"Key: \" + KeyCode.describeKey(key))\n \n if keyCode == 0x1b or keyChar == 'q':\n break\n elif keyChar == ' ':\n self.logger.info(\"[PAUSED] Press any key to continue...\")\n self.context.pause() # [timing] saves timestamp when paused\n cv2.waitKey() # wait indefinitely for a key press\n self.context.resume() # [timing] compensates for duration paused\n self.logger.info(\"[RESUMED]\")\n elif keyCode == 0x0d or keyCode == 0x0a:\n isFrozen = not isFrozen # freeze frame, but keep processors running\n self.logger.info(\"Input {} at {:.2f}\".format(\"frozen\" if isFrozen else \"thawed\", self.context.timeNow))\n elif keyChar == 'x':\n self.pipeline.deactivateProcessors()\n self.logger.info(\"Pipeline processors deactivated.\")\n elif keyChar == 'y':\n self.pipeline.activateProcessors()\n self.logger.info(\"Pipeline processors activated.\")\n elif keyChar == 'f':\n showFPS = not showFPS\n elif keyChar == 'k':\n showKeys = not showKeys\n else:\n keepRunning = self.pipeline.onKeyPress(key, 
keyChar) # pass along key-press to processors in pipeline\n if not keepRunning:\n self.stop()\n else:\n # *** Else, wait for inter-frame delay using system method\n sleep(delayS)\n \n # ** [timing] Save timestamp for fps calculation\n timeLast = self.context.timeNow\n \n # * Reset signal handlers to default behavior\n signal.signal(signal.SIGTERM, signal.SIG_DFL)\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n \n # * Clean-up\n self.logger.info(\"Cleaning up...\")\n if self.context.options.gui:\n cv2.destroyAllWindows()\n self.inputDevice.close()",
"def robotInit(self) -> None:\n ...",
"def __init__(self):\n # Call parent constructor\n super(Ev3TrackedExplor3r, self).__init__()\n\n # Init robot sensors and actuators\n try:\n # Init robot actuators\n self.left_motor = ev3.LargeMotor('outB') # Address is important for motors\n self.right_motor = ev3.LargeMotor('outC')\n self.head_motor = ev3.MediumMotor('outA')\n self.head_motor.position_i = 1000\n\n # Init robot sensors\n # self.color_sensor = ev3.ColorSensor() # Address is not really important for sensors\n # self.color_sensor.mode = 'COL-REFLECT' # if there are not two instance of the same type\n self.ir_sensor = ev3.InfraredSensor() # of sensor\n self.ir_sensor.mode = 'IR-PROX'\n except Exception as theException:\n # Most probably one of the sensors or one of the actuators is not connected\n ev3te.ev3te_logger.critical(\"Ev3TrackedExplor3r: Exception in routine __init__() + \"\n + str(theException))\n # Init status fields\n self.ir_reading_update_counter = 0\n self.ir_samples_to_skip = 5\n self.ir_last_reading = 0\n self.ir_scan_thread = IRScanner(self)",
"def __init__(self):\n\n self.handle_sysargs()\n self.init_engine()\n self.init_input()\n self.init_caches()\n\n self.start_game()\n self.running = True # When this becomes false, main loop inside run() will quit\n\n self.run()\n self.quit()",
"def __init__(self, host, port):\n \n self.logger = logging.getLogger(\"Borg.Brain.Vision.PredatorSegment\")\n super(PredatorSegment, self).__init__(host, port)\n\n \n self.predator_ready = False\n self.predator_on = False\n \n\t\t#Enable these lines if you want to also use openni kinect images in this module.\n\t\t#Note that there are some unstability problems when matlab is using openni. Somethind\n\t\t#related to Matlab's openni version\n\n #self.depth = kv.OpenNIKinect(\"depth\")\n #self.rgb = kv.OpenNIKinect(\"rgb\")\n \n\t\t#Makes sure that openni is retreiving images before conitnuing.\n #depth = None\n #while not depth:\n # depth = self.depth.get_image()\n\n\t\t#Width and Height of the image being processed (resolution), select manually if\n\t\t#you are not using openni kinect\n #(width, height) = cv.GetSize(depth)\n width, height = 320, 240\n self.transformer = util.speed_angle.SpeedAngle(None, width, height)\n self.area = self.transformer.get_width() * self.transformer.get_height()\n \n\t\t#Creating path to make required files for OpenTLD startup.\n path = \"/dev/shm/images\"\n try:\n os.mkdir(path)\n os.mkdir(path+ \"/predator\") \n except:\n self.logger.warn(\"Cannot create directory for predator in \" + path)",
"def __init__(self):\n self._buffer = list()\n self._stream = cv2.VideoCapture(constants.V_PORT)\n # 1080p resolution recommended for better contour detection results\n self._stream.set(cv2.CAP_PROP_FRAME_HEIGHT,constants.FRAME_HEIGHT)\n self._stream.set(cv2.CAP_PROP_FRAME_WIDTH,constants.FRAME_WIDTH)\n # initial video capture poll\n self._status = True",
"def __init__(self):\n rospy.init_node('camera_relay')\n\n self.camera_relay_pub = rospy.Publisher(self.ROBOT_PUB_TOPIC, Bool, queue_size=3)\n self.enable_service = rospy.Service('enable_camera', SetBool, self._handle_enable_camera)\n\n self.enabled = True\n self.camera_relay_pub.publish(self.enabled)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
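As a rough illustration of the lifecycle in the entry above (init the node, register a shutdown hook, subscribe to a reconfigure topic), here is a minimal rospy sketch; the node and topic names below are placeholders rather than the original constants.

import rospy
from std_msgs.msg import Bool

class MinimalVisionNode:
    def __init__(self):
        # Register the node and make sure cleanup runs when ROS shuts down.
        rospy.init_node("vision_node_example")                      # placeholder node name
        rospy.on_shutdown(self.shutdown)
        # A Bool message on this topic requests a reconfiguration.
        self.sub = rospy.Subscriber("config_start", Bool, self.on_reconfigure)
        self.detector = None

    def on_reconfigure(self, msg):
        rospy.loginfo("reconfigure requested: %s", msg.data)

    def shutdown(self):
        # Drop any running detector so the process can exit cleanly.
        self.detector = None

if __name__ == "__main__":
    MinimalVisionNode()
    rospy.spin()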
Called to stop the puck detection done by the current PuckDetector
|
def stopCurrentPuckDetector(self):
if self.puckDetector != None:
self.puckDetector.userWantsToQuit()
self.puckDetector.stopIncommingCameraFeed()
self.puckDetector = None
|
[
"def kill(self):\n\n self.running = False\n\n try:\n # teardown robot\n self.strategy.teardown()\n except Exception:\n # method not implemented by strategy\n pass",
"def stopMotor(self) -> None:\n ...",
"def stop(self):\n\n self.tank_level.stop()",
"def stop(self):\n self.stop_loop_event.set()\n self.steppermotor.stop_step()\n self.caliper.stop_listening()",
"def stop(self):\n self.stop_event.set()\n if self.ledController is not None:\n self.ledController.stop()\n if self.motionController is not None:\n self.motionController.stop()",
"def stop(self):\n\t\tself._keepListening = False",
"def stop(self):\n\n for pump in self.pumps.values():\n pump.stop()",
"def stop(self):\n Multipass.stop(self.name)",
"async def stop(self):\n self.playing = False\n self.pm.clean()\n self.entries[:] = []\n\n await self.bot.say(\":information_source: Stopping the blindtest\")\n\n if self.player is not None:\n if self.player.is_playing() is True:\n self.player.stop()\n\n if self.voice is not None:\n if self.voice.is_connected() is True:\n await self.voice.disconnect()\n \n self.voice = None\n self.player = None",
"def stop_poisoning(self):\n self.stop = True\n # self.stop_thread = threading.Thread(target=self.restore_network)",
"def stop(self):\n self.abort = True\n for peer in self.peers:\n peer.stop()\n self.piece_manager.close()\n self.tracker.close()",
"def stop(self):\n\n self.active = False\n self.join()",
"def _stop_motor(self):\n self._send_command_single(CMD.STOP)",
"def stop(self):\r\n if self.tag is not None:\r\n self.canvas.delete(self.tag)\r\n if self.tag is not None:\r\n if self.tag in self.balls:\r\n del self.balls[self.tag]",
"def stop(self):\n self.daqcontroller.stop_voltage()",
"def stop(self):\n if self.patcher:\n self.patcher.stop()",
"def stop(self):\n self.pause()\n self.imagemodel.active = False\n self.imagemodel.join()",
"def stop(self):\n self.is_alive = False\n if(self.port in serials_in_use):\n serials_in_use.remove(self.port)",
"def stop(self):\n self._check(pn_messenger_stop(self._mng))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Called to start a new PuckDetector
|
def startPuckDetector(self, i_reconfigure):
builder = PuckDetectorBuilder(self.MODE, 30, i_reconfigure)
self.stopCurrentPuckDetector()
self.puckDetector = builder.build()
self.puckDetector.findPuck()
|
[
"def __init__(self):\n rospy.init_node(ROS_VISION_NODE_NAME)\n rospy.on_shutdown(self.stopCurrentPuckDetector)\n\n self.puckDetector = None\n self.MODE = PuckDetectorBuilder.ROS\n self.m_reconfigureSubscriber = rospy.Subscriber(ROS_SUBSCRIBER_CONFIG_START_TOPIC_NAME, Bool, self.reconfigureCallBack)\n\n self.startPuckDetector(False) #starts a PuckDetector without a reconfigure request",
"def start(self):\n\n self.tank_level.start()",
"def initiate(self, frame, detections):\n if self.tracks:\n self.tracks.clear()\n self.flow.initiate(frame)\n for det in detections:\n state = self.kf.initiate(det.tlbr)\n new_trk = Track(0, self.next_id, det.tlbr, state, det.label)\n self.tracks[self.next_id] = new_trk\n LOGGER.debug('Detected: %s', new_trk)\n self.next_id += 1",
"def initialize_from_detector(self, detector):\n detector.initialize_channel_data(self)",
"def control(self):\n if self.running:\n\n if self.k < len(self.vehicle_ids)*self.headstart_samples and self.do_startup:\n self._startup()\n else:\n self._run_mpc()",
"def initiate_Pepper(self):\n\n # starts the recognizer\n r = sr.Recognizer()\n\n with sr.Microphone() as source:\n\n while True:\n logger.debug(\"Awaiting user input.\")\n audio = r.listen(source)\n\n logger.debug(\"Interpreting user input.\")\n\n # Speech recognition using Google Speech Recognition\n try:\n result = r.recognize_google(audio)\n #result = r.recognize_sphinx(audio)\n\n self.handle_action(result)\n\n except sr.UnknownValueError:\n logger.debug(\"Could not understand audio\")\n #Pepper.speak(\"I'm sorry, but I couldn't understand what you said.\")\n except sr.RequestError as e:\n logger.warn(\"Could not request results from Google Speech Recognition service: %s\", e)\n except Exception as e:\n logger.error(\"Could not process text: %s\", e)",
"def __init__(self, proportional_gain, integral_gain, differential_gain, stepper_motor, caliper, error_margin,\n steppermotor_frequency_limits, settling_time, name, setpoint_offset, interrupt_ignore_time):\n self.pid = PID(p=proportional_gain, i=integral_gain, d=differential_gain) # P I D controller\n self.steppermotor = stepper_motor # The stepper motor moving the load\n self.caliper = caliper # The caliper providing position feedback.\n self.stop_loop_event = threading.Event() # This is set when the control loop stops\n self.setpoint = None # Current setpoint\n self.error_margin = error_margin\n self.step_frequency_min, self.step_frequency_max = steppermotor_frequency_limits\n self.name = name\n self.settling_time = settling_time\n self.setpoint_offset = setpoint_offset\n self.interrupt_ignore_time = interrupt_ignore_time\n\n self.start_settling_time = None # timestamp when settling started\n self.settling = False # true if within allowed error band\n self.captured_data = [] # Stores captured data for visualization and debugging purposes",
"def _connectDetector(self):\n logger.debug('Connecting detector signals to slots in Exposer')\n if self.detector is not None:\n logger.warning('Detector signals already connected')\n return\n assert isinstance(self.instrument.devicemanager, DeviceManager)\n detector = self.instrument.devicemanager.detector()\n assert isinstance(detector, PilatusDetector) # ToDo: generalize\n self.detector = detector\n self.detector.connectionEnded.connect(self.onDetectorDisconnected)\n self.detector.variableChanged.connect(self.onDetectorVariableChanged)\n self.detector.commandResult.connect(self.onCommandResult)",
"def init_drone(self):\n dronePosition = DronePosition()\n droneConnection = DroneConnection(\"192.168.100.1\", 4646, 19798)\n self.drone = Drone(droneConnection, dronePosition)",
"def setup(bot: commands.Bot) -> None:\n bot.add_cog(Speedrun(bot))\n log.info(\"Speedrun cog loaded\")",
"def start(self):\r\n from ubcs_auxiliary.threading import new_thread\r\n new_thread(self.run)",
"def start(self):\n self.running = True",
"def __init__(self, name):\n super().__init__(name)\n self.magnetic_tape = True",
"def __init__(self):\n super(MetatoneClassifierController, self).__init__()\n self.classifying = False\n self.last_gesture_classes = \"No performance started yet.\"\n self.last_performance_state = \"No performance started yet.\"\n self.last_performance_time = \"\"\n self.current_active_devices = \"None.\"\n self.current_performance_state = (False,False,False)\n self.classification_thread = threading.Thread(target=self.classify_forever, name=\"Classification-Thread\")",
"def DaemonStarting(self):\n pass",
"def xray_detector_start(self):\n if self.xray_detector_enabled:\n info(\"Setting up X-ray detector...\")\n import lauecollect; lauecollect.load_settings()\n from ImageViewer import show_images\n filenames = self.image_filenames\n show_images(filenames)\n ccd.bin_factor = lauecollect.align.ccd_bin_factor # Speeds up the acquisition time",
"def __init__(self, detectorPath = None, predictorPath = None, predictorRef = None):\n self.detector = dlib.get_frontal_face_detector()\n self.predictor = dlib.shape_predictor(predictorPath)\n self.refs = predictorRef\n self.face = None\n self.landmarks = []",
"def setup_car(self):\n car_bp = self.world.get_blueprint_library().filter(\"vehicle.*\")[0]\n location = random.choice(self.world.get_map().get_spawn_points())\n self.car = self.world.spawn_actor(car_bp, location)",
"def __init__(self):\n\n self.deck = Deck()\n self.keep_playing = True\n self.total_points = 300"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
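The start/stop pair in the last two entries follows a stop-then-rebuild pattern: any running detector is torn down before a new one is constructed. A condensed sketch of that pattern, with a hypothetical builder_factory standing in for PuckDetectorBuilder:

class DetectorManager:
    def __init__(self, builder_factory):
        # builder_factory is an assumed callable(reconfigure) returning a configured builder.
        self._builder_factory = builder_factory
        self._detector = None

    def stop(self):
        # Idempotent teardown: safe to call even when nothing is running.
        if self._detector is not None:
            self._detector.userWantsToQuit()
            self._detector.stopIncommingCameraFeed()
            self._detector = None

    def start(self, reconfigure=False):
        # Always tear down the old detector before building and starting a new one.
        self.stop()
        self._detector = self._builder_factory(reconfigure).build()
        self._detector.findPuck()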
Wrapper for callable to cache arguments and return values.
|
def wrapper(*args, **kwargs):
key = wrapper.__cache_key__(*args, **kwargs)
result = g.cache.get(key, ENOVAL)
if result is ENOVAL:
result = func(*args, **kwargs)
g.cache[key] = result
return result
|
[
"def memoize(func, cache, num_args):\n def wrapper(*args):\n mem_args = args[:num_args]\n if mem_args in cache:\n return cache[mem_args]\n result = func(*args)\n cache[mem_args] = result\n return result\n return wraps(func)(wrapper)",
"def memoized(*args, **kwargs):\n\n arguments = args + tuple((a, b) for a, b in kwargs.items())\n\n if arguments not in cache:\n cache[arguments] = function(*args, **kwargs)\n\n return cache[arguments]",
"def memorize(func):\n # Store results in a dict that maps arguments to results\n cache = {}\n # Define the wrapper function to return.\n @wraps(func)\n def wrapper(*args, **kwargs):\n # If these arguments haven't been seen before,\n if (args, kwargs) not in cache:\n # Call func() and store the result.\n cache[(args, kwargs)] = func(*args, **kwargs)\n return cache[(args, kwargs)]\n return wrapper",
"def test_cached_func_returns_the_same_as_original():\n\n def foo(a, b):\n return a + b\n\n arguments = 10, 5\n cached_foo = cache(foo)\n\n assert foo(*arguments) == cached_foo(*arguments)",
"async def wrapper(*args: Tuple[Any, ...], **kwds: Dict[str, Any]) -> Any:\n key = CacheKey.make(args, kwds)\n value = cache[key]\n # cache miss/expired\n if value is None:\n result = await fn(*args, **kwds)\n cache[key] = CacheValue(expired=time.monotonic() + expire, data=result)\n return result\n return value.data",
"def _Memoize(func):\n l = threading.Lock()\n cache = {}\n def _Caller(*args, **kwargs):\n with l:\n params = repr((args, kwargs))\n try:\n return cache[params]\n except KeyError:\n result = func(*args, **kwargs)\n cache[params] = result\n return result\n return _Caller",
"def cached(__cache: dict):\n def _decorator(decoratee):\n\n def _inner(*args):\n try:\n return __cache[args]\n except KeyError:\n result = decoratee(*args)\n __cache[args] = result\n return result\n\n return _inner\n\n return _decorator",
"def cache(method):\n\n def on_call(self, *args, **kwargs):\n name = method.__name__\n try:\n return self._cache[name]\n except AttributeError:\n # Create the cache if necessary\n self._cache = {}\n except KeyError:\n # Handled below\n pass\n\n val = method(self, *args, **kwargs)\n self._cache[name] = val\n return val\n\n return on_call",
"def search_param_memoize(func):\r\n def wrapper(*args, **kwargs):\r\n key = (args, frozenset(kwargs.items()))\r\n if key in search_param_cache:\r\n return search_param_cache[key]\r\n else:\r\n rv = func(*args, **kwargs)\r\n search_param_cache[key] = rv\r\n return rv\r\n return wrapper",
"def memoize(function):\n cache = {}\n @functools.wraps(function)\n def _memoize(*args):\n if args in cache:\n return cache[args]\n result = function(*args)\n cache[args] = result\n return result\n return function",
"def memoize(func, resolver=None):\n\n def memoized(*args: P.args, **kwargs: P.kwargs):\n if resolver:\n key = resolver(*args, **kwargs)\n else:\n key = f\"{args}{kwargs}\"\n\n if key not in memoized.cache: # type: ignore\n memoized.cache[key] = func(*args, **kwargs) # type:ignore\n\n return memoized.cache[key] # type: ignore\n\n memoized.cache = {}\n\n return memoized",
"def cache_last(func):\n arg_cache = [None]\n kw_cache = [None]\n ret_cache = [None]\n\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n\n if args != arg_cache[0] or kwargs != kw_cache[0]:\n # Generate cache value\n ret_cache[0] = func(*args, **kwargs)\n arg_cache[0] = args\n kw_cache[0] = kwargs\n # Fetch from cache\n return ret_cache[0]\n\n return decorated",
"def memcache(*args, **kwargs):\n decorator_async = memcache_async(*args, **kwargs)\n def decorator(func):\n decorated_async = decorator_async(func)\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n return decorated_async(*args, **kwargs).get_result()\n return decorated\n return decorator",
"def _memo(fn):\n\n cache = {}\n\n @_functools.wraps(fn)\n def _fn(*args):\n if key: args = key(*args)\n try: ret = cache[args]\n except KeyError: ret = cache[args] = fn(*args)\n return ret\n\n _fn._cache = cache\n return _fn",
"def memoize_immutable(f):\n memo = {}\n def wrapper(*args, **kwargs):\n key = (args, frozenset(kwargs.items())) #Must use frozenset because kwargs (= a dictionary) cannot be used as part of dictionary key \n if not key in memo:\n memo[key] = f(*args, **kwargs)\n #print(f'Calculated \"{f.__name__}\" for args: {str(args)[:100]} and kwargs: {str(kwargs)[:100]}')\n else:\n pass\n #print(f'Looked-up \"{f.__name__}\" for args: {str(args)[:100]} and kwargs: {str(kwargs)[:100]}')\n return memo[key]\n return wrapper",
"def memoize(*args, **kwargs):\n if args:\n assert len(args) == 1\n assert not kwargs\n return memoize()(args[0])\n key_func = kwargs.pop('key_func', None)\n if kwargs:\n raise TypeError('memoize() got unexpected keyword arguments: %s', ', '.join(kwargs))\n\n return _memory_decorator({}, key_func)",
"def memoize_mutable(f):\n memo = {}\n def wrapper(*args, **kwargs):\n key = pickle.dumps(args) + pickle.dumps(kwargs) #To use as hash for mutable objects.\n if not key in memo:\n memo[key] = f(*args, **kwargs)\n #print(f'Calculated \"{f.__name__}\" for args: {str(args)[:100]} and kwargs: {str(kwargs)[:100]}')\n else:\n pass\n #print(f'Looked-up \"{f.__name__}\" for args: {str(args)[:100]} and kwargs: {str(kwargs)[:100]}')\n return memo[key]\n return wrapper",
"def self_memoized(func):\n\n cache_name = '_cache_{}'.format(func.__name__)\n\n def wrapper(self, *args, **kwargs):\n # Install the self-specific cache, if needed\n cache = getattr(self, cache_name, {})\n setattr(self, cache_name, cache)\n\n key = (args, tuple(kwargs.items()))\n try:\n result = cache[key]\n except KeyError:\n result = func(self, *args, **kwargs)\n cache[key] = result\n return result\n\n def reset(self):\n setattr(self, cache_name, {})\n\n wrapper.reset = reset\n\n return wrapper",
"def __call__(self, func, *args, **kwargs):\n\n\t\tdef cache(*args, **kwargs):\n\t\t\tkey = frozenset([*args, *tuple(kwargs.items())])\n\n\t\t\t# Check cache to see if we have something by this key stored; if we do, load it from the on-disk cache and\n\t\t\t# return it. If we don't, call the original function and write its results out to the cache. Only the JSON\n\t\t\t# response from the function is stored on disk, the return code is stored in memory since it's so small.\n\t\t\tif key in self.cache:\n\t\t\t\tfile_name, status_code = self.cache[key]\n\t\t\t\tlog.debug(\"Returning cached response {}/{} for {}\".format(file_name, status_code, func.__qualname__))\n\t\t\t\twith open(file_name, \"r\") as cached_file:\n\t\t\t\t\t# Parsing from JSON is necessary so Flask will send this back as a JSON object and not a string\n\t\t\t\t\t# TODO Remove JSON parsing step to improve cache performance if possible\n\t\t\t\t\tresult = json.load(cached_file)\n\t\t\telse:\n\t\t\t\tcache_dir = config.config[\"storage\"][\"cache\"]\n\t\t\t\tresult, status_code = func(*args, **kwargs)\n\t\t\t\tfilename = \"{cache_dir}/{qualname}-{uuid}.json\".format(cache_dir=cache_dir,\n\t\t\t\t qualname=func.__qualname__.lower(),\n\t\t\t\t uuid=uuid.uuid4())\n\t\t\t\tlog.info(\"Saving response from {qualname} to file {file}\".format(qualname=func.__qualname__, file=filename))\n\t\t\t\ttry:\n\t\t\t\t\t# Attempt to create the cache directory if it doesn't already exist, then dump the response out to file\n\t\t\t\t\tos.makedirs(cache_dir, exist_ok=True)\n\t\t\t\t\twith open(filename, \"w\") as file:\n\t\t\t\t\t\tjson.dump(result, file)\n\t\t\t\t\tself.cache[key] = (filename, status_code)\n\t\t\t\texcept (IOError or OSError) as e:\n\t\t\t\t\tlog.error(\"Failed to write file {} to cache: {}\".format(filename, e))\n\n\t\t\treturn result, status_code\n\t\tupdate_wrapper(cache, func)\n\t\treturn cache"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
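The wrapper above relies on a sentinel (ENOVAL) so that a legitimately cached None is not mistaken for a miss. Below is a self-contained sketch of that pattern; the decorator and its key handling are simplified stand-ins, not the original library's memoize.

import functools

ENOVAL = object()                      # sentinel meaning "nothing cached for this key"

def memoize(cache):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            key = (args, frozenset(kwargs.items()))
            result = cache.get(key, ENOVAL)
            if result is ENOVAL:       # miss: compute once and store
                result = func(*args, **kwargs)
                cache[key] = result
            return result
        return wrapper
    return decorator

@memoize(cache={})
def slow_add(a, b):
    return a + b

slow_add(2, 3)                         # computed on the first call, cached afterwards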
Make key for cache given function arguments.
|
def __cache_key__(*args, **kwargs):
return args_to_key(base, args, kwargs, False)
|
[
"def _build_cache_key(self, *args):\n return self.key if not self.key_mod else self.key % tuple(args)",
"def cachekey(func, *args, **kwargs):\n args2 = arguments(func, *args, **kwargs)\n\n # ignoring `instance`\n instance_index = getattr(func, '_instance_index', False)\n if instance_index is not False:\n args2.pop(instance_index)\n\n return prefix(func) + str(args2)",
"def cachekey_static(func, *args, **kwargs):\n return prefix(func) + kwargs.get('cachekey', '')",
"def make(cls, args: Tuple[Any, ...], kwds: Dict[str, Any]) -> \"CacheKey\":\n key = []\n # positional arguments\n for argument in args:\n if cls.is_primitive(argument):\n key.append(argument)\n # keyword arguments\n for pair in sorted(kwds.items()):\n if cls.is_primitive(pair[1]):\n key.append(pair)\n return CacheKey(tuple(key))",
"def make_cache_key(pattern, flags):\n return '{}_{}'.format(pattern, flags)",
"def _get_cache_key(self, **kwargs):\n key = 'cartodb_%s_' % _geohash.encode(\n kwargs.pop('lat'), kwargs.pop('lon'))[:8]\n key += '_'.join([\n '%s=%s' % (k, kwargs[k]) for k in sorted(kwargs.iterkeys())])\n return key",
"def get_cache_key(args, kwargs):\n if len(args) == 0 and len(kwargs) == 0:\n return \"\"\n elif len(args) == 0:\n return json.dumps(kwargs)\n elif len(kwargs.keys()) == 0:\n return json.dumps(args)\n else:\n return json.dumps([args, kwargs])",
"def _get_cache_key(self, **kwargs):\n m = md5()\n for significant_kwarg in self.significant_kwargs:\n key, to_str = significant_kwarg\n m.update(to_str(kwargs[key]))\n\n if hasattr(self, 'cache_prefix'):\n cache_prefix = self.cache_prefix\n else:\n cache_prefix = '%s.%s' % (self.__module__, self.__name__)\n return '%s:%s' % (cache_prefix, m.hexdigest())",
"def _cache_key_func(system: System, method: Callable) -> tuple[str, int]:\n if not isinstance(method, str):\n method = method.__name__\n return (f\"{type(system).__name__}.{method}\", id(system))",
"def function_sig_key(\n name: str,\n arguments_matter: bool,\n skip_ignore_cache: bool,\n *args: Any,\n **kwargs: Any,\n) -> int:\n function_sig = name\n if arguments_matter:\n for arg in args:\n function_sig += str(arg)\n for argname, value in kwargs.items():\n if skip_ignore_cache and argname == 'ignore_cache':\n continue\n\n function_sig += str(value)\n\n return hash(function_sig)",
"def _getkey(self, args, kwargs):\n\n values = list(args)\n\n keys = sorted(list(kwargs))\n\n for key in keys:\n values.append((key, kwargs[key]))\n\n result = hash(tuple(values))\n\n return result",
"def _make_cache_key(url, permanent=False):\n return \"WebTemplate:%s:%s\" % (url, {True: \"p\", False: \"t\"}[permanent])",
"def memoized(*args, **kwargs):\n\n arguments = args + tuple((a, b) for a, b in kwargs.items())\n\n if arguments not in cache:\n cache[arguments] = function(*args, **kwargs)\n\n return cache[arguments]",
"def memcache_async(key, key_args=None, time=None):\n assert isinstance(key, basestring), key\n key_args = key_args or []\n assert isinstance(key_args, list), key_args\n assert all(isinstance(a, basestring) for a in key_args), key_args\n assert all(key_args), key_args\n\n memcache_set_kwargs = {}\n if time is not None:\n memcache_set_kwargs['time'] = time\n\n def decorator(func):\n unwrapped = func\n while True:\n deeper = getattr(unwrapped, '__wrapped__', None)\n if not deeper:\n break\n unwrapped = deeper\n\n argspec = inspect.getargspec(unwrapped)\n if argspec.varargs:\n raise NotImplementedError(\n 'varargs in memcached functions are not supported')\n if argspec.keywords:\n raise NotImplementedError(\n 'kwargs in memcached functions are not supported')\n\n # List of arg names and indexes. Has same order as |key_args|.\n arg_indexes = []\n for name in key_args:\n try:\n i = argspec.args.index(name)\n except ValueError:\n raise KeyError(\n 'key_format expects \"%s\" parameter, but it was not found among '\n 'function parameters' % name)\n arg_indexes.append((name, i))\n\n @functools.wraps(func)\n @ndb.tasklet\n def decorated(*args, **kwargs):\n arg_values = []\n for name, i in arg_indexes:\n if i < len(args):\n arg_value = args[i]\n elif name in kwargs:\n arg_value = kwargs[name]\n else:\n # argspec.defaults contains _last_ default values, so we need to shift\n # |i| left.\n default_value_index = i - (len(argspec.args) - len(argspec.defaults))\n if default_value_index < 0:\n # Parameter not provided. Call function to cause TypeError\n func(*args, **kwargs)\n assert False, 'Function call did not fail'\n arg_value = argspec.defaults[default_value_index]\n arg_values.append(arg_value)\n\n # Instead of putting a raw value to memcache, put tuple (value,)\n # so we can distinguish a cached None value and absence of the value.\n\n cache_key = 'utils.memcache/%s/%s%s' % (\n get_app_version(), key, repr(arg_values))\n\n ctx = ndb.get_context()\n result = yield ctx.memcache_get(cache_key)\n if isinstance(result, tuple) and len(result) == 1:\n raise ndb.Return(result[0])\n\n result = func(*args, **kwargs)\n if isinstance(result, ndb.Future):\n result = yield result\n yield ctx.memcache_set(cache_key, (result,), **memcache_set_kwargs)\n raise ndb.Return(result)\n\n return decorated\n return decorator",
"def make_key(self, key, version=None):\n if version is None:\n version = self.version\n\n new_key = self.key_func(self.key_prefix, key, version)\n return new_key",
"def test_cached_func_returns_the_same_as_original():\n\n def foo(a, b):\n return a + b\n\n arguments = 10, 5\n cached_foo = cache(foo)\n\n assert foo(*arguments) == cached_foo(*arguments)",
"def get_cache_key(class_name, settings=()):\n return '#{0}:{1}'.format(class_name, hash(tuple(settings)))",
"def gen_keyfunc(endpoint=True, path=True, method=True, query_string=True,\n data=True, headers=None, session=True, content_type=True,\n content_length=True, remote_addr=True, use_checksum=True):\n\n def keyfunc():\n dimensions = {}\n if endpoint:\n dimensions['endpoint'] = request.endpoint\n if path:\n dimensions['path'] = request.path\n if method:\n dimensions['method'] = request.method\n if query_string:\n dimensions['query_string'] = request.query_string\n if data:\n dimensions['data'] = request.data\n if headers:\n d = {}\n for name in headers:\n d[name] = request.headers.get(name, None)\n dimensions['headers'] = str(sorted(d.items()))\n if session:\n dimensions['session'] = str(sorted(flask_session.items()))\n if content_type:\n dimensions['content_type'] = request.content_type\n if content_length:\n dimensions['content_length'] = request.content_length\n if remote_addr:\n dimensions['remote_addr'] = request.headers.get(\n 'X-Forwarded-For',\n request.remote_addr)\n origin_key = str(sorted(dimensions.items()))\n if use_checksum:\n # Use hashed stringify dimensions\n sha = hashlib.sha1()\n sha.update(origin_key.encode('utf8'))\n return sha.hexdigest()\n return origin_key\n return keyfunc",
"def as_cache_key(self, ireq):\n extras = tuple(sorted(ireq.extras))\n if not extras:\n extras_string = \"\"\n else:\n extras_string = \"[{}]\".format(\",\".join(extras))\n name = _key_from_req(ireq.req)\n version = get_pinned_version(ireq)\n return name, \"{}{}\".format(version, extras_string)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A method used for computing composition descriptors. AAProperty is a dict containing a classification of amino acids, such as _Polarizability. AAPName is a string indicating the AAP name. The result is a dict containing composition descriptors based on the given property.
|
def CalculateComposition(ProteinSequence,AAProperty,AAPName):
    TProteinSequence=StringtoNum(ProteinSequence,AAProperty)
    Result={}
    Num=len(TProteinSequence)
    Result[AAPName+'C'+'1']=round(float(TProteinSequence.count('1'))/Num,3)
    Result[AAPName+'C'+'2']=round(float(TProteinSequence.count('2'))/Num,3)
    Result[AAPName+'C'+'3']=round(float(TProteinSequence.count('3'))/Num,3)
    return Result
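
# Illustrative usage sketch (not part of the dataset row). StringtoNum is assumed to
# map each residue to the label ('1', '2' or '3') of the class it belongs to in
# AAProperty; the three-class split below is a made-up demo table, not the real
# _Polarizability grouping.
def StringtoNum(ProteinSequence, AAProperty):
    # AAProperty is assumed to map a class label ('1'/'2'/'3') to the residues in that class.
    residue_class = {aa: label for label, residues in AAProperty.items() for aa in residues}
    return ''.join(residue_class[aa] for aa in ProteinSequence)

_DemoProperty = {'1': 'RKEDQN', '2': 'GASTPHY', '3': 'CLVIMFW'}  # hypothetical split
print(CalculateComposition('MKVLAA', _DemoProperty, '_Demo'))
# 'MKVLAA' encodes to '313322', so the output is
# {'_DemoC1': 0.167, '_DemoC2': 0.333, '_DemoC3': 0.5}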
|
[
"def convertAAToProperties(self, sequence, properties=None):\n properties = properties or ['composition', 'iep', 'polarity']\n result = []\n\n for aa in sequence:\n if aa in PROPERTY_DETAILS:\n aaProperties = sum(PROPERTY_DETAILS[aa][prop] for prop in\n properties)\n result.append(aaProperties)\n return result",
"def CalculateTransition(ProteinSequence,AAProperty,AAPName):\r\n\t\r\n\tTProteinSequence=StringtoNum(ProteinSequence,AAProperty)\r\n\tResult={}\r\n\tNum=len(TProteinSequence)\r\n\tCTD=TProteinSequence\r\n\tResult[AAPName+'T'+'12']=round(float(CTD.count('12')+CTD.count('21'))/(Num-1),3)\r\n\tResult[AAPName+'T'+'13']=round(float(CTD.count('13')+CTD.count('31'))/(Num-1),3)\r\n\tResult[AAPName+'T'+'23']=round(float(CTD.count('23')+CTD.count('32'))/(Num-1),3)\r\n\treturn Result",
"def calc_aa_propensity(seq):\n\n # count absolute number of each residue in the input string\n number_each_aa_dict = {}\n\n all_aa = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']\n # create an dictionary of the numbers {\"A\" : 57, \"C\" : 5, ...} etc\n for aa in all_aa:\n number_each_aa_dict[aa] = seq.count(aa)\n\n # create a dictionary to hold the propensity of each residue\n aa_propensity_dict = {}\n length = len(seq)\n for aa in number_each_aa_dict:\n aa_propensity_dict[aa] = number_each_aa_dict[aa] / length\n\n # turn the dictionary into a pd.Series\n aa_prop_ser = pd.Series(aa_propensity_dict)\n # normalise so that all the aa propensities add up to 1.0\n # this is important if \"X\" or \"U\" is in the sequences\n aa_prop_norm_ser = aa_prop_ser / aa_prop_ser.sum()\n # name the index column\n aa_prop_norm_ser.index.name = \"freq\"\n return aa_prop_norm_ser",
"def protein_properties(seq):\n pa = ProteinAnalysis(seq)\n\n aa_counts = pa.count_amino_acids()\n arom = pa.aromaticity()\n isoelec = pa.isoelectric_point()\n try:\n instability = pa.instability_index()\n except KeyError:\n instability = None\n try:\n gravy = pa.gravy()\n except KeyError:\n gravy = None\n\n return ProtProp(aa=str(seq),\n gravy=gravy,\n aromaticity=arom,\n isoelectric_point=isoelec,\n instability=instability,\n aa_counts=aa_counts)",
"def _get_shape_ap_def(self, aperture):\n\n # get type and shape mods\n shape = aperture.shape\n if isinstance(shape, Circle):\n type_ = SHAPE_TAGS['circle']['char']\n mods = [self._convert_units_str(shape.radius * 2)]\n elif isinstance(shape, Rectangle):\n type_ = SHAPE_TAGS['rectangle']['char']\n mods = [self._convert_units_str(shape.width),\n self._convert_units_str(shape.height)]\n elif isinstance(shape, Obround):\n type_ = SHAPE_TAGS['obround']['char']\n mods = [self._convert_units_str(shape.width),\n self._convert_units_str(shape.height)]\n elif isinstance(shape, RegularPolygon):\n rot = shape.rotation\n rotation = int(rot and (2 - rot) * 180 or 0)\n vertices = [(self._convert_units_str(p.x), self._convert_units_str(p.y)) for p in shape.vertices]\n type_ = SHAPE_TAGS['reg_polygon']['char']\n mods = [self._convert_units_str(shape.outer_diameter),\n vertices,\n rotation]\n elif isinstance(shape, str):\n type_ = shape\n mods = []\n\n # add hole mods\n hole = aperture.hole\n if isinstance(hole, Circle):\n hole_mods = [self._convert_units_str(hole.radius)]\n elif hole:\n hole_mods = [self._convert_units_str(hole.width), self._convert_units_str(hole.height)]\n else:\n hole_mods = []\n mods += hole_mods\n\n # generate param\n mods = 'X'.join(str(m) for m in mods)\n mods_def = (mods and AP_MODS.format(mods=mods) or '')\n ap_def = APERTURE.format(code=aperture.code,\n type=type_,\n mods=mods_def)\n return LINE.format(ap_def)",
"def get_acs():\n data = wikipedia.get_ac_list(\"Haryana\")\n def normalize_name(name):\n name = name.split(\"(\")[0].lower().strip(\". \")\n return name\n\n pc_dict = dict((normalize_name(name), pc_code) for pc_code, ac_code, name in data)\n\n renames = {\n \"ambala cantt\": \"ambala cantonment\",\n \"dadri\": \"charkhi dadri\",\n \"kalawali\": \"kalanwali\",\n \"nangal chaudhry\": \"nagai chaudhry\",\n }\n def get_pc_code(name):\n name = normalize_name(name)\n name = renames.get(name, name)\n if name not in pc_dict:\n name = find_nearest(name, pc_dict.keys())\n return pc_dict[name]\n\n ac_data = _get_ac_data()\n assert(len(pc_dict) == len(ac_data))\n for code, name in ac_data:\n pc_code = get_pc_code(name)\n ac_code_str = \"AC{0:03d}\".format(int(code))\n yield pc_code, ac_code_str, name.title().replace(\"(Sc)\", \" (SC)\").replace(\"(St)\", \" (ST)\")",
"def get_pymatgen_descriptor(composition, property_name):\n eldata = []\n # what are these named tuples for? not used or returned! -KM\n eldata_tup_lst = []\n eldata_tup = collections.namedtuple('eldata_tup', 'element propname propvalue propunit amt')\n\n oxidation_states = {}\n if isinstance(composition, Composition):\n # check whether the composition is composed of oxidation state decorates species (not just plain Elements)\n if hasattr(composition.elements[0], \"oxi_state\"):\n oxidation_states = dict([(str(sp.element), sp.oxi_state) for sp in composition.elements])\n el_amt_dict = composition.get_el_amt_dict()\n # string\n else:\n comp, oxidation_states = get_composition_oxidation_state(composition)\n el_amt_dict = comp.get_el_amt_dict()\n\n symbols = sorted(el_amt_dict.keys(), key=lambda sym: get_el_sp(sym).X)\n\n for el_sym in symbols:\n\n element = Element(el_sym)\n property_value = None\n property_units = None\n\n try:\n p = getattr(element, property_name)\n except AttributeError:\n print(\"{} attribute missing\".format(property_name))\n raise\n\n if p is not None:\n if property_name in ['ionic_radii']:\n if oxidation_states:\n property_value = element.ionic_radii[oxidation_states[el_sym]]\n property_units = Unit(\"ang\")\n else:\n raise ValueError(\"oxidation state not given for {}; It does not yield a unique \"\n \"number per Element\".format(property_name))\n else:\n property_value = float(p)\n\n # units are None for these pymatgen descriptors\n # todo: there seem to be a lot more unitless descriptors which are not listed here... -Alex D\n if property_name not in ['X', 'Z', 'group', 'row', 'number', 'mendeleev_no', 'ionic_radii']:\n property_units = p.unit\n\n # Make a named tuple out of all the available information\n eldata_tup_lst.append(eldata_tup(element=el_sym, propname=property_name, propvalue=property_value,\n propunit=property_units, amt=el_amt_dict[el_sym]))\n\n # Add descriptor values, one for each atom in the compound\n for i in range(int(el_amt_dict[el_sym])):\n eldata.append(property_value)\n\n return eldata",
"def vpn_create_ca(self):\n fpath_prop = self.vpn_create_tmp_ca_prop_file()\n try:\n cmd = self.vpn_create_ca_cmd(fpath_prop)\n return self.ejbca_cmd(cmd, retry_attempts=1, write_dots=self.print_output)[0]\n\n finally:\n util.safely_remove(fpath_prop)",
"def generatePropertyAccessorNameList(property):\n from Products.ERP5Type.Utils import UpperCase\n res=[]\n cased_id = UpperCase(property['id'])\n for hidden in ('', '_'):\n for getset in ('get', 'set', 'has'): # 'is',\n for default in ('', 'Default', 'Translated'):\n for value in ('', 'Value', 'TranslationDomain'):\n for multivalued in ('', 'List', 'Set'):\n res.append('%s%s%s%s%s%s' % (hidden, getset, default, cased_id, value, multivalued))\n if property.has_key('acquired_property_id') and \\\n property['type'] == 'content':\n for aq_property_id in property['acquired_property_id']:\n cased_id = UpperCase('%s_%s' % (property['id'], aq_property_id))\n for hidden in ('', '_'):\n for getset in ('get', 'set'):\n for default in ('', 'Default'):\n for multivalued in ('', 'List'):\n res.append('%s%s%s%s%s' % (hidden, getset, default, cased_id, multivalued))\n return res",
"def _createGetProperty(pName):\n propName = pName\n def getProperty(self):\n if paraview.compatibility.GetVersion() >= 3.5:\n return self.GetPropertyValue(propName)\n else:\n return self.GetProperty(propName)\n return getProperty",
"def summarize_polyA(fasta):\n lst = []\n for name in fasta.keys():\n seq = str(fasta[name])\n L = seq[0:10]\n R = seq[-10:]\n end = L + R\n most_common_char = Counter(end).most_common(1)[0][0]\n Ln = Counter(L)[most_common_char]\n Rn = Counter(R)[most_common_char]\n if Ln > Rn:\n m = re.search('^(' + most_common_char + '+)', seq)\n if m:\n lst.append([\"L\", most_common_char, m.group(1), name, seq])\n else:\n lst.append([\"L\", most_common_char, \"-\", name, seq])\n else:\n m = re.search('(' + most_common_char + '+)$', seq)\n if m:\n lst.append([\"R\", most_common_char, m.group(1), name, seq])\n else:\n lst.append([\"R\", most_common_char, \"-\", name, seq])\n return(lst)",
"def CalcAcres(feature):\n inTable = feature\n fieldName = \"Acres\"\n fieldType = \"DOUBLE\"\n expression = \"!shape.area@ACRES!\" \n arcpy.AddField_management(inTable, fieldName, fieldType)\n arcpy.CalculateField_management(inTable, fieldName, expression,\n \"PYTHON_9.3\", \"\")",
"def getProperties(self, app):\n \n # Get the properties\n props = yield app.properties.get()\n \n # Marshal\n data = {}\n for i,p in enumerate(props):\n data[i] = marshal(p, self.pfields)\n \n returnValue(data)",
"def filter_properties(self, a_filter='all'):\n\t\t\n\t\tindex_key = 0\n\t\tindex_value = 0\n\t\tindex_dict = {}\n\n\t\tfor group in self.properties:\n\t\t\tif a_filter == 'no_full_sets' and len(group) >= group[0].full_size():\n\t\t\t\tindex_value += len(group)\n\t\t\t\tcontinue\n\n\t\t\tfor card in group:\n\t\t\t\tif a_filter == 'no_buildings' and card.name == \"House\" or card.name == \"Hotel\":\n\t\t\t\t\tindex_value += 1\n\t\t\t\t\tcontinue\n\t\t\t\tif a_filter == 'no_any_wilds' and card.name == \"Property Wild: Any\":\n\t\t\t\t\tindex_value += 1\n\t\t\t\t\tcontinue\n\n\t\t\t\tindex_dict[index_key] = index_value\n\t\t\t\tindex_key += 1\n\t\t\t\tindex_value += 1\n\n\t\treturn index_dict",
"def _getProperties(self, ua):\r\n idProperties = {}\r\n matched = ''\r\n sought = None\r\n sought, matched = self._seekProperties(ua, idProperties, sought, matched)\r\n properties = {}\r\n for index, value in idProperties.iteritems():\r\n properties[self.data['properties'][index][0]] = self.data['properties'][index][1](value)\r\n properties['_matched'] = matched\r\n properties['_unmatched'] = ua[len(matched):]\r\n return properties",
"def export_alpha_property(self, flags=0x00ED, threshold=0):\n # search for duplicate\n for block in self.blocks:\n if isinstance(block, NifFormat.NiAlphaProperty) \\\n and block.flags == flags and block.threshold == threshold:\n return block\n # no alpha property with given flag found, so create new one\n alphaprop = self.create_block(\"NiAlphaProperty\")\n alphaprop.flags = flags\n alphaprop.threshold = threshold\n return alphaprop",
"def get_dispersion_PA(self, decimals=0):\n from astropy.coordinates import Angle\n import astropy.units as u\n\n # extra tilt of the 1st order grism spectra\n if 'BEAMA' in self.conf.conf_dict:\n x0 = self.conf.conf_dict['BEAMA']\n else:\n x0 = np.array([10,30])\n \n dy_trace, lam_trace = self.conf.get_beam_trace(x=507, y=507, dx=x0,\n beam='A')\n\n extra = np.arctan2(dy_trace[1]-dy_trace[0], x0[1]-x0[0])/np.pi*180\n\n # Distorted WCS\n crpix = self.direct.wcs.wcs.crpix\n xref = [crpix[0], crpix[0]+1]\n yref = [crpix[1], crpix[1]]\n r, d = self.direct.wcs.all_pix2world(xref, yref, 1)\n pa = Angle((extra +\n np.arctan2(np.diff(r)*np.cos(d[0]/180*np.pi),\n np.diff(d))[0]/np.pi*180)*u.deg)\n\n dispersion_PA = pa.wrap_at(360*u.deg).value\n if decimals is not None:\n dispersion_PA = np.round(dispersion_PA, decimals=decimals)\n\n self.dispersion_PA = dispersion_PA\n return float(dispersion_PA)",
"def run_pca(self):\n model = PCA(k=self.pca_components_index).fit(self.data)\n\n return model",
"def az(self):\n c_ids, p_ids , i_ids = self.vocab.azlist()\n az = \"\"\"<div class=\"azlist\">\"\"\"\n az = \"\"\"%s\\n<p>Classes: |\"\"\" % az\n # print(c_ids, p_ids)\n for c in c_ids:\n # speclog(\"Class \"+c+\" in az generation.\")\n az = \"\"\"%s <a href=\"#%s\">%s</a> | \"\"\" % (az, str(c).replace(\" \", \"\"), c)\n az = \"\"\"%s\\n</p>\"\"\" % az\n \n az = \"\"\"%s\\n<p>Properties: |\"\"\" % az\n for p in p_ids:\n # speclog(\"Property \"+p+\" in az generation.\")\n az = \"\"\"%s <a href=\"#%s\">%s</a> | \"\"\" % (az, str(p).replace(\" \", \"\"), p)\n az = \"\"\"%s\\n</p>\"\"\" % az\n \n if (len(self.vocab.individuals) > 0):\n az = \"\"\"%s\\n<p>Individuals: |\"\"\" % az\n for i in i_ids:\n # speclog(\"Individual \"+p+\" in az generation.\")\n az = \"\"\"%s <a href=\"#%s\">%s</a> | \"\"\" % (az, str(i).replace(\" \", \"\"), i)\n az = \"\"\"%s\\n</p>\"\"\" % az\n \n az = \"\"\"%s\\n</div>\"\"\" % az\n return(az)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A method used for computing transition descriptors. AAProperty is a dict containing a classification of amino acids, such as _Polarizability. AAPName is a string indicating the AAP name.
|
def CalculateTransition(ProteinSequence,AAProperty,AAPName):
    TProteinSequence=StringtoNum(ProteinSequence,AAProperty)
    Result={}
    Num=len(TProteinSequence)
    CTD=TProteinSequence
    Result[AAPName+'T'+'12']=round(float(CTD.count('12')+CTD.count('21'))/(Num-1),3)
    Result[AAPName+'T'+'13']=round(float(CTD.count('13')+CTD.count('31'))/(Num-1),3)
    Result[AAPName+'T'+'23']=round(float(CTD.count('23')+CTD.count('32'))/(Num-1),3)
    return Result
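
# A short continuation of the sketch given after CalculateComposition above, using
# the same assumed StringtoNum and the hypothetical _DemoProperty table: the
# transition descriptor counts adjacent residue pairs whose classes differ,
# normalised by the number of adjacent pairs (Num-1).
print(CalculateTransition('MKVLAA', _DemoProperty, '_Demo'))
# 'MKVLAA' encodes to '313322'; among its 5 adjacent pairs,
# '13'/'31' occur twice and '23'/'32' once, so the output is
# {'_DemoT12': 0.0, '_DemoT13': 0.4, '_DemoT23': 0.2}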
|
[
"def CalculateComposition(ProteinSequence,AAProperty,AAPName):\r\n\tTProteinSequence=StringtoNum(ProteinSequence,AAProperty)\r\n\tResult={}\r\n\tNum=len(TProteinSequence)\r\n\tResult[AAPName+'C'+'1']=round(float(TProteinSequence.count('1'))/Num,3)\r\n\tResult[AAPName+'C'+'2']=round(float(TProteinSequence.count('2'))/Num,3)\r\n\tResult[AAPName+'C'+'3']=round(float(TProteinSequence.count('3'))/Num,3)\r\n\treturn Result",
"def convertAAToProperties(self, sequence, properties=None):\n properties = properties or ['composition', 'iep', 'polarity']\n result = []\n\n for aa in sequence:\n if aa in PROPERTY_DETAILS:\n aaProperties = sum(PROPERTY_DETAILS[aa][prop] for prop in\n properties)\n result.append(aaProperties)\n return result",
"def get_acs():\n data = wikipedia.get_ac_list(\"Haryana\")\n def normalize_name(name):\n name = name.split(\"(\")[0].lower().strip(\". \")\n return name\n\n pc_dict = dict((normalize_name(name), pc_code) for pc_code, ac_code, name in data)\n\n renames = {\n \"ambala cantt\": \"ambala cantonment\",\n \"dadri\": \"charkhi dadri\",\n \"kalawali\": \"kalanwali\",\n \"nangal chaudhry\": \"nagai chaudhry\",\n }\n def get_pc_code(name):\n name = normalize_name(name)\n name = renames.get(name, name)\n if name not in pc_dict:\n name = find_nearest(name, pc_dict.keys())\n return pc_dict[name]\n\n ac_data = _get_ac_data()\n assert(len(pc_dict) == len(ac_data))\n for code, name in ac_data:\n pc_code = get_pc_code(name)\n ac_code_str = \"AC{0:03d}\".format(int(code))\n yield pc_code, ac_code_str, name.title().replace(\"(Sc)\", \" (SC)\").replace(\"(St)\", \" (ST)\")",
"def _get_shape_ap_def(self, aperture):\n\n # get type and shape mods\n shape = aperture.shape\n if isinstance(shape, Circle):\n type_ = SHAPE_TAGS['circle']['char']\n mods = [self._convert_units_str(shape.radius * 2)]\n elif isinstance(shape, Rectangle):\n type_ = SHAPE_TAGS['rectangle']['char']\n mods = [self._convert_units_str(shape.width),\n self._convert_units_str(shape.height)]\n elif isinstance(shape, Obround):\n type_ = SHAPE_TAGS['obround']['char']\n mods = [self._convert_units_str(shape.width),\n self._convert_units_str(shape.height)]\n elif isinstance(shape, RegularPolygon):\n rot = shape.rotation\n rotation = int(rot and (2 - rot) * 180 or 0)\n vertices = [(self._convert_units_str(p.x), self._convert_units_str(p.y)) for p in shape.vertices]\n type_ = SHAPE_TAGS['reg_polygon']['char']\n mods = [self._convert_units_str(shape.outer_diameter),\n vertices,\n rotation]\n elif isinstance(shape, str):\n type_ = shape\n mods = []\n\n # add hole mods\n hole = aperture.hole\n if isinstance(hole, Circle):\n hole_mods = [self._convert_units_str(hole.radius)]\n elif hole:\n hole_mods = [self._convert_units_str(hole.width), self._convert_units_str(hole.height)]\n else:\n hole_mods = []\n mods += hole_mods\n\n # generate param\n mods = 'X'.join(str(m) for m in mods)\n mods_def = (mods and AP_MODS.format(mods=mods) or '')\n ap_def = APERTURE.format(code=aperture.code,\n type=type_,\n mods=mods_def)\n return LINE.format(ap_def)",
"def get_apartment_name(self, soup, apartment_dict):\n\n apartment_name_tag = soup.find(class_='pageTitle')\n title = apartment_name_tag.text.strip()\n apartment_dict['name'] = title",
"def calc_aa_propensity(seq):\n\n # count absolute number of each residue in the input string\n number_each_aa_dict = {}\n\n all_aa = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']\n # create an dictionary of the numbers {\"A\" : 57, \"C\" : 5, ...} etc\n for aa in all_aa:\n number_each_aa_dict[aa] = seq.count(aa)\n\n # create a dictionary to hold the propensity of each residue\n aa_propensity_dict = {}\n length = len(seq)\n for aa in number_each_aa_dict:\n aa_propensity_dict[aa] = number_each_aa_dict[aa] / length\n\n # turn the dictionary into a pd.Series\n aa_prop_ser = pd.Series(aa_propensity_dict)\n # normalise so that all the aa propensities add up to 1.0\n # this is important if \"X\" or \"U\" is in the sequences\n aa_prop_norm_ser = aa_prop_ser / aa_prop_ser.sum()\n # name the index column\n aa_prop_norm_ser.index.name = \"freq\"\n return aa_prop_norm_ser",
"def _set_advertise_apname(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"advertise-apname\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"advertise_apname must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"advertise-apname\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__advertise_apname = t\n if hasattr(self, '_set'):\n self._set()",
"def _createGetProperty(pName):\n propName = pName\n def getProperty(self):\n if paraview.compatibility.GetVersion() >= 3.5:\n return self.GetPropertyValue(propName)\n else:\n return self.GetProperty(propName)\n return getProperty",
"def CalcAcres(feature):\n inTable = feature\n fieldName = \"Acres\"\n fieldType = \"DOUBLE\"\n expression = \"!shape.area@ACRES!\" \n arcpy.AddField_management(inTable, fieldName, fieldType)\n arcpy.CalculateField_management(inTable, fieldName, expression,\n \"PYTHON_9.3\", \"\")",
"def MakePtAP(self, A, P):\n return _handle.OperatorHandle_MakePtAP(self, A, P)",
"def create_AP_labels(self, APs: list, include_init=True):\n AP_lables = dict()\n for ap in APs:\n AP_lables[ap] = \"property_\" + str(len(AP_lables))\n\n state_labels = dict()\n for key, state in self.states_encoding.items():\n for ap in APs:\n if state.check_AP(ap, self.ordering):\n state_labels[key] = state_labels.get(key, set()) | {AP_lables[ap]}\n if include_init:\n state_labels[self.init] = state_labels.get(self.init, set()) | {\"init\"}\n return state_labels, AP_lables",
"def get_ap_info(self, ue, ap_id):\n # Get UE_AP_STATE\n ue_ap_state = self.env.get_ue_ap_state(ue, ap_id)\n\n # Create a new AP_INFO and fill in the details.\n return AP_INFO(\n ap_id=ap_id,\n ue_ap_state=ue_ap_state)",
"def itemByName(self, *args) -> \"adsk::core::Ptr< adsk::core::Appearance >\" :\n return _core.Appearances_itemByName(self, *args)",
"async def manga(self, ctx, *, title):\n cmd = \"manga\"\n await self.fetch_info(ctx, cmd, title)",
"def print_attribute_list(self):\n p = prettytable.PrettyTable((\"VISA name\", \"Constant\", \"Python name\", \"val\"))\n for attr in getattr(self.current, \"visa_attributes_classes\", ()):\n try:\n val = self.current.get_visa_attribute(attr.attribute_id)\n except VisaIOError as e:\n val = e.abbreviation\n except Exception as e:\n val = str(e)\n if len(val) > 10:\n val = val[:10] + \"...\"\n p.add_row((attr.visa_name, attr.attribute_id, attr.py_name, val))\n\n print(p.get_string(sortby=\"VISA name\"))",
"def az(self):\n c_ids, p_ids , i_ids = self.vocab.azlist()\n az = \"\"\"<div class=\"azlist\">\"\"\"\n az = \"\"\"%s\\n<p>Classes: |\"\"\" % az\n # print(c_ids, p_ids)\n for c in c_ids:\n # speclog(\"Class \"+c+\" in az generation.\")\n az = \"\"\"%s <a href=\"#%s\">%s</a> | \"\"\" % (az, str(c).replace(\" \", \"\"), c)\n az = \"\"\"%s\\n</p>\"\"\" % az\n \n az = \"\"\"%s\\n<p>Properties: |\"\"\" % az\n for p in p_ids:\n # speclog(\"Property \"+p+\" in az generation.\")\n az = \"\"\"%s <a href=\"#%s\">%s</a> | \"\"\" % (az, str(p).replace(\" \", \"\"), p)\n az = \"\"\"%s\\n</p>\"\"\" % az\n \n if (len(self.vocab.individuals) > 0):\n az = \"\"\"%s\\n<p>Individuals: |\"\"\" % az\n for i in i_ids:\n # speclog(\"Individual \"+p+\" in az generation.\")\n az = \"\"\"%s <a href=\"#%s\">%s</a> | \"\"\" % (az, str(i).replace(\" \", \"\"), i)\n az = \"\"\"%s\\n</p>\"\"\" % az\n \n az = \"\"\"%s\\n</div>\"\"\" % az\n return(az)",
"def convert_attribute(aim_attribute, to_aim=True):\n if to_aim:\n # Camel to _ (APIC to AIM)\n result = []\n for x in aim_attribute:\n if x.isupper():\n result.append('_')\n result.append(x.lower())\n return ''.join(result)\n else:\n # _ to Camel (AIM to APIC)\n parts = aim_attribute.split('_')\n result = parts[0]\n for part in parts[1:]:\n result += part[0].upper() + part[1:]\n return result",
"def apcupsd_name(self):\n self.writeCommand('apcupsd_name')\n return self",
"def print_pa_aper(fitsFiles, verbose=True):\n pa_arr = []\n for ii in range(len(fitsFiles)):\n hdu = fits.open(fitsFiles[ii])\n hdr = hdu[1].header\n pa_arr.append(hdr['PA_APER'])\n \n if verbose:\n print('{0}: PA_APER = {1}'.format(fitsFiles[ii], hdr['PA_APER']))\n\n\n print('**** Average PA_APER: {0} ****'.format(np.mean(np.array(pa_arr))))\n \n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A method used for calculating composition descriptors based on Hydrophobicity of AADs.
|
def CalculateCompositionHydrophobicity(ProteinSequence):
    result=CalculateComposition(ProteinSequence,_Hydrophobicity,'_Hydrophobicity')
    return result
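
# Hedged usage note: _Hydrophobicity is assumed to be a module-level three-class
# table in the same '1'/'2'/'3' style as the demo grouping sketched earlier
# (e.g. something like {'1': 'RKEDQN', '2': 'GASTPHY', '3': 'CLVIMFW'} in
# propy-like CTD code); the exact grouping is not shown in this file.
desc = CalculateCompositionHydrophobicity('MKVLAA')
# Equivalent to CalculateComposition('MKVLAA', _Hydrophobicity, '_Hydrophobicity'),
# yielding keys '_HydrophobicityC1', '_HydrophobicityC2', '_HydrophobicityC3'.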
|
[
"def compute_compositionality(self):\n #pdb.set_trace()\n compositionality = 0\n comparisons = 0\n meanings = self.meaning_space.meanings()\n for meaning1,meaning2 in itertools.combinations(meanings, 2):\n mdist = self.meaning_space.hamming(meaning1,meaning2)\n signals1 = self.speak(meaning1, pick=False)\n signals2 = self.speak(meaning2, pick=False)\n for signal1 in signals1:\n for signal2 in signals2:\n sdist = self.signal_space.hamming(signal1,signal2)\n compositionality += ((mdist * sdist) / (len(signals1) * len(signals2)))\n comparisons += 1\n #pdb.set_trace() \n return (compositionality/comparisons)",
"def getMoleculeFeatures(self):\n title = self.__pybelMol.title\n molWeight = self.__pybelMol.molwt\n formula = self.__pybelMol.formula\n ccId = title\n ifCharge = self.__pybelMol.charge\n logger.info(\"%s formula %s charge %d mw %f\", title, formula, ifCharge, molWeight)\n inchi = self.__pybelMol.write(\"inchi\").strip()\n inchiKey = self.__pybelMol.write(\"inchikey\").strip()\n smiles = self.__pybelMol.write(\"can\", opt={\"n\": None}).strip()\n isoSmiles = self.__pybelMol.write(\"can\", opt={\"i\": None, \"n\": None}).strip()\n details = ComponentDetails(ccId=ccId, formula=formula, ifCharge=ifCharge)\n descriptors = ComponentDescriptors(smiles=smiles, isoSmiles=isoSmiles, inchi=inchi, inchiKey=inchiKey)\n #\n #\n typeCounts = defaultdict(int)\n ccAtomD = {}\n ccAtomIdD = {}\n for ii, pat in enumerate(self.__pybelMol.atoms, 1):\n at = pat.OBAtom\n atIdx = at.GetIdx()\n # atNo = at.GetAtomicNum()\n aType = at.GetType()\n typeCounts[aType] += 1\n atName = self.__atomIdxD[ii] if ii in self.__atomIdxD else aType + str(typeCounts[aType])\n #\n isAromatic = at.IsAromatic()\n isChiral = at.IsChiral()\n iCharge = at.GetFormalCharge()\n cipStereo = None\n ccAtomD[atName] = ComponentAtom(name=atName, aType=aType, isAromatic=isAromatic, isChiral=isChiral, CIP=cipStereo, fCharge=iCharge)\n ccAtomIdD[atIdx] = atName\n logger.debug(\"%s Atom %s %s %r %r %s\", ccId, atName, aType, isAromatic, isChiral, cipStereo)\n #\n ccBondD = {}\n for bnd in openbabel.OBMolBondIter(self.__pybelMol.OBMol):\n atI = bnd.GetBeginAtomIdx()\n atJ = bnd.GetEndAtomIdx()\n atNameI = ccAtomIdD[atI]\n atNameJ = ccAtomIdD[atJ]\n isAromatic = bnd.IsAromatic()\n iType = bnd.GetBondOrder()\n cipStereo = None\n logger.debug(\"Bond %s %s iType %r cipStereo %r aromatic %r\", atNameI, atNameJ, iType, cipStereo, isAromatic)\n #\n ccBondD[(atNameI, atNameJ)] = ComponentBond(iType=iType, isAromatic=isAromatic, CIP=cipStereo)\n #\n ccD = {\"details\": details, \"descriptors\": descriptors, \"atoms\": ccAtomD, \"bonds\": ccBondD}\n return ccD",
"def calculate_hydrophobic_fitness(assembly):\n hydrophobic_centroids = []\n tyrosine_centroids = []\n polar_centroids = []\n for residue in [r for r in assembly.get_monomers()\n if isinstance(r, ampal.Residue)]:\n centroid_list = None\n centroid = residue.centroid\n if residue.mol_letter in HYDROPHOBIC:\n centroid_list = hydrophobic_centroids\n elif residue.mol_letter == 'Y':\n centroid_list = tyrosine_centroids\n elif residue.mol_letter in standard_amino_acids:\n centroid_list = polar_centroids\n else:\n continue\n if centroid_list is not None:\n centroid_list.append(\n (residue.parent.id, int(residue.id),\n residue['CA'] if centroid is None else centroid))\n hf = run_hf_loop(hydrophobic_centroids,\n tyrosine_centroids, polar_centroids)\n return hf",
"def get_descriptors(self):\n\n # If any descriptors should be ignored, put their names in the list\n # below.\n blacklist = []\n\n results = []\n for name, descriptor in Chemical:\n if name in blacklist:\n continue\n results.append(sum(descriptor(chem) for chem in self.reactants\n if chem.a.size > 1))\n results.append(sum(descriptor(chem) for chem in self.products\n if chem.a.size > 1))\n return results",
"def compute(self, frame):\n global_desc_dict = {}\n atomic_desc_dict = {}\n for element in self.desc_spec_dict.keys():\n global_desc_dict[element], atomic_desc_dict[element] = self.engines[element].create(frame)\n return global_desc_dict, atomic_desc_dict",
"def _HAC_model():\n\n\tclf = AgglomerativeClustering()\n\treturn clf",
"def aic(self):\n aics = []\n aics_bool = []\n for i, chain in enumerate(self.parent.chains):\n p, n_data, n_free = chain.posterior, chain.num_eff_data_points, chain.num_free_params\n if p is None or n_data is None or n_free is None:\n aics_bool.append(False)\n missing = \"\"\n if p is None:\n missing += \"posterior, \"\n if n_data is None:\n missing += \"num_eff_data_points, \"\n if n_free is None:\n missing += \"num_free_params, \"\n\n self._logger.warning(\"You need to set %s for chain %s to get the AIC\" % (missing[:-2], chain.name))\n else:\n aics_bool.append(True)\n c_cor = 1.0 * n_free * (n_free + 1) / (n_data - n_free - 1)\n aics.append(2.0 * (n_free + c_cor - np.max(p)))\n if len(aics) > 0:\n aics -= np.min(aics)\n aics_fin = []\n i = 0\n for b in aics_bool:\n if not b:\n aics_fin.append(None)\n else:\n aics_fin.append(aics[i])\n i += 1\n return aics_fin",
"def get_pymatgen_descriptor(composition, property_name):\n eldata = []\n # what are these named tuples for? not used or returned! -KM\n eldata_tup_lst = []\n eldata_tup = collections.namedtuple('eldata_tup', 'element propname propvalue propunit amt')\n\n oxidation_states = {}\n if isinstance(composition, Composition):\n # check whether the composition is composed of oxidation state decorates species (not just plain Elements)\n if hasattr(composition.elements[0], \"oxi_state\"):\n oxidation_states = dict([(str(sp.element), sp.oxi_state) for sp in composition.elements])\n el_amt_dict = composition.get_el_amt_dict()\n # string\n else:\n comp, oxidation_states = get_composition_oxidation_state(composition)\n el_amt_dict = comp.get_el_amt_dict()\n\n symbols = sorted(el_amt_dict.keys(), key=lambda sym: get_el_sp(sym).X)\n\n for el_sym in symbols:\n\n element = Element(el_sym)\n property_value = None\n property_units = None\n\n try:\n p = getattr(element, property_name)\n except AttributeError:\n print(\"{} attribute missing\".format(property_name))\n raise\n\n if p is not None:\n if property_name in ['ionic_radii']:\n if oxidation_states:\n property_value = element.ionic_radii[oxidation_states[el_sym]]\n property_units = Unit(\"ang\")\n else:\n raise ValueError(\"oxidation state not given for {}; It does not yield a unique \"\n \"number per Element\".format(property_name))\n else:\n property_value = float(p)\n\n # units are None for these pymatgen descriptors\n # todo: there seem to be a lot more unitless descriptors which are not listed here... -Alex D\n if property_name not in ['X', 'Z', 'group', 'row', 'number', 'mendeleev_no', 'ionic_radii']:\n property_units = p.unit\n\n # Make a named tuple out of all the available information\n eldata_tup_lst.append(eldata_tup(element=el_sym, propname=property_name, propvalue=property_value,\n propunit=property_units, amt=el_amt_dict[el_sym]))\n\n # Add descriptor values, one for each atom in the compound\n for i in range(int(el_amt_dict[el_sym])):\n eldata.append(property_value)\n\n return eldata",
"def get_absorption_cross_section(bio_optical_config):\n\n #################\n ## Initialization\n #################\n\n abs_cff_pigments = pd.DataFrame(\n index=bio_optical_config.wvl * 1000\n ) # storing pigment MACs\n\n if bio_optical_config.abs_cff_calculated:\n print(\"abs_cff reconstructed from pigments\")\n # open mass absorption coefficients (m2/mg) for each algal pigment\n # from a dictionary.key is pigment name, value is abs coeff in m2/mg\n abs_coeff = 0\n for key, value in bio_optical_config.pigment_data.items():\n abs_pigm = np.array(pd.read_csv(key, header=None)).flatten() # m2/mg\n abs_cff_pigments[\n str(key.split(bio_optical_config.pigment_dir, 1)[1])[0:-4]\n ] = abs_pigm\n conc = value # intracellular conc in ng/µm3, ng/cell, or ng/mg\n abs_coeff = abs_coeff + conc * abs_pigm / 1000000 # m2/µm3,m2/cell,m2/mg\n abs_cff = abs_coeff\n\n elif bio_optical_config.abs_cff_loaded_reconstructed:\n print(\"abs_cff reconstructed directly loaded\")\n abs_cff = np.loadtxt(bio_optical_config.abs_cff_file) # m2/mg, um3 or cell\n if bio_optical_config.packaging_correction_SA: # ! applies only from 300nm\n pckg_SA = np.loadtxt(bio_optical_config.dir_pckg + \"pckg_SA.csv\")\n abs_cff = abs_cff * pckg_SA\n if bio_optical_config.packaging_correction_GA: # ! applies from 300nm\n pckg_GA = np.loadtxt(bio_optical_config.dir_pckg + \"pckg_GA.csv\")\n abs_cff = abs_cff * pckg_GA\n\n elif bio_optical_config.abs_cff_loaded_invivo:\n print(\"abs_cff in vivo directly loaded\")\n abs_cff = np.loadtxt(bio_optical_config.abs_cff_file) # m2/mg, um3 or cell\n\n return abs_cff",
"def numberConceptsAndComputeIntroduced(self):\n\n numCon = len(self.concepts)\n curConNum = 0\n for curConcept in self.concepts:\n curConcept.cnum = curConNum\n if curConNum % 1000 == 0:\n print(\"computing introduced objects and attributes for concept %d of %d\" % (curConNum, numCon))\n curConcept.upperNeighbours.sort()\n curConcept.lowerNeighbours.sort()\n curConcept.introducedObjects = set(curConcept.extent)\n for ln in curConcept.lowerNeighbours:\n curConcept.introducedObjects.difference_update(ln.extent)\n curConcept.introducedAttributes = set(curConcept.intent)\n for un in curConcept.upperNeighbours:\n curConcept.introducedAttributes.difference_update(un.intent)\n curConNum += 1\n print(\"Done with introduced objects and attributes\")",
"def descriptors(self):\n descs = []\n for x in xrange(0, 4):\n desc = self.GetDescriptor(x)\n if desc:\n descs.append(desc)\n return descs",
"def compute_descriptors(self, videoObj_list):\n\n desc_length = self.desc_dim_c + self.desc_dim_m\n\n X = np.zeros((1, desc_length))\n correct_video_indx = []\n\n for i, v in enumerate(videoObj_list):\n v_name = v.video_name_path\n v_length = v.video_length\n possible_offsets = np.array([v_length / 2, 60])\n off = np.min(possible_offsets[possible_offsets >= 0])\n logger.info('\\t 1.1. Sampling video %s (offset %.2f)'\n % (v.video_name_path, off))\n\n sampler = VideoFFmpegSampler(\n v_name, duration=self.sampling_duration, offset=off,\n fps=self.sampling_fps, scale=self.sampling_scale)\n list_of_frames = sampler.sample(output_dir=v.frames_folder_path)\n\n logger.info('\\t 1.2. Computing descriptors')\n num_frames = len(list_of_frames)\n X_row = []\n if num_frames > self.num_frames_per_video:\n\n X_tmp_f = self.flow_extractor.extract(list_of_frames)\n # this can happen if we only have 1 frame\n if np.sum(X_tmp_f) == 0:\n logger.error('No MOTION DESCRIPTORS computed for %s'\n % (v_name))\n else:\n step = 1\n if self.num_frames_per_video > 1:\n step = num_frames / self.num_frames_per_video\n\n list_imagefiles_color = \\\n list_of_frames[0:num_frames:step]\n list_imagefiles_color = \\\n list_imagefiles_color[0:self.num_frames_per_video]\n\n X_tmp_c = self.color_extractor.extract(\n list_imagefiles_color)\n X_tmp_c = X_tmp_c.reshape(\n (1, X_tmp_c.shape[0] * X_tmp_c.shape[1]))\n\n X_row = np.append(X_tmp_f, X_tmp_c, axis=1)\n\n if len(X_row) > 0 and X_row.shape[1] == desc_length:\n correct_video_indx = correct_video_indx + [i]\n X = np.append(X, X_row, axis=0)\n # only appends the row if the descriptor computation was\n # succesful\n else:\n logger.error('There are not enough frames for this video, \\\n No descriptor computed for %s' % (v_name))\n\n # delete tmp initial empty row\n X = np.delete(X, 0, axis=0)\n\n logger.info('End descriptor computation: X %s' % (str(np.shape(X))))\n return X, correct_video_indx",
"def form_analysis_data(self):\n fatal_percent_sum = 0\n self.analysis_dct[\"max_fatalities\"] = 0\n self.analysis_dct[\"phases\"] = {}\n self.analysis_dct[\"damage\"] = {}\n self.analysis_dct[\"years\"] = []\n destroyed_dct = {}\n\n for accident in self.accidents:\n accident.process_data()\n fatal_percent_sum += accident.fatalities_percent\n if accident.fatalities > self.analysis_dct[\"max_fatalities\"]:\n self.analysis_dct[\"max_fatalities\"] = accident.fatalities\n\n if accident.phase not in self.analysis_dct[\"phases\"].keys():\n self.analysis_dct[\"phases\"][accident.phase] = 1\n else:\n self.analysis_dct[\"phases\"][accident.phase] += 1\n\n if accident.damage not in self.analysis_dct[\"damage\"].keys():\n self.analysis_dct[\"damage\"][accident.damage] = 1\n else:\n self.analysis_dct[\"damage\"][accident.damage] += 1\n\n if accident.damage == \"Destroyed\" or accident.damage == \"Substantial\":\n if accident.phase not in destroyed_dct.keys():\n destroyed_dct[accident.phase] = 1\n else:\n destroyed_dct[accident.phase] += 1\n\n self.analysis_dct[\"years\"].append(accident.aircraft_years)\n\n self.analysis_dct[\"accidents_number\"] = len(self.accidents)\n self.analysis_dct[\"fatalities_percent\"] = fatal_percent_sum / self.analysis_dct[\"accidents_number\"]\n max_percent_phase = sorted(list(self.analysis_dct['phases'].items()), key=lambda x: x[1], reverse=True)[0][0]\n max_percent_phase_num = max(self.analysis_dct['phases'].values()) / sum(self.analysis_dct['phases'].values()) * 100\n self.analysis_dct[\"max_percent_phase\"] = (max_percent_phase, max_percent_phase_num)\n max_destroyed_planes_phase = sorted(list(self.analysis_dct['phases'].items()), key=lambda x: x[1], reverse=True)[0]\n self.analysis_dct[\"destroyed_damage\"] = max_destroyed_planes_phase",
"def aperture_fields(horn_width, horn_effective_length, frequency, x, y):\n # Calculate the wavenumber\n k = 2.0 * pi * frequency / c\n\n # Calculate the wave impedance\n eta = sqrt(mu_0 / epsilon_0)\n\n # Define the x-component of the electric field\n e_x = 0.0\n\n # Define the y-component of the electric field\n e_y = cos(pi * x / horn_width) * exp(-1j * k * 0.5 * (x ** 2 / horn_effective_length))\n\n # Define the z-component of the electric field\n e_z = 0.0\n\n # Define the x-component of the magnetic field\n h_x = -cos(pi * x / horn_width) / eta * exp(-1j * k * 0.5 * (x ** 2 / horn_effective_length))\n\n # Define the y-component of the magnetic field\n h_y = 0.0\n\n # Define the z-component of the magnetic field\n h_z = 0.0\n\n # Return all six components of the aperture field\n return e_x, e_y, e_z, h_x, h_y, h_z",
"def pd_create(cd):\n\n # check that 'c' or 'd' is passed\n #assert cd == (\n # 'c' or 'd'), 'This must be charge (c) or discharge (d) data'\n\n # number of descriptors it generates\n n_desc = 19\n\n # determines prefix string based on need for a charge or\n # discharge dataframe\n if cd == 'c':\n prefix = 'ch_'\n else:\n prefix = 'dc_'\n\n # generates list of names for the top of the descriptors dataframe\n names = []\n for ch in np.arange(n_desc):\n names.append(prefix + str(int(ch)))\n\n # adds names of error parameters to the end of the descriptor list\n names = names + [prefix+'AIC', prefix+'BIC', prefix+'red_chi_squared']\n\n # creates pandas dataframe with necessary heading\n # print(names)\n desc = pd.DataFrame(columns=names)\n\n return desc",
"def featurize_composition(self, df: pd.DataFrame) -> pd.DataFrame:\n if not (self.composition_featurizers or self.oxid_composition_featurizers):\n return pd.DataFrame([])\n\n df = df.copy()\n\n if self.composition_featurizers:\n\n LOG.info(\"Applying composition featurizers...\")\n df['composition'] = df['structure'].apply(lambda s: s.composition)\n\n df = self._fit_apply_featurizers(df, self.composition_featurizers, \"composition\")\n #df = df.replace([np.inf, -np.inf, np.nan], 0)\n df = df.rename(columns={'Input Data': ''})\n df.columns = df.columns.map('|'.join).str.strip('|')\n\n if self.oxid_composition_featurizers:\n LOG.info(\"Applying oxidation state featurizers...\")\n df = CompositionToOxidComposition().featurize_dataframe(df, \"composition\")\n df = self._fit_apply_featurizers(df, self.oxid_composition_featurizers, \"composition_oxid\")\n df = df.rename(columns={'Input Data': ''})\n df.columns = df.columns.map('|'.join).str.strip('|')\n\n return df",
"def ahfhaloid(fpre, ids):\n# f = open(fpre+'.AHF_halos')\n# d = f.read()\n# ds = d.split()\n# Npart = np.array(ds[87::83],dtype='i8') # Obtain number of particles in each halo\n# f.close()\n #print Npart\n Npart = np.loadtxt(fpre+'.AHF_halos', skiprows=1, usecols=4, dtype=np.int64)\n \n# try:\n# f = open(fpre+'.AHF_particles')\n# d = f.read()\n# except IOError:\n# f = open(fpre+'.AHF_particles.gz')\n# d = f.read()\n# ds = np.array(d.split(),dtype='i8')\n try:\n ds = np.loadtxt(fpre+'.AHF_particles', skiprows=1, usecols=0, dtype=np.int64)\n except IOError:\n ds = np.loadtxt(fpre+'.AHF_particles.gz', skiprows=1, usecols=0, dtype=np.int64)\n\n# Nhalo = int(ds[0]) # Number of haloes for the file\n accum = 1 # Value used to keep track of reading the AHF file\n pid, hid = np.array([],dtype='i8'), np.array([],dtype='i8') # Initialise particle and halo ID arrays\n \n for i in range(len(Npart)):\n hid = np.append(hid, np.ones(Npart[i], dtype='i8')*i)\n \n args = np.arange(Npart[i]) + accum # Arguments for the halo's particle IDs\n pid = np.append(pid, np.array(ds[args]))\n accum += (1 + Npart[i])\n\n \n \n if type(ids)==list: # Put/ensure all input IDs in one array\n idarr = np.array([])\n for i in range(len(ids)): idarr = np.append(idarr,ids[i])\n else:\n idarr = np.array(ids)\n \n argorder = np.argsort(idarr) # Order for increasing values in pid\n argreorder = np.argsort(np.arange(len(idarr))[argorder]) # Arguments to reorder everything back\n hid_out = -np.ones(len(idarr), dtype='i8') # Negative ones initially as -1 implies no halo/galaxy for that particle\n \n idargs = np.searchsorted(idarr[argorder], pid) # Find the arguments where the IDs match (in order)\n hid_out[idargs] = hid # Fill the matching entries with halo IDs\n hid_out = hid_out[argreorder] # Return to the same order as the input\n #print hid_out\n \n if type(ids)==list:\n acc = 0\n listout = []\n for i in range(len(ids)):\n #print len(ids[i])\n listout += [hid_out[acc:acc+len(ids[i])]]\n acc += len(ids[i])\n return listout\n else:\n return hid_out",
"def ahf1halo(fpre, hid, h=0.7):\n\tf = open(fpre+'.AHF_halos')\n\td = f.read()\n\tds = d.split()\n\tNpart = np.array(ds[87::83],dtype='i8') # Obtain number of particles in each halo\n\txc = (float(ds[88+83*hid]) - float(ds[88]))*1e3/h # Obtain halo position in pc, translated to the coords the simulations are actually in\n\tyc = (float(ds[89+83*hid]) - float(ds[89]))*1e3/h\n\tzc = (float(ds[90+83*hid]) - float(ds[90]))*1e3/h\n\tf.close()\n\t\n\tNskip = 3 + 2*(sum(Npart[:hid]) + hid)\n\tf = open(fpre+'.AHF_particles')\n\td = f.read()\n\tds = np.array(d.split(), dtype='i8')\n\targs = np.arange(Npart[hid])*2 + Nskip\n\tpid = ds[args]\n\tf.close()\n\treturn pid, [xc,yc,zc]",
"def feature_extraction(img, feature):\r\n\r\n if feature == 'HoG':\r\n # HoG parameters\r\n win_size = (32, 32)\r\n block_size = (32, 32)\r\n block_stride = (16, 16)\r\n cell_size = (16, 16)\r\n nbins = 9\r\n deriv_aperture = 1\r\n win_sigma = 4\r\n histogram_norm_type = 0\r\n l2_hys_threshold = 2.0000000000000001e-01\r\n gamma_correction = 0\r\n nlevels = 64\r\n \r\n # Your code here. You should also change the return value.\r\n\r\n hog = cv2.HOGDescriptor(win_size,block_size,block_stride,cell_size,nbins,deriv_aperture,win_sigma,histogram_norm_type,l2_hys_threshold,gamma_correction,nlevels)\r\n\r\n dsize = hog.getDescriptorSize()\r\n descripters = hog.compute(img,winStride=(32,32),padding=(0,0))\r\n descripters = descripters.reshape(-1,dsize)\r\n\r\n\r\n elif feature == 'SIFT':\r\n sift = cv2.xfeatures2d.SIFT_create()\r\n descripters = []\r\n height= img.shape[0]\r\n width = img.shape[1]\r\n split1 = np.array_split(img, width/20, axis=1)\r\n for split in split1:\r\n split2 =np.array_split(split, height/20, axis=0)\r\n for ig in split2:\r\n keypoints, descripter = sift.detectAndCompute(ig,None)\r\n if descripter is not None:\r\n descripters.append(descripter)\r\n if len(descripters) > 0:\r\n descripters = np.vstack(descripters)\r\n else: \r\n return None\r\n return descripters",
"def _comp_analysis(self):\n\t\ttable = {1:{'A':0, 'U':0, 'C':0, 'G':0}, 2:{'A':0, 'U':0, 'C':0, 'G':0}, 3:{'A':0, 'U':0, 'C':0, 'G':0}}\n\t\tfor codon in self.codon_table:\n\t\t\tcount = 1\n\t\t\tfor n in codon:\n\t\t\t\ttable[count][n] += self.codon_table[codon]\n\t\t\t\tcount += 1\n\t\t\n\t\tper_comp = {}\n\t\tpos_comp = {1:{'A':0, 'U':0, 'C':0, 'G':0}, 2:{'A':0, 'U':0, 'C':0, 'G':0}, 3:{'A':0, 'U':0, 'C':0, 'G':0}}\n\t\ttotal = 0.0\n\t\tfor i in table:\n\t\t\tpos_count = 0.0\n\t\t\tfor n in table[i]:\n\t\t\t\tpos_count += table[i][n]\n\t\t\t\tif n in per_comp:\n\t\t\t\t\tper_comp[n] += table[i][n]\n\t\t\t\t\ttotal += table[i][n]\n\t\t\t\telse:\n\t\t\t\t\tper_comp[n] = table[i][n]\n\t\t\t\t\ttotal += table[i][n]\n\t\t\tfor n in pos_comp[i]:\n\t\t\t\tpos_comp[i][n] = table[i][n]/pos_count\n\n\t\tfor p in per_comp:\n\t\t\tper_comp[p] = per_comp[p]/total\n\n\t\t# print \"Percentage composition\"\n\t\t# print per_comp\n\t\t# print \"Percent composition of each nucleotied at each position\"\n\t\t# print pos_comp\n\t\t# print "
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A method used for calculating composition descriptors based on SecondaryStr of AADs.
|
def CalculateCompositionSecondaryStr(ProteinSequence):
    result=CalculateComposition(ProteinSequence,_SecondaryStr,'_SecondaryStr')
    return result
|
[
"def load_stereo_chemical_props() -> Tuple[Mapping[str, List[Bond]],\n Mapping[str, List[Bond]],\n Mapping[str, List[BondAngle]]]:\n # stereo_chemical_props_path = (\n # 'alphafold/common/stereo_chemical_props.txt')\n # with open(stereo_chemical_props_path, 'rt') as f:\n # stereo_chemical_props = f.read()\n stereo_chemical_props='''Bond\t\t\tResidue\t\tMean\t\tStdDev\nCA-CB\t\t\tALA\t\t1.520\t\t0.021\nN-CA\t\t\tALA\t\t1.459\t\t0.020\nCA-C\t\t\tALA\t\t1.525\t\t0.026\nC-O\t\t\tALA\t\t1.229\t\t0.019\nCA-CB\t\t\tARG\t\t1.535\t\t0.022\nCB-CG\t\t\tARG\t\t1.521\t\t0.027\nCG-CD\t\t\tARG\t\t1.515\t\t0.025\nCD-NE\t\t\tARG\t\t1.460\t\t0.017\nNE-CZ\t\t\tARG\t\t1.326\t\t0.013\nCZ-NH1\t\t\tARG\t\t1.326\t\t0.013\nCZ-NH2\t\t\tARG\t\t1.326\t\t0.013\nN-CA\t\t\tARG\t\t1.459\t\t0.020\nCA-C\t\t\tARG\t\t1.525\t\t0.026\nC-O\t\t\tARG\t\t1.229\t\t0.019\nCA-CB\t\t\tASN\t\t1.527\t\t0.026\nCB-CG\t\t\tASN\t\t1.506\t\t0.023\nCG-OD1\t\t\tASN\t\t1.235\t\t0.022\nCG-ND2\t\t\tASN\t\t1.324\t\t0.025\nN-CA\t\t\tASN\t\t1.459\t\t0.020\nCA-C\t\t\tASN\t\t1.525\t\t0.026\nC-O\t\t\tASN\t\t1.229\t\t0.019\nCA-CB\t\t\tASP\t\t1.535\t\t0.022\nCB-CG\t\t\tASP\t\t1.513\t\t0.021\nCG-OD1\t\t\tASP\t\t1.249\t\t0.023\nCG-OD2\t\t\tASP\t\t1.249\t\t0.023\nN-CA\t\t\tASP\t\t1.459\t\t0.020\nCA-C\t\t\tASP\t\t1.525\t\t0.026\nC-O\t\t\tASP\t\t1.229\t\t0.019\nCA-CB\t\t\tCYS\t\t1.526\t\t0.013\nCB-SG\t\t\tCYS\t\t1.812\t\t0.016\nN-CA\t\t\tCYS\t\t1.459\t\t0.020\nCA-C\t\t\tCYS\t\t1.525\t\t0.026\nC-O\t\t\tCYS\t\t1.229\t\t0.019\nCA-CB\t\t\tGLU\t\t1.535\t\t0.022\nCB-CG\t\t\tGLU\t\t1.517\t\t0.019\nCG-CD\t\t\tGLU\t\t1.515\t\t0.015\nCD-OE1\t\t\tGLU\t\t1.252\t\t0.011\nCD-OE2\t\t\tGLU\t\t1.252\t\t0.011\nN-CA\t\t\tGLU\t\t1.459\t\t0.020\nCA-C\t\t\tGLU\t\t1.525\t\t0.026\nC-O\t\t\tGLU\t\t1.229\t\t0.019\nCA-CB\t\t\tGLN\t\t1.535\t\t0.022\nCB-CG\t\t\tGLN\t\t1.521\t\t0.027\nCG-CD\t\t\tGLN\t\t1.506\t\t0.023\nCD-OE1\t\t\tGLN\t\t1.235\t\t0.022\nCD-NE2\t\t\tGLN\t\t1.324\t\t0.025\nN-CA\t\t\tGLN\t\t1.459\t\t0.020\nCA-C\t\t\tGLN\t\t1.525\t\t0.026\nC-O\t\t\tGLN\t\t1.229\t\t0.019\nN-CA\t\t\tGLY\t\t1.456\t\t0.015\nCA-C\t\t\tGLY\t\t1.514\t\t0.016\nC-O\t\t\tGLY\t\t1.232\t\t0.016\nCA-CB\t\t\tHIS\t\t1.535\t\t0.022\nCB-CG\t\t\tHIS\t\t1.492\t\t0.016\nCG-ND1\t\t\tHIS\t\t1.369\t\t0.015\nCG-CD2\t\t\tHIS\t\t1.353\t\t0.017\nND1-CE1\t\t\tHIS\t\t1.343\t\t0.025\nCD2-NE2\t\t\tHIS\t\t1.415\t\t0.021\nCE1-NE2\t\t\tHIS\t\t1.322\t\t0.023\nN-CA\t\t\tHIS\t\t1.459\t\t0.020\nCA-C\t\t\tHIS\t\t1.525\t\t0.026\nC-O\t\t\tHIS\t\t1.229\t\t0.019\nCA-CB\t\t\tILE\t\t1.544\t\t0.023\nCB-CG1\t\t\tILE\t\t1.536\t\t0.028\nCB-CG2\t\t\tILE\t\t1.524\t\t0.031\nCG1-CD1\t\t\tILE\t\t1.500\t\t0.069\nN-CA\t\t\tILE\t\t1.459\t\t0.020\nCA-C\t\t\tILE\t\t1.525\t\t0.026\nC-O\t\t\tILE\t\t1.229\t\t0.019\nCA-CB\t\t\tLEU\t\t1.533\t\t0.023\nCB-CG\t\t\tLEU\t\t1.521\t\t0.029\nCG-CD1\t\t\tLEU\t\t1.514\t\t0.037\nCG-CD2\t\t\tLEU\t\t1.514\t\t0.037\nN-CA\t\t\tLEU\t\t1.459\t\t0.020\nCA-C\t\t\tLEU\t\t1.525\t\t0.026\nC-O\t\t\tLEU\t\t1.229\t\t0.019\nCA-CB\t\t\tLYS\t\t1.535\t\t0.022\nCB-CG\t\t\tLYS\t\t1.521\t\t0.027\nCG-CD\t\t\tLYS\t\t1.520\t\t0.034\nCD-CE\t\t\tLYS\t\t1.508\t\t0.025\nCE-NZ\t\t\tLYS\t\t1.486\t\t0.025\nN-CA\t\t\tLYS\t\t1.459\t\t0.020\nCA-C\t\t\tLYS\t\t1.525\t\t0.026\nC-O\t\t\tLYS\t\t1.229\t\t0.019\nCA-CB\t\t\tMET\t\t1.535\t\t0.022\nCB-CG\t\t\tMET\t\t1.509\t\t0.032\nCG-SD\t\t\tMET\t\t1.807\t\t0.026\nSD-CE\t\t\tMET\t\t1.774\t\t0.056\nN-CA\t\t\tMET\t\t1.459\t\t0.020\nCA-C\t\t\tMET\t\t1.525\t\t0.026\nC-O\t\t\tMET\t\t1.229\t\t0.019\nCA-CB\t\t\tPHE\t\t1.535\t\t0.022\nCB-CG\t\t\tPHE\t\t1.509\t\t0.017\nCG-CD1\t\t\tPHE\t\t1.383\t\t0.015\nCG-CD2\t\t\tPHE
\t\t1.383\t\t0.015\nCD1-CE1\t\t\tPHE\t\t1.388\t\t0.020\nCD2-CE2\t\t\tPHE\t\t1.388\t\t0.020\nCE1-CZ\t\t\tPHE\t\t1.369\t\t0.019\nCE2-CZ\t\t\tPHE\t\t1.369\t\t0.019\nN-CA\t\t\tPHE\t\t1.459\t\t0.020\nCA-C\t\t\tPHE\t\t1.525\t\t0.026\nC-O\t\t\tPHE\t\t1.229\t\t0.019\nCA-CB\t\t\tPRO\t\t1.531\t\t0.020\nCB-CG\t\t\tPRO\t\t1.495\t\t0.050\nCG-CD\t\t\tPRO\t\t1.502\t\t0.033\nCD-N\t\t\tPRO\t\t1.474\t\t0.014\nN-CA\t\t\tPRO\t\t1.468\t\t0.017\nCA-C\t\t\tPRO\t\t1.524\t\t0.020\nC-O\t\t\tPRO\t\t1.228\t\t0.020\nCA-CB\t\t\tSER\t\t1.525\t\t0.015\nCB-OG\t\t\tSER\t\t1.418\t\t0.013\nN-CA\t\t\tSER\t\t1.459\t\t0.020\nCA-C\t\t\tSER\t\t1.525\t\t0.026\nC-O\t\t\tSER\t\t1.229\t\t0.019\nCA-CB\t\t\tTHR\t\t1.529\t\t0.026\nCB-OG1\t\t\tTHR\t\t1.428\t\t0.020\nCB-CG2\t\t\tTHR\t\t1.519\t\t0.033\nN-CA\t\t\tTHR\t\t1.459\t\t0.020\nCA-C\t\t\tTHR\t\t1.525\t\t0.026\nC-O\t\t\tTHR\t\t1.229\t\t0.019\nCA-CB\t\t\tTRP\t\t1.535\t\t0.022\nCB-CG\t\t\tTRP\t\t1.498\t\t0.018\nCG-CD1\t\t\tTRP\t\t1.363\t\t0.014\nCG-CD2\t\t\tTRP\t\t1.432\t\t0.017\nCD1-NE1\t\t\tTRP\t\t1.375\t\t0.017\nNE1-CE2\t\t\tTRP\t\t1.371\t\t0.013\nCD2-CE2\t\t\tTRP\t\t1.409\t\t0.012\nCD2-CE3\t\t\tTRP\t\t1.399\t\t0.015\nCE2-CZ2\t\t\tTRP\t\t1.393\t\t0.017\nCE3-CZ3\t\t\tTRP\t\t1.380\t\t0.017\nCZ2-CH2\t\t\tTRP\t\t1.369\t\t0.019\nCZ3-CH2\t\t\tTRP\t\t1.396\t\t0.016\nN-CA\t\t\tTRP\t\t1.459\t\t0.020\nCA-C\t\t\tTRP\t\t1.525\t\t0.026\nC-O\t\t\tTRP\t\t1.229\t\t0.019\nCA-CB\t\t\tTYR\t\t1.535\t\t0.022\nCB-CG\t\t\tTYR\t\t1.512\t\t0.015\nCG-CD1\t\t\tTYR\t\t1.387\t\t0.013\nCG-CD2\t\t\tTYR\t\t1.387\t\t0.013\nCD1-CE1\t\t\tTYR\t\t1.389\t\t0.015\nCD2-CE2\t\t\tTYR\t\t1.389\t\t0.015\nCE1-CZ\t\t\tTYR\t\t1.381\t\t0.013\nCE2-CZ\t\t\tTYR\t\t1.381\t\t0.013\nCZ-OH\t\t\tTYR\t\t1.374\t\t0.017\nN-CA\t\t\tTYR\t\t1.459\t\t0.020\nCA-C\t\t\tTYR\t\t1.525\t\t0.026\nC-O\t\t\tTYR\t\t1.229\t\t0.019\nCA-CB\t\t\tVAL\t\t1.543\t\t0.021\nCB-CG1\t\t\tVAL\t\t1.524\t\t0.021\nCB-CG2\t\t\tVAL\t\t1.524\t\t0.021\nN-CA\t\t\tVAL\t\t1.459\t\t0.020\nCA-C\t\t\tVAL\t\t1.525\t\t0.026\nC-O\t\t\tVAL\t\t1.229\t\t0.019\n-\n\nAngle\t\t\tResidue\t\tMean\t\tStdDev\nN-CA-CB\t\t\tALA\t\t110.1\t\t1.4\nCB-CA-C\t\t\tALA\t\t110.1\t\t1.5\nN-CA-C\t\t\tALA\t\t111.0\t\t2.7\nCA-C-O\t\t\tALA\t\t120.1\t\t2.1\nN-CA-CB\t\t\tARG\t\t110.6\t\t1.8\nCB-CA-C\t\t\tARG\t\t110.4\t\t2.0\nCA-CB-CG\t\tARG\t\t113.4\t\t2.2\nCB-CG-CD\t\tARG\t\t111.6\t\t2.6\nCG-CD-NE\t\tARG\t\t111.8\t\t2.1\nCD-NE-CZ\t\tARG\t\t123.6\t\t1.4\nNE-CZ-NH1\t\tARG\t\t120.3\t\t0.5\nNE-CZ-NH2\t\tARG\t\t120.3\t\t0.5\nNH1-CZ-NH2\t\tARG\t\t119.4\t\t1.1\nN-CA-C\t\t\tARG\t\t111.0\t\t2.7\nCA-C-O\t\t\tARG\t\t120.1\t\t2.1\nN-CA-CB\t\t\tASN\t\t110.6\t\t1.8\nCB-CA-C\t\t\tASN\t\t110.4\t\t2.0\nCA-CB-CG\t\tASN\t\t113.4\t\t2.2\nCB-CG-ND2\t\tASN\t\t116.7\t\t2.4\nCB-CG-OD1\t\tASN\t\t121.6\t\t2.0\nND2-CG-OD1\t\tASN\t\t121.9\t\t2.3\nN-CA-C\t\t\tASN\t\t111.0\t\t2.7\nCA-C-O\t\t\tASN\t\t120.1\t\t2.1\nN-CA-CB\t\t\tASP\t\t110.6\t\t1.8\nCB-CA-C\t\t\tASP\t\t110.4\t\t2.0\nCA-CB-CG\t\tASP\t\t113.4\t\t2.2\nCB-CG-OD1\t\tASP\t\t118.3\t\t0.9\nCB-CG-OD2\t\tASP\t\t118.3\t\t0.9\nOD1-CG-OD2\t\tASP\t\t123.3\t\t1.9\nN-CA-C\t\t\tASP\t\t111.0\t\t2.7\nCA-C-O\t\t\tASP\t\t120.1\t\t2.1\nN-CA-CB\t\t\tCYS\t\t110.8\t\t1.5\nCB-CA-C\t\t\tCYS\t\t111.5\t\t1.2\nCA-CB-SG\t\tCYS\t\t114.2\t\t1.1\nN-CA-C\t\t\tCYS\t\t111.0\t\t2.7\nCA-C-O\t\t\tCYS\t\t120.1\t\t2.1\nN-CA-CB\t\t\tGLU\t\t110.6\t\t1.8\nCB-CA-C\t\t\tGLU\t\t110.4\t\t2.0\nCA-CB-CG\t\tGLU\t\t113.4\t\t2.2\nCB-CG-CD\t\tGLU\t\t114.2\t\t2.7\nCG-CD-OE1\t\tGLU\t\t118.3\t\t2.0\nCG-CD-OE2\t\tGLU\t\t118.3\t\t2.0\nOE1-CD-OE2\t\tGLU\t\t123.3\t\t1.2\nN-CA-C\t\t\tGLU\t\t111.0\t\t2.7\nCA-C-O\t\t\tGLU\t\t120.1\t\t2.1\n
N-CA-CB\t\t\tGLN\t\t110.6\t\t1.8\nCB-CA-C\t\t\tGLN\t\t110.4\t\t2.0\nCA-CB-CG\t\tGLN\t\t113.4\t\t2.2\nCB-CG-CD\t\tGLN\t\t111.6\t\t2.6\nCG-CD-OE1\t\tGLN\t\t121.6\t\t2.0\nCG-CD-NE2\t\tGLN\t\t116.7\t\t2.4\nOE1-CD-NE2\t\tGLN\t\t121.9\t\t2.3\nN-CA-C\t\t\tGLN\t\t111.0\t\t2.7\nCA-C-O\t\t\tGLN\t\t120.1\t\t2.1\nN-CA-C\t\t\tGLY\t\t113.1\t\t2.5\nCA-C-O\t\t\tGLY\t\t120.6\t\t1.8\nN-CA-CB\t\t\tHIS\t\t110.6\t\t1.8\nCB-CA-C\t\t\tHIS\t\t110.4\t\t2.0\nCA-CB-CG\t\tHIS\t\t113.6\t\t1.7\nCB-CG-ND1\t\tHIS\t\t123.2\t\t2.5\nCB-CG-CD2\t\tHIS\t\t130.8\t\t3.1\nCG-ND1-CE1\t\tHIS\t\t108.2\t\t1.4\nND1-CE1-NE2\t\tHIS\t\t109.9\t\t2.2\nCE1-NE2-CD2\t\tHIS\t\t106.6\t\t2.5\nNE2-CD2-CG\t\tHIS\t\t109.2\t\t1.9\nCD2-CG-ND1\t\tHIS\t\t106.0\t\t1.4\nN-CA-C\t\t\tHIS\t\t111.0\t\t2.7\nCA-C-O\t\t\tHIS\t\t120.1\t\t2.1\nN-CA-CB\t\t\tILE\t\t110.8\t\t2.3\nCB-CA-C\t\t\tILE\t\t111.6\t\t2.0\nCA-CB-CG1\t\tILE\t\t111.0\t\t1.9\nCB-CG1-CD1\t\tILE\t\t113.9\t\t2.8\nCA-CB-CG2\t\tILE\t\t110.9\t\t2.0\nCG1-CB-CG2\t\tILE\t\t111.4\t\t2.2\nN-CA-C\t\t\tILE\t\t111.0\t\t2.7\nCA-C-O\t\t\tILE\t\t120.1\t\t2.1\nN-CA-CB\t\t\tLEU\t\t110.4\t\t2.0\nCB-CA-C\t\t\tLEU\t\t110.2\t\t1.9\nCA-CB-CG\t\tLEU\t\t115.3\t\t2.3\nCB-CG-CD1\t\tLEU\t\t111.0\t\t1.7\nCB-CG-CD2\t\tLEU\t\t111.0\t\t1.7\nCD1-CG-CD2\t\tLEU\t\t110.5\t\t3.0\nN-CA-C\t\t\tLEU\t\t111.0\t\t2.7\nCA-C-O\t\t\tLEU\t\t120.1\t\t2.1\nN-CA-CB\t\t\tLYS\t\t110.6\t\t1.8\nCB-CA-C\t\t\tLYS\t\t110.4\t\t2.0\nCA-CB-CG\t\tLYS\t\t113.4\t\t2.2\nCB-CG-CD\t\tLYS\t\t111.6\t\t2.6\nCG-CD-CE\t\tLYS\t\t111.9\t\t3.0\nCD-CE-NZ\t\tLYS\t\t111.7\t\t2.3\nN-CA-C\t\t\tLYS\t\t111.0\t\t2.7\nCA-C-O\t\t\tLYS\t\t120.1\t\t2.1\nN-CA-CB\t\t\tMET\t\t110.6\t\t1.8\nCB-CA-C\t\t\tMET\t\t110.4\t\t2.0\nCA-CB-CG\t\tMET\t\t113.3\t\t1.7\nCB-CG-SD\t\tMET\t\t112.4\t\t3.0\nCG-SD-CE\t\tMET\t\t100.2\t\t1.6\nN-CA-C\t\t\tMET\t\t111.0\t\t2.7\nCA-C-O\t\t\tMET\t\t120.1\t\t2.1\nN-CA-CB\t\t\tPHE\t\t110.6\t\t1.8\nCB-CA-C\t\t\tPHE\t\t110.4\t\t2.0\nCA-CB-CG\t\tPHE\t\t113.9\t\t2.4\nCB-CG-CD1\t\tPHE\t\t120.8\t\t0.7\nCB-CG-CD2\t\tPHE\t\t120.8\t\t0.7\nCD1-CG-CD2\t\tPHE\t\t118.3\t\t1.3\nCG-CD1-CE1\t\tPHE\t\t120.8\t\t1.1\nCG-CD2-CE2\t\tPHE\t\t120.8\t\t1.1\nCD1-CE1-CZ\t\tPHE\t\t120.1\t\t1.2\nCD2-CE2-CZ\t\tPHE\t\t120.1\t\t1.2\nCE1-CZ-CE2\t\tPHE\t\t120.0\t\t1.8\nN-CA-C\t\t\tPHE\t\t111.0\t\t2.7\nCA-C-O\t\t\tPHE\t\t120.1\t\t2.1\nN-CA-CB\t\t\tPRO\t\t103.3\t\t1.2\nCB-CA-C\t\t\tPRO\t\t111.7\t\t2.1\nCA-CB-CG\t\tPRO\t\t104.8\t\t1.9\nCB-CG-CD\t\tPRO\t\t106.5\t\t3.9\nCG-CD-N\t\t\tPRO\t\t103.2\t\t1.5\nCA-N-CD\t\t\tPRO\t\t111.7\t\t1.4\nN-CA-C\t\t\tPRO\t\t112.1\t\t2.6\nCA-C-O\t\t\tPRO\t\t120.2\t\t2.4\nN-CA-CB\t\t\tSER\t\t110.5\t\t1.5\nCB-CA-C\t\t\tSER\t\t110.1\t\t1.9\nCA-CB-OG\t\tSER\t\t111.2\t\t2.7\nN-CA-C\t\t\tSER\t\t111.0\t\t2.7\nCA-C-O\t\t\tSER\t\t120.1\t\t2.1\nN-CA-CB\t\t\tTHR\t\t110.3\t\t1.9\nCB-CA-C\t\t\tTHR\t\t111.6\t\t2.7\nCA-CB-OG1\t\tTHR\t\t109.0\t\t2.1\nCA-CB-CG2\t\tTHR\t\t112.4\t\t1.4\nOG1-CB-CG2\t\tTHR\t\t110.0\t\t2.3\nN-CA-C\t\t\tTHR\t\t111.0\t\t2.7\nCA-C-O\t\t\tTHR\t\t120.1\t\t2.1\nN-CA-CB\t\t\tTRP\t\t110.6\t\t1.8\nCB-CA-C\t\t\tTRP\t\t110.4\t\t2.0\nCA-CB-CG\t\tTRP\t\t113.7\t\t1.9\nCB-CG-CD1\t\tTRP\t\t127.0\t\t1.3\nCB-CG-CD2\t\tTRP\t\t126.6\t\t1.3\nCD1-CG-CD2\t\tTRP\t\t106.3\t\t0.8\nCG-CD1-NE1\t\tTRP\t\t110.1\t\t1.0\nCD1-NE1-CE2\t\tTRP\t\t109.0\t\t0.9\nNE1-CE2-CD2\t\tTRP\t\t107.3\t\t1.0\nCE2-CD2-CG\t\tTRP\t\t107.3\t\t0.8\nCG-CD2-CE3\t\tTRP\t\t133.9\t\t0.9\nNE1-CE2-CZ2\t\tTRP\t\t130.4\t\t1.1\nCE3-CD2-CE2\t\tTRP\t\t118.7\t\t1.2\nCD2-CE2-CZ2\t\tTRP\t\t122.3\t\t1.2\nCE2-CZ2-CH2\t\tTRP\t\t117.4\t\t1.0\nCZ2-CH2-CZ3\t\tTRP\t\t121.6\t\t1.2\nCH2-CZ3-CE3\t\tTRP\t\t121.2\t\t1.1\nCZ3-CE3-CD2\t\tTRP\
t\t118.8\t\t1.3\nN-CA-C\t\t\tTRP\t\t111.0\t\t2.7\nCA-C-O\t\t\tTRP\t\t120.1\t\t2.1\nN-CA-CB\t\t\tTYR\t\t110.6\t\t1.8\nCB-CA-C\t\t\tTYR\t\t110.4\t\t2.0\nCA-CB-CG\t\tTYR\t\t113.4\t\t1.9\nCB-CG-CD1\t\tTYR\t\t121.0\t\t0.6\nCB-CG-CD2\t\tTYR\t\t121.0\t\t0.6\nCD1-CG-CD2\t\tTYR\t\t117.9\t\t1.1\nCG-CD1-CE1\t\tTYR\t\t121.3\t\t0.8\nCG-CD2-CE2\t\tTYR\t\t121.3\t\t0.8\nCD1-CE1-CZ\t\tTYR\t\t119.8\t\t0.9\nCD2-CE2-CZ\t\tTYR\t\t119.8\t\t0.9\nCE1-CZ-CE2\t\tTYR\t\t119.8\t\t1.6\nCE1-CZ-OH\t\tTYR\t\t120.1\t\t2.7\nCE2-CZ-OH\t\tTYR\t\t120.1\t\t2.7\nN-CA-C\t\t\tTYR\t\t111.0\t\t2.7\nCA-C-O\t\t\tTYR\t\t120.1\t\t2.1\nN-CA-CB\t\t\tVAL\t\t111.5\t\t2.2\nCB-CA-C\t\t\tVAL\t\t111.4\t\t1.9\nCA-CB-CG1\t\tVAL\t\t110.9\t\t1.5\nCA-CB-CG2\t\tVAL\t\t110.9\t\t1.5\nCG1-CB-CG2\t\tVAL\t\t110.9\t\t1.6\nN-CA-C\t\t\tVAL\t\t111.0\t\t2.7\nCA-C-O\t\t\tVAL\t\t120.1\t\t2.1\n-\n\nNon-bonded distance Minimum Dist Tolerance\nC-C 3.4 1.5\nC-N 3.25 1.5\nC-S 3.5 1.5\nC-O 3.22 1.5\nN-N 3.1 1.5\nN-S 3.35 1.5\nN-O 3.07 1.5\nO-S 3.32 1.5\nO-O 3.04 1.5\nS-S 2.03 1.0\n-'''\n lines_iter = iter(stereo_chemical_props.splitlines())\n # Load bond lengths.\n residue_bonds = {}\n next(lines_iter) # Skip header line.\n for line in lines_iter:\n if line.strip() == '-':\n break\n bond, resname, length, stddev = line.split()\n atom1, atom2 = bond.split('-')\n if resname not in residue_bonds:\n residue_bonds[resname] = []\n residue_bonds[resname].append(\n Bond(atom1, atom2, float(length), float(stddev)))\n residue_bonds['UNK'] = []\n\n # Load bond angles.\n residue_bond_angles = {}\n next(lines_iter) # Skip empty line.\n next(lines_iter) # Skip header line.\n for line in lines_iter:\n if line.strip() == '-':\n break\n bond, resname, angle_degree, stddev_degree = line.split()\n atom1, atom2, atom3 = bond.split('-')\n if resname not in residue_bond_angles:\n residue_bond_angles[resname] = []\n residue_bond_angles[resname].append(\n BondAngle(atom1, atom2, atom3,\n float(angle_degree) / 180. * np.pi,\n float(stddev_degree) / 180. * np.pi))\n residue_bond_angles['UNK'] = []\n\n def make_bond_key(atom1_name, atom2_name):\n \"\"\"Unique key to lookup bonds.\"\"\"\n return '-'.join(sorted([atom1_name, atom2_name]))\n\n # Translate bond angles into distances (\"virtual bonds\").\n residue_virtual_bonds = {}\n for resname, bond_angles in residue_bond_angles.items():\n # Create a fast lookup dict for bond lengths.\n bond_cache = {}\n for b in residue_bonds[resname]:\n bond_cache[make_bond_key(b.atom1_name, b.atom2_name)] = b\n residue_virtual_bonds[resname] = []\n for ba in bond_angles:\n bond1 = bond_cache[make_bond_key(ba.atom1_name, ba.atom2_name)]\n bond2 = bond_cache[make_bond_key(ba.atom2_name, ba.atom3name)]\n\n # Compute distance between atom1 and atom3 using the law of cosines\n # c^2 = a^2 + b^2 - 2ab*cos(gamma).\n gamma = ba.angle_rad\n length = np.sqrt(bond1.length**2 + bond2.length**2\n - 2 * bond1.length * bond2.length * np.cos(gamma))\n\n # Propagation of uncertainty assuming uncorrelated errors.\n dl_outer = 0.5 / length\n dl_dgamma = (2 * bond1.length * bond2.length * np.sin(gamma)) * dl_outer\n dl_db1 = (2 * bond1.length - 2 * bond2.length * np.cos(gamma)) * dl_outer\n dl_db2 = (2 * bond2.length - 2 * bond1.length * np.cos(gamma)) * dl_outer\n stddev = np.sqrt((dl_dgamma * ba.stddev)**2 +\n (dl_db1 * bond1.stddev)**2 +\n (dl_db2 * bond2.stddev)**2)\n residue_virtual_bonds[resname].append(\n Bond(ba.atom1_name, ba.atom3name, length, stddev))\n\n return (residue_bonds,\n residue_virtual_bonds,\n residue_bond_angles)",
"def _find_primary_component(self):\n progcomps = {}\n spec = {}\n primary_component = None\n for comp in self._component_classes:\n if comp == \"CPL\":\n continue\n spec[comp] = self.get_value(\"COMP_{}\".format(comp))\n notprogcomps = (\"D{}\".format(comp), \"X{}\".format(comp), \"S{}\".format(comp))\n if spec[comp].upper() in notprogcomps:\n progcomps[comp] = False\n else:\n progcomps[comp] = True\n expect(\n \"ATM\" in progcomps\n and \"LND\" in progcomps\n and \"OCN\" in progcomps\n and \"ICE\" in progcomps,\n \" Not finding expected components in {}\".format(self._component_classes),\n )\n if (\n progcomps[\"ATM\"]\n and progcomps[\"LND\"]\n and progcomps[\"OCN\"]\n and progcomps[\"ICE\"]\n ):\n primary_component = \"allactive\"\n elif progcomps[\"LND\"] and progcomps[\"OCN\"] and progcomps[\"ICE\"]:\n # this is a \"J\" compset\n primary_component = \"allactive\"\n elif progcomps[\"ATM\"] and progcomps[\"OCN\"] and progcomps[\"ICE\"]:\n # this is a ufs s2s compset\n primary_component = \"allactive\"\n elif progcomps[\"ATM\"]:\n if \"DOCN%SOM\" in self._compsetname and progcomps[\"LND\"]:\n # This is an \"E\" compset\n primary_component = \"allactive\"\n else:\n # This is an \"F\" or \"Q\" compset\n primary_component = spec[\"ATM\"]\n elif progcomps[\"LND\"]:\n # This is an \"I\" compset\n primary_component = spec[\"LND\"]\n elif progcomps[\"OCN\"]:\n # This is a \"C\" or \"G\" compset\n primary_component = spec[\"OCN\"]\n elif progcomps[\"ICE\"]:\n # This is a \"D\" compset\n primary_component = spec[\"ICE\"]\n elif \"GLC\" in progcomps and progcomps[\"GLC\"]:\n # This is a \"TG\" compset\n primary_component = spec[\"GLC\"]\n elif progcomps[\"ROF\"]:\n # This is a \"R\" compset\n primary_component = spec[\"ROF\"]\n elif progcomps[\"WAV\"]:\n # This is a \"V\" compset\n primary_component = spec[\"WAV\"]\n else:\n # This is \"A\", \"X\" or \"S\"\n primary_component = \"drv\"\n\n return primary_component",
"def attributes_desc():\n columns = [\n '5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive', 'Bags_Under_Eyes', 'Bald', 'Bangs',\n 'Big_Lips', 'Big_Nose', 'Black_Hair', 'Blond_Hair', 'Blurry', 'Brown_Hair', 'Bushy_Eyebrows',\n 'Chubby', 'Double_Chin', 'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones',\n 'Male', 'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard', 'Oval_Face', 'Pale_Skin',\n 'Pointy_Nose', 'Receding_Hairline', 'Rosy_Cheeks', 'Sideburns', 'Smiling', 'Straight_Hair', 'Wavy_Hair',\n 'Wearing_Earrings', 'Wearing_Hat', 'Wearing_Lipstick', 'Wearing_Necklace', 'Wearing_Necktie', 'Young',\n ]\n\n return map(str.lower, columns)",
"def get_pymatgen_descriptor(composition, property_name):\n eldata = []\n # what are these named tuples for? not used or returned! -KM\n eldata_tup_lst = []\n eldata_tup = collections.namedtuple('eldata_tup', 'element propname propvalue propunit amt')\n\n oxidation_states = {}\n if isinstance(composition, Composition):\n # check whether the composition is composed of oxidation state decorates species (not just plain Elements)\n if hasattr(composition.elements[0], \"oxi_state\"):\n oxidation_states = dict([(str(sp.element), sp.oxi_state) for sp in composition.elements])\n el_amt_dict = composition.get_el_amt_dict()\n # string\n else:\n comp, oxidation_states = get_composition_oxidation_state(composition)\n el_amt_dict = comp.get_el_amt_dict()\n\n symbols = sorted(el_amt_dict.keys(), key=lambda sym: get_el_sp(sym).X)\n\n for el_sym in symbols:\n\n element = Element(el_sym)\n property_value = None\n property_units = None\n\n try:\n p = getattr(element, property_name)\n except AttributeError:\n print(\"{} attribute missing\".format(property_name))\n raise\n\n if p is not None:\n if property_name in ['ionic_radii']:\n if oxidation_states:\n property_value = element.ionic_radii[oxidation_states[el_sym]]\n property_units = Unit(\"ang\")\n else:\n raise ValueError(\"oxidation state not given for {}; It does not yield a unique \"\n \"number per Element\".format(property_name))\n else:\n property_value = float(p)\n\n # units are None for these pymatgen descriptors\n # todo: there seem to be a lot more unitless descriptors which are not listed here... -Alex D\n if property_name not in ['X', 'Z', 'group', 'row', 'number', 'mendeleev_no', 'ionic_radii']:\n property_units = p.unit\n\n # Make a named tuple out of all the available information\n eldata_tup_lst.append(eldata_tup(element=el_sym, propname=property_name, propvalue=property_value,\n propunit=property_units, amt=el_amt_dict[el_sym]))\n\n # Add descriptor values, one for each atom in the compound\n for i in range(int(el_amt_dict[el_sym])):\n eldata.append(property_value)\n\n return eldata",
"def components(tdata):\n\n\t# start by seeing how many components they match to source names.\n\tcomp = '../LOFAR_HBA_T1_DR1_merge_ID_v1.1b.comp.fits'\n\tcomp = Table(fits.open(comp)[1].data)\n\tcomp['Source_Name_2'] = comp['Source_Name'] # For matching\n\n\tresult = join(tdata,comp,keys='Source_Name_2',uniq_col_name='{col_name}_{table_name}_3')\n\tnames, counts = np.unique(result['Source_Name_2'],return_counts=True)\n\t# component names now are the column: result['Component_Name_2_3']\n\t# print (len(comp), len(names), len(counts))\n\tindices = np.where(counts > 1)\n\t# print (indices)\n\tmultiple_comp_names = names[indices]\n\n\t# Should also check if we have NN when there is only 1 component\n\tnum_matches = 0\n\tnum_mg = 0 \n\tsource_name1s = []\n\tsource_names_correct = []\n\tfor name in multiple_comp_names:\n\t\tcurrent_index = np.where(result['Source_Name_2'] == name)\n\t\tcompnames = result['Component_Name_2_3'][current_index] # Both components as in the VA\n\t\tcomp1 = result['Source_Name_1'][current_index][0] # Component 1 \n\t\tcomp2 = result['new_NN_Source_Name'][current_index][0] # Component 2\n\n\t\tif comp2 == 'N/A': # MG source\n\t\t\tnum_mg +=1\n\n\t\telif (comp1 in compnames and comp2 in compnames): # Both correct\n\t\t\tnum_matches+=1\n\t\t\tsource_names_correct.append(comp1)\n\n\t\telif (comp1 in compnames) != (comp2 in compnames): # One wrong, one correct\n\t\t\t# print 'Half fout:', current_index\n\t\t\t# print compnames\n\t\t\t# print comp1, comp2\n\t\t\tsource_name1s.append(comp1) # save the sourcenames that are wrong\n\n\n\tprint ('Number of correct matches:',num_matches)\n\tprint ('Number of MG sources:', num_mg)\n\t# print source_name1s\n\t# sourcenamesincorrect = Table()\n\t# sourcenamesincorrect['Source_Name_1'] = source_name1s\n\t# sourcenamesincorrect.write('/data1/osinga/value_added_catalog/2876_NOTmatched_sourcesnames.fits')\n\t\n\t# return the unique source names, how much times they appear and the (in)correct matches\n\treturn names, counts, source_name1s, source_names_correct",
"def double_helix_parser(input_file, output_file, helicies_length = 6, helix_gap = 3, pro_eitherside = 3):\n res_no_l = [] # for residue names \n res_name_l = [] # for amino acid names\n sec_str_l = [] # for sec structure prediction\n\n two_helix_l = [] # contains a list aminoacids (also a list)\n\n # Extracts the residue no, amino acid and secstr and signs to variables\n rx_seq = re.compile(r\"^(\\w+?)\\s+?(\\w+?)\\s+?(\\S)\", re.MULTILINE)\n text = fileread(input_file)\n\n\n # assign the matched groups in the text to the res_no_l, res_name_l and sec_str_str\n for match in rx_seq.finditer(text):\n res_no, res_name, sec_str = match.groups()\n\n res_no_l.append(res_no)\n res_name_l.append(res_name)\n sec_str_l += sec_str\n\n\n # creates dictionaries for each with the chain as the key\n chains_sec_str_d = keychain_value_str(res_no_l, sec_str_l)\n chains_res_no_d = keychain_value_list(res_no_l, res_no_l)\n chains_res_name_d = keychain_value_list(res_no_l, res_name_l)\n\n\n\n # which a Pro is found a in the res_name_d[chain] its secstr in sec_str_d is replaced with a P\n # We will then search for this P later on \n\n counter = 0 \n for chain in chains_res_name_d:\n #print(chains_res_name_d[chain])\n counter = 0 \n for residue in chains_res_name_d[chain]:\n #print(chains_res_name_d[chain][counter])\n if residue == 'PRO':\n chains_sec_str_d[chain] = chains_sec_str_d[chain][:counter] + 'P' + chains_sec_str_d[chain][counter + 1:]\n #print(chains_res_no_d[chain][counter])\n counter += 1 \n\n # only adds if a proline is found in the gap\n # contains 2 groups, the 1st group being the whole helix and group 2 being the gap\n for x in chains_sec_str_d:\n \n regex = \"([h|H]{6,}(?:.?){1}(P)(?:.?){1}[h|H]{6,})\"\n p = re.compile(r\"\" +regex +\"\")\n\n # if one is found it prints out the residues numbers of that helix\n for match in p.finditer(chains_sec_str_d[x]):\n # adjusted to check for Proline around the gap 1 before and 1 after\n two_helix_l += [chains_res_no_d[x][ (match.start(1)) : (match.end(1)) ]]\n match_groups =(match.groups())\n\n # finds the location of the proline for mutation using mutmod\n pro_res = (x + str(match.start(2)))\n print(pro_res + \" :\" + match.group(2))\n\n\n tempstr = \"\"\n\n for protein in two_helix_l:\n for residue in protein:\n tempstr += (residue + \"\\n\")\n tempstr +=(\"\\n\")\n\n\n output = open(output_file, 'w')\n output.write(tempstr)\n output.close()\n #print('#####################')\n #print(tempstr)\n #print('#####################')",
"def AddProteinName(files,PDic,apecific_dic,MergeFileList):\n controlList=[\"control+\",\"SD+control\"]\n OnlyControl=open(MergeFileList[0],'a')\n withoutControl=open(MergeFileList[1],'a')\n All=open(MergeFileList[2],'a')\n\n with open(files+\".txt\") as openFile:\n next(openFile)\n for line in openFile:\n splits=line.strip().split(\"\\t\")\n plateName=splits[0].strip()\n # if len(splits)==13:\n # modline=\"\\t\".join(splits)\n # else:\n modline=\"\\t\".join(splits[:13])\n id=splits[3].strip()+\"\\t\"+splits[4].strip()+\"\\t\"+splits[5].strip()\n if id in PDic :#and PDic[id][1].strip() not in controlList:\n if PDic[id][2].strip() in apecific_dic:\n wln=[PDic[id][0],\"\\t\",\"A-specific\",\"\\t\",PDic[id][2],\"\\t\",PDic[id][3],\"\\t\",PDic[id][4],\"\\t\",modline+\"\\n\"]\n else:\n wln=[PDic[id][0],\"\\t\",PDic[id][1],\"\\t\",PDic[id][2],\"\\t\",PDic[id][3],\"\\t\",PDic[id][4],\"\\t\",modline+\"\\n\"]\n if wln[2] not in controlList:\n withoutControl.writelines(wln)\n else:\n OnlyControl.writelines(wln)\n All.writelines(wln)\n return files",
"def get_second_layer_props(compound_name, required_properties):\n # Get the compound's PubChem CID\n compound_cid = get_cid_by_name(compound_name)\n\n # Contsruct the link\n pubchem_all_data_link = \"https://pubchem.ncbi.nlm.nih.gov/rest/pug_view/\"\n pubchem_all_data_link += \"data/compound/%s/JSON\" % compound_cid\n\n # Get the JSON from the constructed link and convert it to Python Dictionary\n all_the_data = pubchem_parsing(pubchem_all_data_link)\n\n # Get to the data sections, get rid of References\n data_sections = all_the_data['Record']['Section']\n\n \"\"\"\n Out of all the sections\n\n (2D structure, 3D conformer, LCSS, Names and Idenrifers, Chemical and \n Physical Properties, Related Records, Chemical Vendors, Food additives, \n Agrochemical Info, Pharmacology and Biochemistry, Use and Manufacturing,\n Identification, Safety and Hazards, Toxicity, Literature, Patents, \n Biomolecular Interactions, Biological Tests Result, Classification)\n\n choose only the most chemically interesting ones: \n\n <Names and Identifies> and <Chemical and Physical Properties>\n \"\"\"\n\n # this could be customizable, actually - please send PR or raise an issue\n # if you'd like to have this done\n sections_of_interests = ['Names and Identifiers',\n 'Chemical and Physical Properties']\n\n # Empty array to store all the data of the sections of interes\n sections_of_interests_data = []\n\n # the required parameters can be accesed via\n # dictionary in the array => Section => TOCHeading == 'parameter name' =>\n # => Information\n\n # Construct an array of only interesting data\n for section_title in sections_of_interests:\n sample_list = list(filter(lambda section: section['TOCHeading'] == section_title, data_sections))\n sections_of_interests_data.append(sample_list[0])\n\n # However, we are still away from getting the parameters values\n # The sections_of_interests_data contains lots of interesting information,\n # however it is not that useful for the chemist's everyday usage,\n # so I limit the data we can get to 3 identifiers listed below:\n\n required_identifiers = ['Computed Descriptors',\n 'Other Identifiers',\n 'Experimental Properties']\n\n # The data for each identifier (stated above) will be stored in this array\n # To get the parameters for these identifiers one will have to look for\n # dictionary => 'Section' array, which will yield yet another array,\n # but this time it will be full of parameters one can grab\n all_pubchem_data_array_for_section = []\n\n # if section_dictionary['Section'] contains a dictionary with\n # 'TOCHeading' equaling to anything from required_identifiers,\n # add the matching dictionary into the new array initialized above\n for section_dictionary in sections_of_interests_data:\n sample_list = list(\n filter(lambda section: section['TOCHeading'] in required_identifiers, section_dictionary['Section']))\n all_pubchem_data_array_for_section = all_pubchem_data_array_for_section + sample_list\n\n # it's not full, though, you can also extract more data from PubChem\n # but I've just found these parameters to be of the most interest\n # for a chemist's everyday use\n list_of_all_possible_params = ['IUPAC Name',\n 'InChI',\n 'InChI Key',\n 'Canonical SMILES',\n 'Wikipedia',\n 'Boiling Point',\n 'Melting Point',\n 'Flash Point',\n 'Solubility',\n 'Density',\n 'Vapor Density',\n 'Vapor Pressure',\n 'LogP',\n 'Stability',\n 'Auto-Ignition',\n 'Viscosity',\n 'Heat of Combustion',\n 'Heat of Vaporization',\n 'Surface Tension',\n 'Ionization Potential',\n 'Dissociation 
Constants']\n\n # this array will later store the data about requested parameters\n # in the PubChem format\n required_data_in_pubchem_format = []\n\n for molecule_desc_object in all_pubchem_data_array_for_section:\n sample_list = list(\n filter(lambda section: section['TOCHeading'] in required_properties, molecule_desc_object['Section']))\n required_data_in_pubchem_format = required_data_in_pubchem_format + sample_list\n\n # Final dictionary of compound properties that will be returned in the end\n compound_properties_dictionary = {}\n\n for property_object in required_data_in_pubchem_format:\n compound_properties_dictionary[property_object['TOCHeading']] = property_object['Information']\n\n return compound_properties_dictionary",
"def composition(self):\n if 'parallel' in self.properties: #Composite parallel\n return 'parallel'\n elif 'sequence' in self.properties: #Composite sequence\n return 'sequence'\n elif 'choose' in self.properties: #Composite choice\n return 'choose'\n else:\n return None",
"def calculate_description(self, dataframe, dataframe_y=''):\r\n # NO return\r\n #TODO: Missing value replacement??\r\n #NaN and '' problems. Or it is better to do it in the data-dict center.\r\n column = dataframe.replace({self.variablename:\r\n {'': NaN}})[self.variablename]\r\n #column = dataframe[self.variablename]\r\n ############# GENERATE DESCRIPTION TABLE ################\r\n # calculation of the missing value proportion.\r\n m = column.size\r\n missing = (m - column.count())/m\r\n if self.vartype == '':\r\n #TODO: Add \"intelligence\" (identify str or numeric)\r\n self.vartype = 'Categorical'\r\n if self.vartype == 'Categorical':\r\n #TODO: calculate description\r\n categories = list(column.unique())\r\n number_cats = len(categories)\r\n if dataframe_y:\r\n conversion = []\r\n for e in categories:\r\n # TOTEST\r\n conversion.append(float(dataframe_y[column == e].mean()))\r\n # calculate Gini index or sth like or the unequality index\r\n mode = column.mode()[0]\r\n vmode = column[(column == mode)].count()\r\n\r\n table = pd.DataFrame([str(number_cats), mode, str(vmode),\r\n \"{0:.2f}\".format(missing*100) + ' %'])\r\n table = table.transpose()\r\n table.columns = ['# cats', 'mode', 'volumn_mode', '% missings']\r\n elif self.vartype == 'Ordinal':\r\n #TODO: Problems, could be ordinal but in string expression.\r\n # (ex: Bad, regular good.) Search for solutions.\r\n pass\r\n elif self.vartype == 'Numerical':\r\n #TODO: calculate description\r\n # column = lista[self.variablename].apply(int)\r\n # In theory it is formatted as we want.\r\n #Else we have a problem, but this line it shouldnt be needed.\r\n rang = [column.min(), column.max()]\r\n mean = column.mean()\r\n std = column.std()\r\n\r\n table = pd.DataFrame([str(rang), \"{0:.2f}\".format(mean),\r\n \"{0:.2f}\".format(std),\r\n \"{0:.2f}\".format(missing*100) + ' %'])\r\n table = table.transpose()\r\n table.columns = ['range', 'mean', 'std', '% missings']\r\n\r\n # probably histogram to calculate conversion?\r\n\r\n self.table['Description'] = table\r\n\r\n #########################################################\r\n #TODO: generate tables\r\n #TODO: generate plots\r",
"def annoate_basepair_composition(self, helix, data):\n nt_pairings = [f'{l1}{l2}' for l1, l2 in product('AUGC', repeat=2)] # AA, AU, AG, AC, ... # noqa: E501\n bp_count = np.zeros(len(nt_pairings))\n for (pos1, pos2) in data['basepairs']:\n nuc1 = self.ctgraph.graph.node[pos1]['letter']\n nuc2 = self.ctgraph.graph.node[pos2]['letter']\n\n # Through the next three lines of code, \"AU\" is treated the same\n # as \"UA\".\n bp = f'{nuc1}{nuc2}'\n if bp not in nt_pairings:\n bp = f'{nuc2}{nuc1}'\n # print(bp_count[nt_pairings.index(bp)])\n\n bp_count[nt_pairings.index(bp)] += 1\n self.graph.node[helix]['bp_count'] = bp_count",
"def electrode_separations(dc_survey, survey_type=\"dipole-dipole\", electrode_pair=\"all\"):\n\n if not isinstance(electrode_pair, list):\n if electrode_pair.lower() == \"all\":\n electrode_pair = [\"AB\", \"MN\", \"AM\", \"AN\", \"BM\", \"BN\"]\n elif isinstance(electrode_pair, str):\n electrode_pair = [electrode_pair.upper()]\n else:\n raise Exception(\n \"electrode_pair must be either a string, list of strings, or an \"\n \"ndarray containing the electrode separation distances you would \"\n \"like to calculate not {}\".format(type(electrode_pair))\n )\n\n elecSepDict = {}\n AB = []\n MN = []\n AM = []\n AN = []\n BM = []\n BN = []\n\n for ii, src in enumerate(dc_survey.source_list):\n Tx = src.location\n Rx = src.receiver_list[0].locations\n nDTx = src.receiver_list[0].nD\n\n if survey_type.lower() == \"dipole-dipole\":\n A = matlib.repmat(Tx[0], nDTx, 1)\n B = matlib.repmat(Tx[1], nDTx, 1)\n M = Rx[0]\n N = Rx[1]\n\n AB.append(np.sqrt(np.sum((A[:, :] - B[:, :]) ** 2.0, axis=1)))\n MN.append(np.sqrt(np.sum((M[:, :] - N[:, :]) ** 2.0, axis=1)))\n AM.append(np.sqrt(np.sum((A[:, :] - M[:, :]) ** 2.0, axis=1)))\n AN.append(np.sqrt(np.sum((A[:, :] - N[:, :]) ** 2.0, axis=1)))\n BM.append(np.sqrt(np.sum((B[:, :] - M[:, :]) ** 2.0, axis=1)))\n BN.append(np.sqrt(np.sum((B[:, :] - N[:, :]) ** 2.0, axis=1)))\n\n elif survey_type.lower() == \"pole-dipole\":\n A = matlib.repmat(Tx, nDTx, 1)\n M = Rx[0]\n N = Rx[1]\n\n MN.append(np.sqrt(np.sum((M[:, :] - N[:, :]) ** 2.0, axis=1)))\n AM.append(np.sqrt(np.sum((A[:, :] - M[:, :]) ** 2.0, axis=1)))\n AN.append(np.sqrt(np.sum((A[:, :] - N[:, :]) ** 2.0, axis=1)))\n\n elif survey_type.lower() == \"dipole-pole\":\n A = matlib.repmat(Tx[0], nDTx, 1)\n B = matlib.repmat(Tx[1], nDTx, 1)\n M = Rx\n\n AB.append(np.sqrt(np.sum((A[:, :] - B[:, :]) ** 2.0, axis=1)))\n AM.append(np.sqrt(np.sum((A[:, :] - M[:, :]) ** 2.0, axis=1)))\n BM.append(np.sqrt(np.sum((B[:, :] - M[:, :]) ** 2.0, axis=1)))\n\n elif survey_type.lower() == \"pole-pole\":\n A = matlib.repmat(Tx, nDTx, 1)\n M = Rx\n\n AM.append(np.sqrt(np.sum((A[:, :] - M[:, :]) ** 2.0, axis=1)))\n\n else:\n raise Exception(\n \"survey_type must be 'dipole-dipole' | 'pole-dipole' | \"\n \"'dipole-pole' | 'pole-pole' not {}\".format(survey_type)\n )\n\n if \"AB\" in electrode_pair:\n if AB:\n AB = np.hstack(AB)\n elecSepDict[\"AB\"] = AB\n if \"MN\" in electrode_pair:\n if MN:\n MN = np.hstack(MN)\n elecSepDict[\"MN\"] = MN\n if \"AM\" in electrode_pair:\n if AM:\n AM = np.hstack(AM)\n elecSepDict[\"AM\"] = AM\n if \"AN\" in electrode_pair:\n if AN:\n AN = np.hstack(AN)\n elecSepDict[\"AN\"] = AN\n if \"BM\" in electrode_pair:\n if BM:\n BM = np.hstack(BM)\n elecSepDict[\"BM\"] = BM\n if \"BN\" in electrode_pair:\n if BN:\n BN = np.hstack(BN)\n elecSepDict[\"BN\"] = BN\n\n return elecSepDict",
"def run_pairwise_comp(self, ref_df):\n\n #List of lists\n ref_df_peps = self._get_all_peptides_from_df(ref_df) #Extract high affinity peptides\n score_dict_per_len = self._get_protein_dict_per_len(self.filt_dfs, ref_df_peps) #Create scores dictionary\n\n for prot_name in self.original_proteins:\n\n prot_seq = self.original_proteins_df.ProtSeq[self.original_proteins_df.ID == prot_name].values[0]\n ranges = self.original_proteins_df.Ranges[self.original_proteins_df.ID == prot_name].values[0]\n #Ranges: index data about the location of high affinity peptides in protein being used for comparison\n #Ranges_2: make shallow list from deep list of lists\n ranges_2 = [item for sublist in [i[0] for i in ranges] for item in sublist]\n\n matches_range = []\n\n for list_pep in ref_df_peps:\n for single_pep in list_pep:\n\n high_aa_count = 0\n pep_len = len(single_pep)\n count = prot_seq.count(single_pep) #Number of times a single pep occurs in the entire prot seq\n\n if count > 0: #Find locations where matches occur\n it = re.finditer(single_pep, prot_seq)\n\n for i in it:\n present_range = list(range(i.start(), i.end()))\n if set(present_range).issubset(set(ranges_2)):\n high_aa_count += 1\n matches_range.append(present_range) #Retain match location data\n\n self._update_dict_values_per_len(score_dict_per_len, prot_name, count,\n pep_len, high_aa_count, matches_range)\n\n return score_dict_per_len",
"def calc_aa_propensity(seq):\n\n # count absolute number of each residue in the input string\n number_each_aa_dict = {}\n\n all_aa = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']\n # create an dictionary of the numbers {\"A\" : 57, \"C\" : 5, ...} etc\n for aa in all_aa:\n number_each_aa_dict[aa] = seq.count(aa)\n\n # create a dictionary to hold the propensity of each residue\n aa_propensity_dict = {}\n length = len(seq)\n for aa in number_each_aa_dict:\n aa_propensity_dict[aa] = number_each_aa_dict[aa] / length\n\n # turn the dictionary into a pd.Series\n aa_prop_ser = pd.Series(aa_propensity_dict)\n # normalise so that all the aa propensities add up to 1.0\n # this is important if \"X\" or \"U\" is in the sequences\n aa_prop_norm_ser = aa_prop_ser / aa_prop_ser.sum()\n # name the index column\n aa_prop_norm_ser.index.name = \"freq\"\n return aa_prop_norm_ser",
"def build_path_des(self, path_img):\n des_dir = self.des_dir\n # split the path of the image\n split_res = path_img.split(\"/\")\n # get the image name without the extension .jpg\n img_name_without_extension = split_res[-1].split(\".jpg\")[0]\n # build the beginning of the path\n if len(split_res[:-2]) != 0:\n beg_path = os.path.join(*split_res[:-2])\n else:\n beg_path = \"\"\n # build the end of the path\n end_path = os.path.join(des_dir, img_name_without_extension)\n # build the descriptor path\n path_des = os.path.join(beg_path, end_path)\n return path_des",
"def compute(self, frame):\n global_desc_dict = {}\n atomic_desc_dict = {}\n for element in self.desc_spec_dict.keys():\n global_desc_dict[element], atomic_desc_dict[element] = self.engines[element].create(frame)\n return global_desc_dict, atomic_desc_dict",
"def get_assay_name_info(\n list_assays, assay_name, path, friendly_assay_type, rec_descriptor\n):\n if friendly_assay_type == \"germline\":\n assay_type = \"genetic_variant\"\n elif friendly_assay_type == \"somatic\":\n assay_type = \"somatic_variant\"\n\n #### Get names of genetic assays ####\n if list_assays:\n (target_assays, other_assays) = get_assay_info(\n rec_descriptor, assay_type=assay_type\n )\n if not target_assays:\n err_exit(\"There's no {} assay in the dataset provided.\").format(assay_type)\n else:\n for a in target_assays:\n print(a[\"name\"])\n sys.exit(0)\n\n #### Decide which assay is to be queried and which ref genome is to be used ####\n (target_assays, other_assays) = get_assay_info(\n rec_descriptor, assay_type=assay_type\n )\n\n target_assay_names = [ga[\"name\"] for ga in target_assays]\n target_assay_ids = [ga[\"uuid\"] for ga in target_assays]\n other_assay_names = [oa[\"name\"] for oa in other_assays]\n # other_assay_ids = [oa[\"uuid\"] for oa in other_assays]\n\n if target_assay_names and target_assay_ids:\n selected_assay_name = target_assay_names[0]\n selected_assay_id = target_assay_ids[0]\n else:\n err_exit(\"There's no {} assay in the dataset provided.\").format(\n friendly_assay_type\n )\n if assay_name:\n if assay_name not in list(target_assay_names):\n if assay_name in list(other_assay_names):\n err_exit(\n \"This is not a valid assay. For valid assays accepted by the function, `extract_assay {}`, please use the --list-assays flag.\".format(\n friendly_assay_type\n )\n )\n else:\n err_exit(\n \"Assay {assay_name} does not exist in the {path}.\".format(\n assay_name=assay_name, path=path\n )\n )\n else:\n selected_assay_name = assay_name\n for ga in target_assays:\n if ga[\"name\"] == assay_name:\n selected_assay_id = ga[\"uuid\"]\n\n selected_ref_genome = \"GRCh38.92\"\n \n if friendly_assay_type == \"germline\":\n for a in target_assays:\n if a[\"name\"] == selected_assay_name and a[\"reference_genome\"]:\n selected_ref_genome = a[\"reference_genome\"][\"name\"]\n\n return(selected_assay_name, selected_assay_id, selected_ref_genome)",
"def pairs(self):\n return np.core.defchararray.add(self.primary, self.aligned)",
"def get_descriptions(codes, zlookup, folder=default_folder, prefix=default_prefix):\n \n if zlookup.lower()=='histology':\n return get_histology_description_1(codes, folder, prefix)\n if zlookup.lower()=='deathcause':\n return get_deathcause_description(codes, folder, prefix)\n \n #check inputs are valid\n if zlookup.lower()=='icdclassification':\n raise ValueError(\"this function won't work for icdclassification. did you want icd?\")\n if not zlookup.lower() in zlookup_table_names:\n raise ValueError(\"zlookup must be in \" + str(zlookup_table_names))\n \n zlookup_table = load_zlookup_table(zlookup, folder, prefix)\n return pd.Series(codes).map(zlookup_table[\"SHORTDESC\"])",
"def element_descriptor(protein, ligand, binsize=0.0):\n\t# SUPPRESS OPENBABEL WARNINGS\n\tpybel.ob.obErrorLog.StopLogging()\n\n\t# ELEMENT TABLE TO DETERMINE VDW AND COVALENT BONDS\n\tet = OBElementTable()\n\n\t# CONVERT ELEMENT SYMBOLS TO ATOMIC NUMBERS\n\tatomicnums = (et.GetAtomicNum(str(element)) for element in config['elements'])\n\tatomicnums_pro = (et.GetAtomicNum(str(element)) for element in config['elements_pro'])\n\t#print(et.GetAtomicNum(\"Me\"), \"Fe\")\n\n\t# CREATE A NUMERICAL ID TO ELEMENT COMBINATION MAPPING\n\t# IMPORTANT TO MAP THE DESCRIPTOR VECTOR BACK TO THE LABELS\n\t#element_pairs = product(sorted(atomicnums),repeat=2)\n\telement_pairs = product(sorted(atomicnums),sorted(atomicnums_pro),repeat=1)\n\telement_pairs = dict((p,i) for i,p in enumerate(element_pairs))\n\n\n\t# ALSO CREATE A COLUMN LABEL FOR THIS DESCRIPTOR\n\tsorted_pairs = zip(*sorted(element_pairs.items(), key=itemgetter(1)))[0]\n\t#print(sorted_pairs)\n\n\tnumcols = len(element_pairs)\n\n\t# GENERATE THE DISTANCE BINS\n\tif binsize:\n\n\t\t# get the distance bins for the given cutoff and bin size\n\t\tbins = get_distance_bins(config['cutoff'], binsize)\n\n\t\t# NUMBER OF TOTAL COLUMNS IN DESCRIPTOR\n\t\tnumcols *= (bins.size + 1)\n\n\t\t# CREATE A COLUMN FOR EACH ELEMENT PAIR AND DISTANCE BIN\n\t\tlabels = []\n\t\tfor x,y in sorted_pairs:\n\t\t\tfor i in range(len(bins) + 1):\n\t\t\t\tlabel = \"{0}.{1}-B{2}\".format(et.GetSymbol(x), et.GetSymbol(y), i)\n\t\t\t\tlabels.append(label)\n\n\t# LABEL WITHOUT BINS\n\telse:\n\t\tlabels = ['.'.join((et.GetSymbol(x),et.GetSymbol(y))) for x,y in sorted_pairs]\n\n\t# DESCRIPTOR THAT WILL CONTAIN THE SUM OF ALL ELEMENT-ELEMENT INTERACTIONS\n\tdescriptor = numpy.zeros(numcols, dtype=int)\n\n\t# GET THE CONTACTS\n\tcontacts = get_contacts(protein, ligand, config['cutoff'])\n\n\t# ITERATE THROUGH CONTACT PAIRS AND DETERMINE SIFT\n\tfor hetatm, hetatm_contacts in contacts:\n\t\thetatm_num = hetatm.GetAtomicNum()\n\n\t# ITERATE THROUGH ALL THE CONTACTS THE HETATM HAS\n\t\tfor atom, distance in hetatm_contacts:\n\t\t\tresidue = atom.GetResidue()\n\n\t\t\tif residue.GetAtomID(atom).strip() in ['FE','FE2']:\n\t\t\t\tatom_num == 26\n\t\t\telse:\n\t\t\t\tatom_num = atom.GetAtomicNum()\n\n\t\t\t# IGNORE WATER RESIDUES\n\t\t\tif residue.GetName() == 'HOH': continue\n\n\t\t\t# IGNORE ZN,FE ETC.\n\t\t\ttry: index = element_pairs[(atom_num, hetatm_num)]\n\t\t\texcept KeyError: continue\n\t\t\t#print(element_pairs, 'ele')\n\n\t\t\t# BIN INTERACTIONS\n\t\t\tif binsize:\n\n\t\t\t\t# GET THE BIN THIS CONTACT BELONGS IN\n\t\t\t\t# DIGITIZE TAKES AN ARRAY-LIKE AS INPUT\n\t\t\t\tbin_id = numpy.digitize([distance,], bins)[0]\n\t\t\t\tdescriptor[1 + index + index*bins.size + bin_id] += 1\n\n\t\t\telse:\n\n\t\t\t\t# ELEMENTS ARE SORTED NUMERICALLY\n\t\t\t\tdescriptor[index] += 1\n\n\tif binsize: sum_descriptor_bins(descriptor, bins)\n\n\treturn descriptor, labels"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A method used for calculating composition descriptors based on Solvent Accessibility of AADs.
|
def CalculateCompositionSolventAccessibility(ProteinSequence):
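    # Composition (C) descriptor of the CTD scheme: delegates to CalculateComposition
    # with the _SolventAccessibility grouping (presumably the buried/exposed/intermediate
    # classes defined elsewhere in the module) and returns the per-class fractions.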
    result = CalculateComposition(ProteinSequence, _SolventAccessibility, '_SolventAccessibility')
    return result
|
[
"def get_pymatgen_descriptor(composition, property_name):\n eldata = []\n # what are these named tuples for? not used or returned! -KM\n eldata_tup_lst = []\n eldata_tup = collections.namedtuple('eldata_tup', 'element propname propvalue propunit amt')\n\n oxidation_states = {}\n if isinstance(composition, Composition):\n # check whether the composition is composed of oxidation state decorates species (not just plain Elements)\n if hasattr(composition.elements[0], \"oxi_state\"):\n oxidation_states = dict([(str(sp.element), sp.oxi_state) for sp in composition.elements])\n el_amt_dict = composition.get_el_amt_dict()\n # string\n else:\n comp, oxidation_states = get_composition_oxidation_state(composition)\n el_amt_dict = comp.get_el_amt_dict()\n\n symbols = sorted(el_amt_dict.keys(), key=lambda sym: get_el_sp(sym).X)\n\n for el_sym in symbols:\n\n element = Element(el_sym)\n property_value = None\n property_units = None\n\n try:\n p = getattr(element, property_name)\n except AttributeError:\n print(\"{} attribute missing\".format(property_name))\n raise\n\n if p is not None:\n if property_name in ['ionic_radii']:\n if oxidation_states:\n property_value = element.ionic_radii[oxidation_states[el_sym]]\n property_units = Unit(\"ang\")\n else:\n raise ValueError(\"oxidation state not given for {}; It does not yield a unique \"\n \"number per Element\".format(property_name))\n else:\n property_value = float(p)\n\n # units are None for these pymatgen descriptors\n # todo: there seem to be a lot more unitless descriptors which are not listed here... -Alex D\n if property_name not in ['X', 'Z', 'group', 'row', 'number', 'mendeleev_no', 'ionic_radii']:\n property_units = p.unit\n\n # Make a named tuple out of all the available information\n eldata_tup_lst.append(eldata_tup(element=el_sym, propname=property_name, propvalue=property_value,\n propunit=property_units, amt=el_amt_dict[el_sym]))\n\n # Add descriptor values, one for each atom in the compound\n for i in range(int(el_amt_dict[el_sym])):\n eldata.append(property_value)\n\n return eldata",
"def describe(self, access, element):\n self._prepare(access)\n # Accumulate the descriptor sets from each ability, then turn into a string.\n tags = set()\n for c in self.abilities:\n tags |= c.describe(access, element)\n return ' '.join(list(tags)).lower()",
"def CalcDescriptors(self, mol, *args, **kwargs):\n res = [-666] * len(self.simpleList)\n for i, nm in enumerate(self.simpleList):\n fn = getattr(DescriptorsMod, nm, lambda x: 777)\n try:\n res[i] = fn(mol)\n except Exception:\n import traceback\n traceback.print_exc()\n return tuple(res)",
"def descriptors(self):\n descs = []\n for x in xrange(0, 4):\n desc = self.GetDescriptor(x)\n if desc:\n descs.append(desc)\n return descs",
"def aic(self):\n aics = []\n aics_bool = []\n for i, chain in enumerate(self.parent.chains):\n p, n_data, n_free = chain.posterior, chain.num_eff_data_points, chain.num_free_params\n if p is None or n_data is None or n_free is None:\n aics_bool.append(False)\n missing = \"\"\n if p is None:\n missing += \"posterior, \"\n if n_data is None:\n missing += \"num_eff_data_points, \"\n if n_free is None:\n missing += \"num_free_params, \"\n\n self._logger.warning(\"You need to set %s for chain %s to get the AIC\" % (missing[:-2], chain.name))\n else:\n aics_bool.append(True)\n c_cor = 1.0 * n_free * (n_free + 1) / (n_data - n_free - 1)\n aics.append(2.0 * (n_free + c_cor - np.max(p)))\n if len(aics) > 0:\n aics -= np.min(aics)\n aics_fin = []\n i = 0\n for b in aics_bool:\n if not b:\n aics_fin.append(None)\n else:\n aics_fin.append(aics[i])\n i += 1\n return aics_fin",
"def Attributes(self) -> _n_5_t_17:",
"def get_descriptors(self):\n\n # If any descriptors should be ignored, put their names in the list\n # below.\n blacklist = []\n\n results = []\n for name, descriptor in Chemical:\n if name in blacklist:\n continue\n results.append(sum(descriptor(chem) for chem in self.reactants\n if chem.a.size > 1))\n results.append(sum(descriptor(chem) for chem in self.products\n if chem.a.size > 1))\n return results",
"def attractors(self):\n if not self.__landscaped:\n self.landscape()\n if not self.__expounded:\n self.expound()\n return self.__landscape_data.attractors",
"def _inspect_descriptor(descriptor):\n # TODO memoize to cache these results\n data_keys = descriptor.data_keys\n is_external = defaultdict(lambda: False)\n for data_key, data_key_dict in data_keys.items():\n if (data_key_dict and 'external' in data_key_dict):\n is_external[data_key] = bool(data_key_dict['external'])\n return is_external",
"def numberConceptsAndComputeIntroduced(self):\n\n numCon = len(self.concepts)\n curConNum = 0\n for curConcept in self.concepts:\n curConcept.cnum = curConNum\n if curConNum % 1000 == 0:\n print(\"computing introduced objects and attributes for concept %d of %d\" % (curConNum, numCon))\n curConcept.upperNeighbours.sort()\n curConcept.lowerNeighbours.sort()\n curConcept.introducedObjects = set(curConcept.extent)\n for ln in curConcept.lowerNeighbours:\n curConcept.introducedObjects.difference_update(ln.extent)\n curConcept.introducedAttributes = set(curConcept.intent)\n for un in curConcept.upperNeighbours:\n curConcept.introducedAttributes.difference_update(un.intent)\n curConNum += 1\n print(\"Done with introduced objects and attributes\")",
"async def get_expert_advisors(self) -> List[ExpertAdvisor]:",
"def Attributes(self) -> EPlotAttributeCollection:",
"def attributes_desc():\n columns = [\n '5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive', 'Bags_Under_Eyes', 'Bald', 'Bangs',\n 'Big_Lips', 'Big_Nose', 'Black_Hair', 'Blond_Hair', 'Blurry', 'Brown_Hair', 'Bushy_Eyebrows',\n 'Chubby', 'Double_Chin', 'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones',\n 'Male', 'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard', 'Oval_Face', 'Pale_Skin',\n 'Pointy_Nose', 'Receding_Hairline', 'Rosy_Cheeks', 'Sideburns', 'Smiling', 'Straight_Hair', 'Wavy_Hair',\n 'Wearing_Earrings', 'Wearing_Hat', 'Wearing_Lipstick', 'Wearing_Necklace', 'Wearing_Necktie', 'Young',\n ]\n\n return map(str.lower, columns)",
"def get_descriptors(self, type_descriptor):\n raise NotImplementedError",
"def contract_exchange_descriptors(desc):\n # desc[0:6] = rho_data\n # desc[6:7] = g0\n # desc[7:10] = g1\n # desc[10:15] = g2\n # desc[15] = g0-r^2\n # g1 order: x, y, z\n # g2 order: xy, yz, z^2, xz, x^2-y^2\n\n N = desc.shape[1]\n res = np.zeros((12,N))\n rho_data = desc[:6]\n\n rho, s, alpha, tau_w, tau_unif = get_dft_input2(desc[:6])\n sprefac = 2 * (3 * np.pi * np.pi)**(1.0/3)\n n43 = rho**(4.0/3)\n svec = desc[1:4] / (sprefac * n43 + 1e-16)\n\n res[0] = rho\n res[1] = s**2\n res[2] = alpha\n\n # other setup\n g0 = desc[6]\n g1 = desc[7:10]\n g2 = desc[10:15]\n\n # g1_norm and 1d dot product\n g1_norm = np.linalg.norm(g1, axis=0)**2\n dot1 = np.einsum('an,an->n', svec, g1)\n\n # Clebsch Gordan https://en.wikipedia.org/wiki/Table_of_Clebsch%E2%80%93Gordan_coefficients\n g2_norm = 0\n for i in range(5):\n g2_norm += g2[i] * g2[i]\n g2_norm /= np.sqrt(5)\n\n res[3] = g0\n res[4] = g1_norm\n res[5] = dot1\n res[6] = g2_norm\n\n sgc = contract21(g2, svec)\n sgg = contract21(g2, g1)\n\n res[7] = np.einsum('pn,pn->n', sgc, svec)\n res[8] = np.einsum('pn,pn->n', sgc, g1)\n res[9] = np.einsum('pn,pn->n', sgg, g1)\n\n res[10] = desc[15]\n res[11] = desc[16]\n\n # res\n # 0: rho\n # 1: s\n # 2: alpha\n # 3: g0\n # 4: norm(g1)**2\n # 5: g1 dot svec\n # 6: norm(g2)**2\n # 7: svec dot g2 dot svec\n # 8: g1 dot g2 dot svec\n # 9: g1 dot g2 dot g1\n # 10: g0-r^2\n # 11: g0-r^4\n return res",
"def rdkit_descriptors(mol):\n dd = {}\n if mol is not None:\n try:\n dd = {k:fn(mol) for k,fn in INTERESTING_DESCRIPTORS.items()}\n inchi = Chem.MolToInchi(mol, options='/SUU')\n inchi_info = InchiInfo.InchiInfo(inchi).get_sp3_stereo()\n (n_stereo, n_undef_stereo, is_meso, dummy) = inchi_info['main']['non-isotopic']\n dd['NumChiralCenters'] = n_stereo\n dd['NumDefinedChiralCenters'] = n_stereo - n_undef_stereo\n dd['NumUndefinedChiralCenters'] = n_undef_stereo\n dd['IsMesoStructure'] = is_meso\n except (ValueError):\n pass\n\n return dd",
"def attributeInfo(multi=bool, inherited=bool, bool=bool, internal=bool, type=\"string\", hidden=bool, enumerated=bool, allAttributes=bool, logicalAnd=bool, writable=bool, userInterface=bool, leaf=bool, short=bool):\n pass",
"def getMoleculeFeatures(self):\n title = self.__pybelMol.title\n molWeight = self.__pybelMol.molwt\n formula = self.__pybelMol.formula\n ccId = title\n ifCharge = self.__pybelMol.charge\n logger.info(\"%s formula %s charge %d mw %f\", title, formula, ifCharge, molWeight)\n inchi = self.__pybelMol.write(\"inchi\").strip()\n inchiKey = self.__pybelMol.write(\"inchikey\").strip()\n smiles = self.__pybelMol.write(\"can\", opt={\"n\": None}).strip()\n isoSmiles = self.__pybelMol.write(\"can\", opt={\"i\": None, \"n\": None}).strip()\n details = ComponentDetails(ccId=ccId, formula=formula, ifCharge=ifCharge)\n descriptors = ComponentDescriptors(smiles=smiles, isoSmiles=isoSmiles, inchi=inchi, inchiKey=inchiKey)\n #\n #\n typeCounts = defaultdict(int)\n ccAtomD = {}\n ccAtomIdD = {}\n for ii, pat in enumerate(self.__pybelMol.atoms, 1):\n at = pat.OBAtom\n atIdx = at.GetIdx()\n # atNo = at.GetAtomicNum()\n aType = at.GetType()\n typeCounts[aType] += 1\n atName = self.__atomIdxD[ii] if ii in self.__atomIdxD else aType + str(typeCounts[aType])\n #\n isAromatic = at.IsAromatic()\n isChiral = at.IsChiral()\n iCharge = at.GetFormalCharge()\n cipStereo = None\n ccAtomD[atName] = ComponentAtom(name=atName, aType=aType, isAromatic=isAromatic, isChiral=isChiral, CIP=cipStereo, fCharge=iCharge)\n ccAtomIdD[atIdx] = atName\n logger.debug(\"%s Atom %s %s %r %r %s\", ccId, atName, aType, isAromatic, isChiral, cipStereo)\n #\n ccBondD = {}\n for bnd in openbabel.OBMolBondIter(self.__pybelMol.OBMol):\n atI = bnd.GetBeginAtomIdx()\n atJ = bnd.GetEndAtomIdx()\n atNameI = ccAtomIdD[atI]\n atNameJ = ccAtomIdD[atJ]\n isAromatic = bnd.IsAromatic()\n iType = bnd.GetBondOrder()\n cipStereo = None\n logger.debug(\"Bond %s %s iType %r cipStereo %r aromatic %r\", atNameI, atNameJ, iType, cipStereo, isAromatic)\n #\n ccBondD[(atNameI, atNameJ)] = ComponentBond(iType=iType, isAromatic=isAromatic, CIP=cipStereo)\n #\n ccD = {\"details\": details, \"descriptors\": descriptors, \"atoms\": ccAtomD, \"bonds\": ccBondD}\n return ccD",
"def get_experimental_design(self):\n\n return {\n 'minimumNumberOfParticipants': self._get_min_num_participants(),\n 'numberOfConditionsPerParticipant': self._get_num_conditions_per_participant(),\n 'arrangements': self._get_arrangement()\n }",
"def _get_shape_ap_def(self, aperture):\n\n # get type and shape mods\n shape = aperture.shape\n if isinstance(shape, Circle):\n type_ = SHAPE_TAGS['circle']['char']\n mods = [self._convert_units_str(shape.radius * 2)]\n elif isinstance(shape, Rectangle):\n type_ = SHAPE_TAGS['rectangle']['char']\n mods = [self._convert_units_str(shape.width),\n self._convert_units_str(shape.height)]\n elif isinstance(shape, Obround):\n type_ = SHAPE_TAGS['obround']['char']\n mods = [self._convert_units_str(shape.width),\n self._convert_units_str(shape.height)]\n elif isinstance(shape, RegularPolygon):\n rot = shape.rotation\n rotation = int(rot and (2 - rot) * 180 or 0)\n vertices = [(self._convert_units_str(p.x), self._convert_units_str(p.y)) for p in shape.vertices]\n type_ = SHAPE_TAGS['reg_polygon']['char']\n mods = [self._convert_units_str(shape.outer_diameter),\n vertices,\n rotation]\n elif isinstance(shape, str):\n type_ = shape\n mods = []\n\n # add hole mods\n hole = aperture.hole\n if isinstance(hole, Circle):\n hole_mods = [self._convert_units_str(hole.radius)]\n elif hole:\n hole_mods = [self._convert_units_str(hole.width), self._convert_units_str(hole.height)]\n else:\n hole_mods = []\n mods += hole_mods\n\n # generate param\n mods = 'X'.join(str(m) for m in mods)\n mods_def = (mods and AP_MODS.format(mods=mods) or '')\n ap_def = APERTURE.format(code=aperture.code,\n type=type_,\n mods=mods_def)\n return LINE.format(ap_def)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A method used for calculating Transition descriptors based on Hydrophobicity of AADs.
|
def CalculateTransitionHydrophobicity(ProteinSequence):
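    # Transition (T) descriptor of the CTD scheme: delegates to CalculateTransition
    # with the _Hydrophobicity grouping (presumably the polar/neutral/hydrophobic
    # classes defined elsewhere in the module) and returns the class-transition frequencies.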
    result = CalculateTransition(ProteinSequence, _Hydrophobicity, '_Hydrophobicity')
    return result
|
[
"def _cal_hoag(self) -> AbstractHOAG:\n # 读入梯度信息\n with open(self.our_work, \"r\") as fp:\n lines = fp.readlines()\n # 计算loss的值\n loss = [float(line.strip().split()[0]) for line in lines]\n gradient = []\n for i in range(1, len(loss)):\n gradient.append((loss[i] - loss[i - 1]) * len(loss))\n hoag = DummyHOAG(0.00095, 1, np.array(gradient))\n\n return hoag",
"def hmm_aic(n_states_options,training_data,training_lengths,timer=True,plot=True):\r\n AIC = []\r\n for k in n_states_options:\r\n if timer is True:\r\n lap = laptimer()\r\n model = hmm_model.GaussianHMM(\r\n k,\r\n algorithm='viterbi',\r\n n_iter=10)\r\n model.fit(training_data.transpose(), training_lengths)\r\n \r\n \r\n logprob = model.decode(training_data.transpose(),algorithm='viterbi')[0]\r\n n_params = 2*model.n_components*model.n_features +(model.n_components)**2 -1\r\n aic = aic = 2*(n_params) - 2*logprob\r\n AIC.append(aic)\r\n if timer is True and k>15:\r\n print(f\"finished generating model with {k} states\")\r\n print(f\"Processing time: {np.round(lap(), 1)} seconds\")\r\n plt.figure(figsize=(4,1.5))\r\n plt.plot(n_states_options,AIC)\r\n plt.title(\"AIC\")\r\n return AIC",
"def feature_extraction(img, feature):\r\n\r\n if feature == 'HoG':\r\n # HoG parameters\r\n win_size = (32, 32)\r\n block_size = (32, 32)\r\n block_stride = (16, 16)\r\n cell_size = (16, 16)\r\n nbins = 9\r\n deriv_aperture = 1\r\n win_sigma = 4\r\n histogram_norm_type = 0\r\n l2_hys_threshold = 2.0000000000000001e-01\r\n gamma_correction = 0\r\n nlevels = 64\r\n \r\n # Your code here. You should also change the return value.\r\n\r\n hog = cv2.HOGDescriptor(win_size,block_size,block_stride,cell_size,nbins,deriv_aperture,win_sigma,histogram_norm_type,l2_hys_threshold,gamma_correction,nlevels)\r\n\r\n dsize = hog.getDescriptorSize()\r\n descripters = hog.compute(img,winStride=(32,32),padding=(0,0))\r\n descripters = descripters.reshape(-1,dsize)\r\n\r\n\r\n elif feature == 'SIFT':\r\n sift = cv2.xfeatures2d.SIFT_create()\r\n descripters = []\r\n height= img.shape[0]\r\n width = img.shape[1]\r\n split1 = np.array_split(img, width/20, axis=1)\r\n for split in split1:\r\n split2 =np.array_split(split, height/20, axis=0)\r\n for ig in split2:\r\n keypoints, descripter = sift.detectAndCompute(ig,None)\r\n if descripter is not None:\r\n descripters.append(descripter)\r\n if len(descripters) > 0:\r\n descripters = np.vstack(descripters)\r\n else: \r\n return None\r\n return descripters",
"def _HAC_model():\n\n\tclf = AgglomerativeClustering()\n\treturn clf",
"def FK_dh(joint_angles,link):\n # print (\"DOING DH\")\n\n base_theta=joint_angles[0]\n shoulder_theta=joint_angles[1]\n elbow_theta=joint_angles[2]\n w1_theta=joint_angles[3]\n w2_theta=joint_angles[4]\n\n # Defining DH table parameters \n\n # Distances are in mm\n\n d1=118\n a2=99\n a3=112\n a4=109\n\n # d1=122.14 \n # a2=105\n # a3=126.77\n # a4=122.12\n\n a=np.array([0,a2,a3,a4])\n alpha=np.array([np.pi/2,0,0,0])\n d=np.array([d1,0,0,0])\n theta=np.array([base_theta,shoulder_theta+np.pi/2,elbow_theta,w2_theta])\n\n # Defining functions to compute matrices\n\n def Trans_z_d (d):\n return np.array([[1,0,0,0],[0,1,0,0],[0,0,1,d],[0,0,0,1]])\n\n def Trans_x_a (a):\n return np.array([[1,0,0,a],[0,1,0,0],[0,0,1,0],[0,0,0,1]])\n\n def Rot_z_theta (theta):\n return np.array([[np.cos(theta),-np.sin(theta),0,0],[np.sin(theta),np.cos(theta),0,0],[0,0,1,0],[0,0,0,1]])\n\n def Rot_x_alpha (alpha):\n return np.array([[1,0,0,0],[0,np.cos(alpha),-np.sin(alpha),0],[0,np.sin(alpha),np.cos(alpha),0],[0,0,0,1]])\n\n # Computing the H matrix \n H=np.identity(4)\n \n for i in range(4):\n A=np.matmul(Rot_z_theta(theta[i]),np.matmul(Trans_z_d(d[i]),np.matmul(Trans_x_a(a[i]),Rot_x_alpha(alpha[i]))))\n H=np.matmul(H,A)\n\n # Calculating phi as the euler angle about the y-axis in the base frame\n\n phi=np.array([joint_angles[1]+joint_angles[2]+joint_angles[4]])\n\n # Extracting the required x,y and z elements from H matrix\n #print(H)\n H=H[0:3,-1]\n #print(H)\n np.append(H, phi) \n\n return H\n pass",
"def create_segments_and_labels(df, time_steps, step, labeldf, isBehavioral):\n\n # list number of features to be extracted, can be changed depending on what features are desired to be extracted\n N_FEATURES = 4\n\n segments = []\n palm = []\n hr = []\n br = []\n per = []\n labels = []\n for j in range(0, len(df)):\n for i in range(1, len(df[j]) - time_steps, step):\n if isBehavioral is True:\n steering = df[j]['Steering'].values[i: i + time_steps]\n acceleration = df[j]['Acceleration'].values[i: i + time_steps]\n speed = df[j]['Speed'].values[i: i + time_steps]\n brake = df[j]['Brake'].values[i: i + time_steps]\n segments.append([steering, acceleration, speed, brake])\n else:\n palmEDA = df[j]['Palm.EDA'].values[i: i + time_steps]\n heartRate = df[j]['Heart.Rate'].values[i: i + time_steps]\n breathingRate = df[j]['Breathing.Rate'].values[i: i + time_steps]\n perinasalPerspiration = df[j]['Perinasal.Perspiration'].values[i: i + time_steps]\n palm.append([palmEDA])\n hr.append([heartRate])\n br.append([breathingRate])\n per.append([perinasalPerspiration])\n\n maxLabel = labeldf[j]['Max'].values[i: i + time_steps]\n labels.append(maxLabel)\n\n if isBehavioral is False:\n palm_r = np.asarray(palm, dtype=np.float32).reshape(-1, time_steps, 1)\n hr_r = np.asarray(hr, dtype=np.float32).reshape(-1, time_steps, 1)\n br_r = np.asarray(br, dtype=np.float32).reshape(-1, time_steps, 1)\n per_r = np.asarray(per, dtype=np.float32).reshape(-1, time_steps, 1)\n list_f = []\n list_f.append(palm_r)\n list_f.append(hr_r)\n list_f.append(br_r)\n list_f.append(per_r)\n labels = np.asarray(labels)\n\n return list_f, labels\n\n # Bring the segments into a better shape\n print(len(segments[0]))\n reshaped_segments = np.asarray(segments, dtype=np.float32).reshape(-1, time_steps, N_FEATURES)\n labels = np.asarray(labels)\n\n return reshaped_segments, labels",
"def feature_extraction(img, feature):\n\n if feature == 'HoG':\n # HoG parameters\n win_size = (32, 32)\n block_size = (32, 32)\n block_stride = (16, 16)\n cell_size = (16, 16)\n nbins = 9\n deriv_aperture = 1\n win_sigma = 4\n histogram_norm_type = 0\n l2_hys_threshold = 2.0000000000000001e-01\n gamma_correction = 0\n nlevels = 64\n\n\n # Your code here. You should also change the return value.\n\n # make HOG descriptor model with given parameter\n hog = cv2.HOGDescriptor(win_size, block_size, block_stride, cell_size, nbins, deriv_aperture, win_sigma, histogram_norm_type, l2_hys_threshold, gamma_correction, nlevels)\n\n m = img.shape[0]\n n = img.shape[1]\n\n all_f = []\n\n # divide original image with 16 X 16 grid and make subimages, so find HoG descriptor by grouped 4 cell (32 X 32) and 16 X 16 stride\n for i in range(int(m / 16) - 1):\n for j in range(int(n / 16) - 1):\n x = i * 16\n y = j * 16\n h = hog.compute(img[x:x+32, y:y+32])\n all_f.append(np.reshape(h, (1, 36)))\n\n # combine Hog desciptor from sub images\n all_f = np.concatenate(all_f, 0)\n return all_f\n\n elif feature == 'SIFT':\n\n # Your code here. You should also change the return value.\n m = img.shape[0]\n n = img.shape[1]\n\n all_f = []\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n sift = cv2.xfeatures2d.SIFT_create()\n\n # divide original image with 20 X 20 grid and make subimages, so find SIFT descriptor by 20 X 20 sub images.\n for i in range(int(m / 20)):\n for j in range(int(n / 20)):\n x = i * 20\n y = j * 20\n kp, des = sift.detectAndCompute(gray[x:x+20, y:y+20], None)\n if len(kp) != 0:\n all_f.append(des)\n\n #If sift is not detected, exception handling is done.\n if len(all_f) != 0:\n all_f = np.concatenate(all_f, 0)\n else:\n all_f = None\n\n return all_f",
"def transition(self, age_i, t_st, t_tr, dt, temp_i, temp_f,\n accum_i, accum_f):\n\n\n t_final = 2*t_st + t_tr\n time = np.arange(0, t_final+dt, dt)\n t_loc = t_final/2\n\n rv = stats.norm(loc = t_loc, scale = float(t_tr/4))\n\n temp = temp_i + (temp_f - temp_i)*rv.cdf(time)\n accum = accum_i + (accum_f - accum_i)*rv.cdf(time)\n\n sigma_instance = Sigma(P = 0.75, rho_o = 330, rho_i = 917, rho_c = 550,\\\n rho_co = 804.3, fo = 1, f1 = 1)\n\n\n sigma_18 = np.zeros(np.size(temp))\n sigma_D = np.zeros(np.size(temp))\n sigma_sq = np.zeros(np.size(temp))\n\n for i in np.arange(np.size(temp)):\n sigma_18[i], sigma_D[i] = sigma_instance.semi_analytical_HL(T = temp[i], accum = accum[i])\n\n\n plt.clf()\n time = age_i - time\n sigma_18 = sigma_18*917./804.3\n sigma_D = sigma_D*917./804.3\n sigma_sq = sigma_D*2 - sigma_18**2\n plt.figure(845)\n plt.subplot(311)\n plt.plot(time, sigma_18)\n plt.subplot(312)\n plt.plot(time, sigma_D)\n plt.subplot(313)\n plt.plot(time, sigma_sq)\n\n return time, temp, accum, sigma_18, sigma_D, sigma_sq",
"def test_make_adaptive_delta_h_true():\n hops = HOPS(\n sys_param,\n noise_param=noise_param,\n hierarchy_param=hier_param,\n eom_param=eom_param,\n integration_param=integrator_param,\n )\n hops.make_adaptive(delta_h=1e-4, delta_s=0)\n adap = hops.basis.eom.param[\"ADAPTIVE\"]\n known_adap = True\n assert adap == known_adap\n\n adap_h = hops.basis.eom.param[\"ADAPTIVE_H\"]\n known_adap_h = True\n assert adap_h == known_adap_h\n\n delta_h = hops.basis.eom.param[\"DELTA_H\"]\n known_delta_h = 1e-4\n assert delta_h == known_delta_h\n\n adap_s = hops.basis.eom.param[\"ADAPTIVE_S\"]\n known_adap_s = False\n assert adap_s == known_adap_s",
"def create_dicts(self):\n print(\"There are \" + str(self.matrix.shape[1]) + \" features and \")\n print(str(self.matrix.shape[0]) + \" instances to consider\")\n possible_labels = list(set(self.labels))\n matricies = {}\n ig_dict = {}\n indexes_dict = {}\n sums = {}\n probabilities = {}\n total_sum = float(self.matrix.sum())\n ig_term1 = 0\n for label in possible_labels:\n row_slice = [True if val == label else False for val in self.labels]\n matricies[label] = self.matrix[row_slice, :]\n sums[label] = float(matricies[label].sum())\n probabilities[label] = max(sums[label] / total_sum, 0.00000000001)\n ig_term1 += probabilities[label] * log(probabilities[label])\n\n ig_term1 *= -1\n print(\"Calculating information gain for feature: \")\n print(\"\\r0\", end='')\n for col_index in range(len(self.vocab)):\n if col_index % 100 == 0:\n print(\"\\r\" + str(col_index), end=\"\")\n term = self.vocab[col_index]\n t_count = max(float(self.matrix[:, col_index].sum()), 0.00000000001)\n label_counts = {}\n ig_term2 = 0\n ig_term3 = 0\n p_t = float(t_count) / total_sum\n p_tbar = 1 - p_t\n for label in possible_labels:\n try:\n label_counts[label] = float(a_matrix[:, col_index].sum())\n except:\n label_counts[label] = 0.0\n p_c1_t = max(label_counts[label] / t_count, 0.00000000001)\n ig_term2 += p_c1_t * log(p_c1_t)\n p_c1_tbar = max((sums[label] - label_counts[label]) / (total_sum - t_count), 0.00000000001)\n ig_term3 += p_c1_tbar * log(p_c1_tbar)\n\n ig_term2 *= p_t\n ig_term3 *= p_tbar\n ig = ig_term1 + ig_term2 + ig_term3\n # print ig\n ig_dict[term] = ig\n indexes_dict[term] = col_index\n\n self.ig_dict = ig_dict\n self.indexes_dict = indexes_dict",
"def get_full_hypnogram(self):\n return self.hypnogram.to_dense()[\"sleep_stage\"].to_numpy().reshape(-1, 1)",
"def create_features():\n if insight:\n data = pd.read_csv(insight_subjective_path)\n else:\n data = pd.read_csv(epoc_subjective_path)\n\n for video_index in range(1, video_count+1):\n video_df = data[data['Video'] == video_index]\n\n count = video_df.__len__()\n arousal_sum = sum(video_df['Arousal'])\n valence_sum = sum(video_df['Valence'])\n\n data.loc[data['Video'] == video_index, 'Mean_video_arousal'] = arousal_sum / count\n data.loc[data['Video'] == video_index, 'Mean_video_valence'] = valence_sum / count\n\n arousal_min = min(video_df['Arousal'])\n valence_min = min(video_df['Valence'])\n arousal_max = max(video_df['Arousal'])\n valence_max = max(video_df['Valence'])\n\n data.loc[data['Video'] == video_index, 'Min_video_arousal'] = arousal_min\n data.loc[data['Video'] == video_index, 'Min_video_valence'] = valence_min\n data.loc[data['Video'] == video_index, 'Max_video_arousal'] = arousal_max\n data.loc[data['Video'] == video_index, 'Max_video_valence'] = valence_max\n\n data['Arousal^2'] = data['Arousal']**2\n data['Valence^2'] = data['Valence']**2\n\n return data",
"def form_analysis_data(self):\n fatal_percent_sum = 0\n self.analysis_dct[\"max_fatalities\"] = 0\n self.analysis_dct[\"phases\"] = {}\n self.analysis_dct[\"damage\"] = {}\n self.analysis_dct[\"years\"] = []\n destroyed_dct = {}\n\n for accident in self.accidents:\n accident.process_data()\n fatal_percent_sum += accident.fatalities_percent\n if accident.fatalities > self.analysis_dct[\"max_fatalities\"]:\n self.analysis_dct[\"max_fatalities\"] = accident.fatalities\n\n if accident.phase not in self.analysis_dct[\"phases\"].keys():\n self.analysis_dct[\"phases\"][accident.phase] = 1\n else:\n self.analysis_dct[\"phases\"][accident.phase] += 1\n\n if accident.damage not in self.analysis_dct[\"damage\"].keys():\n self.analysis_dct[\"damage\"][accident.damage] = 1\n else:\n self.analysis_dct[\"damage\"][accident.damage] += 1\n\n if accident.damage == \"Destroyed\" or accident.damage == \"Substantial\":\n if accident.phase not in destroyed_dct.keys():\n destroyed_dct[accident.phase] = 1\n else:\n destroyed_dct[accident.phase] += 1\n\n self.analysis_dct[\"years\"].append(accident.aircraft_years)\n\n self.analysis_dct[\"accidents_number\"] = len(self.accidents)\n self.analysis_dct[\"fatalities_percent\"] = fatal_percent_sum / self.analysis_dct[\"accidents_number\"]\n max_percent_phase = sorted(list(self.analysis_dct['phases'].items()), key=lambda x: x[1], reverse=True)[0][0]\n max_percent_phase_num = max(self.analysis_dct['phases'].values()) / sum(self.analysis_dct['phases'].values()) * 100\n self.analysis_dct[\"max_percent_phase\"] = (max_percent_phase, max_percent_phase_num)\n max_destroyed_planes_phase = sorted(list(self.analysis_dct['phases'].items()), key=lambda x: x[1], reverse=True)[0]\n self.analysis_dct[\"destroyed_damage\"] = max_destroyed_planes_phase",
"def _p_to_h_on_basis(self, A):\n h = self.realization_of().h()\n P_refine = Poset((A.refinements(), A.parent().lt))\n c = abs(prod((-1)**(i-1) * factorial(i-1) for i in A.shape()))\n R = self.base_ring()\n return h._from_dict({B: R(P_refine.moebius_function(B, A) / ZZ(c))\n for B in P_refine}, remove_zeros=False)",
"def create_hdas(HDAs, restore_purged, restore_deleted, verbose):\n if verbose:\n print(\"\\n ####### HistoryDatasetAssociation #######\")\n for the_hda in HDAs:\n # check if the corresponding hda already exists\n (the_hda_e, ), = sa_session.query(exists().\\\n where(HistoryDatasetAssociation.hid == the_hda['hid']).\\\n where(HistoryDatasetAssociation.info == the_hda['misc_info']).\\\n where(HistoryDatasetAssociation.blurb == the_hda['misc_blurb']).\\\n where(HistoryDatasetAssociation.name == the_hda['name']))\n # check if the corresponding history already exists\n the_history = sa_session.query(History).get(the_hda['history_id'])\n if the_hda_e is False:\n if verbose:\n print(\"A new HistoryDatasetAssociation has been \"+\\\n \"discovered: %s\" %(the_hda['name']))\n new_hda = HistoryDatasetAssociation()\n new_hda.hid = the_hda['hid']\n new_hda.info = the_hda['misc_info']\n new_hda.blurb = the_hda['misc_blurb']\n new_hda.name = the_hda['name']\n new_hda.peek = the_hda['peek']\n new_hda.extension = the_hda['file_ext']\n new_hda.dbkey=the_hda['metadata_dbkey']\n new_hda.visible = the_hda['visible']\n new_hda.deleted = the_hda['deleted']\n new_hda.history = the_history\n # Get the corresponding dataset\n try:\n the_dataset = sa_session.query(Dataset).filter(\\\n Dataset.uuid == the_hda['uuid']).one()\n new_hda.dataset = the_dataset\n new_hda.state = the_hda['state']\n new_hda.history_content_type = the_hda['history_content_type']\n except:\n if verbose:\n print(\"...But the corresponding dataset does not exist.\")\n pass\n # Get the corresponding ldda\n try:\n new_hda.copied_from_library_dataset_dataset_association.id = \\\n the_hda['copied_from_ldda_id']\n except:\n pass\n new_hda.purged = the_hda['purged']\n new_hda.create_time = datetime.datetime.strptime(\\\n the_hda['create_time'], \"%Y-%m-%dT%H:%M:%S.%f\")\n new_hda.update_time = datetime.datetime.strptime(\\\n the_hda['update_time'], \"%Y-%m-%dT%H:%M:%S.%f\")\n sa_session.add(new_hda)\n sa_session.flush()\n the_dataset = \"\"",
"def reconstruct_eta_decay(self) -> None:\n if self.has_eta23pi0:\n ma.reconstructDecay(\"pi0 -> gamma gamma\", \"\", path=self.path)\n ma.reconstructDecay(\"eta -> pi0 pi0 pi0\", \"\", path=self.path)\n\n elif self.has_eta2pipipi0:\n ma.reconstructDecay(\"pi0 -> gamma gamma\", \"\", path=self.path)\n ma.reconstructDecay(\"eta -> pi+ pi- pi0\", \"\", path=self.path)\n\n elif self.has_eta2pipigamma:\n ma.reconstructDecay(\"eta -> pi+ pi- gamma\", \"\", path=self.path)\n\n elif self.has_eta2gammagamma:\n ma.reconstructDecay(\"eta -> gamma gamma\", \"\", path=self.path)",
"def compute_descriptors(self, videoObj_list):\n\n desc_length = self.desc_dim_c + self.desc_dim_m\n\n X = np.zeros((1, desc_length))\n correct_video_indx = []\n\n for i, v in enumerate(videoObj_list):\n v_name = v.video_name_path\n v_length = v.video_length\n possible_offsets = np.array([v_length / 2, 60])\n off = np.min(possible_offsets[possible_offsets >= 0])\n logger.info('\\t 1.1. Sampling video %s (offset %.2f)'\n % (v.video_name_path, off))\n\n sampler = VideoFFmpegSampler(\n v_name, duration=self.sampling_duration, offset=off,\n fps=self.sampling_fps, scale=self.sampling_scale)\n list_of_frames = sampler.sample(output_dir=v.frames_folder_path)\n\n logger.info('\\t 1.2. Computing descriptors')\n num_frames = len(list_of_frames)\n X_row = []\n if num_frames > self.num_frames_per_video:\n\n X_tmp_f = self.flow_extractor.extract(list_of_frames)\n # this can happen if we only have 1 frame\n if np.sum(X_tmp_f) == 0:\n logger.error('No MOTION DESCRIPTORS computed for %s'\n % (v_name))\n else:\n step = 1\n if self.num_frames_per_video > 1:\n step = num_frames / self.num_frames_per_video\n\n list_imagefiles_color = \\\n list_of_frames[0:num_frames:step]\n list_imagefiles_color = \\\n list_imagefiles_color[0:self.num_frames_per_video]\n\n X_tmp_c = self.color_extractor.extract(\n list_imagefiles_color)\n X_tmp_c = X_tmp_c.reshape(\n (1, X_tmp_c.shape[0] * X_tmp_c.shape[1]))\n\n X_row = np.append(X_tmp_f, X_tmp_c, axis=1)\n\n if len(X_row) > 0 and X_row.shape[1] == desc_length:\n correct_video_indx = correct_video_indx + [i]\n X = np.append(X, X_row, axis=0)\n # only appends the row if the descriptor computation was\n # succesful\n else:\n logger.error('There are not enough frames for this video, \\\n No descriptor computed for %s' % (v_name))\n\n # delete tmp initial empty row\n X = np.delete(X, 0, axis=0)\n\n logger.info('End descriptor computation: X %s' % (str(np.shape(X))))\n return X, correct_video_indx",
"def feature_extractor(X_train, X_test):\n \n hog_train = []\n hog_test = []\n sift_train = []\n sift_test = []\n hog = cv2.HOGDescriptor()\n #HOGFeatureExtractor()\n \n winSize = (64,64)\n blockSize = (16,16)\n blockStride = (8,8)\n cellSize = (8,8)\n nbins = 9\n derivAperture = 1\n winSigma = 4.\n histogramNormType = 0\n L2HysThreshold = 2.0000000000000001e-01\n gammaCorrection = 0\n nlevels = 64\n hog = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins,derivAperture,winSigma,\n histogramNormType,L2HysThreshold,gammaCorrection,nlevels)\n winStride = (8,8)\n padding = (8,8)\n locations = ((10,20),)\n \n for img in X_train:\n kps, descs = sift(img)\n #if len(img.shape) == 2 :\n #img = img[:,:,numpy.newaxis]\n hog_train.append(hog.compute(img,winStride,padding,locations))\n if descs is None:\n sift_train.append([])\n else:\n sift_train.append(descs)\n i += 1\n if i%1000 == 0:\n print(i,datetime.now()-t)\n\n for img in X_test: \n kps, descs = sift(img)\n #if len(img.shape) == 2 :\n #img = img[:,:,numpy.newaxis]\n hog_test.append(hog.compute(img,winStride,padding,locations))\n if descs is None:\n sift_test.append([])\n else:\n sift_test.append(descs)\n \n return hog_train, hog_test, sift_train, sift_test",
"def ahf1halo(fpre, hid, h=0.7):\n\tf = open(fpre+'.AHF_halos')\n\td = f.read()\n\tds = d.split()\n\tNpart = np.array(ds[87::83],dtype='i8') # Obtain number of particles in each halo\n\txc = (float(ds[88+83*hid]) - float(ds[88]))*1e3/h # Obtain halo position in pc, translated to the coords the simulations are actually in\n\tyc = (float(ds[89+83*hid]) - float(ds[89]))*1e3/h\n\tzc = (float(ds[90+83*hid]) - float(ds[90]))*1e3/h\n\tf.close()\n\t\n\tNskip = 3 + 2*(sum(Npart[:hid]) + hid)\n\tf = open(fpre+'.AHF_particles')\n\td = f.read()\n\tds = np.array(d.split(), dtype='i8')\n\targs = np.arange(Npart[hid])*2 + Nskip\n\tpid = ds[args]\n\tf.close()\n\treturn pid, [xc,yc,zc]",
"def test_age_transitions(self):\n\n params = copy.deepcopy(ZERO_PARAMS)\n params['trans_tanaok'] = np.random.rand(3)\n\n self.model.params = params\n\n state = np.random.rand(15)\n expected_deriv = np.zeros_like(state)\n\n for i in range(4):\n if i < 3:\n expected_deriv[3*i] -= (params['trans_tanoak'][i] * state[3*i])\n expected_deriv[3*i+1] -= (params['trans_tanoak'][i] * state[3*i+1])\n expected_deriv[3*i+2] -= (params['trans_tanoak'][i] * state[3*i+2])\n if i > 0:\n expected_deriv[3*i] += (params['trans_tanoak'][i-1] * state[3*(i - 1)])\n expected_deriv[3*i+1] += (params['trans_tanoak'][i-1] * state[3*(i - 1)+1])\n expected_deriv[3*i+2] += (params['trans_tanoak'][i-1] * state[3*(i - 1)+2])\n\n deriv = self.model.state_deriv(0.0, state)[0:-1]\n print(\"Age class transition rates:\", deriv, expected_deriv)\n self.assertTrue(np.allclose(deriv, expected_deriv))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A method used for calculating Transition descriptors based on Charge of AADs.
|
def CalculateTransitionCharge(ProteinSequence):
result=CalculateTransition(ProteinSequence,_Charge,'_Charge')
return result
|
[
"def pd_create(cd):\n\n # check that 'c' or 'd' is passed\n #assert cd == (\n # 'c' or 'd'), 'This must be charge (c) or discharge (d) data'\n\n # number of descriptors it generates\n n_desc = 19\n\n # determines prefix string based on need for a charge or\n # discharge dataframe\n if cd == 'c':\n prefix = 'ch_'\n else:\n prefix = 'dc_'\n\n # generates list of names for the top of the descriptors dataframe\n names = []\n for ch in np.arange(n_desc):\n names.append(prefix + str(int(ch)))\n\n # adds names of error parameters to the end of the descriptor list\n names = names + [prefix+'AIC', prefix+'BIC', prefix+'red_chi_squared']\n\n # creates pandas dataframe with necessary heading\n # print(names)\n desc = pd.DataFrame(columns=names)\n\n return desc",
"def contract_exchange_descriptors(desc):\n # desc[0:6] = rho_data\n # desc[6:7] = g0\n # desc[7:10] = g1\n # desc[10:15] = g2\n # desc[15] = g0-r^2\n # g1 order: x, y, z\n # g2 order: xy, yz, z^2, xz, x^2-y^2\n\n N = desc.shape[1]\n res = np.zeros((12,N))\n rho_data = desc[:6]\n\n rho, s, alpha, tau_w, tau_unif = get_dft_input2(desc[:6])\n sprefac = 2 * (3 * np.pi * np.pi)**(1.0/3)\n n43 = rho**(4.0/3)\n svec = desc[1:4] / (sprefac * n43 + 1e-16)\n\n res[0] = rho\n res[1] = s**2\n res[2] = alpha\n\n # other setup\n g0 = desc[6]\n g1 = desc[7:10]\n g2 = desc[10:15]\n\n # g1_norm and 1d dot product\n g1_norm = np.linalg.norm(g1, axis=0)**2\n dot1 = np.einsum('an,an->n', svec, g1)\n\n # Clebsch Gordan https://en.wikipedia.org/wiki/Table_of_Clebsch%E2%80%93Gordan_coefficients\n g2_norm = 0\n for i in range(5):\n g2_norm += g2[i] * g2[i]\n g2_norm /= np.sqrt(5)\n\n res[3] = g0\n res[4] = g1_norm\n res[5] = dot1\n res[6] = g2_norm\n\n sgc = contract21(g2, svec)\n sgg = contract21(g2, g1)\n\n res[7] = np.einsum('pn,pn->n', sgc, svec)\n res[8] = np.einsum('pn,pn->n', sgc, g1)\n res[9] = np.einsum('pn,pn->n', sgg, g1)\n\n res[10] = desc[15]\n res[11] = desc[16]\n\n # res\n # 0: rho\n # 1: s\n # 2: alpha\n # 3: g0\n # 4: norm(g1)**2\n # 5: g1 dot svec\n # 6: norm(g2)**2\n # 7: svec dot g2 dot svec\n # 8: g1 dot g2 dot svec\n # 9: g1 dot g2 dot g1\n # 10: g0-r^2\n # 11: g0-r^4\n return res",
"def charges(self, molecule):\n\n # TODO add option to use chargemol on onetep cube files.\n copy(f'../density/{molecule.name}.wfx', f'{molecule.name}.wfx')\n c_mol = Chargemol(molecule, self.all_configs)\n c_mol.generate_input()\n\n append_to_log(f'Chargemol analysis with DDEC{self.qm[\"ddec_version\"]} complete')\n\n return molecule",
"def reconstruct_eta_decay(self) -> None:\n if self.has_eta23pi0:\n ma.reconstructDecay(\"pi0 -> gamma gamma\", \"\", path=self.path)\n ma.reconstructDecay(\"eta -> pi0 pi0 pi0\", \"\", path=self.path)\n\n elif self.has_eta2pipipi0:\n ma.reconstructDecay(\"pi0 -> gamma gamma\", \"\", path=self.path)\n ma.reconstructDecay(\"eta -> pi+ pi- pi0\", \"\", path=self.path)\n\n elif self.has_eta2pipigamma:\n ma.reconstructDecay(\"eta -> pi+ pi- gamma\", \"\", path=self.path)\n\n elif self.has_eta2gammagamma:\n ma.reconstructDecay(\"eta -> gamma gamma\", \"\", path=self.path)",
"def _construct_adv_cost(self):\n match_cost = self.GN.compute_log_prob(Xd=self.match_target)\n adv_cost = -T.sum(match_cost) / self.obs_count\n return adv_cost",
"def calculate_TODs(self, pce):\n mf_amet_upper = self.atl01_dict[pce].mf_amet_upper\n mf_amet_lower = self.atl01_dict[pce].mf_amet_lower\n # This tags each photon in the Major Frame\n raw_pce_mframe_cnt_ph = self.atl01_dict[pce].raw_pce_mframe_cnt_ph\n # This maps which laser fire \"made\" the returns. Many photons can have same pulse id.\n # It is not always true that pulse id is 1-200, though usually is.\n ph_id_pulse = self.atl01_dict[pce].ph_id_pulse \n\n # This gives the major frame numnber of each shot\n mframe_counts = raw_pce_mframe_cnt_ph - raw_pce_mframe_cnt_ph[0]\n # This gives the list of major frames. Length = number of major frames.\n mframes = list(set(mframe_counts))\n\n # Leading lower element coarse counts.\n tx_cc = self.tx_cc(pce) #equations.tx_cc(self.atl01, pce)\n TX_CC = self.TX_CC(tx_cc)\n\n if self.verbose:\n print(\" \")\n print(\"pce: \", pce)\n print(\"mframe_counts: \", len(mframe_counts), mframe_counts)\n print(\"tx_cc: \", len(tx_cc), tx_cc)\n print(\"TX_CC: \", len(TX_CC), TX_CC)\n\n # Initialize lists to store values per MF.\n amet_FirstT0MF_permf = []\n GPSTime_FirstT0MF_permf = []\n delta_GPSTime_permf = []\n GPSTime_T0_permf = []\n GPSTime_ll_permf = []\n DeltaTime_ll_permf = []\n mf_amet_upper_permf = []\n mf_amet_lower_permf = []\n tx_cc_permf = []\n TX_CC_permf = []\n\n if self.mf_limit != None:\n end_mf = self.mf_limit\n else:\n end_mf = len(mframes)\n\n for mframe in mframes[:end_mf]:\n print(\"PCE {}, mframe {}\".format(pce, mframe))\n\n mframe = int(mframe)\n # mask out all events not in major frame.\n mframe_mask = np.where(mframe_counts == mframe)[0]\n TX_CC_mframe = TX_CC[mframe_mask]\n ph_id_pulse_mframe = ph_id_pulse[mframe_mask]\n # List unique pulse IDs available in the major frame\n pulses = list(set(ph_id_pulse_mframe))\n\n amet_FirstT0MF = self.amet_FirstT0MF(mf_amet_upper[mframe], mf_amet_lower[mframe])\n GPSTime_FirstT0MF = self.GPSTime_FirstT0MF(pce, amet_FirstT0MF)\n\n # Do a first calculation of T0_effective and GPSTime_T0\n # Need the GPS time for each T0 in order to establish the GPS\n # time of each transmit (Tx) that uses T0 as reference\n T0_effective = self.T0_effective(pce, TX_CC_mframe, ph_id_pulse_mframe)\n GPSTime_T0 = self.GPSTime_T0(GPSTime_FirstT0MF, T0_effective) \n\n # Determine GPS time of the LL now that know each shot's T0effective GPS time.\n GPSTime_ll = self.GPSTime_ll(pce, TX_CC_mframe, GPSTime_T0) \n # ToD value for PCE, each MF?\n DeltaTime_ll = self.DeltaTime_ll(GPSTime_ll)\n\n # Append to lists.\n GPSTime_T0_permf.append(GPSTime_T0)\n GPSTime_ll_permf.append(GPSTime_ll)\n DeltaTime_ll_permf.append(DeltaTime_ll)\n amet_FirstT0MF_permf.append(amet_FirstT0MF)\n GPSTime_FirstT0MF_permf.append(GPSTime_FirstT0MF)\n\n mf_amet_upper_permf.append(mf_amet_upper[mframe])\n mf_amet_lower_permf.append(mf_amet_lower[mframe])\n tx_cc_permf.append(tx_cc[mframe_mask])\n TX_CC_permf.append(TX_CC[mframe_mask])\n\n if self.verbose:\n print(\" mframe: \", mframe)\n print(\" mframe_mask: \", len(mframe_mask), mframe_mask)\n print(\" TX_CC_mframe: \", len(TX_CC_mframe), TX_CC_mframe)\n print(\" ph_id_pulse_mframe: \", len(ph_id_pulse_mframe), ph_id_pulse_mframe)\n print(\" amet_FirstT0MF: \", amet_FirstT0MF)\n print(\" GPSTime_FirstT0MF: \", GPSTime_FirstT0MF)\n print(\" T0_effective: \", T0_effective)\n print(\" GPSTime_T0: \", GPSTime_T0)\n print(\" GPSTime_ll: \", len(GPSTime_ll), GPSTime_ll)\n print(\" DeltaTime_ll: \", len(DeltaTime_ll), DeltaTime_ll)\n \n # Flatten the arrays in major frame key. 
Only want one array.\n GPSTime_T0_permf = flatten_mf_arrays(GPSTime_T0_permf)\n GPSTime_ll_permf = flatten_mf_arrays(GPSTime_ll_permf)\n DeltaTime_ll_permf = flatten_mf_arrays(DeltaTime_ll_permf)\n amet_FirstT0MF_permf = np.array(amet_FirstT0MF_permf).flatten()\n GPSTime_FirstT0MF_permf = np.array(GPSTime_FirstT0MF_permf).flatten()\n mf_amet_upper_permf = np.array(mf_amet_upper_permf).flatten()\n mf_amet_lower_permf = np.array(mf_amet_lower_permf).flatten()\n tx_cc_permf = np.array(tx_cc_permf).flatten()\n TX_CC_permf = np.array(TX_CC_permf).flatten() \n\n # Return all values for this PCE in a named tuple.\n pce_variables = PCEVariables(\n mf_amet_upper=mf_amet_upper_permf, #mf_amet_upper, \n mf_amet_lower=mf_amet_lower_permf, #mf_amet_lower, \n tx_cc=tx_cc_permf, #tx_cc, \n TX_CC=TX_CC_permf, #TX_CC,\n amet_FirstT0MF=amet_FirstT0MF_permf, \n T0_effective=T0_effective, \n GPSTime_FirstT0MF=GPSTime_FirstT0MF_permf, \n GPSTime_T0=GPSTime_T0_permf, \n GPSTime_ll=GPSTime_ll_permf, \n DeltaTime_ll=DeltaTime_ll_permf) \n\n return pce_variables",
"def afterCharge(self):\n\n self.setServiceClient(DetailedService.NOCLIENT)\n self.setServiceArrivalHour(self.getServiceArrivalHour().add(Vehicle.RECHDURATION))\n self.setServiceDepartHour(self.getServiceArrivalHour())\n self.setServiceCircuit(DetailedService.NOCIRCUIT)\n self.setServiceCircuitKms(\"0\")\n self.setServiceDriverStatus(Driver.STATUSStandBy)\n self.setVehicleKmsDone(\"0\")",
"def u(self, name: str, charges: list) -> Tuple[float, list]:\n if len(charges) != 3:\n assert ValueError(\"Length of charge states must be 3.\")\n elif charges[2] - charges[1] != 1 or charges[1] - charges[0] != 1:\n assert ValueError(\"The charge states {} {} {} are not sequential.\"\n .format(*charges))\n elif not charges[0] < charges[1] < charges[2]:\n assert ValueError(\"The charge states {} {} {} are not incremental.\"\n .format(*charges))\n\n energies = []\n names = []\n for charge in charges:\n defect = self.defect_energies[name][charge]\n energies.append(defect.defect_energy)\n names.append(str(DefectName(name, charge, defect.annotation)))\n\n return energies[0] + energies[2] - 2 * energies[1], names",
"def distances(self):\n\n\n # Distances between atoms and ESP points\n self.dist = np.zeros((self.natoms, self.npoints))\n self.dist_3 = np.zeros((self.natoms, self.npoints))\n self.dist_x = np.zeros((self.natoms, self.npoints))\n self.dist_y = np.zeros((self.natoms, self.npoints))\n self.dist_z = np.zeros((self.natoms, self.npoints))\n\n self.dist = 1. / distance.cdist(self.atomcrd, self.crd)\n self.dist_3 = np.power(self.dist, 3) # maybe free afterwards\n self.dist_x = -np.multiply(np.subtract.outer(np.transpose(self.atomcrd)[0], np.transpose(self.crd)[0]),\n self.dist_3)\n # self.dist_x2=np.multiply(np.transpose(np.subtract.outer(np.transpose(self.crd)[0],np.transpose(self.atomcrd)[0])),self.dist_3)\n self.dist_y = -np.multiply(np.subtract.outer(np.transpose(self.atomcrd)[1], np.transpose(self.crd)[1]),\n self.dist_3)\n self.dist_z = -np.multiply(np.subtract.outer(np.transpose(self.atomcrd)[2], np.transpose(self.crd)[2]),\n self.dist_3)\n del self.dist_3\n\n # Distances between atoms and atoms\n self.adist = np.zeros((self.natoms, self.natoms))\n self.adist_3 = np.zeros((self.natoms, self.natoms))\n self.adist_5 = np.zeros((self.natoms, self.natoms))\n self.adist_x = np.zeros((self.natoms, self.natoms))\n self.adist_y = np.zeros((self.natoms, self.natoms))\n self.adist_z = np.zeros((self.natoms, self.natoms))\n self.adistb_x = np.zeros((self.natoms, self.natoms))\n self.adistb_y = np.zeros((self.natoms, self.natoms))\n self.adistb_z = np.zeros((self.natoms, self.natoms))\n\n self.adist = distance.cdist(self.atomcrd, self.atomcrd)\n di = np.diag_indices(self.natoms)\n self.adist[di] = 1.0E10\n # self.adist=np.fill_diagonal(self.adist,1.0)\n self.adist = 1. / self.adist\n self.adist_3 = np.power(self.adist, 3)\n self.adist_5 = np.power(self.adist, 5)\n self.adist[di] = 0.0\n self.adist_x = np.multiply(np.subtract.outer(np.transpose(self.atomcrd)[0], np.transpose(self.atomcrd)[0]),\n self.adist_3) # X distance between two atoms divided by the dist^3\n self.adist_y = np.multiply(np.subtract.outer(np.transpose(self.atomcrd)[1], np.transpose(self.atomcrd)[1]),\n self.adist_3)\n self.adist_z = np.multiply(np.subtract.outer(np.transpose(self.atomcrd)[2], np.transpose(self.atomcrd)[2]),\n self.adist_3)\n self.adistb_x = np.subtract.outer(np.transpose(self.atomcrd)[0],\n np.transpose(self.atomcrd)[0]) # X distances between two atoms\n self.adistb_y = np.subtract.outer(np.transpose(self.atomcrd)[1], np.transpose(self.atomcrd)[1])\n self.adistb_z = np.subtract.outer(np.transpose(self.atomcrd)[2], np.transpose(self.atomcrd)[2])\n\n # self.dist_d=np.multiply(self.dist_d,self.dist_3)\n # for i in range(len(self.atomcrd3)):\n # for j in range(len(self.crd3)):\n # self.dist_d[i][j]=1./(self.atomcrd3[i]-self.crd3[j])",
"def pd_update(desc, charge_descript):\n\n # check if the inputs have the right Type\n # c is the charge_descript and desc is the empty dataframe \n assert isinstance(\n desc, pd.core.frame.DataFrame), \"This input must be a pandas dataframe\"\n assert isinstance(\n charge_descript, dict), \"Stop right there, only dictionaries are allowed in these parts\"\n #print('here is charge descript thingy: ')\n #print(charge_descript)\n # converts the dictionary of descriptors into a list of descriptors\n #desc_ls = process.dict_2_list(charge_descript)\n desc_ls = pd.DataFrame(charge_descript)\n # still c but as a list \n #print('here is c but as a list: ')\n #print(desc_ls)\n # print('here is the desc_ls: ')\n # print(desc_ls)\n # adds zeros to the end of each descriptor list to create\n # a list with 22 entries\n # also appends error parameters to the end of the descriptor list\n #desc_app = desc_ls + \\\n # np.zeros(19-len(desc_ls)).tolist() + charge_descript['errorParams']\n # generates a dataframe of descriptors\n #desc_df = pd.DataFrame([desc_app], columns=desc.columns)\n # combines row of a dataframe with previous dataframe\n desc = pd.concat([desc, desc_df], ignore_index=True)\n # print('here is the desc.to_string(): ')\n # print(desc.to_string())\n\n return desc",
"def test_age_transitions(self):\n\n params = copy.deepcopy(ZERO_PARAMS)\n params['trans_tanaok'] = np.random.rand(3)\n\n self.model.params = params\n\n state = np.random.rand(15)\n expected_deriv = np.zeros_like(state)\n\n for i in range(4):\n if i < 3:\n expected_deriv[3*i] -= (params['trans_tanoak'][i] * state[3*i])\n expected_deriv[3*i+1] -= (params['trans_tanoak'][i] * state[3*i+1])\n expected_deriv[3*i+2] -= (params['trans_tanoak'][i] * state[3*i+2])\n if i > 0:\n expected_deriv[3*i] += (params['trans_tanoak'][i-1] * state[3*(i - 1)])\n expected_deriv[3*i+1] += (params['trans_tanoak'][i-1] * state[3*(i - 1)+1])\n expected_deriv[3*i+2] += (params['trans_tanoak'][i-1] * state[3*(i - 1)+2])\n\n deriv = self.model.state_deriv(0.0, state)[0:-1]\n print(\"Age class transition rates:\", deriv, expected_deriv)\n self.assertTrue(np.allclose(deriv, expected_deriv))",
"def simpleCIA(self, data):\n\n\t\t#pprint.pprint(data)\n\n\t\t#Actions costs - downtime and probe costs\n\t\tdtCost = self.cparams['dtCost']\n\t\tprCost = self.cparams['prCost']\n\t\tdownTime = self.cparams['downTime']\n\n\t\t#Status payoffs - server control costs\n\t\tcontrolPayoffs = {}\n\t\tcontrolPayoffs['DEF'] = self.cparams['DEF']\n\t\tcontrolPayoffs['ATT'] = self.cparams['ATT']\n\n\t\tself.params['DEF'] = 0\n\t\tself.params['ATT'] = 0\n\t\tself.params['totalDowntimeCost'] = 0\n\t\tself.params['totalProbeCost'] = 0\n\t\tself.params['totalDowntime'] = 0\n\t\tpreviousTime = 0\n\t\tcurrentTime = 0\n\t\tprevC = {}\n\t\t#Tracks the servers under each agents control\n\t\tsCount = {\n\t\t'DEF':0,\n\t\t'ATT':0\n\t\t}\n\t\t#Tracks the previous controller of each server\n\t\tprevC['Server0'] = 'DEF'\n\t\tprevC['Server1'] = 'DEF'\n\t\tprevC['Server2'] = 'DEF'\n\n\t\tfor it in sorted(data.items()):\n\t\t\t# sCount['DEF'] = 0\n\t\t\t# sCount['ATT'] = 0\n\t\t\t# print \"------------------>>\"+str(it[0])+\"\\n\"\n\t\t\ttime = it[0]\n\t\t\thist = it[1]\n\t\t\tcurrentTime = time\n\t\t\ttimeFactor = currentTime - previousTime\n\t\t\tpTime = previousTime\n\t\t\tpreviousTime = currentTime\n\t\t\t#Might need to correct this\n\t\t\t# for res, rep in hist['inactiveResources'].iteritems():\n\t\t\t\t# self.params['totalDowntimeCost'] += timeFactor*dtCost\n\t\t\t\t# # print \"-------->\" + res\n\t\t\t\t# self.params['totalDowntime'] += timeFactor\n\t\t\t# print hist['activeResources']\t\n\t\t\t# for res, rep in hist['activeResources'].iteritems():\n\t\t\t# \tsCount[prevC[res]] += 1\n\t\t\t# \tprevC[res] = rep['Control']\n\t\t\tfor k,v in sCount.iteritems():\n\t\t\t\t# print k,v, time\n\t\t\t\t# print \"Do [\" + str(currentTime) + \"-\" + str(pTime) + \"] \" + \"*\" +str((controlPayoffs[k])[v])\n\t\t\t\t# print ']]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]\\n'\n\t\t\t\t#Accrues utility for time period (t-1) to t\n\t\t\t\tself.params[k] += timeFactor*(controlPayoffs[k])[v]\n\t\t\t\tsCount[k] = 0\n\t\t\t# print self.params\n\t\t\t#Count servers for each agent at time t\n\t\t\tfor res, rep in hist['activeResources'].iteritems():\n\t\t\t\tsCount[rep['Control']] += 1\n\t\t\tfor res, rep in hist['inactiveResources'].iteritems():\n\t\t\t\tsCount[rep['Control']] += 1\n\n\n\t\tlastItem = data[max(data.keys(), key=int)]\n\t\tfor k,v in lastItem.iteritems():\n\t\t\tfor s,r in v.iteritems():\n\t\t\t\tself.params['totalProbeCost'] += r['Total Probes till now']\n\t\t\t\tself.params['totalDowntime'] += r['Reimage Count']\n\t\t\t\t# print self.params\n\n\n\t\tself.params['totalDowntime'] *= downTime\n\t\tself.params['totalDowntimeCost'] = self.params['totalDowntime']*dtCost\n\t\tpayoff = {}\n\t\tpayoff[\"totalProbes\"] = self.params['totalProbeCost'] \n\n\t\tself.params['totalProbeCost'] *= prCost\t\t\n\t\tpayoff[\"DEF\"] = self.params['totalDowntimeCost'] + self.params['DEF']\n\t\tpayoff[\"ATT\"] = self.params['totalProbeCost'] + self.params['ATT']\n\t\tpayoff[\"totalDownTime\"] = self.params['totalDowntime']\n\n\t\t# print \"---------------------------------------------\\n\"\n\t\t# print payoff\n\t\treturn payoff",
"def tally_cds_descriptions(self):\n self._cds_descriptions_tally = 0\n self._cds_products_tally = 0\n self._cds_functions_tally = 0\n self._cds_notes_tally = 0\n for cds_ftr in self.cds_features:\n if cds_ftr.description != \"\":\n self._cds_descriptions_tally += 1\n if cds_ftr.product != \"\":\n self._cds_products_tally += 1\n if cds_ftr.function != \"\":\n self._cds_functions_tally += 1\n if cds_ftr.note != \"\":\n self._cds_notes_tally += 1",
"def aic(self):\n aics = []\n aics_bool = []\n for i, chain in enumerate(self.parent.chains):\n p, n_data, n_free = chain.posterior, chain.num_eff_data_points, chain.num_free_params\n if p is None or n_data is None or n_free is None:\n aics_bool.append(False)\n missing = \"\"\n if p is None:\n missing += \"posterior, \"\n if n_data is None:\n missing += \"num_eff_data_points, \"\n if n_free is None:\n missing += \"num_free_params, \"\n\n self._logger.warning(\"You need to set %s for chain %s to get the AIC\" % (missing[:-2], chain.name))\n else:\n aics_bool.append(True)\n c_cor = 1.0 * n_free * (n_free + 1) / (n_data - n_free - 1)\n aics.append(2.0 * (n_free + c_cor - np.max(p)))\n if len(aics) > 0:\n aics -= np.min(aics)\n aics_fin = []\n i = 0\n for b in aics_bool:\n if not b:\n aics_fin.append(None)\n else:\n aics_fin.append(aics[i])\n i += 1\n return aics_fin",
"def info_gain(self,a):\n entro = self.entropy()\n Dv = dict()\n for d in self.datas:\n a_info = d.data[a]\n if a_info in Dv:\n Dv[a_info].add(d)\n else:\n new_dataset = DataSet()\n new_dataset.add(d)\n Dv[a_info] = new_dataset\n for x in Dv:\n N = len(self.datas) #|D|\n Nv = len(Dv[x].datas)#|Dv|\n entro -= Dv[x].entropy() * Nv / N\n return entro, Dv",
"def build_admittance_mat(cls):\n cm_mat = System.coupling_matrix\n\n deri = sy.eye(cm_mat.shape[0]) * sy.I * 2 * sy.pi * System.freq\n\n capa_matrix = System.capacity_matrix\n\n admit = capa_matrix*(cm_mat + deri)\n\n System.admittance_matrix = admit\n return System.admittance_matrix",
"def CalculateTransition(ProteinSequence,AAProperty,AAPName):\r\n\t\r\n\tTProteinSequence=StringtoNum(ProteinSequence,AAProperty)\r\n\tResult={}\r\n\tNum=len(TProteinSequence)\r\n\tCTD=TProteinSequence\r\n\tResult[AAPName+'T'+'12']=round(float(CTD.count('12')+CTD.count('21'))/(Num-1),3)\r\n\tResult[AAPName+'T'+'13']=round(float(CTD.count('13')+CTD.count('31'))/(Num-1),3)\r\n\tResult[AAPName+'T'+'23']=round(float(CTD.count('23')+CTD.count('32'))/(Num-1),3)\r\n\treturn Result",
"def __init__(self, cs, miller, energy, Idafs, f1start=None, f2start=None):\n self.cs = cs\n self.miller = miller\n self.energy = energy\n self.DAFS = Idafs\n for k in f1start:\n cs.feed_feff(k, energy, f1start[k], f2start[k])\n self.Ffunc = cs.DAFS(energy, miller, func_output=True)\n self.f, self.f1, self.f2 = {}, {}, {}\n self.f1tab, self.f2tab = {}, {}\n self.f1func, self.f2func = {}, {}\n self.Z = {}\n for symbol in cs.f:\n if symbol.name.startswith(\"f_\"):\n self.f1[symbol] = cs.f[symbol].real.copy()\n self.f2[symbol] = cs.f[symbol].imag.copy()\n self.f[symbol.name] = cs.f[symbol].copy()\n #self.eneleft = \n self.Isim = abs(self.Ffunc.dictcall(self.f))**2\n self.Isim0 = self.Isim.copy()\n self.ind={}\n self.ilim = {}\n self.anchors = {}\n self.diff = {}\n self.debug = True",
"def der_cost ( self, x_dict, state_config ):\n i = 0\n cost = 0.\n n = 0\n n = 0\n for param, typo in state_config.iteritems():\n if typo == CONSTANT:\n n += 1\n elif typo == VARIABLE:\n n_elems = len ( x_dict[param] )\n n += n_elems\n der_cost = np.zeros ( n )\n x_params = np.empty ( ( len( x_dict.keys()), self.nt ) )\n j = 0\n ii = 0\n the_derivatives = np.zeros ( ( len( x_dict.keys()), self.nt ) )\n for param, typo in state_config.iteritems():\n \n if typo == FIXED or typo == CONSTANT:\n x_params[ j, : ] = x_dict[param]\n \n elif typo == VARIABLE:\n x_params[ j, : ] = x_dict[param]\n\n j += 1\n \n\n for itime, tstep in enumerate ( self.state_grid ):\n if self.mask[itime, 0] == 0:\n # No obs here\n continue\n # We use the `get_emulator` method to select the required\n # emulator for this geometry, spectral setting etc\n obs_ops = self.get_emulator ( itime, self.mask, self.emulators )\n sigma_obs_vis, sigma_obs_vis = self.bu[ :, itime ]\n # forward model the proposal\n x = x_params[:, itime]\n model_albedo_vis, vis_var, vis_der = \\\n obs_ops[0] ( np.atleast_2d(x) )\n model_albedo_nir, nir_var, nir_der = \\\n obs_ops[1] ( np.atleast_2d(x) )\n # Calculate the actual cost\n this_cost = 0.5*( model_albedo_vis - albedo_vis )**2/sigma_obs_vis**2 + \\\n 0.5*( model_albedo_nir - albedo_nir )**2/sigma_obs_nir**2\n \n # The partial derivatives of the cost function are then\n this_der= (1./sigma_obs_vis**2)*( model_albedo_vis - \\\n albedo_vis )*vis_der + \\\n (1./sigma_obs_nir**2)*( model_albedo_nir - albedo_nir )*nir_der \n \n\n cost += this_cost\n the_derivatives[ :, itime] = this_der\n \n \n j = 0\n for i, (param, typo) in enumerate ( state_config.iteritems()) :\n if typo == CONSTANT:\n der_cost[j] = the_derivatives[i, 0]\n j += 1\n elif typo == VARIABLE:\n n_elems = len ( x_dict[param] )\n der_cost[j:(j+n_elems) ] = the_derivatives[i, :]\n j += n_elems\n \n return cost, der_cost",
"def get_adverts(self):\n self.queries = generate_description_queries(self, CHUNKSIZE)\n self.next(self.extract_requires_degree, foreach=\"queries\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A method used for calculating Transition descriptors based on SecondaryStr of AADs.
|
def CalculateTransitionSecondaryStr(ProteinSequence):
result=CalculateTransition(ProteinSequence,_SecondaryStr,'_SecondaryStr')
return result
|
[
"def _compute_durations_on_diff_addrs(self, stud_address, stud_alternate_1,\n stud_alternate_2, has_car):\n main_duration = self._fetch_travel_duration(stud_address,\n has_car)\n self.durations['main_duration' + str(has_car)] = main_duration\n\n if not pd.isna(stud_alternate_1):\n alter_duration1 = self._fetch_travel_duration(stud_alternate_1,\n has_car)\n # add has_car in key so that keys do not override\n # All bicycle durations will have 0 at the end of the key\n # and all car durations will have 1 at the end of the key\n self.durations['alter_duration1' + str(has_car)] = alter_duration1\n\n if not pd.isna(stud_alternate_2):\n alter_duration2 = self._fetch_travel_duration(stud_alternate_2,\n has_car)\n self.durations['alter_duration2' + str(has_car)] = alter_duration2",
"def seg2seg(dc: DistanceCalculator, alignedSegments: List[List[MessageSegment]],\n coordA: Tuple[int, int], coordB: Tuple[int, int]):\n segA = alignedSegments[coordA[0]][coordA[1]]\n print(segA)\n segB = alignedSegments[coordB[0]][coordB[1]]\n print(segB)\n return dc.pairDistance(segA, segB)",
"def CalculateTransition(ProteinSequence,AAProperty,AAPName):\r\n\t\r\n\tTProteinSequence=StringtoNum(ProteinSequence,AAProperty)\r\n\tResult={}\r\n\tNum=len(TProteinSequence)\r\n\tCTD=TProteinSequence\r\n\tResult[AAPName+'T'+'12']=round(float(CTD.count('12')+CTD.count('21'))/(Num-1),3)\r\n\tResult[AAPName+'T'+'13']=round(float(CTD.count('13')+CTD.count('31'))/(Num-1),3)\r\n\tResult[AAPName+'T'+'23']=round(float(CTD.count('23')+CTD.count('32'))/(Num-1),3)\r\n\treturn Result",
"def get_string(self):\n return_string = None\n if not self.mmc:\n return \"\"\n method = 'PDASTRING'\n if method == 'PDASTRING':\n stringgen = PdaString()\n print '* Reduce PDA using DFA BFS (remove unreachable states):'\n newpda = self.mmc.s\n handle = IntersectionHandling()\n newpda = handle.get(newpda, self.mmc.accepted)\n reduce_b = ReducePDA()\n newpda = reduce_b.get(newpda)\n #simply = SimplifyStateIDs()\n #newpda, biggestid, newaccepted = simply.get(\n # newpda, self.mmc.accepted)\n print \"- Total PDA states after reduction are \" + repr(len(newpda))\n return_string = stringgen.init(newpda, self.mmc.accepted)\n if return_string is not None:\n return_string = return_string[0]\n elif method == 'PDACFGSTRING':\n\n optimized = 1\n dt1 = datetime.datetime.fromtimestamp(time.time())\n print '* Initiating PDA simplification'\n print ' - Total PDA states are ' + repr(len(self.mmc.s))\n handle = IntersectionHandling()\n newpda = handle.get(self.mmc.s, self.mmc.accepted)\n newpda = self.mmc.s\n simply = SimplifyStateIDs()\n newpda, biggestid, newaccepted = simply.get(\n newpda, self.mmc.accepted)\n print ' - Total PDA states after id clearence are ' + repr(len(newpda))\n replace = ReadReplace(newpda, biggestid)\n newpda = replace.replace_read()\n print ' - Total PDA states after read elimination are ' + repr(len(newpda))\n maxstate = replace.nextstate() - 1\n print '* Reduce PDA using DFA BFS (remove unreachable states):'\n reduce_b = ReducePDA()\n newpda = reduce_b.get(newpda)\n print \"- Total PDA states after reduction are \" + repr(len(newpda))\n\n dt2 = datetime.datetime.fromtimestamp(time.time())\n rdelta = dateutil.relativedelta.relativedelta(dt2, dt1)\n print \"* PDA was simplyfied in %d days, %d hours, %d minutes and %d seconds\" % (\n rdelta.days, rdelta.hours, rdelta.minutes, rdelta.seconds)\n dt1 = datetime.datetime.fromtimestamp(time.time())\n print '* Initiating CNF from PDA generation'\n cnfgenerator = PdaCnf(newpda, newaccepted)\n dt2 = datetime.datetime.fromtimestamp(time.time())\n rdelta = dateutil.relativedelta.relativedelta(dt2, dt1)\n print \"* CNF was generated in %d days, %d hours, %d minutes and %d seconds\" % (\n rdelta.days, rdelta.hours, rdelta.minutes, rdelta.seconds)\n dt1 = datetime.datetime.fromtimestamp(time.time())\n print '* Initiating string from CFG generation'\n grammar = cnfgenerator.get_rules(optimized)\n print ' - Total grammar rules are ' + repr(len(grammar))\n gen = CFGGenerator(CNFGenerator(grammar),\n optimized=optimized,\n splitstring=0,\n maxstate=maxstate)\n return_string = gen.generate()\n dt2 = datetime.datetime.fromtimestamp(time.time())\n rdelta = dateutil.relativedelta.relativedelta(dt2, dt1)\n print \"* A string was generated in %d days, %d hours, %d minutes and %d seconds\" % (\n rdelta.days, rdelta.hours, rdelta.minutes, rdelta.seconds)\n\n print return_string\n else:\n return_string = None\n return return_string",
"def create_segments_and_labels(df, time_steps, step, labeldf, isBehavioral):\n\n # list number of features to be extracted, can be changed depending on what features are desired to be extracted\n N_FEATURES = 4\n\n segments = []\n palm = []\n hr = []\n br = []\n per = []\n labels = []\n for j in range(0, len(df)):\n for i in range(1, len(df[j]) - time_steps, step):\n if isBehavioral is True:\n steering = df[j]['Steering'].values[i: i + time_steps]\n acceleration = df[j]['Acceleration'].values[i: i + time_steps]\n speed = df[j]['Speed'].values[i: i + time_steps]\n brake = df[j]['Brake'].values[i: i + time_steps]\n segments.append([steering, acceleration, speed, brake])\n else:\n palmEDA = df[j]['Palm.EDA'].values[i: i + time_steps]\n heartRate = df[j]['Heart.Rate'].values[i: i + time_steps]\n breathingRate = df[j]['Breathing.Rate'].values[i: i + time_steps]\n perinasalPerspiration = df[j]['Perinasal.Perspiration'].values[i: i + time_steps]\n palm.append([palmEDA])\n hr.append([heartRate])\n br.append([breathingRate])\n per.append([perinasalPerspiration])\n\n maxLabel = labeldf[j]['Max'].values[i: i + time_steps]\n labels.append(maxLabel)\n\n if isBehavioral is False:\n palm_r = np.asarray(palm, dtype=np.float32).reshape(-1, time_steps, 1)\n hr_r = np.asarray(hr, dtype=np.float32).reshape(-1, time_steps, 1)\n br_r = np.asarray(br, dtype=np.float32).reshape(-1, time_steps, 1)\n per_r = np.asarray(per, dtype=np.float32).reshape(-1, time_steps, 1)\n list_f = []\n list_f.append(palm_r)\n list_f.append(hr_r)\n list_f.append(br_r)\n list_f.append(per_r)\n labels = np.asarray(labels)\n\n return list_f, labels\n\n # Bring the segments into a better shape\n print(len(segments[0]))\n reshaped_segments = np.asarray(segments, dtype=np.float32).reshape(-1, time_steps, N_FEATURES)\n labels = np.asarray(labels)\n\n return reshaped_segments, labels",
"def attributes_desc():\n columns = [\n '5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive', 'Bags_Under_Eyes', 'Bald', 'Bangs',\n 'Big_Lips', 'Big_Nose', 'Black_Hair', 'Blond_Hair', 'Blurry', 'Brown_Hair', 'Bushy_Eyebrows',\n 'Chubby', 'Double_Chin', 'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones',\n 'Male', 'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard', 'Oval_Face', 'Pale_Skin',\n 'Pointy_Nose', 'Receding_Hairline', 'Rosy_Cheeks', 'Sideburns', 'Smiling', 'Straight_Hair', 'Wavy_Hair',\n 'Wearing_Earrings', 'Wearing_Hat', 'Wearing_Lipstick', 'Wearing_Necklace', 'Wearing_Necktie', 'Young',\n ]\n\n return map(str.lower, columns)",
"def minimize(nondist_pairs, transitions, alphabet, accepting_states):\n for pair1 in nondist_pairs:\n for elem in pair1:\n transitions.update({tuple(pair1) : transitions[str(elem)]})\n for pair in nondist_pairs:\n for elem in pair:\n if str(elem) in transitions:\n del transitions[str(elem)]\n new_list = list(transitions.items())\n for elem in new_list:\n for char in alphabet:\n for pair in nondist_pairs:\n if elem[1][char] in pair:\n elem[1][char] = pair\n\n print(\"\\nHere is the DFA Description:\")\n print(f\"Number of states: {len(new_list)}\")\n print(f\"Accepting states: {accepting_states}\")\n\n for elem in new_list:\n print(f\"State: {elem[0]} Transitions: {elem[1]}\")",
"def _identify_substring(self, sentence_slice, fsa_list):\n fsaCounter = -1\n for fsa in fsa_list:\n logger.debug(\"Applying FSA %s\" % fsa.fsaname)\n fsaCounter += 1\n # We first used acceptsShortestSubstringOf(), now we use the longest\n # match. The latter gave a marginally better result, but this was\n # only apparent on one Slink in the Slink regression test so more\n # tests may be needed.\n lenSubstring = fsa.acceptsSubstringOf(sentence_slice)\n if lenSubstring:\n logger.debug(\"FSA %s matched\" % fsa.fsaname)\n return (lenSubstring, fsaCounter)\n return (0, fsaCounter)",
"def double_helix_parser(input_file, output_file, helicies_length = 6, helix_gap = 3, pro_eitherside = 3):\n res_no_l = [] # for residue names \n res_name_l = [] # for amino acid names\n sec_str_l = [] # for sec structure prediction\n\n two_helix_l = [] # contains a list aminoacids (also a list)\n\n # Extracts the residue no, amino acid and secstr and signs to variables\n rx_seq = re.compile(r\"^(\\w+?)\\s+?(\\w+?)\\s+?(\\S)\", re.MULTILINE)\n text = fileread(input_file)\n\n\n # assign the matched groups in the text to the res_no_l, res_name_l and sec_str_str\n for match in rx_seq.finditer(text):\n res_no, res_name, sec_str = match.groups()\n\n res_no_l.append(res_no)\n res_name_l.append(res_name)\n sec_str_l += sec_str\n\n\n # creates dictionaries for each with the chain as the key\n chains_sec_str_d = keychain_value_str(res_no_l, sec_str_l)\n chains_res_no_d = keychain_value_list(res_no_l, res_no_l)\n chains_res_name_d = keychain_value_list(res_no_l, res_name_l)\n\n\n\n # which a Pro is found a in the res_name_d[chain] its secstr in sec_str_d is replaced with a P\n # We will then search for this P later on \n\n counter = 0 \n for chain in chains_res_name_d:\n #print(chains_res_name_d[chain])\n counter = 0 \n for residue in chains_res_name_d[chain]:\n #print(chains_res_name_d[chain][counter])\n if residue == 'PRO':\n chains_sec_str_d[chain] = chains_sec_str_d[chain][:counter] + 'P' + chains_sec_str_d[chain][counter + 1:]\n #print(chains_res_no_d[chain][counter])\n counter += 1 \n\n # only adds if a proline is found in the gap\n # contains 2 groups, the 1st group being the whole helix and group 2 being the gap\n for x in chains_sec_str_d:\n \n regex = \"([h|H]{6,}(?:.?){1}(P)(?:.?){1}[h|H]{6,})\"\n p = re.compile(r\"\" +regex +\"\")\n\n # if one is found it prints out the residues numbers of that helix\n for match in p.finditer(chains_sec_str_d[x]):\n # adjusted to check for Proline around the gap 1 before and 1 after\n two_helix_l += [chains_res_no_d[x][ (match.start(1)) : (match.end(1)) ]]\n match_groups =(match.groups())\n\n # finds the location of the proline for mutation using mutmod\n pro_res = (x + str(match.start(2)))\n print(pro_res + \" :\" + match.group(2))\n\n\n tempstr = \"\"\n\n for protein in two_helix_l:\n for residue in protein:\n tempstr += (residue + \"\\n\")\n tempstr +=(\"\\n\")\n\n\n output = open(output_file, 'w')\n output.write(tempstr)\n output.close()\n #print('#####################')\n #print(tempstr)\n #print('#####################')",
"def level_two_graphs_titles(front_end_name : str, back_end_name : str) -> str:\n return ([\"IPC Degradation (LEVEL ONE)\", \"STALLS on TOTAL (LEVEL ONE)\", \"IPC Degradation on TOTAL (LEVEL TWO)\", \"STALLS on TOTAL (LEVEL TWO)\",\n \"STALLS on \" + front_end_name + \" (LEVEL TWO)\", \"STALLS on \" + back_end_name + \" (LEVEL TWO)\"])",
"def process_dual_diagrams(self):\n ags_net=self.dic_attr['ags_net']\n form_orig_net=self.dic_attr['form_orig_net']\n force_orig_net=self.dic_attr['force_orig_net']\n map_edg_orig_dic=self.dic_attr['map_edg_orig_dic']\n q_c=self.dic_attr['q_c'] # force_densities, based on dic_attr['edg_dic'] indeces(indeces of original ags_net)\n edg_dic=self.dic_attr['edg_dic'] # the dictionary with original indeces\n\n # map the original edges to their forces\n old_edg_f_dic={} # {old_edg:f}\n for ind, edg in edg_dic.items():\n old_q=round(q_c[ind][0], 1)\n old_len=hf.edge_length(ags_net, edg)\n old_edg_f_dic[edg]=(old_q*old_len).item() # .item() to make it reabale in ironpytho (numpyfloat64>>float)\n \n # update the dual edge mapping (removing repetative vertices of force)\n map_edg_temp_dic=hf.update_dual_mapping_1(force_orig_net, map_edg_orig_dic)\n\n # update the dual edge mapping\n map_edg_dic, new_edg_f_dic=hf.update_dual_mapping_2(form_orig_net, map_edg_temp_dic, old_edg_f_dic)\n\n # make a new form_net (without aligned edges)\n form_net=hf.make_new_network(form_orig_net, list(map_edg_dic.keys()))\n\n # make a new dual (force) network without repetative egdes and vertices\n force_net=hf.make_new_network(force_orig_net, list(map_edg_dic.values()))\n\n # rotate force_net 90 degrees\n ANG=np.pi/2.0\n force_90_net=hf.rotate_dual(force_net , ANG)\n\n # dictionary of dual vertices\n dual_ver_dic={}\n for key in force_net.nodes():\n dual_ver_dic[key]=force_net.node_coordinates(key)\n\n # ### save the data to draw form and force diagrams in Rhino ###\n with open(os.path.join(BASEDIR, 'map_edg_dic.p'), 'wb') as fp:\n pickle.dump(map_edg_dic, fp, protocol=2)\n with open(os.path.join(BASEDIR, 'new_edg_f_dic.p'), 'wb') as fp:\n pickle.dump(new_edg_f_dic, fp, protocol=2)\n with open(os.path.join(BASEDIR, 'dual_ver_dic.p'), 'wb') as fp:\n pickle.dump(dual_ver_dic, fp, protocol=2) \n\n self.dic_attr['map_edg_dic']=map_edg_dic\n self.dic_attr['form_net']=form_net\n self.dic_attr['force_net']=force_net\n self.dic_attr['force_90_net']=force_90_net\n self.dic_attr['new_edg_f_dic']=new_edg_f_dic # {new_edg:f} ",
"def infer_2d_links(self):\n if self.exch_to_2d_link is None:\n self.infer_2d_elements() \n poi0=self.pointers-1\n\n # map 0-based exchange index to 0-based link index\n exch_to_2d_link=np.zeros(self.n_exch_x+self.n_exch_y,[('link','i4'),\n ('sgn','i4')])\n exch_to_2d_link['link']=-1\n\n # track some info about links\n links=[] # elt_from,elt_to\n mapped=dict() # (src_2d, dest_2d) => link idx\n\n # hmm - if there are multiple boundary exchanges coming into the\n # same segment, how can those be differentiated? probably it's just\n # up to the sub-implementations to make the distinction.\n # so here they will get lumped together, but the datastructure should\n # allow for them to be distinct.\n\n seg_bc_count=defaultdict(int)\n \n for exch_i,(a,b,_,_) in enumerate(poi0[:self.n_exch_x+self.n_exch_y]):\n if a<0 and b<0:\n # probably DFM writing dense exchanges information for segment\n # which don't exist. I think it's safest to just not include these\n # at all.\n continue\n \n if a>=0:\n a2d=self.seg_to_2d_element[a]\n else:\n # this is a source of the problem mentioned above, since\n # we throw away an unique identity of distinct boundary\n # exchanges here.\n # a2d=-1 # ??\n # instead, count up how many bc exchanges hit this segment\n # first one will be -1, but we can see distinct additional\n # exchanges as -2, -3, etc.\n # the 'b' here is because we are labeling boundary a values\n # with the count of bcs entering b.\n seg_bc_count[b]+=1\n a2d=-seg_bc_count[b]\n if b>=0:\n b2d=self.seg_to_2d_element[b]\n else:\n #b2d=-1 # ??\n # see above comments for a\n seg_bc_count[a]+=1\n b2d=-seg_bc_count[a]\n\n # cruft -- just a reminder, now tested with a,b above.\n #if a2d<0 and b2d<0:\n # # probably DFM writing dense exchanges information for segment\n # # which don't exist. I think it's safest to just not include these\n # # at all.\n # continue\n \n if (b2d,a2d) in mapped:\n exch_to_2d_link['link'][exch_i] = mapped[(b2d,a2d)]\n exch_to_2d_link['sgn'][exch_i]=-1\n else:\n k=(a2d,b2d)\n if k not in mapped:\n mapped[k]=len(links)\n links.append( [a2d,b2d] )\n exch_to_2d_link['link'][exch_i] = mapped[k]\n exch_to_2d_link['sgn'][exch_i]=1\n\n links=np.array(links)\n n_2d_links=len(links)\n\n # Bit of a sanity warning on multiple boundary exchanges involving the\n # same segment - this would indicate that there should be multiple 2D\n # links into that segment, but this generic code doesn't have a robust\n # way to deal with that.\n if 1:\n # indexes of which links are boundary\n bc_links=np.nonzero( links[:,0] < 0 )[0]\n\n for bc_link in bc_links:\n # index of which exchanges map to this link\n exchs=np.nonzero( exch_to_2d_link['link']==bc_link )[0]\n # link id, sgn for each of those exchanges\n ab=exch_to_2d_link[exchs]\n # find the internal segments for each of those exchanges\n segs=np.zeros(len(ab),'i4')\n sel0=exch_to_2d_link['sgn'][exchs]>0 # regular order\n segs[sel0]=poi0[exchs[sel0],1]\n if np.any(~sel0):\n # including checking for weirdness\n self.log.warning(\"Some exchanges had to be flipped when flattening to 2D links\")\n segs[~sel0]=poi0[exchs[~sel0],0]\n # And finally, are there any duplicates into the same segment? i.e. 
a segment\n # which has multiple boundary exchanges which we have failed to distinguish (since\n # in this generic implementation we have little info for distinguishing them).\n # note that in the case of suntans output, this is possible, but if it has been\n # mapped from multiple domains to a global domain, those exchanges have probably\n # already been combined.\n if len(np.unique(segs)) < len(segs):\n self.log.warning(\"In flattening exchanges to links, link %d has ambiguous multiple exchanges for the same segment\"%bc_link)\n\n self.exch_to_2d_link=exch_to_2d_link\n self.links=links\n self.n_2d_links=n_2d_links",
"def get_feat_desc(ibs, fid_list):\n desc_list = ibs.db.get(FEATURE_TABLE, ('feature_sifts',), fid_list)\n return desc_list",
"def load_stereo_chemical_props() -> Tuple[Mapping[str, List[Bond]],\n Mapping[str, List[Bond]],\n Mapping[str, List[BondAngle]]]:\n # stereo_chemical_props_path = (\n # 'alphafold/common/stereo_chemical_props.txt')\n # with open(stereo_chemical_props_path, 'rt') as f:\n # stereo_chemical_props = f.read()\n stereo_chemical_props='''Bond\t\t\tResidue\t\tMean\t\tStdDev\nCA-CB\t\t\tALA\t\t1.520\t\t0.021\nN-CA\t\t\tALA\t\t1.459\t\t0.020\nCA-C\t\t\tALA\t\t1.525\t\t0.026\nC-O\t\t\tALA\t\t1.229\t\t0.019\nCA-CB\t\t\tARG\t\t1.535\t\t0.022\nCB-CG\t\t\tARG\t\t1.521\t\t0.027\nCG-CD\t\t\tARG\t\t1.515\t\t0.025\nCD-NE\t\t\tARG\t\t1.460\t\t0.017\nNE-CZ\t\t\tARG\t\t1.326\t\t0.013\nCZ-NH1\t\t\tARG\t\t1.326\t\t0.013\nCZ-NH2\t\t\tARG\t\t1.326\t\t0.013\nN-CA\t\t\tARG\t\t1.459\t\t0.020\nCA-C\t\t\tARG\t\t1.525\t\t0.026\nC-O\t\t\tARG\t\t1.229\t\t0.019\nCA-CB\t\t\tASN\t\t1.527\t\t0.026\nCB-CG\t\t\tASN\t\t1.506\t\t0.023\nCG-OD1\t\t\tASN\t\t1.235\t\t0.022\nCG-ND2\t\t\tASN\t\t1.324\t\t0.025\nN-CA\t\t\tASN\t\t1.459\t\t0.020\nCA-C\t\t\tASN\t\t1.525\t\t0.026\nC-O\t\t\tASN\t\t1.229\t\t0.019\nCA-CB\t\t\tASP\t\t1.535\t\t0.022\nCB-CG\t\t\tASP\t\t1.513\t\t0.021\nCG-OD1\t\t\tASP\t\t1.249\t\t0.023\nCG-OD2\t\t\tASP\t\t1.249\t\t0.023\nN-CA\t\t\tASP\t\t1.459\t\t0.020\nCA-C\t\t\tASP\t\t1.525\t\t0.026\nC-O\t\t\tASP\t\t1.229\t\t0.019\nCA-CB\t\t\tCYS\t\t1.526\t\t0.013\nCB-SG\t\t\tCYS\t\t1.812\t\t0.016\nN-CA\t\t\tCYS\t\t1.459\t\t0.020\nCA-C\t\t\tCYS\t\t1.525\t\t0.026\nC-O\t\t\tCYS\t\t1.229\t\t0.019\nCA-CB\t\t\tGLU\t\t1.535\t\t0.022\nCB-CG\t\t\tGLU\t\t1.517\t\t0.019\nCG-CD\t\t\tGLU\t\t1.515\t\t0.015\nCD-OE1\t\t\tGLU\t\t1.252\t\t0.011\nCD-OE2\t\t\tGLU\t\t1.252\t\t0.011\nN-CA\t\t\tGLU\t\t1.459\t\t0.020\nCA-C\t\t\tGLU\t\t1.525\t\t0.026\nC-O\t\t\tGLU\t\t1.229\t\t0.019\nCA-CB\t\t\tGLN\t\t1.535\t\t0.022\nCB-CG\t\t\tGLN\t\t1.521\t\t0.027\nCG-CD\t\t\tGLN\t\t1.506\t\t0.023\nCD-OE1\t\t\tGLN\t\t1.235\t\t0.022\nCD-NE2\t\t\tGLN\t\t1.324\t\t0.025\nN-CA\t\t\tGLN\t\t1.459\t\t0.020\nCA-C\t\t\tGLN\t\t1.525\t\t0.026\nC-O\t\t\tGLN\t\t1.229\t\t0.019\nN-CA\t\t\tGLY\t\t1.456\t\t0.015\nCA-C\t\t\tGLY\t\t1.514\t\t0.016\nC-O\t\t\tGLY\t\t1.232\t\t0.016\nCA-CB\t\t\tHIS\t\t1.535\t\t0.022\nCB-CG\t\t\tHIS\t\t1.492\t\t0.016\nCG-ND1\t\t\tHIS\t\t1.369\t\t0.015\nCG-CD2\t\t\tHIS\t\t1.353\t\t0.017\nND1-CE1\t\t\tHIS\t\t1.343\t\t0.025\nCD2-NE2\t\t\tHIS\t\t1.415\t\t0.021\nCE1-NE2\t\t\tHIS\t\t1.322\t\t0.023\nN-CA\t\t\tHIS\t\t1.459\t\t0.020\nCA-C\t\t\tHIS\t\t1.525\t\t0.026\nC-O\t\t\tHIS\t\t1.229\t\t0.019\nCA-CB\t\t\tILE\t\t1.544\t\t0.023\nCB-CG1\t\t\tILE\t\t1.536\t\t0.028\nCB-CG2\t\t\tILE\t\t1.524\t\t0.031\nCG1-CD1\t\t\tILE\t\t1.500\t\t0.069\nN-CA\t\t\tILE\t\t1.459\t\t0.020\nCA-C\t\t\tILE\t\t1.525\t\t0.026\nC-O\t\t\tILE\t\t1.229\t\t0.019\nCA-CB\t\t\tLEU\t\t1.533\t\t0.023\nCB-CG\t\t\tLEU\t\t1.521\t\t0.029\nCG-CD1\t\t\tLEU\t\t1.514\t\t0.037\nCG-CD2\t\t\tLEU\t\t1.514\t\t0.037\nN-CA\t\t\tLEU\t\t1.459\t\t0.020\nCA-C\t\t\tLEU\t\t1.525\t\t0.026\nC-O\t\t\tLEU\t\t1.229\t\t0.019\nCA-CB\t\t\tLYS\t\t1.535\t\t0.022\nCB-CG\t\t\tLYS\t\t1.521\t\t0.027\nCG-CD\t\t\tLYS\t\t1.520\t\t0.034\nCD-CE\t\t\tLYS\t\t1.508\t\t0.025\nCE-NZ\t\t\tLYS\t\t1.486\t\t0.025\nN-CA\t\t\tLYS\t\t1.459\t\t0.020\nCA-C\t\t\tLYS\t\t1.525\t\t0.026\nC-O\t\t\tLYS\t\t1.229\t\t0.019\nCA-CB\t\t\tMET\t\t1.535\t\t0.022\nCB-CG\t\t\tMET\t\t1.509\t\t0.032\nCG-SD\t\t\tMET\t\t1.807\t\t0.026\nSD-CE\t\t\tMET\t\t1.774\t\t0.056\nN-CA\t\t\tMET\t\t1.459\t\t0.020\nCA-C\t\t\tMET\t\t1.525\t\t0.026\nC-O\t\t\tMET\t\t1.229\t\t0.019\nCA-CB\t\t\tPHE\t\t1.535\t\t0.022\nCB-CG\t\t\tPHE\t\t1.509\t\t0.017\nCG-CD1\t\t\tPHE\t\t1.383\t\t0.015\nCG-CD2\t\t\tPHE
\t\t1.383\t\t0.015\nCD1-CE1\t\t\tPHE\t\t1.388\t\t0.020\nCD2-CE2\t\t\tPHE\t\t1.388\t\t0.020\nCE1-CZ\t\t\tPHE\t\t1.369\t\t0.019\nCE2-CZ\t\t\tPHE\t\t1.369\t\t0.019\nN-CA\t\t\tPHE\t\t1.459\t\t0.020\nCA-C\t\t\tPHE\t\t1.525\t\t0.026\nC-O\t\t\tPHE\t\t1.229\t\t0.019\nCA-CB\t\t\tPRO\t\t1.531\t\t0.020\nCB-CG\t\t\tPRO\t\t1.495\t\t0.050\nCG-CD\t\t\tPRO\t\t1.502\t\t0.033\nCD-N\t\t\tPRO\t\t1.474\t\t0.014\nN-CA\t\t\tPRO\t\t1.468\t\t0.017\nCA-C\t\t\tPRO\t\t1.524\t\t0.020\nC-O\t\t\tPRO\t\t1.228\t\t0.020\nCA-CB\t\t\tSER\t\t1.525\t\t0.015\nCB-OG\t\t\tSER\t\t1.418\t\t0.013\nN-CA\t\t\tSER\t\t1.459\t\t0.020\nCA-C\t\t\tSER\t\t1.525\t\t0.026\nC-O\t\t\tSER\t\t1.229\t\t0.019\nCA-CB\t\t\tTHR\t\t1.529\t\t0.026\nCB-OG1\t\t\tTHR\t\t1.428\t\t0.020\nCB-CG2\t\t\tTHR\t\t1.519\t\t0.033\nN-CA\t\t\tTHR\t\t1.459\t\t0.020\nCA-C\t\t\tTHR\t\t1.525\t\t0.026\nC-O\t\t\tTHR\t\t1.229\t\t0.019\nCA-CB\t\t\tTRP\t\t1.535\t\t0.022\nCB-CG\t\t\tTRP\t\t1.498\t\t0.018\nCG-CD1\t\t\tTRP\t\t1.363\t\t0.014\nCG-CD2\t\t\tTRP\t\t1.432\t\t0.017\nCD1-NE1\t\t\tTRP\t\t1.375\t\t0.017\nNE1-CE2\t\t\tTRP\t\t1.371\t\t0.013\nCD2-CE2\t\t\tTRP\t\t1.409\t\t0.012\nCD2-CE3\t\t\tTRP\t\t1.399\t\t0.015\nCE2-CZ2\t\t\tTRP\t\t1.393\t\t0.017\nCE3-CZ3\t\t\tTRP\t\t1.380\t\t0.017\nCZ2-CH2\t\t\tTRP\t\t1.369\t\t0.019\nCZ3-CH2\t\t\tTRP\t\t1.396\t\t0.016\nN-CA\t\t\tTRP\t\t1.459\t\t0.020\nCA-C\t\t\tTRP\t\t1.525\t\t0.026\nC-O\t\t\tTRP\t\t1.229\t\t0.019\nCA-CB\t\t\tTYR\t\t1.535\t\t0.022\nCB-CG\t\t\tTYR\t\t1.512\t\t0.015\nCG-CD1\t\t\tTYR\t\t1.387\t\t0.013\nCG-CD2\t\t\tTYR\t\t1.387\t\t0.013\nCD1-CE1\t\t\tTYR\t\t1.389\t\t0.015\nCD2-CE2\t\t\tTYR\t\t1.389\t\t0.015\nCE1-CZ\t\t\tTYR\t\t1.381\t\t0.013\nCE2-CZ\t\t\tTYR\t\t1.381\t\t0.013\nCZ-OH\t\t\tTYR\t\t1.374\t\t0.017\nN-CA\t\t\tTYR\t\t1.459\t\t0.020\nCA-C\t\t\tTYR\t\t1.525\t\t0.026\nC-O\t\t\tTYR\t\t1.229\t\t0.019\nCA-CB\t\t\tVAL\t\t1.543\t\t0.021\nCB-CG1\t\t\tVAL\t\t1.524\t\t0.021\nCB-CG2\t\t\tVAL\t\t1.524\t\t0.021\nN-CA\t\t\tVAL\t\t1.459\t\t0.020\nCA-C\t\t\tVAL\t\t1.525\t\t0.026\nC-O\t\t\tVAL\t\t1.229\t\t0.019\n-\n\nAngle\t\t\tResidue\t\tMean\t\tStdDev\nN-CA-CB\t\t\tALA\t\t110.1\t\t1.4\nCB-CA-C\t\t\tALA\t\t110.1\t\t1.5\nN-CA-C\t\t\tALA\t\t111.0\t\t2.7\nCA-C-O\t\t\tALA\t\t120.1\t\t2.1\nN-CA-CB\t\t\tARG\t\t110.6\t\t1.8\nCB-CA-C\t\t\tARG\t\t110.4\t\t2.0\nCA-CB-CG\t\tARG\t\t113.4\t\t2.2\nCB-CG-CD\t\tARG\t\t111.6\t\t2.6\nCG-CD-NE\t\tARG\t\t111.8\t\t2.1\nCD-NE-CZ\t\tARG\t\t123.6\t\t1.4\nNE-CZ-NH1\t\tARG\t\t120.3\t\t0.5\nNE-CZ-NH2\t\tARG\t\t120.3\t\t0.5\nNH1-CZ-NH2\t\tARG\t\t119.4\t\t1.1\nN-CA-C\t\t\tARG\t\t111.0\t\t2.7\nCA-C-O\t\t\tARG\t\t120.1\t\t2.1\nN-CA-CB\t\t\tASN\t\t110.6\t\t1.8\nCB-CA-C\t\t\tASN\t\t110.4\t\t2.0\nCA-CB-CG\t\tASN\t\t113.4\t\t2.2\nCB-CG-ND2\t\tASN\t\t116.7\t\t2.4\nCB-CG-OD1\t\tASN\t\t121.6\t\t2.0\nND2-CG-OD1\t\tASN\t\t121.9\t\t2.3\nN-CA-C\t\t\tASN\t\t111.0\t\t2.7\nCA-C-O\t\t\tASN\t\t120.1\t\t2.1\nN-CA-CB\t\t\tASP\t\t110.6\t\t1.8\nCB-CA-C\t\t\tASP\t\t110.4\t\t2.0\nCA-CB-CG\t\tASP\t\t113.4\t\t2.2\nCB-CG-OD1\t\tASP\t\t118.3\t\t0.9\nCB-CG-OD2\t\tASP\t\t118.3\t\t0.9\nOD1-CG-OD2\t\tASP\t\t123.3\t\t1.9\nN-CA-C\t\t\tASP\t\t111.0\t\t2.7\nCA-C-O\t\t\tASP\t\t120.1\t\t2.1\nN-CA-CB\t\t\tCYS\t\t110.8\t\t1.5\nCB-CA-C\t\t\tCYS\t\t111.5\t\t1.2\nCA-CB-SG\t\tCYS\t\t114.2\t\t1.1\nN-CA-C\t\t\tCYS\t\t111.0\t\t2.7\nCA-C-O\t\t\tCYS\t\t120.1\t\t2.1\nN-CA-CB\t\t\tGLU\t\t110.6\t\t1.8\nCB-CA-C\t\t\tGLU\t\t110.4\t\t2.0\nCA-CB-CG\t\tGLU\t\t113.4\t\t2.2\nCB-CG-CD\t\tGLU\t\t114.2\t\t2.7\nCG-CD-OE1\t\tGLU\t\t118.3\t\t2.0\nCG-CD-OE2\t\tGLU\t\t118.3\t\t2.0\nOE1-CD-OE2\t\tGLU\t\t123.3\t\t1.2\nN-CA-C\t\t\tGLU\t\t111.0\t\t2.7\nCA-C-O\t\t\tGLU\t\t120.1\t\t2.1\n
N-CA-CB\t\t\tGLN\t\t110.6\t\t1.8\nCB-CA-C\t\t\tGLN\t\t110.4\t\t2.0\nCA-CB-CG\t\tGLN\t\t113.4\t\t2.2\nCB-CG-CD\t\tGLN\t\t111.6\t\t2.6\nCG-CD-OE1\t\tGLN\t\t121.6\t\t2.0\nCG-CD-NE2\t\tGLN\t\t116.7\t\t2.4\nOE1-CD-NE2\t\tGLN\t\t121.9\t\t2.3\nN-CA-C\t\t\tGLN\t\t111.0\t\t2.7\nCA-C-O\t\t\tGLN\t\t120.1\t\t2.1\nN-CA-C\t\t\tGLY\t\t113.1\t\t2.5\nCA-C-O\t\t\tGLY\t\t120.6\t\t1.8\nN-CA-CB\t\t\tHIS\t\t110.6\t\t1.8\nCB-CA-C\t\t\tHIS\t\t110.4\t\t2.0\nCA-CB-CG\t\tHIS\t\t113.6\t\t1.7\nCB-CG-ND1\t\tHIS\t\t123.2\t\t2.5\nCB-CG-CD2\t\tHIS\t\t130.8\t\t3.1\nCG-ND1-CE1\t\tHIS\t\t108.2\t\t1.4\nND1-CE1-NE2\t\tHIS\t\t109.9\t\t2.2\nCE1-NE2-CD2\t\tHIS\t\t106.6\t\t2.5\nNE2-CD2-CG\t\tHIS\t\t109.2\t\t1.9\nCD2-CG-ND1\t\tHIS\t\t106.0\t\t1.4\nN-CA-C\t\t\tHIS\t\t111.0\t\t2.7\nCA-C-O\t\t\tHIS\t\t120.1\t\t2.1\nN-CA-CB\t\t\tILE\t\t110.8\t\t2.3\nCB-CA-C\t\t\tILE\t\t111.6\t\t2.0\nCA-CB-CG1\t\tILE\t\t111.0\t\t1.9\nCB-CG1-CD1\t\tILE\t\t113.9\t\t2.8\nCA-CB-CG2\t\tILE\t\t110.9\t\t2.0\nCG1-CB-CG2\t\tILE\t\t111.4\t\t2.2\nN-CA-C\t\t\tILE\t\t111.0\t\t2.7\nCA-C-O\t\t\tILE\t\t120.1\t\t2.1\nN-CA-CB\t\t\tLEU\t\t110.4\t\t2.0\nCB-CA-C\t\t\tLEU\t\t110.2\t\t1.9\nCA-CB-CG\t\tLEU\t\t115.3\t\t2.3\nCB-CG-CD1\t\tLEU\t\t111.0\t\t1.7\nCB-CG-CD2\t\tLEU\t\t111.0\t\t1.7\nCD1-CG-CD2\t\tLEU\t\t110.5\t\t3.0\nN-CA-C\t\t\tLEU\t\t111.0\t\t2.7\nCA-C-O\t\t\tLEU\t\t120.1\t\t2.1\nN-CA-CB\t\t\tLYS\t\t110.6\t\t1.8\nCB-CA-C\t\t\tLYS\t\t110.4\t\t2.0\nCA-CB-CG\t\tLYS\t\t113.4\t\t2.2\nCB-CG-CD\t\tLYS\t\t111.6\t\t2.6\nCG-CD-CE\t\tLYS\t\t111.9\t\t3.0\nCD-CE-NZ\t\tLYS\t\t111.7\t\t2.3\nN-CA-C\t\t\tLYS\t\t111.0\t\t2.7\nCA-C-O\t\t\tLYS\t\t120.1\t\t2.1\nN-CA-CB\t\t\tMET\t\t110.6\t\t1.8\nCB-CA-C\t\t\tMET\t\t110.4\t\t2.0\nCA-CB-CG\t\tMET\t\t113.3\t\t1.7\nCB-CG-SD\t\tMET\t\t112.4\t\t3.0\nCG-SD-CE\t\tMET\t\t100.2\t\t1.6\nN-CA-C\t\t\tMET\t\t111.0\t\t2.7\nCA-C-O\t\t\tMET\t\t120.1\t\t2.1\nN-CA-CB\t\t\tPHE\t\t110.6\t\t1.8\nCB-CA-C\t\t\tPHE\t\t110.4\t\t2.0\nCA-CB-CG\t\tPHE\t\t113.9\t\t2.4\nCB-CG-CD1\t\tPHE\t\t120.8\t\t0.7\nCB-CG-CD2\t\tPHE\t\t120.8\t\t0.7\nCD1-CG-CD2\t\tPHE\t\t118.3\t\t1.3\nCG-CD1-CE1\t\tPHE\t\t120.8\t\t1.1\nCG-CD2-CE2\t\tPHE\t\t120.8\t\t1.1\nCD1-CE1-CZ\t\tPHE\t\t120.1\t\t1.2\nCD2-CE2-CZ\t\tPHE\t\t120.1\t\t1.2\nCE1-CZ-CE2\t\tPHE\t\t120.0\t\t1.8\nN-CA-C\t\t\tPHE\t\t111.0\t\t2.7\nCA-C-O\t\t\tPHE\t\t120.1\t\t2.1\nN-CA-CB\t\t\tPRO\t\t103.3\t\t1.2\nCB-CA-C\t\t\tPRO\t\t111.7\t\t2.1\nCA-CB-CG\t\tPRO\t\t104.8\t\t1.9\nCB-CG-CD\t\tPRO\t\t106.5\t\t3.9\nCG-CD-N\t\t\tPRO\t\t103.2\t\t1.5\nCA-N-CD\t\t\tPRO\t\t111.7\t\t1.4\nN-CA-C\t\t\tPRO\t\t112.1\t\t2.6\nCA-C-O\t\t\tPRO\t\t120.2\t\t2.4\nN-CA-CB\t\t\tSER\t\t110.5\t\t1.5\nCB-CA-C\t\t\tSER\t\t110.1\t\t1.9\nCA-CB-OG\t\tSER\t\t111.2\t\t2.7\nN-CA-C\t\t\tSER\t\t111.0\t\t2.7\nCA-C-O\t\t\tSER\t\t120.1\t\t2.1\nN-CA-CB\t\t\tTHR\t\t110.3\t\t1.9\nCB-CA-C\t\t\tTHR\t\t111.6\t\t2.7\nCA-CB-OG1\t\tTHR\t\t109.0\t\t2.1\nCA-CB-CG2\t\tTHR\t\t112.4\t\t1.4\nOG1-CB-CG2\t\tTHR\t\t110.0\t\t2.3\nN-CA-C\t\t\tTHR\t\t111.0\t\t2.7\nCA-C-O\t\t\tTHR\t\t120.1\t\t2.1\nN-CA-CB\t\t\tTRP\t\t110.6\t\t1.8\nCB-CA-C\t\t\tTRP\t\t110.4\t\t2.0\nCA-CB-CG\t\tTRP\t\t113.7\t\t1.9\nCB-CG-CD1\t\tTRP\t\t127.0\t\t1.3\nCB-CG-CD2\t\tTRP\t\t126.6\t\t1.3\nCD1-CG-CD2\t\tTRP\t\t106.3\t\t0.8\nCG-CD1-NE1\t\tTRP\t\t110.1\t\t1.0\nCD1-NE1-CE2\t\tTRP\t\t109.0\t\t0.9\nNE1-CE2-CD2\t\tTRP\t\t107.3\t\t1.0\nCE2-CD2-CG\t\tTRP\t\t107.3\t\t0.8\nCG-CD2-CE3\t\tTRP\t\t133.9\t\t0.9\nNE1-CE2-CZ2\t\tTRP\t\t130.4\t\t1.1\nCE3-CD2-CE2\t\tTRP\t\t118.7\t\t1.2\nCD2-CE2-CZ2\t\tTRP\t\t122.3\t\t1.2\nCE2-CZ2-CH2\t\tTRP\t\t117.4\t\t1.0\nCZ2-CH2-CZ3\t\tTRP\t\t121.6\t\t1.2\nCH2-CZ3-CE3\t\tTRP\t\t121.2\t\t1.1\nCZ3-CE3-CD2\t\tTRP\
t\t118.8\t\t1.3\nN-CA-C\t\t\tTRP\t\t111.0\t\t2.7\nCA-C-O\t\t\tTRP\t\t120.1\t\t2.1\nN-CA-CB\t\t\tTYR\t\t110.6\t\t1.8\nCB-CA-C\t\t\tTYR\t\t110.4\t\t2.0\nCA-CB-CG\t\tTYR\t\t113.4\t\t1.9\nCB-CG-CD1\t\tTYR\t\t121.0\t\t0.6\nCB-CG-CD2\t\tTYR\t\t121.0\t\t0.6\nCD1-CG-CD2\t\tTYR\t\t117.9\t\t1.1\nCG-CD1-CE1\t\tTYR\t\t121.3\t\t0.8\nCG-CD2-CE2\t\tTYR\t\t121.3\t\t0.8\nCD1-CE1-CZ\t\tTYR\t\t119.8\t\t0.9\nCD2-CE2-CZ\t\tTYR\t\t119.8\t\t0.9\nCE1-CZ-CE2\t\tTYR\t\t119.8\t\t1.6\nCE1-CZ-OH\t\tTYR\t\t120.1\t\t2.7\nCE2-CZ-OH\t\tTYR\t\t120.1\t\t2.7\nN-CA-C\t\t\tTYR\t\t111.0\t\t2.7\nCA-C-O\t\t\tTYR\t\t120.1\t\t2.1\nN-CA-CB\t\t\tVAL\t\t111.5\t\t2.2\nCB-CA-C\t\t\tVAL\t\t111.4\t\t1.9\nCA-CB-CG1\t\tVAL\t\t110.9\t\t1.5\nCA-CB-CG2\t\tVAL\t\t110.9\t\t1.5\nCG1-CB-CG2\t\tVAL\t\t110.9\t\t1.6\nN-CA-C\t\t\tVAL\t\t111.0\t\t2.7\nCA-C-O\t\t\tVAL\t\t120.1\t\t2.1\n-\n\nNon-bonded distance Minimum Dist Tolerance\nC-C 3.4 1.5\nC-N 3.25 1.5\nC-S 3.5 1.5\nC-O 3.22 1.5\nN-N 3.1 1.5\nN-S 3.35 1.5\nN-O 3.07 1.5\nO-S 3.32 1.5\nO-O 3.04 1.5\nS-S 2.03 1.0\n-'''\n lines_iter = iter(stereo_chemical_props.splitlines())\n # Load bond lengths.\n residue_bonds = {}\n next(lines_iter) # Skip header line.\n for line in lines_iter:\n if line.strip() == '-':\n break\n bond, resname, length, stddev = line.split()\n atom1, atom2 = bond.split('-')\n if resname not in residue_bonds:\n residue_bonds[resname] = []\n residue_bonds[resname].append(\n Bond(atom1, atom2, float(length), float(stddev)))\n residue_bonds['UNK'] = []\n\n # Load bond angles.\n residue_bond_angles = {}\n next(lines_iter) # Skip empty line.\n next(lines_iter) # Skip header line.\n for line in lines_iter:\n if line.strip() == '-':\n break\n bond, resname, angle_degree, stddev_degree = line.split()\n atom1, atom2, atom3 = bond.split('-')\n if resname not in residue_bond_angles:\n residue_bond_angles[resname] = []\n residue_bond_angles[resname].append(\n BondAngle(atom1, atom2, atom3,\n float(angle_degree) / 180. * np.pi,\n float(stddev_degree) / 180. * np.pi))\n residue_bond_angles['UNK'] = []\n\n def make_bond_key(atom1_name, atom2_name):\n \"\"\"Unique key to lookup bonds.\"\"\"\n return '-'.join(sorted([atom1_name, atom2_name]))\n\n # Translate bond angles into distances (\"virtual bonds\").\n residue_virtual_bonds = {}\n for resname, bond_angles in residue_bond_angles.items():\n # Create a fast lookup dict for bond lengths.\n bond_cache = {}\n for b in residue_bonds[resname]:\n bond_cache[make_bond_key(b.atom1_name, b.atom2_name)] = b\n residue_virtual_bonds[resname] = []\n for ba in bond_angles:\n bond1 = bond_cache[make_bond_key(ba.atom1_name, ba.atom2_name)]\n bond2 = bond_cache[make_bond_key(ba.atom2_name, ba.atom3name)]\n\n # Compute distance between atom1 and atom3 using the law of cosines\n # c^2 = a^2 + b^2 - 2ab*cos(gamma).\n gamma = ba.angle_rad\n length = np.sqrt(bond1.length**2 + bond2.length**2\n - 2 * bond1.length * bond2.length * np.cos(gamma))\n\n # Propagation of uncertainty assuming uncorrelated errors.\n dl_outer = 0.5 / length\n dl_dgamma = (2 * bond1.length * bond2.length * np.sin(gamma)) * dl_outer\n dl_db1 = (2 * bond1.length - 2 * bond2.length * np.cos(gamma)) * dl_outer\n dl_db2 = (2 * bond2.length - 2 * bond1.length * np.cos(gamma)) * dl_outer\n stddev = np.sqrt((dl_dgamma * ba.stddev)**2 +\n (dl_db1 * bond1.stddev)**2 +\n (dl_db2 * bond2.stddev)**2)\n residue_virtual_bonds[resname].append(\n Bond(ba.atom1_name, ba.atom3name, length, stddev))\n\n return (residue_bonds,\n residue_virtual_bonds,\n residue_bond_angles)",
"def format_advertisement(data):\n resolve_dict = {\n # FLAGS AD type\n st_constant.AD_TYPE_FLAGS: 'FLAGS',\n # Service UUID AD types\n st_constant.AD_TYPE_16_BIT_SERV_UUID: '16_BIT_SERV_UUID',\n st_constant.AD_TYPE_16_BIT_SERV_UUID_CMPLT_LIST: '16_BIT_SERV_UUID_CMPLT_LIST',\n st_constant.AD_TYPE_32_BIT_SERV_UUID: '32_BIT_SERV_UUID',\n st_constant.AD_TYPE_32_BIT_SERV_UUID_CMPLT_LIST: '32_BIT_SERV_UUID_CMPLT_LIST',\n st_constant.AD_TYPE_128_BIT_SERV_UUID: '128_BIT_SERV_UUID',\n st_constant.AD_TYPE_128_BIT_SERV_UUID_CMPLT_LIST: '128_BIT_SERV_UUID_CMPLT_LIST',\n # Local name AD types\n st_constant.AD_TYPE_SHORTENED_LOCAL_NAME: 'SHORTENED_LOCAL_NAME',\n st_constant.AD_TYPE_COMPLETE_LOCAL_NAME: 'COMPLETE_LOCAL_NAME',\n # TX power level AD type\n st_constant.AD_TYPE_TX_POWER_LEVEL: 'TX_POWER_LEVEL',\n # Class of device\n st_constant.AD_TYPE_CLASS_OF_DEVICE: 'CLASS_OF_DEVICE',\n # Security manager TK value AD type\n st_constant.AD_TYPE_SEC_MGR_TK_VALUE: 'SEC_MGR_TK_VALUE',\n # Security manager OOB flags\n st_constant.AD_TYPE_SEC_MGR_OOB_FLAGS: 'SEC_MGR_OOB_FLAGS',\n # Slave connection interval AD type\n st_constant.AD_TYPE_SLAVE_CONN_INTERVAL: 'SLAVE_CONN_INTERVAL',\n # Service solicitation UUID list AD types\n st_constant.AD_TYPE_SERV_SOLICIT_16_BIT_UUID_LIST: 'SERV_SOLICIT_16_BIT_UUID_LIST',\n st_constant.AD_TYPE_SERV_SOLICIT_32_BIT_UUID_LIST: 'SERV_SOLICIT_32_BIT_UUID_LIST',\n st_constant.AD_TYPE_SERV_SOLICIT_128_BIT_UUID_LIST: 'SERV_SOLICIT_128_BIT_UUID_LIST',\n # Service data AD type\n st_constant.AD_TYPE_SERVICE_DATA: 'SERVICE_DATA',\n # Manufaturer specific data AD type\n st_constant.AD_TYPE_MANUFACTURER_SPECIFIC_DATA: 'MANUFACTURER_SPECIFIC_DATA'\n }\n offset = 0\n size = len(data)\n advertisement_dict = {}\n while offset < size:\n field_len = int.from_bytes(data[offset:offset + 1], 'little')\n if field_len == 0 or offset + field_len > size:\n return advertisement_dict\n\n field_type = int.from_bytes(data[offset + 1:offset + 2], 'little')\n field_value = data[offset + 2:offset + 2 + field_len - 1]\n\n advertisement_dict.update({resolve_dict[field_type]: field_value})\n\n offset += field_len + 1\n\n return advertisement_dict",
"def __buildFSA(self, list_strings, cap = False):\n\n if cap:\n string2Fsa = self.__stringCap2Fsa\n else:\n string2Fsa = self.__string2Fsa\n\n list_fsa = map(lambda s: string2Fsa(s), list_strings)\n return self.__mergeFSA(list_fsa)",
"def extract_fn_user_descriptions(data):\n\n # trigger_fn2user_desc = collections.defaultdict(set)\n # action_fn2user_desc = collections.defaultdict(set)\n\n # a dict of {template : a dict of {fn:desc set}}\n tf_fn2desc_set_by_template = [collections.defaultdict(set) for _ in range(6)]\n af_fn2desc_set_by_template = [collections.defaultdict(set) for _ in range(6)]\n\n source_data = data['train'] + data['dev']\n\n for item in source_data:\n words = item['words']\n tc, tf, ac, af = item['label_names']\n template_id = None\n tf_desc = None\n af_desc = None\n\n if len(words) < 4:\n continue\n\n # check the templates one by one\n if words[0] == \"if\" and words.count(\"if\") == 1: # template (1),(4)\n if \",\" in words: # template (1)\n template_id = 1\n tf_desc = words[1:words.index(\",\")]\n af_desc = words[words.index(\",\")+1:]\n if af_desc and af_desc[0] == \"then\": # remove the redundant \"then\"\n af_desc = af_desc[1:]\n elif \"then\" in words: # template (4)\n template_id = 4\n tf_desc = words[1:words.index(\"then\")]\n af_desc = words[words.index(\"then\")+1:]\n elif words.count(\"if\") == 1: # template (5)\n template_id = 5\n tf_desc = words[words.index(\"if\")+1:]\n af_desc = words[:words.index(\"if\")]\n elif \"if\" not in words: # others\n if words.count(\"when\") == 1: # template (3),(6)\n if words[0] == \"when\" and \",\" in words:\n template_id = 6\n tf_desc = words[1:words.index(\",\")]\n af_desc = words[words.index(\",\")+1:]\n elif words[0] != \"when\" and \",\" not in words:\n template_id = 3\n tf_desc = words[words.index(\"when\")+1:]\n af_desc = words[:words.index(\"when\")]\n elif \"when\" not in words: # template (2)\n phrases = {\"every time\", \"every year\", \"every month\", \"every week\", \"every day\", \"every hour\"}\n picked_phrase = None\n picked_phrase_idx = None\n for word_idx in range(len(words)-2): # at least one token following the phrase\n phrase = words[word_idx] + \" \" + words[word_idx+1]\n if phrase in phrases:\n picked_phrase = phrase\n picked_phrase_idx = word_idx\n break\n if picked_phrase and picked_phrase_idx: # idx must > 0\n template_id = 2\n tf_desc = words[picked_phrase_idx:]\n af_desc = words[:picked_phrase_idx]\n\n if tf_desc and af_desc:\n tf_fn = \"%s.%s\" % (tc.lower().strip(), tf.lower().strip())\n af_fn = \"%s.%s\" % (ac.lower().strip(), af.lower().strip())\n tf_fn2desc_set_by_template[template_id-1][tf_fn].add(\" \".join(tf_desc)) # NOTE: be sure to split\n af_fn2desc_set_by_template[template_id-1][af_fn].add(\" \".join(af_desc))\n\n\n # print\n def _print_stats(src):\n for template_id, fn2desc_set in enumerate(src):\n print(\"Template %d: %d functions.\" % (template_id, len(fn2desc_set)))\n samples = random.sample(fn2desc_set.items(), 3)\n for fn, descs in samples:\n print(\"Function: %s:\" % fn)\n for desc in descs:\n print desc.encode('ascii', 'ignore')\n print(\"-\"*10)\n print(\"\")\n\n print(\"Stats of trigger function:\")\n _print_stats(tf_fn2desc_set_by_template)\n print(\"Stats of action function:\")\n _print_stats(af_fn2desc_set_by_template)\n\n return tf_fn2desc_set_by_template, af_fn2desc_set_by_template",
"def tally_cds_descriptions(self):\n self._cds_descriptions_tally = 0\n self._cds_products_tally = 0\n self._cds_functions_tally = 0\n self._cds_notes_tally = 0\n for cds_ftr in self.cds_features:\n if cds_ftr.description != \"\":\n self._cds_descriptions_tally += 1\n if cds_ftr.product != \"\":\n self._cds_products_tally += 1\n if cds_ftr.function != \"\":\n self._cds_functions_tally += 1\n if cds_ftr.note != \"\":\n self._cds_notes_tally += 1",
"def find_substrs12_endchars(sidestr,mainstr,substr1,substr2,delay1=0,delay2=0):\n ## don't use regular expressions re module, which finds only non-overlapping matches\n ## we want to find overlapping matches too.\n substr2len = len(substr2)\n substr1len = len(substr1)\n abs_idx1 = 0 ## mainstr is getting chopped, but we maintain abs index on sidestr\n while True:\n idx2 = mainstr.find(substr2)\n ## find returns -1 if substr2 not found\n if idx2 != -1:\n endcharidx2 = idx2+substr2len+delay2\n ### NOTE: abs_startidx1 is one earlier than definition!!! I think necessary for causality.\n ## put +1 below to switch to definition in Quinn et al 2010\n abs_startidx1 = abs_idx1 + endcharidx2 - substr1len-delay1\n if endcharidx2<len(mainstr): # mainstr Y has characters left?\n if abs_startidx1 >= 0: # sidestr X has sufficient chars before?\n ## sidestr has substr1 before the char to be returned? and mainstr is not over\n ## IMP: below if's first term is the only place directed info enters.\n ## Remove first term below and you get just the entropy of mainstr Y: VERIFIED.\n #print sidestr[abs_startidx1:abs_startidx1+substr1len], substr1, abs_startidx1\n if sidestr[abs_startidx1:abs_startidx1+substr1len]==substr1:\n yield mainstr[endcharidx2]\n else: # reached end of string\n break\n ## chop the mainstr just after the start of substr2,\n ## not after the end, as we want overlapping strings also\n mainstr = mainstr[idx2+1:]\n ## don't chop sidestr as substr1len may be greater than substr2len\n ## in the next iteration, idx2 will be relative, but for sidestr we maintain abs_idx1\n abs_idx1 += idx2+1\n else: # substr2 not found\n break",
"def _distance_max(self, string_1, string_2, len_1, len_2, start, max_distance,\n char_1_costs, prev_char_1_costs):\n char_1_costs = np.asarray([j + 1 if j < max_distance\n else max_distance + 1 for j in range(len_2)])\n len_diff = len_2 - len_1\n j_start_offset = max_distance - len_diff\n j_start = 0\n j_end = max_distance\n char_1 = \" \"\n current_cost = 0\n for i in range(len_1):\n prev_char_1 = char_1\n char_1 = string_1[start + i]\n char_2 = \" \"\n left_char_cost = above_char_cost = i\n next_trans_cost = 0\n # no need to look beyond window of lower right diagonal -\n # max_distance cells (lower right diag is i - len_diff) and the\n # upper left diagonal + max_distance cells (upper left is i)\n j_start += 1 if i > j_start_offset else 0\n j_end += 1 if j_end < len_2 else 0\n for j in range(j_start, j_end):\n this_trans_cost = next_trans_cost\n next_trans_cost = prev_char_1_costs[j]\n # cost of diagonal (substitution)\n prev_char_1_costs[j] = current_cost = left_char_cost\n # left now equals current cost (which will be diagonal at next\n # iteration)\n left_char_cost = char_1_costs[j]\n prev_char_2 = char_2\n char_2 = string_2[start + j]\n if char_1 != char_2:\n # substitution if neither of two conditions below\n if above_char_cost < current_cost:\n current_cost = above_char_cost\n if left_char_cost < current_cost:\n current_cost = left_char_cost\n current_cost += 1\n if (i != 0 and j != 0 and char_1 == prev_char_2\n and prev_char_1 == char_2\n and this_trans_cost + 1 < current_cost):\n current_cost = this_trans_cost + 1 # transposition\n char_1_costs[j] = above_char_cost = current_cost\n if char_1_costs[i + len_diff] > max_distance:\n return -1\n return current_cost if current_cost <= max_distance else -1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A method used for calculating Transition descriptors based on SolventAccessibility of AADs.
|
def CalculateTransitionSolventAccessibility(ProteinSequence):
    # Delegate to the generic CTD Transition routine, using the
    # solvent-accessibility grouping of the amino acids.
    result = CalculateTransition(ProteinSequence, _SolventAccessibility, '_SolventAccessibility')
    return result
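# A minimal usage sketch, assuming the surrounding CTD module (which provides
# CalculateTransition and the _SolventAccessibility grouping) is in scope;
# the sample sequence below is made up purely for illustration.
if __name__ == '__main__':
    sample_sequence = 'ADGCGVGEGTGQGPMCNCMCMKWVYADEDA'
    transition_descriptors = CalculateTransitionSolventAccessibility(sample_sequence)
    # A small dict of transition frequencies between the three
    # solvent-accessibility classes, with keys of the form
    # '_SolventAccessibilityT12', '...T13' and '...T23'.
    print(transition_descriptors)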
|
[
"def describe(self, access, element):\n self._prepare(access)\n # Accumulate the descriptor sets from each ability, then turn into a string.\n tags = set()\n for c in self.abilities:\n tags |= c.describe(access, element)\n return ' '.join(list(tags)).lower()",
"def get_acs():\n data = wikipedia.get_ac_list(\"Haryana\")\n def normalize_name(name):\n name = name.split(\"(\")[0].lower().strip(\". \")\n return name\n\n pc_dict = dict((normalize_name(name), pc_code) for pc_code, ac_code, name in data)\n\n renames = {\n \"ambala cantt\": \"ambala cantonment\",\n \"dadri\": \"charkhi dadri\",\n \"kalawali\": \"kalanwali\",\n \"nangal chaudhry\": \"nagai chaudhry\",\n }\n def get_pc_code(name):\n name = normalize_name(name)\n name = renames.get(name, name)\n if name not in pc_dict:\n name = find_nearest(name, pc_dict.keys())\n return pc_dict[name]\n\n ac_data = _get_ac_data()\n assert(len(pc_dict) == len(ac_data))\n for code, name in ac_data:\n pc_code = get_pc_code(name)\n ac_code_str = \"AC{0:03d}\".format(int(code))\n yield pc_code, ac_code_str, name.title().replace(\"(Sc)\", \" (SC)\").replace(\"(St)\", \" (ST)\")",
"def parse_accidents(self, accidents_url):\n chrome_options = Options()\n chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"--window-size=600x400\")\n chrome_options.add_argument('--disable-gpu')\n chrome_options.add_argument('--no-sandbox')\n driver = webdriver.Chrome(executable_path=os.getcwd() + \"/chromedriver\", chrome_options=chrome_options)\n driver.get(accidents_url)\n html = driver.page_source\n accidents_soup = bs(html, 'html.parser')\n accidents_table = accidents_soup.find(\"table\")\n links = accidents_table.find_all(\"a\")\n hrefs = []\n\n for link in links:\n hrefs.append(\"https://aviation-safety.net\" + link.get(\"href\"))\n hrefs = hrefs[1:]\n for href in hrefs:\n driver.get(href)\n html = driver.page_source\n driver.back()\n\n soup = bs(html, 'html.parser')\n table = soup.find(\"table\")\n if not table:\n continue\n type_caption = table.find(\"td\", \"caption\", text=\"Type:\")\n date_caption = table.find(\"td\", \"caption\", text=\"Date:\")\n if type_caption:\n type_desc = type_caption.parent.find(\"td\", \"desc\").find(\"a\").text\n else:\n type_desc = None\n if date_caption:\n date_desc = date_caption.nextSibling.text\n else:\n date_desc = None\n data = [type_desc, date_desc]\n\n for caption_text in [\"First flight:\", \"Total airframe hrs:\",\n \"Total:\", \"Aircraft damage:\", \"Phase:\"]:\n td = table.find(\"td\", \"caption\", text=caption_text)\n if not td:\n desc = None\n else:\n desc = td.parent.find(\"td\", \"desc\").text.strip()\n data.append(desc)\n if not data[2] or data[6] == \"()\":\n continue\n self.accidents.add(Accident(data))",
"def setup_admonitions_handlers(self):\n handled_admonitions = [\n 'attention',\n 'caution',\n 'danger',\n 'error',\n 'hint',\n 'important',\n 'note',\n 'tip',\n 'warning',\n ]\n for adm in handled_admonitions:\n visit_func, depart_func = self.create_admonition_functor(adm)\n visit_func = new.instancemethod(visit_func, self, MoinTranslator)\n depart_func = new.instancemethod(depart_func, self, MoinTranslator)\n setattr(self, 'visit_%s' % (adm), visit_func)\n setattr(self, 'depart_%s' % (adm), depart_func)",
"def CalculateTransition(ProteinSequence,AAProperty,AAPName):\r\n\t\r\n\tTProteinSequence=StringtoNum(ProteinSequence,AAProperty)\r\n\tResult={}\r\n\tNum=len(TProteinSequence)\r\n\tCTD=TProteinSequence\r\n\tResult[AAPName+'T'+'12']=round(float(CTD.count('12')+CTD.count('21'))/(Num-1),3)\r\n\tResult[AAPName+'T'+'13']=round(float(CTD.count('13')+CTD.count('31'))/(Num-1),3)\r\n\tResult[AAPName+'T'+'23']=round(float(CTD.count('23')+CTD.count('32'))/(Num-1),3)\r\n\treturn Result",
"def descriptors(self):\n descs = []\n for x in xrange(0, 4):\n desc = self.GetDescriptor(x)\n if desc:\n descs.append(desc)\n return descs",
"def Attributes(self) -> _n_5_t_17:",
"def get_adverts(self):\n self.queries = generate_description_queries(self, CHUNKSIZE)\n self.next(self.extract_requires_degree, foreach=\"queries\")",
"def attributes_desc():\n columns = [\n '5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive', 'Bags_Under_Eyes', 'Bald', 'Bangs',\n 'Big_Lips', 'Big_Nose', 'Black_Hair', 'Blond_Hair', 'Blurry', 'Brown_Hair', 'Bushy_Eyebrows',\n 'Chubby', 'Double_Chin', 'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones',\n 'Male', 'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard', 'Oval_Face', 'Pale_Skin',\n 'Pointy_Nose', 'Receding_Hairline', 'Rosy_Cheeks', 'Sideburns', 'Smiling', 'Straight_Hair', 'Wavy_Hair',\n 'Wearing_Earrings', 'Wearing_Hat', 'Wearing_Lipstick', 'Wearing_Necklace', 'Wearing_Necktie', 'Young',\n ]\n\n return map(str.lower, columns)",
"def at_desc(self, looker=None):\r\n pass",
"def attractors(self):\n if not self.__landscaped:\n self.landscape()\n if not self.__expounded:\n self.expound()\n return self.__landscape_data.attractors",
"def adverbs(self):\n return self._adverbs",
"def convertAAToProperties(self, sequence, properties=None):\n properties = properties or ['composition', 'iep', 'polarity']\n result = []\n\n for aa in sequence:\n if aa in PROPERTY_DETAILS:\n aaProperties = sum(PROPERTY_DETAILS[aa][prop] for prop in\n properties)\n result.append(aaProperties)\n return result",
"def analyse_activity(casts_dict, encounter):\n\n combat_time = encounter.duration\n print(f\"Activity for {encounter.name}, {combat_time:.1f}s\")\n\n print(f\" {'Healer':<12s} {'setup'} {'activ'} {'act %'} {'inact'} {'regen'}\")\n\n for healer in HEALERS:\n end = encounter.start_t\n\n if healer not in casts_dict:\n continue\n\n casts = casts_dict[healer]\n first_cast_time = casts[0][1]\n setup_time = (first_cast_time - end).total_seconds()\n end = first_cast_time\n\n inactive_time = 0.0\n regen_time = 0.0\n\n for c in casts[1:]:\n source, cast_start, cast_end, spell_id, target = c\n\n if cast_start < end:\n end = cast_end\n continue\n\n d_time = (cast_start - end).total_seconds()\n inactive_time += d_time\n if d_time > 5.0:\n regen_time += d_time - 5.0\n\n if cast_end == cast_start:\n end = cast_end + timedelta(seconds=1.5)\n else:\n end = cast_end\n\n if end < encounter.end_t:\n d_time = (encounter.end_t - end).total_seconds()\n inactive_time += d_time\n if d_time > 5.0:\n regen_time += d_time - 5.0\n\n active_time = combat_time - inactive_time\n active_pct = active_time / combat_time\n print(\n f\" {healer:<12s} {setup_time:5.1f} {active_time:5.1f} {active_pct:5.1%} {inactive_time:5.1f} \"\n f\"{regen_time:5.1f}\"\n )",
"def get_turn_on_descriptions(get_turn_on, initial_state, current_state):\n turn_on_descriptions = []\n light_on = get_turn_on(initial_state, current_state)\n for c in light_on:\n turn_on_descriptions.append('Turn on the {} light'.format(c))\n return turn_on_descriptions.copy()",
"def assist2aspect ( assist ):\n ## columns in assist that are in aspect\n assist_aspect_cols = [ \n 'Date', 'LAT', 'LON', \n 'TC','PPC','PT','PZ','PF','PTop','PSY','PSH','PA','PMPC',\n 'PMPD','SPC','ST','SZ','SF','STop','SSY','SSH','SA',\n 'SMPC','SMPD','TPC','TT','TZ','TF','TTop','TSY','TSH',\n 'TA','TMPC','TMPD','OW','WT','AT','WS','WD','TCC','V',\n 'WX','PO', 'AO','Comments'\n ]\n ## columns used in constructing the topography feild in assist\n topo_cols = [\n 'PTop','PTopC','PRH','POld','PCs','PSC',\n 'STop','STopC','SRH','SOld','SCs','SSC',\n 'TTop','TTopC','TRH','TOld','TCs','TSC',\n ]\n topo_data = assist[ topo_cols ]\n topo_data = format_topo_data( topo_data )\n \n # brown ice columns\n bi_data = assist[['PA', 'PAL', 'SA', 'SAL', 'TA', 'TAL']]\n \n # get aspect columns from assist and rename to aspect names\n aspect = assist[ assist_aspect_cols ]\n #~ aspect = aspect.astype(str)\n aspect.columns = [\n 'Date', 'Latitude', 'Longitude', \n 'T/Conc', 'c1', 'ty1', 'z1', 'f1', 'to1', 'Sty1', 'Sz1', 'BI1', 'MPc1',\n 'MPd1', 'c2', 'ty2', 'z2', 's2', 'to2', 'Sty2', 'Sz2', 'BI2',\n 'MPc2', 'MPd2', 'c3', 'ty3', 'z3', 'f3', 'to3', 'Sty3', 'Sz3',\n 'BI3', 'MPc3', 'MPd3', 'O/W', 'Water Temp', 'Air Temp', \n 'Wind speed [m/s]','Wind Dir. [deg]','Cloud [_/8]','Vis',\n 'WW', 'PO', 'AO', 'Comments'\n ]\n # split out time and date\n aspect['Time'] = aspect['Date'].map(lambda d: str(d).strip().split(' ')[1])\n aspect['Time'] = aspect['Time'].map(\n lambda d: str(\n int(d.split(':')[0]) + (1 if int(d.split(':')[1]) >= 30 else 0)\n )\n )\n aspect['Date'] = aspect['Date'].map(lambda d: str(d).strip().split(' ')[0])\n \n # add columns not in assist\n not_in_assist = ['MPl11', 'MPl21', 'MPl12', 'MPl22', 'MPl13', 'MPl23']\n for key in not_in_assist:\n aspect [key] = None\n \n # Ice Types\n for key in ['ty1', 'ty2', 'ty3']:\n aspect [aspect[key] == '75'][key] = '85'\n \n \n #~ # Floe sizes: this is a direct conversion \n #~ convert = ['f1', 'f2', 'f3']\n \n # Topography \n for col in [('P','to1'), ('S','to2'), ('T','to3')]:\n temp = topo_data[[c for c in topo_data.columns if c[0] == col[0]]]\n temp.columns = ['Top','TopC','RH','Old','Cs','SC',]\n aspect[col[1]] = create_aspect_topo_code(temp)\n \n \n # Algea -> brown ice\n ## use concentration and location to crate single aspect code\n for col in [('P','BI1'),('S','BI2'),('T','BI3')]:\n temp = bi_data[[c for c in bi_data.columns if c[0] == col[0]]]\n temp.columns = ['A', 'AL']\n aspect[col[1]] = ''\n aspect[col[1]][temp['A'] == '0'] = \"'0'\"\n \n for loc in [('10',\"'d00'\"), ('20',\"'0d0'\"), ('30',\"'00d'\")]:\n index = np.logical_and(\n np.logical_or(temp['A'] != '', temp['A'] != '0'),\n temp['AL'] == loc[0]\n )\n aspect[col[1]][index] = \\\n temp['A'][index].map(lambda d: loc[1].replace('d',d))\n \n \n \n \n \n \n # Wind speed convert to m/s\n aspect['Wind speed [m/s]'] = \\\n aspect['Wind speed [m/s]'].astype(float) * .514 # knots * ([m/s]/knots) \n \n # Format weather codes\n #~ aspect['WW'] = aspect['WW'].map(lambda x: str(x).replace('nan',''))\n aspect['WW'] = aspect['WW'].map(lambda x: '{:02d}'.format(int(x)) if str(x) != 'nan' else '')\n \n aspect['AO'][ aspect['AO'].map(lambda x: str(x).lower()) == 'nan'] = ''\n aspect['Observer'] = aspect['PO'] + \\\n aspect['AO'].map(lambda o: ':'+ str(o) if len(str(o)) > 0 else str(o))\n \n \n aspect['Flag1'] = ''\n aspect['Flag2'] = ''\n aspect['Flag3'] = ''\n # aspect columns in corret order\n sorted_aspect_cols = [ \n 'Date', 'Time', 'Latitude', 'Longitude', \n 
'T/Conc','c1','ty1','z1','f1','to1','Sty1','Sz1','BI1','MPc1','MPd1',\n 'MPl11','MPl21','c2','ty2','z2','s2','to2','Sty2','Sz2','BI2','MPc2',\n 'MPd2','MPl12','MPl22','c3','ty3','z3','f3','to3','Sty3','Sz3','BI3',\n 'MPc3','MPd3','MPl13','MPl23','O/W','Water Temp','Air Temp',\n 'Wind speed [m/s]','Wind Dir. [deg]','Cloud [_/8]','Vis',\n 'WW', 'Flag1', 'Flag2', 'Flag3','Observer','Comments'\n ]\n\n ## TEMP\n return aspect[sorted_aspect_cols]",
"def getAutomaticTransitions():",
"def getAdjectiveTraits(self):\n\n if self.adjective == 0: #Fierce\n self.speed += 1\n self.dropRate += 1\n self.spriteList.append(self.fierce_sprite)\n\n elif self.adjective == 1: #Monstrous\n self.size *= 2\n self.health *= 2\n self.dropRate += 1\n elif self.adjective == 2: #Swift\n self.speed *= 2\n\n elif self.adjective == 3: #Stalking\n self.moveThroughDoors = True\n self.dropRate += 1\n\n elif self.adjective == 4: #Perceptive\n self.chase = True\n self.spriteList.append(self.perceptive_sprite)\n\n elif self.adjective == 5: #Deadly\n self.damage *= 2\n self.dropRate += 1\n\n elif self.adjective == 6: #Bloated\n self.attack = \"exploding\"\n self.damage *= 2\n\n elif self.adjective == 7: #Friendly\n self.damage *= 0.5\n self.spriteList.append(self.friendly_sprite)\n\n elif self.adjective == 8: #Deranged\n self.canChase = False\n\n elif self.adjective == 9: #Cowardly\n self.canChase = False\n self.flee = True\n\n elif self.adjective == 10: #Zombified\n self.speed /= 2\n self.health *= 2\n\n elif self.adjective == 11: #Slimy\n self.hasPlayerEffect = True\n self.playerEffect = \"slimy\"\n\n elif self.adjective == 12: #Rich\n self.dropRate += 3\n self.spriteList.append(self.rich_sprite)\n\n elif self.adjective == 13: #Poor\n self.dropRate -= .5\n\n elif self.adjective == 14: #Ugly\n pass #just a descriptor\n\n elif self.adjective == 15: #Barbaric\n pass #just a descriptor\n\n else:\n pass # TODO: log",
"async def _build_distance_table(\n self, graph: nx.DiGraph, goals: List[Goal]\n ) -> Dict[str, float]:\n ability_distances = dict()\n self.goal_actions = set()\n max_dist = 0\n\n for goal in goals:\n shortest_path = nx.shortest_path(graph, target=goal.target)\n\n for node, path in shortest_path.items():\n if not isinstance(node, Ability):\n continue\n\n filtered_path = [n for n in path if isinstance(n, Ability)]\n path_distance = len(filtered_path)\n\n if path_distance == 1:\n self.goal_actions.add(node.ability_id)\n\n if path_distance > max_dist:\n max_dist = path_distance\n\n if (\n node.ability_id in ability_distances\n and path_distance > ability_distances[node.ability_id]\n ):\n continue\n\n ability_distances[node.ability_id] = path_distance\n\n for ab, length in ability_distances.items():\n ability_distances[ab] = abs(length - max_dist) + 1\n\n return ability_distances",
"def aic(self):\n aics = []\n aics_bool = []\n for i, chain in enumerate(self.parent.chains):\n p, n_data, n_free = chain.posterior, chain.num_eff_data_points, chain.num_free_params\n if p is None or n_data is None or n_free is None:\n aics_bool.append(False)\n missing = \"\"\n if p is None:\n missing += \"posterior, \"\n if n_data is None:\n missing += \"num_eff_data_points, \"\n if n_free is None:\n missing += \"num_free_params, \"\n\n self._logger.warning(\"You need to set %s for chain %s to get the AIC\" % (missing[:-2], chain.name))\n else:\n aics_bool.append(True)\n c_cor = 1.0 * n_free * (n_free + 1) / (n_data - n_free - 1)\n aics.append(2.0 * (n_free + c_cor - np.max(p)))\n if len(aics) > 0:\n aics -= np.min(aics)\n aics_fin = []\n i = 0\n for b in aics_bool:\n if not b:\n aics_fin.append(None)\n else:\n aics_fin.append(aics[i])\n i += 1\n return aics_fin"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A method used for calculating Distribution descriptors based on Hydrophobicity of AADs.
|
def CalculateDistributionHydrophobicity(ProteinSequence):
    # Delegate to the generic CTD Distribution routine, using the
    # hydrophobicity grouping of the amino acids.
    result = CalculateDistribution(ProteinSequence, _Hydrophobicity, '_Hydrophobicity')
    return result
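# A minimal usage sketch, assuming CalculateDistribution and the
# _Hydrophobicity grouping from the surrounding CTD module are in scope;
# the sample sequence is made up for illustration. Distribution descriptors
# locate where along the chain each hydrophobicity class occurs (roughly the
# first occurrence and the 25/50/75/100% points), complementing the
# Transition descriptors above.
if __name__ == '__main__':
    sample_sequence = 'MKWVTFISLLFLFSSAYSRGVFRRDAHKSE'
    distribution_descriptors = CalculateDistributionHydrophobicity(sample_sequence)
    print(len(distribution_descriptors), 'descriptor values')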
|
[
"def _HAC_model():\n\n\tclf = AgglomerativeClustering()\n\treturn clf",
"def pd_create(cd):\n\n # check that 'c' or 'd' is passed\n #assert cd == (\n # 'c' or 'd'), 'This must be charge (c) or discharge (d) data'\n\n # number of descriptors it generates\n n_desc = 19\n\n # determines prefix string based on need for a charge or\n # discharge dataframe\n if cd == 'c':\n prefix = 'ch_'\n else:\n prefix = 'dc_'\n\n # generates list of names for the top of the descriptors dataframe\n names = []\n for ch in np.arange(n_desc):\n names.append(prefix + str(int(ch)))\n\n # adds names of error parameters to the end of the descriptor list\n names = names + [prefix+'AIC', prefix+'BIC', prefix+'red_chi_squared']\n\n # creates pandas dataframe with necessary heading\n # print(names)\n desc = pd.DataFrame(columns=names)\n\n return desc",
"def element_descriptor(protein, ligand, binsize=0.0):\n\t# SUPPRESS OPENBABEL WARNINGS\n\tpybel.ob.obErrorLog.StopLogging()\n\n\t# ELEMENT TABLE TO DETERMINE VDW AND COVALENT BONDS\n\tet = OBElementTable()\n\n\t# CONVERT ELEMENT SYMBOLS TO ATOMIC NUMBERS\n\tatomicnums = (et.GetAtomicNum(str(element)) for element in config['elements'])\n\tatomicnums_pro = (et.GetAtomicNum(str(element)) for element in config['elements_pro'])\n\t#print(et.GetAtomicNum(\"Me\"), \"Fe\")\n\n\t# CREATE A NUMERICAL ID TO ELEMENT COMBINATION MAPPING\n\t# IMPORTANT TO MAP THE DESCRIPTOR VECTOR BACK TO THE LABELS\n\t#element_pairs = product(sorted(atomicnums),repeat=2)\n\telement_pairs = product(sorted(atomicnums),sorted(atomicnums_pro),repeat=1)\n\telement_pairs = dict((p,i) for i,p in enumerate(element_pairs))\n\n\n\t# ALSO CREATE A COLUMN LABEL FOR THIS DESCRIPTOR\n\tsorted_pairs = zip(*sorted(element_pairs.items(), key=itemgetter(1)))[0]\n\t#print(sorted_pairs)\n\n\tnumcols = len(element_pairs)\n\n\t# GENERATE THE DISTANCE BINS\n\tif binsize:\n\n\t\t# get the distance bins for the given cutoff and bin size\n\t\tbins = get_distance_bins(config['cutoff'], binsize)\n\n\t\t# NUMBER OF TOTAL COLUMNS IN DESCRIPTOR\n\t\tnumcols *= (bins.size + 1)\n\n\t\t# CREATE A COLUMN FOR EACH ELEMENT PAIR AND DISTANCE BIN\n\t\tlabels = []\n\t\tfor x,y in sorted_pairs:\n\t\t\tfor i in range(len(bins) + 1):\n\t\t\t\tlabel = \"{0}.{1}-B{2}\".format(et.GetSymbol(x), et.GetSymbol(y), i)\n\t\t\t\tlabels.append(label)\n\n\t# LABEL WITHOUT BINS\n\telse:\n\t\tlabels = ['.'.join((et.GetSymbol(x),et.GetSymbol(y))) for x,y in sorted_pairs]\n\n\t# DESCRIPTOR THAT WILL CONTAIN THE SUM OF ALL ELEMENT-ELEMENT INTERACTIONS\n\tdescriptor = numpy.zeros(numcols, dtype=int)\n\n\t# GET THE CONTACTS\n\tcontacts = get_contacts(protein, ligand, config['cutoff'])\n\n\t# ITERATE THROUGH CONTACT PAIRS AND DETERMINE SIFT\n\tfor hetatm, hetatm_contacts in contacts:\n\t\thetatm_num = hetatm.GetAtomicNum()\n\n\t# ITERATE THROUGH ALL THE CONTACTS THE HETATM HAS\n\t\tfor atom, distance in hetatm_contacts:\n\t\t\tresidue = atom.GetResidue()\n\n\t\t\tif residue.GetAtomID(atom).strip() in ['FE','FE2']:\n\t\t\t\tatom_num == 26\n\t\t\telse:\n\t\t\t\tatom_num = atom.GetAtomicNum()\n\n\t\t\t# IGNORE WATER RESIDUES\n\t\t\tif residue.GetName() == 'HOH': continue\n\n\t\t\t# IGNORE ZN,FE ETC.\n\t\t\ttry: index = element_pairs[(atom_num, hetatm_num)]\n\t\t\texcept KeyError: continue\n\t\t\t#print(element_pairs, 'ele')\n\n\t\t\t# BIN INTERACTIONS\n\t\t\tif binsize:\n\n\t\t\t\t# GET THE BIN THIS CONTACT BELONGS IN\n\t\t\t\t# DIGITIZE TAKES AN ARRAY-LIKE AS INPUT\n\t\t\t\tbin_id = numpy.digitize([distance,], bins)[0]\n\t\t\t\tdescriptor[1 + index + index*bins.size + bin_id] += 1\n\n\t\t\telse:\n\n\t\t\t\t# ELEMENTS ARE SORTED NUMERICALLY\n\t\t\t\tdescriptor[index] += 1\n\n\tif binsize: sum_descriptor_bins(descriptor, bins)\n\n\treturn descriptor, labels",
"def CalculateEntropy(hs, n_extra_events, verbose = False):\n\n # As sometimes variables can be ill defined we need to be able to deal with \n # -> multiple histograms: for example a TH2 and two TH1 where one variable each is ill-defined\n # an extra number of events that passed the fiducial cut but fail both variable selections\n\n # n_extra_events: If a variable is only defined for a subset of\n # events we need to take this into account when calculating the\n # entropy\n total = sum([h.Integral() for h in hs]) + n_extra_events\n checksum = 0\n entropy = 0.\n\n # Add events failing the cuts to entropy\n p_extra = 1. * n_extra_events / total\n if p_extra > 0:\n checksum += p_extra\n entropy -= p_extra*math.log(p_extra,2) \n\n # Loop over histograms\n for h in hs:\n # 1d Histograms\n if type(h) in [ROOT.TH1F, ROOT.TH1D]:\n for i_bin in range(1, h.GetXaxis().GetNbins()+1):\n\n p = 1. * h.GetBinContent(i_bin)/total\n if p>0:\n checksum += p\n entropy -= p*math.log(p,2) \n\n # 2d Histograms\n elif type(h) in [ROOT.TH2F, ROOT.TH2D]:\n\n for i_bin_x in range(1, h.GetXaxis().GetNbins()+1):\n for i_bin_y in range(1, h.GetYaxis().GetNbins()+1):\n\n p = 1. * h.GetBinContent(i_bin_x, i_bin_y)/total\n if p>0:\n checksum += p\n entropy -= p*math.log(p,2) \n \n # Invalid Histogram type\n else:\n print \"Ivalid type {0} for histogram in CalculateEntropy\".format(type(h))\n print \"Exiting\"\n # End of handling different histogram dimensions\n # End of loop over histograms\n\n if verbose:\n print \"checksum = \", checksum\n\n return entropy",
"def FK_dh(joint_angles,link):\n # print (\"DOING DH\")\n\n base_theta=joint_angles[0]\n shoulder_theta=joint_angles[1]\n elbow_theta=joint_angles[2]\n w1_theta=joint_angles[3]\n w2_theta=joint_angles[4]\n\n # Defining DH table parameters \n\n # Distances are in mm\n\n d1=118\n a2=99\n a3=112\n a4=109\n\n # d1=122.14 \n # a2=105\n # a3=126.77\n # a4=122.12\n\n a=np.array([0,a2,a3,a4])\n alpha=np.array([np.pi/2,0,0,0])\n d=np.array([d1,0,0,0])\n theta=np.array([base_theta,shoulder_theta+np.pi/2,elbow_theta,w2_theta])\n\n # Defining functions to compute matrices\n\n def Trans_z_d (d):\n return np.array([[1,0,0,0],[0,1,0,0],[0,0,1,d],[0,0,0,1]])\n\n def Trans_x_a (a):\n return np.array([[1,0,0,a],[0,1,0,0],[0,0,1,0],[0,0,0,1]])\n\n def Rot_z_theta (theta):\n return np.array([[np.cos(theta),-np.sin(theta),0,0],[np.sin(theta),np.cos(theta),0,0],[0,0,1,0],[0,0,0,1]])\n\n def Rot_x_alpha (alpha):\n return np.array([[1,0,0,0],[0,np.cos(alpha),-np.sin(alpha),0],[0,np.sin(alpha),np.cos(alpha),0],[0,0,0,1]])\n\n # Computing the H matrix \n H=np.identity(4)\n \n for i in range(4):\n A=np.matmul(Rot_z_theta(theta[i]),np.matmul(Trans_z_d(d[i]),np.matmul(Trans_x_a(a[i]),Rot_x_alpha(alpha[i]))))\n H=np.matmul(H,A)\n\n # Calculating phi as the euler angle about the y-axis in the base frame\n\n phi=np.array([joint_angles[1]+joint_angles[2]+joint_angles[4]])\n\n # Extracting the required x,y and z elements from H matrix\n #print(H)\n H=H[0:3,-1]\n #print(H)\n np.append(H, phi) \n\n return H\n pass",
"def test_zernike_descriptor(self):\n self.assertTrue(abs(np.sum(self.des[0,:]) - 43.6876) < 0.01, \"Incorrect sum of feature 0 descriptor\")",
"def hdi(self, alpha=.05):\n credible_mass = 1 - alpha\n try:\n _hdi = highest_density_interval(self.data, credible_mass)\n return (round(_hdi[0], 4), round(_hdi[1], 4))\n except Exception as e:\n logger.warn(e)\n return (None, None)",
"def get_empirical_distribution(element_dict: FeatureDictionary, elements, dirichlet_alpha=10.):\n targets = np.array([element_dict.get_id_or_unk(t) for t in elements])\n empirical_distribution = np.bincount(targets, minlength=len(element_dict)).astype(float)\n empirical_distribution += dirichlet_alpha / len(empirical_distribution)\n return empirical_distribution / (np.sum(empirical_distribution) + dirichlet_alpha)",
"def ahfhaloid(fpre, ids):\n# f = open(fpre+'.AHF_halos')\n# d = f.read()\n# ds = d.split()\n# Npart = np.array(ds[87::83],dtype='i8') # Obtain number of particles in each halo\n# f.close()\n #print Npart\n Npart = np.loadtxt(fpre+'.AHF_halos', skiprows=1, usecols=4, dtype=np.int64)\n \n# try:\n# f = open(fpre+'.AHF_particles')\n# d = f.read()\n# except IOError:\n# f = open(fpre+'.AHF_particles.gz')\n# d = f.read()\n# ds = np.array(d.split(),dtype='i8')\n try:\n ds = np.loadtxt(fpre+'.AHF_particles', skiprows=1, usecols=0, dtype=np.int64)\n except IOError:\n ds = np.loadtxt(fpre+'.AHF_particles.gz', skiprows=1, usecols=0, dtype=np.int64)\n\n# Nhalo = int(ds[0]) # Number of haloes for the file\n accum = 1 # Value used to keep track of reading the AHF file\n pid, hid = np.array([],dtype='i8'), np.array([],dtype='i8') # Initialise particle and halo ID arrays\n \n for i in range(len(Npart)):\n hid = np.append(hid, np.ones(Npart[i], dtype='i8')*i)\n \n args = np.arange(Npart[i]) + accum # Arguments for the halo's particle IDs\n pid = np.append(pid, np.array(ds[args]))\n accum += (1 + Npart[i])\n\n \n \n if type(ids)==list: # Put/ensure all input IDs in one array\n idarr = np.array([])\n for i in range(len(ids)): idarr = np.append(idarr,ids[i])\n else:\n idarr = np.array(ids)\n \n argorder = np.argsort(idarr) # Order for increasing values in pid\n argreorder = np.argsort(np.arange(len(idarr))[argorder]) # Arguments to reorder everything back\n hid_out = -np.ones(len(idarr), dtype='i8') # Negative ones initially as -1 implies no halo/galaxy for that particle\n \n idargs = np.searchsorted(idarr[argorder], pid) # Find the arguments where the IDs match (in order)\n hid_out[idargs] = hid # Fill the matching entries with halo IDs\n hid_out = hid_out[argreorder] # Return to the same order as the input\n #print hid_out\n \n if type(ids)==list:\n acc = 0\n listout = []\n for i in range(len(ids)):\n #print len(ids[i])\n listout += [hid_out[acc:acc+len(ids[i])]]\n acc += len(ids[i])\n return listout\n else:\n return hid_out",
"def generate_abundances(ds_list = None, outfile = 'star_abundances.h5', dir = './abundances/', overwrite = False):\n #\n # do this for all\n #\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n if not os.path.isfile(dir + outfile) or overwrite:\n hf = h5py.File(dir + outfile, 'w')\n else:\n hf = h5py.File(dir + outfile, 'a')\n\n if ds_list is None:\n ds_list = np.sort( glob.glob('./DD????/DD????') )\n times = np.zeros(np.size(ds_list))\n elif (not (type(ds_list) is list)):\n # assume a single string passed\n ds_list = [ds_list]\n\n # get elements present:\n ds = yt.load(ds_list[-1])\n fields = ds.field_list\n elements = utilities.species_from_fields(fields, include_primordial=True)\n metals = [x for x in elements if (x != 'H' and x != 'He')]\n metals = metals + ['alpha', 'alpha_5'] # add these two by hand for aggregate metal abundances\n ratios = [ x +'/H' for x in metals]\n\n if 'Mg' in metals:\n ratios = ratios + [ x + '/Mg' for x in metals]\n\n if 'Fe' in metals:\n ratios = ratios + [ x + '/Fe' for x in metals]\n\n if 'O' in metals:\n ratios = ratios + [ x + '/O' for x in metals]\n\n if 'C' in metals:\n ratios = ratios + [ x + '/C' for x in metals]\n\n if 'Ba' in metals:\n ratios = ratios + [ x + '/Ba' for x in metals]\n\n# if 'alpha' in metals:\n# ratios = ratios + [ x + '/alpha' for x in metals]\n\n for i, dsname in enumerate(ds_list):\n ds = yt.load(dsname)\n data = ds.all_data()\n\n groupname = dsname.rsplit('/')[1]\n\n if groupname in hf and not overwrite:\n continue # skip this one, it already exists\n\n if ('io','particle_type') in ds.field_list:\n g = hf.create_group(groupname)\n g.create_dataset('Time' , data = ds.current_time.to('Myr').value)\n\n# if ('io', 'particle_type') in ds.field_list:\n\n #\n # Compute and store abundance ratios and relevant properties for all MS stars\n #\n aratios = compute_aratio(ds, data, ratios) # by default, only does MS stars\n mass_fractions = compute_mass_fractions(ds, data, elements)\n\n MS = data['particle_type'] == 11\n\n Nstars = np.size(data['particle_mass'][MS])\n g.create_dataset('Nstars', data = Nstars)\n g.create_dataset('Mstars', data = np.sum( data['particle_mass'][ MS].to('Msun').value))\n g.create_dataset('creation_time', data = data['creation_time'][MS].to('Myr').value)\n g.create_dataset('birth_mass', data = data['birth_mass'][MS].value)\n g.create_dataset('metallicity', data = data['metallicity_fraction'][MS].value)\n spatial = g.create_group('kinematics')\n\n r = np.zeros(Nstars)\n vr = np.zeros(Nstars)\n for i, xname in enumerate(['x','y','z']):\n x = (data['particle_position_' + xname][MS] - ds.domain_center[i]).to('pc').value\n vx = (data['particle_velocity_' + xname][MS]).to('km/s').value\n r += x**2\n vr += vx**2\n spatial.create_dataset( xname, data = x)\n spatial.create_dataset('r', data = np.sqrt(r))\n spatial.create_dataset('vr', data = np.sqrt(vr))\n\n#\n mf = hf.create_group(groupname + '/mass_fractions')\n for e in elements:\n mf.create_dataset( e, data = mass_fractions[e])\n mf_statgroup = hf.create_group(groupname + '/mass_fraction_statistics')\n all = mf_statgroup.create_group('all_MS')\n for e in elements:\n stats = utilities.compute_stats( mass_fractions[e], return_dict = True)\n g = all.create_group(e)\n for k in stats.keys():\n g.create_dataset(k, data = stats[k])\n\n#\n sg = hf.create_group(groupname + '/abundances')\n for abundance in aratios.keys():\n sg.create_dataset( abundance, data = aratios[abundance])\n\n # now compute statistics on the MS stars, and store them\n #\n statgroup = 
hf.create_group(groupname + '/statistics')\n all = statgroup.create_group('all_MS')\n for abundance in aratios.keys():\n stats = utilities.compute_stats(aratios[abundance], return_dict = True)\n g = all.create_group(abundance)\n for k in stats.keys():\n g.create_dataset(k, data = stats[k])\n\n #\n # Now, do this for all particles, regardless of type.\n # Aka... ignore observational / physical reality and treat them all as tracers\n #\n aratios = compute_aratio(ds, data, ratios, particle_type = 'all')\n tracers = statgroup.create_group('all_particles')\n for abundance in aratios.keys():\n stats = utilities.compute_stats(aratios[abundance], return_dict = True)\n g = tracers.create_group(abundance)\n\n if COMPUTE_ACF: # hide this for now - not working\n t = data['creation_time'].to('Myr').value\n t_n = t - np.min(t)\n dt = 1.0\n\n bins = np.arange(0.0, np.ceil(np.max(t_n)) + dt, dt)\n y = aratios[abundance]\n y = y + np.min(y)*2.0\n dy = np.abs(0.001 * y) # error should be irrelevant, but must be non-zero\n dy[dy == 0.0] = 0.00001\n acf, acf_error, acf_bins = utilities.acf(t_n, y, dy = dy, bins = bins)\n\n stats['acf'] = acf\n stats['acf_error'] = acf_error\n stats['acf_bins'] = acf_bins\n\n for k in stats.keys():\n g.create_dataset(k, data = stats[k])\n\n mass_fractions = compute_mass_fractions(ds, data, elements, particle_type = 'all')\n tracers = mf_statgroup.create_group('all_particles')\n for e in elements:\n stats = utilities.compute_stats(mass_fractions[e], return_dict = True)\n#\n# left off here\n#\n\n g = mf_statgroup.create_group(\"cumulative\")\n t = ds.current_time.to('Myr').value\n tmax = np.ceil(t)\n tbins = np.arange(0.0, tmax + 0.1, 0.5)\n hist,bins = np.histogram(data['creation_time'].to('Myr').value, bins = tbins)\n g.create_dataset('bins', data = tbins)\n g.create_dataset('hist', data = np.array(hist))\n t_form = data['creation_time'].to('Myr').value\n lifetime = data[('io','particle_model_lifetime')].to('Myr').value\n age = t - t_form\n\n mf_stats_array_dict = {}\n for e in elements:\n mf_stats_array_dict[e] = {}\n for k in stats.keys():\n mf_stats_array_dict[e][k] = np.zeros(np.size(tbins)-1)\n\n for i in np.arange(np.size(tbins)-1):\n\n age = tbins[i] - t_form\n selection = (age >= 0.0)*(age <= lifetime)\n for e in elements:\n if i == 0:\n sub_g = g.create_group(e)\n\n if np.size(age[selection]) > 1:\n stats = utilities.compute_stats(mass_fractions[e][selection], return_dict = True) # +1 b/c index starts at 1\n for k in stats.keys():\n mf_stats_array_dict[e][k][i] = stats[k]\n else:\n for k in stats.keys():\n mf_stats_array_dict[e][k][i] = None\n\n for e in elements:\n g = hf[groupname + '/mass_fraction_statistics/cumulative/' + e]\n for k in mf_stats_array_dict[e].keys():\n g.create_dataset(k, data = mf_stats_array_dict[e][k])\n\n for dt in [0.1, 1, 10]:\n g = mf_statgroup.create_group('%iMyr'%(dt))\n t = ds.current_time.to('Myr').value\n tmax = np.around(t, decimals = -len(str(dt)) + 1)\n if tmax < t:\n tmax = tmax + dt\n tbins = np.arange(0.0, tmax + 0.5*dt, dt)\n\n index = np.digitize(data['creation_time'].to('Myr').value, tbins)\n hist, bins = np.histogram(data['creation_time'].to('Myr').value, bins = tbins)\n g.create_dataset('bins', data = tbins)\n g.create_dataset('hist', data = np.array(hist))\n\n mf_stats_array_dict = {}\n for e in elements:\n mf_stats_array_dict[e] = {}\n for k in stats.keys():\n mf_stats_array_dict[e][k] = np.zeros(np.size(tbins) - 1)\n\n for i in np.arange(np.size(tbins)-1):\n for e in elements:\n if i == 0:\n sub_g = g.create_group(e)\n if 
hist[i] > 0:\n stats = utilities.compute_stats(mass_fractions[e][index == i+1], return_dict = True) # +1 b/c index starts at$\n for k in stats.keys():\n mf_stats_array_dict[e][k][i] = stats[k]\n else:\n for k in stats.keys():\n mf_stats_array_dict[e][k][i] = None\n\n for e in elements:\n # - - - - - Produce a gap-less, interpolated mean to compute the ACF\n if False: # don't do this anymore\n first = np.where( np.logical_not(np.isnan( mf_stats_array_dict[e]['mean'] )))[0][0]\n mean = mf_stats_array_dict[e]['mean'][first:]\n select = np.logical_not(np.isnan(mean))\n clean_mean = mean[select]\n tcent = 0.5 * (tbins[1:] + tbins[:-1])\n tcent = tcent[first:]\n clean_t = tcent[select]\n f_interp = interp1d(clean_t, clean_mean)\n interp_mean = mean\n interp_mean[np.logical_not(select)] = f_interp( tcent[np.logical_not(select)] )\n mf_stats_array_dict[e]['interp_mean'] = interp_mean\n mf_stats_array_dict[e]['acf'] = utilities.acf(interp_mean, nlags = len(tcent))\n\n g = hf[groupname + '/mass_fraction_statistics/%iMyr/'%(dt) + e]\n for k in mf_stats_array_dict[e].keys():\n g.create_dataset(k, data = mf_stats_array_dict[e][k])\n\n\n #\n # now do it in time bins to get time evolution\n #\n\n # First, lets do the observational version, where we compute the total\n # MDF at each point in time (using all stars) and compute median and spread, etc.\n # next we will do the instantaneous (binned) version of this\n g = statgroup.create_group(\"cumulative\")\n t = ds.current_time.to('Myr').value\n tmax = np.ceil(t)\n tbins = np.arange(0.0, tmax + 0.1, 0.5) # can go arbitrarily small here\n hist, bins = np.histogram(data['creation_time'].to('Myr').value, bins = tbins)\n g.create_dataset('bins', data = tbins)\n g.create_dataset('hist', data = np.array(hist))\n\n t_form = data['creation_time'].to('Myr').value\n # unfortunately we can't use dynamical_time because we are doing this for a single data output\n # and want to get WD and SN remnant stars binned appropriately, but their dynamical_time values change\n # when they form...\n lifetime = data[('io','particle_model_lifetime')].to('Myr').value\n age = t - t_form\n\n stats_array_dict = {}\n for abundance in aratios.keys():\n stats_array_dict[abundance] = {}\n for k in stats.keys():\n stats_array_dict[abundance][k] = np.zeros(np.size(tbins) - 1)\n for i in np.arange(np.size(tbins)-1):\n\n age = tbins[i] - t_form\n selection = (age >= 0.0)*(age <= lifetime)\n for abundance in aratios.keys():\n if i == 0:\n sub_g = g.create_group(abundance)\n\n if np.size(age[selection]) > 1:\n stats = utilities.compute_stats(aratios[abundance][selection], return_dict = True) # +1 b/c index starts at 1\n for k in stats.keys():\n stats_array_dict[abundance][k][i] = stats[k]\n else:\n for k in stats.keys():\n stats_array_dict[abundance][k][i] = None\n\n for abundance in aratios.keys():\n g = hf[groupname + '/statistics/cumulative/' + abundance]\n for k in stats_array_dict[abundance].keys():\n g.create_dataset(k, data = stats_array_dict[abundance][k])\n\n # now bin by times (using various dt) to get instantaneous median and spread in SF\n # at any given point in time. This is NOT an observational quantity, but rather a theoretical\n # bit of information to understand how much formed stars vary in abundance ratio at any\n # given point in time (i.e. 
this is the stellar analog to the gas version of these plots)\n for dt in [0.1, 1, 10]:\n g = statgroup.create_group('%iMyr'%(dt))\n t = ds.current_time.to('Myr').value\n tmax = np.around(t, decimals = -len(str(dt)) + 1)\n if tmax < t:\n tmax = tmax + dt\n tbins = np.arange(0.0, tmax + 0.5*dt, dt)\n\n index = np.digitize(data['creation_time'].to('Myr').value, tbins)\n hist, bins = np.histogram(data['creation_time'].to('Myr').value, bins = tbins)\n g.create_dataset('bins', data = tbins)\n g.create_dataset('hist', data = np.array(hist))\n\n stats_array_dict = {}\n for abundance in aratios.keys():\n stats_array_dict[abundance] = {}\n for k in stats.keys():\n stats_array_dict[abundance][k] = np.zeros(np.size(tbins) - 1)\n\n for i in np.arange(np.size(tbins)-1):\n for abundance in aratios.keys():\n if i == 0:\n sub_g = g.create_group(abundance)\n if hist[i] > 0:\n stats = utilities.compute_stats(aratios[abundance][index == i+1], return_dict = True) # +1 b/c index starts at 1\n for k in stats.keys():\n stats_array_dict[abundance][k][i] = stats[k]\n else:\n for k in stats.keys():\n stats_array_dict[abundance][k][i] = None\n\n for abundance in aratios.keys():\n # - - - - - Produce a gap-less, interpolated mean to compute the ACF\n if False: # don't do this anymore\n first = np.where( np.logical_not(np.isnan( stats_array_dict[abundance]['mean'] )))[0][0]\n mean = stats_array_dict[abundance]['mean'][first:]\n select = np.logical_not(np.isnan(mean))\n clean_mean = mean[select]\n tcent = 0.5 * (tbins[1:] + tbins[:-1])\n tcent = tcent[first:]\n clean_t = tcent[select]\n f_interp = interp1d(clean_t, clean_mean)\n interp_mean = mean\n interp_mean[np.logical_not(select)] = f_interp( tcent[np.logical_not(select)] )\n stats_array_dict[abundance]['interp_mean'] = interp_mean\n stats_array_dict[abundance]['acf'] = utilities.acf(interp_mean, nlags = len(tcent))\n\n g = hf[groupname + '/statistics/%iMyr/'%(dt) + abundance]\n for k in stats_array_dict[abundance].keys():\n g.create_dataset(k, data = stats_array_dict[abundance][k])\n\n # ------------ can do a correlation across time bins here too ---------\n # Pick some time t_o, for the ith bin past t_o, do correlation between\n # those two populations of stars\n # x = np.array([stars in t_o bin] + [stars in t_i bin])\n # corr[i] = np.correlate(x,x, mode = 'full')\n # allow to plot correlation as a function of time.\n\n\n else:\n continue\n#\n# g.create_dataset('Nstars', data = 0.0)\n# g.create_dataset('Mstars', data = 0.0)\n# sg = hf.create_group(groupname + '/abundances')\n# for abundance in aratios.keys():\n# sg.create_dataset( abundance, data = 0.0)\n\n\n hf.close()\n\n return",
"def calculate_hydrophobic_fitness(assembly):\n hydrophobic_centroids = []\n tyrosine_centroids = []\n polar_centroids = []\n for residue in [r for r in assembly.get_monomers()\n if isinstance(r, ampal.Residue)]:\n centroid_list = None\n centroid = residue.centroid\n if residue.mol_letter in HYDROPHOBIC:\n centroid_list = hydrophobic_centroids\n elif residue.mol_letter == 'Y':\n centroid_list = tyrosine_centroids\n elif residue.mol_letter in standard_amino_acids:\n centroid_list = polar_centroids\n else:\n continue\n if centroid_list is not None:\n centroid_list.append(\n (residue.parent.id, int(residue.id),\n residue['CA'] if centroid is None else centroid))\n hf = run_hf_loop(hydrophobic_centroids,\n tyrosine_centroids, polar_centroids)\n return hf",
"def _get_bonded_hydrogens(self, atom, **kwargs):\n return self._get_bonded_hydrogens_algorithms[self.detect_hydrogens](atom, **kwargs)",
"def __init__(self, hd: HyperparameterDistribution = None, hds: List[HyperparameterDistribution] = None,\n null_default_value=None):\n DiscreteHyperparameterDistribution.__init__(self, null_default_value)\n self.hd: HyperparameterDistribution = hd",
"def compute_bond_parameters(self, atomtype_i, atomtype_j, plot=False):\n \n topol = self.traj.topology\n target_pair = (atomtype_i, atomtype_j)\n bonded_pairs = []\n if len([(i,j) for i,j in topol.bonds]) == 0:\n sys.exit(\"No bonds detected, check your input files\")\n \n for (i, j) in topol.bonds:\n if set((i.name, j.name)) == set(target_pair):\n bonded_pairs.append((i.index, j.index))\n\n if len(bonded_pairs) == 0:\n #print(\"No {}-{} bonds detected\".format(atomtype_i, atomtype_j))\n return None\n\n # Compute distance between bonded pairs\n bond_distances = np.asarray(mdtraj.compute_distances(self.traj, bonded_pairs))\n \n fig,ax = plt.subplots(1,1)\n # 51 bins, 50 probabilities\n all_probabilities, bins, patches = ax.hist(bond_distances.flatten(), 50, normed=True)\n if plot:\n ax.set_xlabel(\"Distance (nm)\")\n ax.set_ylabel(\"Frequency density\")\n ax.grid()\n fig.tight_layout()\n plt.savefig(\"{}-{}_bond_distribution.jpg\".format(atomtype_i, atomtype_j))\n plt.close()\n \n \n # Need to compute energies from the probabilities\n # For each probability, compute energy and assign it appropriately\n all_energies = []\n all_distances = []\n \n for index, probability in enumerate(all_probabilities):\n first_bin = bins[index]\n second_bin = bins[index+1]\n bin_width = second_bin - first_bin\n if probability - 1e-6 <=0:\n probability = 1e-6\n energy = -self._k_b * self._T * np.log(probability)\n distance = np.mean((bins[index], bins[index+1]))\n all_energies.append(energy)\n all_distances.append(distance)\n # Shift energies to positive numbers\n min_shift = min(all_energies)\n all_energies = [energy - min_shift for energy in all_energies]\n min_index = np.argmin(all_energies)\n converged = False\n i = 2\n while not converged:\n try:\n # Slice data to be center the fit around the minima\n sliced_distances = all_distances[min_index-i: min_index+i]\n sliced_energies = all_energies[min_index-i: min_index+i]\n sliced_probabilities = all_probabilities[min_index-i: min_index+i]\n\n #bonded_parameters = self.fit_to_gaussian(sliced_distances, sliced_energies)\n bonded_parameters = self.fit_to_gaussian(sliced_distances, sliced_probabilities)\n converged=True\n\n except RuntimeError:\n i +=1\n if min_index + i >= 50 or min_index -i <= 0:\n #bonded_parameters = self.fit_to_gaussian(all_distances, all_energies)\n bonded_parameters = self.fit_to_gaussian(all_distances, all_probabilities)\n converged=True\n \n\n predicted_energies = self.harmonic_energy(all_distances, **bonded_parameters)\n if plot:\n #fig, axarray = plt.subplots(2,1,sharex=True)\n #axarray[0].plot(all_distances, predicted_energies, c='darkgray', label=\"Predicted\")\n #axarray[1].plot(all_distances, all_energies, c='black', label=\"Target\")\n #axarray[0].legend()\n #axarray[1].legend()\n #axarray[1].set_xlabel(\"Distance (nm)\")\n #axarray[0].set_ylabel(\"Energy (kJ/mol)\")\n #axarray[1].set_ylabel(\"Energy (kJ/mol)\")\n #plt.savefig(\"{}-{}_bond_energies.jpg\".format(atomtype_i, atomtype_j))\n #plt.close()\n \n fig ,ax = plt.subplots(1,1)\n ax.plot(all_distances, predicted_energies, c='darkgray', \n label=\"Predicted\")\n ax.plot(all_distances,all_energies, c='black', label=\"Target\", \n alpha=1, linestyle='--')\n ax.legend()\n ax.set_xlabel(\"Distance (nm)\")\n ax.set_ylabel(\"Energy (kJ/mol)\")\n ax.grid()\n fig.tight_layout()\n fig.savefig(\"{}-{}_bond_energies.jpg\".format(atomtype_i, atomtype_j))\n plt.close(fig)\n\n\n return bonded_parameters",
"def _create_describe_df(self, feature_list):\n df = self.features.data()[feature_list]\n ds = df.describe().astype(\"float64\").T\n ds[self._feature_missing] = df.isna().sum() / max(df.count())\n return ds",
"def EntropyHYFromFrequencyDistribution(distribution, rows):\n\n HY = -1\n\n rows = max(rows, 1)\n columns = int(len(distribution) / rows)\n\n # check that rows is a factor of the size of the distribution - ensures\n # we have a proper AxB matrix with integer numbers of rows and columns\n if (columns * rows == len(distribution)):\n\n listy = []\n\n for col in range(0, columns):\n listy.append(0)\n for row in range(0, rows):\n listy[col] = listy[col] + distribution[row * columns + col]\n\n HY = EntropyFromFrequencyDistribution(listy)\n\n return (HY)",
"def feature_extraction(img, feature):\r\n\r\n if feature == 'HoG':\r\n # HoG parameters\r\n win_size = (32, 32)\r\n block_size = (32, 32)\r\n block_stride = (16, 16)\r\n cell_size = (16, 16)\r\n nbins = 9\r\n deriv_aperture = 1\r\n win_sigma = 4\r\n histogram_norm_type = 0\r\n l2_hys_threshold = 2.0000000000000001e-01\r\n gamma_correction = 0\r\n nlevels = 64\r\n \r\n # Your code here. You should also change the return value.\r\n\r\n hog = cv2.HOGDescriptor(win_size,block_size,block_stride,cell_size,nbins,deriv_aperture,win_sigma,histogram_norm_type,l2_hys_threshold,gamma_correction,nlevels)\r\n\r\n dsize = hog.getDescriptorSize()\r\n descripters = hog.compute(img,winStride=(32,32),padding=(0,0))\r\n descripters = descripters.reshape(-1,dsize)\r\n\r\n\r\n elif feature == 'SIFT':\r\n sift = cv2.xfeatures2d.SIFT_create()\r\n descripters = []\r\n height= img.shape[0]\r\n width = img.shape[1]\r\n split1 = np.array_split(img, width/20, axis=1)\r\n for split in split1:\r\n split2 =np.array_split(split, height/20, axis=0)\r\n for ig in split2:\r\n keypoints, descripter = sift.detectAndCompute(ig,None)\r\n if descripter is not None:\r\n descripters.append(descripter)\r\n if len(descripters) > 0:\r\n descripters = np.vstack(descripters)\r\n else: \r\n return None\r\n return descripters",
"def extractHyperParameters(self):\n return(np.array(self.hypers))",
"def overall_distributions(self):\n overall_dist = pd.DataFrame(self.df[self.year].describe()) #make a dataframe containing salaries statistics information for each year\n overall_dist = overall_dist.rename(columns={self.year: 'League'})\n overall_dist['League'] = overall_dist['League'].apply(lambda x: int(x)) #convert all elements in dataframe to integers\n \n ax = self.df[self.year].hist(bins=30,histtype='stepfilled', fc='#0077FF',alpha=0.5,figsize=(10,6))\n ax.set_axis_bgcolor('#EEEEEE')\n ax.grid(color='white', linestyle='solid')\n ax.set_xlabel('Salaries', fontsize=16)\n ax.xaxis.set_label_coords(0.5,-0.08)\n ax.set_ylabel('Counts', fontsize=16)\n ax.yaxis.set_label_coords(-0.05,0.5)\n fig = ax.get_figure()\n html = mpld3.fig_to_html(fig)\n plt.close()\n return html, overall_dist",
"def dist_distr_display(self):\n bool_idx = self.all_distance.pair.apply(lambda x: True if x in list(self.friends.pair) else False)\n nbool_idx = bool_idx.apply(lambda x: not x)\n sim_a2b = self.all_distance.ix[bool_idx, \"dist_a2b\"]\n sim_b2a = self.all_distance.ix[bool_idx, \"dist_b2a\"]\n diff_a2b = self.all_distance.ix[nbool_idx, \"dist_a2b\"]\n diff_b2a = self.all_distance.ix[nbool_idx, \"dist_b2a\"]\n\n ## Visualize the ploting\n plt.figure(1)\n plt.subplot(211)\n\n plt.title(\"Distance (A to B) Distribution\")\n sim_a2b.hist(color = 'green', alpha = .5, bins = 20)\n diff_a2b.hist(color = 'red', alpha = .5, bins = 20)\n\n plt.subplot(212)\n plt.title(\"Distance (B to A)Distribution From B to A\")\n sim_b2a.hist(color = 'green', alpha = .5, bins = 20)\n diff_b2a.hist(color = 'red', alpha = .5, bins = 20)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A method used for calculating Distribution descriptors based on NormalizedVDWV of AADs.
|
def CalculateDistributionNormalizedVDWV(ProteinSequence):
    # Delegate to the generic CTD Distribution routine, using the
    # normalized van der Waals volume grouping of the amino acids.
    result = CalculateDistribution(ProteinSequence, _NormalizedVDWV, '_NormalizedVDWV')
    return result
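# The same CalculateDistribution helper is reused here with the normalized
# van der Waals volume grouping. A small, hypothetical sketch of merging the
# three descriptor families shown above into one feature dictionary; it
# assumes all three functions live in the same CTD module and the sequence
# is made up for illustration.
if __name__ == '__main__':
    sample_sequence = 'MKWVTFISLLFLFSSAYSRGVFRR'
    features = {}
    features.update(CalculateTransitionSolventAccessibility(sample_sequence))
    features.update(CalculateDistributionHydrophobicity(sample_sequence))
    features.update(CalculateDistributionNormalizedVDWV(sample_sequence))
    print(sorted(features))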
|
[
"def _compute_det_variance(self):",
"def detAcceptanceRateValues(self):\n if(not(self.weight)):\n for nodej in self.g.nodes():\n WeightedSum = len(self.g.in_edges(nodej))\n for edge in self.g.in_edges(nodej):\n self.A[edge[0] + \"-\"+edge[1]] = (1.0/self.NormA[nodej])\n else:\n for nodej in self.g.nodes():\n for edge in self.g.in_edges(nodej):\n self.A[edge[0] + \"-\"+edge[1]] = (self.g.get_edge_data(edge[0],edge[1]) + 0.0)/self.NormA[nodej]",
"def normalize_advantage(self, advantages):\n #######################################################\n ######### YOUR CODE HERE - 1-5 lines. ############\n\n advantages = (advantages - np.mean(advantages)) / np.std(advantages)\n\n #######################################################\n ######### END YOUR CODE. ############\n return advantages",
"def _normalize_article_values(self):\n for data_set in sorted(self.article_name_size):\n for size in sorted(self.article_name_size[data_set]):\n # Set up volume in physical units\n L = self.article_name_size[data_set][size][\"L\"]\n a = self.article_name_size[data_set][size][\"a\"]\n aL = a*float(L)\n V = (aL)**4\n # self._add_article_dict_item(name, size, key, value)\n self._add_article_dict_item(data_set, size, \"aL\", aL)\n self._add_article_dict_item(data_set, size, \"V\", V)\n\n # Normalize Q^2 by V\n Q2 = self.article_name_size[data_set][size][\"Q2\"]\n Q2Err = self.article_name_size[data_set][size][\"Q2Err\"]\n Q2_norm = Q2/V\n Q2Err_norm = Q2Err/V\n self._add_article_dict_item(data_set, size, \"Q2_norm\", Q2_norm)\n self._add_article_dict_item(\n data_set, size, \"Q2Err_norm\", Q2Err_norm)\n\n # Normalize Q^4 by V\n Q4 = self.article_name_size[data_set][size][\"Q4\"]\n Q4Err = self.article_name_size[data_set][size][\"Q4Err\"]\n Q4_norm = Q4/V**2\n Q4Err_norm = Q4Err/V**2\n self._add_article_dict_item(data_set, size, \"Q4_norm\", Q4_norm)\n self._add_article_dict_item(\n data_set, size, \"Q4Err_norm\", Q4Err_norm)\n\n # Recalculates 4th cumulant\n Q4C_norm = self.Q4C(Q4_norm, Q2_norm)\n Q4CErr_norm = self.Q4C_error(Q4_norm, Q4Err_norm, Q2_norm,\n Q2Err_norm)\n self._add_article_dict_item(\n data_set, size, \"Q4C_norm\", Q4C_norm)\n self._add_article_dict_item(\n data_set, size, \"Q4CErr_norm\", Q4CErr_norm)\n\n # Recalculates R\n R_norm = self.R(Q4C_norm, Q2_norm)\n RErr_norm = self.R_error(Q4C_norm, Q4CErr_norm, Q2_norm,\n Q2Err_norm)\n self._add_article_dict_item(data_set, size, \"R_norm\", R_norm)\n self._add_article_dict_item(\n data_set, size, \"RErr_norm\", RErr_norm)\n\n # for data_set in sorted(self.article_name_size):\n # \tfor size in sorted(self.article_name_size[data_set]):\n # \t\tprint \"=\"*50\n # \t\tprint \"Dataset: %s Size number: %s Volume: %f\" % (\n # \t\t\tdata_set, size, self.article_name_size[data_set][size][\"V\"])\n # \t\tprint \"Q2: %10.5f %10.5f\" % (\n # \t\t\tself.article_name_size[data_set][size][\"Q2_norm\"],\n # \t\t\tself.article_name_size[data_set][size][\"Q2Err_norm\"])\n # \t\tprint \"Q4: %10.5f %10.5f\" % (\n # \t\t\tself.article_name_size[data_set][size][\"Q4_norm\"],\n # \t\t\tself.article_name_size[data_set][size][\"Q4Err_norm\"])\n # \t\tprint \"Q4C: %10.5f %10.5f\" % (\n # \t\t\tself.article_name_size[data_set][size][\"Q4C_norm\"],\n # \t\t\tself.article_name_size[data_set][size][\"Q4CErr_norm\"])\n # \t\tprint \"R: %10.5f %10.5f\" % (\n # \t\t\tself.article_name_size[data_set][size][\"R_norm\"],\n # \t\t\tself.article_name_size[data_set][size][\"RErr_norm\"])",
"def create_density_list():\r\n food = pd.read_csv(PATH + r\"\\FoodData_legacy\\food.csv\")\r\n food_portion = pd.read_csv(PATH + r\"\\FoodData_legacy\\food_portion.csv\")\r\n\r\n joined_ingredient_list = pd.merge(food_portion, food, on=\"fdc_id\", how=\"left\")\r\n\r\n clean_list = joined_ingredient_list[[\"description\", \"gram_weight\", \"modifier\"]]\r\n\r\n clean_list = clean_list[clean_list[\"modifier\"].isin([\"cup\", \"tbsp\", \"tsp\"])]\r\n\r\n clean_list[\"ingredient\"] = (\r\n clean_list[\"description\"].str.split(\",\").str[0].str.strip().str.lower()\r\n )\r\n clean_list[\"attribute\"] = (\r\n clean_list[\"description\"].str.split(\",\").str[1].str.strip().str.lower()\r\n )\r\n clean_list = clean_list.fillna(\"\")\r\n\r\n grouped_multiple = clean_list.groupby([\"ingredient\", \"attribute\", \"modifier\"])[\r\n \"gram_weight\"\r\n ].mean()\r\n grouped_multiple = grouped_multiple.reset_index()\r\n\r\n grouped_multiple[\"density\"] = np.where(\r\n grouped_multiple[\"modifier\"] == \"cup\",\r\n grouped_multiple[\"gram_weight\"] / 240,\r\n np.where(\r\n grouped_multiple[\"modifier\"] == \"tsp\",\r\n grouped_multiple[\"gram_weight\"] / 4.9,\r\n np.where(\r\n grouped_multiple[\"modifier\"] == \"tbsp\",\r\n grouped_multiple[\"gram_weight\"] / 14.7,\r\n grouped_multiple[\"gram_weight\"],\r\n ),\r\n ),\r\n )\r\n\r\n density_list = grouped_multiple.groupby([\"ingredient\", \"attribute\"])[\r\n \"density\"\r\n ].mean()\r\n density_list = density_list.reset_index()\r\n\r\n density_avg = density_list.groupby(\"ingredient\")[\"density\"].mean()\r\n density_list = density_list.set_index(\"ingredient\")\r\n density_list[\"density_avg\"] = density_avg\r\n density_list = density_list.reset_index()\r\n\r\n density_list.insert(0, \"ingredient_combined\", \"\")\r\n density_list[\"ingredient_combined\"] = (\r\n density_list[[\"attribute\", \"ingredient\"]].agg(\" \".join, axis=1).str.strip()\r\n )\r\n\r\n return density_list",
"def generateModelData(params, standoffDistance, nBins_tof, range_tof, ddnXSfxn, dedxfxn,\n nSamples, getPDF=False):\n e0 = params[0]\n dataHist = np.zeros((x_bins, eD_bins))\n nLoops = int(nSamples / nEvPerLoop)\n for loopNum in range(0, nLoops):\n eZeros = np.repeat(params, nEvPerLoop)\n data_eD_matrix = odeint( dedxfxn, eZeros, x_binCenters )\n data_eD = data_eD_matrix.flatten('K')\n data_weights = ddnXSfxn.evaluate(data_eD)\n# print('length of data_x {} length of data_eD {} length of weights {}'.format(\n# len(data_x), len(data_eD), len(data_weights)))\n dataHist2d, xedges, yedges = np.histogram2d( data_x, data_eD,\n [x_bins, eD_bins],\n [[x_minRange,x_maxRange],[eD_minRange,eD_maxRange]],\n weights=data_weights)\n dataHist = np.add(dataHist, dataHist2d)\n \n# print('linalg norm value {}'.format(np.linalg.norm(dataHist)))\n# dataHist = dataHist / np.linalg.norm(dataHist)\n# print('sum of data hist {}'.format(np.sum(dataHist*eD_binSize*x_binSize)))\n dataHist = dataHist/ np.sum(dataHist*eD_binSize*x_binSize)\n# plot.matshow(dataHist)\n# plot.show()\n drawHist2d = (np.rint(dataHist * nSamples)).astype(int)\n tofs = []\n tofWeights = []\n for index, weight in np.ndenumerate( drawHist2d ):\n cellLocation = x_binCenters[index[0]]\n effectiveDenergy = (e0 + eD_binCenters[index[1]])/2\n tof_d = getTOF( masses.deuteron, effectiveDenergy, cellLocation )\n neutronDistance = (distances.tunlSSA_CsI.cellLength - cellLocation +\n distances.tunlSSA_CsI.zeroDegLength/2 +\n standoffDistance )\n tof_n = getTOF(masses.neutron, eN_binCenters[index[1]], neutronDistance)\n tofs.append( tof_d + tof_n )\n tofWeights.append(weight)\n tofData, tofBinEdges = np.histogram( tofs, bins=nBins_tof, range=range_tof,\n weights=tofWeights, density=getPDF)\n \n return beamTiming.applySpreading(tofData)",
"def test_total_dv_sensitivities(self):\n # Make sure vecs are initialized to zero\n self.zero_tacs_vecs()\n\n # Initial solve\n func_vals = self.run_solve()\n\n # Compute the total derivative w.r.t. material design variables using adjoint\n self.run_adjoints()\n self.assembler.addDVSens(self.func_list, self.dfddv_list, 1.0)\n self.assembler.addAdjointResProducts(\n self.adjoint_list, self.dfddv_list, -1.0\n )\n # Accumulate sensitivity across all procs\n self.set_tacs_vec_values(self.dfddv_list)\n\n # Compute the total derivative w.r.t. material design variables using fd/cs\n self.perturb_tacs_vec(self.dv1, self.dv0, self.dv_pert)\n # Run perturbed solution\n func_vals_pert = self.run_solve(dv=self.dv1)\n # Compute approximate sens\n fdv_sens_approx = self.compute_fdcs_approx(func_vals_pert, func_vals)\n\n # Tests cs/fd against sensitivity from adjoint\n for i in range(len(self.func_list)):\n with self.subTest(function=self.func_list[i]):\n dfddv_proj_i = self.dfddv_list[i].dot(self.dv_pert)\n np.testing.assert_allclose(\n dfddv_proj_i, fdv_sens_approx[i], rtol=self.rtol, atol=self.atol\n )",
"def info_gain(self,a):\n entro = self.entropy()\n Dv = dict()\n for d in self.datas:\n a_info = d.data[a]\n if a_info in Dv:\n Dv[a_info].add(d)\n else:\n new_dataset = DataSet()\n new_dataset.add(d)\n Dv[a_info] = new_dataset\n for x in Dv:\n N = len(self.datas) #|D|\n Nv = len(Dv[x].datas)#|Dv|\n entro -= Dv[x].entropy() * Nv / N\n return entro, Dv",
"def generateModelData(params, standoffDistance, range_tof, nBins_tof, ddnXSfxn,\n dedxfxn, beamTimer, nSamples, getPDF=False):\n beamE, eLoss, scale, s, scaleFactor = params\n e0mean = 900.0\n dataHist = np.zeros((x_bins, eD_bins))\n \n dedxForODE = lambda x, y: dedxfxn(energy=y,x=x)\n \n nLoops = int(np.ceil(nSamples / nEvPerLoop))\n for loopNum in range(0, nLoops):\n #eZeros = np.random.normal( params[0], params[0]*params[1], nEvPerLoop )\n #eZeros = skewnorm.rvs(a=skew0, loc=e0, scale=e0*sigma0, size=nEvPerLoop)\n eZeros = np.repeat(beamE, nEvPerLoop)\n eZeros -= lognorm.rvs(s=s, loc=eLoss, scale=scale, size=nEvPerLoop)\n checkForBadEs = True\n while checkForBadEs:\n badIdxs = np.where(eZeros <= 0.0)[0]\n nBads = badIdxs.shape[0]\n if nBads == 0:\n checkForBadEs = False\n replacements = np.repeat(beamE, nBads) - lognorm.rvs(s=s, loc=eLoss, scale=scale, size=nBads)\n eZeros[badIdxs] = replacements\n\n#data_eD_matrix = odeint( dedxfxn, eZeros, x_binCenters )\n \n odesolver = ode( dedxForODE ).set_integrator('dopri5').set_initial_value(eZeros)\n for idx, xEvalPoint in enumerate(x_binCenters):\n sol = odesolver.integrate( xEvalPoint )\n #print('shape of returned ode solution {}, first 10 entries {}'.format(sol.shape, sol[:10]))\n #data_eD_matrix = odesolver.integrate( x_binCenters )\n #print('shape of returned ode solution {}, first 10 entries {}'.format(data_eD_matrix.shape, data_eD_matrix[:10]))\n #data_eD = data_eD_matrix.flatten('K')\n data_weights = ddnXSfxn.evaluate(sol)\n hist, edEdges = np.histogram( sol, bins=eD_bins, range=(eD_minRange, eD_maxRange), weights=data_weights)\n dataHist[idx,:] += hist\n# print('length of data_x {} length of data_eD {} length of weights {}'.format(\n# len(data_x), len(data_eD), len(data_weights)))\n#dataHist2d, xedges, yedges = np.histogram2d( data_x, data_eD,\n# [x_bins, eD_bins],\n# [[x_minRange,x_maxRange],[eD_minRange,eD_maxRange]],\n# weights=data_weights)\n# dataHist += dataHist2d # element-wise, in-place addition\n\n\n \n# print('linalg norm value {}'.format(np.linalg.norm(dataHist)))\n# dataHist = dataHist / np.linalg.norm(dataHist)\n# print('sum of data hist {}'.format(np.sum(dataHist*eD_binSize*x_binSize)))\n dataHist /= np.sum(dataHist*eD_binSize*x_binSize)\n# plot.matshow(dataHist)\n# plot.show()\n e0mean = np.mean(eZeros)\n drawHist2d = (np.rint(dataHist * nSamples)).astype(int)\n tofs = []\n tofWeights = []\n for index, weight in np.ndenumerate( drawHist2d ):\n cellLocation = x_binCenters[index[0]]\n effectiveDenergy = (e0mean + eD_binCenters[index[1]])/2\n tof_d = getTOF( masses.deuteron, effectiveDenergy, cellLocation )\n neutronDistance = (distances.tunlSSA_CsI.cellLength - cellLocation +\n standoffDistance )\n tof_n = getTOF(masses.neutron, eN_binCenters[index[1]], neutronDistance)\n zeroD_times, zeroD_weights = zeroDegTimeSpreader.getTimesAndWeights( eN_binCenters[index[1]] )\n tofs.append( tof_d + tof_n + zeroD_times )\n tofWeights.append(weight * zeroD_weights)\n # TODO: next line needs adjustment if using OLD NUMPY < 1.6.1 \n # if lower than that, use the 'normed' arg, rather than 'density'\n tofData, tofBinEdges = np.histogram( tofs, bins=nBins_tof, range=range_tof,\n weights=tofWeights, density=getPDF)\n return scaleFactor * beamTimer.applySpreading(tofData)",
"def _calc_density(self, EigenVecs, num_electrons): \n density = 0\n\n for i in range (0, len(self.occupation_list)):\n #print(\"orbital number - {0} adding occupation: {1}\".format(i, self.occupation_list[i]))\n #density += self.occupation_list[i] * np.power(np.abs(EigenVecs[:, i]), 2)\n density += self.occupation_list[i] * np.abs(EigenVecs[:, i])**2 \n\n self._check_density(density, num_electrons)\n return density",
"def detCumulativeAcceptanceValues(self):\n if(not(self.weight)):\n for nodej in self.g.nodes():\n self.NormA[nodej] = len(self.g.in_edges(nodej))\n else:\n for nodej in self.g.nodes():\n WeightedSum = 0.0\n for edge in self.g.in_edges(nodej):\n weight = self.g.get_edge_data(edge[0],edge[1])\n print weight\n WeightedSum += weight\n self.NormA[nodej] = WeightedSum",
"def create_features():\n if insight:\n data = pd.read_csv(insight_subjective_path)\n else:\n data = pd.read_csv(epoc_subjective_path)\n\n for video_index in range(1, video_count+1):\n video_df = data[data['Video'] == video_index]\n\n count = video_df.__len__()\n arousal_sum = sum(video_df['Arousal'])\n valence_sum = sum(video_df['Valence'])\n\n data.loc[data['Video'] == video_index, 'Mean_video_arousal'] = arousal_sum / count\n data.loc[data['Video'] == video_index, 'Mean_video_valence'] = valence_sum / count\n\n arousal_min = min(video_df['Arousal'])\n valence_min = min(video_df['Valence'])\n arousal_max = max(video_df['Arousal'])\n valence_max = max(video_df['Valence'])\n\n data.loc[data['Video'] == video_index, 'Min_video_arousal'] = arousal_min\n data.loc[data['Video'] == video_index, 'Min_video_valence'] = valence_min\n data.loc[data['Video'] == video_index, 'Max_video_arousal'] = arousal_max\n data.loc[data['Video'] == video_index, 'Max_video_valence'] = valence_max\n\n data['Arousal^2'] = data['Arousal']**2\n data['Valence^2'] = data['Valence']**2\n\n return data",
"def AggregateDampingDer(self):\n dcda = DcDalpha(self.damp, self.rho)\n\n # zero out the contribution from the zero frequency mode if present\n for i in range(self.freq.size):\n if abs(self.freq[i]) < 1e-7:\n dcda[i] = 0.0\n dcdl = DalphaDlamTrans(dcda, self.lam, self.dt)\n dcdA = DlamDATrans(dcdl, self.W, self.V)\n dcdV1T = dAdV1Trans(dcdA, self.V1T, self.V1inv, self.V2T)\n dcdV2T = dAdV2Trans(dcdA, self.V1inv)\n dcdVhat = dV12dVhatTrans(dcdV1T, dcdV2T)\n dcdY = dVhatdYTrans(dcdVhat, self.U, self.s, self.VT)\n dcdX = dYdXTrans(dcdY)\n dcdx = self.H.T.dot(dcdX)\n\n return dcdx",
"def por_v_aff(data):\n tdata = dc(data)\n\n try:\n vp_b = tdata['vp_b']\n except NameError:\n raise\n vp_s = tdata.get('vp_s', np.array(1000./220))\n if 'x_e' in tdata: # Backwards compatibility\n m_e = tdata.get('x_e', np.array(2.19))\n else:\n m_e = tdata.get('m_e', np.array(2.19))\n\n return 1. - (vp_b/vp_s)**(1./m_e)",
"def getDatasetOfVariations(dfAllNNs,dfTest, row, caseInd, categorical, continuous, alpha, \n variations, partialLinear, linearVarCols):\n\n #######################################################################\n \n x = dfTest.loc[caseInd].as_matrix()\n \n if sum(row)>0: #if there are missing values\n boolCategorical = booleanRow(dfAllNNs.columns,categorical)\n boolContinuous = booleanRow(dfAllNNs.columns,continuous)\n\n catColumns = np.logical_and(boolCategorical,row) #oldIndex not present in dfAllNNs\n contColumns = np.logical_and(boolContinuous,row)\n \n if (np.sum(catColumns)>0): \n cols = dfAllNNs.columns[catColumns]\n freqValues = [dfAllNNs[i].value_counts().index[0] for i in cols]\n ######## impute categorical values\n ind = np.array(catColumns)\n x[ind] = freqValues\n if(np.sum(contColumns)>0):\n cols = dfAllNNs.columns[contColumns]\n if partialLinear:# and 'C_currentage' in cols:\n confs = []\n for j in cols:\n if j in linearVarCols and ~row[list(dfAllNNs.columns).index(j)]:\n confs.append(getVariablesLI(dfTest.loc[caseInd,j],alpha=1.0))\n else:\n confs.append(getVariablesCI(dfAllNNs[j].as_matrix(),alpha=alpha))\n x = getVariations(x=x, variations=variations, contColumns=contColumns, confs=confs, step_size=10) \n else:\n confs = []\n for j in cols:\n confs.append(getVariablesCI(dfAllNNs[j].as_matrix(),alpha=alpha))\n x = getVariations(x=x, variations=variations, contColumns=contColumns, confs=confs, step_size=10)\n else:\n contColumns = booleanRow(dfAllNNs.columns,linearVarCols)\n cols = dfAllNNs.columns[contColumns]\n if partialLinear:# and 'C_currentage' in cols:\n confs = []\n for j in cols:\n if j in linearVarCols and ~row[list(dfAllNNs.columns).index(j)]:\n confs.append(getVariablesLI(dfTest.loc[caseInd,j],alpha=1.0))\n x = getVariations(x=x, variations=variations, contColumns=contColumns, confs=confs, step_size=10) \n \n \n return x",
"def benes_daum():\n\n def f(t, x):\n return np.tanh(x)\n\n def df(t, x):\n return 1.0 - np.tanh(x) ** 2\n\n def l(t):\n return np.ones(1)\n\n initmean = np.zeros(1)\n initcov = 3.0 * np.eye(1)\n initrv = Normal(initmean, initcov)\n dynamod = pnfs.statespace.SDE(dimension=1, driftfun=f, dispmatfun=l, jacobfun=df)\n measmod = pnfs.statespace.DiscreteLTIGaussian(np.eye(1), np.zeros(1), np.eye(1))\n return dynamod, measmod, initrv, {}",
"def main( auc_ordering, abundances, sample_df, centrality_type, name, detailed=False ):\n\n out_dir = f\"{os.path.dirname(os.path.realpath(__file__))}/output\"\n # allows for cleaner execution and use of relative paths\n\n if( detailed ):\n out_file = f\"{out_dir}/{name}_PERMANOVA_result.csv\"\n # Create new files for output\n\n # Call PERMANOVA calculation\n permanova_df = perform_permanova( auc_ordering, abundances, sample_df, out_file, detailed )\n print( f\"Plots generated to {out_dir}.\" )\n\n # Since this is detailed must generate plots\n _generate_figures( permanova_df, centrality_type, out_dir, name )\n\n else:\n # No excess files necessary just generate dataframe to pass on\n permanova_df = perform_permanova( auc_ordering, abundances, sample_df, None )\n\n permanova_df.reset_index( drop=True, inplace=True ) # reset indicis as a precautionary to make sure all df's start at index 0\n\n return permanova_df",
"def compute_feature_vectors(self, means_bgr=True, means_hsv=True,\n h_hist=True, h_hist_bins=5, h_hist_entropy=True,\n s_hist=True, s_hist_bins=3, s_hist_entropy=True,\n hs_hist=False, hs_hist_bins_h=5, hs_hist_bins_s=3,\n sift=False, sift_kp_size=32.0,\n hog=False, hog_winSize=(32, 32), hog_blockSize=(16, 16), hog_blockStride=(8, 8),\n hog_cellSize=(8, 8), hog_bins=9):\n for img in self.images:\n self.images_superpixels_feature_vector[img] = [[] for sp in self.images_superpixels[img]]\n for superpixel in self.images_superpixels[img]:\n feature_vector = []\n mask = np.uint8(self.images_segmented[img] == superpixel)\n coord_xy = (self.images_superpixels_center[img][superpixel][0],\n self.images_superpixels_center[img][superpixel][1])\n if means_bgr:\n feature_vector += self.images_feature_extraction[img].means_bgr(mask)\n if means_hsv:\n feature_vector += self.images_feature_extraction[img].means_hsv(mask)\n if h_hist:\n feature_vector += self.images_feature_extraction[img].h_hist(mask, h_hist_bins, h_hist_entropy)\n if s_hist:\n feature_vector += self.images_feature_extraction[img].s_hist(mask, s_hist_bins, s_hist_entropy)\n if hs_hist:\n feature_vector += self.images_feature_extraction[img].hs_hist(mask, hs_hist_bins_h, hs_hist_bins_s)\n if sift:\n feature_vector += self.images_feature_extraction[img].sift(coord_xy[0], coord_xy[1], sift_kp_size)\n if hog:\n feature_vector += self.images_feature_extraction[img].hog(coord_xy[0], coord_xy[1], hog_winSize,\n hog_blockSize, hog_blockStride,\n hog_cellSize, hog_bins)\n assert len(feature_vector) > 0, 'Feature vector needs to have at least one feature'\n self.images_superpixels_feature_vector[img][superpixel] = np.asarray(feature_vector, dtype='float32')",
"def calculate_energy(self, atoms):\n\n pair_energy = 0.0\n embedding_energy = 0.0\n mu_energy = 0.0\n lam_energy = 0.0\n trace_energy = 0.0\n\n self.total_density = np.zeros(len(atoms))\n if (self.form == 'adp'):\n self.mu = np.zeros([len(atoms), 3])\n self.lam = np.zeros([len(atoms), 3, 3])\n\n for i in range(len(atoms)): # this is the atom to be embedded\n neighbors, offsets = self.neighbors.get_neighbors(i)\n offset = np.dot(offsets, atoms.get_cell())\n\n rvec = (atoms.positions[neighbors] + offset -\n atoms.positions[i])\n\n # calculate the distance to the nearest neighbors\n r = np.sqrt(np.sum(np.square(rvec), axis=1)) # fast\n# r = np.apply_along_axis(np.linalg.norm, 1, rvec) # sloow\n\n nearest = np.arange(len(r))[r <= self.cutoff]\n for j_index in range(self.Nelements):\n use = self.index[neighbors[nearest]] == j_index\n if not use.any():\n continue\n pair_energy += np.sum(self.phi[self.index[i], j_index](\n r[nearest][use])) / 2.\n\n density = np.sum(\n self.electron_density[j_index](r[nearest][use]))\n self.total_density[i] += density\n\n if self.form == 'adp':\n self.mu[i] += self.adp_dipole(\n r[nearest][use],\n rvec[nearest][use],\n self.d[self.index[i], j_index])\n\n self.lam[i] += self.adp_quadrupole(\n r[nearest][use],\n rvec[nearest][use],\n self.q[self.index[i], j_index])\n\n # add in the electron embedding energy\n embedding_energy += self.embedded_energy[self.index[i]](\n self.total_density[i])\n\n components = dict(pair=pair_energy, embedding=embedding_energy)\n\n if self.form == 'adp':\n mu_energy += np.sum(self.mu ** 2) / 2.\n lam_energy += np.sum(self.lam ** 2) / 2.\n\n for i in range(len(atoms)): # this is the atom to be embedded\n trace_energy -= np.sum(self.lam[i].trace() ** 2) / 6.\n\n adp_result = dict(adp_mu=mu_energy,\n adp_lam=lam_energy,\n adp_trace=trace_energy)\n components.update(adp_result)\n\n self.positions = atoms.positions.copy()\n self.cell = atoms.get_cell().copy()\n\n energy = 0.0\n for i in components.keys():\n energy += components[i]\n\n self.energy_free = energy\n self.energy_zero = energy\n\n self.results['energy_components'] = components\n self.results['energy'] = energy"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A method used for calculating Distribution descriptors based on Charge of AADs.
|
def CalculateDistributionCharge(ProteinSequence):
result=CalculateDistribution(ProteinSequence,_Charge,'_Charge')
return result
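A minimal usage sketch for the wrapper above, assuming a propy-style CTD module (the import path and example sequence are illustrative assumptions):
# Hypothetical usage; the import path is an assumption drawn from the naming above.
from propy.CTD import CalculateDistributionCharge

protein = "ADGCGVGEGTGQGPMCNCMCMKWVYADEDAADLESDSFADEDAS"  # any protein sequence string
charge_dist = CalculateDistributionCharge(protein)
# A dict of distribution descriptor values keyed with the '_Charge' label used above.
print(sorted(charge_dist)[:3])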
|
[
"def pd_create(cd):\n\n # check that 'c' or 'd' is passed\n #assert cd == (\n # 'c' or 'd'), 'This must be charge (c) or discharge (d) data'\n\n # number of descriptors it generates\n n_desc = 19\n\n # determines prefix string based on need for a charge or\n # discharge dataframe\n if cd == 'c':\n prefix = 'ch_'\n else:\n prefix = 'dc_'\n\n # generates list of names for the top of the descriptors dataframe\n names = []\n for ch in np.arange(n_desc):\n names.append(prefix + str(int(ch)))\n\n # adds names of error parameters to the end of the descriptor list\n names = names + [prefix+'AIC', prefix+'BIC', prefix+'red_chi_squared']\n\n # creates pandas dataframe with necessary heading\n # print(names)\n desc = pd.DataFrame(columns=names)\n\n return desc",
"def charge_1(dists, charges):\n charge = charges / ( map(epsilon, dists) * dists )\n return sum(charge)",
"def get_group_descriptor(self, groups):\n group_count = {}\n for chem in self.reactants + self.products:\n for group, count in chem.functional_groups.items():\n group_count.setdefault(group, 0.0)\n group_count[group] += count\n\n descriptor = []\n for smarts in sorted(groups.keys()):\n descriptor.append(group_count.get(smarts, 0))\n return descriptor",
"def charges(self, molecule):\n\n # TODO add option to use chargemol on onetep cube files.\n copy(f'../density/{molecule.name}.wfx', f'{molecule.name}.wfx')\n c_mol = Chargemol(molecule, self.all_configs)\n c_mol.generate_input()\n\n append_to_log(f'Chargemol analysis with DDEC{self.qm[\"ddec_version\"]} complete')\n\n return molecule",
"def sum_num(self, d1, d2, rho=0, samples=200): \n if isinstance(d1, tuple):\n print(\"Warning -- we still have T,f_T tupe as input for sum_num\")\n d1 = NumDist(d1[0], d1[1])\n if isinstance(d2, tuple):\n print(\"Warning -- we still have T,f_T tupe as input for sum_num\")\n d2 = NumDist(d2[0], d2[1])\n\n # Dirac distribution \n if d1 == 0:\n return d2\n if d2 == 0:\n return d1\n assert isinstance(d1, Distribution), print(\"Error: d1 is not distribution\") \n assert isinstance(d2, Distribution), print(\"Error: d2 is not distribution\") \n \n # The issue is that formal distributions need samples, but not NumDist\n T1, f_T_1 = d1.pmf() if isinstance(d1, NumDist) else d1.pmf(samples)\n \n l1, h1 = d1.margin()\n l2, h2 = d2.margin()\n low = l1 + l2\n high = h1 + h2\n domain = np.linspace(low, high, samples)\n # dT = domain[1] - domain[0]\n # print(\"Margin Dist #1: {:.3f}\\t{:.3f}\".format(l1, h1))\n # print(\"Margin Dist #2: {:.3f}\\t{:.3f}\".format(l2, h2))\n # print(\"Margin Dist Sum: {:.3f}\\t{:.3f}\".format(low, high))\n f_sum = np.zeros(samples) \n for sum_idx, t in enumerate(domain):\n # fz(t) = INT fX(k) * fY(t-k) for k in [-inf, +inf]\n # ..... = INT fX(k) * fY(t-k) for k in [X_min, X_max]\n for idx, k in enumerate(T1[:-1]):\n if k > t-l2: break\n dK = T1[idx+1] - T1[idx]\n f_sum[sum_idx] += (d1.pdf(k) * d2.pdf(t-k) * dK)\n # print(\"\\tArea: {:.6f}\".format(Distribution.area_pmf(domain, f_sum)))\n\n return NumDist(domain, f_sum, clean=True)",
"def pd_update(desc, charge_descript):\n\n # check if the inputs have the right Type\n # c is the charge_descript and desc is the empty dataframe \n assert isinstance(\n desc, pd.core.frame.DataFrame), \"This input must be a pandas dataframe\"\n assert isinstance(\n charge_descript, dict), \"Stop right there, only dictionaries are allowed in these parts\"\n #print('here is charge descript thingy: ')\n #print(charge_descript)\n # converts the dictionary of descriptors into a list of descriptors\n #desc_ls = process.dict_2_list(charge_descript)\n desc_ls = pd.DataFrame(charge_descript)\n # still c but as a list \n #print('here is c but as a list: ')\n #print(desc_ls)\n # print('here is the desc_ls: ')\n # print(desc_ls)\n # adds zeros to the end of each descriptor list to create\n # a list with 22 entries\n # also appends error parameters to the end of the descriptor list\n #desc_app = desc_ls + \\\n # np.zeros(19-len(desc_ls)).tolist() + charge_descript['errorParams']\n # generates a dataframe of descriptors\n #desc_df = pd.DataFrame([desc_app], columns=desc.columns)\n # combines row of a dataframe with previous dataframe\n desc = pd.concat([desc, desc_df], ignore_index=True)\n # print('here is the desc.to_string(): ')\n # print(desc.to_string())\n\n return desc",
"def update_charge(self):\n for atom in self.atoms:\n if (len(atom.charge) == 1) and (len(atom.lone_pairs) == 1) and (len(atom.radical_electrons) == 1):\n # if the charge of the group is not labeled, then no charge update will be\n # performed. If there multiple charges are assigned, no update either.\n # Besides, this groupatom should have enough information to be updated\n atom_type = atom.atomtype[0]\n for element in allElements:\n if atom_type is ATOMTYPES[element] or atom_type in ATOMTYPES[element].specific:\n bond_order = 0\n valence_electron = elements.PeriodicSystem.valence_electrons[element]\n for _, bond in atom.bonds.items():\n bond_order += bond.order[0]\n lone_pairs = atom.lone_pairs[0]\n radical_electrons = atom.radical_electrons[0]\n atom.charge[0] = valence_electron - bond_order - 2 * lone_pairs - radical_electrons\n else:\n # if the group is not specified to specific element, charge will not be updated\n pass",
"def element_descriptor(protein, ligand, binsize=0.0):\n\t# SUPPRESS OPENBABEL WARNINGS\n\tpybel.ob.obErrorLog.StopLogging()\n\n\t# ELEMENT TABLE TO DETERMINE VDW AND COVALENT BONDS\n\tet = OBElementTable()\n\n\t# CONVERT ELEMENT SYMBOLS TO ATOMIC NUMBERS\n\tatomicnums = (et.GetAtomicNum(str(element)) for element in config['elements'])\n\tatomicnums_pro = (et.GetAtomicNum(str(element)) for element in config['elements_pro'])\n\t#print(et.GetAtomicNum(\"Me\"), \"Fe\")\n\n\t# CREATE A NUMERICAL ID TO ELEMENT COMBINATION MAPPING\n\t# IMPORTANT TO MAP THE DESCRIPTOR VECTOR BACK TO THE LABELS\n\t#element_pairs = product(sorted(atomicnums),repeat=2)\n\telement_pairs = product(sorted(atomicnums),sorted(atomicnums_pro),repeat=1)\n\telement_pairs = dict((p,i) for i,p in enumerate(element_pairs))\n\n\n\t# ALSO CREATE A COLUMN LABEL FOR THIS DESCRIPTOR\n\tsorted_pairs = zip(*sorted(element_pairs.items(), key=itemgetter(1)))[0]\n\t#print(sorted_pairs)\n\n\tnumcols = len(element_pairs)\n\n\t# GENERATE THE DISTANCE BINS\n\tif binsize:\n\n\t\t# get the distance bins for the given cutoff and bin size\n\t\tbins = get_distance_bins(config['cutoff'], binsize)\n\n\t\t# NUMBER OF TOTAL COLUMNS IN DESCRIPTOR\n\t\tnumcols *= (bins.size + 1)\n\n\t\t# CREATE A COLUMN FOR EACH ELEMENT PAIR AND DISTANCE BIN\n\t\tlabels = []\n\t\tfor x,y in sorted_pairs:\n\t\t\tfor i in range(len(bins) + 1):\n\t\t\t\tlabel = \"{0}.{1}-B{2}\".format(et.GetSymbol(x), et.GetSymbol(y), i)\n\t\t\t\tlabels.append(label)\n\n\t# LABEL WITHOUT BINS\n\telse:\n\t\tlabels = ['.'.join((et.GetSymbol(x),et.GetSymbol(y))) for x,y in sorted_pairs]\n\n\t# DESCRIPTOR THAT WILL CONTAIN THE SUM OF ALL ELEMENT-ELEMENT INTERACTIONS\n\tdescriptor = numpy.zeros(numcols, dtype=int)\n\n\t# GET THE CONTACTS\n\tcontacts = get_contacts(protein, ligand, config['cutoff'])\n\n\t# ITERATE THROUGH CONTACT PAIRS AND DETERMINE SIFT\n\tfor hetatm, hetatm_contacts in contacts:\n\t\thetatm_num = hetatm.GetAtomicNum()\n\n\t# ITERATE THROUGH ALL THE CONTACTS THE HETATM HAS\n\t\tfor atom, distance in hetatm_contacts:\n\t\t\tresidue = atom.GetResidue()\n\n\t\t\tif residue.GetAtomID(atom).strip() in ['FE','FE2']:\n\t\t\t\tatom_num == 26\n\t\t\telse:\n\t\t\t\tatom_num = atom.GetAtomicNum()\n\n\t\t\t# IGNORE WATER RESIDUES\n\t\t\tif residue.GetName() == 'HOH': continue\n\n\t\t\t# IGNORE ZN,FE ETC.\n\t\t\ttry: index = element_pairs[(atom_num, hetatm_num)]\n\t\t\texcept KeyError: continue\n\t\t\t#print(element_pairs, 'ele')\n\n\t\t\t# BIN INTERACTIONS\n\t\t\tif binsize:\n\n\t\t\t\t# GET THE BIN THIS CONTACT BELONGS IN\n\t\t\t\t# DIGITIZE TAKES AN ARRAY-LIKE AS INPUT\n\t\t\t\tbin_id = numpy.digitize([distance,], bins)[0]\n\t\t\t\tdescriptor[1 + index + index*bins.size + bin_id] += 1\n\n\t\t\telse:\n\n\t\t\t\t# ELEMENTS ARE SORTED NUMERICALLY\n\t\t\t\tdescriptor[index] += 1\n\n\tif binsize: sum_descriptor_bins(descriptor, bins)\n\n\treturn descriptor, labels",
"def contract_exchange_descriptors(desc):\n # desc[0:6] = rho_data\n # desc[6:7] = g0\n # desc[7:10] = g1\n # desc[10:15] = g2\n # desc[15] = g0-r^2\n # g1 order: x, y, z\n # g2 order: xy, yz, z^2, xz, x^2-y^2\n\n N = desc.shape[1]\n res = np.zeros((12,N))\n rho_data = desc[:6]\n\n rho, s, alpha, tau_w, tau_unif = get_dft_input2(desc[:6])\n sprefac = 2 * (3 * np.pi * np.pi)**(1.0/3)\n n43 = rho**(4.0/3)\n svec = desc[1:4] / (sprefac * n43 + 1e-16)\n\n res[0] = rho\n res[1] = s**2\n res[2] = alpha\n\n # other setup\n g0 = desc[6]\n g1 = desc[7:10]\n g2 = desc[10:15]\n\n # g1_norm and 1d dot product\n g1_norm = np.linalg.norm(g1, axis=0)**2\n dot1 = np.einsum('an,an->n', svec, g1)\n\n # Clebsch Gordan https://en.wikipedia.org/wiki/Table_of_Clebsch%E2%80%93Gordan_coefficients\n g2_norm = 0\n for i in range(5):\n g2_norm += g2[i] * g2[i]\n g2_norm /= np.sqrt(5)\n\n res[3] = g0\n res[4] = g1_norm\n res[5] = dot1\n res[6] = g2_norm\n\n sgc = contract21(g2, svec)\n sgg = contract21(g2, g1)\n\n res[7] = np.einsum('pn,pn->n', sgc, svec)\n res[8] = np.einsum('pn,pn->n', sgc, g1)\n res[9] = np.einsum('pn,pn->n', sgg, g1)\n\n res[10] = desc[15]\n res[11] = desc[16]\n\n # res\n # 0: rho\n # 1: s\n # 2: alpha\n # 3: g0\n # 4: norm(g1)**2\n # 5: g1 dot svec\n # 6: norm(g2)**2\n # 7: svec dot g2 dot svec\n # 8: g1 dot g2 dot svec\n # 9: g1 dot g2 dot g1\n # 10: g0-r^2\n # 11: g0-r^4\n return res",
"def charge_2(dists, charges):\n d6 = dists <= 6.0\n d8 = dists <= 8.0\n d6_8 = logical_and(logical_not(d6), d8)\n epsilons = (d6*4.0) + \\\n d6_8*(38.0*dists-224.0) + \\\n logical_not(d8)*80.0\n charge = (charges / ( epsilons * dists ))\n return sum(charge)",
"def __add__(self, correlation):\n\n # concatenate distanced and fluctuations\n d_ij = np.hstack((self.d_ij, correlation.d_ij))\n C_ij = np.hstack((self.C_ij, correlation.C_ij))\n\n # sort by distance\n ind = np.argsort(d_ij)\n self.d_ij = d[ind]\n self.C_ij = C_ij[ind]\n\n return self",
"def interesting_metrics_to_compute(self):\n print(\"ECDF\")\n print(\"\")\n print(\"CDF\")\n print(\"\")\n print(\"PDF\")",
"def _updateCost(self):\n assert len(self.multiplicity) > 0\n\n self._numNodes = len(self.multiplicity)\n\n discreteCost = 1\n sharedCost = 2\n if self.chipCounter != self._postChipCounter:\n discreteCost += 1\n sharedCost += 1\n numDiscrete = np.sum(self.multiplicity > 1)\n hasShared = np.any(self.multiplicity == 1)\n self._numAxons = numDiscrete + hasShared\n self._numAxonCfgEntries = \\\n numDiscrete * discreteCost + hasShared * sharedCost\n self._cost = self._numAxonCfgEntries / self._maxNumAxonCfgEntries",
"def get_charges(self, ntile: str) -> int:\n # Charges on the title\n tile_fee = self.schedule_fee['title'].get(ntile, 0)\n # Charges on the constructed properties\n construct_fee = \\\n self.construct_count['house'] * self.schedule_fee['house'] \\\n + self.construct_count['hotel'] * self.schedule_fee['hotel']\n\n return tile_fee + construct_fee",
"def add_coupled_derivatives(self,base):\n\n for vartype in base.variables:\n if vartype in self.variables:\n for i, var in enumerate(base.variables[vartype]):\n if var.coupled:\n for func in range(len(self.derivatives[vartype])):\n self.derivatives[vartype][func][i]+= base.derivatives[vartype][func][i]",
"def _add_information_content(self) -> None:\n total_omim_diseases = len(self.omim_diseases)\n total_orpha_diseases = len(self.orpha_diseases)\n total_decipher_diseases = len(self.decipher_diseases)\n total_genes = len(self.genes)\n for term in self:\n p_omim = len(term.omim_diseases) / total_omim_diseases\n p_orpha = len(term.orpha_diseases) / total_orpha_diseases\n p_decipher = len(term.decipher_diseases) / total_decipher_diseases\n p_gene = len(term.genes) / total_genes\n if p_omim == 0:\n term.information_content.omim = 0\n else:\n term.information_content.omim = -math.log(p_omim)\n\n if p_orpha == 0:\n term.information_content.orpha = 0\n else:\n term.information_content.orpha = -math.log(p_orpha)\n\n if p_decipher == 0:\n term.information_content.decipher = 0\n else:\n term.information_content.decipher = -math.log(p_decipher)\n\n if p_gene == 0:\n term.information_content.gene = 0\n else:\n term.information_content.gene = -math.log(p_gene)",
"def distances(self):\n\n\n # Distances between atoms and ESP points\n self.dist = np.zeros((self.natoms, self.npoints))\n self.dist_3 = np.zeros((self.natoms, self.npoints))\n self.dist_x = np.zeros((self.natoms, self.npoints))\n self.dist_y = np.zeros((self.natoms, self.npoints))\n self.dist_z = np.zeros((self.natoms, self.npoints))\n\n self.dist = 1. / distance.cdist(self.atomcrd, self.crd)\n self.dist_3 = np.power(self.dist, 3) # maybe free afterwards\n self.dist_x = -np.multiply(np.subtract.outer(np.transpose(self.atomcrd)[0], np.transpose(self.crd)[0]),\n self.dist_3)\n # self.dist_x2=np.multiply(np.transpose(np.subtract.outer(np.transpose(self.crd)[0],np.transpose(self.atomcrd)[0])),self.dist_3)\n self.dist_y = -np.multiply(np.subtract.outer(np.transpose(self.atomcrd)[1], np.transpose(self.crd)[1]),\n self.dist_3)\n self.dist_z = -np.multiply(np.subtract.outer(np.transpose(self.atomcrd)[2], np.transpose(self.crd)[2]),\n self.dist_3)\n del self.dist_3\n\n # Distances between atoms and atoms\n self.adist = np.zeros((self.natoms, self.natoms))\n self.adist_3 = np.zeros((self.natoms, self.natoms))\n self.adist_5 = np.zeros((self.natoms, self.natoms))\n self.adist_x = np.zeros((self.natoms, self.natoms))\n self.adist_y = np.zeros((self.natoms, self.natoms))\n self.adist_z = np.zeros((self.natoms, self.natoms))\n self.adistb_x = np.zeros((self.natoms, self.natoms))\n self.adistb_y = np.zeros((self.natoms, self.natoms))\n self.adistb_z = np.zeros((self.natoms, self.natoms))\n\n self.adist = distance.cdist(self.atomcrd, self.atomcrd)\n di = np.diag_indices(self.natoms)\n self.adist[di] = 1.0E10\n # self.adist=np.fill_diagonal(self.adist,1.0)\n self.adist = 1. / self.adist\n self.adist_3 = np.power(self.adist, 3)\n self.adist_5 = np.power(self.adist, 5)\n self.adist[di] = 0.0\n self.adist_x = np.multiply(np.subtract.outer(np.transpose(self.atomcrd)[0], np.transpose(self.atomcrd)[0]),\n self.adist_3) # X distance between two atoms divided by the dist^3\n self.adist_y = np.multiply(np.subtract.outer(np.transpose(self.atomcrd)[1], np.transpose(self.atomcrd)[1]),\n self.adist_3)\n self.adist_z = np.multiply(np.subtract.outer(np.transpose(self.atomcrd)[2], np.transpose(self.atomcrd)[2]),\n self.adist_3)\n self.adistb_x = np.subtract.outer(np.transpose(self.atomcrd)[0],\n np.transpose(self.atomcrd)[0]) # X distances between two atoms\n self.adistb_y = np.subtract.outer(np.transpose(self.atomcrd)[1], np.transpose(self.atomcrd)[1])\n self.adistb_z = np.subtract.outer(np.transpose(self.atomcrd)[2], np.transpose(self.atomcrd)[2])\n\n # self.dist_d=np.multiply(self.dist_d,self.dist_3)\n # for i in range(len(self.atomcrd3)):\n # for j in range(len(self.crd3)):\n # self.dist_d[i][j]=1./(self.atomcrd3[i]-self.crd3[j])",
"def compute_dist(self):\n dist_total = {} # type: Dict\n duplicate_family = {} # type: ignore\n if not self.generic_cluster_name:\n for cluster_number in range(-1, self.clustering.number_clusters): # type: ignore\n chosen = {k: v for k, v in self.stats[cluster_number]['distribution sample'].items() if\n v >= self.threshold * 100}\n if not chosen and cluster_number != -1:\n continue\n total = sum(dict(chosen).values(), 0.0)\n dist = {k: v * 100 / total for k, v in chosen.items()}\n dist_total[cluster_number] = {}\n dist_total[cluster_number]['number_samples'] = sum(\n self.clustering.raw_data[ # type: ignore\n self.clustering.model.labels_ == cluster_number].label.isin( # type: ignore\n list(chosen.keys()))) # type: ignore\n dist_total[cluster_number]['distribution'] = dist\n cluster_name = ' , '.join([x for x in chosen.keys()])[:15]\n if cluster_name in duplicate_family.keys():\n new_cluster_name = '%s_%s' % (cluster_name, str(duplicate_family[cluster_name]))\n duplicate_family[cluster_name] += 1\n else:\n new_cluster_name = cluster_name\n duplicate_family[cluster_name] = 0\n dist_total[cluster_number]['clusterName'] = new_cluster_name\n else:\n for cluster_number in range(-1, self.clustering.number_clusters): # type: ignore\n chosen = self.stats[cluster_number]['distribution sample']\n total = sum(dict(chosen).values(), 0.0)\n dist = {k: v * 100 / total for k, v in chosen.items()}\n dist_total[cluster_number] = {}\n dist_total[cluster_number]['distribution'] = dist\n dist_total[cluster_number]['number_samples'] = self.stats[cluster_number]['number_samples']\n dist_total[cluster_number]['clusterName'] = 'Cluster %s' % str(cluster_number)\n self.stats['number_of_clusterized_sample_after_selection'] = sum(dist_total[cluster_number]['number_samples']\n for cluster_number in dist_total.keys())\n self.selected_clusters = dist_total",
"def test_zernike_descriptor(self):\n self.assertTrue(abs(np.sum(self.des[0,:]) - 43.6876) < 0.01, \"Incorrect sum of feature 0 descriptor\")",
"def get_descriptors(self):\n\n # If any descriptors should be ignored, put their names in the list\n # below.\n blacklist = []\n\n results = []\n for name, descriptor in Chemical:\n if name in blacklist:\n continue\n results.append(sum(descriptor(chem) for chem in self.reactants\n if chem.a.size > 1))\n results.append(sum(descriptor(chem) for chem in self.products\n if chem.a.size > 1))\n return results"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A method used for calculating Distribution descriptors based on SecondaryStr of AADs.
|
def CalculateDistributionSecondaryStr(ProteinSequence):
result=CalculateDistribution(ProteinSequence,_SecondaryStr,'_SecondaryStr')
return result
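As with the other wrappers, a short hedged usage sketch (import path and sequence are assumptions, not confirmed by this dump):
# Hypothetical usage; import path assumed as in the sketches above.
from propy.CTD import CalculateDistributionSecondaryStr

protein = "MTEITAAMVKELRESTGAGMMDCKNALSETNGDFDKAVQLLREKGLGKAAKKADRLAAEG"
ss_dist = CalculateDistributionSecondaryStr(protein)
print(len(ss_dist))  # distribution descriptors grouped by secondary-structure class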
|
[
"def calculate_agreement_directness(annotations_1, annotations_2):\n directness_1 = []\n directness_2 = []\n # Search for pairs annotated by both\n for pair in annotations_1:\n if pair in annotations_2:\n # Only take those into account that were annotated as \"affixal\" by both annotators and add those to lists\n if annotations_1[pair][\"affixal\"] == \"affixal\" and annotations_2[pair][\"affixal\"] == \"affixal\":\n directness_1.append(annotations_1[pair][\"directness\"])\n directness_2.append(annotations_2[pair][\"directness\"])\n n = len(directness_1)\n kappa = cohen_kappa_score(directness_1, directness_2)\n return n, kappa",
"def dist_distr_display(self):\n bool_idx = self.all_distance.pair.apply(lambda x: True if x in list(self.friends.pair) else False)\n nbool_idx = bool_idx.apply(lambda x: not x)\n sim_a2b = self.all_distance.ix[bool_idx, \"dist_a2b\"]\n sim_b2a = self.all_distance.ix[bool_idx, \"dist_b2a\"]\n diff_a2b = self.all_distance.ix[nbool_idx, \"dist_a2b\"]\n diff_b2a = self.all_distance.ix[nbool_idx, \"dist_b2a\"]\n\n ## Visualize the ploting\n plt.figure(1)\n plt.subplot(211)\n\n plt.title(\"Distance (A to B) Distribution\")\n sim_a2b.hist(color = 'green', alpha = .5, bins = 20)\n diff_a2b.hist(color = 'red', alpha = .5, bins = 20)\n\n plt.subplot(212)\n plt.title(\"Distance (B to A)Distribution From B to A\")\n sim_b2a.hist(color = 'green', alpha = .5, bins = 20)\n diff_b2a.hist(color = 'red', alpha = .5, bins = 20)",
"def _resolve_distribution_names(dist_fn_args,\n dist_names,\n leaf_name,\n instance_names):\n if dist_names is None:\n dist_names = []\n else:\n dist_names = dist_names.copy()\n n = len(dist_fn_args)\n dist_names.extend([None]*(n - len(dist_names)))\n\n # First, fill in distribution names by the function args used to refer\n # to them (e.g., in `[tfd.Normal(0., 1), lambda x: tfd.Normal(x, 1.)]`\n # the first distribution is named `x`.\n name_is_nontrivial = lambda name: name and name != '_'\n for i_, args in enumerate(reversed(dist_fn_args)):\n if not args:\n continue # There's no args to analyze.\n i = n - i_ - 1\n for j, arg_name in enumerate(args):\n if name_is_nontrivial(arg_name):\n existing_name = dist_names[i - j - 1]\n if (name_is_nontrivial(existing_name) and existing_name != arg_name):\n raise ValueError('Inconsistent names: component with name \"{}\" was '\n 'referred to by a different name \"{}\".'.format(\n arg_name, existing_name))\n dist_names[i - j - 1] = arg_name\n\n # Then, fill in names using any user-provided `name` arguments (e.g.,\n # `tfd.Normal(0., 1., name='x')`.\n for i in range(len(dist_names)):\n if instance_names[i] is not None:\n if (name_is_nontrivial(dist_names[i]) and\n dist_names[i] != instance_names[i]):\n raise ValueError('Inconsistent names: component with name \"{}\" was '\n 'referred to by a different name \"{}\".'.format(\n instance_names[i], dist_names[i]))\n else:\n dist_names[i] = instance_names[i]\n\n # Finally generate unique dummy names for any remaining components.\n unavailable_names = set(dist_names)\n j = 0\n for i_ in range(len(dist_names)):\n i = n - i_ - 1\n if not name_is_nontrivial(dist_names[i]):\n # TODO(davmre): consider wrapping dummy names with `<>` to prevent them\n # from being passed as kwargs.\n dummy_name = '{}{}'.format(leaf_name, j if j else '')\n while dummy_name in unavailable_names:\n j += 1\n dummy_name = '{}{}'.format(leaf_name, j)\n dist_names[i] = dummy_name\n unavailable_names.add(dummy_name)\n\n return tuple(dist_names)",
"def attributes_desc():\n columns = [\n '5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive', 'Bags_Under_Eyes', 'Bald', 'Bangs',\n 'Big_Lips', 'Big_Nose', 'Black_Hair', 'Blond_Hair', 'Blurry', 'Brown_Hair', 'Bushy_Eyebrows',\n 'Chubby', 'Double_Chin', 'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones',\n 'Male', 'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard', 'Oval_Face', 'Pale_Skin',\n 'Pointy_Nose', 'Receding_Hairline', 'Rosy_Cheeks', 'Sideburns', 'Smiling', 'Straight_Hair', 'Wavy_Hair',\n 'Wearing_Earrings', 'Wearing_Hat', 'Wearing_Lipstick', 'Wearing_Necklace', 'Wearing_Necktie', 'Young',\n ]\n\n return map(str.lower, columns)",
"def calc_aa_propensity(seq):\n\n # count absolute number of each residue in the input string\n number_each_aa_dict = {}\n\n all_aa = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']\n # create an dictionary of the numbers {\"A\" : 57, \"C\" : 5, ...} etc\n for aa in all_aa:\n number_each_aa_dict[aa] = seq.count(aa)\n\n # create a dictionary to hold the propensity of each residue\n aa_propensity_dict = {}\n length = len(seq)\n for aa in number_each_aa_dict:\n aa_propensity_dict[aa] = number_each_aa_dict[aa] / length\n\n # turn the dictionary into a pd.Series\n aa_prop_ser = pd.Series(aa_propensity_dict)\n # normalise so that all the aa propensities add up to 1.0\n # this is important if \"X\" or \"U\" is in the sequences\n aa_prop_norm_ser = aa_prop_ser / aa_prop_ser.sum()\n # name the index column\n aa_prop_norm_ser.index.name = \"freq\"\n return aa_prop_norm_ser",
"def element_descriptor(protein, ligand, binsize=0.0):\n\t# SUPPRESS OPENBABEL WARNINGS\n\tpybel.ob.obErrorLog.StopLogging()\n\n\t# ELEMENT TABLE TO DETERMINE VDW AND COVALENT BONDS\n\tet = OBElementTable()\n\n\t# CONVERT ELEMENT SYMBOLS TO ATOMIC NUMBERS\n\tatomicnums = (et.GetAtomicNum(str(element)) for element in config['elements'])\n\tatomicnums_pro = (et.GetAtomicNum(str(element)) for element in config['elements_pro'])\n\t#print(et.GetAtomicNum(\"Me\"), \"Fe\")\n\n\t# CREATE A NUMERICAL ID TO ELEMENT COMBINATION MAPPING\n\t# IMPORTANT TO MAP THE DESCRIPTOR VECTOR BACK TO THE LABELS\n\t#element_pairs = product(sorted(atomicnums),repeat=2)\n\telement_pairs = product(sorted(atomicnums),sorted(atomicnums_pro),repeat=1)\n\telement_pairs = dict((p,i) for i,p in enumerate(element_pairs))\n\n\n\t# ALSO CREATE A COLUMN LABEL FOR THIS DESCRIPTOR\n\tsorted_pairs = zip(*sorted(element_pairs.items(), key=itemgetter(1)))[0]\n\t#print(sorted_pairs)\n\n\tnumcols = len(element_pairs)\n\n\t# GENERATE THE DISTANCE BINS\n\tif binsize:\n\n\t\t# get the distance bins for the given cutoff and bin size\n\t\tbins = get_distance_bins(config['cutoff'], binsize)\n\n\t\t# NUMBER OF TOTAL COLUMNS IN DESCRIPTOR\n\t\tnumcols *= (bins.size + 1)\n\n\t\t# CREATE A COLUMN FOR EACH ELEMENT PAIR AND DISTANCE BIN\n\t\tlabels = []\n\t\tfor x,y in sorted_pairs:\n\t\t\tfor i in range(len(bins) + 1):\n\t\t\t\tlabel = \"{0}.{1}-B{2}\".format(et.GetSymbol(x), et.GetSymbol(y), i)\n\t\t\t\tlabels.append(label)\n\n\t# LABEL WITHOUT BINS\n\telse:\n\t\tlabels = ['.'.join((et.GetSymbol(x),et.GetSymbol(y))) for x,y in sorted_pairs]\n\n\t# DESCRIPTOR THAT WILL CONTAIN THE SUM OF ALL ELEMENT-ELEMENT INTERACTIONS\n\tdescriptor = numpy.zeros(numcols, dtype=int)\n\n\t# GET THE CONTACTS\n\tcontacts = get_contacts(protein, ligand, config['cutoff'])\n\n\t# ITERATE THROUGH CONTACT PAIRS AND DETERMINE SIFT\n\tfor hetatm, hetatm_contacts in contacts:\n\t\thetatm_num = hetatm.GetAtomicNum()\n\n\t# ITERATE THROUGH ALL THE CONTACTS THE HETATM HAS\n\t\tfor atom, distance in hetatm_contacts:\n\t\t\tresidue = atom.GetResidue()\n\n\t\t\tif residue.GetAtomID(atom).strip() in ['FE','FE2']:\n\t\t\t\tatom_num == 26\n\t\t\telse:\n\t\t\t\tatom_num = atom.GetAtomicNum()\n\n\t\t\t# IGNORE WATER RESIDUES\n\t\t\tif residue.GetName() == 'HOH': continue\n\n\t\t\t# IGNORE ZN,FE ETC.\n\t\t\ttry: index = element_pairs[(atom_num, hetatm_num)]\n\t\t\texcept KeyError: continue\n\t\t\t#print(element_pairs, 'ele')\n\n\t\t\t# BIN INTERACTIONS\n\t\t\tif binsize:\n\n\t\t\t\t# GET THE BIN THIS CONTACT BELONGS IN\n\t\t\t\t# DIGITIZE TAKES AN ARRAY-LIKE AS INPUT\n\t\t\t\tbin_id = numpy.digitize([distance,], bins)[0]\n\t\t\t\tdescriptor[1 + index + index*bins.size + bin_id] += 1\n\n\t\t\telse:\n\n\t\t\t\t# ELEMENTS ARE SORTED NUMERICALLY\n\t\t\t\tdescriptor[index] += 1\n\n\tif binsize: sum_descriptor_bins(descriptor, bins)\n\n\treturn descriptor, labels",
"def get_dcat_metadata(self, graph):\n dcat_metadata = dict()\n DCAT = Namespace('http://www.w3.org/ns/dcat#')\n\n datasets = list(graph[:RDF.type:DCAT.Dataset])\n if len(datasets) > 1:\n self.logger.info('FsF-F2-01M : Found more than one DCAT Dataset description, will use first one')\n if len(datasets) > 0:\n dcat_metadata = self.get_metadata(graph, datasets[0], type='Dataset')\n # distribution\n distribution = graph.objects(datasets[0], DCAT.distribution)\n dcat_metadata['object_content_identifier'] = []\n for dist in distribution:\n dtype, durl, dsize = None, None, None\n if not (graph.value(dist, DCAT.accessURL) or graph.value(dist, DCAT.downloadURL)):\n self.logger.info('FsF-F2-01M : Trying to retrieve DCAT distributions from remote location -:' +\n str(dist))\n try:\n distgraph = rdflib.Graph()\n disturl = str(dist)\n distresponse = requests.get(disturl, headers={'Accept': 'application/rdf+xml'})\n if distresponse.text:\n distgraph.parse(data=distresponse.text, format='application/rdf+xml')\n extdist = list(distgraph[:RDF.type:DCAT.Distribution])\n durl = (distgraph.value(extdist[0], DCAT.accessURL) or\n distgraph.value(extdist[0], DCAT.downloadURL))\n dsize = distgraph.value(extdist[0], DCAT.byteSize)\n dtype = distgraph.value(extdist[0], DCAT.mediaType)\n self.logger.info('FsF-F2-01M : Found DCAT distribution URL info from remote location -:' +\n str(durl))\n except Exception as e:\n self.logger.info('FsF-F2-01M : Failed to retrieve DCAT distributions from remote location -:' +\n str(dist))\n #print(e)\n durl = str(dist)\n else:\n durl = (graph.value(dist, DCAT.accessURL) or graph.value(dist, DCAT.downloadURL))\n #taking only one just to check if licence is available\n dcat_metadata['license'] = graph.value(dist, DCTERMS.license)\n # TODO: check if this really works..\n dcat_metadata['access_rights'] = (graph.value(dist, DCTERMS.accessRights) or\n graph.value(dist, DCTERMS.rights))\n dtype = graph.value(dist, DCAT.mediaType)\n dsize = graph.value(dist, DCAT.bytesSize)\n if durl or dtype or dsize:\n if idutils.is_url(str(durl)):\n dtype = '/'.join(str(dtype).split('/')[-2:])\n dcat_metadata['object_content_identifier'].append({\n 'url': str(durl),\n 'type': dtype,\n 'size': str(dsize)\n })\n\n if dcat_metadata['object_content_identifier']:\n self.logger.info('FsF-F3-01M : Found data links in DCAT.org metadata -: ' +\n str(dcat_metadata['object_content_identifier']))\n #TODO: add provenance metadata retrieval\n #else:\n # self.logger.info('FsF-F2-01M : Found DCAT content but could not correctly parse metadata')\n #in order to keep DCAT in the found metadata list, we need to pass at least one metadata value..\n #dcat_metadata['object_type'] = 'Dataset'\n return dcat_metadata\n #rdf_meta.query(self.metadata_mapping.value)\n #print(rdf_meta)\n #return None",
"def docDist(dictA, dictB):\n num = innerProduct(dictA, dictB)\n denom = math.sqrt(innerProduct(dictA,dictB)*innerProduct(dictB,dictB))\n return (math.acos(num/denom))/(math.pi/2)*100",
"def compare_this_other(this_strnd, oth_strnd, oth_strnd_anot, cutoff):\n\n p = Plotter()\n\n (this_sizes, this_dists) = this_strnd\n (other_sizes, other_dists) = oth_strnd\n (annot_other_sizes, annot_other_dists) = oth_strnd_anot\n\n # These are all dictionaries. Compare the (normalized) distribution of\n # lenghts from all of them\n #sizes = {'this': this_sizes, 'other': other_sizes,\n #'annot_other':annot_other_sizes}\n sizes = {'Opposite strand': this_sizes, 'Annotated strand': other_sizes}\n\n distances = {'this': this_dists, 'other': other_dists,\n 'annot_other':annot_other_dists}\n\n ## Get all dists, irrespective of cluster size\n merged_dists = {}\n for (dist_name, dist_dict) in distances.items():\n merged_dists[dist_name] = sum(dist_dict.itervalues(), [])\n\n #p.distance_histogram(merged_dists)\n\n ## Create a zero-array for the max sizes\n all_sizes = {}\n for (size_name, size_dict) in sizes.items():\n this_size = np.zeros(cutoff)\n for (size, size_count) in size_dict.iteritems():\n if size < cutoff:\n this_size[size-1] = size_count\n if size >= cutoff:\n this_size[-1] += size_count\n\n all_sizes[size_name] = this_size\n\n p.cluster_size_distribution(all_sizes, cutoff)\n\n debug()",
"def calc(self, sid):\n\n\t\tchar_collocates = collocates_manager.get(sid, tpe='character',\n\t\t\trole=self.role, ranks=self.ranks)\n\t\tnoun_collocates = collocates_manager.get(sid, tpe='noun')\n\n\t\t# TODO: Figure out which order ir better here.\n\t\td1 = Probability.gen_dist(noun_collocates, smooth=False)\n\t\td2 = Probability.gen_dist(char_collocates, smooth=False)\n\n\t\treturn Probability.kl_divergence(d1, d2)",
"def compute_dist(self):\n dist_total = {} # type: Dict\n duplicate_family = {} # type: ignore\n if not self.generic_cluster_name:\n for cluster_number in range(-1, self.clustering.number_clusters): # type: ignore\n chosen = {k: v for k, v in self.stats[cluster_number]['distribution sample'].items() if\n v >= self.threshold * 100}\n if not chosen and cluster_number != -1:\n continue\n total = sum(dict(chosen).values(), 0.0)\n dist = {k: v * 100 / total for k, v in chosen.items()}\n dist_total[cluster_number] = {}\n dist_total[cluster_number]['number_samples'] = sum(\n self.clustering.raw_data[ # type: ignore\n self.clustering.model.labels_ == cluster_number].label.isin( # type: ignore\n list(chosen.keys()))) # type: ignore\n dist_total[cluster_number]['distribution'] = dist\n cluster_name = ' , '.join([x for x in chosen.keys()])[:15]\n if cluster_name in duplicate_family.keys():\n new_cluster_name = '%s_%s' % (cluster_name, str(duplicate_family[cluster_name]))\n duplicate_family[cluster_name] += 1\n else:\n new_cluster_name = cluster_name\n duplicate_family[cluster_name] = 0\n dist_total[cluster_number]['clusterName'] = new_cluster_name\n else:\n for cluster_number in range(-1, self.clustering.number_clusters): # type: ignore\n chosen = self.stats[cluster_number]['distribution sample']\n total = sum(dict(chosen).values(), 0.0)\n dist = {k: v * 100 / total for k, v in chosen.items()}\n dist_total[cluster_number] = {}\n dist_total[cluster_number]['distribution'] = dist\n dist_total[cluster_number]['number_samples'] = self.stats[cluster_number]['number_samples']\n dist_total[cluster_number]['clusterName'] = 'Cluster %s' % str(cluster_number)\n self.stats['number_of_clusterized_sample_after_selection'] = sum(dist_total[cluster_number]['number_samples']\n for cluster_number in dist_total.keys())\n self.selected_clusters = dist_total",
"def annotate(self, input_data, secondary_data=None):\n out = {}\n\n # get input details\n input_chrom = input_data['chrom'].lower()\n input_pos = input_data['pos']\n input_ref = input_data['ref_base']\n input_alt = input_data['alt_base']\n\n\n sql_q = 'SELECT Frequencies FROM abraom WHERE CHR=\"%s\" AND Start=%s AND REF=\"%s\" and ALT=\"%s\";' \\\n %(input_chrom, input_pos, input_ref, input_alt)\n\n\n self.cursor.execute(sql_q)\n sql_q_result = self.cursor.fetchone()\n\n\n allele_freq = ''\n\n\n if sql_q_result:\n allele_freq += sql_q_result[0]\n #print(allele_freq)\n out['allele_freq'] = allele_freq\n\n\n else :\n out['allele_freq'] = 9999.99\n\n\n return out",
"def calculate_agreement_subtypes_indirect(annotations_1, annotations_2):\n subtypes_1 = []\n subtypes_2 = []\n # Search for pairs annotated by both\n for pair in annotations_1:\n if pair in annotations_2:\n # Only take those into account that were annotated as \"affixal\" and \"indirect\"\n # by both annotators and add those to lists\n if all([annotations_1[pair][\"affixal\"] == \"affixal\",\n annotations_2[pair][\"affixal\"] == \"affixal\",\n annotations_1[pair][\"directness\"] == \"indirect\",\n annotations_2[pair][\"directness\"] == \"indirect\"]):\n subtypes_1.append(annotations_1[pair][\"subtype\"])\n subtypes_2.append(annotations_2[pair][\"subtype\"])\n # Get number of instances\n n = len(subtypes_1)\n # Calculate Cohen's kappa\n kappa = cohen_kappa_score(subtypes_1, subtypes_2)\n # Create confusion matrix\n series_1 = pd.Series(subtypes_1, name='Subtypes_1')\n series_2 = pd.Series(subtypes_2, name='Subtypes_2')\n confusion_matrix = pd.crosstab(series_1, series_2)\n return n, kappa, confusion_matrix",
"def calc(self, sid):\n\n\t\tchar_collocates = collocates_manager.get(sid, tpe='character',\n\t\t\trole=self.role, ranks=self.ranks)\n\t\tnoun_collocates = collocates_manager.get(sid, tpe='noun')\n\n\t\td1 = Probability.gen_dist(char_collocates, smooth=False)\n\t\td2 = Probability.gen_dist(noun_collocates, smooth=False)\n\n\t\treturn Probability.total_variation(d1, d2)",
"def getIDFreqDist(bioguide_id):\n\tvote_history \t\t\t= getIDVoteHistory(bioguide_id)\n\tword_dict\t\t\t\t= {\"bill_words\": [], \"yea_words\": [], \"nay_words\": []}\n\ttemp_words\t\t\t\t= []\n\tyea_words_set\t\t\t= set()\n\tnay_words_set\t\t\t= set()\n\n\tfor vote in sorted(vote_history):\n\t\ttemp_words = getBillWords(vote)\n\t\tword_dict['bill_words'] += temp_words\n\t\tif vote_history[vote] == \"Yea\":\n\t\t\tword_dict['yea_words'] += temp_words\n\t\t\tfor word in temp_words:\n\t\t\t\tyea_words_set.add(word)\n\t\telif vote_history[vote] == \"Nay\":\n\t\t\tword_dict['nay_words'] += temp_words\n\t\t\tfor word in temp_words:\n\t\t\t\tnay_words_set.add(word)\n\t\telse:\n\t\t\tpass\n\t\n\tyea_wordss = yea_words_set.difference(nay_words_set)\n\tnay_wordss = nay_words_set.difference(yea_words_set)\n\t\n\tyea_words = [x for x in word_dict['yea_words'] if x in yea_wordss]\n\tnay_words = [x for x in word_dict['nay_words'] if x in nay_wordss]\n\n\tyea_freq_dist\t= nltk.FreqDist(yea_words).most_common()\n\tnay_freq_dist \t= nltk.FreqDist(nay_words).most_common()\n\n\tfreq_dist \t\t= {\"yea\": yea_freq_dist, \"nay\": nay_freq_dist}\n\n\treturn freq_dist",
"def generate_abundances(ds_list = None, outfile = 'star_abundances.h5', dir = './abundances/', overwrite = False):\n #\n # do this for all\n #\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n if not os.path.isfile(dir + outfile) or overwrite:\n hf = h5py.File(dir + outfile, 'w')\n else:\n hf = h5py.File(dir + outfile, 'a')\n\n if ds_list is None:\n ds_list = np.sort( glob.glob('./DD????/DD????') )\n times = np.zeros(np.size(ds_list))\n elif (not (type(ds_list) is list)):\n # assume a single string passed\n ds_list = [ds_list]\n\n # get elements present:\n ds = yt.load(ds_list[-1])\n fields = ds.field_list\n elements = utilities.species_from_fields(fields, include_primordial=True)\n metals = [x for x in elements if (x != 'H' and x != 'He')]\n metals = metals + ['alpha', 'alpha_5'] # add these two by hand for aggregate metal abundances\n ratios = [ x +'/H' for x in metals]\n\n if 'Mg' in metals:\n ratios = ratios + [ x + '/Mg' for x in metals]\n\n if 'Fe' in metals:\n ratios = ratios + [ x + '/Fe' for x in metals]\n\n if 'O' in metals:\n ratios = ratios + [ x + '/O' for x in metals]\n\n if 'C' in metals:\n ratios = ratios + [ x + '/C' for x in metals]\n\n if 'Ba' in metals:\n ratios = ratios + [ x + '/Ba' for x in metals]\n\n# if 'alpha' in metals:\n# ratios = ratios + [ x + '/alpha' for x in metals]\n\n for i, dsname in enumerate(ds_list):\n ds = yt.load(dsname)\n data = ds.all_data()\n\n groupname = dsname.rsplit('/')[1]\n\n if groupname in hf and not overwrite:\n continue # skip this one, it already exists\n\n if ('io','particle_type') in ds.field_list:\n g = hf.create_group(groupname)\n g.create_dataset('Time' , data = ds.current_time.to('Myr').value)\n\n# if ('io', 'particle_type') in ds.field_list:\n\n #\n # Compute and store abundance ratios and relevant properties for all MS stars\n #\n aratios = compute_aratio(ds, data, ratios) # by default, only does MS stars\n mass_fractions = compute_mass_fractions(ds, data, elements)\n\n MS = data['particle_type'] == 11\n\n Nstars = np.size(data['particle_mass'][MS])\n g.create_dataset('Nstars', data = Nstars)\n g.create_dataset('Mstars', data = np.sum( data['particle_mass'][ MS].to('Msun').value))\n g.create_dataset('creation_time', data = data['creation_time'][MS].to('Myr').value)\n g.create_dataset('birth_mass', data = data['birth_mass'][MS].value)\n g.create_dataset('metallicity', data = data['metallicity_fraction'][MS].value)\n spatial = g.create_group('kinematics')\n\n r = np.zeros(Nstars)\n vr = np.zeros(Nstars)\n for i, xname in enumerate(['x','y','z']):\n x = (data['particle_position_' + xname][MS] - ds.domain_center[i]).to('pc').value\n vx = (data['particle_velocity_' + xname][MS]).to('km/s').value\n r += x**2\n vr += vx**2\n spatial.create_dataset( xname, data = x)\n spatial.create_dataset('r', data = np.sqrt(r))\n spatial.create_dataset('vr', data = np.sqrt(vr))\n\n#\n mf = hf.create_group(groupname + '/mass_fractions')\n for e in elements:\n mf.create_dataset( e, data = mass_fractions[e])\n mf_statgroup = hf.create_group(groupname + '/mass_fraction_statistics')\n all = mf_statgroup.create_group('all_MS')\n for e in elements:\n stats = utilities.compute_stats( mass_fractions[e], return_dict = True)\n g = all.create_group(e)\n for k in stats.keys():\n g.create_dataset(k, data = stats[k])\n\n#\n sg = hf.create_group(groupname + '/abundances')\n for abundance in aratios.keys():\n sg.create_dataset( abundance, data = aratios[abundance])\n\n # now compute statistics on the MS stars, and store them\n #\n statgroup = 
hf.create_group(groupname + '/statistics')\n all = statgroup.create_group('all_MS')\n for abundance in aratios.keys():\n stats = utilities.compute_stats(aratios[abundance], return_dict = True)\n g = all.create_group(abundance)\n for k in stats.keys():\n g.create_dataset(k, data = stats[k])\n\n #\n # Now, do this for all particles, regardless of type.\n # Aka... ignore observational / physical reality and treat them all as tracers\n #\n aratios = compute_aratio(ds, data, ratios, particle_type = 'all')\n tracers = statgroup.create_group('all_particles')\n for abundance in aratios.keys():\n stats = utilities.compute_stats(aratios[abundance], return_dict = True)\n g = tracers.create_group(abundance)\n\n if COMPUTE_ACF: # hide this for now - not working\n t = data['creation_time'].to('Myr').value\n t_n = t - np.min(t)\n dt = 1.0\n\n bins = np.arange(0.0, np.ceil(np.max(t_n)) + dt, dt)\n y = aratios[abundance]\n y = y + np.min(y)*2.0\n dy = np.abs(0.001 * y) # error should be irrelevant, but must be non-zero\n dy[dy == 0.0] = 0.00001\n acf, acf_error, acf_bins = utilities.acf(t_n, y, dy = dy, bins = bins)\n\n stats['acf'] = acf\n stats['acf_error'] = acf_error\n stats['acf_bins'] = acf_bins\n\n for k in stats.keys():\n g.create_dataset(k, data = stats[k])\n\n mass_fractions = compute_mass_fractions(ds, data, elements, particle_type = 'all')\n tracers = mf_statgroup.create_group('all_particles')\n for e in elements:\n stats = utilities.compute_stats(mass_fractions[e], return_dict = True)\n#\n# left off here\n#\n\n g = mf_statgroup.create_group(\"cumulative\")\n t = ds.current_time.to('Myr').value\n tmax = np.ceil(t)\n tbins = np.arange(0.0, tmax + 0.1, 0.5)\n hist,bins = np.histogram(data['creation_time'].to('Myr').value, bins = tbins)\n g.create_dataset('bins', data = tbins)\n g.create_dataset('hist', data = np.array(hist))\n t_form = data['creation_time'].to('Myr').value\n lifetime = data[('io','particle_model_lifetime')].to('Myr').value\n age = t - t_form\n\n mf_stats_array_dict = {}\n for e in elements:\n mf_stats_array_dict[e] = {}\n for k in stats.keys():\n mf_stats_array_dict[e][k] = np.zeros(np.size(tbins)-1)\n\n for i in np.arange(np.size(tbins)-1):\n\n age = tbins[i] - t_form\n selection = (age >= 0.0)*(age <= lifetime)\n for e in elements:\n if i == 0:\n sub_g = g.create_group(e)\n\n if np.size(age[selection]) > 1:\n stats = utilities.compute_stats(mass_fractions[e][selection], return_dict = True) # +1 b/c index starts at 1\n for k in stats.keys():\n mf_stats_array_dict[e][k][i] = stats[k]\n else:\n for k in stats.keys():\n mf_stats_array_dict[e][k][i] = None\n\n for e in elements:\n g = hf[groupname + '/mass_fraction_statistics/cumulative/' + e]\n for k in mf_stats_array_dict[e].keys():\n g.create_dataset(k, data = mf_stats_array_dict[e][k])\n\n for dt in [0.1, 1, 10]:\n g = mf_statgroup.create_group('%iMyr'%(dt))\n t = ds.current_time.to('Myr').value\n tmax = np.around(t, decimals = -len(str(dt)) + 1)\n if tmax < t:\n tmax = tmax + dt\n tbins = np.arange(0.0, tmax + 0.5*dt, dt)\n\n index = np.digitize(data['creation_time'].to('Myr').value, tbins)\n hist, bins = np.histogram(data['creation_time'].to('Myr').value, bins = tbins)\n g.create_dataset('bins', data = tbins)\n g.create_dataset('hist', data = np.array(hist))\n\n mf_stats_array_dict = {}\n for e in elements:\n mf_stats_array_dict[e] = {}\n for k in stats.keys():\n mf_stats_array_dict[e][k] = np.zeros(np.size(tbins) - 1)\n\n for i in np.arange(np.size(tbins)-1):\n for e in elements:\n if i == 0:\n sub_g = g.create_group(e)\n if 
hist[i] > 0:\n stats = utilities.compute_stats(mass_fractions[e][index == i+1], return_dict = True) # +1 b/c index starts at$\n for k in stats.keys():\n mf_stats_array_dict[e][k][i] = stats[k]\n else:\n for k in stats.keys():\n mf_stats_array_dict[e][k][i] = None\n\n for e in elements:\n # - - - - - Produce a gap-less, interpolated mean to compute the ACF\n if False: # don't do this anymore\n first = np.where( np.logical_not(np.isnan( mf_stats_array_dict[e]['mean'] )))[0][0]\n mean = mf_stats_array_dict[e]['mean'][first:]\n select = np.logical_not(np.isnan(mean))\n clean_mean = mean[select]\n tcent = 0.5 * (tbins[1:] + tbins[:-1])\n tcent = tcent[first:]\n clean_t = tcent[select]\n f_interp = interp1d(clean_t, clean_mean)\n interp_mean = mean\n interp_mean[np.logical_not(select)] = f_interp( tcent[np.logical_not(select)] )\n mf_stats_array_dict[e]['interp_mean'] = interp_mean\n mf_stats_array_dict[e]['acf'] = utilities.acf(interp_mean, nlags = len(tcent))\n\n g = hf[groupname + '/mass_fraction_statistics/%iMyr/'%(dt) + e]\n for k in mf_stats_array_dict[e].keys():\n g.create_dataset(k, data = mf_stats_array_dict[e][k])\n\n\n #\n # now do it in time bins to get time evolution\n #\n\n # First, lets do the observational version, where we compute the total\n # MDF at each point in time (using all stars) and compute median and spread, etc.\n # next we will do the instantaneous (binned) version of this\n g = statgroup.create_group(\"cumulative\")\n t = ds.current_time.to('Myr').value\n tmax = np.ceil(t)\n tbins = np.arange(0.0, tmax + 0.1, 0.5) # can go arbitrarily small here\n hist, bins = np.histogram(data['creation_time'].to('Myr').value, bins = tbins)\n g.create_dataset('bins', data = tbins)\n g.create_dataset('hist', data = np.array(hist))\n\n t_form = data['creation_time'].to('Myr').value\n # unfortunately we can't use dynamical_time because we are doing this for a single data output\n # and want to get WD and SN remnant stars binned appropriately, but their dynamical_time values change\n # when they form...\n lifetime = data[('io','particle_model_lifetime')].to('Myr').value\n age = t - t_form\n\n stats_array_dict = {}\n for abundance in aratios.keys():\n stats_array_dict[abundance] = {}\n for k in stats.keys():\n stats_array_dict[abundance][k] = np.zeros(np.size(tbins) - 1)\n for i in np.arange(np.size(tbins)-1):\n\n age = tbins[i] - t_form\n selection = (age >= 0.0)*(age <= lifetime)\n for abundance in aratios.keys():\n if i == 0:\n sub_g = g.create_group(abundance)\n\n if np.size(age[selection]) > 1:\n stats = utilities.compute_stats(aratios[abundance][selection], return_dict = True) # +1 b/c index starts at 1\n for k in stats.keys():\n stats_array_dict[abundance][k][i] = stats[k]\n else:\n for k in stats.keys():\n stats_array_dict[abundance][k][i] = None\n\n for abundance in aratios.keys():\n g = hf[groupname + '/statistics/cumulative/' + abundance]\n for k in stats_array_dict[abundance].keys():\n g.create_dataset(k, data = stats_array_dict[abundance][k])\n\n # now bin by times (using various dt) to get instantaneous median and spread in SF\n # at any given point in time. This is NOT an observational quantity, but rather a theoretical\n # bit of information to understand how much formed stars vary in abundance ratio at any\n # given point in time (i.e. 
this is the stellar analog to the gas version of these plots)\n for dt in [0.1, 1, 10]:\n g = statgroup.create_group('%iMyr'%(dt))\n t = ds.current_time.to('Myr').value\n tmax = np.around(t, decimals = -len(str(dt)) + 1)\n if tmax < t:\n tmax = tmax + dt\n tbins = np.arange(0.0, tmax + 0.5*dt, dt)\n\n index = np.digitize(data['creation_time'].to('Myr').value, tbins)\n hist, bins = np.histogram(data['creation_time'].to('Myr').value, bins = tbins)\n g.create_dataset('bins', data = tbins)\n g.create_dataset('hist', data = np.array(hist))\n\n stats_array_dict = {}\n for abundance in aratios.keys():\n stats_array_dict[abundance] = {}\n for k in stats.keys():\n stats_array_dict[abundance][k] = np.zeros(np.size(tbins) - 1)\n\n for i in np.arange(np.size(tbins)-1):\n for abundance in aratios.keys():\n if i == 0:\n sub_g = g.create_group(abundance)\n if hist[i] > 0:\n stats = utilities.compute_stats(aratios[abundance][index == i+1], return_dict = True) # +1 b/c index starts at 1\n for k in stats.keys():\n stats_array_dict[abundance][k][i] = stats[k]\n else:\n for k in stats.keys():\n stats_array_dict[abundance][k][i] = None\n\n for abundance in aratios.keys():\n # - - - - - Produce a gap-less, interpolated mean to compute the ACF\n if False: # don't do this anymore\n first = np.where( np.logical_not(np.isnan( stats_array_dict[abundance]['mean'] )))[0][0]\n mean = stats_array_dict[abundance]['mean'][first:]\n select = np.logical_not(np.isnan(mean))\n clean_mean = mean[select]\n tcent = 0.5 * (tbins[1:] + tbins[:-1])\n tcent = tcent[first:]\n clean_t = tcent[select]\n f_interp = interp1d(clean_t, clean_mean)\n interp_mean = mean\n interp_mean[np.logical_not(select)] = f_interp( tcent[np.logical_not(select)] )\n stats_array_dict[abundance]['interp_mean'] = interp_mean\n stats_array_dict[abundance]['acf'] = utilities.acf(interp_mean, nlags = len(tcent))\n\n g = hf[groupname + '/statistics/%iMyr/'%(dt) + abundance]\n for k in stats_array_dict[abundance].keys():\n g.create_dataset(k, data = stats_array_dict[abundance][k])\n\n # ------------ can do a correlation across time bins here too ---------\n # Pick some time t_o, for the ith bin past t_o, do correlation between\n # those two populations of stars\n # x = np.array([stars in t_o bin] + [stars in t_i bin])\n # corr[i] = np.correlate(x,x, mode = 'full')\n # allow to plot correlation as a function of time.\n\n\n else:\n continue\n#\n# g.create_dataset('Nstars', data = 0.0)\n# g.create_dataset('Mstars', data = 0.0)\n# sg = hf.create_group(groupname + '/abundances')\n# for abundance in aratios.keys():\n# sg.create_dataset( abundance, data = 0.0)\n\n\n hf.close()\n\n return",
"def electrode_separations(dc_survey, survey_type=\"dipole-dipole\", electrode_pair=\"all\"):\n\n if not isinstance(electrode_pair, list):\n if electrode_pair.lower() == \"all\":\n electrode_pair = [\"AB\", \"MN\", \"AM\", \"AN\", \"BM\", \"BN\"]\n elif isinstance(electrode_pair, str):\n electrode_pair = [electrode_pair.upper()]\n else:\n raise Exception(\n \"electrode_pair must be either a string, list of strings, or an \"\n \"ndarray containing the electrode separation distances you would \"\n \"like to calculate not {}\".format(type(electrode_pair))\n )\n\n elecSepDict = {}\n AB = []\n MN = []\n AM = []\n AN = []\n BM = []\n BN = []\n\n for ii, src in enumerate(dc_survey.source_list):\n Tx = src.location\n Rx = src.receiver_list[0].locations\n nDTx = src.receiver_list[0].nD\n\n if survey_type.lower() == \"dipole-dipole\":\n A = matlib.repmat(Tx[0], nDTx, 1)\n B = matlib.repmat(Tx[1], nDTx, 1)\n M = Rx[0]\n N = Rx[1]\n\n AB.append(np.sqrt(np.sum((A[:, :] - B[:, :]) ** 2.0, axis=1)))\n MN.append(np.sqrt(np.sum((M[:, :] - N[:, :]) ** 2.0, axis=1)))\n AM.append(np.sqrt(np.sum((A[:, :] - M[:, :]) ** 2.0, axis=1)))\n AN.append(np.sqrt(np.sum((A[:, :] - N[:, :]) ** 2.0, axis=1)))\n BM.append(np.sqrt(np.sum((B[:, :] - M[:, :]) ** 2.0, axis=1)))\n BN.append(np.sqrt(np.sum((B[:, :] - N[:, :]) ** 2.0, axis=1)))\n\n elif survey_type.lower() == \"pole-dipole\":\n A = matlib.repmat(Tx, nDTx, 1)\n M = Rx[0]\n N = Rx[1]\n\n MN.append(np.sqrt(np.sum((M[:, :] - N[:, :]) ** 2.0, axis=1)))\n AM.append(np.sqrt(np.sum((A[:, :] - M[:, :]) ** 2.0, axis=1)))\n AN.append(np.sqrt(np.sum((A[:, :] - N[:, :]) ** 2.0, axis=1)))\n\n elif survey_type.lower() == \"dipole-pole\":\n A = matlib.repmat(Tx[0], nDTx, 1)\n B = matlib.repmat(Tx[1], nDTx, 1)\n M = Rx\n\n AB.append(np.sqrt(np.sum((A[:, :] - B[:, :]) ** 2.0, axis=1)))\n AM.append(np.sqrt(np.sum((A[:, :] - M[:, :]) ** 2.0, axis=1)))\n BM.append(np.sqrt(np.sum((B[:, :] - M[:, :]) ** 2.0, axis=1)))\n\n elif survey_type.lower() == \"pole-pole\":\n A = matlib.repmat(Tx, nDTx, 1)\n M = Rx\n\n AM.append(np.sqrt(np.sum((A[:, :] - M[:, :]) ** 2.0, axis=1)))\n\n else:\n raise Exception(\n \"survey_type must be 'dipole-dipole' | 'pole-dipole' | \"\n \"'dipole-pole' | 'pole-pole' not {}\".format(survey_type)\n )\n\n if \"AB\" in electrode_pair:\n if AB:\n AB = np.hstack(AB)\n elecSepDict[\"AB\"] = AB\n if \"MN\" in electrode_pair:\n if MN:\n MN = np.hstack(MN)\n elecSepDict[\"MN\"] = MN\n if \"AM\" in electrode_pair:\n if AM:\n AM = np.hstack(AM)\n elecSepDict[\"AM\"] = AM\n if \"AN\" in electrode_pair:\n if AN:\n AN = np.hstack(AN)\n elecSepDict[\"AN\"] = AN\n if \"BM\" in electrode_pair:\n if BM:\n BM = np.hstack(BM)\n elecSepDict[\"BM\"] = BM\n if \"BN\" in electrode_pair:\n if BN:\n BN = np.hstack(BN)\n elecSepDict[\"BN\"] = BN\n\n return elecSepDict",
"def usage_distribution_across_agencies(dframe=dframe): \n \n funding_colors = ['#6baed6','#3182bd','#08519c','#bdd7e7']\n placement_colors = ['#67001f','#b2182b','#d6604d','#f4a582',\n '#fddbc7','#f7f7f7','#d1e5f0','#92c5de',\n '#4393c3','#2166ac','#053061']\n \n fig = {\n 'data':[\n {\n 'hole':.5,\n 'labels': dframe.groupby('agency')['fellow_cost'].sum().index,\n 'marker':{'colors': placement_colors},\n 'showlegend':False,\n 'type':'pie',\n 'domain':{'x': [0, .48]},\n 'name':'total spending',\n 'values':dframe.groupby('agency')['fellow_cost'].sum().values},\n {\n 'hole':.5,\n 'labels': dframe.groupby('funding')['fellow_cost'].sum().index,\n 'marker': {'colors': funding_colors},\n 'showlegend':True,\n 'type':'pie',\n 'domain':{'x':[.52, 1]},\n 'name':'funding sources',\n 'values':dframe.groupby('funding')['fellow_cost'].sum().values}],\n\n 'layout': {\n 'title':'2014--2019 Fellowship Distribution<br>Utilization by Agency & Funding Type',\n 'hovermode':'closest',\n 'plot_bgcolor': '#bdbdbd',\n 'paper_bgcolor': '#bdbdbd',\n 'annotations': [\n {\n 'font':{'size':12,\n 'color':'#636363'},\n 'showarrow':False,\n 'text':'{} Agencies<br>${:,.0f}'.format(dframe['agency'].nunique(),\n dframe['fellow_cost'].sum()),\n 'x':1.35,\n 'y':0.5,\n 'xref': 'paper',\n 'yref': 'paper'\n },\n {\n 'font':{'size':12},\n 'showarrow':False,\n 'text':'<b>Placements</b>',\n 'x':.045,\n 'y':.91\n },\n {\n 'font':{'size':12},\n 'showarrow':False,\n 'text':'<b>Funding</b>',\n 'x':.95,\n 'y':.91,\n 'xref': 'paper',\n 'yref': 'paper'\n },\n {\n 'font':{'size':12,\n 'color':'darkgrey'},\n 'showarrow':False,\n 'text': '<b>Source:</b> Data Provided by Baltimore Corps June 2019:<br>https://github.com/brl1906/fellowship-analysis',\n 'xref': 'paper',\n 'yref': 'paper',\n 'x':.5,\n 'y':-.1}\n ]}\n }\n\n return fig",
"def atomic_areal_density_nm2(counts_edge: float, counts_spectrum: float, partial_cross_section_nm2: float) -> float:\n pass",
"def get_linkage_dendogram(self):\n\n\t\treturn self._linkage_dendogram"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A method used for calculating Distribution descriptors based on SolventAccessibility of AADs.
|
def CalculateDistributionSolventAccessibility(ProteinSequence):
    result = CalculateDistribution(ProteinSequence, _SolventAccessibility, '_SolventAccessibility')
    return result
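A minimal usage sketch of the function above, assuming it lives in a propy-style CTD module where CalculateDistribution and _SolventAccessibility are already defined; the protein sequence and the printed key names are illustrative assumptions only.

# Hypothetical call; the sequence below is made up for illustration.
protein = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQAPILSRVGDGTQDNLSGAEKAVQVKVKALPDAQ"
descriptors = CalculateDistributionSolventAccessibility(protein)
# Assumed shape: a dict of distribution descriptors keyed by
# solvent-accessibility class and breakpoint (e.g. '_SolventAccessibilityD1001').
for key in sorted(descriptors)[:5]:
    print(key, descriptors[key])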
|
[
"def usage_distribution_across_agencies(dframe=dframe): \n \n funding_colors = ['#6baed6','#3182bd','#08519c','#bdd7e7']\n placement_colors = ['#67001f','#b2182b','#d6604d','#f4a582',\n '#fddbc7','#f7f7f7','#d1e5f0','#92c5de',\n '#4393c3','#2166ac','#053061']\n \n fig = {\n 'data':[\n {\n 'hole':.5,\n 'labels': dframe.groupby('agency')['fellow_cost'].sum().index,\n 'marker':{'colors': placement_colors},\n 'showlegend':False,\n 'type':'pie',\n 'domain':{'x': [0, .48]},\n 'name':'total spending',\n 'values':dframe.groupby('agency')['fellow_cost'].sum().values},\n {\n 'hole':.5,\n 'labels': dframe.groupby('funding')['fellow_cost'].sum().index,\n 'marker': {'colors': funding_colors},\n 'showlegend':True,\n 'type':'pie',\n 'domain':{'x':[.52, 1]},\n 'name':'funding sources',\n 'values':dframe.groupby('funding')['fellow_cost'].sum().values}],\n\n 'layout': {\n 'title':'2014--2019 Fellowship Distribution<br>Utilization by Agency & Funding Type',\n 'hovermode':'closest',\n 'plot_bgcolor': '#bdbdbd',\n 'paper_bgcolor': '#bdbdbd',\n 'annotations': [\n {\n 'font':{'size':12,\n 'color':'#636363'},\n 'showarrow':False,\n 'text':'{} Agencies<br>${:,.0f}'.format(dframe['agency'].nunique(),\n dframe['fellow_cost'].sum()),\n 'x':1.35,\n 'y':0.5,\n 'xref': 'paper',\n 'yref': 'paper'\n },\n {\n 'font':{'size':12},\n 'showarrow':False,\n 'text':'<b>Placements</b>',\n 'x':.045,\n 'y':.91\n },\n {\n 'font':{'size':12},\n 'showarrow':False,\n 'text':'<b>Funding</b>',\n 'x':.95,\n 'y':.91,\n 'xref': 'paper',\n 'yref': 'paper'\n },\n {\n 'font':{'size':12,\n 'color':'darkgrey'},\n 'showarrow':False,\n 'text': '<b>Source:</b> Data Provided by Baltimore Corps June 2019:<br>https://github.com/brl1906/fellowship-analysis',\n 'xref': 'paper',\n 'yref': 'paper',\n 'x':.5,\n 'y':-.1}\n ]}\n }\n\n return fig",
"def describe(self, access, element):\n self._prepare(access)\n # Accumulate the descriptor sets from each ability, then turn into a string.\n tags = set()\n for c in self.abilities:\n tags |= c.describe(access, element)\n return ' '.join(list(tags)).lower()",
"def calculate_agreement_directness(annotations_1, annotations_2):\n directness_1 = []\n directness_2 = []\n # Search for pairs annotated by both\n for pair in annotations_1:\n if pair in annotations_2:\n # Only take those into account that were annotated as \"affixal\" by both annotators and add those to lists\n if annotations_1[pair][\"affixal\"] == \"affixal\" and annotations_2[pair][\"affixal\"] == \"affixal\":\n directness_1.append(annotations_1[pair][\"directness\"])\n directness_2.append(annotations_2[pair][\"directness\"])\n n = len(directness_1)\n kappa = cohen_kappa_score(directness_1, directness_2)\n return n, kappa",
"def get_expert_annoation_stats(annotations):\n if annotations:\n coherence = []\n consistency = []\n fluency = []\n relevance = []\n for annotate in annotations:\n coherence.append(annotate['coherence'])\n consistency.append(annotate['consistency'])\n fluency.append(annotate['fluency'])\n relevance.append(annotate['relevance'])\n if coherence and consistency and fluency and relevance:\n return [sum(coherence) / len(coherence), sum(consistency) / len(consistency), sum(fluency) / len(\n fluency), sum(relevance) / len(relevance)]\n else:\n return -1",
"def element_descriptor(protein, ligand, binsize=0.0):\n\t# SUPPRESS OPENBABEL WARNINGS\n\tpybel.ob.obErrorLog.StopLogging()\n\n\t# ELEMENT TABLE TO DETERMINE VDW AND COVALENT BONDS\n\tet = OBElementTable()\n\n\t# CONVERT ELEMENT SYMBOLS TO ATOMIC NUMBERS\n\tatomicnums = (et.GetAtomicNum(str(element)) for element in config['elements'])\n\tatomicnums_pro = (et.GetAtomicNum(str(element)) for element in config['elements_pro'])\n\t#print(et.GetAtomicNum(\"Me\"), \"Fe\")\n\n\t# CREATE A NUMERICAL ID TO ELEMENT COMBINATION MAPPING\n\t# IMPORTANT TO MAP THE DESCRIPTOR VECTOR BACK TO THE LABELS\n\t#element_pairs = product(sorted(atomicnums),repeat=2)\n\telement_pairs = product(sorted(atomicnums),sorted(atomicnums_pro),repeat=1)\n\telement_pairs = dict((p,i) for i,p in enumerate(element_pairs))\n\n\n\t# ALSO CREATE A COLUMN LABEL FOR THIS DESCRIPTOR\n\tsorted_pairs = zip(*sorted(element_pairs.items(), key=itemgetter(1)))[0]\n\t#print(sorted_pairs)\n\n\tnumcols = len(element_pairs)\n\n\t# GENERATE THE DISTANCE BINS\n\tif binsize:\n\n\t\t# get the distance bins for the given cutoff and bin size\n\t\tbins = get_distance_bins(config['cutoff'], binsize)\n\n\t\t# NUMBER OF TOTAL COLUMNS IN DESCRIPTOR\n\t\tnumcols *= (bins.size + 1)\n\n\t\t# CREATE A COLUMN FOR EACH ELEMENT PAIR AND DISTANCE BIN\n\t\tlabels = []\n\t\tfor x,y in sorted_pairs:\n\t\t\tfor i in range(len(bins) + 1):\n\t\t\t\tlabel = \"{0}.{1}-B{2}\".format(et.GetSymbol(x), et.GetSymbol(y), i)\n\t\t\t\tlabels.append(label)\n\n\t# LABEL WITHOUT BINS\n\telse:\n\t\tlabels = ['.'.join((et.GetSymbol(x),et.GetSymbol(y))) for x,y in sorted_pairs]\n\n\t# DESCRIPTOR THAT WILL CONTAIN THE SUM OF ALL ELEMENT-ELEMENT INTERACTIONS\n\tdescriptor = numpy.zeros(numcols, dtype=int)\n\n\t# GET THE CONTACTS\n\tcontacts = get_contacts(protein, ligand, config['cutoff'])\n\n\t# ITERATE THROUGH CONTACT PAIRS AND DETERMINE SIFT\n\tfor hetatm, hetatm_contacts in contacts:\n\t\thetatm_num = hetatm.GetAtomicNum()\n\n\t# ITERATE THROUGH ALL THE CONTACTS THE HETATM HAS\n\t\tfor atom, distance in hetatm_contacts:\n\t\t\tresidue = atom.GetResidue()\n\n\t\t\tif residue.GetAtomID(atom).strip() in ['FE','FE2']:\n\t\t\t\tatom_num == 26\n\t\t\telse:\n\t\t\t\tatom_num = atom.GetAtomicNum()\n\n\t\t\t# IGNORE WATER RESIDUES\n\t\t\tif residue.GetName() == 'HOH': continue\n\n\t\t\t# IGNORE ZN,FE ETC.\n\t\t\ttry: index = element_pairs[(atom_num, hetatm_num)]\n\t\t\texcept KeyError: continue\n\t\t\t#print(element_pairs, 'ele')\n\n\t\t\t# BIN INTERACTIONS\n\t\t\tif binsize:\n\n\t\t\t\t# GET THE BIN THIS CONTACT BELONGS IN\n\t\t\t\t# DIGITIZE TAKES AN ARRAY-LIKE AS INPUT\n\t\t\t\tbin_id = numpy.digitize([distance,], bins)[0]\n\t\t\t\tdescriptor[1 + index + index*bins.size + bin_id] += 1\n\n\t\t\telse:\n\n\t\t\t\t# ELEMENTS ARE SORTED NUMERICALLY\n\t\t\t\tdescriptor[index] += 1\n\n\tif binsize: sum_descriptor_bins(descriptor, bins)\n\n\treturn descriptor, labels",
"async def get_expert_advisors(self) -> List[ExpertAdvisor]:",
"def attributes_desc():\n columns = [\n '5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive', 'Bags_Under_Eyes', 'Bald', 'Bangs',\n 'Big_Lips', 'Big_Nose', 'Black_Hair', 'Blond_Hair', 'Blurry', 'Brown_Hair', 'Bushy_Eyebrows',\n 'Chubby', 'Double_Chin', 'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones',\n 'Male', 'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard', 'Oval_Face', 'Pale_Skin',\n 'Pointy_Nose', 'Receding_Hairline', 'Rosy_Cheeks', 'Sideburns', 'Smiling', 'Straight_Hair', 'Wavy_Hair',\n 'Wearing_Earrings', 'Wearing_Hat', 'Wearing_Lipstick', 'Wearing_Necklace', 'Wearing_Necktie', 'Young',\n ]\n\n return map(str.lower, columns)",
"def probe_attenuation(self):\n print debug_msg.TBD_MSG",
"def _output_accessibility_aggregated(min_travel_times, interval_num,\n zones, ats, output_dir='.'):\n\n with open(output_dir+'/accessibility_aggregated.csv', 'w', newline='') as f:\n time_budgets = [\n 'TT_'+str(MIN_TIME_BUDGET+BUDGET_TIME_INTVL*i) for i in range(interval_num)\n ]\n\n headers = ['zone_id', 'geometry', 'mode']\n headers.extend(time_budgets)\n\n writer = csv.writer(f)\n writer.writerow(headers)\n\n # calculate accessibility\n for oz, coord in zones.items():\n if oz == -1:\n continue\n\n for atype in ats:\n at_str = atype.get_type_str()\n # number of accessible zones from oz for each agent type\n counts = [0] * interval_num\n for dz in zones.keys():\n if (oz, dz, at_str) not in min_travel_times.keys():\n continue\n\n min_tt = min_travel_times[(oz, dz, at_str)][0]\n if min_tt >= MAX_LABEL_COST:\n continue\n\n id = _get_interval_id(min_tt)\n while id < interval_num:\n counts[id] += 1\n id += 1\n # output assessiblity\n geo = 'POINT (' + coord + ')'\n line = [oz, geo, atype.get_type_str()]\n line.extend(counts)\n writer.writerow(line)\n\n if output_dir == '.':\n print('\\ncheck accessibility_aggregated.csv in '\n +os.getcwd()+' for aggregated accessibility matrix')\n else:\n print('\\ncheck accessibility_aggregated.csv in '\n +os.path.join(os.getcwd(), output_dir)\n +' for aggregated accessibility matrix')",
"def agreements_statistics(number_of_agents, reserve = None):\n stability_folder_location = '../../stability' + ('-reserve' if reserve is not None else '')\n stability_file_location = 'stability-for-' + str(number_of_agents) + '-agents' + ('-reserve-' + str(reserve) if reserve is not None else '') + '.csv'\n agreements100_200 = pd.read_csv(stability_folder_location + '/100-200/' + stability_file_location)\n agreements200_400 = pd.read_csv(stability_folder_location + '/200-400/' + stability_file_location)\n agreements400_800 = pd.read_csv(stability_folder_location + '/400-800/' + stability_file_location)\n \n total_profiles = len(agreements100_200)\n if(total_profiles != len(agreements200_400) or total_profiles != len(agreements400_800)):\n raise ValueError('The length of the stability data do not agree')\n stable_profiles_100_200 = len(agreements100_200[agreements100_200['direction_100'] == agreements100_200['direction_200']])\n stable_profiles_200_400 = len(agreements200_400[agreements200_400['direction_200'] == agreements200_400['direction_400']])\n stable_profiles_400_800 = len(agreements400_800[agreements400_800['direction_400'] == agreements400_800['direction_800']])\n return [total_profiles, stable_profiles_100_200, stable_profiles_200_400, stable_profiles_400_800]",
"def detAcceptanceRateValues(self):\n if(not(self.weight)):\n for nodej in self.g.nodes():\n WeightedSum = len(self.g.in_edges(nodej))\n for edge in self.g.in_edges(nodej):\n self.A[edge[0] + \"-\"+edge[1]] = (1.0/self.NormA[nodej])\n else:\n for nodej in self.g.nodes():\n for edge in self.g.in_edges(nodej):\n self.A[edge[0] + \"-\"+edge[1]] = (self.g.get_edge_data(edge[0],edge[1]) + 0.0)/self.NormA[nodej]",
"def info_gain(self,a):\n entro = self.entropy()\n Dv = dict()\n for d in self.datas:\n a_info = d.data[a]\n if a_info in Dv:\n Dv[a_info].add(d)\n else:\n new_dataset = DataSet()\n new_dataset.add(d)\n Dv[a_info] = new_dataset\n for x in Dv:\n N = len(self.datas) #|D|\n Nv = len(Dv[x].datas)#|Dv|\n entro -= Dv[x].entropy() * Nv / N\n return entro, Dv",
"def get_group_descriptor(self, groups):\n group_count = {}\n for chem in self.reactants + self.products:\n for group, count in chem.functional_groups.items():\n group_count.setdefault(group, 0.0)\n group_count[group] += count\n\n descriptor = []\n for smarts in sorted(groups.keys()):\n descriptor.append(group_count.get(smarts, 0))\n return descriptor",
"def addApproxDiam(dics, verbose=True):\n # surface brightness relations for dwarf stars\n # from Kervella et al. 2004\n k04 = {}\n # coef0 coef1 error\n k04['BV']=[.9095, .4889, .0918]\n k04['BJ']=[.3029, .5216, .0307]\n k04['BH']=[.2630, .5134, .0189]\n k04['BK']=[.2538, .5158, .0100]\n k04['VJ']=[.3547, .5310, .0475]\n k04['VH']=[.2893, .5148, .0185]\n k04['VK']=[.2753, .5175, .0101]\n k04['JK']=[.5256, .5097, .0575]\n\n for k, d in enumerate(dics): # for each star\n diams = []\n errs = []\n for coul in k04.keys(): # for each color\n # check magnitudes are valid, compute diameter and error\n if d.has_key(coul[0]+'MAG') and d[coul[0]+'MAG']>-90 and\\\n d.has_key(coul[1]+'MAG') and d[coul[1]+'MAG']>-90:\n diams.append(diamSurfBri(d[coul[0]+'MAG'], d[coul[1]+'MAG'],\n k04[coul]))\n errs.append(k04[coul][2]*diams[-1])\n if len(diams)>1:\n # weighted average\\\n dics[k]['DIAM'] = reduce(lambda x,y: x+y, [diams[i]*errs[i]\n for i in range(len(diams))])/\\\n reduce(lambda x,y: x+y, errs)\n dics[k]['DIAM'] = round(dics[k]['DIAM'],\n int(-math.log10(dics[k]['DIAM']) +3))\n elif len(diams)==1:\n dics[k]['DIAM'] = round(diams[0], int(-math.log10(diams[0])+3))\n else:\n dics[k]['DIAM'] = 0 \n if verbose: \n print dics[k]['NAME'], '|', dics[k]['DIAM']\n return dics",
"def calculate_agreement_subtypes_indirect(annotations_1, annotations_2):\n subtypes_1 = []\n subtypes_2 = []\n # Search for pairs annotated by both\n for pair in annotations_1:\n if pair in annotations_2:\n # Only take those into account that were annotated as \"affixal\" and \"indirect\"\n # by both annotators and add those to lists\n if all([annotations_1[pair][\"affixal\"] == \"affixal\",\n annotations_2[pair][\"affixal\"] == \"affixal\",\n annotations_1[pair][\"directness\"] == \"indirect\",\n annotations_2[pair][\"directness\"] == \"indirect\"]):\n subtypes_1.append(annotations_1[pair][\"subtype\"])\n subtypes_2.append(annotations_2[pair][\"subtype\"])\n # Get number of instances\n n = len(subtypes_1)\n # Calculate Cohen's kappa\n kappa = cohen_kappa_score(subtypes_1, subtypes_2)\n # Create confusion matrix\n series_1 = pd.Series(subtypes_1, name='Subtypes_1')\n series_2 = pd.Series(subtypes_2, name='Subtypes_2')\n confusion_matrix = pd.crosstab(series_1, series_2)\n return n, kappa, confusion_matrix",
"def Attributes(self) -> _n_5_t_17:",
"def explain_detailed_age_dist_for_inf(influencers=None):\n if influencers is None:\n return None\n\n from debra.classification_data import influencer_age_groups_dict\n\n for inf in influencers:\n\n print(u'INFLUENCER: Id: %s Name: %r Blog name: %r' % (inf.id, inf.name, inf.blogname))\n\n for group_name, group_keywords in influencer_age_groups_dict.items():\n\n print(u'AGE GROUP: %s' % group_name)\n keywords_stats = {}\n\n for keyword in group_keywords:\n\n total = get_posts_total_by_keywords(inf.id, [keyword,])\n if total is None:\n total = 0\n\n if total > 0:\n keywords_stats[keyword] = total\n\n print(u'TOTAL POSTS: %s' % get_posts_total_by_keywords(inf.id, group_keywords))\n print(u'TOTAL WORDS: %s' % len(keywords_stats))\n print(u'PERCENT: %s percents' % getattr(inf, \"dist_age_%s\" % group_name))\n # print(u'WORDS: %s' % u', '.join(keywords_stats.keys()))\n print(u'WORDS: %s' % keywords_stats)\n print(u'')\n\n return",
"def print_avail_calcs()-> None:\n print(skbio.diversity.get_alpha_diversity_metrics())\n print(skbio.diversity.get_beta_diversity_metrics())",
"def dist_distr_display(self):\n bool_idx = self.all_distance.pair.apply(lambda x: True if x in list(self.friends.pair) else False)\n nbool_idx = bool_idx.apply(lambda x: not x)\n sim_a2b = self.all_distance.ix[bool_idx, \"dist_a2b\"]\n sim_b2a = self.all_distance.ix[bool_idx, \"dist_b2a\"]\n diff_a2b = self.all_distance.ix[nbool_idx, \"dist_a2b\"]\n diff_b2a = self.all_distance.ix[nbool_idx, \"dist_b2a\"]\n\n ## Visualize the ploting\n plt.figure(1)\n plt.subplot(211)\n\n plt.title(\"Distance (A to B) Distribution\")\n sim_a2b.hist(color = 'green', alpha = .5, bins = 20)\n diff_a2b.hist(color = 'red', alpha = .5, bins = 20)\n\n plt.subplot(212)\n plt.title(\"Distance (B to A)Distribution From B to A\")\n sim_b2a.hist(color = 'green', alpha = .5, bins = 20)\n diff_b2a.hist(color = 'red', alpha = .5, bins = 20)",
"def test_zernike_descriptor(self):\n self.assertTrue(abs(np.sum(self.des[0,:]) - 43.6876) < 0.01, \"Incorrect sum of feature 0 descriptor\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find all of the tags this post is associated with
|
def tags(self):
return [t['tag'] for t in database.execute(
'SELECT tag FROM tags WHERE post_id = ?', [self['id']])]
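A standalone sketch of what the accessor above assumes about storage: a tags table holding one row per (post_id, tag) pair, queried through a thin database helper. The sqlite3 version below is hypothetical and only mirrors the SELECT; it is not taken from the original project.

import sqlite3

def tags_for_post(conn: sqlite3.Connection, post_id: int) -> list:
    # One row per (post_id, tag) pair; return the bare tag strings.
    rows = conn.execute('SELECT tag FROM tags WHERE post_id = ?', [post_id])
    return [row[0] for row in rows]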
|
[
"def get_tags(self):\n return self.tags.order_by(article_tag_mapping.c.created_at.asc()).all()",
"def tags(self) -> List:",
"def get_all_tags():\n return Tag.query.all()",
"def get_tags(self, language=None):\n\n # get tagged post\n entries = self\n if language:\n entries = entries.filter_by_language(language)\n entries = entries.distinct()\n if not entries:\n return []\n kwargs = TaggedItem.bulk_lookup_kwargs(entries)\n\n # aggregate and sort\n counted_tags = dict(TaggedItem.objects\n .filter(**kwargs)\n .values('tag')\n .annotate(count=models.Count('tag'))\n .values_list('tag', 'count'))\n\n # and finally get the results\n tags = Tag.objects.filter(pk__in=counted_tags.keys())\n for tag in tags:\n tag.count = counted_tags[tag.pk]\n return sorted(tags, key=lambda x: -x.count)",
"def get_all_tags(self):\n return self.scenario.get_all_tags()",
"def tags(self):\n return self._named_trees('tag')",
"def get_tags_on_obj(self, obj, key=None, category=None):\r\n tags = _GA(obj, \"db_tags\").all()\r\n if key:\r\n tags = tags.filter(db_key__iexact=key.lower().strip())\r\n if category:\r\n tags = tags.filter(db_category__iexact=category.lower().strip())\r\n return list(tags)",
"def get_tagged_items (self, tags):\n if type(tags) is not list:\n tags = [tags]\n\n all_items = []\n for tag in tags:\n if tag in self.tags:\n if not all_items:\n all_items = self.tags[tag].keys()\n else:\n # set intersection\n all_items = [item for item in all_items if item in self.tags[tag].keys()]\n #\n #\n # \n return all_items",
"def _list(self):\n with self._treant._read:\n tags = self._treant._state['tags']\n\n tags.sort()\n return tags",
"def tag_cloud():\n return Tag.objects.all().order_by(\"name\")",
"def test_tags_posts(self):\n u1 = User(username='me', email='me@gmail.com', password='123456', firstname='moa')\n db.session.add(u1)\n db.session.commit()\n u = User.query.filter_by(username=u1.username).first()\n print u\n utcnow = datetime.utcnow()\n post = Post(body=\"testing post\", user_id=u.id, timestamp=utcnow+timedelta(seconds=1))\n woo = Tag(tag=\"woo\")\n post2 = Post(body=\"testing post 2\", user_id=u.id, timestamp=utcnow+timedelta(seconds=4))\n\n woo.posts.append(post)\n woo.posts.append(post2)\n db.session.add(post)\n db.session.add(woo)\n db.session.add(post2)\n db.session.commit()\n wood = Tag.query.filter_by(tag=\"woo\").first()\n print wood\n print wood.tag\n print wood.posts\n for wp in wood.posts:\n print wp\n #wlist = wood.posts.filter_by(postTags.c.tag == wood.tag).all()\n #wlist = Tag.query.filter_by(tag=\"woo\").all()\n wlist = Post.query.join(postTags).filter(postTags.c.tag == wood.tag).order_by(Post.timestamp.desc()).all()\n print wlist",
"def get_tags(self):\n return self._tag_list.keys()",
"def by_tags(request, tags):\n if tags is None:\n return tag_list(request)\n\n tags_l = tags.split('+')\n\n posts = []\n\n for post in published_posts():\n if all([tag in post.taglist for tag in tags_l]):\n posts.append(post)\n\n return render(request, 'post_list.html', {'posts': posts, 'info': 'Posts tagged \"{tags}\"'.format(tags=tags)})",
"def getAllTags():\n # Create a diary database object.\n db = DiaryDatabaseWrapper.DiaryDatabaseWrapper()\n tagRows = db.selectFromTable('tags',('name',),'')\n db.close()\n return [element[0] for element in tagRows]",
"def get_all_tags(self):\n\n verbose = self.options.verbose\n gh = self.github\n user = self.options.user\n repo = self.options.project\n if verbose:\n print(\"Fetching tags...\")\n\n tags = []\n page = 1\n while page > 0:\n if verbose > 2:\n print(\".\", end=\"\")\n rc, data = gh.repos[user][repo].tags.get(\n page=page, per_page=PER_PAGE_NUMBER)\n if rc == 200:\n tags.extend(data)\n else:\n self.raise_GitHubError(rc, data, gh.getheaders())\n page = NextPage(gh)\n if verbose > 2:\n print(\".\")\n\n if len(tags) == 0:\n if not self.options.quiet:\n print(\"Warning: Can't find any tags in repo. Make sure, that \"\n \"you push tags to remote repo via 'git push --tags'\")\n exit()\n if verbose > 1:\n print(\"Found {} tag(s)\".format(len(tags)))\n return tags",
"def get_entities_of_tag(self, tag):\n return list(filter(lambda entity: entity.tag == tag, self.entities))",
"def serialize_tags(self):\n return [i.name for i in self.tags.all()]",
"def all_tagged_objects(tag = None, related_tags = False, related_tag_counts = True, **kwargs):\n if tag is None:\n raise AttributeError(_('tagged_objects must be called with a tag.'))\n \n tag_instance = get_tag(tag)\n if tag_instance is None:\n raise AttributeError(_('tagged_object must be called with a valid tag'))\n \n queryset = TaggedItem.objects.filter(tag = tag_instance)\n if related_tags:\n related_tags_list = \\\n Tag.objects.related_for_model(tag_instance, queryset, counts = related_tag_counts)\n else:\n related_tags_list = []\n \n return queryset, related_tags_list",
"def _tag_facets(request, parsed_query=None):\n return (parsed_query or _parsed_query(request)).getall('tag')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Decode the photos associated with this post (if any)
|
def photos(self):
return json.loads(self['photos']) or []
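A small illustration of the decoding above, assuming the 'photos' column stores a JSON-encoded list (or the JSON literal null); the `or []` normalizes a missing value to an empty list.

import json

row = {'photos': '["a.jpg", "b.jpg"]'}   # hypothetical stored value
assert (json.loads(row['photos']) or []) == ['a.jpg', 'b.jpg']

row_empty = {'photos': 'null'}           # no photos recorded
assert (json.loads(row_empty['photos']) or []) == []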
|
[
"def photos(self):\n try:\n return self._photos\n except AttributeError:\n photo_uuids = [\n uuid\n for uuid, photo in self._db._dbphotos.items()\n if photo[\"momentID\"] == self._pk\n ]\n\n self._photos = self._db.photos_by_uuid(photo_uuids)\n return self._photos",
"def photos(self):\n photos = []\n for photo in self._model.photos:\n photo_data = PhotoPrivateContext(photo).get_data()\n if photo_data:\n photos.append(photo_data)\n return photos",
"def photos(self):\n photos = []\n for photo in self._model.photos:\n if not photo.public:\n continue\n photo_data = PhotoPublicContext(photo).get_data()\n if photo_data:\n photos.append(photo_data)\n return photos",
"def extract_postmeta_images(self, node):\n images = set()\n order = []\n for image_url in get_meta_values_by_key(node, 'Image'):\n images.add(image_url)\n order.append(image_url)\n return sorted(tuple(images), key=lambda x: order.index(x))",
"def get_photos(self):\n response = requests.get('https://api.vk.com/method/photos.get', params={\n 'owner_id': self.id_of_user,\n 'album_id': 'profile',\n 'extended': 1,\n 'photo_sizes': 1,\n 'v': 5.21,\n 'access_token': TOKEN,\n }\n )\n return response.json()",
"def getUserPhotos(self):\n return self.base.get(\"user_photos\", [])",
"def photo(self):\n if \"photo\" in self._prop_dict:\n if isinstance(self._prop_dict[\"photo\"], OneDriveObjectBase):\n return self._prop_dict[\"photo\"]\n else :\n self._prop_dict[\"photo\"] = Photo(self._prop_dict[\"photo\"])\n return self._prop_dict[\"photo\"]\n\n return None",
"def images(self):\n context = aq_inner(self.context)\n results = []\n for field in self.fields():\n full = context.getField(field).tag(context)\n item = {\n 'full-image': '{0} id=\"cropbox\" {1}'.format(full[:4], full[5:]),\n 'select': self.select(),\n 'field': field,\n 'previews': self.previews(field),\n }\n results.append(item)\n return results",
"def getPhotos(self, pics, pics_with_tags, uid):\n\n print 'GET PHOTOS'\n \n # FINDING TAGGED PERSONS IN PHOTOS\n \n tags_list1 = map(lambda y:y['tags'], pics_with_tags) \n tags_data_list = map(lambda z:z['data'], tags_list1)\n tagged_persons=list()\n for index in range(0,len(tags_data_list)):\n having_id=filter(lambda x:'id' in x.keys(), tags_data_list[index])\n idd=map(lambda x:x['id'], having_id)\n no_nones = filter(lambda x:x!=None, idd) \n tagged_persons.extend(no_nones)\n tagged_counts = dict((i,tagged_persons.count(i)) for i in tagged_persons) \n # dictionary of {ID: no_of_occurences} of all tagged persons sorted \n # in descending order of no_of_occurences\n\n # FINDING LIKES FOR THE PHOTOS\n\n likes_list = filter(lambda y:'likes' in y.keys(), pics) \n likes_list1 = map(lambda z:z['likes'], likes_list)\n likes_data_list = map(lambda z:z['data'], likes_list1)\n liked_by=list()\n for list_elem in likes_data_list:\n having_id=filter(lambda x:'id' in x.keys(), list_elem)\n idd=map(lambda x:x['id'], having_id)\n no_nones = filter(lambda x:x!=None, idd)\n liked_by.extend(no_nones)\n \n liked_counts = dict((i,liked_by.count(i)) for i in liked_by) \n # dictionary of {ID: no_of_occurences} of all persons who liked \n # my photos sorted in descending order of no_of_occurences\n \n # FINDING COMMENTS FOR THE PHOTOS\n\n comments_list = filter(lambda y:'comments' in y.keys(), pics) \n comments_list1 = map(lambda z:z['comments'], comments_list)\n comments_data_list = map(lambda z:z['data'], comments_list1)\n commented_by=list()\n for list_elem in comments_data_list:\n having_from = filter(lambda x:'from' in x.keys(), list_elem)\n fromm = map(lambda x:x['from'], list_elem)\n no_nones = filter(lambda x:x!=None, fromm)\n having_id = filter(lambda x:'id' in x.keys(), no_nones) \n idd = map(lambda x:x['id'], having_id)\n commented_by.extend(idd)\n\n commented_counts = dict((i,commented_by.count(i)) for i in commented_by) \n # dictionary of {ID: no_of_occurences} of all tagged persons \n # sorted in descending order of no_of_occurences\n photo_data = {'tagged': tagged_counts, \n 'liked by': liked_counts, \n 'commented by': commented_counts}\n return photo_data",
"def get_images(self):\n pass",
"def getFriendsPhotos(self):\n return self.base.get(\"friends_photos\", [])",
"def get_post_info(self, code):\n url = \"https://www.instagram.com/p/{}/\".format(code)\n with self.session.get(url) as res:\n # media = self._get_shared_data(res)['entry_data']['PostPage'][0]['media']\n media = self._get_shared_data(res)['entry_data']['PostPage'][0]\\\n ['graphql']['shortcode_media']\n # Fix renaming of attributes\n media.setdefault('code', media.get('shortcode'))\n media.setdefault('date', media.get('taken_at_timestamp'))\n media.setdefault('display_src', media.get('display_url'))\n media.setdefault('likes', media['edge_media_preview_like'])\n media.setdefault('comments', media['edge_media_to_comment'])\n return media",
"def get_image_map():\n image_names = os.listdir(join(ROOT, '_raw_images'))\n image_map = {}\n front_matter = re.compile(r\"^---\\n(.+\\n)+---\\n\")\n for file in os.listdir(join(ROOT, '_posts')):\n post_name = splitext(file)[0]\n with open(join(ROOT, '_posts', file)) as in_file:\n content = in_file.read()\n content = front_matter.sub('', content)\n for image_name in image_names:\n if image_name in content:\n image_map[image_name] = post_name\n return image_map",
"def vk_get_photo_album(request,album):\n if not request.user.is_superuser:\n return redirect('%s?next=%s' % (reverse('dc_parse:admin_auth'), request.path))\n debug = {}\n if request.method == \"POST\":\n post = request.POST.copy()\n vk_token,vk_user = get_vk_cookies(request)\n method_name = 'photos.get'\n parameters = {\n 'owner_id': vk_user,\n 'album_id': album,\n 'photo_sizes': 1,\n 'extended': 1,\n }\n if int(post.get('count'))>0:\n # count=0 mean all photos in album\n parameters['count'] = post.get('count')\n parameters['offset'] = post.get('offset')\n\n # to db\n tags = prepare_tags(post.getlist('tags_existed'),post.get('tags_new'))\n content = vk_method(method_name,vk_token,parameters)\n resume = vk_put_photos_to_db(content,tags)\n\n # stat_action = {\n # 'create_media': 'media_new',\n # 'create_tags': 'tag_new',\n # 'bind': 'tag_bonds'}\n # for act,act_res in stat_action.items():\n # if resume[act_res]>0:\n # StatUpload.objects.create(\n # num = resume[act_res],\n # action = act,\n # method = 'album-'+album\n # )\n\n return render(request,'vk_get_photo_result.html',{\n # 'content': content,\n 'imgs': content['items'],\n 'album': album,\n 'tags': tags,\n 'resume': resume\n })\n\n else:\n form = ParseForm(dict(count=8,offset=0))\n return render(request,'vk_get_photo_form.html',{\n 'form': form,\n 'debug': debug\n })",
"def parse_image_data(data):\n img_id = data[\"id\"] if \"id\" in data else data[\"image_id\"]\n url = data[\"url\"]\n width = data[\"width\"]\n height = data[\"height\"]\n coco_id = data[\"coco_id\"]\n flickr_id = data[\"flickr_id\"]\n image = Image(img_id, url, width, height, coco_id, flickr_id)\n return image",
"def get_pictures(self):\n all_pictures = Picture.objects.filter(album__id=self.id)\n return all_pictures if all_pictures else []",
"def get_photolist(self, limit):\n method = 'flickr.photosets.getPhotos'\n query = 'photoset_id=%s' % self.photoset\n\n photolist = self._query_read(method, query, limit)\n try:\n return photolist['photoset']['photo']\n except:\n return None",
"def _get_all_photos(photosdb):\n photos = photosdb.photos(images=True, movies=True)\n photos.extend(photosdb.photos(images=True, movies=True, intrash=True))\n return photos",
"def image_meta(self, i: int) -> Dict[str, Any]:\n return self.coco.imgs[self.img_ids[i]]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a new Lock instance named ``name`` using the Redis client supplied by ``redis``. ``timeout`` indicates a maximum life for the lock. By default, it will remain locked until release() is called. ``timeout`` can be specified as a float or integer, both representing the number of seconds to wait. ``sleep`` indicates the amount of time to sleep per loop iteration when the lock is in blocking mode and another client is currently holding the lock. ``blocking`` indicates whether calling ``acquire`` should block until the lock has been acquired or to fail immediately, causing ``acquire`` to return False and the lock not being acquired. Defaults to True. Note this value can be overridden by passing a ``blocking`` argument to ``acquire``. ``blocking_timeout`` indicates the maximum amount of time in seconds to spend trying to acquire the lock. A value of ``None`` indicates continue trying forever. ``blocking_timeout`` can be specified as a float or integer, both representing the number of seconds to wait. ``thread_local`` indicates whether the lock token is placed in threadlocal storage. By default, the token is placed in thread local storage so that a thread only sees its token, not a token set by another thread.
|
def __init__(self, redis, name, timeout=None, sleep=0.1,
blocking=True, blocking_timeout=None, thread_local=True):
self.redis = redis
self.name = name
self.timeout = timeout
self.sleep = sleep
self.blocking = blocking
self.blocking_timeout = blocking_timeout
self.thread_local = bool(thread_local)
self.local = threading.local() if self.thread_local else dummy()
self.local.token = None
if self.timeout and self.sleep > self.timeout:
raise LockError("'sleep' must be less than 'timeout'")
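A brief construction sketch for the lock above; `redis_client` stands in for a connected client and is an assumption, and whether acquire()/release() are coroutines depends on which (sync or async) client this Lock variant wraps.

# Hypothetical usage; key name and timings are illustrative only.
lock = Lock(redis_client, 'resource:inventory', timeout=10.0, sleep=0.1,
            blocking=True, blocking_timeout=5.0)

# The constructor validates its arguments eagerly:
try:
    Lock(redis_client, 'resource:inventory', timeout=0.05, sleep=0.1)
except LockError:
    pass  # 'sleep' must be less than 'timeout'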
|
[
"async def acquire(self, blocking=None, blocking_timeout=None):\n sleep = self.sleep\n token = b(uuid.uuid1().hex)\n if blocking is None:\n blocking = self.blocking\n if blocking_timeout is None:\n blocking_timeout = self.blocking_timeout\n blocking_timeout = blocking_timeout or self.timeout\n stop_trying_at = mod_time.time() + min(blocking_timeout, self.timeout)\n\n while True:\n if await self.do_acquire(token):\n lock_acquired_at = mod_time.time()\n if await self.check_lock_in_slaves(token):\n check_finished_at = mod_time.time()\n # if time expends on acquiring lock is greater than given time\n # the lock should be released manually\n if check_finished_at > stop_trying_at:\n await self.do_release(token)\n return False\n self.local.token = token\n # validity time is considered to be the\n # initial validity time minus the time elapsed during check\n await self.do_extend(lock_acquired_at - check_finished_at)\n return True\n else:\n await self.do_release(token)\n return False\n if not blocking or mod_time.time() > stop_trying_at:\n return False\n await asyncio.sleep(sleep, loop=self.redis.connection_pool.loop)",
"def test_simple_lock():\n lock = RedLock(\"test_simple_lock\", [{\"host\": \"localhost\"}], ttl=1000)\n locked = lock.acquire()\n lock.release()\n assert locked is True",
"def test_try_lock():\n with throttle(b\"[semaphores]\\nA=1\") as url:\n # We hold the lease, all following calls are going to block\n first = Peer.from_server_url(url)\n first.acquire(\"A\")\n with pytest.raises(Timeout):\n with lock(BASE_URL, \"A\", timeout=timedelta(seconds=1)):\n pass",
"def get_lock(self, key):\n\n if self.disable_locks:\n raise Return(None)\n\n lock = self.redis_conn.lock(\n \"lock:\" + self.add_namespace(key),\n lock_ttl=self.lock_ttl,\n polling_interval=self.lock_polling_interval\n )\n\n result = yield Task(lock.acquire, blocking=True)\n\n if isinstance(result, RedisError):\n raise result\n\n raise Return(lock)",
"def getMutexLock(self, lock_name, timeout=2):\n c=self.getCursor()\n select=\"GET_LOCK('\"+lock_name+\"',\"+str(timeout)+\")\"\n c.execute(\"SELECT \"+select)\n return c.fetchone()[select]",
"def test_from_url():\n lock = RedLock(\"test_from_url\", [{\"url\": \"redis://localhost/0\"}], ttl=1000)\n locked = lock.acquire()\n lock.release()\n assert locked is True",
"def acquireRead(self, blocking=True, timeout=None):\r\n\r\n if not blocking:\r\n endtime = -1\r\n elif timeout is not None:\r\n endtime = time() + timeout\r\n else:\r\n endtime = None\r\n me = current_thread()\r\n self.__condition.acquire()\r\n try:\r\n if self.__writer is me:\r\n # If we are the writer, grant a new read lock, always.\r\n self.__writercount += 1\r\n return\r\n while True:\r\n if self.__writer is None:\r\n # Only test anything if there is no current writer.\r\n if self.__upgradewritercount or self.__pendingwriters:\r\n if me in self.__readers:\r\n # Only grant a read lock if we already have one\r\n # in case writers are waiting for their turn.\r\n # This means that writers can't easily get starved\r\n # (but see below, readers can).\r\n self.__readers[me] += 1\r\n return\r\n # No, we aren't a reader (yet), wait for our turn.\r\n else:\r\n # Grant a new read lock, always, in case there are\r\n # no pending writers (and no writer).\r\n self.__readers[me] = self.__readers.get(me, 0) + 1\r\n return\r\n if timeout is not None:\r\n remaining = endtime - time()\r\n if remaining <= 0:\r\n # Timeout has expired, signal caller of this.\r\n raise RuntimeError(\"Acquiring read lock timed out\")\r\n self.__condition.wait(remaining)\r\n else:\r\n self.__condition.wait()\r\n finally:\r\n self.__condition.release()",
"def test_lock_with_validity():\n ttl = 1000\n lock = RedLock(\"test_simple_lock\", [{\"host\": \"localhost\"}], ttl=ttl)\n locked, validity = lock.acquire_with_validity()\n lock.release()\n assert locked is True\n assert 0 < validity < ttl - ttl * CLOCK_DRIFT_FACTOR - 2",
"def test_lock_already_exists(self):\n\n # Create a lock using a new mutex\n new_mutex = RedisMutex(self.redis, block_time=10, expiry=12)\n new_mutex = new_mutex.acquire_lock(self.key)\n\n self.mutex.block_time = 1\n with self.assertRaises(BlockTimeExceedError):\n self.mutex.acquire_lock(self.key)\n\n # A blocking mutex will raise a MutexLockError instead of\n # BlockTimeExceedError as blcok time does not comes into play\n # during locking of a non blocking mutex.\n self.mutex.blocking = False\n with self.assertRaises(MutexLockError):\n self.mutex.acquire_lock(self.key)\n\n new_mutex.release_lock()",
"def lock(self, hash_key, timeout=60):\n owner = \"%s_%i\" % (self.magic, threading.current_thread().ident)\n with self.entity_set.open() as locks:\n tnow = time.time()\n tstop = tnow + timeout\n twait = 0\n while tnow < tstop:\n time.sleep(twait)\n lock = locks.new_entity()\n lock['hash'].set_from_value(hash_key)\n lock['owner'].set_from_value(owner)\n lock['created'].set_from_value(TimePoint.from_now_utc())\n try:\n locks.insert_entity(lock)\n return LockStoreContext(self, hash_key)\n except edm.ConstraintError:\n pass\n try:\n lock = locks[hash_key]\n except KeyError:\n # someone deleted the lock, go straight round again\n twait = 0\n tnow = time.time()\n continue\n # has this lock expired?\n locktime = lock['created'].value.with_zone(zdirection=0)\n if locktime.get_unixtime() + self.lock_timeout < tnow:\n # use optimistic locking\n lock['owner'].set_from_value(owner)\n try:\n locks.update_entity(lock)\n logging.warn(\"LockingBlockStore removed stale lock \"\n \"on %s\", hash_key)\n return LockStoreContext(self, hash_key)\n except KeyError:\n twait = 0\n tnow = time.time()\n continue\n except edm.ConstraintError:\n pass\n twait = random.randint(0, timeout // 5)\n tnow = time.time()\n logging.warn(\"LockingBlockStore: timeout locking %s\", hash_key)\n raise LockError",
"def acquire(self):\n assert not self.has_lock\n\n wait_reporter = p4gf_log.LongWaitReporter(\"accessing p4key-lock\", LOG)\n while True:\n if self.do_acquire():\n self.has_lock = True\n LOG.debug2(\"lock-acquired %s\", self)\n if DEBUG_TRACE:\n LOG.debug3(\"lock-acquired stack trace:\\n%s\",\n \"\".join(traceback.format_stack()))\n return self\n\n # lock held by others, attempt to remove stale owners\n if self.remove_stale_owners():\n continue\n\n # non-blocking case can only raise\n if not self.blocking:\n LOG.debug2(\"lock-busy %s\", self)\n if DEBUG_TRACE:\n LOG.debug3(\"lock-busy stack trace:\\n%s\",\n \"\".join(traceback.format_stack()))\n raise LockBusy(self)\n\n wait_reporter.been_waiting()\n # just wait until lock can be acquired, either due to release or transfer death\n LOG.debug2(\"lock-waiting %s\", self)\n if DEBUG_TRACE:\n LOG.debug3(\"lock-waiting stack trace:\\n%s\",\n \"\".join(traceback.format_stack()))\n time.sleep(_RETRY_PERIOD)",
"def lock(self, client_id, time_limit):\n if self.estado == 'UNLOCKED':\n self.estado = 'LOCKED'\n self.time = time.time() + time_limit\n self.clientLock = client_id\n self.qntLocks += 1\n return True\n else:\n if client_id == self.clientLock:\n self.qntLocks += 1\n self.time += time_limit\n return True\n else:\n return False",
"def acquire_concurrency_lock(self):\n\n if self.concurrency_type is None:\n return None\n\n result = None\n start_time = local_time()\n if self.concurrency_type == CONCURRENCY_TYPE_NETWORK_SEMAPHORE:\n logging.debug(f\"acquiring network concurrency semaphore {self.concurrency_semaphore} \"\n f\"for hunt type {self.hunt_type}\")\n result = NetworkSemaphoreClient(cancel_request_callback=self.manager_control_event.is_set)\n # make sure we cancel outstanding request \n # when shutting down\n result.acquire(self.concurrency_semaphore)\n else:\n logging.debug(f\"acquiring local concurrency semaphore for hunt type {self.hunt_type}\")\n while not self.manager_control_event.is_set():\n if self.concurrency_semaphore.acquire(blocking=True, timeout=0.1):\n result = self.concurrency_semaphore\n break\n\n if result is not None:\n total_seconds = (local_time() - start_time).total_seconds()\n logging.debug(f\"acquired concurrency semaphore for hunt type {self.hunt_type} in {total_seconds} seconds\")\n\n return result",
"def setup_redis(name: str, host: str, port: int, **kw) -> None:\n redis_client = kw.pop(\"redis_client\", redis.StrictRedis)\n SYSTEMS[name] = redis_client(host=host, port=port, **kw)",
"def cluster_lock_acquire(context, cluster_id, action_id, scope=CLUSTER_SCOPE,\n forced=False):\n\n # Step 1: try lock the cluster - if the returned owner_id is the\n # action id, it was a success\n owners = db_api.cluster_lock_acquire(cluster_id, action_id, scope)\n if action_id in owners:\n return True\n # Will reach here only because scope == CLUSTER_SCOPE\n if action_on_dead_engine(context, owners[0]):\n LOG.debug(_('The cluster %(c)s is locked by dead action %(a)s, '\n 'try to steal the lock.') % {\n 'c': cluster_id,\n 'a': owners[0]\n })\n act = base.Action.load(context, owners[0])\n reason = _('Engine died when executing this action.')\n act.set_status(result=base.Action.RES_ERROR,\n reason=reason)\n owners = db_api.cluster_lock_steal(cluster_id, action_id)\n return action_id in owners\n\n # Step 2: retry using global configuration options\n retries = cfg.CONF.lock_retry_times\n retry_interval = cfg.CONF.lock_retry_interval\n\n while retries > 0:\n scheduler.sleep(retry_interval)\n owners = db_api.cluster_lock_acquire(cluster_id, action_id, scope)\n if action_id in owners:\n return True\n retries = retries - 1\n\n # Step 3: Last resort is 'forced locking', only needed when retry failed\n if forced:\n owners = db_api.cluster_lock_steal(cluster_id, action_id)\n return action_id in owners\n\n LOG.error(_LE('Cluster is already locked by action %(old)s, '\n 'action %(new)s failed grabbing the lock'),\n {'old': str(owners), 'new': action_id})\n\n return False",
"def synchronized_with_memcache(key=None, timeout=10):\n def decorator(func):\n @wraps(func)\n def wrapped(*arg, **kwargs):\n start = time.time()\n end = start\n\n lock_key = key\n if lock_key is None:\n lock_key = \"%s.%s__\" % (func.__module__, func.__name__)\n \n lock_key = \"__synchronized_with_memcache_\" + lock_key \n \n client = memcache.Client()\n got_lock = False\n try:\n # Make sure the func gets called only one at a time\n while not got_lock and end - start < timeout:\n locked = client.gets(lock_key)\n\n while locked is None:\n # Initialize the lock if necessary\n client.set(lock_key, False)\n locked = client.gets(lock_key)\n\n if not locked:\n # Lock looks available, try to take it with compare \n # and set (expiration of 10 seconds)\n got_lock = client.cas(lock_key, True, time=timeout)\n \n if not got_lock:\n # If we didn't get it, wait a bit and try again\n time.sleep(0.1)\n\n end = time.time()\n\n if not got_lock:\n logging.warning((\"synchronization lock on %s:%s timed out \"\n \"after %f seconds\")\n % (func.__module__, func.__name__,\n end - start))\n elif end - start > timeout * 0.75:\n # its possible that the func didn't finish but the\n # cas timeout was reached, so if we get these\n # warnings we should probably bump the timeout as well\n logging.warning((\"synchronization lock %s:%s almost timed \"\n \"out, but got lock after %f seconds\")\n % (func.__module__, func.__name__,\n end - start))\n \n results = func(*arg, **kwargs)\n\n finally:\n if got_lock:\n # Release the lock\n client.set(lock_key, False)\n\n return results\n return wrapped\n return decorator",
"def acquire_lock (self):\n\n try:\n self.cache[self.id].lock.acquire ()\n self.locked = True\n except KeyError:\n pass",
"def renew_lock(self, message: MessageEnvelope, lock_timeout: int):",
"def __init__(self, name, acquired=False):\n self.name = name\n self.acquired = acquired\n ret = _CreateMutex(None, False, name)\n if not ret:\n raise ctypes.WinError()\n self.handle = ret\n if acquired:\n self.acquire()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Adds more time to an already acquired lock. ``additional_time`` can be specified as an integer or a float, both representing the number of seconds to add.
|
async def extend(self, additional_time):
if self.local.token is None:
raise LockError("Cannot extend an unlocked lock")
if self.timeout is None:
raise LockError("Cannot extend a lock with no timeout")
return await self.do_extend(additional_time)
|
[
"def extend(self, additional_time=0.0):\n\t\tself._last_time += additional_time",
"def add_time(self, amount):\n self._time += amount",
"def add_integration_time(self, time):\n self.integration_time += time",
"def add_time(self, user_info, time_to_add):\n user_info['time_collected'] = time_to_add\n return user_info",
"def addtime(self, time):\r\n self._clock += time\r\n if self._clock >= self._delay:\r\n self.is_fired = True\r\n self.fired(*self.params)",
"def advanceTime(self, amount):\n if self.blocked:\n assert self.workTime == 0\n self.timeWaiting += amount\n else:\n assert self.workTime - amount >= -FLOAT_ERR\n self.workTime = max(self.workTime - amount, 0)\n if self.workTime == 0:\n printHandler(\"I\", self.name, \"finishes a - \", self.currentComponent.name)\n \n if self.workTime == 0:\n oldComponent = self.currentComponent\n workstationUsed = self.placeComponentInBuffer()\n if workstationUsed:\n printHandler(\"I\", self.name, \"places a\", oldComponent.name, 'in', workstationUsed.name)\n self.blocked = False\n self.workOnNextComponent()\n else:\n self.blocked = True",
"def acquire_time(self, acquire_time):\n\n self._acquire_time = acquire_time",
"def update_timer(self, time):\n self.timer += time",
"def update_time(self, elevator, time):\n for p in itertools.chain(self.waiting_list, elevator.passenger_list):\n p.waittime += time",
"def add_status_time(self, time):\r\n self.logistics_model.add_status_time(time)",
"def add_time(self, response_time):\n if type(response_time) != float:\n raise TypeError(\"response time must be a float\")\n self.resp_time.append([time.time(), response_time*1000.0])",
"def lock(self,time=None):\n #print self.allowed,self.locked\n if self.allowed and not self.locked:\n if time is None:\n time = pf.GUI.drawwait\n if time > 0:\n pf.debug('STARTING TIMER')\n self.locked = True\n self.timer = threading.Timer(time,self.release)\n self.timer.start()",
"def append_time(self, order_time):\n # find the queue which is going to be finished latest.\n # or the get the queue with the least echo time.\n cache = min(self.cache)\n\n # find the waiting time.\n wait_time = 0\n if len(cache):\n wait_time = max((cache[0] - order_time), 0)\n # append the time required including the waiting time in epochs\n cache.appendleft(order_time + self.task_time + wait_time)\n\n return self.task_time + wait_time",
"def timeout_add(self, seconds, method, method_args=()):\r\n self.log.debug('Adding timer for %r in %.2fs', method, seconds)\r\n if int(seconds) == seconds:\r\n return gobject.timeout_add_seconds(\r\n int(seconds), method, *method_args\r\n )\r\n else:\r\n return gobject.timeout_add(\r\n int(seconds * 1000), method, *method_args\r\n )",
"def add_delta_to_time(time: time, timezone, minutes: int = 0, seconds: int = 0):\n time_combine = dt.datetime.combine(dt.date(1, 1, 1), time)\n if minutes > 0:\n delta_time = time_combine + timedelta(minutes=minutes)\n else:\n delta_time = time_combine - timedelta(minutes=minutes)\n if seconds > 0:\n delta_time += timedelta(seconds=seconds)\n else:\n delta_time -= timedelta(seconds=seconds)\n\n return delta_time.replace(tzinfo=pytz.timezone(timezone)).time()",
"def increase(self, additional_bet):\n self._amount += additional_bet",
"def __add__(self, other):\n return MyTime(0, 0, self.to_seconds() + other.to_seconds())",
"def add_second(self):\n\n self.model.time += 1",
"def ex_extend_request_time(self, node, minutes):\r\n return self._vcl_request(\r\n \"XMLRPCextendRequest\",\r\n node.id,\r\n minutes\r\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Use Redis to hold a shared, distributed lock named ``name``. Returns True once the lock is acquired. If ``blocking`` is False, always return immediately. If the lock was acquired, return True, otherwise return False. ``blocking_timeout`` specifies the maximum number of seconds to wait trying to acquire the lock. It should not be greater than expire time of the lock
|
async def acquire(self, blocking=None, blocking_timeout=None):
sleep = self.sleep
token = b(uuid.uuid1().hex)
if blocking is None:
blocking = self.blocking
if blocking_timeout is None:
blocking_timeout = self.blocking_timeout
blocking_timeout = blocking_timeout or self.timeout
stop_trying_at = mod_time.time() + min(blocking_timeout, self.timeout)
while True:
if await self.do_acquire(token):
lock_acquired_at = mod_time.time()
if await self.check_lock_in_slaves(token):
check_finished_at = mod_time.time()
# if time expends on acquiring lock is greater than given time
# the lock should be released manually
if check_finished_at > stop_trying_at:
await self.do_release(token)
return False
self.local.token = token
# validity time is considered to be the
# initial validity time minus the time elapsed during check
await self.do_extend(lock_acquired_at - check_finished_at)
return True
else:
await self.do_release(token)
return False
if not blocking or mod_time.time() > stop_trying_at:
return False
await asyncio.sleep(sleep, loop=self.redis.connection_pool.loop)
|
[
"def Shared(self, blocking=False, timeout=None):\n self._flock(fcntl.LOCK_SH, blocking, timeout,\n \"Failed to lock %s in shared mode\" % self.filename)",
"def Exclusive(self, blocking=False, timeout=None):\n self._flock(fcntl.LOCK_EX, blocking, timeout,\n \"Failed to lock %s in exclusive mode\" % self.filename)",
"def acquire(self):\n assert not self.has_lock\n\n wait_reporter = p4gf_log.LongWaitReporter(\"accessing p4key-lock\", LOG)\n while True:\n if self.do_acquire():\n self.has_lock = True\n LOG.debug2(\"lock-acquired %s\", self)\n if DEBUG_TRACE:\n LOG.debug3(\"lock-acquired stack trace:\\n%s\",\n \"\".join(traceback.format_stack()))\n return self\n\n # lock held by others, attempt to remove stale owners\n if self.remove_stale_owners():\n continue\n\n # non-blocking case can only raise\n if not self.blocking:\n LOG.debug2(\"lock-busy %s\", self)\n if DEBUG_TRACE:\n LOG.debug3(\"lock-busy stack trace:\\n%s\",\n \"\".join(traceback.format_stack()))\n raise LockBusy(self)\n\n wait_reporter.been_waiting()\n # just wait until lock can be acquired, either due to release or transfer death\n LOG.debug2(\"lock-waiting %s\", self)\n if DEBUG_TRACE:\n LOG.debug3(\"lock-waiting stack trace:\\n%s\",\n \"\".join(traceback.format_stack()))\n time.sleep(_RETRY_PERIOD)",
"def lock(self, name):\n \n return self.storage.lock(self.indexname + \"_\" + name)",
"def get_lock(self, key):\n\n if self.disable_locks:\n raise Return(None)\n\n lock = self.redis_conn.lock(\n \"lock:\" + self.add_namespace(key),\n lock_ttl=self.lock_ttl,\n polling_interval=self.lock_polling_interval\n )\n\n result = yield Task(lock.acquire, blocking=True)\n\n if isinstance(result, RedisError):\n raise result\n\n raise Return(lock)",
"def test_simple_lock():\n lock = RedLock(\"test_simple_lock\", [{\"host\": \"localhost\"}], ttl=1000)\n locked = lock.acquire()\n lock.release()\n assert locked is True",
"def lock(self, blocking = False):\n return self.next(blocking)",
"def acquireRead(self, blocking=True, timeout=None):\r\n\r\n if not blocking:\r\n endtime = -1\r\n elif timeout is not None:\r\n endtime = time() + timeout\r\n else:\r\n endtime = None\r\n me = current_thread()\r\n self.__condition.acquire()\r\n try:\r\n if self.__writer is me:\r\n # If we are the writer, grant a new read lock, always.\r\n self.__writercount += 1\r\n return\r\n while True:\r\n if self.__writer is None:\r\n # Only test anything if there is no current writer.\r\n if self.__upgradewritercount or self.__pendingwriters:\r\n if me in self.__readers:\r\n # Only grant a read lock if we already have one\r\n # in case writers are waiting for their turn.\r\n # This means that writers can't easily get starved\r\n # (but see below, readers can).\r\n self.__readers[me] += 1\r\n return\r\n # No, we aren't a reader (yet), wait for our turn.\r\n else:\r\n # Grant a new read lock, always, in case there are\r\n # no pending writers (and no writer).\r\n self.__readers[me] = self.__readers.get(me, 0) + 1\r\n return\r\n if timeout is not None:\r\n remaining = endtime - time()\r\n if remaining <= 0:\r\n # Timeout has expired, signal caller of this.\r\n raise RuntimeError(\"Acquiring read lock timed out\")\r\n self.__condition.wait(remaining)\r\n else:\r\n self.__condition.wait()\r\n finally:\r\n self.__condition.release()",
"def __init__(self, redis, name, timeout=None, sleep=0.1,\n blocking=True, blocking_timeout=None, thread_local=True):\n self.redis = redis\n self.name = name\n self.timeout = timeout\n self.sleep = sleep\n self.blocking = blocking\n self.blocking_timeout = blocking_timeout\n self.thread_local = bool(thread_local)\n self.local = threading.local() if self.thread_local else dummy()\n self.local.token = None\n if self.timeout and self.sleep > self.timeout:\n raise LockError(\"'sleep' must be less than 'timeout'\")",
"def getMutexLock(self, lock_name, timeout=2):\n c=self.getCursor()\n select=\"GET_LOCK('\"+lock_name+\"',\"+str(timeout)+\")\"\n c.execute(\"SELECT \"+select)\n return c.fetchone()[select]",
"def get_lock(name, nonblocking=False):\n\tfn = \"/var/run/%s.pid\" % name\n\tfd = open(fn, 'w')\n\ttry:\n\t\tif nonblocking:\n\t\t\tfcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)\n\t\telse:\n\t\t\tfcntl.lockf(fd, fcntl.LOCK_EX)\n\texcept IOError, e:\n\t\tif e.errno == 11:\n\t\t\treturn None\n\t\traise\n\tprint >>fd, '%s\\n' % os.getpid()\n\treturn fd",
"def acquire_lock (self):\n\n try:\n self.cache[self.id].lock.acquire ()\n self.locked = True\n except KeyError:\n pass",
"def test_try_lock():\n with throttle(b\"[semaphores]\\nA=1\") as url:\n # We hold the lease, all following calls are going to block\n first = Peer.from_server_url(url)\n first.acquire(\"A\")\n with pytest.raises(Timeout):\n with lock(BASE_URL, \"A\", timeout=timedelta(seconds=1)):\n pass",
"def cluster_lock_acquire(context, cluster_id, action_id, scope=CLUSTER_SCOPE,\n forced=False):\n\n # Step 1: try lock the cluster - if the returned owner_id is the\n # action id, it was a success\n owners = db_api.cluster_lock_acquire(cluster_id, action_id, scope)\n if action_id in owners:\n return True\n # Will reach here only because scope == CLUSTER_SCOPE\n if action_on_dead_engine(context, owners[0]):\n LOG.debug(_('The cluster %(c)s is locked by dead action %(a)s, '\n 'try to steal the lock.') % {\n 'c': cluster_id,\n 'a': owners[0]\n })\n act = base.Action.load(context, owners[0])\n reason = _('Engine died when executing this action.')\n act.set_status(result=base.Action.RES_ERROR,\n reason=reason)\n owners = db_api.cluster_lock_steal(cluster_id, action_id)\n return action_id in owners\n\n # Step 2: retry using global configuration options\n retries = cfg.CONF.lock_retry_times\n retry_interval = cfg.CONF.lock_retry_interval\n\n while retries > 0:\n scheduler.sleep(retry_interval)\n owners = db_api.cluster_lock_acquire(cluster_id, action_id, scope)\n if action_id in owners:\n return True\n retries = retries - 1\n\n # Step 3: Last resort is 'forced locking', only needed when retry failed\n if forced:\n owners = db_api.cluster_lock_steal(cluster_id, action_id)\n return action_id in owners\n\n LOG.error(_LE('Cluster is already locked by action %(old)s, '\n 'action %(new)s failed grabbing the lock'),\n {'old': str(owners), 'new': action_id})\n\n return False",
"def test_lock_already_exists(self):\n\n # Create a lock using a new mutex\n new_mutex = RedisMutex(self.redis, block_time=10, expiry=12)\n new_mutex = new_mutex.acquire_lock(self.key)\n\n self.mutex.block_time = 1\n with self.assertRaises(BlockTimeExceedError):\n self.mutex.acquire_lock(self.key)\n\n # A blocking mutex will raise a MutexLockError instead of\n # BlockTimeExceedError as blcok time does not comes into play\n # during locking of a non blocking mutex.\n self.mutex.blocking = False\n with self.assertRaises(MutexLockError):\n self.mutex.acquire_lock(self.key)\n\n new_mutex.release_lock()",
"def test_lock_with_validity():\n ttl = 1000\n lock = RedLock(\"test_simple_lock\", [{\"host\": \"localhost\"}], ttl=ttl)\n locked, validity = lock.acquire_with_validity()\n lock.release()\n assert locked is True\n assert 0 < validity < ttl - ttl * CLOCK_DRIFT_FACTOR - 2",
"def lock_file(filename, mode='r+', blocking=False):\n # TODO(wickman) We should probably adopt the lockfile project here as has\n # a platform-independent file locking implementation.\n if not HAS_FCNTL:\n raise RuntimeError('Interpreter does not support fcntl!')\n\n try:\n fp = open(filename, mode)\n except IOError:\n return None\n\n try:\n fcntl.flock(fp, fcntl.LOCK_EX | fcntl.LOCK_NB if not blocking else fcntl.LOCK_EX)\n except IOError as e:\n if e.errno in (errno.EACCES, errno.EAGAIN):\n fp.close()\n return False\n\n return fp",
"def test_write_lock_acquired(self) -> None:\n # First to acquire this lock, so it should complete\n lock = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=True)\n )\n assert lock is not None\n\n # Enter the context manager\n self.get_success(lock.__aenter__())\n\n # Attempting to acquire the lock again fails, as both read and write.\n lock2 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=True)\n )\n self.assertIsNone(lock2)\n\n lock3 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=False)\n )\n self.assertIsNone(lock3)\n\n # Calling `is_still_valid` reports true.\n self.assertTrue(self.get_success(lock.is_still_valid()))\n\n # Drop the lock\n self.get_success(lock.__aexit__(None, None, None))\n\n # We can now acquire the lock again.\n lock4 = self.get_success(\n self.store.try_acquire_read_write_lock(\"name\", \"key\", write=True)\n )\n assert lock4 is not None\n self.get_success(lock4.__aenter__())\n self.get_success(lock4.__aexit__(None, None, None))",
"def lockNode(lockName=bool, lockUnpublished=bool, ignoreComponents=bool, lock=bool):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set current time display.
|
def set_current_time(self, time: str) -> None:
self._logger.debug("running")
self._current_time_val.setText(time)
self._logger.debug("done")
|
[
"def setTime(self):\n self.btnStart.setEnabled(True)\n self.btnReset.setEnabled(True)\n \n t = self.time.time()\n\n self.hours = t.hour()\n self.minutes = t.minute()\n self.seconds = t.second()\n \n self.tick = self.hours*3600 + self.minutes*60 + self.seconds\n\n self.lcd.display(t.toString())",
"def drawNow(self):\n now = datetime.now()\n self.drawTime(now.hour,now.minute,now.second)",
"def time(self):\n now = datetime.datetime.now()\n self.speak(f\"Agora são {now.hour} horas e {now.minute} minutos\")",
"def update_clock(self, _):\n self.clock = utils.get_time_human_readable()",
"def timezero(self, time=None):\n if time!=None:\n self.zerotime = time\n else:\n self.zerotime = self.modelcache.get_model(self.current_model).time\n self.time_text.set_text(\"Time: %e s\"%self.get_time())\n self.draw()",
"def set_time(self, enable=True):\r\n if enable:\r\n self.time = datetime.now\r\n else:\r\n self.time = None",
"def setTime():\n global local_time\n local_time = time.time()",
"def draw_time(self):\n\n self.draw_rect(0, 0, 127, 31, self.time_color)\n self.draw_rect(1, 1, 126, 30, self.time_color)\n now = self.current_time()\n time_str = now.strftime(\"%H:%M:%S\")\n self.graphics.DrawText(self.canvas, self.extra_large_font, 6, 28, self.time_color, time_str)",
"def update():\n seconds = 0 if self.start_time == 0 else round(time.time() - self.start_time)\n hours = seconds // 3600\n seconds = seconds % 3600\n minutes = seconds // 60\n seconds = seconds % 60\n cur_time = \"\"\n if hours < 10:\n cur_time += \"0\" + str(hours) + \":\"\n else:\n cur_time += str(hours) + \":\"\n if minutes < 10:\n cur_time += \"0\" + str(minutes) + \":\"\n else:\n cur_time += str(minutes) + \":\"\n if seconds < 10:\n cur_time += \"0\" + str(seconds)\n else:\n cur_time += str(seconds)\n\n self.formatted_time.set(cur_time)\n self.last_after = self.root.after(200, update)",
"def lcd_display_datetime(timezone):\n lcd.display_datetime(timezone)",
"def on_action_set_time(self, content):\n self.set_time(as_qtime(content['time']))",
"def get_current_time(self):\n return datetime.datetime.now().strftime(\"%H:%M:%S\")",
"def getnowstrftime(self):\n print()\n print(\"Current date and time\")\n print(self.now.strftime(\"%m-%d-%Y %H:%M\"))",
"def draw_time_date(self):\n\n now = self.current_time()\n time_str = now.strftime(\"%H:%M:%S\")\n date_str = now.strftime(\"%y-%m-%d\")\n self.graphics.DrawText(self.canvas, self.font, 0, 12, self.time_color, time_str)\n self.graphics.DrawText(self.canvas, self.font, 0, 31, self.time_color, date_str)",
"def reset(self):\n self.formatted_time.set(\"00:00:00\")",
"def setSystemTime(self, t):\r\n print(t)\r\n\r\n # Convert to full datetime\r\n now = dt.datetime.now()\r\n d = dt.datetime.combine(dt.date(now.year, now.month, now.day), t)\r\n # Convert to seconds \r\n seconds = (d-dt.datetime(1970,1,1)).total_seconds()\r\n # set clock\r\n time.clock_settime(time.CLOCK_REALTIME, seconds)\r\n print('Clock set')",
"def set_times_to_now(self, atime=False, mtime=False, ctime=False):\n now = datetime.datetime.utcnow()\n if atime:\n self.dt_atime = now\n if mtime:\n self.dt_mtime = now\n if ctime:\n self.dt_ctime = now",
"def _set_display(self, value,\n font='time',\n size=15,\n colour='white',\n typeface='bold',\n overwrite=False):\n if not self._isfrozen() or overwrite:\n self._display_info['font'] = (font, size, typeface)\n self._display_info['value'] = value\n self._display_info['colour'] = colour",
"def user_time(self, console):\n self.writeCommand('user_time', console)\n return self"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set exp start time display.
|
def set_exp_start_time(self, time: str) -> None:
self._logger.debug("running")
self._start_time_val.setText(time)
self._logger.debug("done")
|
[
"def start_time(self, start_time):\n self.__start = start_time",
"def start_time(self, start_time: str):\n\n self._start_time = start_time",
"def set_start_time(self):\n self.solve_start = time.time()",
"def setTime(self):\n self.btnStart.setEnabled(True)\n self.btnReset.setEnabled(True)\n \n t = self.time.time()\n\n self.hours = t.hour()\n self.minutes = t.minute()\n self.seconds = t.second()\n \n self.tick = self.hours*3600 + self.minutes*60 + self.seconds\n\n self.lcd.display(t.toString())",
"def set_start( self, value ):\n\t\tend = self.get_end( )\n\t\tself.set_time( value )\n\t\tself.set_end( end )",
"def start_timing(self):\n self._time = time()",
"def enable_time_tracking(self):\n self.start_time = time.time()",
"def setStartTime(self, startTime):\n self.startTime = startTime",
"def start_timer(self):\n\t\tself.start_time = time.clock()",
"def update_start_time(self, delay_t=0.0):\n self.start_time = self.end_time + delay_t",
"def start_timing(self):\n self._time = time()\n self._timer = time()",
"def _inc_start_time(self):\n if (self.state == Editor.State.wave and\n self.wave_edit_mode == Editor.WaveEditMode.start_time and\n self.selected_wave is not None):\n self.selected_wave.start_time += 1",
"def start_performance_timer(self):\n BuiltIn().set_test_variable(\"${__start_time}\", datetime.now())",
"def set_block_start_time(self, time: str) -> None:\n self._logger.debug(\"running\")\n self._block_start_time_val.setText(time)\n self._logger.debug(\"done\")",
"def set_start_date(self):\n self.start_date_button.config(text=f\"Start - {self.calendar.get_date()}\")\n self.start_date = dt.datetime.strptime(self.calendar.get_date(), '%m/%d/%y')",
"def time_window_start(self, time_window_start):\n\n self._time_window_start = time_window_start",
"def setEditTime(self,when):\n self.editTime = when\n if not self.joinTime:\n self.setJoinTime( when )",
"def set_playbacktime():\n set_hours = input('Enter hours: ')\n set_minutes = input('Enter minutes: ')\n\n set_hours = ((set_hours * 60) * 60) * 1000\n set_minutes = (set_minutes * 60) * 1000\n\n # Sets the time in milliseconds\n player.set_time(set_hours + set_minutes)",
"def utc_start_time(self):\n return self.label['START_TIME']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set block number display.
|
def set_block_num(self, num: str) -> None:
self._logger.debug("running")
self._block_num_val.setText(num)
self._logger.debug("done")
|
[
"def setBlockNumber(self, blockNo: int):\n self.__blockNumber = blockNo",
"def setDisplayBlockOffset(self, offset):\n self.getHandle().setDisplayBlockOffset(offset)",
"def setDisplayBlock(self, material):\n if material != None:\n block = CraftMagicNumbers.getBlock(material.getItemTypeId()).fromLegacyData(material.getData())\n self.getHandle().setDisplayBlock(block)\n else:\n self.getHandle().setDisplayBlock(Blocks.AIR.getBlockData())\n self.getHandle().a(False)",
"def __set_block_number(self, block_number: int):\n _block_number = self.db.query(IDXPersonalInfoBlockNumber).first()\n if _block_number is None:\n _block_number = IDXPersonalInfoBlockNumber()\n _block_number.latest_block_number = block_number\n else:\n _block_number.latest_block_number = block_number\n self.db.merge(_block_number)",
"def __show_bignum(self, num):\n fill = 'lightgreen'\n font = (\"Courier\", 150, 'bold')\n for x in range(3):\n self.canvas.delete(f'bignum-{x}')\n self.canvas.drawText(num[x], 75 + 150 * x, 100, font=font,\n tag=f'bignum-{x}', fill=fill)",
"def set_element_variable_number(self, number):\n self.__ex_put_variable_param('EX_ELEM_BLOCK', number)\n return True",
"def setDigit(self, position = 0, digit = 0):\n\t\tgrovepi.fourDigit_digit(self.display, position, digit)",
"def setNumber(self, value = 0, leading_zero = 1):\n\t\tgrovepi.fourDigit_number(self.display, value, leading_zero)",
"def updateBlockHeigth(self, blockNumber, blockRealNumber):\n\t\tquery = 'UPDATE blocks SET real_number = %s WHERE id = %s'\n\t\tself.executeQuery(query, (blockRealNumber, blockNumber))\n\t\tself.commit()",
"def show_number(self, number):\r\n\r\n\t\tself.show_message(str(number))",
"def n_blocks(self, n):\n self.SetNumberOfBlocks(n)\n self.Modified()",
"def set_number(self, number:int):\n self.number = number #set number, let this card to be..)",
"def getBlockNumber(self) -> int:\n return self.__blockNumber",
"def block1(self, value):\n option = Option()\n option.number = defines.inv_options['Block1']\n num, m, size = value\n if size > 1024:\n szx = 6\n elif 512 < size <= 1024:\n szx = 6\n elif 256 < size <= 512:\n szx = 5\n elif 128 < size <= 256:\n szx = 4\n elif 64 < size <= 128:\n szx = 3\n elif 32 < size <= 64:\n szx = 2\n elif 16 < size <= 32:\n szx = 1\n else:\n szx = 0\n\n value = (num << 4)\n value |= (m << 3)\n value |= szx\n\n option.value = value\n self.add_option(option)",
"def DrawNumVal(self):\n self.DrawNum()",
"def display(self, display):\n allowed_values = [\"BANNER\", \"TOASTER\"]\n if display not in allowed_values:\n raise ValueError(\n \"Invalid value for `display`, must be one of {0}\"\n .format(allowed_values)\n )\n self._display = display",
"def set_block_size(self, width=BLOCK_WIDTH, height=BLOCK_HEIGHT):\n self._block_width = width\n self._block_height = height",
"def _set_display(self, value,\n font='time',\n size=15,\n colour='white',\n typeface='bold',\n overwrite=False):\n if not self._isfrozen() or overwrite:\n self._display_info['font'] = (font, size, typeface)\n self._display_info['value'] = value\n self._display_info['colour'] = colour",
"def print_selected_number(self):\r\n print()\r\n print(f\"РАУНД {self.qt_rounds_played}\")\r\n try:\r\n print(f\"Выпало число: {self.number}! {str(number_names[self.number])}\")\r\n except KeyError:\r\n print(f\"Выпало число: {self.number}!\")\r\n print(f\"Осталось {len(self.numbers_left)} боченков \\n\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set block start time display.
|
def set_block_start_time(self, time: str) -> None:
self._logger.debug("running")
self._block_start_time_val.setText(time)
self._logger.debug("done")
|
[
"def start_time(self, start_time):\n self.__start = start_time",
"def setStartTime(self, startTime):\n self.startTime = startTime",
"def start_time(self, start_time: str):\n\n self._start_time = start_time",
"def set_start( self, value ):\n\t\tend = self.get_end( )\n\t\tself.set_time( value )\n\t\tself.set_end( end )",
"def node_start_time(self, node_start_time):\n\n self._node_start_time = node_start_time",
"def set_exp_start_time(self, time: str) -> None:\n self._logger.debug(\"running\")\n self._start_time_val.setText(time)\n self._logger.debug(\"done\")",
"def set_start_time(self):\n self.solve_start = time.time()",
"def enable_time_tracking(self):\n self.start_time = time.time()",
"def start(self):\n if not hasattr(self, 'lines'):\n self.lines = ['%s Start' % self.tstamp,]\n self.blank()",
"def start_timing(self):\n self._time = time()",
"def sim_set_start_time(self, time):\n if len(self._registered_callbacks) > 0:\n raise RuntimeError(\"You can not set start time while callbacks are scheduled\")\n\n if type(time) == datetime.time:\n time = datetime.datetime.combine(self._now.date(), time)\n self._start_time = self._now = time",
"def startOnly(self):\n self.begin=time.time()",
"def update_start_time(self, delay_t=0.0):\n self.start_time = self.end_time + delay_t",
"def setTime(self):\n self.btnStart.setEnabled(True)\n self.btnReset.setEnabled(True)\n \n t = self.time.time()\n\n self.hours = t.hour()\n self.minutes = t.minute()\n self.seconds = t.second()\n \n self.tick = self.hours*3600 + self.minutes*60 + self.seconds\n\n self.lcd.display(t.toString())",
"def start_timer(self):\n\t\tself.start_time = time.clock()",
"def resetStartTime(self):\n self.__startTime = time.time()",
"def _inc_start_time(self):\n if (self.state == Editor.State.wave and\n self.wave_edit_mode == Editor.WaveEditMode.start_time and\n self.selected_wave is not None):\n self.selected_wave.start_time += 1",
"def time_window_start(self, time_window_start):\n\n self._time_window_start = time_window_start",
"def start_process(self):\n self.start_time = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n self.starttime_num = time.time()\n self.end_time = \"running...\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|