query | document | negatives | metadata
---|---|---|---|
Get the first storage varnode for this variable. Returns the first storage varnode associated with this variable. See getVariableStorage().
|
def getFirstStorageVarnode(self) -> ghidra.program.model.pcode.Varnode:
...
|
[
"def getVariableStorage(self) -> ghidra.program.model.listing.VariableStorage:\n ...",
"def getLastStorageVarnode(self) -> ghidra.program.model.pcode.Varnode:\n ...",
"def get_storage_variable(self, path):\n raise NotImplementedError(\"get_storage_variable has not been implemented!\")",
"def getVarnode(self) -> ghidra.program.model.pcode.Varnode:\n ...",
"def get_storage_master(self):\n return self.storage_master",
"def get_storage_variable(self, path):\n self._check_bind_to_file()\n path = normalize_path(path)\n try:\n # Check if the codec is already known to this instance\n codec = self._variables[path]\n except KeyError:\n try:\n # Attempt to read the disk and bind to that variable\n # Navigate the path down from top NC file to last entry\n head_group = self.ncfile\n split_path = decompose_path(path)\n for header in split_path[:-1]:\n head_group = head_group.groups[header]\n # Check if this is a group type\n is_group = False\n if split_path[-1] in head_group.groups:\n # Check if storage object IS a group (e.g. dict)\n try:\n obj = head_group.groups[split_path[-1]]\n store_type = obj.getncattr('IODriver_Storage_Type')\n if store_type == 'groups':\n variable = obj\n is_group = True\n except AttributeError: # Trap the case of no group name in head_group, non-fatal\n pass\n if not is_group:\n # Bind to the specific variable instead since its not a group\n variable = head_group.variables[split_path[-1]]\n except KeyError:\n raise KeyError(\"No variable found at {} on file!\".format(path))\n try:\n # Bind to the storage type by mapping IODriver_Type -> Known Codec\n data_type = variable.getncattr('IODriver_Type')\n head_path = '/'.join(split_path[:-1])\n target_name = split_path[-1]\n # Remember the group for the future while also getting storage binder\n if head_path == '':\n storage_object = self.ncfile\n else:\n storage_object = self._bind_group(head_path)\n uninstanced_codec = self._IOMetaDataReaders[data_type]\n self._variables[path] = uninstanced_codec(self, target_name, storage_object=storage_object)\n codec = self._variables[path]\n except AttributeError:\n raise AttributeError(\"Cannot auto-detect variable type, ensure that 'IODriver_Type' is a set ncattr\")\n except KeyError:\n raise KeyError(\"No mapped type codecs known for 'IODriver_Type' = '{}'\".format(data_type))\n return codec",
"def storage(self):\n try:\n return self._storage\n\n except AttributeError:\n return MissingComponent(self, \"Vessel Storage\")",
"def _read_variable(self, addr):\n if addr == 0x0:\n return self._stackmanager.pop_stack()\n elif 0x0 < addr < 0x10:\n return self._stackmanager.get_local_variable(addr - 1)\n else:\n return self._memory.read_global(addr)",
"def get_variable_space():\n return tf.get_variable_scope()",
"def get_variable(self, layer, name):\n scope = self._get_layer_str(layer)\n collection = tf.get_collection(tf.GraphKeys.VARIABLES, scope=scope)\n # TBD: Ugly!\n for var in collection:\n if var.name[:-2] == scope+'/'+name:\n return var\n return None",
"def get_variable(self):\n return self._variable_label",
"def get_variable(self, var_name: str) -> SimDebugVariable:\n kb = self.state.project.kb\n cle_var = kb.dvars[var_name][self.state.ip]\n if cle_var:\n return SimDebugVariable.from_cle_variable(self.state, cle_var, self.dwarf_cfa)\n return None",
"def storage(self):\n if self._storage is None:\n raise TypeError(\n \"gt4py backend was not specified when initializing this object\"\n )\n return self._storage",
"def get_variable(self, varname):\n return next((v for v in self.config.data_spec if v['name'] == varname))",
"def get_variable(self, var_name):\n assert(isinstance(var_name, str))\n if isinstance(var_name, str):\n for var in self.variable_list:\n if var.name == var_name:\n return var\n new_var = Variable(var_name)\n self.variable_list.append(new_var)\n return new_var",
"def resolve_storage(self, storage_data: StorageData[T_co]) -> T_Storage:",
"def default_variable(self):\n try:\n return self.__default_variable\n except AttributeError:\n pass\n for _, fun in self._list:\n try:\n fun = SR(fun)\n if fun.variables():\n v = fun.variables()[0]\n self.__default_variable = v\n return v\n except TypeError:\n # pass if fun is lambda function\n pass\n # default to x\n v = var('x')\n self.__default_value = v\n return v",
"def storage(self) -> Optional[pulumi.Input['ThanosRulerSpecStorageArgs']]:\n return pulumi.get(self, \"storage\")",
"def storage_type(self):\n raise NotImplementedError(\"I have not been set to 'variables' or 'groups'\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the first use offset relative to the function entry point.
|
def getFirstUseOffset(self) -> int:
...
|
[
"def min_file_offset(self):\t\n\t\treturn idaapi.get_fileregion_offset(MinEA())",
"def _cal_offset_local(func_name,var_type_map):\n local_vars = func_dict[func_name][\"local_var\"]\n offset = -4\n var_offset = {}\n for var in local_vars:\n if var in var_offset:\n raise SystemError(\"ERROR: Same variable \"+var+\" repeated\")\n var_offset[var] = offset\n offset -= var_type_map[var][0].size\n return var_offset",
"def current_file_offset(self):\n\t\treturn idaapi.get_fileregion_offset(ScreenEA())",
"def getVariableOffset(self) -> ghidra.program.model.listing.VariableOffset:\n ...",
"def earliest_available_offset(self):\n raise NotImplementedError",
"def getEntryPoint(self):\n return HopperLowLevel.getEntryPoint(self.__internal_document_addr__)",
"def calc_sag_offset_idx(self):\n return self.offset_pnt-1",
"def getFileOffset(self):\n return HopperLowLevel.getFileOffset(self.__internal_segment_addr__)",
"def get_main_arena(libc_file):\n mallocHook = int(os.popen('objdump -j .data -d '+ str(libc_file)+'| grep \"__malloc_hook\" |cut -d\" \" -f 1').read(),16)\n reallocHook = int(os.popen('objdump -j .data -d '+ str(libc_file)+'| grep \"__realloc_hook\"|cut -d\" \" -f 1').read(),16)\n\n\n offset = mallocHook-reallocHook\n main_arean_offset = hex(mallocHook + offset*2)\n \n log.success('main_arean_offset: {}'.format(main_arean_offset))\n return main_arean_offset",
"def get_offset(self):\n self.cursor.execute(\"\"\"\n SELECT value FROM core\n WHERE variable=\"offset\"\n \"\"\")\n result = self.cursor.fetchone()\n return result[0]",
"def get_base_offset(self):\n\n if self.elffile.num_segments() == 0:\n log.e(TAG, \"Unable to read program header!\")\n raise BufferError\n\n for segment in self.elffile.iter_segments():\n if describe_p_type(segment['p_type']) == P_TYPE_PHDR:\n\n p_offset = segment['p_offset']\n p_vaddr = segment['p_vaddr']\n\n return p_vaddr - p_offset\n\n log.e(TAG, \"Unable to find base address!\")\n raise BufferError",
"def GetCheckerboardOffset(self):\n ...",
"def getFileOffsetForAddress(self,addr):\n return self.getFileOffset() + addr - self.getStartingAddress()",
"def _section_offset(self, n):\r\n return self['e_shoff'] + n * self['e_shentsize']",
"def get_caller(offset=0):\n _, filename, linenum, funcname, _, _ = inspect.stack()[offset]\n return {'filename': filename,\n 'linenum': linenum,\n 'funcname': funcname}",
"def _cal_offset_params(func_name,var_type_map):\n params = var_type_map[func_name][0]\n assert(params.name == \"function\")\n local_vars = params.param_actual_names\n local_vars.reverse()\n offset = 8\n var_offset = {}\n for var in local_vars:\n if var in var_offset:\n raise SystemError(\"ERROR: Same variable \"+var+\" repeated\")\n var_offset[var] = offset\n offset += var_type_map[var][0].size\n return var_offset",
"def _get_offset(self, X=None):\n last_index = X.shape[0] - 1\n starting_index = self.__starting_test_index\n if starting_index > last_index:\n return 0\n return (last_index - starting_index) % self.periods_between_splits",
"def getStartingAddress(self):\n return HopperLowLevel.getBasicBlockStartingAddress(self.__procedure__.__segment_internal__,self.__procedure__.__procedure_index__,self.__basic_block_index__)",
"def __strip_offset(self):\n if self._ChannelStripController__view_returns:\n return self._ChannelStripController__bank_cha_offset_returns\n else:\n return self._ChannelStripController__bank_cha_offset"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the last storage varnode for this variable. Returns the last storage varnode associated with this variable. See getVariableStorage().
|
def getLastStorageVarnode(self) -> ghidra.program.model.pcode.Varnode:
...
|
[
"def getVariableStorage(self) -> ghidra.program.model.listing.VariableStorage:\n ...",
"def getFirstStorageVarnode(self) -> ghidra.program.model.pcode.Varnode:\n ...",
"def get_storage_variable(self, path):\n raise NotImplementedError(\"get_storage_variable has not been implemented!\")",
"def get_last_node(self):\n return self._nodes[-1]",
"def last_node(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.lastnode\", \r\n self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n index = p2e._base._util._convert_str_to_type(val, int)\r\n return p2e.model._nodes[index]",
"def getVarnode(self) -> ghidra.program.model.pcode.Varnode:\n ...",
"def variables(self):\n return self.variables_history[0]",
"def get_variable_space():\n return tf.get_variable_scope()",
"def getLastKeyVarData(self, keyVar, ind=0):\n allVals = self.keyVarDict[self._keyVarID(keyVar)]\n if not allVals:\n return None\n lastVal = allVals[-1]\n if ind is None:\n return lastVal\n return lastVal[ind]",
"def get_last_value( self , key ):\n return self[key].last_value",
"def last_node_index(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.lastnode\", \r\n self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, int)",
"def storage(self):\n try:\n return self._storage\n\n except AttributeError:\n return MissingComponent(self, \"Vessel Storage\")",
"def getLastChild(self):\n children = self.getChildNodes()\n if children:\n return children._data[-1]\n return None",
"def last(self):\n return self.element_at(self.count() - 1)",
"def last(self):\n\t\tif self.is_empty():\n\t\t\treturn None\n\t\telse:\n\t\t\tlast = (self._size - 1)\n\t\t\treturn self._make_position(last) #position of last element",
"def storage(self):\n if self._storage is None:\n raise TypeError(\n \"gt4py backend was not specified when initializing this object\"\n )\n return self._storage",
"def get_storage_variable(self, path):\n self._check_bind_to_file()\n path = normalize_path(path)\n try:\n # Check if the codec is already known to this instance\n codec = self._variables[path]\n except KeyError:\n try:\n # Attempt to read the disk and bind to that variable\n # Navigate the path down from top NC file to last entry\n head_group = self.ncfile\n split_path = decompose_path(path)\n for header in split_path[:-1]:\n head_group = head_group.groups[header]\n # Check if this is a group type\n is_group = False\n if split_path[-1] in head_group.groups:\n # Check if storage object IS a group (e.g. dict)\n try:\n obj = head_group.groups[split_path[-1]]\n store_type = obj.getncattr('IODriver_Storage_Type')\n if store_type == 'groups':\n variable = obj\n is_group = True\n except AttributeError: # Trap the case of no group name in head_group, non-fatal\n pass\n if not is_group:\n # Bind to the specific variable instead since its not a group\n variable = head_group.variables[split_path[-1]]\n except KeyError:\n raise KeyError(\"No variable found at {} on file!\".format(path))\n try:\n # Bind to the storage type by mapping IODriver_Type -> Known Codec\n data_type = variable.getncattr('IODriver_Type')\n head_path = '/'.join(split_path[:-1])\n target_name = split_path[-1]\n # Remember the group for the future while also getting storage binder\n if head_path == '':\n storage_object = self.ncfile\n else:\n storage_object = self._bind_group(head_path)\n uninstanced_codec = self._IOMetaDataReaders[data_type]\n self._variables[path] = uninstanced_codec(self, target_name, storage_object=storage_object)\n codec = self._variables[path]\n except AttributeError:\n raise AttributeError(\"Cannot auto-detect variable type, ensure that 'IODriver_Type' is a set ncattr\")\n except KeyError:\n raise KeyError(\"No mapped type codecs known for 'IODriver_Type' = '{}'\".format(data_type))\n return codec",
"def last(hub, ref):\n return hub.pop.ref.path(ref)[-1]",
"def read_memory(self, key, last=False):\n if last:\n return self._memory[key][-1]\n else:\n return self._memory[key]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the minimum address corresponding to the first varnode of this storage.
|
def getMinAddress(self) -> ghidra.program.model.address.Address:
...
|
[
"def getFirstStorageVarnode(self) -> ghidra.program.model.pcode.Varnode:\n ...",
"def _get_min_addr(self) -> Optional[int]:\n\n if not self._regions:\n if self.project.arch.name != \"Soot\":\n l.error(\"self._regions is empty or not properly set.\")\n return None\n\n return next(self._regions.irange())",
"def first_address(self):\n \n # The first word in the hex file is the address of the first byte \n # sequence.\n first_word = self.hex_view()[:8]\n \n # The address is a hexadecimal value. Convert it to decimal.\n return int(first_word, 16)",
"def first_node_index(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.firstnode\", \r\n self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, int)",
"def getStartingAddress(self):\n return HopperLowLevel.getSegmentStartingAddress(self.__internal_segment_addr__)",
"def _get_least_used(self):\n if self.idx < self.size:\n return self.priority[:self.idx].min(0)[1][0, 0]\n else:\n return self.priority.min(0)[1][0, 0]",
"def find_min(self)->(any, any):\n #---- to do ----\n # complete this method by calling bst.find_min()\n # return the key and the value associated with the smallest key in the tree\n # raise ValueError if the tree is empty\n #---------------\n if self.num_items == 0:\n raise ValueError\n return bst.find_min(self.tree)",
"def getStartingAddress(self):\n return HopperLowLevel.getSectionStartingAddress(self.__internal_section_addr__)",
"def getMin( self ):\n return min( self.getBinLefts() )",
"def getMinKey(self):\n try:\n return list(self.valdictionary[self.minvalue])[0]\n except IndexError:\n return \"\"\n\n\n # Your AllOne object will be instantiated and called as such:",
"def stack_min(self):\n if not self.next_min[-1]: return self.stack[-1]\n return min(self.next_min[-1], self.stack[-1])",
"def first_node(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.firstnode\", \r\n self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n index = p2e._base._util._convert_str_to_type(val, int)\r\n return p2e.model._nodes[index]",
"def find_min():\r\n minimum = inf\r\n min_node = None\r\n for node in open_list:\r\n if configurations[node][0] < minimum:\r\n minimum = configurations[node][0]\r\n min_node = node\r\n return min_node",
"def getVariableOffset(self) -> ghidra.program.model.listing.VariableOffset:\n ...",
"def get_min_x(self):\n\n return self.x0",
"def getMin(self) -> \"void *\":\n return _coin.SbHeap_getMin(self)",
"def find_min(self):\n if not self.root:\n raise ValueError\n\n curr = self.root\n while curr.left is not None:\n curr = curr.left\n return curr.key",
"def adresse1(self):\n return self.__adresse1",
"def getStartingAddress(self):\n return HopperLowLevel.getBasicBlockStartingAddress(self.__procedure__.__segment_internal__,self.__procedure__.__procedure_index__,self.__basic_block_index__)",
"def local_address(self): \n return self._local_addr"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the stack offset associated with a simple stack variable (i.e., isStackVariable() returns true). Throws UnsupportedOperationException if storage is not a simple stack variable.
|
def getStackOffset(self) -> int:
...
|
[
"def getVariableOffset(self) -> ghidra.program.model.listing.VariableOffset:\n ...",
"def stack_pointer(self):\n return self._stack_pointer",
"def getStackIndex(self) -> \"int\":\n return _coin.SoElement_getStackIndex(self)",
"def _find_thunk_offset(self, codeobj, instructions):\n\n STORE_FAST = dis.opmap['STORE_FAST']\n var_idx = codeobj.co_varnames.index(self.local_name)\n\n for idx, instr in enumerate(instructions):\n if instr.opcode == STORE_FAST and instr.arg == var_idx:\n offset = instr.offset + instr.n_bytes\n return (idx + 1, offset)\n else:\n raise RuntimeError(\n f'no local variable by the name {self.local_name}')",
"def peek(stackNumber):\n assert(-1<stackNumber<3)\n if isEmpty(stackNumber):\n return None\n return array[top[stackNumber]]",
"def stack_read(self, offset, length, bp=False):\n sp = self.regs.bp if bp else self.regs.sp\n return self.memory.load(sp + offset, length, endness=self.arch.memory_endness)",
"def stack_size(size=None): # real signature unknown; restored from __doc__\n pass",
"def peek_stack(self, stack):\n\n value = None\n\n if stack.lower() == 'a' and self.a_len > 0:\n value = self.array[self.a_len - 1]\n\n elif stack.lower() == 'b' and self.b_len > 0:\n value = self.array[self.a_len + self.b_len - 1]\n\n elif stack.lower() == 'c' and self.c_len > 0:\n value = self.array[-1]\n\n print(value)",
"def _read_variable(self, addr):\n if addr == 0x0:\n return self._stackmanager.pop_stack()\n elif 0x0 < addr < 0x10:\n return self._stackmanager.get_local_variable(addr - 1)\n else:\n return self._memory.read_global(addr)",
"def GetOffset(self):\n return self.st_pos",
"def check_stack_value(self, stack):\n if len(stack) != 2:\n raise TypeError\n stack[0] = int(stack[0])\n if stack[0] < 1:\n stack[0] = 1\n elif stack[0] > 10:\n stack[0] = 10\n stack[1] = str(stack[1])\n if stack[1] in self._operations:\n return stack\n else:\n raise TypeError",
"def look(self, n):\n\t\treturn self.stack[-n-1]",
"def stack_pop(self):\n sp = self.regs.sp\n self.regs.sp = sp - self.arch.stack_change\n return self.memory.load(sp, self.arch.bytes, endness=self.arch.memory_endness)",
"def top(self):\n if self.is_empty():\n raise Empty(\"stack is empty\")\n return self._data[-1]",
"def _cal_offset_local(func_name,var_type_map):\n local_vars = func_dict[func_name][\"local_var\"]\n offset = -4\n var_offset = {}\n for var in local_vars:\n if var in var_offset:\n raise SystemError(\"ERROR: Same variable \"+var+\" repeated\")\n var_offset[var] = offset\n offset -= var_type_map[var][0].size\n return var_offset",
"def _find_stack_level():\n import metpy\n\n frame = inspect.currentframe()\n n = 0\n while frame:\n if inspect.getfile(frame).startswith(metpy.__path__[0]):\n n += 1\n frame = frame.f_back\n else:\n break\n return n",
"def test_p_property_accesses_top_of_call_stack():\n from esolang_whitespace import SpaceInterpreter\n i = SpaceInterpreter('')\n i._call_stack = [0, 1, 2, 3]\n assert i.p == 3",
"def stack_peek_n(self, n):\n\t\tif len(self.stack) - n < 0:\n\t\t\treturn [ StackEntry(NULL_STACK_TOK, -1, self.null_stack_tok_embed) ] * (n - len(self.stack)) \\\n\t\t\t\t + self.stack[:]\n\t\treturn self.stack[-n:]",
"def peek(self):\n return self.operators_stack[-1] if len(self.operators_stack) > 0 else None",
"def find_size(self):\n return len(self.stack)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the variable storage associated with this variable. Returns the variable storage for this variable.
|
def getVariableStorage(self) -> ghidra.program.model.listing.VariableStorage:
...
|
[
"def get_storage_variable(self, path):\n raise NotImplementedError(\"get_storage_variable has not been implemented!\")",
"def storage(self):\n try:\n return self._storage\n\n except AttributeError:\n return MissingComponent(self, \"Vessel Storage\")",
"def storage(self):\n if self._storage is None:\n raise TypeError(\n \"gt4py backend was not specified when initializing this object\"\n )\n return self._storage",
"def get_storage(context):\n zope_root = context.getPhysicalRoot()\n annotations = IAnnotations(zope_root)\n storage = annotations.get(KEY, None)\n\n if storage is None:\n storage = annotations[KEY] = PersistentDict()\n\n return storage",
"def storage(self) -> Optional[pulumi.Input['ThanosRulerSpecStorageArgs']]:\n return pulumi.get(self, \"storage\")",
"def storage(self) -> Optional[pulumi.Input['PrometheusSpecStorageArgs']]:\n return pulumi.get(self, \"storage\")",
"def get_variable_space():\n return tf.get_variable_scope()",
"def type_of_storage(self):\n return self._type_of_storage",
"def get_storage_variable(self, path):\n self._check_bind_to_file()\n path = normalize_path(path)\n try:\n # Check if the codec is already known to this instance\n codec = self._variables[path]\n except KeyError:\n try:\n # Attempt to read the disk and bind to that variable\n # Navigate the path down from top NC file to last entry\n head_group = self.ncfile\n split_path = decompose_path(path)\n for header in split_path[:-1]:\n head_group = head_group.groups[header]\n # Check if this is a group type\n is_group = False\n if split_path[-1] in head_group.groups:\n # Check if storage object IS a group (e.g. dict)\n try:\n obj = head_group.groups[split_path[-1]]\n store_type = obj.getncattr('IODriver_Storage_Type')\n if store_type == 'groups':\n variable = obj\n is_group = True\n except AttributeError: # Trap the case of no group name in head_group, non-fatal\n pass\n if not is_group:\n # Bind to the specific variable instead since its not a group\n variable = head_group.variables[split_path[-1]]\n except KeyError:\n raise KeyError(\"No variable found at {} on file!\".format(path))\n try:\n # Bind to the storage type by mapping IODriver_Type -> Known Codec\n data_type = variable.getncattr('IODriver_Type')\n head_path = '/'.join(split_path[:-1])\n target_name = split_path[-1]\n # Remember the group for the future while also getting storage binder\n if head_path == '':\n storage_object = self.ncfile\n else:\n storage_object = self._bind_group(head_path)\n uninstanced_codec = self._IOMetaDataReaders[data_type]\n self._variables[path] = uninstanced_codec(self, target_name, storage_object=storage_object)\n codec = self._variables[path]\n except AttributeError:\n raise AttributeError(\"Cannot auto-detect variable type, ensure that 'IODriver_Type' is a set ncattr\")\n except KeyError:\n raise KeyError(\"No mapped type codecs known for 'IODriver_Type' = '{}'\".format(data_type))\n return codec",
"def storage(self) -> Optional[pulumi.Input['AlertmanagerSpecStorageArgs']]:\n return pulumi.get(self, \"storage\")",
"def _get_storage(self, for_write=False):",
"def storage_type(self):\n raise NotImplementedError(\"I have not been set to 'variables' or 'groups'\")",
"def get_storage_master(self):\n return self.storage_master",
"def getFirstStorageVarnode(self) -> ghidra.program.model.pcode.Varnode:\n ...",
"def resolve_storage(self, storage_data: StorageData[T_co]) -> T_Storage:",
"def storage_name(self):\n return self._storage_name",
"def get_shared_storage(self):\n shared_storage = self.redis_client.get(self.service_type)\n shared_storage = json.loads(shared_storage)\n validate_json(shared_storage, self.schema)\n return shared_storage",
"def _get_storage(agent: AbstractAgent) -> Optional[Storage]:\n if agent.storage_uri:\n # threaded has to be always True, cause synchronous operations are supported\n return Storage(agent.storage_uri, threaded=True)\n return None # pragma: nocover",
"def __cmdset_storage_get(self):\r\n storage = _GA(self, \"db_cmdset_storage\")\r\n # we need to check so storage is not None\r\n return [path.strip() for path in storage.split(',')] if storage else []"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns true if this variable has been assigned storage. This is equivalent to getVariableStorage() != null.
|
def hasAssignedStorage(self) -> bool:
...
|
[
"def hasVar(self):\n return hasattr(self, 'v') and self.v is not None",
"def vars_inited(self):\n inited, init_sess = self._var_inited\n return inited and init_sess == self.session",
"def isMemoryVariable(self) -> bool:\n ...",
"def has_storage(self, cls):\r\n return True",
"def isVariable(self):\n return (len(self) == 1)",
"def has_storage(self, cls):\n return self.nextstore.has_storage(cls)",
"def has_variable(self, var: Variable) -> bool:\n return self._vars_by_name.get(var.name) == var",
"def has_variable(self, varname):\n return varname in self._file.variables",
"def HasVAR(self):\n return self.__has('VAR')",
"def _has_variable(self, variable):\n return variable in self",
"def HasqVAR(self):\n return self.__has('qVAR')",
"def isRegisterVariable(self) -> bool:\n ...",
"def isVar(self) -> bool:\n if self.tokenLeft():\n return self.currentToken().type == \"var\"\n else:\n return False",
"def IsAllocated(self):\n if self._stat_object is None:\n self._stat_object = self._GetStat()\n return self._stat_object and self._stat_object.is_allocated",
"def isset(self):\n return self._value != NODEFAULT",
"def is_assigned(self):\n return bool(self.current_property())",
"def should_store_locals(node):\n types = (AliasExpr, Binding, FuncDef)\n for item in node.body:\n if isinstance(item, types):\n return True\n if isinstance(item, StorageExpr) and item.expr is not None:\n return True\n return False",
"def isUniqueVariable(self) -> bool:\n ...",
"def isset(self, attr):\n try:\n _var = getattr(self, attr)\n except:\n return False\n if isinstance(_var, list):\n return len(_var) > 0\n return _var is not None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns true if this variable uses simple or compound storage which contains a stack element. If true, the last storage varnode will always be the stack element. See getLastStorageVarnode().
|
def hasStackStorage(self) -> bool:
...
|
[
"def getLastStorageVarnode(self) -> ghidra.program.model.pcode.Varnode:\n ...",
"def is_last_node(self, node):\n return True if self.get_last_node() == node else False",
"def getFirstStorageVarnode(self) -> ghidra.program.model.pcode.Varnode:\n ...",
"def has_storage(self, cls):\n return self.nextstore.has_storage(cls)",
"def should_store_locals(node):\n types = (AliasExpr, Binding, FuncDef)\n for item in node.body:\n if isinstance(item, types):\n return True\n if isinstance(item, StorageExpr) and item.expr is not None:\n return True\n return False",
"def hasAssignedStorage(self) -> bool:\n ...",
"def isLast(self):\n pass",
"def isLast(obj, namespace):",
"def is_stvar(self):\n return self.ty == Type.STVAR",
"def is_last_node_valid(self):\n end_index = len(self.nodes) - 1\n\n if self.nodes[end_index].type is ASTToken.INTEGER:\n return True\n\n return False",
"def isVar(self) -> bool:\n if self.tokenLeft():\n return self.currentToken().type == \"var\"\n else:\n return False",
"def has_storage(self, cls):\r\n return True",
"def isVariable(self):\n return (len(self) == 1)",
"def _is_highest_root(self)->bool: # TEST\n return self.findPosition(-1) == 1",
"def is_last_position(self):\n if self.read_offset == len(self.underlying_read.seq) - 1:\n return True\n if self.underlying_read.cigar[-1][1] == _CIG_INSERTION:\n # read ends with insertion (RARE)\n if self._cig_offset == len(self.underlying_read.cigar) - 2:\n # at the 2nd-to-last cigar element\n return self._cig_elem_offset == self._cigarelement()[1] - 1 # last base\n return False",
"def need_new_stack(self):\n return self.size() >= self.capacity",
"def is_on_last_item(self):\n return self.index == len(self) - 1",
"def is_last_question(self):\n questions = self.module.question_set\n return self == questions.last()",
"def is_last(self):\n return self._order == \\\n len(self.parent_node.idevices.get_queryset()) - 1",
"def is_rightmost(self) -> bool:\n if self.parent is None: return True\n return self.parent.children[-1] is self"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns true if this variable uses compound storage consisting of two or more storage elements, which will be returned by the getVariableStorage() method. Compound variables will always use one or more registers, optionally followed by other storage (i.e., stack).
|
def isCompoundVariable(self) -> bool:
...
|
[
"def hasAssignedStorage(self) -> bool:\n ...",
"def should_store_locals(node):\n types = (AliasExpr, Binding, FuncDef)\n for item in node.body:\n if isinstance(item, types):\n return True\n if isinstance(item, StorageExpr) and item.expr is not None:\n return True\n return False",
"def isRegisterVariable(self) -> bool:\n ...",
"def isVariable(self):\n return (len(self) == 1)",
"def isMemoryVariable(self) -> bool:\n ...",
"def has_storage(self, cls):\r\n return True",
"def _is_same_storage(x: torch.Tensor, y: torch.Tensor) -> bool:\n return x.storage().data_ptr() == y.storage().data_ptr()",
"def has_storage(self, cls):\n return self.nextstore.has_storage(cls)",
"def is_multi_reg(self):\n return True",
"def contains_multiregs(self):\n for r in self.regs:\n if isinstance(r, MultiReg):\n return True\n return False",
"def has_full_house(self):\n return self.has_two_pair() and self.has_three_of_a_kind()",
"def check_stacking(primitive, inputs):\n if primitive.stack_on_self is False:\n for f in inputs:\n if isinstance(f.primitive, primitive.__class__):\n return False\n\n if primitive.stack_on_exclude is not None:\n for f in inputs:\n if isinstance(f.primitive, tuple(primitive.stack_on_exclude)):\n return False\n\n # R TODO: handle this\n for f in inputs:\n if f.number_output_features > 1:\n return False\n\n for f in inputs:\n if f.primitive.base_of_exclude is not None:\n if isinstance(primitive, tuple(f.primitive.base_of_exclude)):\n return False\n\n for f in inputs:\n if primitive.stack_on_self is True:\n if isinstance(f.primitive, primitive.__class__):\n continue\n if primitive.stack_on is not None:\n if isinstance(f.primitive, tuple(primitive.stack_on)):\n continue\n else:\n continue\n if f.primitive.base_of is not None:\n if primitive.__class__ in f.primitive.base_of:\n continue\n else:\n continue\n return False\n\n return True",
"def _is_stackable(self):\n if (\n self.acceptance_off is None\n or self.acceptance is None\n or self.counts_off is None\n ):\n return False\n else:\n return True",
"def check_namespace_for_storage_ref(space, storage_ref):\n for lang, mappings in space.items():\n for nlu_ref, nlp_ref in mappings.items():\n if nlu_ref == storage_ref or nlp_ref == storage_ref : return True\n return False",
"def _is_same_storage(x: torch.Tensor, y: torch.Tensor):\n return x.storage().data_ptr() == y.storage().data_ptr()",
"def storage_capacity(self) -> bool:\n return typing.cast(\n bool,\n self._properties.get(\"storageCapacity\"),\n )",
"def check_if_storage_ref_exists(storage_ref):\n spaces = [nlu.spellbook.Spellbook.pretrained_models_references, nlu.Spellbook.pretrained_healthcare_model_references]\n for space in spaces :\n if check_namespace_for_storage_ref(space,storage_ref) : return True\n return False",
"def storage_type(self):\n raise NotImplementedError(\"I have not been set to 'variables' or 'groups'\")",
"def is_stack(cls, value):\n return cls.is_lcard(value) and len(value) > 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns true if the specified variable is equivalent to this variable.
|
def isEquivalent(self, variable: ghidra.program.model.listing.Variable) -> bool:
...
|
[
"def flag_instvar(inst: Entity, flag: Property) -> bool:\n values = flag.value.split(' ', 3)\n if len(values) == 3:\n val_a, op, val_b = values\n op = inst.fixup.substitute(op)\n comp_func = INSTVAR_COMP.get(op, operator.eq)\n elif len(values) == 2:\n val_a, val_b = values\n if val_b in INSTVAR_COMP:\n # User did \"$var ==\", treat as comparing against an empty string.\n comp_func = INSTVAR_COMP[val_b]\n val_b = \"\"\n else:\n # With just two vars, assume equality.\n op = '=='\n comp_func = operator.eq\n else:\n # For just a name.\n return conv_bool(inst.fixup.substitute(values[0]))\n if '$' not in val_a and '$' not in val_b:\n # Handle pre-substitute behaviour, where val_a is always a var.\n LOGGER.warning(\n 'Comparison \"{}\" has no $var, assuming first value. '\n 'Please use $ when referencing vars.',\n flag.value,\n )\n val_a = '$' + val_a\n\n val_a = inst.fixup.substitute(val_a, default='')\n val_b = inst.fixup.substitute(val_b, default='')\n comp_a: str | float\n comp_b: str | float\n try:\n # Convert to floats if possible, otherwise handle both as strings.\n # That ensures we normalise different number formats (1 vs 1.0)\n comp_a, comp_b = float(val_a), float(val_b)\n except ValueError:\n comp_a, comp_b = val_a, val_b\n try:\n return bool(comp_func(comp_a, comp_b))\n except (TypeError, ValueError) as e:\n LOGGER.warning('InstVar comparison failed: {} {} {}', val_a, op, val_b, exc_info=e)\n return False",
"def isUniqueVariable(self) -> bool:\n ...",
"def contains_var(self, variable):\n return variable in self._map",
"def isVariable(self):\n return (len(self) == 1)",
"def _has_variable(self, variable):\n return variable in self",
"def areVariablesEqual(varA, varB):\n equal = True\n \n if equal: \n equal = areDomainsIdentical(varA, varB)\n\n if equal:\n equal = isVariableDataEqual(varA, varB)\n \n if equal: \n equal = areGridsEqual(varA, varB)\n \n if equal: \n equal = areAttributesEqual(varA, varB)\n \n return equal",
"def isCompoundVariable(self) -> bool:\n ...",
"def hasVar(self):\n return hasattr(self, 'v') and self.v is not None",
"def isEquivalent(self, other: ghidra.program.model.symbol.ExternalLocation) -> bool:\n ...",
"def isSame(self, field: 'SoField') -> \"SbBool\":\n return _coin.SoSFUShort_isSame(self, field)",
"def isSame(self, field: 'SoField') -> \"SbBool\":\n return _coin.SoSFVec4ub_isSame(self, field)",
"def contains_one_var(self, variables):\n for variable in variables:\n if variable in self._map:\n return True\n\n return False",
"def __eq__(self, a):\n if ~isinstance(a, tm):\n return False\n if np.all(self.TAA == a.TAA):\n return True\n return False",
"def consistent_with(self, assignment, sub_variables):\n for sub_variable in sub_variables:\n if assignment.get_value(sub_variable) is None:\n return False\n\n if self._map.get(sub_variable, None) is None:\n return False\n\n if assignment.get_value(sub_variable) != self._map[sub_variable]:\n return False\n\n return True",
"def is_equivalent_to(self, other: 'Type') -> bool:\n return self.is_assignable_from(other) and other.is_assignable_from(self)",
"def is_variable(obj):\n return isinstance(obj, Expr) and not obj.args and is_var_symbol(obj.op)",
"def has_variable(self, var: Variable) -> bool:\n return self._vars_by_name.get(var.name) == var",
"def contains(self, assignment):\n for variable in assignment.get_variables():\n if variable in self._map:\n value = assignment.get_value(variable)\n self_value = self._map[variable]\n if self_value is None and value is not None:\n return False\n elif value != self_value:\n return False\n else:\n return False\n\n return True",
"def test_equivalent(self):\n op1 = And(BoolVar(), PedestriansCrossingRoad())\n op2 = And(PedestriansCrossingRoad(), BoolVar())\n op3 = And(DriversAwaitingGreenLightVar(), BoolVar())\n\n op1.check_equivalence(op2)\n op2.check_equivalence(op1)\n\n assert_raises(AssertionError, op1.check_equivalence, op3)\n assert_raises(AssertionError, op2.check_equivalence, op3)\n assert_raises(AssertionError, op3.check_equivalence, op1)\n assert_raises(AssertionError, op3.check_equivalence, op2)\n\n ok_(op1 == op2)\n ok_(op2 == op1)\n ok_(op1 != op3)\n ok_(op2 != op3)\n ok_(op3 != op1)\n ok_(op3 != op2)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns true if this is a simple variable consisting of a single storage memory element, which will be returned by either the getFirstStorageVarnode() or getVariableStorage() methods.
|
def isMemoryVariable(self) -> bool:
...
|
[
"def getFirstStorageVarnode(self) -> ghidra.program.model.pcode.Varnode:\n ...",
"def isVariable(self):\n return (len(self) == 1)",
"def is_stvar(self):\n return self.ty == Type.STVAR",
"def hasAssignedStorage(self) -> bool:\n ...",
"def getVariableStorage(self) -> ghidra.program.model.listing.VariableStorage:\n ...",
"def hasVar(self):\n return hasattr(self, 'v') and self.v is not None",
"def isRegisterVariable(self) -> bool:\n ...",
"def isVar(self) -> bool:\n if self.tokenLeft():\n return self.currentToken().type == \"var\"\n else:\n return False",
"def should_store_locals(node):\n types = (AliasExpr, Binding, FuncDef)\n for item in node.body:\n if isinstance(item, types):\n return True\n if isinstance(item, StorageExpr) and item.expr is not None:\n return True\n return False",
"def has_shared_memory(self):\n if self.obj is self.arr:\n return True\n if not isinstance(self.obj, np.ndarray):\n return False\n obj_attr = wrap.array_attrs(self.obj)\n return obj_attr[0] == self.arr_attr[0]",
"def is_scalar_field(self):\n if self.functionspace.num_sub_spaces() == 0:\n return True",
"def variable(self):\n return self.level == SLC_VARIABLE",
"def isCompoundVariable(self) -> bool:\n ...",
"def isUniqueVariable(self) -> bool:\n ...",
"def freevar(self, var, expr):\n parsed = logic.Parser().parse(expr)\n variable = logic.Variable(var)\n return variable in parsed.free()",
"def has_storage(self, cls):\n return self.nextstore.has_storage(cls)",
"def _is_same_storage(x: torch.Tensor, y: torch.Tensor) -> bool:\n return x.storage().data_ptr() == y.storage().data_ptr()",
"def has_storage(self, cls):\r\n return True",
"def _has_variable(self, variable):\n return variable in self"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns true if this is a simple variable consisting of a single register varnode, which will be returned by either the getFirstStorageVarnode() or getLastStorageVarnode() methods. The register can be obtained using the getRegister() method.
|
def isRegisterVariable(self) -> bool:
...
|
[
"def getFirstStorageVarnode(self) -> ghidra.program.model.pcode.Varnode:\n ...",
"def isVariable(self):\n return (len(self) == 1)",
"def isMemoryVariable(self) -> bool:\n ...",
"def is_stvar(self):\n return self.ty == Type.STVAR",
"def hasVar(self):\n return hasattr(self, 'v') and self.v is not None",
"def isUniqueVariable(self) -> bool:\n ...",
"def _has_variable(self, variable):\n return variable in self",
"def HasVAR(self):\n return self.__has('VAR')",
"def isVar(self) -> bool:\n if self.tokenLeft():\n return self.currentToken().type == \"var\"\n else:\n return False",
"def variable(self):\n return self.level == SLC_VARIABLE",
"def has_variable(self, var: Variable) -> bool:\n return self._vars_by_name.get(var.name) == var",
"def contains_one_var(self, variables):\n for variable in variables:\n if variable in self._map:\n return True\n\n return False",
"def has_variable(self, varname):\n return varname in self._file.variables",
"def is_scalar_field(self):\n if self.functionspace.num_sub_spaces() == 0:\n return True",
"def isCompoundVariable(self) -> bool:\n ...",
"def varExists(self, varName, autoCall=True):\n try:\n valueFromSearchList(self.searchList(),\n varName.replace('$', ''), autoCall)\n return True\n except NotFound:\n return False",
"def contains_var(self, variable):\n return variable in self._map",
"def is_registered(self) -> np.bool_:\n from arkouda.util import is_registered\n\n if self.registered_name is None:\n return np.bool_(is_registered(self.values.name, as_component=True))\n else:\n return np.bool_(is_registered(self.registered_name))",
"def isEquivalent(self, variable: ghidra.program.model.listing.Variable) -> bool:\n ..."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns true if this is a simple variable consisting of a single unique/hash storage element, which will be returned by either the getFirstStorageVarnode() or getVariableStorage() methods. The unique hash can be obtained from the storage address offset corresponding to the single storage element.
|
def isUniqueVariable(self) -> bool:
...
|
[
"def getFirstStorageVarnode(self) -> ghidra.program.model.pcode.Varnode:\n ...",
"def isVariable(self):\n return (len(self) == 1)",
"def isMemoryVariable(self) -> bool:\n ...",
"def isEquivalent(self, variable: ghidra.program.model.listing.Variable) -> bool:\n ...",
"def isRegisterVariable(self) -> bool:\n ...",
"def has_variable(self, var: Variable) -> bool:\n return self._vars_by_name.get(var.name) == var",
"def hasAssignedStorage(self) -> bool:\n ...",
"def _has_variable(self, variable):\n return variable in self",
"def variable(self):\n return self.level == SLC_VARIABLE",
"def hasVar(self):\n return hasattr(self, 'v') and self.v is not None",
"def _is_same_storage(x: torch.Tensor, y: torch.Tensor) -> bool:\n return x.storage().data_ptr() == y.storage().data_ptr()",
"def is_stvar(self):\n return self.ty == Type.STVAR",
"def HasqVAR(self):\n return self.__has('qVAR')",
"def HasVAR(self):\n return self.__has('VAR')",
"def contains_one_var(self, variables):\n for variable in variables:\n if variable in self._map:\n return True\n\n return False",
"def varExists(self, varName, autoCall=True):\n try:\n valueFromSearchList(self.searchList(),\n varName.replace('$', ''), autoCall)\n return True\n except NotFound:\n return False",
"def has_shared_memory(self):\n if self.obj is self.arr:\n return True\n if not isinstance(self.obj, np.ndarray):\n return False\n obj_attr = wrap.array_attrs(self.obj)\n return obj_attr[0] == self.arr_attr[0]",
"def _is_same_storage(x: torch.Tensor, y: torch.Tensor):\n return x.storage().data_ptr() == y.storage().data_ptr()",
"def has_variable(self, varname):\n return varname in self._file.variables"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the comment for this variable. comment: the comment to set.
|
def setComment(self, comment: unicode) -> None:
...
|
[
"def comment(self, comment):\n self._comment = comment",
"def comment(self, comment):\n if comment is None:\n raise ValueError(\"Invalid value for `comment`, must not be `None`\")\n\n self._comment = comment",
"def set_comment(self, cmt):\n if cmt and cmt[:8] == 'Ansible:':\n self.marker = 'Ansible'\n self.pre_comment = True\n self.comment = cmt[8:].lstrip()\n else:\n self.comment = cmt",
"def set_comment_for(self, source, comment):\n with self._config as conf:\n self._ensure_source(conf, source)\n conf[K_SOURCES][source][K_SOURCE_COMMENT] = comment",
"def comments(self, value):\n self.native.comments = value",
"def set_Comment(self, value):\n super(UpdateTicketInputSet, self)._set_input('Comment', value)",
"async def set_comment(self, ctx, *, comment=None):\n await sql.execute(\"UPDATE servers SET comment=? WHERE serverid=?\", (comment, str(ctx.message.guild.id)))\n em = discord.Embed(colour=discord.Colour.dark_green())\n if comment:\n em.title = f\"Successfully changed comment symbol to `{comment}`.\"\n else:\n em.title = \"Successfully removed comment symbol.\"\n await ctx.send(embed=em)",
"def set_comment_content(self, comment_content):\n\n\t\tif comment_content is not None and not isinstance(comment_content, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: comment_content EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__comment_content = comment_content\n\t\tself.__key_modified['comment_content'] = 1",
"def comment(self, comment):\r\n\r\n if not isinstance(comment, basestring):\r\n comment = str(comment)\r\n\r\n # Remote phone comment\r\n if self._name != 'Main':\r\n comment = self._name + \" : \" + comment\r\n\r\n if self.result:\r\n # clear current test step for performance measuring\r\n self.result.clearCurrentTestStep()\r\n\r\n debug.brf(comment)\r\n if core.FW_conf['blackbox'] != None and core.FW_conf['blackbox'].isVideoRecorderAvailable():\r\n core.FW_conf['blackbox'].videoRecording_SetText(comment)\r\n\r\n if self.result:\r\n # FIXME: Remove list approach from addStepComment\r\n comment = [comment]\r\n self.result.addStepComment(comment)",
"def setCommentAtAddress(self,addr,comment):\n return HopperLowLevel.setCommentAtAddress(self.__internal_segment_addr__,addr,comment)",
"def comment(self, uuid, comment):\n # TODO: add overwrite (false by default) and append options\n cur = self.conn.cursor()\n cur.execute(\n \"\"\"\n UPDATE experiments\n SET comment = ?\n WHERE uuid = ?\n \"\"\",\n [comment, uuid],\n )\n cur.close()\n self.conn.commit()",
"def comment(self):\n if hasattr(self, 'description'):\n descr = \"'%s'\" % self.description\n else:\n descr = 'NULL'\n return \"COMMENT ON %s %s IS %s\" % (\n self.objtype, self.identifier(), descr)",
"def set_payment_comment(self, value):\n (self.driver.find_element(*ProjectFormLoc.FIELD_PAYMENT_COMMENT).\n send_keys(value))",
"def set_status_comment(self, value):\n (self.driver.find_element(*ProjectFormLoc.FIELD_STATUS_COMMENT).\n send_keys(value))",
"def add_comment(self, comment):\n self.config.append(\"% \" + \"\\n% \".join(comment.split(\"\\n\")))",
"def update_comments(self, x):\n self.text.set(str(self.text.get()) + \"\\n\" + str(x))",
"def set_testrail_comment(comment, clear_previous = 'False'):\r\n try:\r\n test_comments\r\n except NameError:\r\n comment_exist = 'False'\r\n\r\n #option 1:\r\n if comment_exist =='False':\r\n testrail_comments = comment\r\n elif comment_exist == 'True' and clear_previous == 'True':\r\n testrail_comments = comment\r\n elif comment_exist == 'True' and clear_previous == 'False':\r\n testrail_comments = testrail_comments + '\\n' + comment",
"def comment(self, s=None):\n if s is None:\n self._add('\\n')\n elif self.show_comments:\n self._add('(' + s + ')\\n')",
"def get_comment(self):\n return str(self.gui.txt_comment.text())",
"def testComment(self):\n cyto = self.session.create_cytokine()\n\n self.util.stringTypeTest(self, cyto, \"comment\")\n\n self.util.stringPropertyTest(self, cyto, \"comment\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the Data Type of this variable. The given data type must have a fixed length. Parameters: type - the data type; alignStack - maintain proper stack alignment/justification if supported by the implementation (if false and this is a stack variable, the current stack address/offset will not change; if true, the effect is implementation dependent since alignment cannot be performed without access to a compiler specification); force - overwrite conflicting variables; source - signature source. Throws InvalidInputException if the data type is not fixed length or violates storage constraints, and VariableSizeException if force is false and the data type size causes a conflict with other variables.
|
def setDataType(self, type: ghidra.program.model.data.DataType, alignStack: bool, force: bool, source: ghidra.program.model.symbol.SourceType) -> None:
...
|
[
"def setDataType(self, type: ghidra.program.model.data.DataType, storage: ghidra.program.model.listing.VariableStorage, force: bool, source: ghidra.program.model.symbol.SourceType) -> None:\n ...",
"def setDataType(self, dt: ghidra.program.model.data.DataType) -> None:\n ...",
"def set_data_type(self, a_data_type):\n self.parameters[\"type\"] = str(a_data_type)\n return self",
"def set_dtype(self, value):\n self._dtype = value\n for x in (self._position, self._orientation, self._velocity,\n self._mass, self._charge, self._diameter,\n self._moment_inertia, self._angmom):\n if x is not None:\n x = x.astype(value)\n for frame in self.frames:\n frame.dtype = value",
"def set_dtype(self,dtype):\n self.dtype = dtype",
"def setTypeAtAddress(self,addr,length,typeValue):\n \"\"\"The type must be <b>TYPE_UNDEFINED</b>, <b>TYPE_INT8</b>, ...\"\"\"\n return HopperLowLevel.setTypeAtAddress(self.__internal_segment_addr__,addr,length,typeValue)",
"def _set_value_with_type_check(self, value):\n if self._type == AttributeType.Boolean:\n self._value = bool(value)\n elif self._type == AttributeType.Integer:\n self._value = int(value)\n elif self._type == AttributeType.Number:\n self._value = float(value)\n elif self._type == AttributeType.String:\n assert isinstance(value, str)\n self._value = value\n else:\n self.log.warning('Need to assign value which has unsupported type!')\n self.set_type(type(value)) # Try to set the type\n self._value = value",
"def set_type(self, the_type: [bool, int, float, str]):\n if self._value:\n raise CloudioModificationException('The Attribute has already a type (Changing the type is not allowed)!')\n\n if the_type in (bool, int, float, bytes, str):\n self._value = the_type()\n\n # Init to invalid\n self._type = AttributeType(AttributeType.Invalid)\n\n # Set cloudio attribute type accordingly\n if the_type in (bool,):\n self._type = AttributeType(AttributeType.Boolean)\n elif the_type in (int,):\n self._type = AttributeType(AttributeType.Integer)\n elif the_type in (float,):\n self._type = AttributeType(AttributeType.Number)\n else:\n assert the_type in (bytes, str), 'Seems we got a new type!'\n self._type = AttributeType(AttributeType.String)\n else:\n raise InvalidCloudioAttributeException(the_type)",
"def addDataType(self, dataType):\r\n \r\n self._dataTypes[dataType.name] = deepcopy(dataType)",
"def dataset_type(self, value):\n if type(value) != str:\n raise ValueError('dataset_type should be a str')\n else:\n self._dataset_type = value",
"def SetVariable_propagate_taint(ql, address, params):\n begin = params[\"Data\"]\n end = params[\"Data\"] + params[\"DataSize\"]\n if is_range_tainted(ql, begin, end):\n ql.dprint(D_INFO, \"Detected potential info leak in SetVariable()\")\n ql.os.emu_error()\n os.abort()",
"def setFieldType(self, type: 'int') -> \"void\":\n return _coin.SoField_setFieldType(self, type)",
"def update(self,varType = None):\n dataBuffer = self.value \n requestVarType = self.returnVarTypeFromCall(varType)\n\t \n if not mc.optionVar(exists = self.name):\n if requestVarType:\n self.create(self.form)\n return\n else:\n return log.warning(\"'%s' is not a valid variable type\"%varType) \n \n else:\n #If it exists, first check for data buffer\n typeBuffer = search.returnDataType(dataBuffer) or False\n if not typeBuffer:\n #log.debug('Changing to int!')\n typeBuffer = 'int'\n \n if varType is not None: \n if typeBuffer == requestVarType:\n\t\t #log.debug(\"Checks out\")\n return \n else:\n\t\t log.debug(\"Converting optionVar type...\")\n self.create(requestVarType)\n\t\t if dataBuffer is not None:\n\t\t\t#log.debug(\"Attempting to set with: %s\"%dataBuffer)\n\t\t\tself.value = dataBuffer\n\t\t\t#log.debug(\"Value : %s\"%self.value)\n return",
"def set_data_structure(self, ds):\n try:\n self.ds_handle = ds\n self.vis_type = ds.get_data_structure_type()\n except ValueError:\n print(\"Exception Thrown: Data structure passed to BRIDGES is null!\\n\")",
"def set_type(self, var, _type, statement_index):\n if _type is None:\n raise exception.BananaTypeCheckerBug(\n \"'None' is not a valid banana type\"\n )\n\n if isinstance(var, ast.Ident):\n self._check_needs_for_snapshot(var, _type, statement_index)\n self._variables[var] = _type\n return\n\n if isinstance(var, ast.DotPath):\n if util.is_comp(_type) and len(var.properties) > 0:\n raise exception.BananaAssignCompError(var.span)\n\n if len(var.properties) == 0:\n self._check_needs_for_snapshot(\n var.varname,\n _type,\n statement_index\n )\n self._variables[var.varname] = _type\n else:\n if var.varname in self._variables:\n var_type = self._variables[var.varname]\n if isinstance(var_type, util.Object):\n new_type = util.create_object_tree(\n var.next_dot_path(), _type)\n util.attach_to_root(var_type, new_type, var.span,\n erase_existing=True)\n elif isinstance(var_type, util.Component):\n var_type[var.next_dot_path()] = _type\n else:\n raise exception.BananaTypeError(\n expected_type=util.Object,\n found_type=type(var)\n )\n # Var undeclared, declare its own type\n else:\n new_type = util.create_object_tree(var.next_dot_path(),\n _type)\n self._variables[var.varname] = new_type\n return\n raise exception.BananaTypeCheckerBug(\"Unreachable code reached.\")",
"def np_changedtype(self, dtype):\n self.data = self.data.astype(dtype)",
"def set_vtype(self,vtype=None,python_type=None):\n if vtype is not None and python_type is None:\n (self.vtype,self.width) = decode_vtype(vtype)\n assert 1 <= self.width <= schema.WIDTH_MAX\n self.python_type = schema.PYTHON_TYPE_MAP[self.vtype]\n assert self.python_type is not None\n elif vtype is None and python_type is not None:\n self.vtype = SQL_TYPE_MAP[python_type]['type']\n self.width = SQL_TYPE_MAP[python_type]['width']\n self.python_type = python_type\n else:\n self.vtype = None\n self.width = None\n self.python_type = None",
"def _set_types(self):\n # If we given something that is not an int or a float we raise\n # a RuntimeError as we do not want to have to guess if the given\n # input should be interpreted as an int or a float, for example the\n # interpretation of the string \"1\" vs the interpretation of the string\n # \"1.0\".\n for c in (self.x, self.y, self.z):\n if not (isinstance(c, int) or isinstance(c, float)):\n raise(RuntimeError('x, y coords should be int or float'))\n\n if (isinstance(self.x, int)\n and isinstance(self.y, int) and isinstance(self.z, int)):\n self._dtype = \"int\"\n else:\n # At least one value is a float so promote both to float.\n self.x = float(self.x)\n self.y = float(self.y)\n self.z = float(self.z)\n self._dtype = \"float\"",
"def store_type(group, name, obj):\n group[name] = obj.dtype\n htype = group[name]\n update_attrs(htype, obj.attrs)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the Data Type of this variable and the associated storage whose size matches the data type length.
|
def setDataType(self, type: ghidra.program.model.data.DataType, storage: ghidra.program.model.listing.VariableStorage, force: bool, source: ghidra.program.model.symbol.SourceType) -> None:
...
|
[
"def set_dtype(self, value):\n self._dtype = value\n for x in (self._position, self._orientation, self._velocity,\n self._mass, self._charge, self._diameter,\n self._moment_inertia, self._angmom):\n if x is not None:\n x = x.astype(value)\n for frame in self.frames:\n frame.dtype = value",
"def setDataType(self, dt: ghidra.program.model.data.DataType) -> None:\n ...",
"def set_dtype(self,dtype):\n self.dtype = dtype",
"def setDataType(self, type: ghidra.program.model.data.DataType, alignStack: bool, force: bool, source: ghidra.program.model.symbol.SourceType) -> None:\n ...",
"def set_data_type(self, a_data_type):\n self.parameters[\"type\"] = str(a_data_type)\n return self",
"def _set_length(self, val):\n self.Data.Length = val",
"def type_of_storage(self, type_of_storage):\n\n self._type_of_storage = type_of_storage",
"def np_changedtype(self, dtype):\n self.data = self.data.astype(dtype)",
"def dtype(self, new_dtype):\n if isinstance(new_dtype, tuple):\n for component, new_dt in safe_zip(self.components, new_dtype):\n component.dtype = new_dt\n elif new_dtype is None or isinstance(new_dtype, str):\n for component in self.components:\n if not isinstance(component, NullSpace):\n component.dtype = new_dtype",
"def put(self, dtyp ):\n if isinstance(dtyp, dihtype):\n self.maxgid += 1\n self.dihtypes[self.maxgid] = copy.deepcopy(dtyp)\n else:\n print \"Attempting to add non-dihtype type to container\"\n raise TypeError",
"def addDataType(self, dataType):\r\n \r\n self._dataTypes[dataType.name] = deepcopy(dataType)",
"def store_type(group, name, obj):\n group[name] = obj.dtype\n htype = group[name]\n update_attrs(htype, obj.attrs)",
"def set_data(self, val):\n self._data = val",
"def __set__(self, instance, value):\n if type(value) != type(None):\n\n if (\n type(value) != self.dtype\n and not (\n isinstance(value, np.generic) and type(value.item()) == self.dtype\n )\n and str(self.dtype)[1:] != value.__class__.__name__\n ):\n raise TypeError(\n \"{} should be {} got type {}: {}\".format(\n self.name, self.dtype, type(value), str(value)[:50]\n )\n )\n if hasattr(value, \"__copy__\"):\n value = value.copy()\n super().__set__(instance, value)",
"def check_dtype(self):\n # for data\n if not isinstance(self.data, torch.Tensor):\n if isinstance(self.data, np.ndarray):\n self.data = torch.from_numpy(self.data)\n else:\n raise TypeError(f\"invalid dtype {type(self.data)}\")\n # for label\n if not isinstance(self.label, torch.Tensor):\n if isinstance(self.label, np.ndarray):\n self.label = torch.from_numpy(self.label).long()\n elif isinstance(self.label, list):\n self.label = torch.tensor(self.label, dtype=torch.long)\n else:\n raise TypeError(f\"invalid dtype {type(self.data)}\")",
"def set_unpacked_size(self):\n self.byte_size = struct.calcsize(self.data_format)",
"def dataset_type(self, value):\n if type(value) != str:\n raise ValueError('dataset_type should be a str')\n else:\n self._dataset_type = value",
"def dtype(self, value):\n value = np.dtype(value)\n if value.kind not in \"iuf\":\n raise RuntimeError(\"Unsupported dtype. Only integer/floating-point types are supported.\")\n ok = False\n if np.issubdtype(value, np.integer):\n if np.issubdtype(self.dtype, np.integer):\n ok = True\n elif np.array_equal(self._frequencies, self._errors2):\n ok = True\n elif np.issubdtype(value, np.float):\n ok = True\n if ok:\n self._dtype = value\n self._frequencies = self._frequencies.astype(value)\n self._errors2 = self._errors2.astype(value)\n self._missed = self._missed.astype(value)\n # TODO: Overflows and underflows and stuff...\n else:\n raise RuntimeError(\"Cannot change histogram dtype.\")",
"def setTypeAtAddress(self,addr,length,typeValue):\n \"\"\"The type must be <b>TYPE_UNDEFINED</b>, <b>TYPE_INT8</b>, ...\"\"\"\n return HopperLowLevel.setTypeAtAddress(self.__internal_segment_addr__,addr,length,typeValue)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Adds a 3D point
|
def addPoint(self,x,y,z):
self.x = x
self.y = y
self.z = z
|
[
"def point_3d(self, x, y, z):\n self._point_3d(x, y, z)",
"def add_point(self, point):\n\t\tself.vertices.append(point)",
"def add_point(self, point):\n\t\tself.cloud[point.get_coords()] = point",
"def addPoint(x, y, z, meshSize=0., tag=-1):\n ierr = c_int()\n api__result__ = lib.gmshModelGeoAddPoint(\n c_double(x),\n c_double(y),\n c_double(z),\n c_double(meshSize),\n c_int(tag),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelGeoAddPoint returned non-zero error code: \",\n ierr.value)\n return api__result__",
"def __add__(self, pPos2):\n return _almathswig.Position3D___add__(self, pPos2)",
"def addCube(self,x,y,z,size):\n p1 = mp.point(x,y,z)\n p2 = mp.point(x+size,y,z)\n p3 = mp.point(x,y+size,z)\n p4 = mp.point(x,y,z+size)\n p5 = mp.point(x+size,y+size,z)\n p6 = mp.point(x+size,y,z+size)\n p7 = mp.point(x,y+size,z+size)\n p8 = mp.point(x+size,y+size,z+size)\n self.surfaces.append(surface([0,0,0],[p1,p2,p5,p3])) #z constant\n self.surfaces.append(surface([0,0,0],[p4,p6,p8,p7])) #z constant\n self.surfaces.append(surface([0,0,0],[p1,p2,p6,p4])) #y constant\n self.surfaces.append(surface([0,0,0],[p3,p5,p8,p7])) #y constant\n self.surfaces.append(surface([0,0,0],[p1,p3,p7,p4])) #x constant\n self.surfaces.append(surface([0,0,0],[p2,p5,p8,p6])) #x constant",
"def __add__(self, u: 'SbVec3d') -> \"SbVec3d\":\n return _coin.SbVec3d___add__(self, u)",
"def __iadd__(self, pPos2):\n return _almathswig.Position3D___iadd__(self, pPos2)",
"def addPoint(self, pt: 'SbVec3f', userdata: 'void *const'=None) -> \"int\":\n return _coin.SbBSPTree_addPoint(self, pt, userdata)",
"def addPoint(self, point, index=-1):\n if not isinstance(point,Point):\n raise TypeError('parameter must be a Point instance')\n if index>-1:\n self._points.insert(point, index)\n else:\n self._points.append(point)\n if len(self._points)==1: # first point added\n self._reference = Point(point.getX(), point.getY())\n self._objectChanged(True,False,False)",
"def addPoint(x, y, z, meshSize=0., tag=-1):\n ierr = c_int()\n api__result__ = lib.gmshModelOccAddPoint(\n c_double(x),\n c_double(y),\n c_double(z),\n c_double(meshSize),\n c_int(tag),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelOccAddPoint returned non-zero error code: \",\n ierr.value)\n return api__result__",
"def addPos(self, xyz):\n x, y, z = xyz\n self.path.append((x, y, z))",
"def addPoint(self, point):\n self.point[self.current_player] += point",
"def translate(self, x: float, y: float, z: float):\n self.x += x\n self.y += y\n self.z += z",
"def add(self, node_3d: 'Node3D') -> None:\n\n for n in (node_3d.get_leaf_nodes()):\n object_3d_type = type(n)\n if issubclass(object_3d_type, PointLight):\n if len(self.render_list.point_lights) < 1:\n self.render_list.point_lights.append(n)\n else:\n print('Warning! Not more than one point light in a scene is possible right now.')\n else:\n self.render_list.geometry.append(n)\n\n super(Scene, self).add(node_3d)",
"def add(self, point3d_idx: int, keypoints_type: str, image_filename: str, keypoint_idx: int):\n # enforce type checking\n if not isinstance(point3d_idx, int):\n raise TypeError('invalid type for point3d_idx')\n if not isinstance(keypoints_type, str):\n raise TypeError('invalid type for keypoints_type')\n if not isinstance(image_filename, str):\n raise TypeError('invalid type for image_filename')\n if not isinstance(keypoint_idx, int):\n raise TypeError('invalid type for keypoint_idx')\n self.setdefault(point3d_idx, {}).setdefault(keypoints_type, []).append((image_filename, keypoint_idx))",
"def add_point(self, c1, c2, c3, scatter_index=0, **kwargs):\n try:\n c1s, c2s, c3s, kw = self._scatter[scatter_index]\n kw.update(kwargs)\n self._scatter[scatter_index] = (np.append(c1s, [c1, ]),\n np.append(c2s, [c2, ]),\n np.append(c3s, [c3, ]),\n kw)\n except IndexError:\n self._scatter.append((np.array([c1, ]), np.array([c2, ]),\n np.array([c3, ]), kwargs))",
"def add_point(self, p, index = None):\n\t\tif index == None:\n\t\t\tindex = len(self.point_list)\n\t\t\n\t\tself.point_list.insert(index, p)\n\t\t\n\t\tmax_x = self.collision_box.lower_right.x\n\t\tmax_y = self.collision_box.lower_right.y\n\t\tmin_x = self.collision_box.upper_left.x\n\t\tmin_y = self.collision_box.upper_left.y\n\t\t\n\t\tif p.x > max_x:\n\t\t\tself.collision_box.lower_right.x = p.x\n\t\telif p.x < min_x:\n\t\t\tself.collision_box.upper_left.x = p.x\n\t\t\n\t\tif p.y > max_y:\n\t\t\tself.collision_box.lower_right.y = p.y\n\t\telif p.y < min_y:\n\t\t\tself.collision_box.upper_left.y = p.y",
"def add_point(self, x, y, **props):\n self.add_scatter([x], [y], **props)\n return self"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Query the video categories endpoint.
|
def get_video_categories(key: str, category_ids: List[str], localization_code: str = None) -> dict:
param_dict = {
"key": key,
"part": "snippet",
"id": ",".join(category_ids)}
if localization_code:
param_dict["hl"] = localization_code
return query_endpoint("videoCategories", param_dict)
|
[
"def test_api_v3_categories_get(self):\n pass",
"def __send_get_categories(self):\n self.__send_command(CommandsBytes.GET_CATEGORIES)",
"def test_tv_categories_get(self):\n pass",
"def get_categories():\n # URL example: https://channelstore.roku.com/api/v6/channels/categories?country=US&language=en\n query_params = [qp_country(), qp_language()]\n resp = requests.get(categories_url, params=query_params)\n if resp.status_code != requests.codes.ok:\n print(\"WARNING: categories query returned non-200 response\")\n return None\n return resp.json()",
"def get_categories():\r\n return VIDEOS.iterkeys()",
"def get_categories():\n return VIDEOS.keys()",
"def display_categories(self):\n self.get()",
"def test_tv_categories_get_0(self):\n pass",
"def test_view_categories(self):\n self.create_user()\n self.create_category()\n with self.client:\n headers = self.helper_login_with_token()\n response = self.client.get('/category/',\n content_type='application/json',\n headers=headers)\n reply = json.loads(response.data.decode())\n self.assertEqual(reply['count'], \"1\")\n self.assertEqual(reply['message'], 'categories found')\n self.assertEqual(reply['number_of_pages'], 1)\n self.assertEqual(reply['current_page'], 1)\n self.assertEqual(reply['next_page'], None)\n self.assertEqual(reply['previous_page'], None)\n self.assertTrue(reply['categories'], msg='no categories')",
"def get_video_categories(\n self,\n *,\n category_id: Optional[Union[str, list, tuple, set]] = None,\n region_code: Optional[str] = None,\n parts: Optional[Union[str, list, tuple, set]] = None,\n hl: Optional[str] = \"en_US\",\n return_json: Optional[bool] = False,\n ):\n args = {\n \"part\": enf_parts(resource=\"videoCategories\", value=parts),\n \"hl\": hl,\n }\n\n if category_id is not None:\n args[\"id\"] = enf_comma_separated(field=\"category_id\", value=category_id)\n elif region_code is not None:\n args[\"regionCode\"] = region_code\n else:\n raise PyYouTubeException(\n ErrorMessage(\n status_code=ErrorCode.MISSING_PARAMS,\n message=\"Specify at least one of category_id or region_code\",\n )\n )\n\n resp = self._request(resource=\"videoCategories\", method=\"GET\", args=args)\n data = self._parse_response(resp)\n\n if return_json:\n return data\n else:\n return VideoCategoryListResponse.from_dict(data)",
"def download_categories(self):\n response = requests.get(\n CATEGORIES_ENDPOINT,\n headers={},\n params={}\n )\n self.categories_response = response.json()",
"def query(self, **kwargs):\n\n self.__checkkwargs(**kwargs)\n if self.videoid is None or len(self.videoid) == 0:\n raise ValueError('A valid videoid must be given to find '\n 'caption tracks.')\n dom = super(CaptionSearch, self).query(CaptionSearch.URL,\n {'type': 'list',\n 'v': self.videoid})\n if dom is None:\n return\n self.__parseList(dom)\n return self.results",
"def list_categories():\n # Set plugin category. It is displayed in some skins as the name of the current section.\n xbmcplugin.setPluginCategory(_handle, 'Tous les contenus par genre')\n # Set plugin content. It allows Kodi to select appropriate views for this type of content.\n xbmcplugin.setContent(_handle, 'videos')\n\n categories = GENRES\n # Iterate through categories\n for key, category in categories.items():\n # Create a list item with a text label and a thumbnail image.\n list_item = xbmcgui.ListItem(label=category['label'])\n # Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item.\n list_item.setArt({'thumb': category['thumb'],\n 'icon': category['thumb'],\n 'fanart': category['thumb']})\n # Set additional info for the list item.\n # For available properties see the following link:\n # http://mirrors.xbmc.org/docs/python-docs/15.x-isengard/xbmcgui.html#ListItem-setInfo\n list_item.setInfo('video', {'title': category['label'], 'genre': category['label']})\n # Create a URL for a plugin recursive call.\n # Example: plugin://plugin.video.example/?action=listing&category=Animals\n url = get_url(action='listing', category=key)\n # is_folder = True means that this item opens a sub-list of lower level items.\n is_folder = True\n # Add our item to the Kodi virtual folder listing.\n xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)\n # Add a sort method for the virtual folder items (alphabetically, ignore articles)\n xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)\n # Finish creating a virtual folder.\n xbmcplugin.endOfDirectory(_handle)",
"def show_categories():\n categories = service.get_categories()\n latest_items = service.get_latest_items()\n\n return render_template(\"categories.html\", categories=categories,\n items=latest_items)",
"def get_video_categories_in_region(key: str, region_code: str, localization_code: str = None) -> dict:\n param_dict = {\n \"key\": key,\n \"part\": \"snippet\",\n \"regionCode\": region_code}\n if localization_code:\n param_dict[\"hl\"] = localization_code\n return query_endpoint(\"videoCategories\", param_dict)",
"def get(self, category_type):\n categories = get_category_by_type(category_type)\n if not categories:\n api.abort(404)\n else:\n return categories",
"def request_categories(categories):\n _logger.debug(\"getting categories\")\n result = get_data(url)\n categories_id = []\n categories_name = []\n cat_dict = {}\n for res in result['data']:\n cat_id = int(res['id'])\n cat_name = res['category']\n if cat_name in categories:\n if cat_name not in categories_name:\n categories_id.append(cat_id)\n categories_name.append(cat_name)\n else:\n continue\n for i in range(0,len(categories_id)):\n cat_dict[categories_id[i]] = categories_name[i]\n\n return cat_dict, categories_id",
"def GetRelatedVideos(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def category_id(self):\n return self.video_data.get('categoryId')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Query the video categories endpoint for the categories available in a specific region.
|
def get_video_categories_in_region(key: str, region_code: str, localization_code: str = None) -> dict:
param_dict = {
"key": key,
"part": "snippet",
"regionCode": region_code}
if localization_code:
param_dict["hl"] = localization_code
return query_endpoint("videoCategories", param_dict)
|
[
"def get_video_categories(\n self,\n *,\n category_id: Optional[Union[str, list, tuple, set]] = None,\n region_code: Optional[str] = None,\n parts: Optional[Union[str, list, tuple, set]] = None,\n hl: Optional[str] = \"en_US\",\n return_json: Optional[bool] = False,\n ):\n args = {\n \"part\": enf_parts(resource=\"videoCategories\", value=parts),\n \"hl\": hl,\n }\n\n if category_id is not None:\n args[\"id\"] = enf_comma_separated(field=\"category_id\", value=category_id)\n elif region_code is not None:\n args[\"regionCode\"] = region_code\n else:\n raise PyYouTubeException(\n ErrorMessage(\n status_code=ErrorCode.MISSING_PARAMS,\n message=\"Specify at least one of category_id or region_code\",\n )\n )\n\n resp = self._request(resource=\"videoCategories\", method=\"GET\", args=args)\n data = self._parse_response(resp)\n\n if return_json:\n return data\n else:\n return VideoCategoryListResponse.from_dict(data)",
"def get_video_categories(key: str, category_ids: List[str], localization_code: str = None) -> dict:\n param_dict = {\n \"key\": key,\n \"part\": \"snippet\",\n \"id\": \",\".join(category_ids)}\n if localization_code:\n param_dict[\"hl\"] = localization_code\n return query_endpoint(\"videoCategories\", param_dict)",
"def test_api_v3_categories_get(self):\n pass",
"def get_categories():\n # URL example: https://channelstore.roku.com/api/v6/channels/categories?country=US&language=en\n query_params = [qp_country(), qp_language()]\n resp = requests.get(categories_url, params=query_params)\n if resp.status_code != requests.codes.ok:\n print(\"WARNING: categories query returned non-200 response\")\n return None\n return resp.json()",
"def test_tv_categories_get(self):\n pass",
"def __send_get_categories(self):\n self.__send_command(CommandsBytes.GET_CATEGORIES)",
"def get_categories():\n return VIDEOS.keys()",
"def download_categories(self):\n response = requests.get(\n CATEGORIES_ENDPOINT,\n headers={},\n params={}\n )\n self.categories_response = response.json()",
"def get_categories():\r\n return VIDEOS.iterkeys()",
"def getSpecCategories(self, field):\n pc = getToolByName(self, 'portal_catalog')\n categories = []\n\n for spec in field.getResultsRange():\n service = pc(portal_type='AnalysisService',\n getKeyword=spec['keyword'])[0].getObject()\n if service.getCategoryUID() not in categories:\n categories.append({'UID': service.getCategoryUID(),\n 'Title': service.getCategoryName()})\n return categories",
"def test_view_categories(self):\n self.create_user()\n self.create_category()\n with self.client:\n headers = self.helper_login_with_token()\n response = self.client.get('/category/',\n content_type='application/json',\n headers=headers)\n reply = json.loads(response.data.decode())\n self.assertEqual(reply['count'], \"1\")\n self.assertEqual(reply['message'], 'categories found')\n self.assertEqual(reply['number_of_pages'], 1)\n self.assertEqual(reply['current_page'], 1)\n self.assertEqual(reply['next_page'], None)\n self.assertEqual(reply['previous_page'], None)\n self.assertTrue(reply['categories'], msg='no categories')",
"def test_tv_categories_get_0(self):\n pass",
"def my_category(self, cat):\n categories = Category.objects.all(name=cat)\n return categories",
"def display_categories(self):\n self.get()",
"def _get_language_categories() -> List[str]:\n requests_params = {\n \"action\": \"query\",\n \"format\": \"json\",\n \"list\": \"categorymembers\",\n \"cmtitle\": \"Category:Terms with IPA pronunciation by language\",\n \"cmlimit\": \"500\",\n }\n language_categories = []\n while True:\n data = requests.get(_URL, params=requests_params).json()\n for member in data[\"query\"][\"categorymembers\"]:\n category = member[\"title\"]\n language_categories.append(category)\n if \"continue\" not in data:\n break\n continue_code = data[\"continue\"][\"cmcontinue\"]\n requests_params.update({\"cmcontinue\": continue_code})\n return language_categories",
"def test_country_query_filter_by_region(self):\n\n url = get_country_url_with_filters({'region': 'Polar'})\n\n response = self.client.get(url)\n\n object_list = response.context.get('object_list', None)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTemplateUsed(response, 'country_list.html')\n self.assertEqual(len(object_list), 1)\n self.assertEqual(object_list[0]['name'], 'Antarctica')",
"async def categories_by_phrase_handler(request):\n phrase = request.query.get('phrase')\n\n if phrase is None:\n return web.json_response({\n 'success': False,\n 'message': 'Invalid phrase'\n })\n\n try:\n found_categories = categories_collection.get_categories_by_phrase(phrase)\n return web.json_response({\n \"success\": True,\n \"data\": {\n \"categories\": [category.name for category in found_categories]\n }\n })\n except Exception as error:\n logging.error('Error on category search by phrase:', error)\n\n return web.json_response({\n 'success': False,\n 'message': 'internal error %s' % error.message\n })",
"def request_categories(categories):\n _logger.debug(\"getting categories\")\n result = get_data(url)\n categories_id = []\n categories_name = []\n cat_dict = {}\n for res in result['data']:\n cat_id = int(res['id'])\n cat_name = res['category']\n if cat_name in categories:\n if cat_name not in categories_name:\n categories_id.append(cat_id)\n categories_name.append(cat_name)\n else:\n continue\n for i in range(0,len(categories_id)):\n cat_dict[categories_id[i]] = categories_name[i]\n\n return cat_dict, categories_id",
"def list_categories():\n # Set plugin category. It is displayed in some skins as the name of the current section.\n xbmcplugin.setPluginCategory(_handle, 'Tous les contenus par genre')\n # Set plugin content. It allows Kodi to select appropriate views for this type of content.\n xbmcplugin.setContent(_handle, 'videos')\n\n categories = GENRES\n # Iterate through categories\n for key, category in categories.items():\n # Create a list item with a text label and a thumbnail image.\n list_item = xbmcgui.ListItem(label=category['label'])\n # Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item.\n list_item.setArt({'thumb': category['thumb'],\n 'icon': category['thumb'],\n 'fanart': category['thumb']})\n # Set additional info for the list item.\n # For available properties see the following link:\n # http://mirrors.xbmc.org/docs/python-docs/15.x-isengard/xbmcgui.html#ListItem-setInfo\n list_item.setInfo('video', {'title': category['label'], 'genre': category['label']})\n # Create a URL for a plugin recursive call.\n # Example: plugin://plugin.video.example/?action=listing&category=Animals\n url = get_url(action='listing', category=key)\n # is_folder = True means that this item opens a sub-list of lower level items.\n is_folder = True\n # Add our item to the Kodi virtual folder listing.\n xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)\n # Add a sort method for the virtual folder items (alphabetically, ignore articles)\n xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)\n # Finish creating a virtual folder.\n xbmcplugin.endOfDirectory(_handle)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set a value v at index i in list l.
|
def set_list(l, i, v):
try:
l[i] = v
except IndexError:
for _ in range(i - len(l) + 1):
l.append(None)
l[i] = v
|
[
"def modify_list(lst, i, val):\r\n if len(lst) > i:\r\n lst[i] = val\r\n return None",
"def __setitem__(self, i, value):\n self.set(i, value)\n return self",
"def SetCount(self, i, val):\n self.countList[i] = val",
"def set(self, i: 'int const', ptr: 'SoBase') -> \"void\":\n return _coin.SoBaseList_set(self, i, ptr)",
"def __setitem__(self, i: 'int const', value: 'SbVec3f') -> \"void\":\n return _coin.SbVec3fList___setitem__(self, i, value)",
"def set( i, value ):\n if i in __param: __param[i] = value",
"def __setitem__(self, i, v):\n if type(i) == slice:\n copy = self.data[:]\n copy[i] = v\n if len(copy) != len(self.data):\n raise ValueError(\"slice operation should not chage vector length\")\n try:\n self.data[i] = v\n except TypeError, e:\n raise TypeError(\"can only assign an iterable\")\n except:\n raise IndexError(\"vector index out of range\")",
"def set(self, v):\n self.data[0] = v[0] ; self.data[1] = v[1] ; self.data[2] = v[2]",
"def __setitem__(self, i: 'int const', value: 'SoField') -> \"void\":\n return _coin.SoFieldList___setitem__(self, i, value)",
"def __setitem__(self, i: 'int const', value: 'int *') -> \"void\":\n return _coin.SbIntList___setitem__(self, i, value)",
"def __setitem__(self, i: 'int const', value: 'SoNode') -> \"void\":\n return _coin.SoNodeList___setitem__(self, i, value)",
"def add(self, k, l, i):\n # increment number of atoms in cell list\n self.cl[k,l,0] += 1\n # store the atom in the list at position self.cl[k, l, 0]\n # print( f'VerletList.add: {(k,l)}: {i}')\n self.cl[ k, l, self.cl[k, l, 0] ] = i",
"def __setitem__(self, index, value):\n self.setValue(value, index)",
"def set_value(self, value):\n self.state = self.iterable.index(value)",
"def __setitem__(self, i: 'int', val: 'void *') -> \"void\":\n return _coin.SbPList___setitem__(self, i, val)",
"def __setitem__(self, i: 'int const', value: 'SoBase') -> \"void\":\n return _coin.SoBaseList___setitem__(self, i, value)",
"def set(self, idx: 'int const', field: 'SoField') -> \"void\":\n return _coin.SoFieldList_set(self, idx, field)",
"def set_row(self, i, vec):\n self.data[3*i+0] = vec[0]\n self.data[3*i+1] = vec[1]\n self.data[3*i+2] = vec[2]",
"def __setitem__(self, i, t):\n #Compute all of the coefficients up to (and including) the ith one\n test = self[i]\n\n if i < len(self._list):\n #If we are here, we can just change the entry in self._list\n self._list[i] = t\n else:\n #If we are here, then the stream has become constant. We just\n #extend self._list with self._constant and then change the\n #last entry.\n self._list += [ self._constant ] * (i+1 - len(self._list))\n self._last_index = i\n self._list[i] = t"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function is used to access the tab preceding the active tab (global TABS list). Given an active_tab (corresponding to one of START, DATASET, ...), it returns the previous element of the array.
|
def previous_tab(active_tab):
return TABS[TABS_INDEXES[active_tab] - 1]
|
[
"def next_tab(active_tab):\n return TABS[TABS_INDEXES[active_tab] + 1]",
"def prev_next_tabs(tablist):\n prev_tab = None\n next_tab = None\n current_tab = None\n for ix, tab in enumerate(tablist):\n if tab[0] == request.endpoint:\n current_tab = ix\n break\n\n if current_tab is None:\n return None, None\n\n if current_tab > 0:\n prev_tab = tablist[current_tab - 1]\n\n try:\n next_tab = tablist[current_tab + 1]\n except IndexError:\n pass\n\n return prev_tab, next_tab",
"def previous(array, current_index):\n return array[int(current_index) - 1]",
"def focus_prev(self) -> None:\n if self.focus.index is not None:\n idx = self.focus.index - 1\n if self.inbounds(idx):\n self.focus.flow = self[idx]\n else:\n pass",
"def _previous(self, idx):\n return max(filter(lambda x_: x_ < idx, self.root.active))",
"def previous_field(self):\n self.stack[-1].previous()",
"def get_prev_question(self, current):\n if current > 0:\n questions = self.get_ordered_question_list()\n return questions[current-1]\n return None",
"def get_prev_timestep(self):\n self.skip_back_timestep()\n m, data = self.get_next_timestep()\n self.skip_back_timestep()\n\n return m, data",
"def restore_previous_tab(self):\n\n if self._previous_tab:\n if not self.set_current_tab(self._previous_tab):\n self.set_current_index(0)\n else:\n self.set_current_index(0)",
"def focus_previous(self, win):\n # First: try to focus previous client in column (self.columns is non-empty)\n # pylint: disable=undefined-loop-variable\n for idx, col in enumerate(self.columns):\n if win in col:\n prev = col.focus_previous(win)\n if prev:\n return prev\n else:\n break\n # If there was no previous, get last from previous column\n if idx > 0:\n return self.columns[idx - 1].focus_last()",
"def prevItem(self):\n if self.item_count == 0: return\n \n prev_focus_index = self.focus_index \n while True:\n if self.focus_index < 0: self.focus_index = 0\n else: self.focus_index -= 1 \n\n if self.focus_index == prev_focus_index: return \n elif self.focus_index == -1: \n self.focus_index = self.item_count - 1\n \n item_key = self.item_indexes[self.focus_index]\n if self.items[item_key].is_active == True:\n if self.items[item_key].focusable == True: break\n \n self.changeFocusItem(item_key)",
"def prev_activity(self, id):\n assert id in self.activities()\n A = self.activitylist()\n k = [k for (k,a) in enumerate(A) if a.id() == id][0]\n return A[k-1] if k>=1 else None",
"def user32_GetNextDlgTabItem(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hDlg\", \"hCtl\", \"bPrevious\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def previous_active_value(values, active, default=0, inclusive=False):\n\n # Associative scan with state (most_recent_value, has_some_active) accumulated\n # across spans. This will produce the desired output but shifted, so that\n # it produces the most recent inclusive active value at each position but\n # possibly including the current position.\n\n def step(prevstate, nextstate):\n pval, pactive = prevstate\n nval, nactive = nextstate\n return jnp.where(nactive, nval, pval), (pactive | nactive)\n\n inclusive_result, has_active = jax.lax.associative_scan(\n step, (values, active.astype(jnp.int32)))\n inclusive_result = jnp.where(has_active, inclusive_result, default)\n if inclusive:\n return inclusive_result\n result = jnp.concatenate([jnp.array(default)[None], inclusive_result[:-1]])\n return result",
"def prev_num(self):\n self.current_page - 1",
"def previous(self):\r\n return _osgDB.SwigPyIterator_previous(self)",
"def getCurrentIndex(self):\r\n for i in range(MpGlobal.Window.tabMain.count()):\r\n \r\n widget = MpGlobal.Window.tabMain.widget( i )\r\n \r\n if widget == self:\r\n return i\r\n \r\n raise IndexError(\"Tab not in TabBar. index out of range.\")",
"def get_previous_timestep(timesteps, timestep):\n return timesteps[timesteps.ord(timestep) - 1]",
"def previous(self):\n raise NotImplementedError()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function is used to access the tab following the active tab (global TABS list). Given an active_tab (corresponding to one of START, DATASET, ...), it returns the next element of the array.
|
def next_tab(active_tab):
return TABS[TABS_INDEXES[active_tab] + 1]
|
[
"def previous_tab(active_tab):\n return TABS[TABS_INDEXES[active_tab] - 1]",
"def prev_next_tabs(tablist):\n prev_tab = None\n next_tab = None\n current_tab = None\n for ix, tab in enumerate(tablist):\n if tab[0] == request.endpoint:\n current_tab = ix\n break\n\n if current_tab is None:\n return None, None\n\n if current_tab > 0:\n prev_tab = tablist[current_tab - 1]\n\n try:\n next_tab = tablist[current_tab + 1]\n except IndexError:\n pass\n\n return prev_tab, next_tab",
"def previous(array, current_index):\n return array[int(current_index) - 1]",
"def _previous(self, idx):\n return max(filter(lambda x_: x_ < idx, self.root.active))",
"def focus_prev(self) -> None:\n if self.focus.index is not None:\n idx = self.focus.index - 1\n if self.inbounds(idx):\n self.focus.flow = self[idx]\n else:\n pass",
"def previous_field(self):\n self.stack[-1].previous()",
"def getCurrentIndex(self):\r\n for i in range(MpGlobal.Window.tabMain.count()):\r\n \r\n widget = MpGlobal.Window.tabMain.widget( i )\r\n \r\n if widget == self:\r\n return i\r\n \r\n raise IndexError(\"Tab not in TabBar. index out of range.\")",
"def restore_previous_tab(self):\n\n if self._previous_tab:\n if not self.set_current_tab(self._previous_tab):\n self.set_current_index(0)\n else:\n self.set_current_index(0)",
"def user32_GetNextDlgTabItem(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hDlg\", \"hCtl\", \"bPrevious\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def prev_activity(self, id):\n assert id in self.activities()\n A = self.activitylist()\n k = [k for (k,a) in enumerate(A) if a.id() == id][0]\n return A[k-1] if k>=1 else None",
"def get_prev_question(self, current):\n if current > 0:\n questions = self.get_ordered_question_list()\n return questions[current-1]\n return None",
"def prevItem(self):\n if self.item_count == 0: return\n \n prev_focus_index = self.focus_index \n while True:\n if self.focus_index < 0: self.focus_index = 0\n else: self.focus_index -= 1 \n\n if self.focus_index == prev_focus_index: return \n elif self.focus_index == -1: \n self.focus_index = self.item_count - 1\n \n item_key = self.item_indexes[self.focus_index]\n if self.items[item_key].is_active == True:\n if self.items[item_key].focusable == True: break\n \n self.changeFocusItem(item_key)",
"def get_prev_timestep(self):\n self.skip_back_timestep()\n m, data = self.get_next_timestep()\n self.skip_back_timestep()\n\n return m, data",
"def focus_previous(self, win):\n # First: try to focus previous client in column (self.columns is non-empty)\n # pylint: disable=undefined-loop-variable\n for idx, col in enumerate(self.columns):\n if win in col:\n prev = col.focus_previous(win)\n if prev:\n return prev\n else:\n break\n # If there was no previous, get last from previous column\n if idx > 0:\n return self.columns[idx - 1].focus_last()",
"def backbutton(self, title, tabnext, name=\"Back\", active=1):\n flags = 3 if active else 1 # Visible|Enabled or Visible\n return self.pushbutton(\n name, 180, self.h - 27, 56, 17, flags, title, tabnext\n )",
"def get_current(self, elements):\n if len(elements) > 0:\n return elements[-1]\n\n return None",
"def previous_active_value(values, active, default=0, inclusive=False):\n\n # Associative scan with state (most_recent_value, has_some_active) accumulated\n # across spans. This will produce the desired output but shifted, so that\n # it produces the most recent inclusive active value at each position but\n # possibly including the current position.\n\n def step(prevstate, nextstate):\n pval, pactive = prevstate\n nval, nactive = nextstate\n return jnp.where(nactive, nval, pval), (pactive | nactive)\n\n inclusive_result, has_active = jax.lax.associative_scan(\n step, (values, active.astype(jnp.int32)))\n inclusive_result = jnp.where(has_active, inclusive_result, default)\n if inclusive:\n return inclusive_result\n result = jnp.concatenate([jnp.array(default)[None], inclusive_result[:-1]])\n return result",
"def shuffle_current_tab(self,forward):\n pos = self.tab_list.index(self.current_tab)\n self.shuffle_tab(pos,forward)\n self.recalculate(None,None)",
"def change_tab_reverse(self):\r\n self.stack.setCurrentWidget(self.splitter)\r\n self._files_handler.previous_item()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Start form (job creation)
|
def start(request):
active_tab = START
if request.method == 'POST':
form = FORMS_NEW[active_tab](request.POST, request=request)
active_tab = save_form(form, request, active_tab)
else:
form = FORMS_NEW[active_tab](request=request)
if active_tab == START:
return render(
request,
"job/create.html",
{
'active_tab': active_tab,
'disable_other_tabs': True,
'start_form': form,
}
)
else:
return redirect('job_data_model_edit', id=request.session['draft_job']['id'])
|
[
"def quick_jobpost(context):\n context[\"form\"] = JobPostForm()\n return context",
"def launch(request, id):\n\n active_tab = LAUNCH\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n if active_tab != SUBMITTED:\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )\n else:\n return redirect('job_list')",
"def submit(self):\n \n # TODO: send job to scheduler ",
"def start(self):\n\n\t\t#refresh the view\n\t\ttry:\n\t\t\tself.details.destroy()\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttask = self._get_select_item()\n\t\t\n\t\t\n\t\tWritter.event('start to work on {}'.format(task.name))\n\n\n\t\tif task:\n\n\t\t\tself.new_worktime = WorkTime(task)\n\n\t\t\tstarted_time = time.localtime(self.new_worktime.begin )\n\n\t\t\t#call back stop button clicked\n\t\t\tdef callback():\n\t\t\t\tself.new_worktime.add()\n\t\t\t\tself.show_details()\n\t\t\t\tself._tree()\n\n\n\n\t\t\tself.details = LabelFrame(self, text='\"{}\" in progress...'.\n\t\t\t\tformat(task.name), \n\t\t\t\t\trelief=FLAT,\n\t\t\t\t\tpadx=Setting.PADDING, pady=Setting.PADDING, \n\t\t\t\t\tfont=Setting.FONT_TITLE ,\n\t\t\t\t\tforeground=Setting.COLOR_TXT, background=Setting.COLOR_BKG)\n\n\t\t\tself.time_value = StringVar()\n\n\n\t\t\tself.time_value.set(\"Tâche en cours\")\n\t\t\tLabel(self.details , \n\t\t\t\ttext='Started @{}'.format(time.strftime('%H:%M',started_time)),\n\t\t\t\tfont=Setting.FONT_TEXT , \n\t\t\t\tforeground=Setting.COLOR_TXT, \n\t\t\t\tbackground=Setting.COLOR_BKG).pack(fill=X)\n\t\t\tLabel(self.details , textvariable=self.time_value,font=Setting.FONT_TEXT , \n\t\t\t\tforeground=Setting.COLOR_TXT, \n\t\t\t\tbackground=Setting.COLOR_BKG).pack(fill=X)\n\t\t\tButton(self.details, text=\"stop\", command=callback).pack(fill=X)\n\n\n\n\t\t\tdef update_time():\n\t\t\t\t\"\"\"get time delat & update string var\"\"\"\n\t\t\t\tself.time_value.set( self.new_worktime.spend_from_now() )\n\t\t\t\tself.after(100, update_time)\n\n\t\t\tupdate_time()\n\n\n\n\t\t\tself.details.pack(fill=X )\n\n\t\telse:\n\t\t\tprint('task not found')",
"def submit(self):\n subprocess.run([self.launch_command, str(self.job_filename)])\n print('Job sent: ', [self.launch_command, str(self.job_filename)])",
"def run_job():",
"def submit(self):\n \n print 'Submitting the job'\n runner = Runner(self)\n runner.start()",
"def initiate_schedule():\r\n root = tk.Tk()\r\n page = interface.Window(root)\r\n root.mainloop()",
"def _start_submit_agent(self):\n res = self.systemAdmin.startComponent(\"SimuDB\", \"SubmitAgent\")\n if not res['OK']:\n return res\n return self.systemAdmin.startComponent(\"SimuDB\", \"StopRunAgent\")",
"def start_thread():\n subject = request.form.get('subject') or ''\n comment = request.form.get('comment') or ''\n if not subject:\n return error('start_thread:subject')\n\n storage.start_thread(g.username, subject, comment)\n flash('New Thread Started: {0}'.format(subject), 'success')\n\n return to_threads()",
"def new_job():\n content = request.json\n job = content.get('job', None)\n casename = content.get('casename', None)\n source = content.get('source', None)\n if not job or not casename or not source:\n abort(403)\n params = content.get('params', {})\n path = content.get('path', None)\n if path == '':\n path = None\n pid = run_rvt2(job, casename=casename, source=source, path=path, params=params, background=True)\n return dict(pid=pid)",
"def test_create_job(self):\n job = pyani_jobs.Job(\"empty\", \"\")\n self.assertEqual(job.script, \"\")",
"def makeStart():\n return render_template('maker/starting.html', title=\"Maker - Getting Started\", year=year)",
"def new_meeting(self):\n threading.Thread(target=self.auth_client.new_meeting).start()\n self.show_loading()",
"def OnCreate(self, form):\n # Get parent widget\n # print(\"......\")\n # print(type(form))\n self.parent = self.FormToPyQtWidget(form)\n self.PopulateForm()",
"def generate_task_form(formid=\"deform\"):\n schema = TodoSchema()\n options = \"\"\"\n {success:\n function (rText, sText, xhr, form) {\n deform.focusFirstInput();\n var loc = xhr.getResponseHeader('X-Relocate');\n if (loc) {\n document.location = loc;\n };\n }\n }\n \"\"\"\n # Unneeded deform javascript options\n # deform.processCallbacks();\n return Form(\n schema,\n buttons=('submit',),\n formid=formid,\n use_ajax=True,\n ajax_options=options,\n )",
"def go(self):\n self.get_details()\n self.run()\n if hasattr(self, 'result'):\n self.submit_results()",
"def job_new(input_params={}, always_retry=False, **kwargs):\n return DXHTTPRequest('/job/new', input_params, always_retry=always_retry, **kwargs)",
"def edit_job_name(request, id):\n active_tab = START\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Form to edit the job's basic information (name, description). It also returns forms to be rendered in the other tabs (models).
|
def edit_job_name(request, id):
active_tab = START
active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)
return render(
request,
"job/edit.html",
{
'job_id': id,
'active_tab': active_tab,
'disable_other_tabs': False,
'start_form': forms[TABS_INDEXES[START]],
'dataset_form': forms[TABS_INDEXES[DATASET]],
'data_model_form': forms[TABS_INDEXES[DMODEL]],
'psf_form': forms[TABS_INDEXES[PSF]],
'lsf_form': forms[TABS_INDEXES[LSF]],
'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],
'fitter_form': forms[TABS_INDEXES[FITTER]],
'params_form': forms[TABS_INDEXES[PARAMS]],
'start_view': views[TABS_INDEXES[START]],
'dataset_view': views[TABS_INDEXES[DATASET]],
'data_model_view': views[TABS_INDEXES[DMODEL]],
'psf_view': views[TABS_INDEXES[PSF]],
'lsf_view': views[TABS_INDEXES[LSF]],
'galaxy_model_view': views[TABS_INDEXES[GMODEL]],
'fitter_view': views[TABS_INDEXES[FITTER]],
'params_view': views[TABS_INDEXES[PARAMS]],
# 'max_file_size': MAX_FILE_SIZE
}
)
|
[
"def edit_job_data_model(request, id):\n active_tab = DMODEL\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def edit_job_psf(request, id):\n\n active_tab = PSF\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def launch(request, id):\n\n active_tab = LAUNCH\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n if active_tab != SUBMITTED:\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )\n else:\n return redirect('job_list')",
"def edit_job_params(request, id):\n\n active_tab = PARAMS\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def edit_job_galaxy_model(request, id):\n\n active_tab = GMODEL\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def edit_job_dataset(request, id):\n\n active_tab = DATASET\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def edit_job_lsf(request, id):\n\n active_tab = LSF\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def edit_job_fitter(request, id):\n\n active_tab = FITTER\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def quick_jobpost(context):\n context[\"form\"] = JobPostForm()\n return context",
"def editjob(request):\n job_id = request.GET.get('q', '')\n username = request.user.username\n usr, tkn = user_authenticate(username)\n headers = {'Authorization': \"Token \"+tkn +\n \"\", \"Content-Type\": \"application/json\"}\n\n url = \"http://172.20.0.70:8087/beapi/job/\" + job_id+\"/\"\n r = requests.get(url=url, headers=headers)\n data = r.json()\n team_data = data[0]['job_progress'][0]['jobteam_members']\n end_date = data[0]['job_progress'][0]['end_dt']\n team_leader = data[0]['job_progress'][0]['jobteam_members'][0]['teamleader']\n employee_data = execsys(team_leader)\n fullname = employee_data['firstname'] + \" \" + employee_data['surname']\n context = {\n \"job\": data[0],\n \"team_data\": data[0]['job_progress'][0]['jobteam_members'],\n \"team_members\": len(team_data),\n \"open_mileage\": data[0]['job_progress'][0]['open_mileage'],\n \"close_mileage\": data[0]['job_progress'][0]['close_mileage'],\n \"status\": data[0]['job_progress'][0]['status'],\n \"start_date\": data[0]['job_progress'][0]['start_dt'],\n \"fleet\": data[0]['job_progress'][0]['fleet_no'],\n \"job_progress\": data[0]['job_progress'],\n \"team_leader\": fullname\n }\n return render(request, 'beweb/job/jobedit.html', context)",
"def editForm(self,id,table=1):\t\n\t\tif table:\n\t\t\tform=tableForm(server['query'] + '&action=do')\n\t\telse:\n\t\t\tform=Form(server['query'] + '&action=do')\n\t\ttables=[ i for i in self.db.query('SHOW TABLES;') ]\n\t\tfor row in self.content:\n\t\t\tif str(row['id']) == str(id):\n\t\t\t\tself.db.cur.execute('SHOW FULL COLUMNS FROM ' + self.name)\n\t\t\t\ttb = self.db.cur.fetchall()\n\t\t\t\tfor field in tb:\n\t\t\t\t\tfname = field[0]\n\t\t\t\t\tftype = field[1]\n\t\t\t\t\tfdefault = field[5]\n\t\t\t\t\tfcomment = field[8]\n\t\t\t\t\tfdesc = fname[0].upper() + fname[1:].lower() + ':'\n\t\t\t\t\tif fname == 'id':\n\t\t\t\t\t\tform.hidden(fname,row[fname])\n\t\t\t\t\telif fname[-3:] == '_id' and fname[:-3] in tables:\n\t\t\t\t\t\t# Relations N - 1\n\t\t\t\t\t\tocolumns = self.db.query('SHOW COLUMNS FROM ' + fname[:-3])\n\t\t\t\t\t\torows=self.db.query('SELECT ' + ocolumns[0] + ', ' + ocolumns[1] + ' FROM ' + fname[:-3])\n\t\t\t\t\t\tdict={}\n\t\t\t\t\t\tfor orow in orows:\n\t\t\t\t\t\t\tdict.update({orow[1]:orow[0]})\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.select(fdesc,fname,dict,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.select(fname,dict,row[fname])\t\t\t\t\t\t\n\t\t\t\t\telif ftype == 'text':\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.text(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.text(fname,row[fname])\n\t\t\t\t\telif fcomment == 'pass':\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.pwd(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.pwd(fname,row[fname])\n\t\t\t\t\telse:\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.input(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.input(fname,row[fname])\n\t\tform.submit('Edit')\n\t\treturn form.end()",
"def display_form():\n\n roles = [\"Software Engineer\", \"QA Engineer\", \"Product Manager\"]\n return render_template(\"application-form.html\",\n jobs=roles)",
"def browse_edit():\n # if user is creating a new empty deck, render only the deckname field with newtermfield and newdeffield\n # if user is editing a preexisting deck, render the deckname field, all cards as term, definition, a term/def row for new entries, a \"return to decks\" button, and a \"review this deck\" button\n \n if request.method == 'POST':\n class _BrowseEditForm(BrowseEditForm):\n pass\n\n # print(request.form)\n\n # set decktitle field to the deck's name:\n deckid = request.form['hidden_deckid_field']\n deck = Deck.query.get(deckid)\n setattr(_BrowseEditForm.deckname, 'default', deck.deckname)\n\n browse_edit_form = _BrowseEditForm()\n class _CardForm(CardForm):\n pass\n \n for card in Card.query.filter_by(deck_id=deckid).all():\n _CardForm.term = card.term\n _CardForm.definition = card.definition\n browse_edit_form.cards.append_entry(_CardForm())\n # for empty decks, `cards` should not render.\n if browse_edit_form.validate_on_submit:\n return render_template('browse_edit.html', title='Enter a card', form=browse_edit_form)\n return redirect(url_for('browse_edit.html'))",
"def job_overview(request, id):\n\n active_tab = LAUNCH\n # This could be cleaned to avoid getting forms and only gather views.\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/job_overview.html\",\n {\n 'job_id': id,\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n }\n )",
"def edit_form(self, resource):\n fs = FieldSet(resource.model)\n return fs.render()",
"def display_pet_details_and_edit_form(pet_id):\n pet = Pet.query.get_or_404(pet_id)\n form = EditPetForm(obj=pet)\n if form.validate_on_submit():\n print(\"*!*!*!*!*! IT WORKED !*!!\"*10)\n pet.photo_url=form.photo_url.data\n pet.notes=form.notes.data\n pet.available=form.available.data\n db.session.commit()\n flash(f\"Edited pet: {pet.name}\")\n return redirect(f\"/{pet_id}\")\n else:\n return render_template(\"edit_pet.html\", form=form, pet=pet)",
"def edit_formazione(self, event):\n self.Disable()\n ViewFormazione(parent=self, title='Formazione')",
"def getConfigureForm(room_jid):",
"def getEditgameUpdateForm (game):\n\n\tfrom gluon import current, redirect, URL, SQLFORM\n\tdb = current.db\n\n\t#Hide some fields of the form\n\thideFields (db.game, ['id', 'host_id', 'game_status', 'password'])\n\n\tformUpdate = SQLFORM(db.game, game.id)\n\tformUpdate.add_class('assassins-form')\n\n\tif formUpdate.process().accepted:\n\t\tresizeImage(db.game, game.id)\n\t\tredirect(getUrl('edit', game.id))\n\n\treturn formUpdate"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Form to edit the data model information. It also returns forms to be rendered in other tabs (models).
|
def edit_job_data_model(request, id):
active_tab = DMODEL
active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)
return render(
request,
"job/edit.html",
{
'job_id': id,
'active_tab': active_tab,
'disable_other_tabs': False,
'start_form': forms[TABS_INDEXES[START]],
'dataset_form': forms[TABS_INDEXES[DATASET]],
'data_model_form': forms[TABS_INDEXES[DMODEL]],
'psf_form': forms[TABS_INDEXES[PSF]],
'lsf_form': forms[TABS_INDEXES[LSF]],
'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],
'fitter_form': forms[TABS_INDEXES[FITTER]],
'params_form': forms[TABS_INDEXES[PARAMS]],
'start_view': views[TABS_INDEXES[START]],
'dataset_view': views[TABS_INDEXES[DATASET]],
'data_model_view': views[TABS_INDEXES[DMODEL]],
'psf_view': views[TABS_INDEXES[PSF]],
'lsf_view': views[TABS_INDEXES[LSF]],
'galaxy_model_view': views[TABS_INDEXES[GMODEL]],
'fitter_view': views[TABS_INDEXES[FITTER]],
'params_view': views[TABS_INDEXES[PARAMS]],
# 'max_file_size': MAX_FILE_SIZE
}
)
|
[
"def edit_form(self, resource):\n fs = FieldSet(resource.model)\n return fs.render()",
"def editForm(self,id,table=1):\t\n\t\tif table:\n\t\t\tform=tableForm(server['query'] + '&action=do')\n\t\telse:\n\t\t\tform=Form(server['query'] + '&action=do')\n\t\ttables=[ i for i in self.db.query('SHOW TABLES;') ]\n\t\tfor row in self.content:\n\t\t\tif str(row['id']) == str(id):\n\t\t\t\tself.db.cur.execute('SHOW FULL COLUMNS FROM ' + self.name)\n\t\t\t\ttb = self.db.cur.fetchall()\n\t\t\t\tfor field in tb:\n\t\t\t\t\tfname = field[0]\n\t\t\t\t\tftype = field[1]\n\t\t\t\t\tfdefault = field[5]\n\t\t\t\t\tfcomment = field[8]\n\t\t\t\t\tfdesc = fname[0].upper() + fname[1:].lower() + ':'\n\t\t\t\t\tif fname == 'id':\n\t\t\t\t\t\tform.hidden(fname,row[fname])\n\t\t\t\t\telif fname[-3:] == '_id' and fname[:-3] in tables:\n\t\t\t\t\t\t# Relations N - 1\n\t\t\t\t\t\tocolumns = self.db.query('SHOW COLUMNS FROM ' + fname[:-3])\n\t\t\t\t\t\torows=self.db.query('SELECT ' + ocolumns[0] + ', ' + ocolumns[1] + ' FROM ' + fname[:-3])\n\t\t\t\t\t\tdict={}\n\t\t\t\t\t\tfor orow in orows:\n\t\t\t\t\t\t\tdict.update({orow[1]:orow[0]})\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.select(fdesc,fname,dict,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.select(fname,dict,row[fname])\t\t\t\t\t\t\n\t\t\t\t\telif ftype == 'text':\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.text(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.text(fname,row[fname])\n\t\t\t\t\telif fcomment == 'pass':\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.pwd(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.pwd(fname,row[fname])\n\t\t\t\t\telse:\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.input(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.input(fname,row[fname])\n\t\tform.submit('Edit')\n\t\treturn form.end()",
"def browse_edit():\n # if user is creating a new empty deck, render only the deckname field with newtermfield and newdeffield\n # if user is editing a preexisting deck, render the deckname field, all cards as term, definition, a term/def row for new entries, a \"return to decks\" button, and a \"review this deck\" button\n \n if request.method == 'POST':\n class _BrowseEditForm(BrowseEditForm):\n pass\n\n # print(request.form)\n\n # set decktitle field to the deck's name:\n deckid = request.form['hidden_deckid_field']\n deck = Deck.query.get(deckid)\n setattr(_BrowseEditForm.deckname, 'default', deck.deckname)\n\n browse_edit_form = _BrowseEditForm()\n class _CardForm(CardForm):\n pass\n \n for card in Card.query.filter_by(deck_id=deckid).all():\n _CardForm.term = card.term\n _CardForm.definition = card.definition\n browse_edit_form.cards.append_entry(_CardForm())\n # for empty decks, `cards` should not render.\n if browse_edit_form.validate_on_submit:\n return render_template('browse_edit.html', title='Enter a card', form=browse_edit_form)\n return redirect(url_for('browse_edit.html'))",
"def show_update_form():\n\n current_user = session.get('current_user')\n user_obj = crud.get_user_by_id(current_user)\n\n return render_template(\"update_info.html\")",
"def edit_formazione(self, event):\n self.Disable()\n ViewFormazione(parent=self, title='Formazione')",
"def edit(self, obj):\n data = request.data or request.form.get('data') or ''\n g.modify_flag = 'edit'\n data = self.validate_data(data, obj)\n\n\n for key in self._readonly:\n data.pop(key, None)\n\n obj, models = self.deserialize_object(data, obj)\n\n obj = self.before_save(obj)\n self.save_related_objects(obj, data)\n obj = self.save_object(obj, data)\n self.after_save(obj)\n\n return self.response(self.serialize_object(obj))",
"def update(self):\n\n self.fields = self.getOverriderFields()\n z3c.form.form.EditForm.update(self)",
"def get_forms(self):\n metadata = self.get_metadata()\n\n MetadataForm = self.get_metadata_form_class()\n metadata_form = MetadataForm(\n self.form_data,\n instance=metadata,\n category=self.trs_import.doc_category)\n\n revision_num = self.csv_data['revision']\n revision = metadata.get_revision(revision_num) if metadata else None\n\n RevisionForm = self.get_revision_form_class()\n revision_form = RevisionForm(\n self.form_data,\n instance=revision,\n category=self.trs_import.doc_category)\n\n return metadata_form, revision_form",
"def edit_job_dataset(request, id):\n\n active_tab = DATASET\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def test_update_model_with_form(self):\n pass",
"def show_edit_post_form(id):\n post = Post.query.get_or_404(id)\n tags = Tag.query.all()\n\n return render_template(\"post_edit.html\" , post=post , tags=tags)",
"def ShowForm(self, _):\n return {'layer_model': model.Layer}",
"def edit(request, id):\r\n id = int(id)\r\n msg=\"\"\r\n if request.method == 'POST': \r\n form = GraphForm(request.POST) \r\n msg = validate_json(request.POST['nodes'])\r\n if not msg and form.is_valid(): \r\n g = Graph.get_by_id(id)\r\n g.name=form.cleaned_data['name']\r\n g.nodes=form.cleaned_data['nodes'] \r\n g.put()\r\n memcache.delete('stats')\r\n return HttpResponseRedirect('/graphs/') \r\n else:\r\n g = Graph.get_by_id(id)\r\n form = GraphForm(initial={'name':g.name,'nodes':g.nodes}) \r\n return render(request, 'detail.html', {\r\n 'form': form, 'msg':msg,\r\n })",
"def get_form(self, step=None, data=None, files=None):\n self.form_obj = super(FormWizardAdminView, self).get_form(\n step=step, data=data, files=files)\n return self.form_obj",
"def _create_model_form(self):\n global Model\n Model = self.model\n class _ModelForm(ModelForm):\n class Meta:\n model = Model\n \n return _ModelForm",
"def edit_job_galaxy_model(request, id):\n\n active_tab = GMODEL\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def edit_job_psf(request, id):\n\n active_tab = PSF\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def post_edit_form(post_id):\n posts = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n return render_template(\"/edit-post.html\", posts=posts, tags=tags)",
"def edit_module_detail_screens(request, domain, app_id, module_unique_id):\n # HELPME\n #\n # This method has been flagged for refactoring due to its complexity and\n # frequency of touches in changesets\n #\n # If you are writing code that touches this method, your changeset\n # should leave the method better than you found it.\n #\n # Please remove this flag when this method no longer triggers an 'E' or 'F'\n # classification from the radon code static analysis\n\n params = json_request(request.POST)\n detail_type = params.get('type')\n short = params.get('short', None)\n long_ = params.get('long', None)\n tabs = params.get('tabs', None)\n filter = params.get('filter', ())\n custom_xml = params.get('custom_xml', None)\n parent_select = params.get('parent_select', None)\n fixture_select = params.get('fixture_select', None)\n sort_elements = params.get('sort_elements', None)\n print_template = params.get('printTemplate', None)\n search_properties = params.get(\"search_properties\")\n custom_variables = {\n 'short': params.get(\"short_custom_variables\", None),\n 'long': params.get(\"long_custom_variables\", None)\n }\n\n app = get_app(domain, app_id)\n\n try:\n module = app.get_module_by_unique_id(module_unique_id)\n except ModuleNotFoundException:\n # temporary fallback\n module = app.get_module(module_unique_id)\n\n if detail_type == 'case':\n detail = module.case_details\n else:\n try:\n detail = getattr(module, '{0}_details'.format(detail_type))\n except AttributeError:\n return HttpResponseBadRequest(format_html(\"Unknown detail type '{}'\", detail_type))\n\n lang = request.COOKIES.get('lang', app.langs[0])\n _update_short_details(detail, short, params, lang)\n\n if long_ is not None:\n detail.long.columns = list(map(DetailColumn.from_json, long_))\n if tabs is not None:\n detail.long.tabs = list(map(DetailTab.wrap, tabs))\n if print_template is not None:\n detail.long.print_template = print_template\n if filter != ():\n # Note that we use the empty tuple as the sentinel because a filter\n # value of None represents clearing the filter.\n detail.short.filter = filter\n if custom_xml is not None:\n detail.short.custom_xml = custom_xml\n\n if custom_variables['short'] is not None:\n try:\n etree.fromstring(\"<variables>{}</variables>\".format(custom_variables['short']))\n except etree.XMLSyntaxError as error:\n return HttpResponseBadRequest(\n \"There was an issue with your custom variables: {}\".format(error)\n )\n detail.short.custom_variables = custom_variables['short']\n\n if custom_variables['long'] is not None:\n try:\n etree.fromstring(\"<variables>{}</variables>\".format(custom_variables['long']))\n except etree.XMLSyntaxError as error:\n return HttpResponseBadRequest(\n \"There was an issue with your custom variables: {}\".format(error)\n )\n detail.long.custom_variables = custom_variables['long']\n\n if sort_elements is not None:\n # Attempt to map new elements to old so we don't lose translations\n # Imperfect because the same field may be used multiple times, or user may change field\n old_elements_by_field = {e['field']: e for e in detail.short.sort_elements}\n\n detail.short.sort_elements = []\n for sort_element in sort_elements:\n item = SortElement()\n item.field = sort_element['field']\n item.type = sort_element['type']\n item.direction = sort_element['direction']\n item.blanks = sort_element['blanks']\n if item.field in old_elements_by_field:\n item.display = old_elements_by_field[item.field].display\n item.display[lang] = sort_element['display']\n if 
toggles.SORT_CALCULATION_IN_CASE_LIST.enabled(domain):\n item.sort_calculation = sort_element['sort_calculation']\n else:\n item.sort_calculation = \"\"\n detail.short.sort_elements.append(item)\n if parent_select is not None:\n module.parent_select = ParentSelect.wrap(parent_select)\n if module_case_hierarchy_has_circular_reference(module):\n return HttpResponseBadRequest(_(\"The case hierarchy contains a circular reference.\"))\n if fixture_select is not None:\n module.fixture_select = FixtureSelect.wrap(fixture_select)\n if search_properties is not None:\n if (\n search_properties.get('properties') is not None\n or search_properties.get('default_properties') is not None\n ):\n title_label = module.search_config.title_label\n title_label[lang] = search_properties.get('title_label', '')\n\n description = module.search_config.description\n description[lang] = search_properties.get('description', '')\n\n search_label = module.search_config.search_label\n search_label.label[lang] = search_properties.get('search_label', '')\n if search_properties.get('search_label_image_for_all'):\n search_label.use_default_image_for_all = (\n search_properties.get('search_label_image_for_all') == 'true')\n if search_properties.get('search_label_audio_for_all'):\n search_label.use_default_audio_for_all = (\n search_properties.get('search_label_audio_for_all') == 'true')\n search_label.set_media(\"media_image\", lang, search_properties.get('search_label_image'))\n search_label.set_media(\"media_audio\", lang, search_properties.get('search_label_audio'))\n\n search_again_label = module.search_config.search_again_label\n search_again_label.label[lang] = search_properties.get('search_again_label', '')\n if search_properties.get('search_again_label_image_for_all'):\n search_again_label.use_default_image_for_all = (\n search_properties.get('search_again_label_image_for_all') == 'true')\n if search_properties.get('search_again_label_audio_for_all'):\n search_again_label.use_default_audio_for_all = (\n search_properties.get('search_again_label_audio_for_all') == 'true')\n search_again_label.set_media(\"media_image\", lang, search_properties.get('search_again_label_image'))\n search_again_label.set_media(\"media_audio\", lang, search_properties.get('search_again_label_audio'))\n\n try:\n properties = [\n CaseSearchProperty.wrap(p)\n for p in _update_search_properties(\n module,\n search_properties.get('properties'), lang\n )\n ]\n except CaseSearchConfigError as e:\n return HttpResponseBadRequest(e)\n xpath_props = [\n \"search_filter\", \"blacklisted_owner_ids_expression\",\n \"search_button_display_condition\", \"additional_relevant\"\n ]\n\n def _check_xpath(xpath, location):\n is_valid, message = validate_xpath(xpath)\n if not is_valid:\n raise ValueError(\n f\"Please fix the errors in xpath expression '{xpath}' \"\n f\"in {location}. 
The error is {message}\"\n )\n\n for prop in xpath_props:\n xpath = search_properties.get(prop, \"\")\n if xpath:\n try:\n _check_xpath(xpath, \"Search and Claim Options\")\n except ValueError as e:\n return HttpResponseBadRequest(str(e))\n\n additional_registry_cases = []\n for case_id_xpath in search_properties.get('additional_registry_cases', []):\n if not case_id_xpath:\n continue\n\n try:\n _check_xpath(case_id_xpath, \"the Case ID of Additional Data Registry Query\")\n except ValueError as e:\n return HttpResponseBadRequest(str(e))\n\n additional_registry_cases.append(case_id_xpath)\n\n data_registry_slug = search_properties.get('data_registry', \"\")\n data_registry_workflow = search_properties.get('data_registry_workflow', \"\")\n # force auto launch when data registry load case workflow selected\n force_auto_launch = data_registry_slug and data_registry_workflow == REGISTRY_WORKFLOW_LOAD_CASE\n\n module.search_config = CaseSearch(\n search_label=search_label,\n search_again_label=search_again_label,\n title_label=title_label,\n description=description,\n properties=properties,\n additional_case_types=module.search_config.additional_case_types,\n additional_relevant=search_properties.get('additional_relevant', ''),\n auto_launch=force_auto_launch or bool(search_properties.get('auto_launch')),\n default_search=bool(search_properties.get('default_search')),\n search_filter=search_properties.get('search_filter', \"\"),\n search_button_display_condition=search_properties.get('search_button_display_condition', \"\"),\n blacklisted_owner_ids_expression=search_properties.get('blacklisted_owner_ids_expression', \"\"),\n default_properties=[\n DefaultCaseSearchProperty.wrap(p)\n for p in search_properties.get('default_properties')\n ],\n data_registry=data_registry_slug,\n data_registry_workflow=data_registry_workflow,\n additional_registry_cases=additional_registry_cases,\n custom_related_case_property=search_properties.get('custom_related_case_property', \"\"),\n inline_search=search_properties.get('inline_search', False),\n include_all_related_cases=search_properties.get('include_all_related_cases', False)\n )\n\n resp = {}\n app.save(resp)\n return JsonResponse(resp)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Form to edit the dataset information. It also returns forms to be rendered in other tabs (models).
|
def edit_job_dataset(request, id):
active_tab = DATASET
active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)
return render(
request,
"job/edit.html",
{
'job_id': id,
'active_tab': active_tab,
'disable_other_tabs': False,
'start_form': forms[TABS_INDEXES[START]],
'dataset_form': forms[TABS_INDEXES[DATASET]],
'data_model_form': forms[TABS_INDEXES[DMODEL]],
'psf_form': forms[TABS_INDEXES[PSF]],
'lsf_form': forms[TABS_INDEXES[LSF]],
'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],
'fitter_form': forms[TABS_INDEXES[FITTER]],
'params_form': forms[TABS_INDEXES[PARAMS]],
'start_view': views[TABS_INDEXES[START]],
'dataset_view': views[TABS_INDEXES[DATASET]],
'data_model_view': views[TABS_INDEXES[DMODEL]],
'psf_view': views[TABS_INDEXES[PSF]],
'lsf_view': views[TABS_INDEXES[LSF]],
'galaxy_model_view': views[TABS_INDEXES[GMODEL]],
'fitter_view': views[TABS_INDEXES[FITTER]],
'params_view': views[TABS_INDEXES[PARAMS]],
# 'max_file_size': MAX_FILE_SIZE
}
)
|
[
"def edit_form(self, resource):\n fs = FieldSet(resource.model)\n return fs.render()",
"def edit_job_data_model(request, id):\n active_tab = DMODEL\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def editForm(self,id,table=1):\t\n\t\tif table:\n\t\t\tform=tableForm(server['query'] + '&action=do')\n\t\telse:\n\t\t\tform=Form(server['query'] + '&action=do')\n\t\ttables=[ i for i in self.db.query('SHOW TABLES;') ]\n\t\tfor row in self.content:\n\t\t\tif str(row['id']) == str(id):\n\t\t\t\tself.db.cur.execute('SHOW FULL COLUMNS FROM ' + self.name)\n\t\t\t\ttb = self.db.cur.fetchall()\n\t\t\t\tfor field in tb:\n\t\t\t\t\tfname = field[0]\n\t\t\t\t\tftype = field[1]\n\t\t\t\t\tfdefault = field[5]\n\t\t\t\t\tfcomment = field[8]\n\t\t\t\t\tfdesc = fname[0].upper() + fname[1:].lower() + ':'\n\t\t\t\t\tif fname == 'id':\n\t\t\t\t\t\tform.hidden(fname,row[fname])\n\t\t\t\t\telif fname[-3:] == '_id' and fname[:-3] in tables:\n\t\t\t\t\t\t# Relations N - 1\n\t\t\t\t\t\tocolumns = self.db.query('SHOW COLUMNS FROM ' + fname[:-3])\n\t\t\t\t\t\torows=self.db.query('SELECT ' + ocolumns[0] + ', ' + ocolumns[1] + ' FROM ' + fname[:-3])\n\t\t\t\t\t\tdict={}\n\t\t\t\t\t\tfor orow in orows:\n\t\t\t\t\t\t\tdict.update({orow[1]:orow[0]})\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.select(fdesc,fname,dict,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.select(fname,dict,row[fname])\t\t\t\t\t\t\n\t\t\t\t\telif ftype == 'text':\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.text(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.text(fname,row[fname])\n\t\t\t\t\telif fcomment == 'pass':\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.pwd(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.pwd(fname,row[fname])\n\t\t\t\t\telse:\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.input(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.input(fname,row[fname])\n\t\tform.submit('Edit')\n\t\treturn form.end()",
"def browse_edit():\n # if user is creating a new empty deck, render only the deckname field with newtermfield and newdeffield\n # if user is editing a preexisting deck, render the deckname field, all cards as term, definition, a term/def row for new entries, a \"return to decks\" button, and a \"review this deck\" button\n \n if request.method == 'POST':\n class _BrowseEditForm(BrowseEditForm):\n pass\n\n # print(request.form)\n\n # set decktitle field to the deck's name:\n deckid = request.form['hidden_deckid_field']\n deck = Deck.query.get(deckid)\n setattr(_BrowseEditForm.deckname, 'default', deck.deckname)\n\n browse_edit_form = _BrowseEditForm()\n class _CardForm(CardForm):\n pass\n \n for card in Card.query.filter_by(deck_id=deckid).all():\n _CardForm.term = card.term\n _CardForm.definition = card.definition\n browse_edit_form.cards.append_entry(_CardForm())\n # for empty decks, `cards` should not render.\n if browse_edit_form.validate_on_submit:\n return render_template('browse_edit.html', title='Enter a card', form=browse_edit_form)\n return redirect(url_for('browse_edit.html'))",
"def edit_formazione(self, event):\n self.Disable()\n ViewFormazione(parent=self, title='Formazione')",
"def show_edit_post_form(id):\n post = Post.query.get_or_404(id)\n tags = Tag.query.all()\n\n return render_template(\"post_edit.html\" , post=post , tags=tags)",
"def get_forms(self):\n metadata = self.get_metadata()\n\n MetadataForm = self.get_metadata_form_class()\n metadata_form = MetadataForm(\n self.form_data,\n instance=metadata,\n category=self.trs_import.doc_category)\n\n revision_num = self.csv_data['revision']\n revision = metadata.get_revision(revision_num) if metadata else None\n\n RevisionForm = self.get_revision_form_class()\n revision_form = RevisionForm(\n self.form_data,\n instance=revision,\n category=self.trs_import.doc_category)\n\n return metadata_form, revision_form",
"def post_edit_form(post_id):\n posts = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n return render_template(\"/edit-post.html\", posts=posts, tags=tags)",
"def show_edit_tag_form(id):\n tag = Tag.query.get_or_404(id)\n return render_template(\"tag_edit.html\" , tag=tag)",
"def edit(request, id):\r\n id = int(id)\r\n msg=\"\"\r\n if request.method == 'POST': \r\n form = GraphForm(request.POST) \r\n msg = validate_json(request.POST['nodes'])\r\n if not msg and form.is_valid(): \r\n g = Graph.get_by_id(id)\r\n g.name=form.cleaned_data['name']\r\n g.nodes=form.cleaned_data['nodes'] \r\n g.put()\r\n memcache.delete('stats')\r\n return HttpResponseRedirect('/graphs/') \r\n else:\r\n g = Graph.get_by_id(id)\r\n form = GraphForm(initial={'name':g.name,'nodes':g.nodes}) \r\n return render(request, 'detail.html', {\r\n 'form': form, 'msg':msg,\r\n })",
"def edit(self, obj):\n data = request.data or request.form.get('data') or ''\n g.modify_flag = 'edit'\n data = self.validate_data(data, obj)\n\n\n for key in self._readonly:\n data.pop(key, None)\n\n obj, models = self.deserialize_object(data, obj)\n\n obj = self.before_save(obj)\n self.save_related_objects(obj, data)\n obj = self.save_object(obj, data)\n self.after_save(obj)\n\n return self.response(self.serialize_object(obj))",
"def edit_job_psf(request, id):\n\n active_tab = PSF\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def update(self):\n\n self.fields = self.getOverriderFields()\n z3c.form.form.EditForm.update(self)",
"def edit_job_galaxy_model(request, id):\n\n active_tab = GMODEL\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def edit_job_name(request, id):\n active_tab = START\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def edit_data_source_item(self, request, form):\n\n layout = ManageDataSourceItemsLayout(self.source, request)\n\n form.populate(self.source)\n\n if form.submitted(request):\n form.update_model(self)\n request.message(_(\"Mapping modified.\"), 'success')\n return morepath.redirect(layout.manage_model_link)\n\n if not form.errors:\n form.apply_model(self)\n\n return {\n 'layout': layout,\n 'form': form,\n 'title': self.name,\n 'subtitle': _(\"Edit mapping\"),\n 'cancel': layout.manage_model_link\n }",
"def edit_job_lsf(request, id):\n\n active_tab = LSF\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def get_form(self, step=None, data=None, files=None):\n self.form_obj = super(FormWizardAdminView, self).get_form(\n step=step, data=data, files=files)\n return self.form_obj",
"def ShowForm(self, _):\n return {'layer_model': model.Layer}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Form to edit the PSF information. It also returns forms to be rendered in other tabs (models).
|
def edit_job_psf(request, id):
active_tab = PSF
active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)
return render(
request,
"job/edit.html",
{
'job_id': id,
'active_tab': active_tab,
'disable_other_tabs': False,
'start_form': forms[TABS_INDEXES[START]],
'dataset_form': forms[TABS_INDEXES[DATASET]],
'data_model_form': forms[TABS_INDEXES[DMODEL]],
'psf_form': forms[TABS_INDEXES[PSF]],
'lsf_form': forms[TABS_INDEXES[LSF]],
'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],
'fitter_form': forms[TABS_INDEXES[FITTER]],
'params_form': forms[TABS_INDEXES[PARAMS]],
'start_view': views[TABS_INDEXES[START]],
'dataset_view': views[TABS_INDEXES[DATASET]],
'data_model_view': views[TABS_INDEXES[DMODEL]],
'psf_view': views[TABS_INDEXES[PSF]],
'lsf_view': views[TABS_INDEXES[LSF]],
'galaxy_model_view': views[TABS_INDEXES[GMODEL]],
'fitter_view': views[TABS_INDEXES[FITTER]],
'params_view': views[TABS_INDEXES[PARAMS]],
# 'max_file_size': MAX_FILE_SIZE
}
)
|
[
"def edit_form(self, resource):\n fs = FieldSet(resource.model)\n return fs.render()",
"def show_update_form():\n\n current_user = session.get('current_user')\n user_obj = crud.get_user_by_id(current_user)\n\n return render_template(\"update_info.html\")",
"def edit_formazione(self, event):\n self.Disable()\n ViewFormazione(parent=self, title='Formazione')",
"def show_edit_post_form(postid):\n post = Post.query.get(postid)\n return render_template('edit_post.html', post=post)",
"def update(self):\n\n self.fields = self.getOverriderFields()\n z3c.form.form.EditForm.update(self)",
"def editForm(self,id,table=1):\t\n\t\tif table:\n\t\t\tform=tableForm(server['query'] + '&action=do')\n\t\telse:\n\t\t\tform=Form(server['query'] + '&action=do')\n\t\ttables=[ i for i in self.db.query('SHOW TABLES;') ]\n\t\tfor row in self.content:\n\t\t\tif str(row['id']) == str(id):\n\t\t\t\tself.db.cur.execute('SHOW FULL COLUMNS FROM ' + self.name)\n\t\t\t\ttb = self.db.cur.fetchall()\n\t\t\t\tfor field in tb:\n\t\t\t\t\tfname = field[0]\n\t\t\t\t\tftype = field[1]\n\t\t\t\t\tfdefault = field[5]\n\t\t\t\t\tfcomment = field[8]\n\t\t\t\t\tfdesc = fname[0].upper() + fname[1:].lower() + ':'\n\t\t\t\t\tif fname == 'id':\n\t\t\t\t\t\tform.hidden(fname,row[fname])\n\t\t\t\t\telif fname[-3:] == '_id' and fname[:-3] in tables:\n\t\t\t\t\t\t# Relations N - 1\n\t\t\t\t\t\tocolumns = self.db.query('SHOW COLUMNS FROM ' + fname[:-3])\n\t\t\t\t\t\torows=self.db.query('SELECT ' + ocolumns[0] + ', ' + ocolumns[1] + ' FROM ' + fname[:-3])\n\t\t\t\t\t\tdict={}\n\t\t\t\t\t\tfor orow in orows:\n\t\t\t\t\t\t\tdict.update({orow[1]:orow[0]})\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.select(fdesc,fname,dict,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.select(fname,dict,row[fname])\t\t\t\t\t\t\n\t\t\t\t\telif ftype == 'text':\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.text(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.text(fname,row[fname])\n\t\t\t\t\telif fcomment == 'pass':\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.pwd(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.pwd(fname,row[fname])\n\t\t\t\t\telse:\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.input(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.input(fname,row[fname])\n\t\tform.submit('Edit')\n\t\treturn form.end()",
"def display_pet_details_and_edit_form(pet_id):\n pet = Pet.query.get_or_404(pet_id)\n form = EditPetForm(obj=pet)\n if form.validate_on_submit():\n print(\"*!*!*!*!*! IT WORKED !*!!\"*10)\n pet.photo_url=form.photo_url.data\n pet.notes=form.notes.data\n pet.available=form.available.data\n db.session.commit()\n flash(f\"Edited pet: {pet.name}\")\n return redirect(f\"/{pet_id}\")\n else:\n return render_template(\"edit_pet.html\", form=form, pet=pet)",
"def fase_page():\n return fase_form()",
"def getEditgameUpdateForm (game):\n\n\tfrom gluon import current, redirect, URL, SQLFORM\n\tdb = current.db\n\n\t#Hide some fields of the form\n\thideFields (db.game, ['id', 'host_id', 'game_status', 'password'])\n\n\tformUpdate = SQLFORM(db.game, game.id)\n\tformUpdate.add_class('assassins-form')\n\n\tif formUpdate.process().accepted:\n\t\tresizeImage(db.game, game.id)\n\t\tredirect(getUrl('edit', game.id))\n\n\treturn formUpdate",
"def edit_job_lsf(request, id):\n\n active_tab = LSF\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def browse_edit():\n # if user is creating a new empty deck, render only the deckname field with newtermfield and newdeffield\n # if user is editing a preexisting deck, render the deckname field, all cards as term, definition, a term/def row for new entries, a \"return to decks\" button, and a \"review this deck\" button\n \n if request.method == 'POST':\n class _BrowseEditForm(BrowseEditForm):\n pass\n\n # print(request.form)\n\n # set decktitle field to the deck's name:\n deckid = request.form['hidden_deckid_field']\n deck = Deck.query.get(deckid)\n setattr(_BrowseEditForm.deckname, 'default', deck.deckname)\n\n browse_edit_form = _BrowseEditForm()\n class _CardForm(CardForm):\n pass\n \n for card in Card.query.filter_by(deck_id=deckid).all():\n _CardForm.term = card.term\n _CardForm.definition = card.definition\n browse_edit_form.cards.append_entry(_CardForm())\n # for empty decks, `cards` should not render.\n if browse_edit_form.validate_on_submit:\n return render_template('browse_edit.html', title='Enter a card', form=browse_edit_form)\n return redirect(url_for('browse_edit.html'))",
"def edit_personal_info(request):\n if 'is_active' in request.session:\n if request.method == 'POST':\n personal_information_form = PersonalInformationForm(request.POST)\n if personal_information_form.is_valid():\n personal_information_data = personal_information_form.cleaned_data\n personal_info_obj = PersonalInformation()\n personal_info_data = {\n 'forename': personal_information_data['forename'],\n 'surname': personal_information_data['surname'],\n 'address': {\n 'addressLine1': personal_information_data['addressLine1'],\n 'addressLine2': personal_information_data['addressLine2'],\n 'addressLine3': personal_information_data['addressLine3'],\n 'city': personal_information_data['city'],\n 'country': personal_information_data['country'],\n 'postcode': personal_information_data['postcode'],\n },\n 'mobile': personal_information_data['mobile'],\n 'email': personal_information_data['email']\n }\n personal_info_obj.save_personal_info(personal_info_data)\n messages.success(request, _(\"Your information has been updated with Magpie.\"))\n return HttpResponseRedirect('/my_account/')\n else:\n return render(request, \"WebApp/users/edit_personal_info.html\",\n {\"form\": personal_information_form})\n else:\n personal_information_form = webapp_utils.add_initial_values(\n webapp_utils.get_personal_info(request), PersonalInformationForm())\n return render(request, \"WebApp/users/edit_personal_info.html\",\n {\"form\": personal_information_form})\n else:\n return HttpResponseRedirect('/')",
"def editListView(request, pk):\n\n\t# Get current user and list object to check ownership\n\tcurrent_user = request.oauth.credentials.id_token['email']\n\tparentList = List.objects.get(pk=pk)\n\tif current_user != parentList.owner and not request.user.is_superuser: \n\t\traise PermissionDenied\n\n\t# Generate formset for the parent list, so we can edit the list name\n\tListFormSet = modelformset_factory(\n\t\tList, \n\t\tfields=(\"name\",),\n\t\textra=0,\n\t\twidgets={\n \t\t'name': TextInput(attrs={'class': 'form-control form-title form-inactive', 'required': True}),\n \t\t}\n\t\t)\n\n\t# If no items exist create an extra field to add new list item.\n\titemCount = Item.objects.filter(parentList = parentList).count()\n\tif itemCount > 0:\n\t\textraItemField = 0\n\telse:\n\t\textraItemField = 1 \n\n\t# Generate the formset of list member items, so we can edit the shopping list\n\tItemInlineFormSet = inlineformset_factory(\n\t\tList, \n\t\tItem, \n\t\tfields=(\"name\", \"parentList\"), \n\t\textra=extraItemField, \n\t\twidgets={\n \t\t'name': TextInput(attrs={'class': 'form-control form-inactive', 'required': True}),\n \t\t}\n \t)\n\n\t# if POST generate form data and save if valid\n\tif request.method == 'POST':\n\t\titem_formset = ItemInlineFormSet( request.POST, instance=parentList )\n\t\tlist_formset = ListFormSet( request.POST, queryset= List.objects.filter(pk=pk) )\n\t\tif item_formset.is_valid() and list_formset.is_valid():\n\t\t\t# Transactions - only save list and items together\n\t\t\twith transaction.atomic():\n\t\t\t\titem_formset.save()\n\t\t\t\tlist_formset.save() \n\t\t\t# Redirect to list page.\n\t\t\tmessages.add_message(request, messages.SUCCESS, 'List changes saved.')\n\t\t\treturn HttpResponseRedirect(reverse('lists:edit', kwargs={'pk':pk}))\n\t\telse:\n\t\t\t# Add errors and redirect to form.\n\t\t\tfor errors in list_formset.errors:\n\t\t\t\tmessages.add_message(request, messages.INFO, errors, extra_tags='danger')\n\t\t\tfor errors in item_formset.errors:\n\t\t\t\tmessages.add_message(request, messages.INFO, errors, extra_tags='danger')\n\t\t\treturn HttpResponseRedirect(reverse('lists:edit', kwargs={'pk':pk}))\n\t# if a GET (or any other method) create a blank form\n\telse:\n\t\t# Limit list items to only include parent list members (not from any old list)\n\t\titem_formset = ItemInlineFormSet( instance=parentList )\n\t\t# Limit list item to one. We only want to edit the title of the current list\n\t\tlist_formset = ListFormSet( queryset= List.objects.filter(pk=pk) )\n\t\treturn render(request, 'lists/item_form.html', {\n\t\t\t'item_formset': item_formset, \n\t\t\t'list_formset': list_formset, \n\t\t\t})",
"def show_edit_post_form(id):\n post = Post.query.get_or_404(id)\n tags = Tag.query.all()\n\n return render_template(\"post_edit.html\" , post=post , tags=tags)",
"def post_edit_form(post_id):\n posts = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n return render_template(\"/edit-post.html\", posts=posts, tags=tags)",
"def forms(request):\n return render(request, \"forms.html\", {})",
"def edit_isp(isp_id):\n isp = db_session.query(ISP).filter_by(id=isp_id).one()\n\n if request.method == \"POST\":\n if request.form[\"choice\"] == \"edit\":\n isp.name = request.form[\"name\"]\n db_session.add(isp)\n db_session.commit()\n flash(\"ISP Successfully Edited.\")\n return redirect(url_for(\"show_isps\"))\n else:\n return render_template(\"edit_isp.html\", isp=isp, title=\"Edit ISP\")",
"def user_edit_form(user_id):\n user = User.query.get_or_404(user_id)\n return render_template(\"/user-edit.html\", user=user)",
"def KLP_Staff_Update(request, staff_id):\n\t# Checking user Permissions for Staff update\n KLP_user_Perm(request.user, \"Staff\", \"Update\")\n\tbuttonType = request.POST.get('form-buttonType')\n\treferKey = request.POST.get('form-0-boundary')\n querysetstaff=Staff.objects.filter(pk=staff_id)\n\tstaff = querysetstaff[0] #Staff.objects.get(pk=staff_id)\n\tstgrps = StudentGroup.objects.filter(institution = staff.institution, active=2)\n\tinstitutionObj = staff.institution\n\tif institutionObj.boundary.boundary_category.boundary_category.lower() == 'circle':\n\t\t# if the boundary category is circle get anganwadi staff types.\n\t\tinstitutionType = 'Anganwadi'\n\t\tStaff_Types = Staff_Type.objects.filter(categoryType=2)\n\telse:\n\t\t# if the boundary category is not circle get Institution staff types.\n\t\tinstitutionType = 'Institution'\n\t\tStaff_Types = Staff_Type.objects.filter(categoryType=1)\n #before Staff.objects.all()\n\tKLP_Edit_Staff =KLP_Staff(queryset = querysetstaff, permitted_methods = ('GET', 'POST'), responder = TemplateResponder(template_dir = 'edittemplates', template_object_name = 'staff', extra_context={'buttonType':buttonType, 'referKey':referKey, 'stgrps':stgrps, 'institutionType':institutionType, 'Staff_Types':Staff_Types}), receiver = XMLReceiver(),)\n\tresponse = KLP_Edit_Staff.responder.update_form(request, pk=staff_id, form_class=Staff_Form)\n\treturn HttpResponse(response)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Form to edit the LSF information. It also returns forms to be rendered in other tabs (models).
|
def edit_job_lsf(request, id):
active_tab = LSF
active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)
return render(
request,
"job/edit.html",
{
'job_id': id,
'active_tab': active_tab,
'disable_other_tabs': False,
'start_form': forms[TABS_INDEXES[START]],
'dataset_form': forms[TABS_INDEXES[DATASET]],
'data_model_form': forms[TABS_INDEXES[DMODEL]],
'psf_form': forms[TABS_INDEXES[PSF]],
'lsf_form': forms[TABS_INDEXES[LSF]],
'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],
'fitter_form': forms[TABS_INDEXES[FITTER]],
'params_form': forms[TABS_INDEXES[PARAMS]],
'start_view': views[TABS_INDEXES[START]],
'dataset_view': views[TABS_INDEXES[DATASET]],
'data_model_view': views[TABS_INDEXES[DMODEL]],
'psf_view': views[TABS_INDEXES[PSF]],
'lsf_view': views[TABS_INDEXES[LSF]],
'galaxy_model_view': views[TABS_INDEXES[GMODEL]],
'fitter_view': views[TABS_INDEXES[FITTER]],
'params_view': views[TABS_INDEXES[PARAMS]],
# 'max_file_size': MAX_FILE_SIZE
}
)
|
[
"def edit_form(self, resource):\n fs = FieldSet(resource.model)\n return fs.render()",
"def edit_job_psf(request, id):\n\n active_tab = PSF\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def show_update_form():\n\n current_user = session.get('current_user')\n user_obj = crud.get_user_by_id(current_user)\n\n return render_template(\"update_info.html\")",
"def editForm(self,id,table=1):\t\n\t\tif table:\n\t\t\tform=tableForm(server['query'] + '&action=do')\n\t\telse:\n\t\t\tform=Form(server['query'] + '&action=do')\n\t\ttables=[ i for i in self.db.query('SHOW TABLES;') ]\n\t\tfor row in self.content:\n\t\t\tif str(row['id']) == str(id):\n\t\t\t\tself.db.cur.execute('SHOW FULL COLUMNS FROM ' + self.name)\n\t\t\t\ttb = self.db.cur.fetchall()\n\t\t\t\tfor field in tb:\n\t\t\t\t\tfname = field[0]\n\t\t\t\t\tftype = field[1]\n\t\t\t\t\tfdefault = field[5]\n\t\t\t\t\tfcomment = field[8]\n\t\t\t\t\tfdesc = fname[0].upper() + fname[1:].lower() + ':'\n\t\t\t\t\tif fname == 'id':\n\t\t\t\t\t\tform.hidden(fname,row[fname])\n\t\t\t\t\telif fname[-3:] == '_id' and fname[:-3] in tables:\n\t\t\t\t\t\t# Relations N - 1\n\t\t\t\t\t\tocolumns = self.db.query('SHOW COLUMNS FROM ' + fname[:-3])\n\t\t\t\t\t\torows=self.db.query('SELECT ' + ocolumns[0] + ', ' + ocolumns[1] + ' FROM ' + fname[:-3])\n\t\t\t\t\t\tdict={}\n\t\t\t\t\t\tfor orow in orows:\n\t\t\t\t\t\t\tdict.update({orow[1]:orow[0]})\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.select(fdesc,fname,dict,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.select(fname,dict,row[fname])\t\t\t\t\t\t\n\t\t\t\t\telif ftype == 'text':\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.text(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.text(fname,row[fname])\n\t\t\t\t\telif fcomment == 'pass':\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.pwd(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.pwd(fname,row[fname])\n\t\t\t\t\telse:\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.input(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.input(fname,row[fname])\n\t\tform.submit('Edit')\n\t\treturn form.end()",
"def edit_formazione(self, event):\n self.Disable()\n ViewFormazione(parent=self, title='Formazione')",
"def browse_edit():\n # if user is creating a new empty deck, render only the deckname field with newtermfield and newdeffield\n # if user is editing a preexisting deck, render the deckname field, all cards as term, definition, a term/def row for new entries, a \"return to decks\" button, and a \"review this deck\" button\n \n if request.method == 'POST':\n class _BrowseEditForm(BrowseEditForm):\n pass\n\n # print(request.form)\n\n # set decktitle field to the deck's name:\n deckid = request.form['hidden_deckid_field']\n deck = Deck.query.get(deckid)\n setattr(_BrowseEditForm.deckname, 'default', deck.deckname)\n\n browse_edit_form = _BrowseEditForm()\n class _CardForm(CardForm):\n pass\n \n for card in Card.query.filter_by(deck_id=deckid).all():\n _CardForm.term = card.term\n _CardForm.definition = card.definition\n browse_edit_form.cards.append_entry(_CardForm())\n # for empty decks, `cards` should not render.\n if browse_edit_form.validate_on_submit:\n return render_template('browse_edit.html', title='Enter a card', form=browse_edit_form)\n return redirect(url_for('browse_edit.html'))",
"def getEditgameUpdateForm (game):\n\n\tfrom gluon import current, redirect, URL, SQLFORM\n\tdb = current.db\n\n\t#Hide some fields of the form\n\thideFields (db.game, ['id', 'host_id', 'game_status', 'password'])\n\n\tformUpdate = SQLFORM(db.game, game.id)\n\tformUpdate.add_class('assassins-form')\n\n\tif formUpdate.process().accepted:\n\t\tresizeImage(db.game, game.id)\n\t\tredirect(getUrl('edit', game.id))\n\n\treturn formUpdate",
"def forms(request):\n return render(request, \"forms.html\", {})",
"def editListView(request, pk):\n\n\t# Get current user and list object to check ownership\n\tcurrent_user = request.oauth.credentials.id_token['email']\n\tparentList = List.objects.get(pk=pk)\n\tif current_user != parentList.owner and not request.user.is_superuser: \n\t\traise PermissionDenied\n\n\t# Generate formset for the parent list, so we can edit the list name\n\tListFormSet = modelformset_factory(\n\t\tList, \n\t\tfields=(\"name\",),\n\t\textra=0,\n\t\twidgets={\n \t\t'name': TextInput(attrs={'class': 'form-control form-title form-inactive', 'required': True}),\n \t\t}\n\t\t)\n\n\t# If no items exist create an extra field to add new list item.\n\titemCount = Item.objects.filter(parentList = parentList).count()\n\tif itemCount > 0:\n\t\textraItemField = 0\n\telse:\n\t\textraItemField = 1 \n\n\t# Generate the formset of list member items, so we can edit the shopping list\n\tItemInlineFormSet = inlineformset_factory(\n\t\tList, \n\t\tItem, \n\t\tfields=(\"name\", \"parentList\"), \n\t\textra=extraItemField, \n\t\twidgets={\n \t\t'name': TextInput(attrs={'class': 'form-control form-inactive', 'required': True}),\n \t\t}\n \t)\n\n\t# if POST generate form data and save if valid\n\tif request.method == 'POST':\n\t\titem_formset = ItemInlineFormSet( request.POST, instance=parentList )\n\t\tlist_formset = ListFormSet( request.POST, queryset= List.objects.filter(pk=pk) )\n\t\tif item_formset.is_valid() and list_formset.is_valid():\n\t\t\t# Transactions - only save list and items together\n\t\t\twith transaction.atomic():\n\t\t\t\titem_formset.save()\n\t\t\t\tlist_formset.save() \n\t\t\t# Redirect to list page.\n\t\t\tmessages.add_message(request, messages.SUCCESS, 'List changes saved.')\n\t\t\treturn HttpResponseRedirect(reverse('lists:edit', kwargs={'pk':pk}))\n\t\telse:\n\t\t\t# Add errors and redirect to form.\n\t\t\tfor errors in list_formset.errors:\n\t\t\t\tmessages.add_message(request, messages.INFO, errors, extra_tags='danger')\n\t\t\tfor errors in item_formset.errors:\n\t\t\t\tmessages.add_message(request, messages.INFO, errors, extra_tags='danger')\n\t\t\treturn HttpResponseRedirect(reverse('lists:edit', kwargs={'pk':pk}))\n\t# if a GET (or any other method) create a blank form\n\telse:\n\t\t# Limit list items to only include parent list members (not from any old list)\n\t\titem_formset = ItemInlineFormSet( instance=parentList )\n\t\t# Limit list item to one. We only want to edit the title of the current list\n\t\tlist_formset = ListFormSet( queryset= List.objects.filter(pk=pk) )\n\t\treturn render(request, 'lists/item_form.html', {\n\t\t\t'item_formset': item_formset, \n\t\t\t'list_formset': list_formset, \n\t\t\t})",
"def update(self):\n\n self.fields = self.getOverriderFields()\n z3c.form.form.EditForm.update(self)",
"def lacop_page():\n return lacop_form()",
"def show_edit_post_form(id):\n post = Post.query.get_or_404(id)\n tags = Tag.query.all()\n\n return render_template(\"post_edit.html\" , post=post , tags=tags)",
"def display_form():\n\n roles = [\"Software Engineer\", \"QA Engineer\", \"Product Manager\"]\n return render_template(\"application-form.html\",\n jobs=roles)",
"def KLP_Staff_Update(request, staff_id):\n\t# Checking user Permissions for Staff update\n KLP_user_Perm(request.user, \"Staff\", \"Update\")\n\tbuttonType = request.POST.get('form-buttonType')\n\treferKey = request.POST.get('form-0-boundary')\n querysetstaff=Staff.objects.filter(pk=staff_id)\n\tstaff = querysetstaff[0] #Staff.objects.get(pk=staff_id)\n\tstgrps = StudentGroup.objects.filter(institution = staff.institution, active=2)\n\tinstitutionObj = staff.institution\n\tif institutionObj.boundary.boundary_category.boundary_category.lower() == 'circle':\n\t\t# if the boundary category is circle get anganwadi staff types.\n\t\tinstitutionType = 'Anganwadi'\n\t\tStaff_Types = Staff_Type.objects.filter(categoryType=2)\n\telse:\n\t\t# if the boundary category is not circle get Institution staff types.\n\t\tinstitutionType = 'Institution'\n\t\tStaff_Types = Staff_Type.objects.filter(categoryType=1)\n #before Staff.objects.all()\n\tKLP_Edit_Staff =KLP_Staff(queryset = querysetstaff, permitted_methods = ('GET', 'POST'), responder = TemplateResponder(template_dir = 'edittemplates', template_object_name = 'staff', extra_context={'buttonType':buttonType, 'referKey':referKey, 'stgrps':stgrps, 'institutionType':institutionType, 'Staff_Types':Staff_Types}), receiver = XMLReceiver(),)\n\tresponse = KLP_Edit_Staff.responder.update_form(request, pk=staff_id, form_class=Staff_Form)\n\treturn HttpResponse(response)",
"def ShowForm(self, _):\n return {'layer_model': model.Layer}",
"def show_edit_post_form(postid):\n post = Post.query.get(postid)\n return render_template('edit_post.html', post=post)",
"def user_edit_form(user_id):\n user = User.query.get_or_404(user_id)\n return render_template(\"/user-edit.html\", user=user)",
"def edit_job_data_model(request, id):\n active_tab = DMODEL\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def fase_page():\n return fase_form()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Form to edit the galaxy model information. It also returns forms to be rendered in other tabs (models).
|
def edit_job_galaxy_model(request, id):
active_tab = GMODEL
active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)
return render(
request,
"job/edit.html",
{
'job_id': id,
'active_tab': active_tab,
'disable_other_tabs': False,
'start_form': forms[TABS_INDEXES[START]],
'dataset_form': forms[TABS_INDEXES[DATASET]],
'data_model_form': forms[TABS_INDEXES[DMODEL]],
'psf_form': forms[TABS_INDEXES[PSF]],
'lsf_form': forms[TABS_INDEXES[LSF]],
'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],
'fitter_form': forms[TABS_INDEXES[FITTER]],
'params_form': forms[TABS_INDEXES[PARAMS]],
'start_view': views[TABS_INDEXES[START]],
'dataset_view': views[TABS_INDEXES[DATASET]],
'data_model_view': views[TABS_INDEXES[DMODEL]],
'psf_view': views[TABS_INDEXES[PSF]],
'lsf_view': views[TABS_INDEXES[LSF]],
'galaxy_model_view': views[TABS_INDEXES[GMODEL]],
'fitter_view': views[TABS_INDEXES[FITTER]],
'params_view': views[TABS_INDEXES[PARAMS]],
# 'max_file_size': MAX_FILE_SIZE
}
)
|
[
"def ShowForm(self, _):\n return {'layer_model': model.Layer}",
"def edit_form(self, resource):\n fs = FieldSet(resource.model)\n return fs.render()",
"def edit_formazione(self, event):\n self.Disable()\n ViewFormazione(parent=self, title='Formazione')",
"def getEditgameUpdateForm (game):\n\n\tfrom gluon import current, redirect, URL, SQLFORM\n\tdb = current.db\n\n\t#Hide some fields of the form\n\thideFields (db.game, ['id', 'host_id', 'game_status', 'password'])\n\n\tformUpdate = SQLFORM(db.game, game.id)\n\tformUpdate.add_class('assassins-form')\n\n\tif formUpdate.process().accepted:\n\t\tresizeImage(db.game, game.id)\n\t\tredirect(getUrl('edit', game.id))\n\n\treturn formUpdate",
"def browse_edit():\n # if user is creating a new empty deck, render only the deckname field with newtermfield and newdeffield\n # if user is editing a preexisting deck, render the deckname field, all cards as term, definition, a term/def row for new entries, a \"return to decks\" button, and a \"review this deck\" button\n \n if request.method == 'POST':\n class _BrowseEditForm(BrowseEditForm):\n pass\n\n # print(request.form)\n\n # set decktitle field to the deck's name:\n deckid = request.form['hidden_deckid_field']\n deck = Deck.query.get(deckid)\n setattr(_BrowseEditForm.deckname, 'default', deck.deckname)\n\n browse_edit_form = _BrowseEditForm()\n class _CardForm(CardForm):\n pass\n \n for card in Card.query.filter_by(deck_id=deckid).all():\n _CardForm.term = card.term\n _CardForm.definition = card.definition\n browse_edit_form.cards.append_entry(_CardForm())\n # for empty decks, `cards` should not render.\n if browse_edit_form.validate_on_submit:\n return render_template('browse_edit.html', title='Enter a card', form=browse_edit_form)\n return redirect(url_for('browse_edit.html'))",
"def edit_job_data_model(request, id):\n active_tab = DMODEL\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def editForm(self,id,table=1):\t\n\t\tif table:\n\t\t\tform=tableForm(server['query'] + '&action=do')\n\t\telse:\n\t\t\tform=Form(server['query'] + '&action=do')\n\t\ttables=[ i for i in self.db.query('SHOW TABLES;') ]\n\t\tfor row in self.content:\n\t\t\tif str(row['id']) == str(id):\n\t\t\t\tself.db.cur.execute('SHOW FULL COLUMNS FROM ' + self.name)\n\t\t\t\ttb = self.db.cur.fetchall()\n\t\t\t\tfor field in tb:\n\t\t\t\t\tfname = field[0]\n\t\t\t\t\tftype = field[1]\n\t\t\t\t\tfdefault = field[5]\n\t\t\t\t\tfcomment = field[8]\n\t\t\t\t\tfdesc = fname[0].upper() + fname[1:].lower() + ':'\n\t\t\t\t\tif fname == 'id':\n\t\t\t\t\t\tform.hidden(fname,row[fname])\n\t\t\t\t\telif fname[-3:] == '_id' and fname[:-3] in tables:\n\t\t\t\t\t\t# Relations N - 1\n\t\t\t\t\t\tocolumns = self.db.query('SHOW COLUMNS FROM ' + fname[:-3])\n\t\t\t\t\t\torows=self.db.query('SELECT ' + ocolumns[0] + ', ' + ocolumns[1] + ' FROM ' + fname[:-3])\n\t\t\t\t\t\tdict={}\n\t\t\t\t\t\tfor orow in orows:\n\t\t\t\t\t\t\tdict.update({orow[1]:orow[0]})\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.select(fdesc,fname,dict,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.select(fname,dict,row[fname])\t\t\t\t\t\t\n\t\t\t\t\telif ftype == 'text':\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.text(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.text(fname,row[fname])\n\t\t\t\t\telif fcomment == 'pass':\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.pwd(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.pwd(fname,row[fname])\n\t\t\t\t\telse:\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.input(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.input(fname,row[fname])\n\t\tform.submit('Edit')\n\t\treturn form.end()",
"def edit(request, id):\r\n id = int(id)\r\n msg=\"\"\r\n if request.method == 'POST': \r\n form = GraphForm(request.POST) \r\n msg = validate_json(request.POST['nodes'])\r\n if not msg and form.is_valid(): \r\n g = Graph.get_by_id(id)\r\n g.name=form.cleaned_data['name']\r\n g.nodes=form.cleaned_data['nodes'] \r\n g.put()\r\n memcache.delete('stats')\r\n return HttpResponseRedirect('/graphs/') \r\n else:\r\n g = Graph.get_by_id(id)\r\n form = GraphForm(initial={'name':g.name,'nodes':g.nodes}) \r\n return render(request, 'detail.html', {\r\n 'form': form, 'msg':msg,\r\n })",
"def edit(request):\n if request.method == 'POST':\n form = VMEditForm(request.POST)\n if form.is_valid():\n VM_id = form.cleaned_data['VM_id']\n flavor_id = form.cleaned_data['flavor_id']\t\n\t\t\tapi.editVM(VM_id, flavor_id)\n\t\t\treturn HttpResponseRedirect('/project_space/manage')\n else:\n\t\treturn HttpResponseRedirect('/project_space/manage')",
"def show_update_form():\n\n current_user = session.get('current_user')\n user_obj = crud.get_user_by_id(current_user)\n\n return render_template(\"update_info.html\")",
"def phenomodel_edit(institute_id, model_id):\n institute_and_case(store, institute_id)\n controllers.update_phenomodel(model_id, request.form)\n return redirect(request.referrer)",
"def edit_job_lsf(request, id):\n\n active_tab = LSF\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def display_pet_details_and_edit_form(pet_id):\n pet = Pet.query.get_or_404(pet_id)\n form = EditPetForm(obj=pet)\n if form.validate_on_submit():\n print(\"*!*!*!*!*! IT WORKED !*!!\"*10)\n pet.photo_url=form.photo_url.data\n pet.notes=form.notes.data\n pet.available=form.available.data\n db.session.commit()\n flash(f\"Edited pet: {pet.name}\")\n return redirect(f\"/{pet_id}\")\n else:\n return render_template(\"edit_pet.html\", form=form, pet=pet)",
"def edit_job_psf(request, id):\n\n active_tab = PSF\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def display_form():\n\n roles = [\"Software Engineer\", \"QA Engineer\", \"Product Manager\"]\n return render_template(\"application-form.html\",\n jobs=roles)",
"def _create_model_form(self):\n global Model\n Model = self.model\n class _ModelForm(ModelForm):\n class Meta:\n model = Model\n \n return _ModelForm",
"def edit_form(request, athlete_id, year, month, day, competition_id, template = 'athletelog/competition_form.html'):\n year, month, day = int(year), int(month), int(day)\n date = datetime.date(year, month, day)\n athlete = models.Athlete.objects.get(person__user__username=athlete_id)\n\n competition = get_object_or_404(models.Competition, pk=competition_id)\n competition_data = {'id': competition_id, 'day': date, 'event': competition.event.name,\n 'event_info': competition.event_info, 'result': competition.result,\n 'place': competition.place, 'note': competition.note}\n return display_form(request, 'edit', athlete, date, competition_data, edit_submit, template)",
"def show_edit_tag_form(id):\n tag = Tag.query.get_or_404(id)\n return render_template(\"tag_edit.html\" , tag=tag)",
"def test_update_model_with_form(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Form to edit the fitter information. It also returns the forms to be rendered in the other tabs (models).
|
def edit_job_fitter(request, id):
active_tab = FITTER
active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)
return render(
request,
"job/edit.html",
{
'job_id': id,
'active_tab': active_tab,
'disable_other_tabs': False,
'start_form': forms[TABS_INDEXES[START]],
'dataset_form': forms[TABS_INDEXES[DATASET]],
'data_model_form': forms[TABS_INDEXES[DMODEL]],
'psf_form': forms[TABS_INDEXES[PSF]],
'lsf_form': forms[TABS_INDEXES[LSF]],
'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],
'fitter_form': forms[TABS_INDEXES[FITTER]],
'params_form': forms[TABS_INDEXES[PARAMS]],
'start_view': views[TABS_INDEXES[START]],
'dataset_view': views[TABS_INDEXES[DATASET]],
'data_model_view': views[TABS_INDEXES[DMODEL]],
'psf_view': views[TABS_INDEXES[PSF]],
'lsf_view': views[TABS_INDEXES[LSF]],
'galaxy_model_view': views[TABS_INDEXES[GMODEL]],
'fitter_view': views[TABS_INDEXES[FITTER]],
'params_view': views[TABS_INDEXES[PARAMS]],
# 'max_file_size': MAX_FILE_SIZE
}
)
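All of the edit views share the same pattern: act_on_request_method_edit returns parallel lists of forms and views that are indexed per tab. A minimal sketch, for illustration only, of what the TABS_INDEXES mapping could look like (the real constants and mapping are defined elsewhere in the application and may differ):

# Hypothetical sketch only: actual tab constants and TABS_INDEXES live
# elsewhere in the app and may use different values.
START, DATASET, DMODEL, PSF, LSF = 'start', 'dataset', 'data_model', 'psf', 'lsf'
GMODEL, FITTER, PARAMS = 'galaxy_model', 'fitter', 'params'

TABS_INDEXES = {
    START: 0, DATASET: 1, DMODEL: 2, PSF: 3,
    LSF: 4, GMODEL: 5, FITTER: 6, PARAMS: 7,
}

# act_on_request_method_edit is assumed to return parallel lists of bound
# forms and rendered views, one entry per tab, so that
# forms[TABS_INDEXES[FITTER]] selects the fitter form for the template.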
|
[
"def edit_form(self, resource):\n fs = FieldSet(resource.model)\n return fs.render()",
"def browse_edit():\n # if user is creating a new empty deck, render only the deckname field with newtermfield and newdeffield\n # if user is editing a preexisting deck, render the deckname field, all cards as term, definition, a term/def row for new entries, a \"return to decks\" button, and a \"review this deck\" button\n \n if request.method == 'POST':\n class _BrowseEditForm(BrowseEditForm):\n pass\n\n # print(request.form)\n\n # set decktitle field to the deck's name:\n deckid = request.form['hidden_deckid_field']\n deck = Deck.query.get(deckid)\n setattr(_BrowseEditForm.deckname, 'default', deck.deckname)\n\n browse_edit_form = _BrowseEditForm()\n class _CardForm(CardForm):\n pass\n \n for card in Card.query.filter_by(deck_id=deckid).all():\n _CardForm.term = card.term\n _CardForm.definition = card.definition\n browse_edit_form.cards.append_entry(_CardForm())\n # for empty decks, `cards` should not render.\n if browse_edit_form.validate_on_submit:\n return render_template('browse_edit.html', title='Enter a card', form=browse_edit_form)\n return redirect(url_for('browse_edit.html'))",
"def show_update_form():\n\n current_user = session.get('current_user')\n user_obj = crud.get_user_by_id(current_user)\n\n return render_template(\"update_info.html\")",
"def editForm(self,id,table=1):\t\n\t\tif table:\n\t\t\tform=tableForm(server['query'] + '&action=do')\n\t\telse:\n\t\t\tform=Form(server['query'] + '&action=do')\n\t\ttables=[ i for i in self.db.query('SHOW TABLES;') ]\n\t\tfor row in self.content:\n\t\t\tif str(row['id']) == str(id):\n\t\t\t\tself.db.cur.execute('SHOW FULL COLUMNS FROM ' + self.name)\n\t\t\t\ttb = self.db.cur.fetchall()\n\t\t\t\tfor field in tb:\n\t\t\t\t\tfname = field[0]\n\t\t\t\t\tftype = field[1]\n\t\t\t\t\tfdefault = field[5]\n\t\t\t\t\tfcomment = field[8]\n\t\t\t\t\tfdesc = fname[0].upper() + fname[1:].lower() + ':'\n\t\t\t\t\tif fname == 'id':\n\t\t\t\t\t\tform.hidden(fname,row[fname])\n\t\t\t\t\telif fname[-3:] == '_id' and fname[:-3] in tables:\n\t\t\t\t\t\t# Relations N - 1\n\t\t\t\t\t\tocolumns = self.db.query('SHOW COLUMNS FROM ' + fname[:-3])\n\t\t\t\t\t\torows=self.db.query('SELECT ' + ocolumns[0] + ', ' + ocolumns[1] + ' FROM ' + fname[:-3])\n\t\t\t\t\t\tdict={}\n\t\t\t\t\t\tfor orow in orows:\n\t\t\t\t\t\t\tdict.update({orow[1]:orow[0]})\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.select(fdesc,fname,dict,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.select(fname,dict,row[fname])\t\t\t\t\t\t\n\t\t\t\t\telif ftype == 'text':\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.text(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.text(fname,row[fname])\n\t\t\t\t\telif fcomment == 'pass':\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.pwd(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.pwd(fname,row[fname])\n\t\t\t\t\telse:\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.input(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.input(fname,row[fname])\n\t\tform.submit('Edit')\n\t\treturn form.end()",
"def edit_formazione(self, event):\n self.Disable()\n ViewFormazione(parent=self, title='Formazione')",
"def show_edit_post_form(id):\n post = Post.query.get_or_404(id)\n tags = Tag.query.all()\n\n return render_template(\"post_edit.html\" , post=post , tags=tags)",
"def edit_job_psf(request, id):\n\n active_tab = PSF\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def fase_page():\n return fase_form()",
"def forms(request):\n return render(request, \"forms.html\", {})",
"def update(self):\n\n self.fields = self.getOverriderFields()\n z3c.form.form.EditForm.update(self)",
"def show_edit_tag_form(id):\n tag = Tag.query.get_or_404(id)\n return render_template(\"tag_edit.html\" , tag=tag)",
"def post_edit_form(post_id):\n posts = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n return render_template(\"/edit-post.html\", posts=posts, tags=tags)",
"def edit_job_lsf(request, id):\n\n active_tab = LSF\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def show_edit_post_form(postid):\n post = Post.query.get(postid)\n return render_template('edit_post.html', post=post)",
"def get_forms(self):\n metadata = self.get_metadata()\n\n MetadataForm = self.get_metadata_form_class()\n metadata_form = MetadataForm(\n self.form_data,\n instance=metadata,\n category=self.trs_import.doc_category)\n\n revision_num = self.csv_data['revision']\n revision = metadata.get_revision(revision_num) if metadata else None\n\n RevisionForm = self.get_revision_form_class()\n revision_form = RevisionForm(\n self.form_data,\n instance=revision,\n category=self.trs_import.doc_category)\n\n return metadata_form, revision_form",
"def display_pet_details_and_edit_form(pet_id):\n pet = Pet.query.get_or_404(pet_id)\n form = EditPetForm(obj=pet)\n if form.validate_on_submit():\n print(\"*!*!*!*!*! IT WORKED !*!!\"*10)\n pet.photo_url=form.photo_url.data\n pet.notes=form.notes.data\n pet.available=form.available.data\n db.session.commit()\n flash(f\"Edited pet: {pet.name}\")\n return redirect(f\"/{pet_id}\")\n else:\n return render_template(\"edit_pet.html\", form=form, pet=pet)",
"def msg_edit_form(msg_id):\n found_msg = Message.query.get(msg_id)\n tags = Tag.query.all()\n return render_template('msg_edit.html', found_msg=found_msg, tags=tags)",
"def edit_personal_info(request):\n if 'is_active' in request.session:\n if request.method == 'POST':\n personal_information_form = PersonalInformationForm(request.POST)\n if personal_information_form.is_valid():\n personal_information_data = personal_information_form.cleaned_data\n personal_info_obj = PersonalInformation()\n personal_info_data = {\n 'forename': personal_information_data['forename'],\n 'surname': personal_information_data['surname'],\n 'address': {\n 'addressLine1': personal_information_data['addressLine1'],\n 'addressLine2': personal_information_data['addressLine2'],\n 'addressLine3': personal_information_data['addressLine3'],\n 'city': personal_information_data['city'],\n 'country': personal_information_data['country'],\n 'postcode': personal_information_data['postcode'],\n },\n 'mobile': personal_information_data['mobile'],\n 'email': personal_information_data['email']\n }\n personal_info_obj.save_personal_info(personal_info_data)\n messages.success(request, _(\"Your information has been updated with Magpie.\"))\n return HttpResponseRedirect('/my_account/')\n else:\n return render(request, \"WebApp/users/edit_personal_info.html\",\n {\"form\": personal_information_form})\n else:\n personal_information_form = webapp_utils.add_initial_values(\n webapp_utils.get_personal_info(request), PersonalInformationForm())\n return render(request, \"WebApp/users/edit_personal_info.html\",\n {\"form\": personal_information_form})\n else:\n return HttpResponseRedirect('/')",
"def getEditgameUpdateForm (game):\n\n\tfrom gluon import current, redirect, URL, SQLFORM\n\tdb = current.db\n\n\t#Hide some fields of the form\n\thideFields (db.game, ['id', 'host_id', 'game_status', 'password'])\n\n\tformUpdate = SQLFORM(db.game, game.id)\n\tformUpdate.add_class('assassins-form')\n\n\tif formUpdate.process().accepted:\n\t\tresizeImage(db.game, game.id)\n\t\tredirect(getUrl('edit', game.id))\n\n\treturn formUpdate"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Form to edit the params information. It also returns the forms to be rendered in the other tabs (models).
|
def edit_job_params(request, id):
active_tab = PARAMS
active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)
return render(
request,
"job/edit.html",
{
'job_id': id,
'active_tab': active_tab,
'disable_other_tabs': False,
'start_form': forms[TABS_INDEXES[START]],
'dataset_form': forms[TABS_INDEXES[DATASET]],
'data_model_form': forms[TABS_INDEXES[DMODEL]],
'psf_form': forms[TABS_INDEXES[PSF]],
'lsf_form': forms[TABS_INDEXES[LSF]],
'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],
'fitter_form': forms[TABS_INDEXES[FITTER]],
'params_form': forms[TABS_INDEXES[PARAMS]],
'start_view': views[TABS_INDEXES[START]],
'dataset_view': views[TABS_INDEXES[DATASET]],
'data_model_view': views[TABS_INDEXES[DMODEL]],
'psf_view': views[TABS_INDEXES[PSF]],
'lsf_view': views[TABS_INDEXES[LSF]],
'galaxy_model_view': views[TABS_INDEXES[GMODEL]],
'fitter_view': views[TABS_INDEXES[FITTER]],
'params_view': views[TABS_INDEXES[PARAMS]],
# 'max_file_size': MAX_FILE_SIZE
}
)
|
[
"def get_parameters_form():",
"def ShowForm(self, _):\n return {'layer_model': model.Layer}",
"def edit_job_psf(request, id):\n\n active_tab = PSF\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def editForm(self,id,table=1):\t\n\t\tif table:\n\t\t\tform=tableForm(server['query'] + '&action=do')\n\t\telse:\n\t\t\tform=Form(server['query'] + '&action=do')\n\t\ttables=[ i for i in self.db.query('SHOW TABLES;') ]\n\t\tfor row in self.content:\n\t\t\tif str(row['id']) == str(id):\n\t\t\t\tself.db.cur.execute('SHOW FULL COLUMNS FROM ' + self.name)\n\t\t\t\ttb = self.db.cur.fetchall()\n\t\t\t\tfor field in tb:\n\t\t\t\t\tfname = field[0]\n\t\t\t\t\tftype = field[1]\n\t\t\t\t\tfdefault = field[5]\n\t\t\t\t\tfcomment = field[8]\n\t\t\t\t\tfdesc = fname[0].upper() + fname[1:].lower() + ':'\n\t\t\t\t\tif fname == 'id':\n\t\t\t\t\t\tform.hidden(fname,row[fname])\n\t\t\t\t\telif fname[-3:] == '_id' and fname[:-3] in tables:\n\t\t\t\t\t\t# Relations N - 1\n\t\t\t\t\t\tocolumns = self.db.query('SHOW COLUMNS FROM ' + fname[:-3])\n\t\t\t\t\t\torows=self.db.query('SELECT ' + ocolumns[0] + ', ' + ocolumns[1] + ' FROM ' + fname[:-3])\n\t\t\t\t\t\tdict={}\n\t\t\t\t\t\tfor orow in orows:\n\t\t\t\t\t\t\tdict.update({orow[1]:orow[0]})\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.select(fdesc,fname,dict,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.select(fname,dict,row[fname])\t\t\t\t\t\t\n\t\t\t\t\telif ftype == 'text':\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.text(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.text(fname,row[fname])\n\t\t\t\t\telif fcomment == 'pass':\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.pwd(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.pwd(fname,row[fname])\n\t\t\t\t\telse:\n\t\t\t\t\t\tif table:\n\t\t\t\t\t\t\tform.input(fdesc,fname,row[fname])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tform.input(fname,row[fname])\n\t\tform.submit('Edit')\n\t\treturn form.end()",
"def getConfigureForm(room_jid):",
"def param2form(self, dico, verbose=DEBUG):\n myform = {} # a dico to handle widgets in the form\n for vb in self.form.children:\n myform[vb.description] = vb\n keys = myform.keys() # keys of form\n # then propagate\n for k,v in dico.items():\n k = k.replace('_',' ')\n if k not in keys:\n if verbose:\n print ('key not in form:',k)\n else:\n myform[k].value = v",
"def edit_form(self, resource):\n fs = FieldSet(resource.model)\n return fs.render()",
"def getEditgameUpdateForm (game):\n\n\tfrom gluon import current, redirect, URL, SQLFORM\n\tdb = current.db\n\n\t#Hide some fields of the form\n\thideFields (db.game, ['id', 'host_id', 'game_status', 'password'])\n\n\tformUpdate = SQLFORM(db.game, game.id)\n\tformUpdate.add_class('assassins-form')\n\n\tif formUpdate.process().accepted:\n\t\tresizeImage(db.game, game.id)\n\t\tredirect(getUrl('edit', game.id))\n\n\treturn formUpdate",
"def edit_job_data_model(request, id):\n active_tab = DMODEL\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def update(self):\n\n self.fields = self.getOverriderFields()\n z3c.form.form.EditForm.update(self)",
"def show_update_form():\n\n current_user = session.get('current_user')\n user_obj = crud.get_user_by_id(current_user)\n\n return render_template(\"update_info.html\")",
"def prepare_form(self):\n raise NotImplementedError(\"Just use get_form() method instead\")",
"def launch(request, id):\n\n active_tab = LAUNCH\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n if active_tab != SUBMITTED:\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )\n else:\n return redirect('job_list')",
"def edit_job_galaxy_model(request, id):\n\n active_tab = GMODEL\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def edit(request, id):\r\n id = int(id)\r\n msg=\"\"\r\n if request.method == 'POST': \r\n form = GraphForm(request.POST) \r\n msg = validate_json(request.POST['nodes'])\r\n if not msg and form.is_valid(): \r\n g = Graph.get_by_id(id)\r\n g.name=form.cleaned_data['name']\r\n g.nodes=form.cleaned_data['nodes'] \r\n g.put()\r\n memcache.delete('stats')\r\n return HttpResponseRedirect('/graphs/') \r\n else:\r\n g = Graph.get_by_id(id)\r\n form = GraphForm(initial={'name':g.name,'nodes':g.nodes}) \r\n return render(request, 'detail.html', {\r\n 'form': form, 'msg':msg,\r\n })",
"def show_edit_post_form(id):\n post = Post.query.get_or_404(id)\n tags = Tag.query.all()\n\n return render_template(\"post_edit.html\" , post=post , tags=tags)",
"def edit_job_fitter(request, id):\n\n active_tab = FITTER\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def edit_job_lsf(request, id):\n\n active_tab = LSF\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def edit_formazione(self, event):\n self.Disable()\n ViewFormazione(parent=self, title='Formazione')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Form to launch a job (changes the job status to submitted). It also returns the forms to be rendered in the other tabs (models).
|
def launch(request, id):
active_tab = LAUNCH
active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)
if active_tab != SUBMITTED:
return render(
request,
"job/edit.html",
{
'job_id': id,
'active_tab': active_tab,
'disable_other_tabs': False,
'start_form': forms[TABS_INDEXES[START]],
'dataset_form': forms[TABS_INDEXES[DATASET]],
'data_model_form': forms[TABS_INDEXES[DMODEL]],
'psf_form': forms[TABS_INDEXES[PSF]],
'lsf_form': forms[TABS_INDEXES[LSF]],
'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],
'fitter_form': forms[TABS_INDEXES[FITTER]],
'params_form': forms[TABS_INDEXES[PARAMS]],
'start_view': views[TABS_INDEXES[START]],
'dataset_view': views[TABS_INDEXES[DATASET]],
'data_model_view': views[TABS_INDEXES[DMODEL]],
'psf_view': views[TABS_INDEXES[PSF]],
'lsf_view': views[TABS_INDEXES[LSF]],
'galaxy_model_view': views[TABS_INDEXES[GMODEL]],
'fitter_view': views[TABS_INDEXES[FITTER]],
'params_view': views[TABS_INDEXES[PARAMS]],
# 'max_file_size': MAX_FILE_SIZE
}
)
else:
return redirect('job_list')
|
[
"def quick_jobpost(context):\n context[\"form\"] = JobPostForm()\n return context",
"def start(request):\n active_tab = START\n if request.method == 'POST':\n form = FORMS_NEW[active_tab](request.POST, request=request)\n active_tab = save_form(form, request, active_tab)\n else:\n form = FORMS_NEW[active_tab](request=request)\n\n if active_tab == START:\n return render(\n request,\n \"job/create.html\",\n {\n 'active_tab': active_tab,\n 'disable_other_tabs': True,\n 'start_form': form,\n }\n )\n else:\n return redirect('job_data_model_edit', id=request.session['draft_job']['id'])",
"def edit_job_name(request, id):\n active_tab = START\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def edit_job_psf(request, id):\n\n active_tab = PSF\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def edit_job_lsf(request, id):\n\n active_tab = LSF\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def display_form():\n\n roles = [\"Software Engineer\", \"QA Engineer\", \"Product Manager\"]\n return render_template(\"application-form.html\",\n jobs=roles)",
"def edit_job_data_model(request, id):\n active_tab = DMODEL\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def edit_job_params(request, id):\n\n active_tab = PARAMS\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def edit_job_galaxy_model(request, id):\n\n active_tab = GMODEL\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def seejob(request):\n return render(\n request, 'beweb/view_job.html'\n )",
"def edit_job_dataset(request, id):\n\n active_tab = DATASET\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def edit_job_fitter(request, id):\n\n active_tab = FITTER\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def submit(self):\n subprocess.run([self.launch_command, str(self.job_filename)])\n print('Job sent: ', [self.launch_command, str(self.job_filename)])",
"def job_submitted(self, job, jobid):\n self.job_set_status(job, 'SUBMIT', \"batchjobid={}\".format(jobid))",
"def completion_task_form(task, post_data, with_default=True):\n\n class CompletionTaskForm(Form):\n\n \"\"\"\n Form to allow altering the completion status of tasks\n \"\"\"\n\n # completion % of task\n task_completion_status = SelectField(u'Completion (%)', coerce=int)\n\n def __init__(self, formdata=None, obj=None, prefix='', assign_default=True, **kwargs):\n\n \"\"\"\n Override init to provide default data to form\n \"\"\"\n\n if assign_default:\n kwargs.setdefault('task_completion_status', task.completion_status)\n Form.__init__(self, formdata, obj, prefix, **kwargs)\n self.task_completion_status.choices = self.choices()\n\n def choices(self):\n # choices for select field\n fhoices = [\n (0, ' 0%: Not started'),\n (20, '20%: Started'),\n (40, '40%: Implementing'),\n (60, '60%: Debugging/Bugfixing'),\n (80, '80%: Ready for review'),\n (100, '100%: Completed'),\n ]\n return fhoices\n\n # init form\n form = CompletionTaskForm(post_data, assign_default=with_default)\n\n # return form\n return form",
"def show(self):\n return self._job",
"def load_jobs(request):\n if request.method == 'POST':\n form = UploadJobsForm(request.POST, request.FILES)\n load_jobs_file(request.FILES['file'])\n if form.is_valid():\n return HttpResponse('<pre>Uploaded jobs</pre>') \n else:\n form = UploadJobsForm()\n\n vars = RequestContext(request, {'form': form})\n return render_to_response('jobs/upload_jobs.html', vars)",
"def job_overview(request, id):\n\n active_tab = LAUNCH\n # This could be cleaned to avoid getting forms and only gather views.\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/job_overview.html\",\n {\n 'job_id': id,\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n }\n )",
"def submit(url, job, headers, inputs=[],verbose=True):\n my_headers = headers.copy()\n my_headers['Content-Type']=\"application/json\"\n if len(inputs)>0:\n # make sure UNICORE does not start the job \n # before we have uploaded data\n job['haveClientStageIn']='true'\n \n r = requests.post(url,data=json.dumps(job), headers=my_headers, verify=False)\n if r.status_code!=201:\n if r.status_code==500:\n jobURL=''\n js = \"<script>alert('System is in maintenance. Please try again later.');</script>\"\n if verbose==True:\n display(HTML(js))\n else:\n if r.status_code==403:\n jobURL=''\n js = \"<script>alert('Authentication service is restarting. Please try again later.');</script>\"\n if verbose==True:\n display(HTML(js))\n else:\n raise RuntimeError(\"Error submitting job: %s\" % r.status_code)\n else:\n jobURL = r.headers['Location']\n\n # upload input data and explicitely start job\n if len(inputs)>0:\n working_directory = get_working_directory(jobURL, headers)\n for input in inputs:\n upload(working_directory+\"/files\", input, headers)\n try:\n invoke_action(jobURL, \"start\", headers)\n except:\n pass \n return jobURL"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Function to handle the overview view of a job
|
def job_overview(request, id):
active_tab = LAUNCH
# This could be cleaned to avoid getting forms and only gather views.
active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)
return render(
request,
"job/job_overview.html",
{
'job_id': id,
'start_view': views[TABS_INDEXES[START]],
'dataset_view': views[TABS_INDEXES[DATASET]],
'data_model_view': views[TABS_INDEXES[DMODEL]],
'psf_view': views[TABS_INDEXES[PSF]],
'lsf_view': views[TABS_INDEXES[LSF]],
'galaxy_model_view': views[TABS_INDEXES[GMODEL]],
'fitter_view': views[TABS_INDEXES[FITTER]],
'params_view': views[TABS_INDEXES[PARAMS]],
}
)
|
[
"def overview(self):\n return _execute_rest_request(url=f\"{self.prefix}/overview\")[\"jobs\"]",
"def seejob(request):\n return render(\n request, 'beweb/view_job.html'\n )",
"def show(self):\n return self._job",
"def info(job_id):\n print(json.dumps(API().info(job_id), indent=True))",
"def ShowJobList():\n tags = FrontendJob.ListJobs()\n page_title = 'Active Jobs'\n\n if not tags:\n return flask.render_template('log.html', body='No active job.',\n title=page_title)\n\n html = ''\n for tag in tags:\n html += flask.Markup(\n '<li><a href=\"%s\">%s</a></li>') % (FrontendJob.GetJobURL(tag), tag)\n html += flask.Markup('</ul>')\n return flask.render_template('log.html', body=html, title=page_title)",
"def do_backupjob_show(cs, args):\n backupjob = _find_backupjob(cs, args.backupjob_id)\n info = dict()\n info.update(backupjob._info)\n\n if 'links' in info:\n info.pop('links')\n\n utils.print_dict(info)",
"def show_job_info(job_id='',show_output=False):\n from balsam.launcher.dag import BalsamJob as Job\n import pathlib\n jobs = Job.objects.all().filter(job_id__contains=job_id)\n if len(jobs) == 1:\n thejob = jobs[0]\n print(jobs[0])\n if show_output:\n output = f'{thejob.working_directory}/{thejob.name}.out'\n if pathlib.Path(output).is_file():\n with open(output) as f:\n out = f.read()\n print(f'Output file {output} content:')\n print(out)\n else:\n print(f'{output} not found.')\n print(f'Job state: {thejob.state}')\n if thejob.state =='CREATED':\n print('The job has not run yet.')\n elif len(jobs) == 0:\n print('No matching jobs')\n else:\n print(f'{len(jobs)} jobs matched, enter full id.')\n print('Matched jobs:')\n for job in jobs:\n print(f'{job.name}: {job.job_id} ')\n return",
"def edit_job_name(request, id):\n active_tab = START\n active_tab, forms, views = act_on_request_method_edit(request, active_tab, id)\n\n return render(\n request,\n \"job/edit.html\",\n {\n 'job_id': id,\n 'active_tab': active_tab,\n 'disable_other_tabs': False,\n\n 'start_form': forms[TABS_INDEXES[START]],\n 'dataset_form': forms[TABS_INDEXES[DATASET]],\n 'data_model_form': forms[TABS_INDEXES[DMODEL]],\n 'psf_form': forms[TABS_INDEXES[PSF]],\n 'lsf_form': forms[TABS_INDEXES[LSF]],\n 'galaxy_model_form': forms[TABS_INDEXES[GMODEL]],\n 'fitter_form': forms[TABS_INDEXES[FITTER]],\n 'params_form': forms[TABS_INDEXES[PARAMS]],\n\n 'start_view': views[TABS_INDEXES[START]],\n 'dataset_view': views[TABS_INDEXES[DATASET]],\n 'data_model_view': views[TABS_INDEXES[DMODEL]],\n 'psf_view': views[TABS_INDEXES[PSF]],\n 'lsf_view': views[TABS_INDEXES[LSF]],\n 'galaxy_model_view': views[TABS_INDEXES[GMODEL]],\n 'fitter_view': views[TABS_INDEXES[FITTER]],\n 'params_view': views[TABS_INDEXES[PARAMS]],\n # 'max_file_size': MAX_FILE_SIZE\n }\n )",
"def describe_jobs(self, jobs: List) -> Dict:\n pass",
"def list_jobs(self, status=True):",
"def job_info_html(cls, job_id, job_type='jobs'):\n url = '/%s/%s' % (job_type, job_id)\n rv = cls.app.get(url)\n assert rv.status_code == 200, 'Cannot get info from job %s. \"%s\" returned %s' % (job_id, url, rv.status_code)\n return rv.data",
"def listjobsbyrun(request, runtag):\n\n # Put the column headers into a dict form the template can understand.\n colheads = [ ( \"Job Name\", \"jobname\" ),\n ( \"Start Time\", \"start_time\" ),\n ( \"Wall Time\", \"walltime\" ),\n ( \"Exit Stage\", \"exit_status__stage_fail\" ),\n ( \"Status\", \"exit_status__description\" ), ]\n\n # Sort out any other filters we might have specified.\n allowed_filters = [attr for (name, attr) in colheads]\n allowed_filters += ['walltime__gt', 'walltime__lt']\n jobfilters = dict([(key, val) for (key, val) in request.GET.items()\n if key in allowed_filters])\n\n # Get all the jobs associated with a particular run, and use the\n # \"order_by\" field to order the QuerySet accordingly.\n sort_headers = SortHeaders(request, colheads,\n default_order_field=0,\n default_order_type='asc',\n additional_params=jobfilters)\n run = js.PipelineRun.objects.get(runtag=runtag)\n job_list = run.pipelinejob_set.select_related('exit_status')\n if len(jobfilters) < 1: job_list = job_list.all()\n else: job_list = job_list.filter(**jobfilters)\n job_list = job_list.order_by(sort_headers.get_order_by())\n\n # Put the data themselves into a dict form the template can understand.\n rows = [ ({ 'html': \"class=nowrap\", 'data': job.jobname, },\n { 'html': \"class=nowrap\", 'data': job.start_time, },\n { 'html': \"class=nowrap\", 'data': \"{0:.1f} sec\".format(job.walltime), },\n { 'html': \"class=nowrap\", 'data': job.exit_status.stage_fail, },\n { 'data': job.exit_status.description }) for job in job_list ]\n\n # Add some useful links to the data in the columns.\n for r in rows:\n # The user can click on a job name to view the log file.\n r[0]['url'] = reverse('joblogs',args=(r[0]['data'],))\n # The user can filter on failure conditions just by clicking on them.\n for i in (3, 4):\n tmpdict = dict(request.GET.items())\n tmpdict.update({ allowed_filters[i]: r[i]['data'] })\n r[i]['url'] = '?{0}'.format('&'.join(\n ['{0}={1}'.format(key, val) for key, val in tmpdict.items()]))\n\n # Render the page\n return render_to_response(\"listjobsby_run.html\",\n { \"colheads\": colheads, \"rows\": rows, \"runtag\": runtag,\n \"headers\": list(sort_headers.headers()),\n \"jobfilters\" : jobfilters },\n context_instance=RequestContext(request))",
"def editjob(request):\n job_id = request.GET.get('q', '')\n username = request.user.username\n usr, tkn = user_authenticate(username)\n headers = {'Authorization': \"Token \"+tkn +\n \"\", \"Content-Type\": \"application/json\"}\n\n url = \"http://172.20.0.70:8087/beapi/job/\" + job_id+\"/\"\n r = requests.get(url=url, headers=headers)\n data = r.json()\n team_data = data[0]['job_progress'][0]['jobteam_members']\n end_date = data[0]['job_progress'][0]['end_dt']\n team_leader = data[0]['job_progress'][0]['jobteam_members'][0]['teamleader']\n employee_data = execsys(team_leader)\n fullname = employee_data['firstname'] + \" \" + employee_data['surname']\n context = {\n \"job\": data[0],\n \"team_data\": data[0]['job_progress'][0]['jobteam_members'],\n \"team_members\": len(team_data),\n \"open_mileage\": data[0]['job_progress'][0]['open_mileage'],\n \"close_mileage\": data[0]['job_progress'][0]['close_mileage'],\n \"status\": data[0]['job_progress'][0]['status'],\n \"start_date\": data[0]['job_progress'][0]['start_dt'],\n \"fleet\": data[0]['job_progress'][0]['fleet_no'],\n \"job_progress\": data[0]['job_progress'],\n \"team_leader\": fullname\n }\n return render(request, 'beweb/job/jobedit.html', context)",
"def addjobinfo(self):\n self.job = {}\n if self.sid:\n try:\n response, content = rest.simpleRequest('search/jobs/%s' % self.sid,\n sessionKey=self.session_key,\n getargs={'output_mode': 'json'})\n if response.status == 200:\n self.job = json.loads(content)['entry'][0]['content']\n self.message('Successfully retrieved search job info')\n self.logger.debug(self.job)\n else:\n self.message('Could not retrieve search job info', level=logging.WARN)\n except Exception as e:\n self.message('Could not retrieve search job info', level=logging.WARN)",
"def print_job_info(job):\n print_console()\n print_console(job.center(80, \"=\"))\n print_console()\n print_console(\" last state: %s\" % (jobs[job].state))\n parse_submit_file(jobs[job])\n\n # Handle subdag jobs from the dag file\n if jobs[job].is_subdag is True:\n print_console(\" This is a SUBDAG job:\")\n print_console(\" For more information, please run the following command:\")\n user_cmd = \" %s -s \" % (prog_base)\n if output_dir is not None:\n user_cmd = user_cmd + \" --output-dir %s\" % (output_dir)\n print_console(f\"{user_cmd} -f {jobs[job].dag_path}\")\n print_console()\n return\n\n sub_wf_cmd = None\n if jobs[job].sub_file_parsed is False:\n print_console(\" site: submit file not available\")\n else:\n print_console(\" site: %s\" % (jobs[job].site or \"-\"))\n print_console(\"submit file: %s\" % (jobs[job].sub_file))\n print_console(\"output file: %s\" % (find_latest_log(jobs[job].out_file)))\n print_console(\" error file: %s\" % (find_latest_log(jobs[job].err_file)))\n if print_invocation:\n print_console()\n print_console(\n \"To re-run this job, use: %s %s\"\n % (jobs[job].executable, jobs[job].arguments)\n )\n print_console()\n if print_pre_script and len(jobs[job].pre_script) > 0:\n print_console()\n print_console(\"SCRIPT PRE:\")\n print_console(jobs[job].pre_script)\n print_console()\n if len(jobs[job].dagman_out) > 0:\n # This job has a sub workflow\n user_cmd = \" %s\" % (prog_base)\n if output_dir is not None:\n user_cmd = user_cmd + \" --output-dir %s\" % (output_dir)\n\n # get any options that need to be invoked for the sub workflow\n extraOptions = addon(options)\n sub_wf_cmd = \"{} {} -d {}\".format(\n user_cmd, extraOptions, os.path.split(jobs[job].dagman_out)[0],\n )\n\n if not recurse_mode:\n print_console(\" This job contains sub workflows!\")\n print_console(\" Please run the command below for more information:\")\n print_console(sub_wf_cmd)\n\n print_console()\n print_console()\n\n # Now dump file contents to screen if we are not in quiet mode\n if not quiet_mode:\n print_output_error(jobs[job])\n\n # recurse for sub workflow\n if sub_wf_cmd is not None and recurse_mode:\n print_console((\"Failed Sub Workflow\").center(80, \"=\"))\n subprocess.Popen(sub_wf_cmd, shell=True).communicate()[0]\n print_console((\"\").center(80, \"=\"))",
"def get_job_info():\n\n # get callback, source, and index\n job_id = request.args.get('id', None)\n if job_id is None:\n return jsonify({\n 'success': False,\n 'message': \"Job ID was not specified.\"\n }), 500\n\n job_info = get_job_info(job_id)\n\n return jsonify({\n 'success': False,\n 'result': job_info\n })",
"def view(job_id: str, debug: bool) -> None:\n try:\n fdp_hist.show_job_log(os.getcwd(), job_id)\n except fdp_exc.FAIRCLIException as e:\n e.err_print()\n if e.level.lower() == \"error\":\n sys.exit(e.exit_code)",
"def display_job_listings_with_apply_link():\r\n result_elements = get_job_listings_from_website()\r\n relevant_jobs = result_elements.find_all('h2',string=re.compile(\"Data Scien*\")) \r\n # print(relevant_jobs)\r\n #print(results.prettify())\r\n for job in relevant_jobs:\r\n link = job.find('a')['href']\r\n print(job.text.strip())\r\n print(f\"Apply here: {link}\\n\")",
"def list_jobs(state='ALL',workflow='ALL',app='ALL',name=''):\n from balsam.launcher.dag import BalsamJob as Job\n from balsam.core.models import ApplicationDefinition as App\n jobs = Job.objects.all()\n print(f'Total number of jobs: {len(jobs)}')\n if state != 'ALL':\n jobs = jobs.filter(state=state)\n if workflow != 'ALL':\n jobs = jobs.filter(workflow=workflow)\n if app != 'ALL':\n jobs = jobs.filter(application=app)\n if name:\n jobs = jobs.filter(name__icontains=name)\n print(f'Selected number of jobs: {len(jobs)}')\n if len(jobs) > 0: \n t = '{:<20}'.format('Name')\n t += ' {:>8}'.format('Nodes')\n t += ' {:>12}'.format('Ranks')\n t += ' {:^8}'.format('ID')\n if state =='JOB_FINISHED':\n t += '{:>12}'.format('Runtime')\n elif state =='ALL':\n t += '{:>15}'.format('State')\n print(t)\n for job in jobs:\n s = '{:<20.15}'.format(job.name)\n s += ' {:>8}'.format(job.num_nodes)\n s += ' {:>12}'.format(job.num_ranks)\n s += ' {:>8}'.format(str(job.job_id).split('-')[0]) \n\n if state =='JOB_FINISHED':\n s += '{:>12.3f}'.format(job.runtime_seconds)\n elif state =='ALL':\n s += '{:>15}'.format(job.state)\n print(s)\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Transfer the date input information to the corresponding frequency
|
import math


def date_freq_transfer(date, freq):
    # date is expected as an ISO 'YYYY-MM-DD' string
    year = date[0:4]
    month = date[5:7]
    day = date[8:10]
    if freq == 'monthly':
        date2 = year + '-' + month
    elif freq == 'quarterly':
        quarter = math.ceil(int(month) / 3)
        date2 = year + 'Q' + str(quarter)
    elif freq == 'daily':
        date2 = year + '-' + month + '-' + day
    elif freq == 'annually':
        date2 = year
    else:
        raise ValueError('Unsupported frequency: ' + freq)
    return date2
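A minimal usage sketch with hypothetical inputs; the outputs follow directly from the slicing and rounding logic above:

# Hypothetical example calls (not part of the original module).
print(date_freq_transfer('2021-05-17', 'monthly'))    # '2021-05'
print(date_freq_transfer('2021-05-17', 'quarterly'))  # '2021Q2'
print(date_freq_transfer('2021-05-17', 'daily'))      # '2021-05-17'
print(date_freq_transfer('2021-05-17', 'annually'))   # '2021'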
|
[
"def get_model_data_per_date(date):",
"def convertfreq(freq):\r\n\r\n freq = freq.upper()\r\n\r\n days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']\r\n months = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP',\r\n 'OCT', 'NOV', 'DEC']\r\n\r\n weekoffsets = ['W-%s' % d for d in days]\r\n qtroffsets = ['Q-%s' % m for m in months] \\\r\n + ['QS-%s' % m for m in months] \\\r\n + ['BQ-%s' % m for m in months] \\\r\n + ['BQS-%s' % m for m in months]\r\n annoffsets = ['A-%s' % m for m in months] \\\r\n + ['AS-%s' % m for m in months] \\\r\n + ['BA-%s' % m for m in months] \\\r\n + ['BAS-%s' % m for m in months]\r\n\r\n freqs = {'D' : 252., 'W' : 52., 'M' : 12., 'Q' : 4., 'A' : 1.}\r\n freqs.update(zip(weekoffsets, [52.] * len(weekoffsets)))\r\n freqs.update(zip(qtroffsets, [4.] * len(qtroffsets)))\r\n freqs.update(zip(annoffsets, [1.] * len(annoffsets)))\r\n\r\n return freqs[freq]",
"def freq_per_day_of_the_week(self):\n feat = [int(log.split('\\t')[4]) for log in self.userdata[1:]]\n freq = collections.Counter(feat)\n for i in range(1, 8):\n if freq.has_key(i) is False:\n freq[i] = 0\n return freq",
"def _get_frequency(dates):\n dd = np.diff(dates)\n frequencies, counts = np.unique([d.total_seconds() for d in dd], return_counts=True)\n frequencies = np.ceil(frequencies / 3600 / 24).astype('int')\n if len(frequencies) == 1:\n frequency = frequencies[0]\n elif counts.max() >= (counts[counts != counts.max()].max() * 10):\n # most frequent is 10x more common than second-most frequent\n frequency = frequencies[np.argmax(counts)]\n else:\n raise ValueError(\n f'Unable to determine representative frequency. '\n f'Found these (with counts): {list(zip(counts, frequencies))}'\n )\n return frequency",
"def _daycount_act_act_Euro(i_date, f_date):",
"def get_model_predictions_by_day(date):",
"def freq(n, dt):\n import numpy as np\n return 1.0*np.arange(n)/n/dt",
"def freq_per_month(self):\n feat = [int(log.split('\\t')[6]) for log in self.userdata[1:]]\n freq = collections.Counter(feat)\n for i in range(1, 13):\n if freq.has_key(i) is False:\n freq[i] = 0\n return freq",
"def _get_annualisation_factor(dates: np.array):\n\n time_delta = (dates[1:] - dates[0:-1])\n average_return_frequency = max(np.mean([x.days for x in time_delta]), 1)\n freq = TSeriesHelper._get_frequency_for_time_diff(average_return_frequency)\n annualising_scaling = np.sqrt(freq)\n\n return annualising_scaling",
"def computeTime(inputData):\n \n import pickle\n data = None\n with open(inputData, \"rb\") as f:\n data = pickle.load(f)\n \n years = {}\n months = {}\n days = {}\n \n for tweet in data:\n year = tweet[3].year\n month = tweet[3].month\n day = tweet[3].day\n # Strings\n year = str(year)\n month = str(month)\n if len(month) == 1:\n month = \"0\" + month\n month = str(year) + month\n day = str(day)\n if len(day) == 1:\n day = \"0\" + day\n day = month + day\n if year in years.keys():\n altvalue = years[year]\n years[year] = altvalue + 1\n else:\n years[year] = 1\n if month in months.keys():\n altvalue = months[month]\n months[month] = altvalue + 1\n else:\n months[month] = 1\n if day in days.keys():\n altvalue = days[day]\n days[day] = altvalue + 1\n else:\n days[day] = 1\n \n# with open(\"data/\"+ inputData + \"_FreqOfyear.db\", \"wb\") as f:\n# pickle.dump(years, f)\n# print(inputData + \"_FreuOfYear.db was stored!\")\n# \n# with open(\"data/\"+ inputData + \"_FreqOfmonth.db\", \"wb\") as f:\n# pickle.dump(months, f)\n# print(inputData + \"_FreuOfmonth.db was stored!\")\n# \n# with open(\"data/\"+ inputData + \"_FreqOfday.db\", \"wb\") as f:\n# pickle.dump(days, f)\n# print(\"data/\"+ inputData + \"_FreuOfday.db was stored!\")\n \n return (years, months, days)",
"def get_freq(self):\n return float(self.ask('FREQ?'))",
"def calculate_frequencies(self, *args):\n self._clear_frequencies()\n self._calculate_freq(*args)",
"def freq_per_hour_daily(self):\n feat = [int(log.split('\\t')[8]) for log in self.userdata[1:]]\n freq = collections.Counter(feat)\n for i in range(24):\n if freq.has_key(i) is False:\n freq[i] = 0\n return freq",
"def temp_to_freq(temp):\n return temp * kb('J/K') / h('J s')",
"def _check_get_freq(data: TimeSeriesData) -> str:\n freq = pd.infer_freq(data.time)\n if freq is None:\n raise ValueError(\n \"Unable to infer time series frequency. Please check for \"\n \"missing or duplicate times or irregularly-spaced times.\"\n )\n\n return freq",
"def calculateFrequency(self):\n repeat = 0\n f =0.0\n with i2clib.I2CMaster() as b:\n results = b.transaction(\n reading(self.add, 5)\n )\n\n uF = results[0][0]&0x3F\n lF = results[0][1]\n # this is probably not the best way of doing this but I was having issues with the\n # frequency being off by as much as 1.5 MHz\n current_freq = round((float(round(int(((int(uF)<<8)+int(lF))*cof/4-22500)/100000)/10)-.2)*10)/10\n return current_freq",
"def processmeasures(df):\n dates = df.date.values\n return dates",
"def set_daily_info(self, date):\n self.mode = MODE_DAILY\n self.date = date",
"def readFreq():\r\n print(\"Reading Frequency Data: \\n\")\r\n f = open(\"frequency.dat\", \"r\")\r\n groceries= {}\r\n hist={}\r\n histVal=\"\"\r\n val=0\r\n name=\"\"\r\n i = 0\r\n end=0\r\n tempVal=0\r\n tempVal2=0\r\n for x in f:\r\n length = len(x)\r\n while i < length:\r\n temp = x[i]\r\n if temp == \"=\":\r\n tempVal+=1\r\n \r\n i += 1\r\n i = 0\r\n \r\n while i < length:\r\n temp = x[i]\r\n if temp == \":\":\r\n end=i\r\n break;\r\n \r\n i += 1\r\n name = x[0:end]\r\n tempx = \"\"\r\n\r\n for j in range(val):\r\n tempx=tempx+\"=\"\r\n \r\n histVal= tempx\r\n val= tempVal\r\n \r\n groceries[name]= val\r\n hist[name]=histVal\r\n tempVal=0\r\n for x in hist.keys():\r\n key = x\r\n space = \" \"\r\n val=hist.get(key)\r\n tempKey = x\r\n tempVal = groceries.get(x)\r\n #print (tempKey.replace(\"\\n\",\"\") + \" \" + str(tempVal))\r\n factorIS= tempKey.replace(\"\\n\",\"\")\r\n factorI = len(factorIS)\r\n factor= 13 - factorI\r\n #print(\"%s: %10d\\n\" % (tempKey.replace(\"\\n\",\"\"), tempVal))\r\n if factor == 1 :\r\n print(\"%s:%21s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n elif factor == 2:\r\n print(\"%s:%20s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n elif factor == 3:\r\n print(\"%s:%23s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n elif factor == 4:\r\n print(\"%s:%24s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n elif factor == 5 :\r\n print(\"%s:%25s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n elif factor == 6:\r\n print(\"%s:%26s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n elif factor == 7:\r\n print(\"%s:%27s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n elif factor == 8:\r\n print(\"%s:%28s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n elif factor == 9 :\r\n print(\"%s:%29s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n else:\r\n print(\"%s:%30s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n \r\n for x in groceries.keys():\r\n key = x\r\n space = \" \"\r\n val=groceries.get(key)\r\n tempKey = x\r\n tempVal = groceries.get(x)\r\n #print (tempKey.replace(\"\\n\",\"\") + \" \" + str(tempVal))\r\n factorIS= tempKey.replace(\"\\n\",\"\")\r\n factorI = len(factorIS)\r\n factor= 13 - factorI\r\n #print(\"%s: %10d\\n\" % (tempKey.replace(\"\\n\",\"\"), tempVal))\r\n if factor == 1 :\r\n print(\"%s:%1s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n elif factor == 2:\r\n print(\"%s:%2s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n elif factor == 3:\r\n print(\"%s:%3s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n elif factor == 4:\r\n print(\"%s:%4s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n elif factor == 5 :\r\n print(\"%s:%5s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n elif factor == 6:\r\n print(\"%s:%6s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n elif factor == 7:\r\n print(\"%s:%7s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n elif factor == 8:\r\n print(\"%s:%8s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n elif factor == 9 :\r\n print(\"%s:%9s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n else:\r\n print(\"%s:%10s%10s\\n\" % (tempKey.replace(\"\\n\",\"\"),space, val))\r\n \r\n \r\n \r\n print(\"Done.\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Select stocks which meet the criteria for user-input stock factors. The criteria should only be filter conditions on factor values, e.g. the user chooses to select the stocks which satisfy net_profit >= 100000.
|
import sqlite3 as sql
import pandas as pd

def stock_screener_filter_condition(conn_path, var_list, date, condition_list, threshold_list,
                                    industry='None', since_ipo={'min': 0, 'max': 30},
                                    view_all=True, top=30):
    # Filter stocks by per-factor conditions (e.g. net_profit >= 100000) as of the given date.
    # Relies on module-level helpers table_lookup(), get_data() and date_freq_transfer().
    var_mapping = pd.read_excel(conn_path + 'value_mapping.xlsx')
    var2 = var_mapping[var_mapping['Chinese'].isin(var_list)]
    var2 = var2.iloc[:, 0]
    conn = sql.connect(conn_path + '/data.db')
    # First factor: look up its table and frequency, convert the date and pull the filtered data.
    freq, table_name = table_lookup(conn, var2.iloc[0])
    date = date_freq_transfer(date, freq)
    db = get_data(conn, var2.iloc[0], date, table_name,
                  condition=condition_list[0], threshold=threshold_list[0])
    db = db.drop_duplicates()
    # Remaining factors: inner-merge on 'Code' so only stocks passing every condition survive.
    n = 1
    while n < len(var_list):
        freq, table_name = table_lookup(conn, var2.iloc[n])
        date = date_freq_transfer(date, freq)
        temp = get_data(conn, var2.iloc[n], date, table_name,
                        condition=condition_list[n], threshold=threshold_list[n])
        temp = temp.drop_duplicates()
        db = db.merge(pd.DataFrame(temp[['Code', var2.iloc[n]]]),
                      how='inner', left_on='Code', right_on='Code')
        n = n + 1
    if db.empty:
        raise ValueError('No Stock meets criteria!')
    # Attach industry labels and time since IPO, then keep stocks inside the since_ipo window (years).
    industry_table = pd.read_excel(conn_path + '/Industry.xlsx', dtype=str)
    db = pd.merge(db, industry_table, how='left', left_on='Code', right_on='Code')
    ipo_date = pd.read_sql('select code as Code,timeToMarket from stock_basics', conn)
    ipo_date['timeToMarket'] = list(map(int, ipo_date['timeToMarket']))
    ipo_date['timeToMarket'] = list(map(str, ipo_date['timeToMarket']))
    ipo_date['timeToMarket'] = pd.to_datetime(ipo_date['timeToMarket'])
    ipo_date['duration'] = pd.to_datetime(date) - ipo_date['timeToMarket']
    db = pd.merge(db, ipo_date, left_on='Code', right_on='Code', how='left')
    db = db[(db['duration'] >= pd.to_timedelta(365 * since_ipo['min'], 'd')) &
            (db['duration'] <= pd.to_timedelta(365 * since_ipo['max'], 'd'))]
    db = db[db['duration'] >= pd.to_timedelta(0)]
    if industry != 'None':
        if isinstance(industry, str):
            db = db[db['Industry'] == industry]
        else:
            db = db[db['Industry'].isin(industry)]
    if db.empty:
        raise ValueError('No Stock meets criteria!')
    if view_all:
        return db
    db = db.iloc[:min(top, len(db)), :]
    return db
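# A minimal usage sketch; the path, factor name and threshold below are hypothetical and depend
# on the local value_mapping.xlsx / data.db layout that this function assumes:
# screened = stock_screener_filter_condition(
#     conn_path='./db/', var_list=['net_profit'], date='2020-05-18',
#     condition_list=['>='], threshold_list=[100000],
#     industry='None', since_ipo={'min': 0, 'max': 30}, view_all=True)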
|
[
"def screen_stocks(df, **kwargs):\r\n\r\n for column, thresholds in kwargs.items():\r\n df = df[(df[column] > thresholds[0]) & (df[column] < thresholds[1]) | (df[column].isnull())]\r\n\r\n ticker_list = list(df.symbol)\r\n\r\n return ticker_list",
"def stock_screener_filter_top(conn_path,var_list,date,order,top,industry='None',since_ipo = {'condition': '>=', 't': 0},in_universe = False):\n var_mapping= pd.read_excel(conn_path+'value_mapping.xlsx')\n var2 = var_mapping[var_mapping['Chinese'].isin(var_list)]\n var2 = (var2.iloc[:,0]) \n if in_universe == True:\n industry2 = 'None'\n since_ipo['min'] = 0\n since_ipo['max'] = 30\n else:\n industry2 = industry\n db = select_top(conn_path,var2.iloc[0],date,industry = industry2,since_ipo = since_ipo,top = top[0],order = order[0])\n n = 1\n while(n<len(var_list)):\n temp = select_top(conn_path,var2.iloc[0],date,industry = industry2,since_ipo = since_ipo,top=top[n],order = order[n])\n db = db.merge(pd.DataFrame(temp.iloc[:,[0,5,6]]),how = 'inner',left_on = 'Code',right_on = 'Code')\n n = n + 1\n if industry == 'None':\n db = db\n else:\n if isinstance(industry,str):\n db = db[db['Industry']==(industry)]\n else:\n db = db[db['Industry'].isin(industry)]\n if(db.empty):\n raise ValueError('No Stock meets criteria!')\n return db",
"def _stockBestBefore(self, username, stock_result, stockType, recipeName,dummyAllocate=0):\n\t\tsys.stderr.write(\"\\nSTART: _stockBestBefore() %s\\n\" %(stockType))\n\t\t# just a bit of protection\n\t\tif not stock_result.has_key( stockType ):\n\t\t\tstock_result[ stockType ] = {}\n\n\t\t# i knew this was going to burn us when we were playing with \n\t\t# adding ingredients\n\t\tif stockType == \"hops\":\n\t\t\tourRecipeIngredients = self.dbWrapper.GqlQuery(\"SELECT * FROM gIngredients WHERE owner = :1 AND recipename = :2 AND ingredientType = :3 AND hopAddAt <= :4\",username,recipeName,stockType,0.0)\n\t\telse:\n\t\t\tourRecipeIngredients = self.dbWrapper.GqlQuery(\"SELECT * FROM gIngredients WHERE owner = :1 AND recipename = :2 AND ingredientType = :3\",username,recipeName,stockType)\n\n\t\t# gIngredients will NOT catch both real recipe ingredients and consumables\n\t\t# need something more but lets get ingredients done first\n\t\t# will need to build this in\n\t\t# if ITEM.category != \"bottle\" and ITEM.category != \"bottlecaps\":\n\n\t\tfor ITEM in ourRecipeIngredients.fetch(40000):\n\t\t\tqty = ITEM.qty\n\t\t\tourStockCheck = self.dbWrapper.GqlQuery(\"SELECT * FROM gPurchases WHERE owner = :1 AND storeitem = :2\",username,ITEM.ingredient)\n\t\t\tourStock = ourStockCheck.fetch(20000)\n\t\t\tif len(ourStock) > 0 :\n#US.has_key( ITEM ):\n\t\t\t\tqtyNeeded = qty\n\t\t\t\t# A future improvement might attempt to use whole bags rather than\n\t\t\t\t# cause leaving opened packets.\n\t\t\t\tbest_before_dates_obj = {}\n\t\t\t\tbest_before_dates = []\n\n\t\t\t\tfor purchasedItem in ourStock:\n\t\t\t\t\tif not best_before_dates_obj.has_key( purchasedItem.bestBeforeEnd ):\n\t\t\t\t\t\tbest_before_dates_obj[ purchasedItem.bestBeforeEnd ] = []\n\t\t\t\t\t\tbest_before_dates.append( purchasedItem.bestBeforeEnd )\n\t\t\t\t\tbest_before_dates_obj[ purchasedItem.bestBeforeEnd].append( purchasedItem )\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t# soonest best before end date first\n\t\t\t\tbest_before_dates.sort()\n\t\t\t\t#uMake the qty required tenfold as we would really like to know \n\t\t\t\t# how muct we can adjust up to.\n\t\t\t\tif dummyAllocate:\tqtyNeeded = qtyNeeded * 100\n\n\t\t\t\tfor best_before_date in best_before_dates:\n\t\t\t\t\tfor item in best_before_dates_obj[ best_before_date ]:\t\n\t\t\t\t\t\tif item.qty > 0 and qtyNeeded >0:\n\t\t\t\t\t\t\tif not stock_result[ stockType ].has_key( item.storeitem ):\n\t\t\t\t\t\t\t\tstock_result[ stockType ][ item.storeitem ] = []\t\n\n\t\t\t\t\t\t\tif item.qty > qtyNeeded:\n\t\t\t\t\t\t\t\tstock_result[ stockType ][ item.storeitem ].append( (qtyNeeded/item.qty,qtyNeeded, item.stocktag, item.storeitem, item) )\n\t\t\t\t\t\t\t\t# If we need multiple quantities then we won't do wastage\n\t\t\t\t\t\t\t\t# assumption is that the multiple qty is set appropriately.\n\t\t\t\t\t\t\t\t# item qty multiple thingy?\n\t\t\t\t\t\t\t\tif item.qtyMultiple != 1:\t\n\t\t\t\t\t\t\t\t\tqtyUsed = math.ceil( qtyNeeded / item.qtyMultiple ) * item.qtyMultiple\n\n\t\t\t\t\t\t\t\t\tif not dummyAllocate:\n\t\t\t\t\t\t\t\t\t\titem.qty= item.qty - qtyUsed\n\t\t\t\t\t\t\t\t\t\tsys.stderr.write(\"\\tdbg:_stockBestBefore() Setting QTY of %s/%s to %s\\n\" %(item.storeitem,item.stocktag,item.qty-qtyUsed))\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t# Note: we don't put() the item the object is passed back\n\t\t\t\t\t\t\t\t\t\t# to the caller which will do the put()\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t# Check the wastage in this step.\n\t\t\t\t\t\t\t\t\tif not 
dummyAllocate:\n\t\t\t\t\t\t\t\t\t\titem.qty= item.qty - qtyNeeded\n\t\t\t\t\t\t\t\t\t\titem.qty= item.qty - item.wastageFixed\n\t\t\t\t\t\t\t\t\t\tif item.qty < 0:\n\t\t\t\t\t\t\t\t\t\t\titem.qty = 0\n\t\t\t\t\t\t\t\t\t\t\t# Note: we don't put() the item the object is passed back\n\t\t\t\t\t\t\t\t\t\t\t# to the caller which will do the put()\n\t\t\t\t\t\t\t\t\t\t\tsys.stderr.write(\"\\tdbg:_stockBestBefore() Setting QTY of %s/%s to %s (Wastage)\\n\" %(item.storeitem,item.stocktag,0))\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tqtyNeeded = 0\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t# This is a full use of the item in stock\n\t\t\t\t\t\t\t\t# therefore we do't introduce wastage\n\t\t\t\t\t\t\t\tqtyNeeded = qtyNeeded - item.qty\n\t\t\t\t\t\t\t\tstock_result[ stockType ][ item.storeitem ].append( (1,item.qty, item.stocktag,item.storeitem,item) )\n\t\t\t\t\t\t\t\tif not dummyAllocate:\n\t\t\t\t\t\t\t\t\titem.qty = float(0)\t\n\t\t\t\t\t\t\t\t\t# Note: we don't put() the item the object is passed back\n\t\t\t\t\t\t\t\t\t# to the caller which will do the put()\n\t\t\t\t\t\t\t\t\tsys.stderr.write(\"\\tdbg:_stockBestBefore() Setting QTY of %s/%s to %s (Used All)\\n\" %(item.storeitem,item.stocktag,0))\t\t\t\t\t\t\t\t\t\t\n\n\n\n\t\tsys.stderr.write(\"END: _stockBestBefore() %s\\n\" %(stockType))\n\t\treturn stock_result",
"def all_stocks(strict=True):\n if strict:\n all_securities = []\n for security in Security.objects.all():\n name = security.security_name.lower()\n if 'etf' in name or 'ordinary' in name:\n #print(name)\n all_securities.append(security.asx_code)\n else:\n all_securities = Security.objects.values_list(\"asx_code\", flat=True)\n \n return set(all_securities)",
"def get_stock_data(x):",
"def stock_analyst(stock_list):\n B = stock_list.index(min(stock_list))\n buy_value = min(stock_list)\n\n sell_value = -1\n if buy_value > 1:\n for sell_indx in range(B, len(stock_list)):\n if sell_value < stock_list[sell_indx]:\n sell_value = stock_list[sell_indx]\n S = sell_indx\n else:\n return 'Buy stock first'\n return [B, S]",
"def recherche(self, event = None):\n self.stock.updateLst()\n self.frmModif.pack_forget()\n self.frmRecherche.pack()\n self.stockAffiche.lstVetement = []\n PrixMin = 0.0\n PrixMax = 9999999.0\n try:\n if len(self.minPrix_art_recherche.get()) !=0:\n PrixMin = float(self.minPrix_art_recherche.get())\n if len(self.maxPrix_art_recherche.get()) !=0:\n PrixMax = float(self.maxPrix_art_recherche.get())\n except :\n messagebox.showerror(title=\"Error\", message=\"Erreur d'encodage dans le Prix!\")\n \n if str(self.num_art_recherche.get()) in [str(v.EAN) for v in self.stock.lstVetement]: \n for vetm in self.stock.lstVetement:\n if str(self.num_art_recherche.get()) == str(vetm.EAN) and (PrixMin <= vetm.dblPrixHTVA and PrixMax >= vetm.dblPrixHTVA):\n self.stockAffiche.lstVetement.append(vetm)\n break\n else: \n for vetm in self.stock.lstVetement:\n if PrixMin <= vetm.dblPrixHTVA and PrixMax >= vetm.dblPrixHTVA:\n self.stockAffiche.lstVetement.append(vetm)\n if vetm in self.stockAffiche.lstVetement :\n if len(self.libelle_art_recherche.get())!=0 and str(vetm.strLibelle).upper() != self.libelle_art_recherche.get().upper():\n self.stockAffiche.lstVetement.remove(vetm)\n elif len(self.Marque_art_recherche.get()) and str(vetm.strMarque).upper() != self.Marque_art_recherche.get().upper():\n self.stockAffiche.lstVetement.remove(vetm)\n elif len(self.cat_art_recherche.get()) and str(vetm.strCategorie).upper() != self.cat_art_recherche.get().upper():\n self.stockAffiche.lstVetement.remove(vetm)\n elif len(self.taille_art_recherche.get()) and str(vetm.Taille).upper() != self.taille_art_recherche.get().upper():\n self.stockAffiche.lstVetement.remove(vetm)\n elif len(self.couleur_art_recherche.get()) and str(vetm.strCouleur).upper() != self.couleur_art_recherche.get().upper():\n self.stockAffiche.lstVetement.remove(vetm)\n \n self.updateStock()",
"def test_stock_using_stock(self):\n with mn.model() as m:\n mn.stock('First', 1)\n mn.stock('Second', lambda f: f, ('First',), 0)\n mn.stock('Third', lambda f, s: f + s, ('First', 'Second'), 0)\n\n m.step()\n self.assertEqual(m['First'][''], 1)\n self.assertEqual(m['Second'][''], 0)\n self.assertEqual(m['Third'][''], 0)\n m.step()\n self.assertEqual(m['First'][''], 2)\n self.assertEqual(m['Second'][''], 1)\n self.assertEqual(m['Third'][''], 1)\n m.step()\n self.assertEqual(m['First'][''], 3)\n self.assertEqual(m['Second'][''], 3)\n self.assertEqual(m['Third'][''], 4)\n m.step()\n self.assertEqual(m['First'][''], 4)\n self.assertEqual(m['Second'][''], 6)\n self.assertEqual(m['Third'][''], 10)\n m.step()\n self.assertEqual(m['First'][''], 5)\n self.assertEqual(m['Second'][''], 10)\n self.assertEqual(m['Third'][''], 20)",
"def get_filter_settings(self):\n\n # Saved filter\n saved_filters = self.investor.lc.get_saved_filters()\n if len(saved_filters) > 0 and util.prompt_yn('Would you like to select one of your saved filters from LendingClub.com?', self.investing['filter_id'] is not None):\n\n # Get the selected one from list (has to be same-same object)\n selected = None\n if self.investing['filter_id']:\n selected = self.investing['filter_id']\n\n print('\\nSelect a saved filter from the list below:')\n saved = self.list_picker(\n items=saved_filters,\n default=selected,\n label_key='name',\n id_key='id')\n\n if saved is False:\n print('\\nDefine all your filters manually...')\n else:\n print('Using {0}'.format(saved))\n self.investing['filters'] = saved\n self.investing['filter_id'] = saved.id\n return\n\n filters = Filter()\n\n # Manual entry\n print('The following questions are from the filters section of the Invest page on LendingClub\\n')\n\n # Existing loans\n filters['exclude_existing'] = util.prompt_yn('Exclude loans already invested in?', filters['exclude_existing'])\n\n # Funding progress rounded to the nearest tenth\n print('---------')\n print('Funding progress')\n progress = util.prompt_float('Only include loans which already have at least __% funding (0 - 100)', filters['funding_progress'])\n filters['funding_progress'] = int(round(progress / 10) * 10)\n\n print('---------')\n print('Choose term (36 - 60 month)')\n\n while(True):\n filters['term']['Year3'] = util.prompt_yn('Include 36 month term loans?', filters['term']['Year3'])\n filters['term']['Year5'] = util.prompt_yn('Include 60 month term loans?', filters['term']['Year5'])\n\n # Validate 1 was chosen\n if not filters['term']['Year3'] and not filters['term']['Year5']:\n print('You have to AT LEAST choose one term length!')\n else:\n break\n\n print('---------')\n print('Choose interest rate grades (7.4% - 24.84%)')\n while(True):\n if util.prompt_yn('Include ALL interest rate grades', filters['grades']['All']):\n filters['grades']['All'] = True\n else:\n filters['grades']['All'] = False\n filters['grades']['A'] = util.prompt_yn('A - ~7.41%', filters['grades']['A'])\n filters['grades']['B'] = util.prompt_yn('B - ~12.12%', filters['grades']['B'])\n filters['grades']['C'] = util.prompt_yn('C - ~15.80%', filters['grades']['C'])\n filters['grades']['D'] = util.prompt_yn('D - ~18.76%', filters['grades']['D'])\n filters['grades']['E'] = util.prompt_yn('E - ~21.49%', filters['grades']['E'])\n filters['grades']['F'] = util.prompt_yn('F - ~23.49%', filters['grades']['F'])\n filters['grades']['G'] = util.prompt_yn('G - ~24.84%', filters['grades']['G'])\n\n # Verify one was chosen\n gradeChosen = False\n for grade in filters['grades']:\n if filters['grades'][grade] is True:\n gradeChosen = True\n if not gradeChosen:\n print('You have to AT LEAST choose one interest rate grade!')\n else:\n break\n\n self.investing['filters'] = filters",
"def filter_df(self):\n df = self.fusion_df()\n df1 = df.loc[df['Price per month (£)'] >= self.price_min]\n df2 = df1.loc[df1['Price per month (£)'] <= self.price_max]\n df3 = df2.loc[df2['Bedrooms']>=self.bedroom]\n df4 = df3.loc[df3['Bathrooms']>=self.bathroom]\n \n return df4",
"def filter_candidates(self):\n\n try:\n if not os.path.exists(self.stock_ticker_file):\n RobinhoodInstance.get_all_instruments(self.stock_ticker_file)\n except Exception as e:\n print \"[Error]: %s\" % str(e)\n raise\n\n stock_file = open(self.stock_ticker_file, \"r\")\n filtered_stock_file = open(self.filtered_stock_ticker_file, \"w\")\n\n for stock_ticker in stock_file.readlines():\n print \"Testing: %s\" % stock_ticker\n stock_ticker = stock_ticker.strip()\n for special_char in SPECIAL_CHAR_LIST:\n stock_ticker = stock_ticker.replace(special_char, \"\")\n\n # Get the bollinger band history along with the 5 day moving average\n try:\n close, lower_band, five_day_ma = self.calculate_bands(stock_ticker)\n except Exception as e:\n print \"Could not test ticker: %s\" % stock_ticker\n print \"Error: %s\" % str(e)\n continue\n\n # If I get bad data, just continue to the next stock\n if len(close) < 5 or len(lower_band) < 5 or len(five_day_ma) < 5:\n print \"Could not test ticker: %s\" % stock_ticker\n continue\n\n print \"Adding: %s\" % stock_ticker\n filtered_stock_file.write(\"%s\\n\" % stock_ticker)",
"def test_three_condition_query_in_list():\n rally = Rally(server=RALLY, user=RALLY_USER, password=RALLY_PSWD)\n #qualifiers = [\"State = Submitted\", \"FormattedID != DE100\", \"Owner.UserName != horsefeathers\"]\n qualifiers = [\"State = Submitted\", \"FormattedID != DE100\", \"Severity != UltraMegaHurt\"]\n response = rally.get('Defect', fetch=True, query=qualifiers, limit=10)\n assert response.resultCount > 0",
"def get_stock_factor(self, code=None, factor=None, begin_date=None, end_date=None):\n\n # 没有指定代码返回None\n if code is None:\n raise Exception('没有指定股票代码')\n\n if factor is None or factor == '':\n raise Exception('没有指定因子名称')\n\n # 没有指定开始日期,默认当前日期\n if begin_date is None:\n begin_date = datetime.now().strftime('%Y%m%d')\n\n # 没有指定结束日期,默认开始日期\n if end_date is None:\n end_date = begin_date\n\n factor_cursor = DB_CONN[factor].find(\n {'code': code, 'name': factor, 'date': {'$gte': begin_date, '$lte': end_date}}\n , sort=[('date', ASCENDING)]\n , projection={'_id': False}\n )\n\n # 多个字典一秒变DataFrame!\n df_factor = pd.DataFrame([doc for doc in factor_cursor])\n # 这个df可能为空\n if df_factor.index.size > 0:\n df_factor.set_index('date', inplace=True)\n\n return df_factor",
"def check_stock(self):\n quantity = int(self.quantityEdit.text())\n \n if len(self.item) > 0 and not self.stock_item:#item pd.Series() is set and not adding stock\n if quantity > self.item.loc['stock']:\n self.show_not_enough_stock_message(quantity)",
"def find_qualifying_loans(bank_data, credit_score, debt, income, loan, home_value):\n\n # Calculate the monthly debt ratio\n monthly_debt_ratio = calculate_monthly_debt_ratio(debt, income)\n print(f\"The monthly debt to income ratio is {monthly_debt_ratio:.02f}\")\n\n # Calculate loan to value ratio\n loan_to_value_ratio = calculate_loan_to_value_ratio(loan, home_value)\n print(f\"The loan to value ratio is {loan_to_value_ratio:.02f}.\")\n\n # Run qualification filters\n bank_data_filtered = filter_max_loan_size(loan, bank_data)\n bank_data_filtered = filter_credit_score(credit_score, bank_data_filtered)\n bank_data_filtered = filter_debt_to_income(monthly_debt_ratio, bank_data_filtered)\n bank_data_filtered = filter_loan_to_value(loan_to_value_ratio, bank_data_filtered)\n# the bank data that meets the qualification criteria are filtered and printed as qualifying loans\n print(f\"Found {len(bank_data_filtered)} qualifying loans\")\n\n return bank_data_filtered",
"def really_goods(take_me, free):\n global winningConditions\n goods_lst = []\n for n in take_me:\n condition_n = [condition for condition in winningConditions if n in condition] #all winning conditions contains n\n for cond in condition_n: #for all conditions contains n\n p, q = [m for m in cond if m != n] #list of the two others positions to win with n\n if p in free and q in free: #If the missing positions to win are free\n goods_lst.append(p)\n goods_lst.append(q)\n return goods_lst",
"def make_conditions(self):\n conditions = list()\n\n position_stages = self.positionstage_set.order_by('price').all()\n if position_stages.count() == 0:\n return []\n\n # make all stage into a list\n operators = list()\n for s in position_stages:\n operators += [(s.price, '<', s.lt_stage, s.lt_amount),\n (s.price, '==', s.e_stage, s.e_amount),\n (s.price, '>', s.gt_stage, s.gt_amount)]\n\n # make a list of same stage\n stages = list()\n last = 0\n for key, (s0, s1) in enumerate(zip(operators[:-1], operators[1:])):\n if s0[2] != s1[2]:\n stages.append(operators[last:key + 1])\n last = key + 1\n else:\n stages.append(operators[last:len(operators)])\n\n for stage_list in stages:\n condition0 = list()\n amounts = list()\n\n for price in sorted(set([s[0] for s in stage_list])):\n condition1 = list()\n\n for stage in [s for s in stage_list if s[0] == price]:\n condition1.append('{x} {operator} {price}'.format(\n x='{x}',\n operator=stage[1],\n price=stage[0],\n ))\n\n amounts.append(stage[3])\n\n condition0.append(' or '.join(condition1))\n else:\n if all([a == amounts[0] for a in amounts]):\n stage_operators = {\n 'MAX_PROFIT': '{y} == {amount}',\n 'PROFIT': '{amount} < {y}',\n 'EVEN': '{y} == {amount}',\n 'LOSS': '{y} < {amount}',\n 'MAX_LOSS': '{y} == {amount}',\n }\n\n amount = stage_operators[stage_list[0][2]].format(\n y='{y}', amount=amounts[0]\n )\n else:\n stage_operators = {\n 'PROFIT': '{amount0} < {y} < {amount1}',\n 'LOSS': '{amount0} < {y} < {amount1}',\n }\n amount = stage_operators[stage_list[0][2]].format(\n y='{y}', amount0=amounts[0], amount1=amounts[1],\n )\n\n conditions.append([stage_list[0][2], ' and '.join(condition0), amount])\n else:\n for condition in conditions:\n if 'or' in condition[1]:\n if '<' in condition[1] and '==' in condition[1]:\n condition[1] = '{x} <= {price}'.format(\n x='{x}', price=condition[1].split()[-1]\n )\n elif '>' in condition[1] and '==' in condition[1]:\n condition[1] = '{price} <= {x}'.format(\n x='{x}', price=condition[1].split()[-1]\n )\n elif 'and' in condition[1]:\n if '{x} >' in condition[1] and '{x} <' in condition[1]:\n price0 = condition[1][condition[1].index('>') + 1:].split()[0]\n price1 = condition[1][condition[1].index('<') + 1:].split()[0]\n condition[1] = '{price0} < {x} < {price1}'.format(\n x='{x}', price0=price0, price1=price1\n )\n elif '>' in condition[1]:\n x, operator, price = condition[1].split()\n condition[1] = '{price} {operator} {x}'.format(\n price=price, operator=operator.replace('>', '<'), x='{x}'\n )\n\n # logger.info('POS: %s stages generated %s conditions' % (self.symbol.upper(), len(conditions)))\n\n return conditions",
"def stock_screener_ranking(conn_path,var_list,date,rank_by,industry='None',since_ipo = {'condition': '>=', 't': 2},in_universe=False,top=50,order='ascending'): \n if in_universe == True:\n industry2 = 'None'\n since_ipo['min'] = 0\n since_ipo['max'] = 30\n else:\n industry2 = industry\n conn = sql.connect(conn_path+'/data.db') \n var_list.remove(rank_by)\n var_list.insert(0,rank_by)\n var_mapping= pd.read_excel(conn_path+'value_mapping.xlsx')\n var2 = var_mapping[var_mapping['Chinese'].isin(var_list)]\n var2 = (var2.iloc[:,0]) \n db = select_top(conn_path,var2.iloc[0],date,industry = industry2,since_ipo = since_ipo,top=top,order = order)\n n = 1\n while(n<len(var_list)):\n freq,table_name = table_lookup(conn,var2.iloc[n])\n date = date_freq_transfer(date,freq)\n temp = get_data(conn,var2.iloc[n],date,table_name)\n temp = (temp.drop_duplicates())\n db = db.merge(pd.DataFrame(temp[['Code',var2.iloc[n]]]),how = 'left',left_on = 'Code',right_on = 'Code')\n n = n + 1\n if industry == 'None':\n db = db\n else:\n db = db[db['Industry'].isin(list(industry))]\n if(db.empty):\n raise ValueError('No Stock meets criteria!')\n return db",
"def goods(take_me, free = False):\n global winningConditions\n goods_lst = []\n for n in take_me:\n condition_n = [condition for condition in winningConditions if n in condition] #all winning conditions contains n\n for cond in condition_n: #for all conditions contains n\n goods_lst += [m for m in cond if m != n] #add to goods_lst the two others positions to win with n\n if free:\n goods_free = [n for n in goods_lst if n in free] #list of good and free positions\n return goods_free\n else:\n return goods_lst"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Select several stocks which meet the criteria for user-input stock factors. The criteria should only be ranking conditions on factor values, e.g. the user would like to select the stocks which are in the top 30 by net_profit and the top 10 by roe over all stocks as of that date.
|
import pandas as pd

def stock_screener_filter_top(conn_path, var_list, date, order, top, industry='None',
                              since_ipo={'condition': '>=', 't': 0}, in_universe=False):
    # Rank-based screener: keep stocks that appear in the top N for every requested factor
    # (e.g. top 30 by net_profit and top 10 by roe) as of the given date.
    # Relies on the module-level helper select_top().
    var_mapping = pd.read_excel(conn_path + 'value_mapping.xlsx')
    var2 = var_mapping[var_mapping['Chinese'].isin(var_list)]
    var2 = var2.iloc[:, 0]
    if in_universe == True:
        # Ranking over the whole universe: ignore industry and IPO-age restrictions here.
        industry2 = 'None'
        since_ipo['min'] = 0
        since_ipo['max'] = 30
    else:
        industry2 = industry
    # Top list for the first factor, then intersect with the top list of each remaining factor.
    db = select_top(conn_path, var2.iloc[0], date, industry=industry2, since_ipo=since_ipo,
                    top=top[0], order=order[0])
    n = 1
    while n < len(var_list):
        temp = select_top(conn_path, var2.iloc[n], date, industry=industry2, since_ipo=since_ipo,
                          top=top[n], order=order[n])
        db = db.merge(pd.DataFrame(temp.iloc[:, [0, 5, 6]]),
                      how='inner', left_on='Code', right_on='Code')
        n = n + 1
    if industry != 'None':
        if isinstance(industry, str):
            db = db[db['Industry'] == industry]
        else:
            db = db[db['Industry'].isin(industry)]
    if db.empty:
        raise ValueError('No Stock meets criteria!')
    return db
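# A minimal usage sketch; the arguments are hypothetical and mirror the query above
# (top 30 by the first factor, top 10 by the second, with a per-factor sort order):
# screened = stock_screener_filter_top(
#     conn_path='./db/', var_list=['net_profit', 'roe'], date='2020-05-18',
#     order=['descending', 'descending'], top=[30, 10],
#     industry='None', since_ipo={'min': 0, 'max': 30})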
|
[
"def stock_screener_ranking(conn_path,var_list,date,rank_by,industry='None',since_ipo = {'condition': '>=', 't': 2},in_universe=False,top=50,order='ascending'): \n if in_universe == True:\n industry2 = 'None'\n since_ipo['min'] = 0\n since_ipo['max'] = 30\n else:\n industry2 = industry\n conn = sql.connect(conn_path+'/data.db') \n var_list.remove(rank_by)\n var_list.insert(0,rank_by)\n var_mapping= pd.read_excel(conn_path+'value_mapping.xlsx')\n var2 = var_mapping[var_mapping['Chinese'].isin(var_list)]\n var2 = (var2.iloc[:,0]) \n db = select_top(conn_path,var2.iloc[0],date,industry = industry2,since_ipo = since_ipo,top=top,order = order)\n n = 1\n while(n<len(var_list)):\n freq,table_name = table_lookup(conn,var2.iloc[n])\n date = date_freq_transfer(date,freq)\n temp = get_data(conn,var2.iloc[n],date,table_name)\n temp = (temp.drop_duplicates())\n db = db.merge(pd.DataFrame(temp[['Code',var2.iloc[n]]]),how = 'left',left_on = 'Code',right_on = 'Code')\n n = n + 1\n if industry == 'None':\n db = db\n else:\n db = db[db['Industry'].isin(list(industry))]\n if(db.empty):\n raise ValueError('No Stock meets criteria!')\n return db",
"def stock_screener_filter_condition(conn_path,var_list,date,condition_list,threshold_list,industry='None',since_ipo = {'min': 0, 'max': 30},view_all = True,top = 30): \n var_mapping= pd.read_excel(conn_path+'value_mapping.xlsx')\n var2 = var_mapping[var_mapping['Chinese'].isin(var_list)]\n var2 = (var2.iloc[:,0]) \n conn = sql.connect(conn_path+'/data.db') \n freq,table_name = table_lookup(conn,var2.iloc[0])\n date = date_freq_transfer(date,freq)\n db = get_data(conn,var2.iloc[0],date,table_name,condition = condition_list[0],threshold = threshold_list[0])\n db = (db.drop_duplicates())\n n = 1\n while(n<len(var_list)):\n freq,table_name = table_lookup(conn,var2.iloc[n])\n date = date_freq_transfer(date,freq)\n temp = get_data(conn,var2.iloc[n],date,table_name,condition = condition_list[n],threshold = threshold_list[n])\n temp = (temp.drop_duplicates())\n db = db.merge(pd.DataFrame(temp[['Code',var2.iloc[n]]]),how = 'inner',left_on = 'Code',right_on = 'Code')\n n = n + 1\n if(db.empty):\n raise ValueError('No Stock meets criteria!')\n industry_table = pd.read_excel(conn_path+'/Industry.xlsx',dtype=str)\n db = pd.merge(db,industry_table,how = 'left',left_on = 'Code',right_on='Code')\n ipo_date = pd.read_sql('select code as Code,timeToMarket from stock_basics',conn)\n ipo_date['timeToMarket']=list(map(int,ipo_date['timeToMarket']))\n ipo_date['timeToMarket']=list(map(str,ipo_date['timeToMarket']))\n ipo_date['timeToMarket'] = pd.to_datetime(ipo_date['timeToMarket'])\n ipo_date['duration'] = pd.to_datetime(date) - ipo_date['timeToMarket']\n db = pd.merge(db,ipo_date,left_on = 'Code',right_on = 'Code',how = 'left')\n \n db = db[eval(\"(db['duration']>=\"+\"pd.to_timedelta(365*since_ipo[\"+\"'min'],'d'))&(db['duration']<=\"+\"pd.to_timedelta(365*since_ipo[\"+\"'max'],'d'))\")]\n db = db[db['duration']>=pd.to_timedelta(0)]\n if industry == 'None':\n db = db\n else:\n if isinstance(industry,str):\n db = db[db['Industry']==(industry)]\n else:\n db = db[db['Industry'].isin(industry)]\n if(db.empty):\n raise ValueError('No Stock meets criteria!')\n if(view_all):\n return db\n else:\n db = db.iloc[range(min(top,len(db))),:]\n return db",
"def _stockBestBefore(self, username, stock_result, stockType, recipeName,dummyAllocate=0):\n\t\tsys.stderr.write(\"\\nSTART: _stockBestBefore() %s\\n\" %(stockType))\n\t\t# just a bit of protection\n\t\tif not stock_result.has_key( stockType ):\n\t\t\tstock_result[ stockType ] = {}\n\n\t\t# i knew this was going to burn us when we were playing with \n\t\t# adding ingredients\n\t\tif stockType == \"hops\":\n\t\t\tourRecipeIngredients = self.dbWrapper.GqlQuery(\"SELECT * FROM gIngredients WHERE owner = :1 AND recipename = :2 AND ingredientType = :3 AND hopAddAt <= :4\",username,recipeName,stockType,0.0)\n\t\telse:\n\t\t\tourRecipeIngredients = self.dbWrapper.GqlQuery(\"SELECT * FROM gIngredients WHERE owner = :1 AND recipename = :2 AND ingredientType = :3\",username,recipeName,stockType)\n\n\t\t# gIngredients will NOT catch both real recipe ingredients and consumables\n\t\t# need something more but lets get ingredients done first\n\t\t# will need to build this in\n\t\t# if ITEM.category != \"bottle\" and ITEM.category != \"bottlecaps\":\n\n\t\tfor ITEM in ourRecipeIngredients.fetch(40000):\n\t\t\tqty = ITEM.qty\n\t\t\tourStockCheck = self.dbWrapper.GqlQuery(\"SELECT * FROM gPurchases WHERE owner = :1 AND storeitem = :2\",username,ITEM.ingredient)\n\t\t\tourStock = ourStockCheck.fetch(20000)\n\t\t\tif len(ourStock) > 0 :\n#US.has_key( ITEM ):\n\t\t\t\tqtyNeeded = qty\n\t\t\t\t# A future improvement might attempt to use whole bags rather than\n\t\t\t\t# cause leaving opened packets.\n\t\t\t\tbest_before_dates_obj = {}\n\t\t\t\tbest_before_dates = []\n\n\t\t\t\tfor purchasedItem in ourStock:\n\t\t\t\t\tif not best_before_dates_obj.has_key( purchasedItem.bestBeforeEnd ):\n\t\t\t\t\t\tbest_before_dates_obj[ purchasedItem.bestBeforeEnd ] = []\n\t\t\t\t\t\tbest_before_dates.append( purchasedItem.bestBeforeEnd )\n\t\t\t\t\tbest_before_dates_obj[ purchasedItem.bestBeforeEnd].append( purchasedItem )\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t# soonest best before end date first\n\t\t\t\tbest_before_dates.sort()\n\t\t\t\t#uMake the qty required tenfold as we would really like to know \n\t\t\t\t# how muct we can adjust up to.\n\t\t\t\tif dummyAllocate:\tqtyNeeded = qtyNeeded * 100\n\n\t\t\t\tfor best_before_date in best_before_dates:\n\t\t\t\t\tfor item in best_before_dates_obj[ best_before_date ]:\t\n\t\t\t\t\t\tif item.qty > 0 and qtyNeeded >0:\n\t\t\t\t\t\t\tif not stock_result[ stockType ].has_key( item.storeitem ):\n\t\t\t\t\t\t\t\tstock_result[ stockType ][ item.storeitem ] = []\t\n\n\t\t\t\t\t\t\tif item.qty > qtyNeeded:\n\t\t\t\t\t\t\t\tstock_result[ stockType ][ item.storeitem ].append( (qtyNeeded/item.qty,qtyNeeded, item.stocktag, item.storeitem, item) )\n\t\t\t\t\t\t\t\t# If we need multiple quantities then we won't do wastage\n\t\t\t\t\t\t\t\t# assumption is that the multiple qty is set appropriately.\n\t\t\t\t\t\t\t\t# item qty multiple thingy?\n\t\t\t\t\t\t\t\tif item.qtyMultiple != 1:\t\n\t\t\t\t\t\t\t\t\tqtyUsed = math.ceil( qtyNeeded / item.qtyMultiple ) * item.qtyMultiple\n\n\t\t\t\t\t\t\t\t\tif not dummyAllocate:\n\t\t\t\t\t\t\t\t\t\titem.qty= item.qty - qtyUsed\n\t\t\t\t\t\t\t\t\t\tsys.stderr.write(\"\\tdbg:_stockBestBefore() Setting QTY of %s/%s to %s\\n\" %(item.storeitem,item.stocktag,item.qty-qtyUsed))\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t# Note: we don't put() the item the object is passed back\n\t\t\t\t\t\t\t\t\t\t# to the caller which will do the put()\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t# Check the wastage in this step.\n\t\t\t\t\t\t\t\t\tif not 
dummyAllocate:\n\t\t\t\t\t\t\t\t\t\titem.qty= item.qty - qtyNeeded\n\t\t\t\t\t\t\t\t\t\titem.qty= item.qty - item.wastageFixed\n\t\t\t\t\t\t\t\t\t\tif item.qty < 0:\n\t\t\t\t\t\t\t\t\t\t\titem.qty = 0\n\t\t\t\t\t\t\t\t\t\t\t# Note: we don't put() the item the object is passed back\n\t\t\t\t\t\t\t\t\t\t\t# to the caller which will do the put()\n\t\t\t\t\t\t\t\t\t\t\tsys.stderr.write(\"\\tdbg:_stockBestBefore() Setting QTY of %s/%s to %s (Wastage)\\n\" %(item.storeitem,item.stocktag,0))\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tqtyNeeded = 0\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t# This is a full use of the item in stock\n\t\t\t\t\t\t\t\t# therefore we do't introduce wastage\n\t\t\t\t\t\t\t\tqtyNeeded = qtyNeeded - item.qty\n\t\t\t\t\t\t\t\tstock_result[ stockType ][ item.storeitem ].append( (1,item.qty, item.stocktag,item.storeitem,item) )\n\t\t\t\t\t\t\t\tif not dummyAllocate:\n\t\t\t\t\t\t\t\t\titem.qty = float(0)\t\n\t\t\t\t\t\t\t\t\t# Note: we don't put() the item the object is passed back\n\t\t\t\t\t\t\t\t\t# to the caller which will do the put()\n\t\t\t\t\t\t\t\t\tsys.stderr.write(\"\\tdbg:_stockBestBefore() Setting QTY of %s/%s to %s (Used All)\\n\" %(item.storeitem,item.stocktag,0))\t\t\t\t\t\t\t\t\t\t\n\n\n\n\t\tsys.stderr.write(\"END: _stockBestBefore() %s\\n\" %(stockType))\n\t\treturn stock_result",
"def stock_analyst(stock_list):\n B = stock_list.index(min(stock_list))\n buy_value = min(stock_list)\n\n sell_value = -1\n if buy_value > 1:\n for sell_indx in range(B, len(stock_list)):\n if sell_value < stock_list[sell_indx]:\n sell_value = stock_list[sell_indx]\n S = sell_indx\n else:\n return 'Buy stock first'\n return [B, S]",
"def check_sell(self, data={}):\r\n \r\n to_sell = []\r\n rank_dict = {}\r\n for my_position in self.position:\r\n \r\n # compare current_price with value\r\n actual_value = my_position['current_price'] * my_position['num_shares']\r\n bought_value = my_position['total_invested']\r\n # check if current price significantly dropped from bought\r\n if bought_value * (1 - self.stop_loss) >= actual_value:\r\n to_sell.append(my_position)\r\n #rank the coin based on distance from bought value to ensure priority over other sell conditions \r\n rank_dict[my_position['code']] = actual_value - bought_value\r\n elif bought_value * self.profit_take <= actual_value:\r\n to_sell.append(my_position)\r\n # rank the coin based on the gain of selling\r\n rank_dict[my_position['code']] = bought_value - actual_value\r\n elif data[my_position[\"code\"]][\"close\"] >= self.statbot.calc_bands(my_position[\"code\"])[1] and self.statbot.get_rsi(my_position[\"code\"]) >= 70:\r\n diff = abs(data[my_position[\"code\"]][\"close\"] - self.statbot.calc_bands(my_position[\"code\"])[1])\r\n to_sell.append(my_position)\r\n #rank the coin based on the score calculated in get score using difference between bands and rsi\r\n rank_dict[my_position['code']] = self.get_score(SELL, self.statbot.get_rsi(my_position['code']), diff)\r\n \r\n for my_position in to_sell:\r\n self.sell(my_position['code'], my_position['current_price'])\r\n \r\n if len(self.selling) != 0:\r\n # sorts buying based on value of rank\r\n self.selling.sort(key = lambda x : rank_dict[x['code']])",
"def price_statistic_train(a_freq=[1, 2, 5, 10, 20, 60, 120, 240, 500, 750], past=10, q_step=5, df=DB.get_stock_market_all()):\n df_result = pd.DataFrame()\n # for future in a_freq:\n # df[f\"tomorrow{future}\"] = df[\"close\"].shift(-future) / df[\"close\"]\n # df[f\"past{future}\"] = df[\"close\"] / df[\"close\"].shift(future)\n\n for key, df_filtered in LB.custom_quantile(df=df, column=f\"past{past}\", p_setting=[x/100 for x in range(0, 101, q_step)]).items():\n df_result.at[key, \"count\"] = len(df_filtered)\n df_result.at[key, \"q1\"] ,df_result.at[key, \"q2\"] ,df_result.at[key, \"q1_val\"] ,df_result.at[key, \"q2_val\"]= [float(x) for x in key.split(\",\")]\n for future in a_freq:\n # df_result.at[f\"{from_price,to_price}\", f\"tomorrow{future}_mean\"] = (df_filtered[f\"tomorrow{future}\"].mean())\n # df_result.at[f\"{from_price,to_price}\", f\"tomorrow{future}_std\"] = (df_filtered[f\"tomorrow{future}\"].std())\n df_result.at[key, f\"tomorrow{future}gmean\"] = gmean(df_filtered[f\"tomorrow{future}\"].dropna())\n\n # a_path=LB.a_path(f\"Market/CN/Atest/seasonal/all_date_price_statistic_past_{past}\")\n # LB.to_csv_feather(df_result,a_path,skip_feather=True)\n return df_result",
"def recherche(self, event = None):\n self.stock.updateLst()\n self.frmModif.pack_forget()\n self.frmRecherche.pack()\n self.stockAffiche.lstVetement = []\n PrixMin = 0.0\n PrixMax = 9999999.0\n try:\n if len(self.minPrix_art_recherche.get()) !=0:\n PrixMin = float(self.minPrix_art_recherche.get())\n if len(self.maxPrix_art_recherche.get()) !=0:\n PrixMax = float(self.maxPrix_art_recherche.get())\n except :\n messagebox.showerror(title=\"Error\", message=\"Erreur d'encodage dans le Prix!\")\n \n if str(self.num_art_recherche.get()) in [str(v.EAN) for v in self.stock.lstVetement]: \n for vetm in self.stock.lstVetement:\n if str(self.num_art_recherche.get()) == str(vetm.EAN) and (PrixMin <= vetm.dblPrixHTVA and PrixMax >= vetm.dblPrixHTVA):\n self.stockAffiche.lstVetement.append(vetm)\n break\n else: \n for vetm in self.stock.lstVetement:\n if PrixMin <= vetm.dblPrixHTVA and PrixMax >= vetm.dblPrixHTVA:\n self.stockAffiche.lstVetement.append(vetm)\n if vetm in self.stockAffiche.lstVetement :\n if len(self.libelle_art_recherche.get())!=0 and str(vetm.strLibelle).upper() != self.libelle_art_recherche.get().upper():\n self.stockAffiche.lstVetement.remove(vetm)\n elif len(self.Marque_art_recherche.get()) and str(vetm.strMarque).upper() != self.Marque_art_recherche.get().upper():\n self.stockAffiche.lstVetement.remove(vetm)\n elif len(self.cat_art_recherche.get()) and str(vetm.strCategorie).upper() != self.cat_art_recherche.get().upper():\n self.stockAffiche.lstVetement.remove(vetm)\n elif len(self.taille_art_recherche.get()) and str(vetm.Taille).upper() != self.taille_art_recherche.get().upper():\n self.stockAffiche.lstVetement.remove(vetm)\n elif len(self.couleur_art_recherche.get()) and str(vetm.strCouleur).upper() != self.couleur_art_recherche.get().upper():\n self.stockAffiche.lstVetement.remove(vetm)\n \n self.updateStock()",
"def get_stock_data(x):",
"def test_stock_using_stock(self):\n with mn.model() as m:\n mn.stock('First', 1)\n mn.stock('Second', lambda f: f, ('First',), 0)\n mn.stock('Third', lambda f, s: f + s, ('First', 'Second'), 0)\n\n m.step()\n self.assertEqual(m['First'][''], 1)\n self.assertEqual(m['Second'][''], 0)\n self.assertEqual(m['Third'][''], 0)\n m.step()\n self.assertEqual(m['First'][''], 2)\n self.assertEqual(m['Second'][''], 1)\n self.assertEqual(m['Third'][''], 1)\n m.step()\n self.assertEqual(m['First'][''], 3)\n self.assertEqual(m['Second'][''], 3)\n self.assertEqual(m['Third'][''], 4)\n m.step()\n self.assertEqual(m['First'][''], 4)\n self.assertEqual(m['Second'][''], 6)\n self.assertEqual(m['Third'][''], 10)\n m.step()\n self.assertEqual(m['First'][''], 5)\n self.assertEqual(m['Second'][''], 10)\n self.assertEqual(m['Third'][''], 20)",
"def runQueryatBrandLevel():\r\n\r\n df = pd.DataFrame()\r\n\r\n query1 = \"SELECT brand,count(id) AS totalProduct from productinfo where date=%s group by brand ORDER BY count(id) DESC \"\r\n results1 = sql.read_sql(query1, con=conn, params=[date1])\r\n results1['retailer']=retName[0]\r\n df = df.append(results1)\r\n\r\n query1 = \"SELECT brand,count(id) AS totalProduct from bub_productinfo where date=%s group by brand ORDER BY count(id) DESC \"\r\n results2 = sql.read_sql(query1, con=conn, params=[date1])\r\n results2['retailer'] = retName[1]\r\n df = df.append(results2)\r\n\r\n query1 = \"SELECT brand,count(id) AS totalProduct from boo_productinfo where date=%s group by brand ORDER BY count(id) DESC \"\r\n results3 = sql.read_sql(query1, con=conn, params=[date1])\r\n results3['retailer'] = retName[2]\r\n df = df.append(results3)\r\n\r\n list1 = results1['brand'].tolist()\r\n list2 = results2['brand'].tolist()\r\n list3 = results3['brand'].tolist()\r\n\r\n\r\n for brand in list1:\r\n if brand in list2 and brand in list3:\r\n brandName.append(brand)\r\n topBrand=brandName[:5]\r\n\r\n df.set_index('brand',inplace=True)\r\n\r\n df = df.ix[topBrand, :]\r\n\r\n df.reset_index(inplace=True)\r\n header = df.dtypes.index\r\n graphs.multipleBar(df, header[0], header[1], header[2])\r\n\r\n print(df)\r\n print('\\n')\r\n\r\n \"\"\".........No of offered products in top 5 common brands for every Retailer.......... \"\"\"\r\n\r\n df = pd.DataFrame()\r\n for o, i, z in zip(var, var1, retName):\r\n query1 = \"select o.brand,count(DISTINCT i.id) as offeredProduct from %s as o INNER JOIN %s as i on o.id=i.id \" % (o, i)\r\n query2 = query1 + \"WHERE o.date=%s AND i.date=%s AND (o.brand=%s OR o.brand=%s OR o.brand=%s OR o.brand=%s OR o.brand=%s) AND i.discountPercentage >0 GROUP BY o.brand ORDER BY offeredProduct DESC \"\r\n results = sql.read_sql(query2, con=conn, params=[date1, date1,topBrand[0],topBrand[1],topBrand[2],topBrand[3],topBrand[4]])\r\n results['retailer'] = z\r\n df = df.append(results)\r\n\r\n header = df.dtypes.index\r\n graphs.multipleBar(df, header[0], header[1], header[2])\r\n print(df)\r\n print('\\n')\r\n\r\n \"\"\"..........Offer Percentage in top 5 common brands for every Retailer..........\"\"\"\r\n\r\n df = pd.DataFrame()\r\n for o, i, z in zip(var, var1, retName):\r\n query1 = \"select o.brand,AVG (i.discountPercentage) as offeredPercentage from %s as o INNER JOIN %s as i on o.id=i.id\" % (o, i)\r\n query2 = query1 + \" WHERE o.date=%s AND i.date=%s AND (o.brand=%s OR o.brand=%s OR o.brand=%s OR o.brand=%s OR o.brand=%s) AND i.discountPercentage >0 \" \\\r\n \"GROUP BY o.brand ORDER BY offeredPercentage DESC \"\r\n results = sql.read_sql(query2, con=conn, params=[date1, date1,topBrand[0],topBrand[1],topBrand[2],topBrand[3],topBrand[4]])\r\n results['retailer'] = z\r\n df = df.append(results)\r\n\r\n header = df.dtypes.index\r\n graphs.multipleBar(df, header[0], header[1], header[2])\r\n print(df)\r\n print('\\n')\r\n\r\n \"\"\".........Color Variation in top 5 common brands for every Retailer.........\"\"\"\r\n\r\n df = pd.DataFrame()\r\n for o, i,z in zip(var, var1, retName):\r\n query1 = \"select result.colorcount as colorNo,count(result.colorcount) AS products from (select infocolor.id,count(infocolor.id) as colorcount FROM (select o.id from %s\" \\\r\n \" AS o inner join %s AS i on o.id=i.id\" % (o, i)\r\n query2 = query1 + \" where i.date=%s and o.date=%s AND o.brand=%s ) as infocolor GROUP BY infocolor.id) as result GROUP BY colorNo\"\r\n results = sql.read_sql(query2, 
con=conn, params=[date1, date1,topBrand[0]])\r\n results['brand'] = topBrand[0]\r\n results['retailer'] = z\r\n df = df.append(results.ix[0:3, :])\r\n\r\n query1 = \"select result.colorcount as colorNo,count(result.colorcount) AS products from (select infocolor.id,count(infocolor.id) as colorcount FROM (select o.id from %s\" \\\r\n \" AS o inner join %s AS i on o.id=i.id\" % (o, i)\r\n query2 = query1 + \" where i.date=%s and o.date=%s AND o.brand=%s) as infocolor GROUP BY infocolor.id) as result GROUP BY colorNo\"\r\n results = sql.read_sql(query2, con=conn, params=[date1, date1,topBrand[1]])\r\n results['brand'] = topBrand[1]\r\n results['retailer'] = z\r\n df = df.append(results.ix[0:3, :])\r\n\r\n query1 = \"select result.colorcount as colorNo,count(result.colorcount) AS products from (select infocolor.id,count(infocolor.id) as colorcount FROM (select o.id from %s\" \\\r\n \" AS o inner join %s AS i on o.id=i.id\" % (o, i)\r\n query2 = query1 + \" where i.date=%s and o.date=%s AND o.brand=%s ) as infocolor GROUP BY infocolor.id) as result GROUP BY colorNo\"\r\n results = sql.read_sql(query2, con=conn, params=[date1, date1,topBrand[2]])\r\n results['brand'] = topBrand[2]\r\n results['retailer'] = z\r\n df = df.append(results.ix[0:3, :])\r\n\r\n query1 = \"select result.colorcount as colorNo,count(result.colorcount) AS products from (select infocolor.id,count(infocolor.id) as colorcount FROM (select o.id from %s\" \\\r\n \" AS o inner join %s AS i on o.id=i.id\" % (o, i)\r\n query2 = query1 + \" where i.date=%s and o.date=%s AND o.brand=%s ) as infocolor GROUP BY infocolor.id) as result GROUP BY colorNo\"\r\n results = sql.read_sql(query2, con=conn, params=[date1, date1, topBrand[3]])\r\n results['brand'] = topBrand[3]\r\n results['retailer'] = z\r\n df = df.append(results.ix[0:3, :])\r\n\r\n query1 = \"select result.colorcount as colorNo,count(result.colorcount) AS products from (select infocolor.id,count(infocolor.id) as colorcount FROM (select o.id from %s\" \\\r\n \" AS o inner join %s AS i on o.id=i.id\" % (o, i)\r\n query2 = query1 + \" where i.date=%s and o.date=%s AND o.brand=%s ) as infocolor GROUP BY infocolor.id) as result GROUP BY colorNo\"\r\n results = sql.read_sql(query2, con=conn, params=[date1, date1, topBrand[4]])\r\n results['brand'] = topBrand[4]\r\n results['retailer'] = z\r\n df = df.append(results.ix[0:3, :])\r\n\r\n header = df.dtypes.index\r\n graphs.brandStackedMultiBar(df, header[0], header[1], header[2], header[3])\r\n print(df)\r\n print('\\n')\r\n\r\n \"\"\"..........Size Variation in top 5 common brands for every Retailer...........\"\"\"\r\n\r\n df = pd.DataFrame()\r\n for o, i, p, z in zip(var, var1, var2, retName):\r\n query1= \"select p.size,count(o.id) as products from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query2 = query1 + \" where o.brand=%s and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') GROUP BY p.size\"\r\n results = sql.read_sql(query2, con=conn, params=[topBrand[0],date1, date1, date1])\r\n results['brand'] = topBrand[0]\r\n results['retailer'] = z\r\n df = df.append(results)\r\n\r\n query1 = \"select p.size,count(o.id) as products from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query2 = query1 + \" where o.brand=%s and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') GROUP BY p.size\"\r\n results = sql.read_sql(query2, con=conn, 
params=[topBrand[1],date1, date1, date1])\r\n results['brand'] = topBrand[1]\r\n results['retailer'] = z\r\n df = df.append(results)\r\n\r\n query1 = \"select p.size,count(o.id) as products from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query2 = query1 + \" where o.brand=%s and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') GROUP BY p.size\"\r\n results = sql.read_sql(query2, con=conn, params=[topBrand[2],date1, date1, date1])\r\n results['brand'] = topBrand[2]\r\n results['retailer'] = z\r\n df = df.append(results)\r\n\r\n query1 = \"select p.size,count(o.id) as products from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query2 = query1 + \" where o.brand=%s and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') GROUP BY p.size\"\r\n results = sql.read_sql(query2, con=conn, params=[topBrand[3], date1, date1, date1])\r\n results['brand'] = topBrand[3]\r\n results['retailer'] = z\r\n df = df.append(results)\r\n\r\n query1 = \"select p.size,count(o.id) as products from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query2 = query1 + \" where o.brand=%s and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') GROUP BY p.size\"\r\n results = sql.read_sql(query2, con=conn, params=[topBrand[4], date1, date1, date1])\r\n results['brand'] = topBrand[4]\r\n results['retailer'] = z\r\n df = df.append(results)\r\n\r\n header = df.dtypes.index\r\n graphs.brandStackedMultiBar(df, header[0], header[1], header[2], header[3])\r\n print(df)\r\n print('\\n')\r\n\r\n \"\"\"....Items Sold in top 5 common brands.... 
\"\"\"\r\n\r\n df = pd.DataFrame()\r\n df1 = pd.DataFrame()\r\n df2 = pd.DataFrame()\r\n\r\n \"\"\"....Yesterday.....\"\"\"\r\n for o, i, p, z in zip(var[1:], var1[1:], var2[1:], retName[1:]):\r\n query1 = \"select o.brand,p.size,p.sku,p.quantity from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query2 = query1 + \" where (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L')\"\r\n results = sql.read_sql(query2, con=conn, params=[topBrand[0],topBrand[1],topBrand[2],topBrand[3],topBrand[4],date1, date1, date1])\r\n results['retailer'] = z\r\n df1 = df1.append(results)\r\n\r\n \"\"\"....Before Yesterday.....\"\"\"\r\n\r\n query1 = \"select o.brand,p.size,p.sku,p.quantity from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query2 = query1 + \" where (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L')\"\r\n results = sql.read_sql(query2, con=conn,params=[topBrand[0], topBrand[1], topBrand[2], topBrand[3], topBrand[4], date2, date2,date2])\r\n results['retailer'] = z\r\n df2 = df2.append(results)\r\n\r\n query = \"SELECT o.brand as brand_x,p.size as size_x,p.sku as sku,q.itemQuantity as itemsold from productsize2 AS q INNER JOIN productsize AS p ON p.sku=q.sku INNER JOIN productcolor AS i on p.colorId=i.colorId \" \\\r\n \"INNER JOIN productinfo as o ON o.id=i.id WHERE o.date=%s AND i.date=%s AND p.date=%s AND q.date=%s AND (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) and \" \\\r\n \"(p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') \"\r\n results = sql.read_sql(query, con=conn, params=[date1, date1, date1, date1,topBrand[0], topBrand[1], topBrand[2], topBrand[3], topBrand[4]])\r\n results['retailer_x'] = retName[0]\r\n\r\n df = pd.merge(df1, df2, on='sku', how='inner')\r\n df['itemsold'] = df['quantity_y'] - df['quantity_x']\r\n df = df[df['itemsold'] > 0]\r\n df = df.append(results)\r\n #df1['itemsold'] = df1['quantity'] - df2['quantity']\r\n #df = df1.ix[:, ['category', 'retailer', 'itemsold', 'size']].copy()\r\n header = df.dtypes.index\r\n graphs.brandStackedMultiBar(df, header[7], header[2], header[0], header[5])\r\n\r\n print(df)\r\n print('\\n')\r\n\r\n \"\"\"....Revenue in top 5 common brands.... 
\"\"\"\r\n\r\n df = pd.DataFrame()\r\n df1 = pd.DataFrame()\r\n df2 = pd.DataFrame()\r\n\r\n \"\"\"....Yesterday.....\"\"\"\r\n\r\n for o, i, p, z in zip(var[1:], var1[1:], var2[1:], retName[1:]):\r\n query1 = \"select o.brand,p.size,p.sku,p.quantity,i.originalPrice,i.discountPercentage from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query = query1 + \" where (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L')\"\r\n results = sql.read_sql(query, con=conn, params=[topBrand[0], topBrand[1], topBrand[2], topBrand[3], topBrand[4],date1, date1, date1])\r\n results['retailer'] = z\r\n df1 = df1.append(results)\r\n\r\n \"\"\"........Before Yesterday......\"\"\"\r\n\r\n query1 = \"select o.brand,p.size,p.sku,p.quantity,i.originalPrice,i.discountPercentage from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query = query1 + \" where (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L')\"\r\n results = sql.read_sql(query, con=conn,params=[topBrand[0], topBrand[1], topBrand[2], topBrand[3], topBrand[4], date2, date2,date2])\r\n results['retailer'] = z\r\n df2 = df2.append(results)\r\n\r\n\r\n query = \"SELECT o.brand as brand_x,p.size as size_x,p.sku,q.itemQuantity as itemsold,q.itemRevenue as revenue from productsize2 AS q INNER JOIN productsize AS p ON p.sku=q.sku INNER JOIN productcolor AS i on p.colorId=i.colorId \" \\\r\n \"INNER JOIN productinfo as o ON o.id=i.id WHERE o.date=%s AND i.date=%s AND p.date=%s AND q.date=%s AND (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) AND \" \\\r\n \"(p.size='X' OR p.size='S' OR p.size='M' OR p.size='L')\"\r\n results = sql.read_sql(query, con=conn, params=[date1, date1, date1, date1,topBrand[0], topBrand[1], topBrand[2], topBrand[3], topBrand[4]])\r\n results['retailer_x'] = retName[0]\r\n\r\n df = pd.merge(df1, df2, on='sku', how='inner')\r\n df['itemsold'] = df['quantity_y'] - df['quantity_x']\r\n df = df[df['itemsold'] > 0]\r\n df['price'] = df['originalPrice_x'] - (df['discountPercentage_x'] / 100)\r\n\r\n df['revenue'] = df['price'] * df['itemsold']\r\n df = df.append(results)\r\n\r\n header = df.dtypes.index\r\n graphs.brandStackedMultiBar(df, header[13], header[12], header[0], header[10])",
"def profit(nbConso, prix,coutMenu,coutEntretien):",
"def bestOfferForGroup(df, portfolio, group_def):\n\n # Filter demographic group\n for feat_col, group in group_def:\n df = df[df[feat_col]==group]\n\n # Group by offer code\n agg_metrics = {\"daily_offer_spending\": [\"median\",\"size\"]}\n metric_names = [\"spending_median\", \"size\"]\n df = df.groupby(\"offer_code\").agg(agg_metrics).reset_index()\n df.columns = [\"offer_code\"] + metric_names\n\n # Sort\n df = df.sort_values(\"spending_median\", ascending=False) \n\n return df.head()",
"def screen_stocks(df, **kwargs):\r\n\r\n for column, thresholds in kwargs.items():\r\n df = df[(df[column] > thresholds[0]) & (df[column] < thresholds[1]) | (df[column].isnull())]\r\n\r\n ticker_list = list(df.symbol)\r\n\r\n return ticker_list",
"def display_stock():",
"def stockvals(df,start_date,end_date):\r\n #convert pd dataframes to strings\r\n symbols, names = df.Symbol, df.Security\r\n symbols = symbols.to_numpy()\r\n symbols = symbols.astype(str)\r\n names = names.to_numpy()\r\n names = names.astype(str)\r\n start_date_int = datetime_to_integer(start_date)\r\n #Stocks under consideration (from S&P500)\r\n n_stocks = len(symbols)\r\n #Open - Closing value of stocks (as float)\r\n indices = []; open_val = []; close_val = []\r\n for j in tqdm(range(0,n_stocks),position=0,desc='Loading Stock Data'):\r\n if j == 91:\r\n continue\r\n date_string=(df.iloc[j][6]).replace('-',''); #print(date_string)\r\n date_added = int(date_string[:8])\r\n if(date_added <= start_date_int):\r\n index = j\r\n indices = np.append(indices,index)\r\n quotes = web.DataReader(symbols[j], 'yahoo', start_date, end_date)\r\n opening = quotes.Open\r\n closing = quotes.Close\r\n open_val = np.append(open_val,opening,axis=0)\r\n close_val = np.append(close_val,closing,axis=0)\r\n open_val = open_val.reshape(len(indices),-1)\r\n close_val = close_val.reshape(len(indices),-1)\r\n variation = open_val-close_val\r\n return names[indices.astype(int)],symbols[indices.astype(int)],variation,close_val,open_val",
"def test_stock_using_stock_alt_ordering(self):\n with mn.model() as m:\n mn.stock('Third', lambda f, s: f + s, ('First', 'Second'), 0)\n mn.stock('Second', lambda f: f, ('First',), 0)\n mn.stock('First', 1)\n\n m.step()\n self.assertEqual(m['First'][''], 1)\n self.assertEqual(m['Second'][''], 0)\n self.assertEqual(m['Third'][''], 0)\n m.step()\n self.assertEqual(m['First'][''], 2)\n self.assertEqual(m['Second'][''], 1)\n self.assertEqual(m['Third'][''], 1)\n m.step()\n self.assertEqual(m['First'][''], 3)\n self.assertEqual(m['Second'][''], 3)\n self.assertEqual(m['Third'][''], 4)\n m.step()\n self.assertEqual(m['First'][''], 4)\n self.assertEqual(m['Second'][''], 6)\n self.assertEqual(m['Third'][''], 10)\n m.step()\n self.assertEqual(m['First'][''], 5)\n self.assertEqual(m['Second'][''], 10)\n self.assertEqual(m['Third'][''], 20)",
"def get_sectors_with_max_and_min_stocks():\n sectors = Counter(row['sector'] for row in data if row['sector'] != 'n/a')\n print((sectors.most_common()[0][0], sectors.most_common()[-1][0]))\n pass",
"def all_stocks(strict=True):\n if strict:\n all_securities = []\n for security in Security.objects.all():\n name = security.security_name.lower()\n if 'etf' in name or 'ordinary' in name:\n #print(name)\n all_securities.append(security.asx_code)\n else:\n all_securities = Security.objects.values_list(\"asx_code\", flat=True)\n \n return set(all_securities)",
"def stock_exchange():\n company = namedtuple('company', 'name symbol open high low close')\n \n stocks = namedtuple('company',['stock'] )\n for i in range(100):\n weight = random.uniform(1,50)\n open_price = random.uniform(10,2000) * weight\n\n close_price = open_price * random.uniform(0.1,10.15)\n\n high_value = open_price * random.uniform(0.5,10)\n low_value = open_price * random.uniform(0.1,0.5)\n \n if high_value < open_price:\n high_value = open_price\n if high_value < close_price:\n high_value = close_price\n if low_value > high_value:\n if high_value>open_price:\n low_value = open_price\n else:\n low_value = close_price\n if(i==0):\n s = company(fake.company(),''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(4)),open_price, high_value, low_value, close_price)\n comp = stocks(s)\n else:\n s= company(fake.company(),''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(4)),open_price, high_value, low_value, close_price)\n comp+=stocks(s)\n \n return comp"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Select stocks together with user-specified stock factors, ranked on one primary factor. For example, the user wants to see roe, net_profit, and capital ratio for stocks, but only those whose net_profit ranks in the top 30 across all stocks at that date.
|
def stock_screener_ranking(conn_path, var_list, date, rank_by, industry='None',
                           since_ipo={'condition': '>=', 't': 2}, in_universe=False,
                           top=50, order='ascending'):
    # Note: since_ipo is a mutable default argument and is modified below,
    # so its contents persist across calls.
    if in_universe:
        # Screening the whole universe: ignore the industry filter and widen the IPO-age window.
        industry2 = 'None'
        since_ipo['min'] = 0
        since_ipo['max'] = 30
    else:
        industry2 = industry
    conn = sql.connect(conn_path + '/data.db')
    # Put the ranking factor first so it drives the initial top-N selection.
    var_list.remove(rank_by)
    var_list.insert(0, rank_by)
    # Map the user-facing (Chinese) factor names to database column names.
    var_mapping = pd.read_excel(conn_path + 'value_mapping.xlsx')
    var2 = var_mapping[var_mapping['Chinese'].isin(var_list)]
    var2 = var2.iloc[:, 0]
    # Rank all stocks on the primary factor and keep only the top N.
    db = select_top(conn_path, var2.iloc[0], date, industry=industry2,
                    since_ipo=since_ipo, top=top, order=order)
    # Left-join the remaining factors onto the ranked selection, one column at a time.
    n = 1
    while n < len(var_list):
        freq, table_name = table_lookup(conn, var2.iloc[n])
        date = date_freq_transfer(date, freq)
        temp = get_data(conn, var2.iloc[n], date, table_name)
        temp = temp.drop_duplicates()
        db = db.merge(pd.DataFrame(temp[['Code', var2.iloc[n]]]),
                      how='left', left_on='Code', right_on='Code')
        n = n + 1
    # Apply the industry filter last, if one was requested.
    if industry != 'None':
        db = db[db['Industry'].isin(list(industry))]
    if db.empty:
        raise ValueError('No Stock meets criteria!')
    return db
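
# --- Hedged usage sketch (not part of the original record) -----------------
# Every path and factor name below is a made-up placeholder; the real factor
# names come from the 'Chinese' column of value_mapping.xlsx, and the function
# also depends on the helpers select_top, table_lookup, date_freq_transfer and
# get_data defined elsewhere in the same module, so this is only an illustration.
if __name__ == '__main__':
    ranked = stock_screener_ranking(
        conn_path='/path/to/db_folder/',              # folder holding data.db and value_mapping.xlsx
        var_list=['roe', 'net_profit', 'capital_ratio'],
        date='2020-06-30',
        rank_by='net_profit',                         # primary factor used for the top-N ranking
        top=30,
        order='descending',
    )
    print(ranked.head())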
|
[
"def display_stock():",
"def rank_stock(self):\n stock_list = []\n for player in self.player_list:\n stock_list.extend(player.stock_list)\n\n stock_list.sort(key=lambda stock: stock.transaction_count,\n reverse=True)\n\n for stock in stock_list[:100]:\n if stock.transaction_count == 0:\n continue\n print (\"Stock {0} current price {1}, \"\n \"transaction count {2}\".format(\n stock.identifier,\n stock.sales_price,\n stock.transaction_count))\n\n return stock_list",
"def test_stock_using_stock_alt_ordering(self):\n with mn.model() as m:\n mn.stock('Third', lambda f, s: f + s, ('First', 'Second'), 0)\n mn.stock('Second', lambda f: f, ('First',), 0)\n mn.stock('First', 1)\n\n m.step()\n self.assertEqual(m['First'][''], 1)\n self.assertEqual(m['Second'][''], 0)\n self.assertEqual(m['Third'][''], 0)\n m.step()\n self.assertEqual(m['First'][''], 2)\n self.assertEqual(m['Second'][''], 1)\n self.assertEqual(m['Third'][''], 1)\n m.step()\n self.assertEqual(m['First'][''], 3)\n self.assertEqual(m['Second'][''], 3)\n self.assertEqual(m['Third'][''], 4)\n m.step()\n self.assertEqual(m['First'][''], 4)\n self.assertEqual(m['Second'][''], 6)\n self.assertEqual(m['Third'][''], 10)\n m.step()\n self.assertEqual(m['First'][''], 5)\n self.assertEqual(m['Second'][''], 10)\n self.assertEqual(m['Third'][''], 20)",
"def ranking():\n \n # Make sure my-team is set. Error if not.\n if session.get('my-team') in [None, '@@']:\n return {'success': 0, 'errors': \"You haven't selected a team as YOUR team.\", \"data\": list()}, 400\n \n # Get the raw scores for all non-excluded teams.\n raw_data = [db.row_to_dict(r) for r in db.query_db(\"select * from raw_scores left join teams on raw_scores.teamId = teams.teamId where teams.exclude=0\")]\n \n # Split into rounds\n rounds = clu.split_into_rounds(raw_data, round_col='round')\n \n # Calculate scores\n raw_scores = list()\n \n point_vals = {\n 'autonomous': session.get('params.autonomous-points', 1), \n 'climb': session.get('params.climb-points', 1),\n 'spin_by_colour': session.get('params.spin-by-col-points', 1),\n 'spin_by_rotate': session.get('params.spin-by-rot-points', 1)\n }\n \n missing_my_team = set()\n for rnd in rounds.values():\n try:\n rnd_scores = clu.calc_scores_for_round(rnd, \n us_id=session['my-team'], \n id_col='teamId',\n point_values=point_vals, \n zero_balls=session.get('params.zero-balls', 0),\n balls_low_col='low_balls', \n balls_high_col='high_balls',\n auto_col='autonomous', \n climb_col='climb', \n spin_clr_col='spin_by_colour',\n spin_rot_col='spin_by_rotate', \n round_col='round')\n raw_scores.extend(rnd_scores)\n except ValueError: # My team not in this round -- ignore for now.\n missing_my_team.add(rnd[0]['round'])\n\n # Aggregate scores\n ag_scores = pd.DataFrame(clu.aggregate_scores(raw_scores), columns=['pair', 'score', 'std_dev', 'adj_score'])\n total = len(ag_scores)\n\n rqv = request.values # saves me some typing.\n \n # Ordering\n if 'order[0][column]' in rqv:\n col = rqv['order[0][column]']\n col_name = rqv['columns[{}][name]'.format(col)]\n asc = rqv['order[0][dir]'] not in ['dsc', 'des', 'desc']\n ag_scores = ag_scores.sort_values(by=[col_name], ascending=asc)\n \n # Filter ...\n if ('search[value]' in rqv) and rqv['search[value]'].strip():\n sv = rqv['search[value]'].strip()\n ag_scores = ag_scores[[sv in str(x) for x in ag_scores['pair'].to_list()]]\n\n filtered = len(ag_scores)\n \n # Any searching / filtering?\n if 'start' in rqv:\n ag_scores = ag_scores[int(rqv['start']):]\n if 'length' in rqv:\n ag_scores = ag_scores[:int(rqv['length'])]\n \n return {'success': 1,\n 'warning': \"My Team not set in round(s): {}\".format(missing_my_team) if missing_my_team else None,\n 'data': ag_scores.to_dict(orient='records'),\n 'rounds': len(rounds),\n \"recordsTotal\": total,\n \"recordsFiltered\": filtered,\n }, 200",
"def best_and_worst_selling_items(data): \n\n sorted_list = data.groupby('item_nbr', as_index=False).sum()[\"units\"].sort_values()\n\n lowest_5 = sorted_list[:5]\n highest_5 = sorted_list[106:]\n\n\n #Combining data in dataframe\n new_item_data = pd.DataFrame(dict(lowest_5 = lowest_5, \n highest_5=highest_5)).reset_index()\n new_item_data = new_item_data.fillna(0)\n new_item_data['Total number sold'] = new_item_data['lowest_5'] + new_item_data['highest_5']\n new_item_data = new_item_data.drop(columns = ['lowest_5', 'highest_5'])\n\n\n #Renaming columns, sorting dataframe and adjusting formatting\n\n new_item_data = new_item_data.rename(columns={\"index\": \"Item_Number\"})\n new_item_data = new_item_data.sort_values(['Total number sold'])\n new_item_data['Total number sold'] = new_item_data['Total number sold'].apply(lambda x: '%.1f' % x)\n new_item_data = new_item_data.reset_index(drop=True)\n\n #Showing table\n return new_item_data",
"def test_stock_using_stock(self):\n with mn.model() as m:\n mn.stock('First', 1)\n mn.stock('Second', lambda f: f, ('First',), 0)\n mn.stock('Third', lambda f, s: f + s, ('First', 'Second'), 0)\n\n m.step()\n self.assertEqual(m['First'][''], 1)\n self.assertEqual(m['Second'][''], 0)\n self.assertEqual(m['Third'][''], 0)\n m.step()\n self.assertEqual(m['First'][''], 2)\n self.assertEqual(m['Second'][''], 1)\n self.assertEqual(m['Third'][''], 1)\n m.step()\n self.assertEqual(m['First'][''], 3)\n self.assertEqual(m['Second'][''], 3)\n self.assertEqual(m['Third'][''], 4)\n m.step()\n self.assertEqual(m['First'][''], 4)\n self.assertEqual(m['Second'][''], 6)\n self.assertEqual(m['Third'][''], 10)\n m.step()\n self.assertEqual(m['First'][''], 5)\n self.assertEqual(m['Second'][''], 10)\n self.assertEqual(m['Third'][''], 20)",
"def risk(stock: str):\n stock_data = yf.Ticker(stock.upper()).info\n # stock_data_dict = {\n # \"Stock\": stock.upper(),\n # \"Industry\": stock_data[\"industry\"],\n # \"Beta\": stock_data[\"beta\"],\n # \"Market Cap\": stock_data[\"marketCap\"],\n # }\n # df = pd.DataFrame(stock_data_dict, index=[1, 2, 3, 4])\n typer.echo(\n f\"Stock: {stock.upper()}\\nIndustry: {stock_data['industry']}\\nBeta {stock_data['beta']: .2f}\\nMarkent Cap: {stock_data['marketCap']: .2f}\\nOpen: {stock_data['open']: .2f}\"\n )",
"def _stockBestBefore(self, username, stock_result, stockType, recipeName,dummyAllocate=0):\n\t\tsys.stderr.write(\"\\nSTART: _stockBestBefore() %s\\n\" %(stockType))\n\t\t# just a bit of protection\n\t\tif not stock_result.has_key( stockType ):\n\t\t\tstock_result[ stockType ] = {}\n\n\t\t# i knew this was going to burn us when we were playing with \n\t\t# adding ingredients\n\t\tif stockType == \"hops\":\n\t\t\tourRecipeIngredients = self.dbWrapper.GqlQuery(\"SELECT * FROM gIngredients WHERE owner = :1 AND recipename = :2 AND ingredientType = :3 AND hopAddAt <= :4\",username,recipeName,stockType,0.0)\n\t\telse:\n\t\t\tourRecipeIngredients = self.dbWrapper.GqlQuery(\"SELECT * FROM gIngredients WHERE owner = :1 AND recipename = :2 AND ingredientType = :3\",username,recipeName,stockType)\n\n\t\t# gIngredients will NOT catch both real recipe ingredients and consumables\n\t\t# need something more but lets get ingredients done first\n\t\t# will need to build this in\n\t\t# if ITEM.category != \"bottle\" and ITEM.category != \"bottlecaps\":\n\n\t\tfor ITEM in ourRecipeIngredients.fetch(40000):\n\t\t\tqty = ITEM.qty\n\t\t\tourStockCheck = self.dbWrapper.GqlQuery(\"SELECT * FROM gPurchases WHERE owner = :1 AND storeitem = :2\",username,ITEM.ingredient)\n\t\t\tourStock = ourStockCheck.fetch(20000)\n\t\t\tif len(ourStock) > 0 :\n#US.has_key( ITEM ):\n\t\t\t\tqtyNeeded = qty\n\t\t\t\t# A future improvement might attempt to use whole bags rather than\n\t\t\t\t# cause leaving opened packets.\n\t\t\t\tbest_before_dates_obj = {}\n\t\t\t\tbest_before_dates = []\n\n\t\t\t\tfor purchasedItem in ourStock:\n\t\t\t\t\tif not best_before_dates_obj.has_key( purchasedItem.bestBeforeEnd ):\n\t\t\t\t\t\tbest_before_dates_obj[ purchasedItem.bestBeforeEnd ] = []\n\t\t\t\t\t\tbest_before_dates.append( purchasedItem.bestBeforeEnd )\n\t\t\t\t\tbest_before_dates_obj[ purchasedItem.bestBeforeEnd].append( purchasedItem )\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t# soonest best before end date first\n\t\t\t\tbest_before_dates.sort()\n\t\t\t\t#uMake the qty required tenfold as we would really like to know \n\t\t\t\t# how muct we can adjust up to.\n\t\t\t\tif dummyAllocate:\tqtyNeeded = qtyNeeded * 100\n\n\t\t\t\tfor best_before_date in best_before_dates:\n\t\t\t\t\tfor item in best_before_dates_obj[ best_before_date ]:\t\n\t\t\t\t\t\tif item.qty > 0 and qtyNeeded >0:\n\t\t\t\t\t\t\tif not stock_result[ stockType ].has_key( item.storeitem ):\n\t\t\t\t\t\t\t\tstock_result[ stockType ][ item.storeitem ] = []\t\n\n\t\t\t\t\t\t\tif item.qty > qtyNeeded:\n\t\t\t\t\t\t\t\tstock_result[ stockType ][ item.storeitem ].append( (qtyNeeded/item.qty,qtyNeeded, item.stocktag, item.storeitem, item) )\n\t\t\t\t\t\t\t\t# If we need multiple quantities then we won't do wastage\n\t\t\t\t\t\t\t\t# assumption is that the multiple qty is set appropriately.\n\t\t\t\t\t\t\t\t# item qty multiple thingy?\n\t\t\t\t\t\t\t\tif item.qtyMultiple != 1:\t\n\t\t\t\t\t\t\t\t\tqtyUsed = math.ceil( qtyNeeded / item.qtyMultiple ) * item.qtyMultiple\n\n\t\t\t\t\t\t\t\t\tif not dummyAllocate:\n\t\t\t\t\t\t\t\t\t\titem.qty= item.qty - qtyUsed\n\t\t\t\t\t\t\t\t\t\tsys.stderr.write(\"\\tdbg:_stockBestBefore() Setting QTY of %s/%s to %s\\n\" %(item.storeitem,item.stocktag,item.qty-qtyUsed))\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t# Note: we don't put() the item the object is passed back\n\t\t\t\t\t\t\t\t\t\t# to the caller which will do the put()\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t# Check the wastage in this step.\n\t\t\t\t\t\t\t\t\tif not 
dummyAllocate:\n\t\t\t\t\t\t\t\t\t\titem.qty= item.qty - qtyNeeded\n\t\t\t\t\t\t\t\t\t\titem.qty= item.qty - item.wastageFixed\n\t\t\t\t\t\t\t\t\t\tif item.qty < 0:\n\t\t\t\t\t\t\t\t\t\t\titem.qty = 0\n\t\t\t\t\t\t\t\t\t\t\t# Note: we don't put() the item the object is passed back\n\t\t\t\t\t\t\t\t\t\t\t# to the caller which will do the put()\n\t\t\t\t\t\t\t\t\t\t\tsys.stderr.write(\"\\tdbg:_stockBestBefore() Setting QTY of %s/%s to %s (Wastage)\\n\" %(item.storeitem,item.stocktag,0))\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tqtyNeeded = 0\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t# This is a full use of the item in stock\n\t\t\t\t\t\t\t\t# therefore we do't introduce wastage\n\t\t\t\t\t\t\t\tqtyNeeded = qtyNeeded - item.qty\n\t\t\t\t\t\t\t\tstock_result[ stockType ][ item.storeitem ].append( (1,item.qty, item.stocktag,item.storeitem,item) )\n\t\t\t\t\t\t\t\tif not dummyAllocate:\n\t\t\t\t\t\t\t\t\titem.qty = float(0)\t\n\t\t\t\t\t\t\t\t\t# Note: we don't put() the item the object is passed back\n\t\t\t\t\t\t\t\t\t# to the caller which will do the put()\n\t\t\t\t\t\t\t\t\tsys.stderr.write(\"\\tdbg:_stockBestBefore() Setting QTY of %s/%s to %s (Used All)\\n\" %(item.storeitem,item.stocktag,0))\t\t\t\t\t\t\t\t\t\t\n\n\n\n\t\tsys.stderr.write(\"END: _stockBestBefore() %s\\n\" %(stockType))\n\t\treturn stock_result",
"def check_sell(self, data={}):\r\n \r\n to_sell = []\r\n rank_dict = {}\r\n for my_position in self.position:\r\n \r\n # compare current_price with value\r\n actual_value = my_position['current_price'] * my_position['num_shares']\r\n bought_value = my_position['total_invested']\r\n # check if current price significantly dropped from bought\r\n if bought_value * (1 - self.stop_loss) >= actual_value:\r\n to_sell.append(my_position)\r\n #rank the coin based on distance from bought value to ensure priority over other sell conditions \r\n rank_dict[my_position['code']] = actual_value - bought_value\r\n elif bought_value * self.profit_take <= actual_value:\r\n to_sell.append(my_position)\r\n # rank the coin based on the gain of selling\r\n rank_dict[my_position['code']] = bought_value - actual_value\r\n elif data[my_position[\"code\"]][\"close\"] >= self.statbot.calc_bands(my_position[\"code\"])[1] and self.statbot.get_rsi(my_position[\"code\"]) >= 70:\r\n diff = abs(data[my_position[\"code\"]][\"close\"] - self.statbot.calc_bands(my_position[\"code\"])[1])\r\n to_sell.append(my_position)\r\n #rank the coin based on the score calculated in get score using difference between bands and rsi\r\n rank_dict[my_position['code']] = self.get_score(SELL, self.statbot.get_rsi(my_position['code']), diff)\r\n \r\n for my_position in to_sell:\r\n self.sell(my_position['code'], my_position['current_price'])\r\n \r\n if len(self.selling) != 0:\r\n # sorts buying based on value of rank\r\n self.selling.sort(key = lambda x : rank_dict[x['code']])",
"def stock_analyst(stock_list):\n B = stock_list.index(min(stock_list))\n buy_value = min(stock_list)\n\n sell_value = -1\n if buy_value > 1:\n for sell_indx in range(B, len(stock_list)):\n if sell_value < stock_list[sell_indx]:\n sell_value = stock_list[sell_indx]\n S = sell_indx\n else:\n return 'Buy stock first'\n return [B, S]",
"def stock_screener_filter_top(conn_path,var_list,date,order,top,industry='None',since_ipo = {'condition': '>=', 't': 0},in_universe = False):\n var_mapping= pd.read_excel(conn_path+'value_mapping.xlsx')\n var2 = var_mapping[var_mapping['Chinese'].isin(var_list)]\n var2 = (var2.iloc[:,0]) \n if in_universe == True:\n industry2 = 'None'\n since_ipo['min'] = 0\n since_ipo['max'] = 30\n else:\n industry2 = industry\n db = select_top(conn_path,var2.iloc[0],date,industry = industry2,since_ipo = since_ipo,top = top[0],order = order[0])\n n = 1\n while(n<len(var_list)):\n temp = select_top(conn_path,var2.iloc[0],date,industry = industry2,since_ipo = since_ipo,top=top[n],order = order[n])\n db = db.merge(pd.DataFrame(temp.iloc[:,[0,5,6]]),how = 'inner',left_on = 'Code',right_on = 'Code')\n n = n + 1\n if industry == 'None':\n db = db\n else:\n if isinstance(industry,str):\n db = db[db['Industry']==(industry)]\n else:\n db = db[db['Industry'].isin(industry)]\n if(db.empty):\n raise ValueError('No Stock meets criteria!')\n return db",
"def get_stock_data(x):",
"def _summarise_sku(self, sku_id: str) -> dict:\n selection = UncertainDemand\n\n for sku in self.__analysed_orders:\n if sku.sku_id == sku_id:\n selection = sku\n break\n\n # If summary is updated also update possible attributes error in rank summary\n summary = {'sku_id': '{}'.format(selection.sku_id),\n 'revenue_rank': '{}'.format(self._rank(sku_id=sku_id, attribute='revenue')),\n 'revenue': '{}'.format(selection.revenue),\n 'retail_price': '{}'.format(selection.retail_price),\n 'gross_profit_margin': '{}'.format(Decimal(selection.retail_price) - Decimal(selection.unit_cost)),\n 'markup_percentage': '{}'.format(\n (Decimal(selection.retail_price) - selection.unit_cost) / selection.unit_cost),\n 'unit_cost': '{}'.format(selection.unit_cost),\n 'excess_rank': '{}'.format(self._rank(sku_id=sku_id, attribute='excess_stock_cost')),\n 'excess_units': '{}'.format(selection.excess_stock),\n 'excess_cost': '{}'.format(Decimal(selection.excess_stock_cost)),\n 'shortage_rank': self._rank(sku_id=sku_id, attribute='shortage_cost'),\n 'shortage_units': '{}'.format(round(selection.shortages)),\n 'shortage_cost': '{}'.format(selection.shortage_cost),\n 'safety_stock_units': '{}'.format(round(selection.safety_stock)),\n 'safety_stock_cost': '{}'.format(selection.safety_stock_cost),\n 'safety_stock_rank': '{}'.format(self._rank(sku_id=sku_id, attribute='safety_stock_cost')),\n 'classification': '{}'.format(selection.abcxyz_classification),\n 'average_orders': '{}'.format(round(selection.average_orders)),\n 'min_order': '{}'.format(min(map(int, selection.orders))),\n 'max_order': '{}'.format(max(map(int, selection.orders))),\n 'percentage_contribution_revenue': '{}'.format(selection.percentage_revenue),\n 'quantity_on_hand': '{}'.format(selection.quantity_on_hand),\n 'inventory_turns': '{}'.format((Decimal(selection.total_orders) * Decimal(selection.unit_cost)) / (\n Decimal(selection.quantity_on_hand) * Decimal(selection.unit_cost))),\n 'inventory_traffic_light': '{}'.format(self._quantity_on_hand_alert(selection)),\n 'unit_cost_rank': self._rank(sku_id=sku_id, attribute='unit_cost'),\n }\n # print(self._rank(sku_id=sku_id, attribute='shortage_cost'))\n return summary",
"def relevance_rank():\n\n\ttry:\n\t filters, search_criteria, column_weights = deserialize_args(request.args)\n\t print(filters, search_criteria, column_weights)\n\t filtered_data = s.filter(df, filters)\n\t relevance_rank = s.relevance(filtered_data, column_weights)\n\t output = s.sort(filtered_data, relevance_rank).to_dict()\n\t return json.dumps(str(output))\n\texcept:\n\t\treturn json.dumps({\"Output\":\"No companies match the input criteria\"})",
"def find_lowest_rank(train_set, test_set):\n\tsorted_test = sorted(test_set, key=lambda x: x.perfs[-1])\n \n # train data\n\ttrain_features = [t.features for t in train_set]\n\ttrain_perfs = [t.perfs[-1] for t in train_set]\n \n # test data\n\ttest_perfs = [t.features for t in sorted_test]\n\n\tcart_model = DecisionTreeRegressor()\n\tcart_model.fit(train_features, train_perfs)\n\tpredicted = cart_model.predict(test_perfs)\n\n\tpredicted_id = [[i, p] for i, p in enumerate(predicted)]\n # i-> actual rank, p -> predicted value\n\tpredicted_sorted = sorted(predicted_id, key=lambda x: x[-1])\n # print(predicted_sorted)\n # assigning predicted ranks\n\tpredicted_rank_sorted = [[p[0], p[-1], i] for i,p in enumerate(predicted_sorted)]\n # p[0] -> actual rank, p[-1] -> perdicted value, i -> predicted rank\n\tselect_few = predicted_rank_sorted[:10]\n\n\t# print the predcited top-10 configuration \n\t# for sf in select_few[:10]:\n\t# \tprint(\"actual rank:\", sf[0], \" actual value:\", sorted_test[sf[0]].perfs[-1], \" predicted value:\", sf[1], \" predicted rank: \", sf[2])\n\t# print(\"------------\")\n\n\treturn np.min([sf[0] for sf in select_few])",
"def ranking():\n\n top_males = []\n top_females = []\n\n males = MemberOfParliament.query.filter_by(gender=\"M\").order_by(MemberOfParliament.score.desc()).limit(10).all()\n for mp in males:\n top_males.append(mp.as_dict())\n females = MemberOfParliament.query.filter_by(gender=\"F\").order_by(MemberOfParliament.score.desc()).limit(10).all()\n for mp in females:\n top_females.append(mp.as_dict())\n\n return send_api_response({\"male\": top_males, \"female\": top_females})",
"def runQueryatBrandLevel():\r\n\r\n df = pd.DataFrame()\r\n\r\n query1 = \"SELECT brand,count(id) AS totalProduct from productinfo where date=%s group by brand ORDER BY count(id) DESC \"\r\n results1 = sql.read_sql(query1, con=conn, params=[date1])\r\n results1['retailer']=retName[0]\r\n df = df.append(results1)\r\n\r\n query1 = \"SELECT brand,count(id) AS totalProduct from bub_productinfo where date=%s group by brand ORDER BY count(id) DESC \"\r\n results2 = sql.read_sql(query1, con=conn, params=[date1])\r\n results2['retailer'] = retName[1]\r\n df = df.append(results2)\r\n\r\n query1 = \"SELECT brand,count(id) AS totalProduct from boo_productinfo where date=%s group by brand ORDER BY count(id) DESC \"\r\n results3 = sql.read_sql(query1, con=conn, params=[date1])\r\n results3['retailer'] = retName[2]\r\n df = df.append(results3)\r\n\r\n list1 = results1['brand'].tolist()\r\n list2 = results2['brand'].tolist()\r\n list3 = results3['brand'].tolist()\r\n\r\n\r\n for brand in list1:\r\n if brand in list2 and brand in list3:\r\n brandName.append(brand)\r\n topBrand=brandName[:5]\r\n\r\n df.set_index('brand',inplace=True)\r\n\r\n df = df.ix[topBrand, :]\r\n\r\n df.reset_index(inplace=True)\r\n header = df.dtypes.index\r\n graphs.multipleBar(df, header[0], header[1], header[2])\r\n\r\n print(df)\r\n print('\\n')\r\n\r\n \"\"\".........No of offered products in top 5 common brands for every Retailer.......... \"\"\"\r\n\r\n df = pd.DataFrame()\r\n for o, i, z in zip(var, var1, retName):\r\n query1 = \"select o.brand,count(DISTINCT i.id) as offeredProduct from %s as o INNER JOIN %s as i on o.id=i.id \" % (o, i)\r\n query2 = query1 + \"WHERE o.date=%s AND i.date=%s AND (o.brand=%s OR o.brand=%s OR o.brand=%s OR o.brand=%s OR o.brand=%s) AND i.discountPercentage >0 GROUP BY o.brand ORDER BY offeredProduct DESC \"\r\n results = sql.read_sql(query2, con=conn, params=[date1, date1,topBrand[0],topBrand[1],topBrand[2],topBrand[3],topBrand[4]])\r\n results['retailer'] = z\r\n df = df.append(results)\r\n\r\n header = df.dtypes.index\r\n graphs.multipleBar(df, header[0], header[1], header[2])\r\n print(df)\r\n print('\\n')\r\n\r\n \"\"\"..........Offer Percentage in top 5 common brands for every Retailer..........\"\"\"\r\n\r\n df = pd.DataFrame()\r\n for o, i, z in zip(var, var1, retName):\r\n query1 = \"select o.brand,AVG (i.discountPercentage) as offeredPercentage from %s as o INNER JOIN %s as i on o.id=i.id\" % (o, i)\r\n query2 = query1 + \" WHERE o.date=%s AND i.date=%s AND (o.brand=%s OR o.brand=%s OR o.brand=%s OR o.brand=%s OR o.brand=%s) AND i.discountPercentage >0 \" \\\r\n \"GROUP BY o.brand ORDER BY offeredPercentage DESC \"\r\n results = sql.read_sql(query2, con=conn, params=[date1, date1,topBrand[0],topBrand[1],topBrand[2],topBrand[3],topBrand[4]])\r\n results['retailer'] = z\r\n df = df.append(results)\r\n\r\n header = df.dtypes.index\r\n graphs.multipleBar(df, header[0], header[1], header[2])\r\n print(df)\r\n print('\\n')\r\n\r\n \"\"\".........Color Variation in top 5 common brands for every Retailer.........\"\"\"\r\n\r\n df = pd.DataFrame()\r\n for o, i,z in zip(var, var1, retName):\r\n query1 = \"select result.colorcount as colorNo,count(result.colorcount) AS products from (select infocolor.id,count(infocolor.id) as colorcount FROM (select o.id from %s\" \\\r\n \" AS o inner join %s AS i on o.id=i.id\" % (o, i)\r\n query2 = query1 + \" where i.date=%s and o.date=%s AND o.brand=%s ) as infocolor GROUP BY infocolor.id) as result GROUP BY colorNo\"\r\n results = sql.read_sql(query2, 
con=conn, params=[date1, date1,topBrand[0]])\r\n results['brand'] = topBrand[0]\r\n results['retailer'] = z\r\n df = df.append(results.ix[0:3, :])\r\n\r\n query1 = \"select result.colorcount as colorNo,count(result.colorcount) AS products from (select infocolor.id,count(infocolor.id) as colorcount FROM (select o.id from %s\" \\\r\n \" AS o inner join %s AS i on o.id=i.id\" % (o, i)\r\n query2 = query1 + \" where i.date=%s and o.date=%s AND o.brand=%s) as infocolor GROUP BY infocolor.id) as result GROUP BY colorNo\"\r\n results = sql.read_sql(query2, con=conn, params=[date1, date1,topBrand[1]])\r\n results['brand'] = topBrand[1]\r\n results['retailer'] = z\r\n df = df.append(results.ix[0:3, :])\r\n\r\n query1 = \"select result.colorcount as colorNo,count(result.colorcount) AS products from (select infocolor.id,count(infocolor.id) as colorcount FROM (select o.id from %s\" \\\r\n \" AS o inner join %s AS i on o.id=i.id\" % (o, i)\r\n query2 = query1 + \" where i.date=%s and o.date=%s AND o.brand=%s ) as infocolor GROUP BY infocolor.id) as result GROUP BY colorNo\"\r\n results = sql.read_sql(query2, con=conn, params=[date1, date1,topBrand[2]])\r\n results['brand'] = topBrand[2]\r\n results['retailer'] = z\r\n df = df.append(results.ix[0:3, :])\r\n\r\n query1 = \"select result.colorcount as colorNo,count(result.colorcount) AS products from (select infocolor.id,count(infocolor.id) as colorcount FROM (select o.id from %s\" \\\r\n \" AS o inner join %s AS i on o.id=i.id\" % (o, i)\r\n query2 = query1 + \" where i.date=%s and o.date=%s AND o.brand=%s ) as infocolor GROUP BY infocolor.id) as result GROUP BY colorNo\"\r\n results = sql.read_sql(query2, con=conn, params=[date1, date1, topBrand[3]])\r\n results['brand'] = topBrand[3]\r\n results['retailer'] = z\r\n df = df.append(results.ix[0:3, :])\r\n\r\n query1 = \"select result.colorcount as colorNo,count(result.colorcount) AS products from (select infocolor.id,count(infocolor.id) as colorcount FROM (select o.id from %s\" \\\r\n \" AS o inner join %s AS i on o.id=i.id\" % (o, i)\r\n query2 = query1 + \" where i.date=%s and o.date=%s AND o.brand=%s ) as infocolor GROUP BY infocolor.id) as result GROUP BY colorNo\"\r\n results = sql.read_sql(query2, con=conn, params=[date1, date1, topBrand[4]])\r\n results['brand'] = topBrand[4]\r\n results['retailer'] = z\r\n df = df.append(results.ix[0:3, :])\r\n\r\n header = df.dtypes.index\r\n graphs.brandStackedMultiBar(df, header[0], header[1], header[2], header[3])\r\n print(df)\r\n print('\\n')\r\n\r\n \"\"\"..........Size Variation in top 5 common brands for every Retailer...........\"\"\"\r\n\r\n df = pd.DataFrame()\r\n for o, i, p, z in zip(var, var1, var2, retName):\r\n query1= \"select p.size,count(o.id) as products from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query2 = query1 + \" where o.brand=%s and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') GROUP BY p.size\"\r\n results = sql.read_sql(query2, con=conn, params=[topBrand[0],date1, date1, date1])\r\n results['brand'] = topBrand[0]\r\n results['retailer'] = z\r\n df = df.append(results)\r\n\r\n query1 = \"select p.size,count(o.id) as products from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query2 = query1 + \" where o.brand=%s and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') GROUP BY p.size\"\r\n results = sql.read_sql(query2, con=conn, 
params=[topBrand[1],date1, date1, date1])\r\n results['brand'] = topBrand[1]\r\n results['retailer'] = z\r\n df = df.append(results)\r\n\r\n query1 = \"select p.size,count(o.id) as products from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query2 = query1 + \" where o.brand=%s and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') GROUP BY p.size\"\r\n results = sql.read_sql(query2, con=conn, params=[topBrand[2],date1, date1, date1])\r\n results['brand'] = topBrand[2]\r\n results['retailer'] = z\r\n df = df.append(results)\r\n\r\n query1 = \"select p.size,count(o.id) as products from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query2 = query1 + \" where o.brand=%s and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') GROUP BY p.size\"\r\n results = sql.read_sql(query2, con=conn, params=[topBrand[3], date1, date1, date1])\r\n results['brand'] = topBrand[3]\r\n results['retailer'] = z\r\n df = df.append(results)\r\n\r\n query1 = \"select p.size,count(o.id) as products from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query2 = query1 + \" where o.brand=%s and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') GROUP BY p.size\"\r\n results = sql.read_sql(query2, con=conn, params=[topBrand[4], date1, date1, date1])\r\n results['brand'] = topBrand[4]\r\n results['retailer'] = z\r\n df = df.append(results)\r\n\r\n header = df.dtypes.index\r\n graphs.brandStackedMultiBar(df, header[0], header[1], header[2], header[3])\r\n print(df)\r\n print('\\n')\r\n\r\n \"\"\"....Items Sold in top 5 common brands.... 
\"\"\"\r\n\r\n df = pd.DataFrame()\r\n df1 = pd.DataFrame()\r\n df2 = pd.DataFrame()\r\n\r\n \"\"\"....Yesterday.....\"\"\"\r\n for o, i, p, z in zip(var[1:], var1[1:], var2[1:], retName[1:]):\r\n query1 = \"select o.brand,p.size,p.sku,p.quantity from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query2 = query1 + \" where (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L')\"\r\n results = sql.read_sql(query2, con=conn, params=[topBrand[0],topBrand[1],topBrand[2],topBrand[3],topBrand[4],date1, date1, date1])\r\n results['retailer'] = z\r\n df1 = df1.append(results)\r\n\r\n \"\"\"....Before Yesterday.....\"\"\"\r\n\r\n query1 = \"select o.brand,p.size,p.sku,p.quantity from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query2 = query1 + \" where (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L')\"\r\n results = sql.read_sql(query2, con=conn,params=[topBrand[0], topBrand[1], topBrand[2], topBrand[3], topBrand[4], date2, date2,date2])\r\n results['retailer'] = z\r\n df2 = df2.append(results)\r\n\r\n query = \"SELECT o.brand as brand_x,p.size as size_x,p.sku as sku,q.itemQuantity as itemsold from productsize2 AS q INNER JOIN productsize AS p ON p.sku=q.sku INNER JOIN productcolor AS i on p.colorId=i.colorId \" \\\r\n \"INNER JOIN productinfo as o ON o.id=i.id WHERE o.date=%s AND i.date=%s AND p.date=%s AND q.date=%s AND (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) and \" \\\r\n \"(p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') \"\r\n results = sql.read_sql(query, con=conn, params=[date1, date1, date1, date1,topBrand[0], topBrand[1], topBrand[2], topBrand[3], topBrand[4]])\r\n results['retailer_x'] = retName[0]\r\n\r\n df = pd.merge(df1, df2, on='sku', how='inner')\r\n df['itemsold'] = df['quantity_y'] - df['quantity_x']\r\n df = df[df['itemsold'] > 0]\r\n df = df.append(results)\r\n #df1['itemsold'] = df1['quantity'] - df2['quantity']\r\n #df = df1.ix[:, ['category', 'retailer', 'itemsold', 'size']].copy()\r\n header = df.dtypes.index\r\n graphs.brandStackedMultiBar(df, header[7], header[2], header[0], header[5])\r\n\r\n print(df)\r\n print('\\n')\r\n\r\n \"\"\"....Revenue in top 5 common brands.... 
\"\"\"\r\n\r\n df = pd.DataFrame()\r\n df1 = pd.DataFrame()\r\n df2 = pd.DataFrame()\r\n\r\n \"\"\"....Yesterday.....\"\"\"\r\n\r\n for o, i, p, z in zip(var[1:], var1[1:], var2[1:], retName[1:]):\r\n query1 = \"select o.brand,p.size,p.sku,p.quantity,i.originalPrice,i.discountPercentage from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query = query1 + \" where (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L')\"\r\n results = sql.read_sql(query, con=conn, params=[topBrand[0], topBrand[1], topBrand[2], topBrand[3], topBrand[4],date1, date1, date1])\r\n results['retailer'] = z\r\n df1 = df1.append(results)\r\n\r\n \"\"\"........Before Yesterday......\"\"\"\r\n\r\n query1 = \"select o.brand,p.size,p.sku,p.quantity,i.originalPrice,i.discountPercentage from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId\" % (o, i, p)\r\n query = query1 + \" where (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L')\"\r\n results = sql.read_sql(query, con=conn,params=[topBrand[0], topBrand[1], topBrand[2], topBrand[3], topBrand[4], date2, date2,date2])\r\n results['retailer'] = z\r\n df2 = df2.append(results)\r\n\r\n\r\n query = \"SELECT o.brand as brand_x,p.size as size_x,p.sku,q.itemQuantity as itemsold,q.itemRevenue as revenue from productsize2 AS q INNER JOIN productsize AS p ON p.sku=q.sku INNER JOIN productcolor AS i on p.colorId=i.colorId \" \\\r\n \"INNER JOIN productinfo as o ON o.id=i.id WHERE o.date=%s AND i.date=%s AND p.date=%s AND q.date=%s AND (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) AND \" \\\r\n \"(p.size='X' OR p.size='S' OR p.size='M' OR p.size='L')\"\r\n results = sql.read_sql(query, con=conn, params=[date1, date1, date1, date1,topBrand[0], topBrand[1], topBrand[2], topBrand[3], topBrand[4]])\r\n results['retailer_x'] = retName[0]\r\n\r\n df = pd.merge(df1, df2, on='sku', how='inner')\r\n df['itemsold'] = df['quantity_y'] - df['quantity_x']\r\n df = df[df['itemsold'] > 0]\r\n df['price'] = df['originalPrice_x'] - (df['discountPercentage_x'] / 100)\r\n\r\n df['revenue'] = df['price'] * df['itemsold']\r\n df = df.append(results)\r\n\r\n header = df.dtypes.index\r\n graphs.brandStackedMultiBar(df, header[13], header[12], header[0], header[10])",
"def rank_rentabilidade(self, top):\n self.informe.cria_df_informe(\n cnpj=\",\".join(set(self.cnpjs)), columns=[\"VL_QUOTA\"]\n )\n fundo_df = self.calc_rentabilidade_periodo()\n fundo_df = self.adiciona_denom_social(fundo_df)\n return (\n fundo_df.sort_values(by=\"Rentabilidade\", ascending=False)\n .head(top)\n .to_string(float_format=\"{:.2f}%\".format)\n )",
"def submitItemRank(self, id1, id2, id3):\n survey, slide = self._basicSubmitSetup(id1, id2, id3)\n answer = ''\n for key in request.params.keys():\n try:\n # Some small validation to make sure the keys are numbers, as we expect\n int(key)\n label = request.params[key]\n rank = key\n if rank == 0:\n continue\n answer += ' %s' % label\n sa, result = self._basicAnswerCreation(survey, slide, label, rank)\n except:\n log.info('User %s submitted bad value in submitItemRank() form' % c.authuser.id)\n return self._basicReturnResult(result, answer)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert .NET ticks to formatted ISO8601 time
|
import datetime

def convert_dotnet_tick(ticks):
    """Convert .NET ticks (100-nanosecond intervals since 0001-01-01) to an ISO 8601 UTC string."""
    _date = datetime.datetime(1, 1, 1) + datetime.timedelta(microseconds=ticks // 10)
    if _date.year < 1900:  # strftime() requires year >= 1900 on some platforms; this shifts very early dates
        _date = _date.replace(year=_date.year + 1900)
    # Keep millisecond precision and append 'Z' explicitly instead of slicing it off.
    return _date.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
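
# --- Hedged usage sketch (not part of the original record) -----------------
# 10,000,000 ticks equal one second, so 864,000,000,000 ticks equal exactly
# one day after 0001-01-01; the year is then bumped by 1900 by the guard above.
print(convert_dotnet_tick(864_000_000_000))   # '1901-01-02T00:00:00.000Z'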
|
[
"def encode_datetime(o):\n r = o.isoformat()\n if o.microsecond:\n r = r[:19] + r[26:]\n if r.endswith('+00:00'):\n r = r[:-6] + 'Z'\n return r",
"def datetime_to_iso8601(dt):\n return '%s.%03dZ' % (dt.strftime('%Y-%m-%dT%H:%M:%S'),\n int(dt.microsecond / 1000))",
"def encodeTimeTicks(timeTicks):\n return _encodeUnsigned('TimeTicks', timeTicks)",
"def ISO8601():",
"def isoformat(self):\r\n s = _format_time(self.__hour, self.__minute, self.__second,\r\n self.__microsecond)\r\n tz = self._tzstr()\r\n if tz:\r\n s += tz\r\n return s",
"def time_isoformat(self):\n s = ''\n for att in ('hour', 'minute', 'second'):\n if self._content[att] is None:\n s += '00' + ':'\n else:\n s += str(self._content[att]) + ':'\n return s.rstrip(\":\") + \".0\"",
"def epoch_to_iso8601(timestamp):\n return datetime.fromtimestamp(timestamp).isoformat()",
"def ts2iso(ts):\n return datetime.datetime.utcfromtimestamp(ts).isoformat(\" \")",
"def encodeIsoTime(timeTuple, toEncoding=None):\n year, month, day, hour, minute, second, timezoneHour, timezoneMinute = timeTuple\n result = '%04d'%year\n if month is not None: result += '-%02d'%month\n if day is not None: result += '-%02d'%day\n if hour is not None: result += 'T%02d'%hour\n if minute is not None: result += ':%02d'%minute\n if second is not None: result += ':%05.2f'%second\n if hour is not None:\n if timezoneHour is None:\n tzcode = 'Z'\n else:\n tzcode = '%+03d'%timezoneHour\n if timezoneMinute is not None:\n tzcode += ':%2d'%timezoneMinute\n result += tzcode\n return result",
"def timestamp() -> str:\n return datetime.datetime.now().astimezone().replace(microsecond=0).isoformat()",
"def binary_time_to_str(x):\n return unix_time_to_ts(binary_time_to_unix_time(x))",
"def toIso8601(dt=None):\n if dt is None:\n dt = nowUTC() # make it aware\n\n return (dt.isoformat(timespec='microseconds')) # force include microseconds",
"def get_iso_systime(self):\n return time.strftime(u\"%Y-%m-%dT%H:%M:%S\",\n time.localtime(time.time())) + self._get_timezone()",
"def test_to_timestamp_vumi_format_string(self):\n timestamp = to_timestamp(\"2015-01-26 19:22:05.000\")\n self.assertEqual(timestamp, 1422300125)",
"def pythonize_iso_timestamp(timestamp):\n # 'Z'-timezone to '+00:00'-timezone\n timestamp = timestamp.replace('Z', '+00:00')\n # '+0000'-timezone to '+00:00'-timezone\n def repl(matchobj):\n sign = matchobj.group(1)\n offset = matchobj.group(2)\n hh, mm = offset[0:2], offset[2:4]\n return \"{}{}:{}\".format(sign, hh, mm)\n timestamp = re.sub(r'(\\+|-)(\\d{4})', repl, timestamp)\n # '.39' microseconds to '.390000' microseconds\n def repl2(matchobj):\n ms = matchobj.group(1)\n sign = matchobj.group(2)\n return \".{}{}\".format(ms[:6].ljust(6, '0'), sign)\n timestamp = re.sub(r'\\.(\\d*)($|\\+|-)', repl2, timestamp)\n return timestamp",
"def time_to_str(time: Timestamp) -> str:\n return time.strftime('%H:%M:%S')",
"def format(self, fmt = \"%S.%i\"):\n return _coin.SbTime_format(self, fmt)",
"def cue_build_timestamp(data):\n\n return \"%2.2d:%2.2d:%2.2d\" % ((data / 75) / 60, (data / 75) % 60, data % 75)",
"def TimeFromTicks(ticks, micro=0):\n return Time(*time.localtime(ticks)[3:6] + (micro,))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Determine if a point is in the 'crash zone'
|
def is_in_crash_zone(pos):
    """Determine whether the x-coordinate of pos lies strictly inside the crash zone."""
    return Processor.CRASH_ZONE_START < pos[0] < Processor.CRASH_ZONE_END
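
# --- Hedged usage sketch (not part of the original record) -----------------
# Processor is not shown in this record, so a stand-in with made-up bounds is
# defined here purely to exercise the chained comparison above.
class Processor:
    CRASH_ZONE_START = 10
    CRASH_ZONE_END = 20

print(is_in_crash_zone((15, 0)))   # True  (10 < 15 < 20)
print(is_in_crash_zone((25, 0)))   # False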
|
[
"def crash_check():\n global CURRENT\n # Grab status\n stat = grab_status()\n\n # Check for seg\n if \"SIGSEGV\" in stat:\n return True\n return False",
"def contains(self, coord):\n try:\n pixel = self.getWcs().skyToPixel(coord)\n except (lsst.pex.exceptions.DomainError, lsst.pex.exceptions.RuntimeError):\n # Point must be way off the tract\n return False\n if not np.isfinite(pixel.getX()) or not np.isfinite(pixel.getY()):\n # Point is definitely off the tract\n return False\n return self.getBBox().contains(geom.Point2I(pixel))",
"def check_safety_zone(self):\n if self.safety_zone is None:\n return 0\n\n if self.position_xy.within(self.safety_zone_inner):\n return 0\n if self.position_xy.within(self.safety_zone):\n return 1\n return 2",
"def main():\n check_if_point_source()",
"def contains_point(self, point):\n point = IntPoint.make(point)\n return point.x >= self.left and point.x < self.right and point.y >= self.top and point.y < self.bottom",
"def check_points(self, points):\r\n for point in points:\r\n if (point > self.spec_lines.lines[0]\r\n or point < self.spec_lines.lines[-1]):\r\n print(\"Point {} out of zone 3\".format(self.x_pt))\r\n elif (point > self.spec_lines.lines[1]\r\n or point < self.spec_lines.lines[-2]):\r\n print(\"Point {} out of zone 2\".format(self.x_pt))\r\n elif (point > self.spec_lines.lines[2]\r\n or point < self.spec_lines.lines[-3]):\r\n# print(\"out of zone 1\")\r\n pass\r\n else:\r\n pass",
"def _is_program_crash(reason):\n\n if not isinstance(reason, TerminateState):\n return False\n\n return 'Invalid memory access' in str(reason)",
"def is_crash(mutation: dict) -> bool:\n return 'crashes' in mutation['path'].parent.name",
"def check_wall(self, pos):\n\t\tif(str(pos) in self.wall_map and self.wall_map[str(pos)]):\n\t\t\treturn True\n\t\treturn False",
"def is_inside(self, point):\n return not (np.prod(point[0] - self._x_range) > 0 or np.prod(point[1] - self._y_range) > 0)",
"def is_crash_nonproper_and_directional(crash_id: int) -> str:\n if not str(crash_id).isdigit():\n return False\n\n check_nonproper_polygon_query = \"\"\"\n query find_service_road_location($crashId: Int!) {\n find_service_road_location_for_centerline_crash(args: {input_crash_id: $crashId})\n {\n location_id\n }\n }\n \"\"\"\n\n try:\n \"\"\"\n We will attempt to find the record through a query using the find_service_road_location_for_centerline_crash function via Hasura.\n If the location_id key does not contain a location_id, then the crash is not a canidate for being linked to a service road location.\n \"\"\"\n response = requests.post(\n HASURA_ENDPOINT,\n data=json.dumps(\n {\n \"query\": check_nonproper_polygon_query,\n \"variables\": {\n \"crashId\": crash_id\n }\n }\n ),\n headers=HEADERS,\n verify=HASURA_SSL_VERIFY\n )\n if (response.json()[\"data\"][\"find_service_road_location_for_centerline_crash\"][0][\"location_id\"] is None):\n return ''\n else:\n return response.json()[\"data\"][\"find_service_road_location_for_centerline_crash\"][0][\"location_id\"]\n except:\n \"\"\"\n In case the response is broken or invalid, we need to:\n - Output the problem for debugging\n - Default to empty string, False by another name, but fitting in the expected str datatype\n \"\"\"\n return False",
"def is_valid_point(map_grid, point):\n x = point[0]\n y = point[1]\n width = map_grid.info.width\n height = map_grid.info.height\n return 0 <= x < width and 0 <= y < height",
"def has_pt(self) -> bool:\n return self.pt_tracer is not None",
"def point_is_on_frontier(point, cell_points):\n if (point[0] + 1, point[1]) not in cell_points or (point[0], point[1] + 1) not in cell_points \\\n or (point[0] - 1, point[1]) not in cell_points or (point[0], point[1] - 1) not in cell_points:\n return True\n else:\n return False",
"def is_visitable_point(map_grid, augmented_occ, point, allow_unknown=True):\n occ = map_grid.data[point[1] * map_grid.info.width + point[0]]\n if not allow_unknown and occ == -1:\n return False\n if occ >= occ_threshold:\n return False\n if point in augmented_occ:\n aug_occ = augmented_occ[point]\n if not allow_unknown and aug_occ == -1:\n return False\n if aug_occ >= occ_threshold:\n return False\n return True",
"def battleCollision(self, point, adjacentPoint):\n zoneName = self.dnaStore.getSuitEdgeZone(\n point.getIndex(), adjacentPoint.getIndex())\n zoneId = int(self.extractGroupName(zoneName))\n \n return self.battleMgr.cellHasBattle(zoneId)",
"def _on_board(self, point):\n return self.board[point]!= BORDER",
"def contains_point(self, x=0, y=0):\n return 0 <= x < self.get_width() and 0 <= y < self.get_height()",
"def outside_arena():\r\n return not (0 < node.x < bounds[0] and 0 < node.y < bounds[1])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Documentation Assign the barriers for the object graph
|
def __init__(self, barriers: list):
    """Assign the barriers for the object graph."""
    self.barriers = barriers
|
[
"def barrier_ref(self):\n return self._barrier_ref",
"def _barrier_worker(self):\n pass",
"def make_barriers():\n mouth = curve(pos=[(23,3, 10),(18,2.5, 15),(12,2,20),(7,.5,21),(0,0,23),(-7,.5,21),(-12,2,20),(-18,2.5,15),(-23,3,10)], radius= 2, color=color.black)\n T_hat = box(pos=(26.5,2.5,-43.5), axis=(-.5,0,1), length=1, width=40, height=2, color=color.magenta)\n L_hat = box(pos=(6,2.5,-46), axis=(-.5,0,1), length=14, width=1, height=2, color=color.magenta)\n R_hat = box(pos=(40,2.5,-26), axis=(-.5,0,1), length=20, width=1, height=2, color=color.magenta)\n L_side = curve(pos=[(-35,2.5,20),(-41.5,2.5,3),(-41,2.5,-8),(-37,2.5,-18),(-33,2.5,-24),(-28,2.5,-30),(-20,2.5,-36),(-12,2.5,-40),(3,2.5,-41)], radius=2, color=color.green)\n R_side = curve(pos=[(35,2.5,20),(41.5,2.5,3),(41,2.5,-8),(37,2.5,-18)], radius=2,color=color.green)\n\n list_of_barriers = [mouth, T_hat, L_hat, R_hat, L_side, R_side]\n return list_of_barriers",
"def barrier(self) -> None:\n if dist.is_initialized():\n dist.barrier()",
"def BarrierTest():\n loops = 10\n increment = 1\n results = []\n\n if (myname == 0):\n for i in range(nodes):\n TestObject(i)\n\n PyDOOMS.barrier()\n obj = PyDOOMS.get(myname)\n\n for i in range(loops):\n results.append(obj.value)\n obj.value += increment\n\n PyDOOMS._comm.addOutgoingUpdate(obj.ID,\"value\",obj.value)\n PyDOOMS.barrier()\n\n results.append(obj.value)\n\n if not (results[0] == 0*increment and results[1] == 1*increment and results[2] == 2*increment and\n results[3] == 3*increment and results[4] == 4*increment):\n logging.critical(\"results: \" + str(results))\n raise Exception",
"def gloo_barrier():\n\n assert _global_gloo_ctx is not None, \"gloo context is not initialzed.\"\n _global_gloo_ctx.barrier()",
"def testNestedBarriers(self):\n exceptions = [False, False]\n level1_reached = [False]\n\n def _Level2Exception(type, value, traceback):\n exceptions[1] = True\n\n def _Level2(cb):\n raise Exception('exception in level 2')\n\n def _Level1Exception(type, value, traceback):\n exceptions[0] = True\n\n def _OnLevel1():\n self.io_loop.add_callback(self.stop)\n level1_reached[0] = True\n\n def _Level1(cb):\n with util.Barrier(None, on_exception=_Level2Exception) as b:\n _Level2(b.Callback())\n _OnLevel1()\n\n with util.Barrier(_OnLevel1, on_exception=_Level1Exception) as b:\n _Level1(b.Callback())\n self.wait()\n self.assertTrue(not exceptions[0])\n self.assertTrue(exceptions[1])\n self.assertTrue(level1_reached[0])",
"def __init__(self, count: 'unsigned int'):\n this = _coin.new_SbBarrier(count)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def reset_connections_to_barrier_and_firewall(self):\n # undo all connections\n for loadpath in self.listLoadpaths:\n for component in loadpath.listComponents:\n component.connectedToBarrier = False\n component.connectedToFirewall = False\n component.leftNode.onBarrier = False\n component.leftNode.onFirewall = False\n component.rightNode.onBarrier = False\n component.rightNode.onFirewall = False\n for crossComp in self.listCrossComponents:\n crossComp.connectedToBarrier = False\n crossComp.connectedToFirewall = False\n crossComp.leftNode.onBarrier = False\n crossComp.leftNode.onFirewall = False\n crossComp.rightNode.onBarrier = False\n crossComp.rightNode.onFirewall = False\n # redo all connections\n for loadpath in self.listLoadpaths:\n leftLimit = min(comp.leftNode.position\n for comp in loadpath.listComponents)\n rightLimit = max(comp.rightNode.position\n for comp in loadpath.listComponents)\n frontNodes = [comp.leftNode\n for comp in loadpath.listComponents\n if comp.leftNode.position == leftLimit]\n backNodes = [comp.rightNode\n for comp in loadpath.listComponents\n if comp.rightNode.position == rightLimit]\n for frontNode in frontNodes:\n frontNode.onBarrier = True\n for comp in frontNode.towardsFirewall:\n comp.link_to_barrier()\n for backNode in backNodes:\n backNode.onFirewall = True\n for comp in backNode.towardsBarrier:\n comp.link_to_firewall()\n self.draw() ##",
"def __init__(self):\n self.railfence = []",
"def initialize_barrier(self, number_of_threads):\n self.barrier = ReusableBarrier(number_of_threads)",
"def sys_barriers_180(basename, sofile,\n barriers = 'barriers',\n minh = 0.001,\n maxn = 50,\n temp = 37.0,\n noLP = False,\n moves = 'single-base-pair',\n zipped = True,\n rates = True,\n k0 = 1.,\n paths = None,\n bsize = False,\n ssize = False,\n circ = False,\n saddle = False,\n bmfile = None,\n plot = False,\n connected = False,\n force = False):\n\n if zipped and which('zcat') is None:\n rlog.error('Using gzipped subopt files requires the commandline tool zcat.')\n raise ExecError('zcat', \"zcat\")\n\n if which(barriers) is None:\n raise ExecError(barriers, \"barriers\",\n 'http://www.tbi.univie.ac.at/RNA/Barriers')\n\n if not sofile or not os.path.exists(sofile):\n raise Exception('Cannot find input file:', sofile)\n\n bofile = basename + '_barriers.bar'\n befile = basename + '_barriers.err'\n brfile = basename + '_barriers_rates.txt'\n bbfile = basename + '_barriers_rates.bin'\n bpfile = basename + '_barriers_tree.ps' if plot else None\n bmfile = basename + '_barriers.ms' if bmfile else None\n\n if not force and os.path.exists(bofile) and os.path.exists(brfile) and os.path.exists(bbfile) and \\\n (not paths or all(map(os.path.exists, ['{:s}_path.{:03d}.{:03d}.txt'.format(basename, int(x), int(y)) for x, y in map(lambda x: x.split('='), paths)]))) and \\\n (not plot or os.path.exists(bpfile)) and \\\n (not bmfile or os.path.exists(bmfile)):\n rlog.info(f\"# files exist: {bofile}, {brfile}\")\n return [bofile, befile, brfile, bbfile, bpfile, bmfile]\n\n barcall = [barriers]\n\n if not plot:\n barcall.extend(['-q'])\n\n if connected:\n barcall.extend(['-c'])\n\n if paths:\n for p in paths:\n barcall.extend(['--path', p])\n\n if noLP:\n barcall.extend(['-G', 'RNA-noLP'])\n else:\n barcall.extend(['-G', 'RNA'])\n\n if moves == 'single-base-pair':\n pass\n elif moves == 'shift':\n barcall.extend(['--moves=Shift'])\n else:\n raise ValueError(f\"Invalid move-set for barriers: {moves}\")\n\n # buggy barriers\n if subopt_reaches_minh(sofile, minh, zipped):\n barcall.extend([\"--minh\", str(minh)])\n barcall.extend([\"--max\", str(int(maxn))])\n barcall.extend([\"-T\", str(temp)])\n\n if rates:\n barcall.extend(['--rates'])\n barcall.extend(['--rates-text-file', brfile])\n barcall.extend(['--rates-binary-file', bbfile])\n if bsize:\n barcall.extend(['--bsize'])\n if ssize:\n barcall.extend(['--ssize'])\n if saddle:\n barcall.extend(['--saddle'])\n\n if bmfile:\n barcall.extend([\"--mapstruc\", bmfile])\n barcall.extend([\"--mapstruc-output\", bmfile])\n\n call = \"{} 2> {} > {}\".format(' '.join(barcall), befile, bofile)\n rlog.info(f'# {call}')\n # TODO: This version is prettier than the old one, but it might be slower\n # than just writing the string and \"shell = True\".\n if zipped:\n inp = sub.Popen([\"zcat\", sofile], stdout = sub.PIPE)\n with open(bofile, 'w') as bh, open(befile, 'w') as eh:\n proc = sub.Popen(barcall, stdin = sub.PIPE, stdout = bh, stderr = eh)\n proc.communicate(inp.communicate()[0])\n if proc.returncode:\n raise SubprocessError(proc.returncode, call)\n else:\n with open(sofile, 'r') as sh, open(bofile, 'w') as bh, open(befile, 'w') as eh:\n proc = sub.Popen(barcall, stdin = sh, stdout = bh, stderr = eh)\n proc.communicate()\n if proc.returncode:\n raise SubprocessError(proc.returncode, call)\n\n if rates and k0 != 1.: # So this should actually be supported by barriers, but it's not.\n with open(bbfile, 'rb') as rf, \\\n open(brfile + '.tmp', 'w') as nr, \\\n open(bbfile + '.tmp', 'wb') as nb:\n dim, = unpack('i', rf.read(calcsize('i')))\n nb.write(pack(\"i\", 
dim))\n\n rm = []\n for e in range(dim):\n col = []\n for e in range(dim):\n r, = unpack('d', rf.read(8))\n rate = r * k0\n nb.write(pack(\"d\", rate))\n col.append(rate)\n rm.append(col)\n\n for line in zip(*rm):\n newline = \"\".join(map(\"{:10.4g}\".format, line))\n nr.write(newline + \"\\n\")\n \n os.rename(brfile + '.tmp', brfile)\n os.rename(bbfile + '.tmp', bbfile)\n\n if plot:\n os.rename('tree.ps', bpfile)\n\n if paths:\n for p in paths:\n x, y = p.split('=')\n pfname = 'path.{:03d}.{:03d}.txt'.format(int(x), int(y))\n os.rename(pfname, basename + '_' + pfname)\n\n return [bofile, befile, brfile, bbfile, bpfile, bmfile]",
"def enrich_bioprocesses(self, graph: BELGraph, use_tqdm: bool = False) -> None:\n self.add_namespace_to_graph(graph)\n for node, term in list(self.iter_terms(graph, use_tqdm=use_tqdm)):\n if node[FUNCTION] != BIOPROCESS:\n continue\n\n for hierarchy in term.in_edges:\n graph.add_is_a(hierarchy.subject.as_bel(), node)\n\n for hierarchy in term.out_edges:\n graph.add_is_a(node, hierarchy.object.as_bel())",
"def barrier(self):\n msg = KVStoreMsg( \n type=KVMsgType.BARRIER,\n rank=self._client_id,\n name=None,\n id=None,\n data=None,\n shape=None,\n c_ptr=None)\n\n for server_id in range(self._server_count):\n _send_kv_msg(self._sender, msg, server_id)\n\n for server_id in range(self._server_count):\n back_msg = _recv_kv_msg(self._receiver)\n assert back_msg.type == KVMsgType.BARRIER, 'Recv kv msg error.'",
"def graph(self):\n pass",
"def complete_linkage(c1, c2):",
"def processAgentDependencies(self):\n self.agentinitorder=None # to avoid accidents leaking from older implementation\n regmap={}\n glomap={}\n for a in self.model.agents:\n regmap[a.name]=a.depends\n glomap[a.name]=a.globaldepends\n return (regmap,glomap)\n #############\n self.agentinitorder=[]\n def pushDep(a):\n for d in a.depends:\n pushDep(self.model.getAgentByName(d))\n if not (a.name in self.agentinitorder):\n self.agentinitorder.append(a.name)\n for a in self.model.agents:\n #print a.name,a.depends\n pushDep(a)\n debug(\"Region %d agentinitorder: %s\" %(self.regionid+1,str(self.agentinitorder)))\n #raw_input(\"ENTER\")\n #return self.agentinitorder\n return (regmap,glomap)",
"def _manualCrossReferences(self, flag=True):\n self._manualCrossReferences_flag=flag",
"def barrierModel(x, *p):\n if len(p) == 1:\n p = p[0]\n xoffset = p[0]\n leverarm = p[1]\n t = p[2]\n y = np.sqrt(np.power((x - xoffset) * leverarm, 2) + 4 * t**2) * ueV2Hz\n return y"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the neighbouring points according to the four movements: front, right, left and back
|
def get_vertex_neighbours(self, pos: tuple):
n = []
# Allowed movements are left, front, right and back
for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
x2 = pos[0] + dx
y2 = pos[1] + dy
# if x2 < 0 or x2 > 7 or y2 < 0 or y2 > 7:
# pass
n.append((x2, y2))
return n
|
[
"def neighbours(self):# по отиз начин работи по - бързо от колкото с up.left, left... etc\n\t\tres = []\n\t\tfor x in xrange(self.x - 1, self.x + 2):\n\t\t\tres.append( Point( x, self.y+1 ) )\n\t\t\tres.append( Point( x, self.y - 1 ) )\n\t\tres.append( Point(self.x -1, self.y) )\n\t\tres.append( Point(self.x+1, self.y) )\n\t\treturn res",
"def get_adjacent_corners(self):\n if self.x % 3 == 0:\n return [\n CornerLocation(self.x + 2, self.y),\n CornerLocation(self.x - 1, self.y - 1),\n CornerLocation(self.x - 1, self.y + 1),\n ]\n return [\n CornerLocation(self.x - 2, self.y),\n CornerLocation(self.x + 1, self.y - 1),\n CornerLocation(self.x + 1, self.y + 1),\n ]",
"def neighbours(self, point):\n\n point_x, point_y = point[0], point[1]\n\n if point_x == 0 and point_y == 0:\n return (0, 1), (1, 1), (1, 0)\n if point_x == self.rows-1 and point_y == \\\n self.cols-1:\n return (point_x-1, point_y), \\\n (point_x-1, point_y-1), \\\n (point_x, point_y-1)\n if point_x == self.rows-1 and point_y == 0:\n return (point_x-1, 0), (point_x-1, 1), \\\n (point_x, 1)\n if point_x == 0 and point_y == self.cols-1:\n return (0, point_y-1), (1, point_y-1), \\\n (1, point_y)\n if point_x == 0:\n return (0, point_y - 1), (1, point_y-1), \\\n (1, point_y), (1, point_y+1), (0, point_y+1)\n if point_y == 0:\n return (point_x-1, 0), (point_x-1, 1), \\\n (point_x, 1), (point_x+1, 1), (point_x+1, 0)\n if point_x == self.rows-1:\n return (point_x-1, point_y), (point_x-1, point_y-1), \\\n (point_x, point_y-1), (point_x-1, point_y+1), \\\n (point_x, point_y+1)\n if point_y == self.cols-1:\n return (point_x, point_y-1), (point_x-1, point_y-1), \\\n (point_x-1, point_y), (point_x+1, point_y-1), \\\n (point_x+1, point_y)\n\n return (point_x-1, point_y-1), (point_x-1, point_y), \\\n (point_x-1, point_y+1), (point_x, point_y+1), \\\n (point_x+1, point_y+1), (point_x+1, point_y), \\\n (point_x+1, point_y-1), (point_x, point_y-1)",
"def get_adjacent(x, y, direction):\n\n if direction == LEFT and x > 0:\n return x - 1, y\n elif direction == RIGHT and x < 6:\n return x + 1, y\n elif direction == DOWN and y > 0:\n return x, y - 1\n elif direction == UP and y < 4:\n return x, y + 1\n else:\n return None, None",
"def neighbors_of_4(mapdata, x, y):\n neighbors = []\n offset = [-1, 1]\n for off in offset:\n if PathPlanner.is_cell_walkable(mapdata, x + off, y):\n newNeighbor = (x + off, y)\n neighbors.append(newNeighbor)\n if PathPlanner.is_cell_walkable(mapdata, x, y + off):\n newNeighbor = (x, y + off)\n neighbors.append(newNeighbor)\n return neighbors",
"def get_adjacent(x, y):\n return [(x + 1, y), (x + 1, y + 1), (x + 1, y - 1),\n (x, y - 1), (x, y + 1), (x - 1, y),\n (x - 1, y + 1), (x - 1, y - 1)]",
"def _neighbors(self,point):\n #row,col = self._point_to_coord(point)\n #if 0 <= row <= self.size+1 and 0 <= col <= self.size+1:\n return [point-1, point+1, point-self.NS, point+self.NS]\n #else:\n # raise ValueError(\"This point is out of range!\")",
"def find_directions(self, prev, gate_nr, distance):\n distance += 1\n self.points[gate_nr]['distance'] = distance\n gate = self.points[gate_nr]\n pos = np.where(self.grid == gate_nr)\n pos_x = pos[1][0]\n pos_y = pos[0][0]\n len_x = len(self.grid[0,:])\n len_y = len(self.grid[:,0])\n\n directions = np.array([self.grid[pos_y-1, pos_x], self.grid[(pos_y+1)%len_y, pos_x], self.grid[pos_y, pos_x-1], self.grid[pos_y, (pos_x+1)%len_x]]) # [up, down, left, right]\n incoming = np.where(directions==prev)[0][0]\n\n padding = 45\n left_limit = np.max([gate['y']-padding, 0])\n right_limit = np.min([gate['y']+padding, self.gray.shape[0]])\n top_limit = np.max([gate['x']-padding, 0])\n bottom_limit = np.min([gate['x']+padding, self.gray.shape[1]])\n cropped = self.gray[left_limit:right_limit, top_limit:bottom_limit]\n _, thresh = cv2.threshold(cropped, 50, 255, cv2.THRESH_BINARY)\n thresh = 255-thresh\n\n if pos_y == 0:\n deadend = directions[0]\n elif pos_y == len_y-1:\n deadend = directions[1]\n elif pos_x == 0:\n deadend = directions[2]\n elif pos_x == len_x-1:\n deadend = directions[3]\n else:\n ml = [np.sum(thresh[0:20,:]), np.sum(thresh[-21:-1,:]), np.sum(thresh[:,0:20]), np.sum(thresh[:,-21:-1])]\n deadend = directions[np.argmin(ml)]\n\n lines = cv2.HoughLinesP(thresh, 1, np.pi / 180, 60, np.array([]), 50, 0)\n if lines is not None:\n state = directions[(np.where(directions==prev)[0][0]+1)%2+np.where(directions==prev)[0][0]//2*2]\n else:\n if np.where(directions==prev)[0][0] < 2:\n if directions[2] == deadend:\n state = directions[3]\n else:\n state = directions[2]\n else:\n if directions[0] == deadend:\n state = directions[1]\n else:\n state = directions[0]\n\n directions = list(directions)\n directions.remove(prev)\n directions.remove(deadend)\n\n for direction in directions:\n self.points[gate_nr]['directions'].append(direction)\n self.points[gate_nr]['state'] = state\n self.points[gate_nr]['ostate'] = state\n self.points[gate_nr]['incoming'] = incoming\n if self.points[direction]['type'] == 'gate' and len(self.points[direction]['directions']) == 0:\n self.find_directions(gate_nr, direction, distance)",
"def neighbors_of_4(self, x, y, mapdata):\n #if the input values are greater than the mapdata, or less than 0, then an exception is thrown\n if not self.isInBounds(x,y):\n raise ValueError(\"Out of Bounds!\")\n\n availibleSpaces = []\n\n #If x is not the value next to the boarder\n if (x!=mapdata.info.width-1) and self.is_cell_walkable(x+1, y, mapdata):\n availibleSpaces.append((x+1,y)) #If cell can be reached, add it to the list of avaible spaces\n\n #If the x val is not the 0 boundary\n if (x!=0) and self.is_cell_walkable(x-1, y,mapdata):\n availibleSpaces.append((x-1,y)) \n \n #If y is not the value next to the boarder\n if (y!=mapdata.info.height-1) and self.is_cell_walkable(x, y+1,mapdata):\n availibleSpaces.append((x,y+1))\n\n #If the y val is not the 0 boundary\n if (y!=0) and self.is_cell_walkable(x, y-1,mapdata):\n availibleSpaces.append((x,y-1))\n\n return availibleSpaces",
"def get_neighbors(cell):\n x = 0\n y = 1\n xs = [cell[x]+1, cell[x]-1, cell[x]] \n ys = [cell[y]+1, cell[y]-1, cell[y]]\n return [(x,y) for x in xs for y in ys]",
"def neighbor_indices(self):",
"def _neighbours(self, x, y, x_max, y_max, r=1):\r\n return [(x_, y_)\r\n for x_ in range(x-r, x+r+1)\r\n for y_ in range(y-r, y+r+1)\r\n if ((0 <= x_ < x_max) and # not outside x range\r\n (0 <= y_ < y_max))] # not outside y range\r",
"def neighbors_of_4(mapdata, x, y):\n if self.is_cell_walkable(mapdata, x+1, y):\n walkFour.add((x+1, y))\n if self.is_cell_walkable(mapdata, x-1, y):\n walkFour.add((x-1, y))\n if self.is_cell_walkable(mapdata, x, y+1):\n walkFour.add((x, y+1))\n if self.is_cell_walkable(x, y-1):\n walkFour.is_cell_walkable((x, y+1))\n\n return walkFour",
"def neighbors_of_8(mapdata, x, y):\n if self.is_cell_walkable(mapdata, x+1, y):\n walkEight.add((x+1, y))\n if self.is_cell_walkable(mapdata, x-1, y):\n walkEight.add((x-1, y))\n if self.is_cell_walkable(mapdata, x, y+1):\n walkEight.add((x, y+1))\n if self.is_cell_walkable(x, y-1):\n walkEight.is_cell_walkable((x, y+1))\n\n if self.is_cell_walkable(mapdata, x+1, y-1):\n walkEight.add((x+1, y-1))\n if self.is_cell_walkable(mapdata, x-1, y-1):\n walkEight.add((x-1, y-1))\n if self.is_cell_walkable(mapdata, x+1, y+1):\n walkEight.add((x+1, y+1))\n if self.is_cell_walkable(x-1, y-1):\n walkEight.is_cell_walkable((x-1, y+1))\n\n return walkEight",
"def neighbors(position: Position) -> Iterator[Position]:\n for dx, dy in (1, 0), (0, 1), (-1, 0), (0, -1):\n yield position[0] + dx, position[1] + dy",
"def find_start(self):\n start = None\n for column in range(self.grid.shape[1]):\n if self.grid[0][column] == -1 and self.grid[1][column] == -1 and self.grid[2][column] == -1:\n incoming = 0 # top\n self.grid[0][column] = len(self.points)\n self.grid[1][column] = len(self.points)+1\n self.grid[2][column] = len(self.points)+2\n if self.grid[-4][column] > -1 and self.points[self.grid[3][column]]['type'] == 'gate':\n start = self.grid[3][column]\n elif column > 0 and self.points[self.grid[2][column-1]]['type'] == 'gate':\n start = self.grid[2][column-1]\n elif column < len(self.grid[0,:])-1 and self.points[self.grid[2][column+1]]['type'] == 'gate':\n start = self.grid[2][column+1]\n elif self.grid[-1][column] == -1 and self.grid[-2][column] == -1 and self.grid[-3][column] == -1:\n incoming = 1 # bottom\n self.grid[-1][column] = len(self.points)\n self.grid[-2][column] = len(self.points)+1\n self.grid[-3][column] = len(self.points)+2\n if self.grid[-4][column] > -1 and self.points[self.grid[-4][column]]['type'] == 'gate':\n start = self.grid[-4][column]\n elif column > 0 and self.points[self.grid[-3][column-1]]['type'] == 'gate':\n start = self.grid[-3][column-1]\n elif column < len(self.grid[0,:])-1 and self.points[self.grid[-3][column+1]]['type'] == 'gate':\n start = self.grid[-3][column+1]\n\n\n self.points.append({'type': 'track', 'distance': 0, 'incoming': incoming, 'directions': [len(self.points)+1], 'state': len(self.points)+1, 'ostate': len(self.points)+1})\n self.points.append({'type': 'track', 'distance': 1, 'incoming': incoming, 'directions': [len(self.points)+1], 'state': len(self.points)+1, 'ostate': len(self.points)+1})\n self.points.append({'type': 'track', 'distance': 2, 'incoming': incoming, 'directions': [start], 'state': start, 'ostate': start})\n\n return start",
"def neighbor(points, p):\n points.sort(key=lambda q: (p[0] - q.get_position()[0]) * (p[0] - q.get_position()[0]) +\n (p[1] - q.get_position()[1]) * (p[1] - q.get_position()[1]) +\n (p[2] - q.get_position()[2]) * (p[2] - q.get_position()[2]))\n return points[0]",
"def neighbors_of_8(mapdata, x, y):\n ### REQUIRED CREDIT\n\n availibleSpaces = PathPlanner.neighbors_of_4(mapdata, x, y)\n\n \n if(x!=0 and y!=0):\n if(PathPlanner.is_cell_walkable(mapdata, x-1,y-1)):\n availibleSpaces.append((x-1,y-1))\n\n if(x!=mapdata.info.width-1 and y!=mapdata.info.height-1):\n if(PathPlanner.is_cell_walkable(mapdata, x+1,y+1)):\n availibleSpaces.append((x+1,y+1))\n\n if(x!=mapdata.info.width-1 and y!=0):\n if(PathPlanner.is_cell_walkable(mapdata, x+1,y-1)):\n availibleSpaces.append((x+1,y-1))\n\n if(x!=0 and y!=mapdata.info.height-1):\n if(PathPlanner.is_cell_walkable(mapdata, x-1,y+1)):\n availibleSpaces.append((x-1,y+1))\n\n return availibleSpaces",
"def _mirror_coords(i,j):\n\t\t# 5 means a move outside of the field => point for the opponent\n\t\tassert i in {0,1,2,3,4,5}\n\t\tassert j in {0,1,2,3,4,5}\n\t\treturn 4-i,4-j\n\t\treturn 4-j,4-i"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find user by session ID
|
def get_user_from_session_id(self, session_id: str) -> str:
if session_id:
user = self._db.find_user_by(session_id=session_id)
return user
|
[
"def find_by_id(_id):\n if not _id:\n raise ValueError('Please provide the id')\n for user in USERS:\n if user['id'] == _id:\n return user\n return None",
"def get_user_from_session_id(session_id):\n try:\n session = Session.objects.get(session_key=session_id)\n except Session.DoesNotExist:\n raise User.DoesNotExist\n\n try:\n user_id = session.get_decoded().get('_auth_user_id')\n user = User.objects.get(pk=user_id)\n except User.DoesNotExist:\n raise\n return user",
"def find_user(user): \n user.find_by_username()",
"def session_user(db):\n cursor = db.cursor()\n sql = \"SELECT user FROM sessions WHERE sessionid=?\"\n key = bottle.request.get_cookie(COOKIE_NAME)\n cursor.execute(sql,(key,))\n data = cursor.fetchone()\n if data:\n return data[0]\n return None",
"def fetch_session_by_id(id):\r\n pass",
"def get_user(session, discord_snowflake=None, id=None):\n if discord_snowflake is None:\n query = session.query(User).filter_by(id=id).all()\n else:\n query = session.query(User).filter_by(\n snowflake=discord_snowflake).all()\n\n if query:\n return query[0]\n else:\n return None",
"def get_session_by_user_id(self, user_id):\n for session in self.sessions.values():\n if session['user_1_id'] == user_id or session['user_2_id'] == user_id:\n return session\n return None",
"def get_user():\n user = None\n if 'userId' in session:\n user = User.query.get(session['userId'])\n return user",
"def find_session(self, session_id):\n if session_id in self.sessions:\n session = self.sessions[session_id]\n\n if session.active == False:\n return None\n else:\n return session\n else:\n return None",
"def find(id):\n cur.execute(\n '''\n SELECT *\n FROM users\n WHERE id = ?\n ''', (id,)\n )\n row = cur.fetchone()\n\n if row is None:\n return None\n return User(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9])",
"def session_user(db):\n\n if bottle.request.get_cookie(COOKIE_NAME) != '' or bottle.request.get_cookie(COOKIE_NAME) != None:\n cur = db.cursor()\n # retrieve user sessionid and usernick (username) from the sessions table\n rows = [row[0]for row in cur.execute(\"SELECT sessionid, usernick FROM sessions\")]\n\n if(len(rows) == 0) : # if not exist\n return None\n else:\n return bottle.request.get_cookie(COOKIE_NAME)\n else:\n return None",
"def get_session(self, sid):\n q = \"SELECT username FROM sessions WHERE sid=?\"\n r = self._query(q, (sid,), fetch='one')\n try:\n return {'username': r[0],}\n except Exception as e:\n raise e",
"def get_user_for_token(self, session_id, request, *args, **kwargs):\n # SessionStore initialize the session object using session_key\n session = SessionStore(session_id)\n\n # Save the user's session object in request.\n request.session = session\n\n user = None\n if self.SESSION_USER_ID in session:\n user_id = user_model._meta.pk.to_python(session[self.SESSION_USER_ID])\n try:\n user = user_model._default_manager.get(pk=user_id)\n # Check current user's hash to match stored hash.\n # If they don't match than user have changed the password.\n # Delete this session to make user login again.\n if hasattr(user, 'get_session_auth_hash'):\n session_auth_hash = user.get_session_auth_hash()\n if session_auth_hash != session[self.SESSION_USER_HASH]:\n request.session.flush()\n user = None\n except user_model.DoesNotExist:\n return None\n return user",
"def get_by_id(cls, id):\n user = cls.query.filter_by(id=id).one()\n return user",
"def get_user():\r\n return login_session.get('user', None)",
"def load_user(session_token):\n return User.get(session_token)",
"def get_session_by_id(self, session_id):\n return self.sessions.get(session_id, None)",
"def get_single_user():",
"def user_id(self):\n return self.auth.get_user_by_session()[\"user_id\"]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialize the var set manager.
|
def __init__(self):
self.variable_sets = {}
self.reserved_keys = []
self.reserved_keys.extend(self.VAR_SETS)
|
[
"def init_vars(self):\n pass",
"def init_vars(self):\n if self.session is None:\n self.set_session()\n\n self.session.run(global_variables_initializer())\n self._var_inited = (True, self.session)",
"def _initialize_track_vars(self):\n self.__log.call()\n\n track_vars = self.__track_vars = [\n None, # track vars use 1-based indexing\n ]\n\n aggregated_tracks_metadata = self.__aggregated_metadata[\"__tracks\"]\n last_track = len(aggregated_tracks_metadata) - 1\n # from_ will still be 0 here, and that's intended - it means that when\n # we invoke \"buttonup\" for the first time, it will increment the track\n # spinbox to 1, triggering a refresh of track 1's metadata\n track_number_editor = self.__metadata_editors[\"track_number\"]\n track_number_editor.config(to=last_track)\n track_number_editor.of_label.config(text=\"of %d\" % last_track)\n\n # tracks metadata also uses 1-based indexing\n for t in range(1, len(aggregated_tracks_metadata)):\n track_metadata = aggregated_tracks_metadata[t]\n\n # first initialize the individual track vars...\n varmap = {\n \"track_include\": BooleanVar(\n name=\"track_%d_include\" % t,\n value=track_metadata[\"track_include\"]),\n }\n for field in [\n \"title\",\n \"artist\",\n \"genre\",\n \"year\",\n ]:\n metadata_name = \"track_%s\" % field\n varmap[metadata_name] = StringVar(\n name=\"track_%d_%s\" % (t, field),\n value=track_metadata[metadata_name][0]\n if track_metadata[metadata_name] else \"\")\n\n track_vars.append(varmap)\n\n # ...then initialize the editors and editor vars by using the track\n # spinbox to trigger refreshes (but make sure this method is called\n # BEFORE the metadata editor is packed, otherwise the user will be\n # very disoriented and confused)\n track_number_editor.invoke(\"buttonup\")\n\n # now update the from_ to 1 and initialize the spinner to track #1 by\n # \"wrapping around\"\n track_number_editor.config(from_=1)\n track_number_editor.invoke(\"buttonup\")",
"def _init_run_vars(self):\n self._messages = []\n self._reservation = None\n self._instance = None",
"def setup(cls):\n cls._VALUE_MAP = {}",
"def _init_state_variables(self) -> None:\n for name, type_info in self.STATE_VARIABLE_DEFINITIONS.items():\n self.create_state_var(name, type_info)",
"def init(market_manager_, crop_manager_):\n global market_manager, crop_manager\n market_manager = market_manager_\n crop_manager = crop_manager_",
"def __init__(self):\n self.commandSet = None # This will be updated by addCommand\n self.subParser = None # This will be updated by addCommand",
"def setup(self):\n self.data = ContainerSet(self.name)\n for stage in self.stages:\n stage.data = self.data\n stage.setup()",
"def handlesVariables_Initialization(self):\n self.runner = PythonRunner(VAR_INIT_METHOD)\n results = self.runner.processFunction()\n \n self.assertEquals([\"i = 0\"], results[1], \"Should have the proper variable statement\")",
"def init_sorted_variables(self):\n self.sorted_variables = []",
"def _set_vars(self):\n if self.vars == []:\n for v in self.df.columns:\n if v in VARIABLES.keys():\n self.vars.append(VARIABLES[v])",
"def _collect_variables(self, vs):\n self.var_list.extend(vs)\n self.init_op = tf.variables_initializer(var_list=self.var_list)",
"def __init(self, terms, termset, varmap, mixed):\n\n self.__terms = terms\n self.__termset = termset\n self.__varmap = varmap\n self.__mixed = mixed\n\n if termset != None:\n assert len(termset) == len(terms)",
"def __init__(self):\n self.variables = {}\n self.values = {}",
"def __init__(self):\n\n self.clusterTableManager = ClusterTableManager()\n self.docManager = DocManager()\n self.processedClusterStore = ProcessedClusterStore()",
"def initVariable(self, trace_collection):\n return trace_collection.initVariableUninitialized(self)",
"def init_toolbox(self):\n pass",
"def init_environ(self):\n\t\t#workdir = wpre + projectname + '/' + setname + '/'\n\t\tself.config['pdict'] = {}\n\t\t#self.config['workdir'] = workdir\n\n\t\tself.config['solvent'] = 'water'\n\t\tself.config['interface'] = 'electronic'\n\t\tself.config['diffuse'] = 'none'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a new variable set to this variable set manager. Variables in the set can then be retrieved by complex key.
|
def add_var_set(self, name, value_dict):
if name not in self.reserved_keys:
raise ValueError("Unknown variable set name: '{}'".format(name))
if name in self.variable_sets:
raise ValueError(
"Variable set '{}' already initialized.".format(name))
try:
var_set = VariableSet(name, self.reserved_keys,
value_dict=value_dict)
except VariableError as err:
# Update the error to include the var set.
err.var_set = name
raise err
self.variable_sets[name] = var_set
|
[
"def _add_set_object(self, set_obj: Union[SET1, SET2, SET3]) -> None:\n key = set_obj.sid\n assert key >= 0\n if key in self.sets:\n self.sets[key].add_set(set_obj)\n else:\n self.sets[key] = set_obj\n self._type_to_id_map[set_obj.type].append(key)",
"def add_variable(self, new_var):\n self.variables.append(new_var)",
"def _add_seset_object(self, set_obj: SESET) -> None:\n key = set_obj.seid\n assert key >= 0\n if key in self.se_sets:\n old_set = self.se_sets[key]\n set_obj.add_seset(old_set)\n self.se_sets[key] = set_obj\n self._type_to_id_map[set_obj.type].append(key)",
"def _add_uset_object(self, set_obj: Union[USET, USET1]) -> None:\n key = set_obj.name\n if key in self.usets:\n self.usets[key].append(set_obj)\n else:\n self.usets[key] = [set_obj]\n self._type_to_id_map[set_obj.type].append(key)",
"def sadd(self, key: str, *args) -> None:\n cur_val = self.__get_key(key)\n if cur_val is None:\n self.storage[key] = Set(set(args))\n return\n\n check_type(cur_val, DataType.SET)\n\n # Add the values to the set\n self.storage[key].data.update(args)",
"def _add_seuset_object(self, set_obj: Union[SEUSET, SEUSET1]) -> None:\n key = set_obj.name\n if key in self.se_usets:\n self.se_usets[key].append(set_obj)\n else:\n self.se_usets[key] = [set_obj]\n self._type_to_id_map[set_obj.type].append(key)",
"def _add_cset_object(self, set_obj: Union[CSET, CSET1]) -> None:\n self.csets.append(set_obj)\n n = len(self._type_to_id_map['CSET'])\n self._type_to_id_map['CSET'].append(n)",
"def add_variable(self, x, y):\n pass",
"def _add_radset_object(self, set_obj: RADSET) -> None:\n if self.radset:\n self.radset.add_set(set_obj)\n else:\n self.radset = set_obj\n #self._type_to_id_map[set_obj.type].append(key)",
"def __init__(self):\n\n self.variable_sets = {}\n\n self.reserved_keys = []\n self.reserved_keys.extend(self.VAR_SETS)",
"def _add_qset_object(self, set_obj: Union[QSET, QSET1]) -> None:\n self.qsets.append(set_obj)\n n = len(self._type_to_id_map['QSET'])\n self._type_to_id_map['QSET'].append(n)",
"def add_variable(self, name, domain):\n name = str(name)\n if name in self.vs:\n raise RuntimeError(\"Variable '{0}' already defined\".format(name))\n v = Variable(name, domain, None, None)\n self.vs[name] = v",
"def add_variable(self, var_name, var_expr=''):\n return self.dataset.add_variable(var_name, var_expr)",
"def add_variables(self, new_variables):\n if isinstance(new_variables, ProbabilisticModel):\n new_variables = new_variables.variables\n if isinstance(new_variables, (list, set)):\n new_input_variables = list(self.variables) + list(new_variables)\n elif isinstance(new_variables, Variable):\n new_input_variables = list(self.variables).append(Variable)\n else:\n raise ValueError(\"The input of the add_variable method should be a Variable, a set/list of variables or a ProbabilisticModel\")\n self._initialize_model(new_input_variables)",
"def set_variables(self, variables, dataset=0, **kwargs):\n #variables.update(kwargs)\n\n if not dataset:\n dataset = ['']\n\n for ds in listify(dataset):\n for (key, val) in variables.items():\n newkey = key + str(ds)\n self.set_variable(newkey, val, **kwargs)",
"def add_vars():\n with open(VAR_CACHE) as fp:\n var = json.load(fp)\n data = plistlib.readPlist(INFO_PLIST)\n for key, value in var.items():\n data['variables'][key] = value\n log('set %s', key)\n\n plistlib.writePlist(data, INFO_PLIST)",
"def put_node_set_variable_values(self, object_id, name, step, values):\n names = self.get_variable_names('EX_NODE_SET')\n var_id = names.index(name) + 1\n (numSetNodes, _numSetDistFacts) = self.get_set_params(object_id, 'EX_NODE_SET')\n self.__ex_put_var(step, 'EX_NODE_SET', var_id, object_id, numSetNodes, values)\n return True",
"def _add_aset_object(self, set_obj: Union[ASET, ASET1]) -> None:\n self.asets.append(set_obj)\n n = len(self._type_to_id_map['ASET'])\n self._type_to_id_map['ASET'].append(n)",
"def add_variable(self, var):\n if type(var) == WorkspaceVariable:\n return var\n group_id = WorkspaceVariable.get_group_id(var)\n s = VariableValueStruct(var)\n ws_id = arts_api.add_variable(self.ptr, group_id, s)\n arts_api.set_variable_value(self.ptr, ws_id, group_id, s)\n return WorkspaceVariable(ws_id,\n str(id(var)),\n group_names[group_id],\n \"User defined variable.\",\n self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
For every combination of permutation variables (that were used), return a new var_set manager.
|
def get_permutations(self, used_per_vars):
    # Get a dictionary of var:idx for every combination of used
# permutation variables.
permutations = [{}]
for per_var in used_per_vars:
new_perms = []
for old_perm in permutations:
for i in range(self.len('per', per_var)):
new_perm = old_perm.copy()
new_perm[per_var] = i
new_perms.append(new_perm)
permutations = new_perms
permuted_var_mans = []
if len(permutations) == 1:
return [self]
# Create a new var set manager for each permutation.
for perm in permutations:
var_man = VariableSetManager()
var_man.variable_sets = self.variable_sets.copy()
perm_var_set = VariableSet('per', self.reserved_keys)
for var, idx in perm.items():
new_list = [self.variable_sets['per'].data[var].data[idx]]
vlist = VariableList()
vlist.data = new_list
perm_var_set.data[var] = vlist
var_man.variable_sets['per'] = perm_var_set
permuted_var_mans.append(var_man)
return permuted_var_mans
|
[
"def sub_var_bindings_set(triples, bindings_set) :\n\t\n\t#print 'triples',prettyquery(triples)\n\t#print 'bindings',prettyquery(bindings_set)\n\t\n\tfor bindings in bindings_set :\n\t\tyield sub_var_bindings(triples, bindings)",
"def variables(self):\n # Task 4.1\n var_set = set()\n var_set.update(self.conclusion.vars)\n for assumption in self.assumptions:\n var_set.update(assumption.vars)\n return var_set",
"def _collect_vars(self):\n res = set()\n self.objective.collect_variables(res)\n for c in self.constraints:\n c.collect_variables(res)\n self.variables = list(res)\n self.var_slices = {}\n start = 0\n for var in self.variables:\n self.var_slices[var] = slice(start, start + var.size)\n start += var.size",
"def _assign_from_var_ids(table, var_ids, var_builder):\n if table.empty:\n raise AttributeError(f\"Dismod file has no data in table {table.columns} during read from vars.\")\n var_groups = DismodGroups()\n for group_name, group in var_ids.items():\n for key, var_id_mapping in group.items():\n var_groups[group_name][key] = var_builder(table, var_id_mapping)\n return var_groups",
"def _iter_vars(sum_varlist, req_vars, vgrp):\n for vn in sum_varlist:\n req_vars[vn] = vgrp[vn]\n return req_vars",
"def update_var_into_set(var_name : str):\n for pipe in _pipes:\n pipe.send({\n \"op\": \"get_var\",\n \"var_name\": var_name\n })\n\n var_set = _process_data[var_name].copy()\n\n for pipe in _pipes:\n var_set.update(pipe.recv())\n\n return var_set",
"def gen_perms(vars, nodes):\n # For all possible values of all vars, create a permutation and add it to the list\n perms = []\n\n for var in vars:\n if len(perms) == 0:\n for value in nodes[var].classes:\n perms.append({var: value})\n else:\n classes = nodes[var].classes\n old_perms = perms\n for i in range(1, len(classes)):\n perms = perms + deepcopy(old_perms)\n\n for i in range(0, int(len(perms) / len(classes))):\n for j in range(0, len(classes)):\n perms[i + j * int(len(perms) / len(classes))][var] = classes[j]\n\n perm_tuples = []\n for dictionary in perms:\n tup = dict_to_tuple(dictionary)\n perm_tuples.append(tup)\n\n return perm_tuples",
"def variables(e: Expression) -> Set[Variable]:\n return_value = set()\n\n def f(e: Expression):\n if isinstance(e, Variable):\n return_value.add(e)\n\n traversal.on_every_node(f, e)\n return return_value",
"def builddimensions(self):\r\n e = self.experiment # synonym\r\n\r\n # find unique dimension values across variables. Dim values could be 0, 5, 5, 5, 2, 666, -74,...\r\n dims = list(np.unique([ var.dim for var in e.variables ])) # np.unique returns sorted values\r\n\r\n # renumber dimension values to be consecutive 0-based\r\n newdims = range(len(dims)) # 0-based consecutive dim values\r\n old2new = dict(zip(dims, newdims)) # maps from old dim values to new ones\r\n for var in e.variables:\r\n var.dim = old2new[var.dim] # overwrite each Variable's old dim value with the new one\r\n\r\n # use newdims to init a list of Dimensions, each with an empty Variables object\r\n self.dimensions = []\r\n for dim in newdims:\r\n d = Dimension(variables=Variables(), dim=dim)\r\n self.dimensions.append(d)\r\n\r\n # now assign each Variable object to the appropriate Dimension object\r\n for var in e.variables:\r\n d = self.dimensions[var.dim] # get the Dimension object\r\n d.variables[var.name] = var # assign the Variable to the Dimension's Variables\r\n d.shuffle = var.shuffle # set the Dimension's shuffle and random flags according to this Variable\r\n d.random = var.random\r\n d.check() # make sure everything is consistent in this Dimension\r",
"def create_constraint_set() -> Set[Tuple[Tuple[int, int], Tuple[int, int]]]:\r\n return set(chain(*map(lambda cell: {(cell, diff) for diff in constrained_variables(cell)}, ALL_CELLS)))",
"def assignment_space(graph, n, explanadum, exp_var):\n space = []\n for choice in combination(n, \\\n [key for key in exp_var if key not in explanadum.keys()]):\n # Dont give assignments to variables set in explanadum \n mutations = []\n init_assignment = {}\n for node_name in choice:\n node = graph.get_node_with_name(node_name)\n init_assignment.update( [ (node_name, node.cpt.myVals[0]) ] )\n for other_possible_value in node.cpt.myVals[1:]:\n mutations.append( (node_name, other_possible_value) )\n space.append( init_assignment )\n for j in range( len(mutations) ):\n for change in combination(j+1, mutations):\n assignment = init_assignment.copy()\n assignment.update( change )\n space.append(assignment)\n # if not len(init_assignment):\n # for node_name in choice:\n # node = graph.get_node_with_name(node_name)\n # init_assignment.update([(node_name, node.cpt.myVals[0])])\n # for other_possible_value in node.cpt.myVals[1:]:\n # mutations.append((node_name, other_possible_value))\n # else:\n # for node_name in choice:\n # node = graph.get_node_with_name(node_name)\n # for possible_value in node.cpt.myVals:\n # mutations.append((node_name, possible_value))\n\n # # initial assignment(every node has been assigned to default value)\n # space.append(init_assignment)\n # # go through all combinations of mutations\n # for j in range(len(mutations)):\n # for change in combination(j+1, mutations):\n # assignment = init_assignment.copy()\n # assignment.update(change)\n # space.append(assignment)\n return space",
"def all_different(variables) :\n pairs_of_variables = []\n var_c = variables.copy()\n #generates all the possible pairs of variables\n while len(var_c) >0:\n first_var = var_c.pop(0)\n for var in var_c:\n pairs_of_variables.append([first_var, var])\n \n #iterates through pairs of variables and set constraints for them\n constraints = []\n for pair in pairs_of_variables:\n \n new_constraint = Constraint(pair[0], pair[1], constraint_different)\n constraints.append(new_constraint)\n return constraints",
"def distributeVariablesToParsers(self, perturbedVars):\n distributedPerturbedVars = {}\n pertType = []\n # teach what are the type of perturbation (decay FY etc...)\n for i in perturbedVars.iterkeys():\n splittedKeywords = i.split('|')\n pertType.append(splittedKeywords[0])\n # declare all the dictionaries according the different type of pert\n for i in xrange (0,len(pertType)):\n distributedPerturbedVars[pertType[i]] = {}\n # populate the dictionaries \n for key, value in perturbedVars.items():\n splittedKeywords = key.split('|')\n for j in xrange (0,len(pertType)):\n if splittedKeywords[0] == pertType[j] :\n distributedPerturbedVars[pertType[j]][key] = value\n #print (distributedPerturbedVars)\n return distributedPerturbedVars",
"def __init__(self):\n\n self.variable_sets = {}\n\n self.reserved_keys = []\n self.reserved_keys.extend(self.VAR_SETS)",
"def get_variables_for_comparison1():\n import itertools\n variables = [\n 'holaps_evapotranspiration',\n 'gleam_evapotranspiration',\n 'modis_evapotranspiration',\n ]\n comparisons = [i for i in itertools.combinations(variables,2)]\n return variables, comparisons",
"def get_scope_of_new_factor_after_variable_elimination(self):\n nodes = self.cg.nodes\n return {n_target : set.union(*[set(n.cpd.scope) for n in nodes if n_target in n.cpd.scope]) \\\n - set([n_target]) for n_target in nodes}",
"def n_fresh_variables(existing_vars: Set[Variable], n: int, prefix: str = \"z\") -> List[Variable]:\n existing_vars = deepcopy(existing_vars)\n new_vars: List[Variable] = []\n for _ in range(n):\n v = fresh_variable(existing_vars, prefix)\n existing_vars.add(v)\n new_vars.append(v)\n return new_vars",
"def vars_from_equations(U: Set[Equation]):\n ALL_VARS = set()\n for e in U:\n LS = get_vars(e.left_side, unique=True)\n RS = get_vars(e.right_side, unique=True)\n ALL_VARS = ALL_VARS.union(LS).union(RS)\n return ALL_VARS",
"def num_38():\n import itertools as IT\n #\n def unique_rows(a):\n a = np.ascontiguousarray(a)\n u_a = np.unique(a.view([('', a.dtype)]*a.shape[1]))\n u_a = u_a.view(a.dtype).reshape((u_a.shape[0], a.shape[1]))\n return u_a\n #\n frmt = \"\"\"\n :------------------------------------------------------------------:\n :Given {0} variables and {1} classes/variable, the following shows :\n : (1) the combinations :\n : (2) all arrangements, {0} variables with {1} classes/variable & :\n : (3) those combinations, where positions are not important. :\n :\n :Input categories per variable... {2}\\n\n :Combinations: no duplicates... n={3}\n {4}\\n\n :mesh form: {0} variables, all arrangements... n={5}\n : transposed for viewing...\n {6}\\n\n :unique from mesh: {0} variables, arrangement not important... n={7} \n : transposed for viewing...\n {8}\n :\n :------------------------------------------------------------------:\n \"\"\"\n a = [-1, 0, 1] #[0, 1, 2, 3] # classes\n n = len(a)\n m = 3 #2\n c = [i for j in range(n+1) for i in IT.combinations(a, j)]\n z = np.array(a*m).reshape(m, n)\n ms = np.array(np.meshgrid(*z)).T.reshape(-1,m)\n s = np.sort(ms, axis=1)\n u = unique_rows(s)\n if n == 4:\n args = [[c[0]], c[1:5], c[5:11], c[11:15], c[-1]]\n c2 = \"{}\\n{}\\n{}\\n{}\\n{}\".format(*args)\n elif n == 3:\n args = [[c[0]], c[1:4], c[4:7], c[7:]]\n c2 = \"{}\\n{}\\n{}\\n{}\".format(*args)\n args2 = [m, n, a, len(c), c2, len(ms), ms.T, u.shape[0], u.T]\n print(dedent(frmt).format(*args2))\n #return a, c, m, u"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parse the given complex key, and return a reasonable (var_set, var, index, sub_var) tuple.
|
def parse_key(cls, key):
if isinstance(key, list) or isinstance(key, tuple):
parts = list(key)
elif isinstance(key, str):
parts = key.split('.')
else:
raise TypeError("Only str keys or tuples/lists are allowed.")
var_set = None
if parts[0] in cls.VAR_SETS:
var_set = parts[0]
parts = parts[1:]
if parts:
var = parts.pop(0)
if var == '':
raise KeyError("Empty variable name for key '{}'".format(key))
else:
raise KeyError("No variable name given for key '{}'".format(key))
# Grab the index and sub_var parts, if present.
index = None
if parts:
if parts[0] is None:
# We were given an explicit None in a variable tuple.
parts.pop(0)
elif parts[0] == '':
# Note: The index is optional. This is for when it's given as
# an empty string.
raise KeyError("Invalid, empty index in key: '{}'".format(key))
else:
try:
index = int(parts[0])
parts.pop(0)
except ValueError:
# If it's not an integer, assume it's a sub_key.
pass
sub_var = None
if parts:
sub_var = parts.pop(0)
if sub_var == '':
raise KeyError(
"Invalid, empty sub_var in key: '{}'".format(key))
if parts:
raise KeyError(
"Variable reference ({}) has too many parts, or an invalid "
"variable set (should be one of {})".format(key, cls.VAR_SETS))
return var_set, var, index, sub_var
|
[
"def _process_data_var(string):\n key, var = string.split(\"<-\")\n if \"structure\" in var:\n var, dim = var.replace(\"structure(\", \"\").replace(\",\", \"\").split(\".Dim\")\n # dtype = int if '.' not in var and 'e' not in var.lower() else float\n dtype = float\n var = var.replace(\"c(\", \"\").replace(\")\", \"\").strip().split()\n dim = dim.replace(\"=\", \"\").replace(\"c(\", \"\").replace(\")\", \"\").strip().split()\n dim = tuple(map(int, dim))\n var = np.fromiter(map(dtype, var), dtype).reshape(dim, order=\"F\")\n elif \"c(\" in var:\n # dtype = int if '.' not in var and 'e' not in var.lower() else float\n dtype = float\n var = var.replace(\"c(\", \"\").replace(\")\", \"\").split(\",\")\n var = np.fromiter(map(dtype, var), dtype)\n else:\n # dtype = int if '.' not in var and 'e' not in var.lower() else float\n dtype = float\n var = dtype(var)\n return key.strip(), var",
"def _parse_var(lexer: shlex.shlex) -> Tuple[str, Optional[Any]]:\n flags_token = lexer.get_token()\n\n if flags_token != \"--\":\n var_flags = set(flags_token[1:])\n else:\n var_flags = set()\n\n var_name = lexer.get_token()\n var_value: Optional[Any] = None\n lookahead = lexer.get_token()\n\n if lookahead == \"=\":\n if \"a\" in var_flags:\n var_value = _parse_indexed(lexer)\n elif \"A\" in var_flags:\n var_value = _parse_assoc(lexer)\n else:\n var_value = _parse_string(lexer.get_token())\n else:\n lexer.push_token(lookahead)\n\n return var_name, var_value",
"def parse_tag(complex_tag: str) -> dict:\n content = complex_tag[\n complex_tag.find(' '):-3] # '<tag field1=\"s1\" field2=\"s2\"/>' to 'field1=\"s1\" field2=\"s2\"'\n content = content.split('\"')\n return {content[2 * i][1:-1]: content[2 * i + 1] for i in range(int(len(content) / 2))}",
"def parse_compound_key(cls, compound_key):\n parts = compound_key.split(AGGREGATOR_DELIMITER)\n part1 = parts[0]\n if len(parts) == 2:\n part2 = parts[1]\n elif len(parts) == 1:\n part2 = None\n else:\n raise ValueError(f\"Unexpected compound_key {compound_key}\")\n return part1, part2",
"def _parse_execution_data_blob_key(blob_key):\n key_body, run = blob_key.split(\".\", 1)\n key_body = key_body[len(EXECUTION_DATA_BLOB_TAG_PREFIX) :]\n begin = int(key_body.split(\"_\")[1])\n end = int(key_body.split(\"_\")[2])\n return run, begin, end",
"def __subfield_dict(marc_subfield):\n if marc_subfield[3][0] is None:\n return __control_dict(marc_subfield[3][1])\n return {\"type\": \"variable\",\n \"ind1\": marc_subfield[1],\n \"ind2\": marc_subfield[2],\n \"subfields\": dict(marc_subfield[3:])}",
"def _parse_graph_info_blob_key(blob_key):\n key_body, run = blob_key.split(\".\")\n graph_id = key_body[len(GRAPH_INFO_BLOB_TAG_PREFIX) + 1 :]\n return run, graph_id",
"def _parse(line):\n splited = line.split(\"=\")\n key = splited[0].strip()\n value = splited[1].strip()\n return key, value",
"def _parse_graph_execution_data_blob_key(blob_key):\n # TODO(cais): Support parsing trace_id when it is supported.\n key_body, run = blob_key.split(\".\", 1)\n key_body = key_body[len(GRAPH_EXECUTION_DATA_BLOB_TAG_PREFIX) :]\n begin = int(key_body.split(\"_\")[1])\n end = int(key_body.split(\"_\")[2])\n return run, begin, end",
"def _parse_source_file_blob_key(blob_key):\n key_body, run = blob_key.split(\".\", 1)\n index = int(key_body[len(SOURCE_FILE_BLOB_TAG_PREFIX) + 1 :])\n return run, index",
"def extractVarEq( expression , key ):\n\n # expand out and convert into a string\n expression = str(expression.expand())\n # Make sure negative symbols are not stripped and split into multiplicative blocks\n s = expression.replace('- ','-').split(' ')\n # If the key does not have a power then strip away all blocks where it does\n if key.find('^') == -1:\n s = [ w for w in s if w.find(key+'^') == -1 ]\n # Find blocks multiplied by the key and remove the key from the string\n var = [w[0:w.find(key)]+w[w.find(key)+len(key):] for w in s if w.find(key) != -1 ]\n\n # Handle the case where the key was not found\n if len(var) == 0:\n return ''\n\n # Fix problems left by stripping away the key\n var = [w.replace('-*','-') for w in var]\n var = [w.replace('*/','/') for w in var]\n for i in range(0,len(var)):\n if len(var[i]) == 0: var[i] = '1'\n elif len(var[i]) == 1 and var[i][0] == '-': var[i] = '-1'\n elif var[i][-1] == '*': var[i] = var[i][:-1]\n elif var[i][0] == '*': var[i] = var[i][1:]\n\n # Expand out power\n var = [expandPower(w) for w in var]\n\n # construct a string which can be compiled\n ret = var[0]\n for w in var[1:]:\n if w[0] == '-':\n ret += ' - '+w[1:]\n else:\n ret += ' + '+w\n\n return ret",
"def split_nested_class_from_key(key: str) -> Tuple[str, Optional[str]]:\n first_dollar_sign = key.find('$')\n if first_dollar_sign == -1:\n return key, None\n else:\n return key[:first_dollar_sign], key[first_dollar_sign + 1:]",
"def parseKey(key):\r\n\tif isinstance(key, State):\r\n\t\treturn key\r\n\tif isinstance(key, (basestring, str)):\r\n\t\tx = letterToIndex(key[0:1])\r\n\t\ty = int(key[1:])-1\r\n\telif len(key) == 2:\r\n\t\tx, y = key\r\n\t\tif isinstance(x, (basestring, str)):\r\n\t\t\tx = letterToIndex(x)\r\n\t\t\ty = int(y) - 1\r\n\telse:\r\n\t\traise TypeError\r\n\r\n\treturn x, y",
"def _parse_graph_op_info_blob_key(blob_key):\n # NOTE: the op_name itself may include dots, this is why we use `rindex()`\n # instead of `split()`.\n last_dot_index = blob_key.rindex(\".\")\n run = blob_key[last_dot_index + 1 :]\n key_body = blob_key[:last_dot_index]\n key_body = key_body[len(GRAPH_OP_INFO_BLOB_TAG_PREFIX) :]\n _, graph_id, op_name = key_body.split(\"_\", 2)\n return run, graph_id, op_name",
"def _parse_variable(variable_ast: dict) -> \"VariableNode\":\n return VariableNode(\n name=_parse_name(variable_ast[\"name\"]),\n location=_parse_location(variable_ast[\"loc\"]),\n )",
"def get_parm_dict_variable_names(self, subcategory):\n varlist = []\n for cat_name in self._parms['_order']:\n for var_name in _sort(self._parms[cat_name].keys()):\n if isinstance(self._parms[cat_name][var_name]['datatype'], dict):\n for subvar_name in _sort(self._parms[cat_name][var_name]['datatype'].keys()):\n if subvar_name[0] != '_':\n this_var = self._parms[cat_name][var_name]['datatype'][subvar_name]\n if this_var['subcategory'] == subcategory:\n for parm_key in this_var['_list_of_parm_names']:\n varlist.append(parm_key)\n else:\n this_var = self._parms[cat_name][var_name]\n if this_var['subcategory'] == subcategory:\n for parm_key in this_var['_list_of_parm_names']:\n varlist.append(parm_key)\n return _sort(varlist, sort_key=_natural_sort_key)",
"def _get_vars(self, structured_array, var_list):\n\n # define vector like variables\n jets = ['CSV', 'Jet_CSV', 'Jet_CosThetaStar_Lepton', 'Jet_CosTheta_cm',\n 'Jet_E','Jet_Eta','Jet_Flav','Jet_GenJet_Eta',\n 'Jet_GenJet_Pt', 'Jet_M','Jet_PartonFlav', 'Jet_Phi',\n 'Jet_PileUpID', 'Jet_Pt']\n\n array_list = []\n vars = []\n\n number_of_saved_jets = 4 if self._category=='all' else int(self._category)//10\n \n for var in var_list:\n # Don't save the variable, if it's fixed due to category.\n if self._dont_keep_variable(var):\n continue\n \n if 'Jet_Deta_Jet' in var:\n # only keep the first entries of the jet vector, number of saved jets depends on category, don't save variable that contains the relation of a jet to itself\n reference_jet = int(var[-1])\n array = [np.array([jet_vector[i] for i in range(number_of_saved_jets) if i+1!=reference_jet]) for jet_vector in structured_array[var]]\n array_list.append(np.vstack(array))\n vars += [var+'_{}'.format(i) for i in range(1,1+number_of_saved_jets) if i!=reference_jet]\n elif var in jets:\n # only keep the first entries of the jet vector, number of saved jets depends on category\n array = [jet_vector[:number_of_saved_jets] for jet_vector in structured_array[var]]\n array_list.append(np.vstack(array))\n vars += [var+'_{}'.format(i) for i in range(1,1+number_of_saved_jets)]\n else:\n array = structured_array[var].reshape(-1,1)\n array_list.append(array)\n vars += [var]\n\n data_dict = {'data': np.hstack(array_list), 'vars': vars}\n return data_dict",
"def parse_module_variable(module, key):\n for cname, obj in inspect.getmembers(module):\n if cname==key:\n return obj\n return None",
"def get_by_complex_key(cls, json_dict, key):\n key_arr = key.strip().split('.')\n value = \"\"\n d = json_dict.copy()\n for k in key_arr:\n if k not in d.keys():\n d = ''\n break\n else:\n d = d[k]\n value = d\n return value"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Resolve the given key using the known var sets. Unlike parse_key, the var_set returned will never be None, as the key must correspond to a found variable in a var_set. In case of conflicts, the var_set will be resolved in order.
|
def resolve_key(self, key):
var_set, var, index, sub_var = self.parse_key(key)
# If we didn't get an explicit var_set, find the first matching one
# with the given var.
if var_set is None:
for res_vs in self.reserved_keys:
if (res_vs in self.variable_sets and
var in self.variable_sets[res_vs]):
var_set = res_vs
break
if var_set is None:
raise KeyError(
"Could not find a variable named '{}' in any variable set."
.format(var))
return var_set, var, index, sub_var
|
[
"def parse_key(cls, key):\n\n if isinstance(key, list) or isinstance(key, tuple):\n parts = list(key)\n elif isinstance(key, str):\n parts = key.split('.')\n else:\n raise TypeError(\"Only str keys or tuples/lists are allowed.\")\n\n var_set = None\n if parts[0] in cls.VAR_SETS:\n var_set = parts[0]\n\n parts = parts[1:]\n\n if parts:\n var = parts.pop(0)\n if var == '':\n raise KeyError(\"Empty variable name for key '{}'\".format(key))\n\n else:\n raise KeyError(\"No variable name given for key '{}'\".format(key))\n\n # Grab the index and sub_var parts, if present.\n index = None\n if parts:\n if parts[0] is None:\n # We were given an explicit None in a variable tuple.\n parts.pop(0)\n elif parts[0] == '':\n # Note: The index is optional. This is for when it's given as\n # an empty string.\n raise KeyError(\"Invalid, empty index in key: '{}'\".format(key))\n else:\n try:\n index = int(parts[0])\n parts.pop(0)\n except ValueError:\n # If it's not an integer, assume it's a sub_key.\n pass\n\n sub_var = None\n if parts:\n sub_var = parts.pop(0)\n\n if sub_var == '':\n raise KeyError(\n \"Invalid, empty sub_var in key: '{}'\".format(key))\n\n if parts:\n raise KeyError(\n \"Variable reference ({}) has too many parts, or an invalid \"\n \"variable set (should be one of {})\".format(key, cls.VAR_SETS))\n\n return var_set, var, index, sub_var",
"def action_resolve(self,varname_token,scopes):\n varname=varname_token.token_value\n for scope in scopes:\n try:\n return scope.resolve(varname)\n except PTKeyError as ke:\n pass\n raise PTKeyError(varname)",
"def find(self, key):\n if key not in self.data:\n self.data[key] = key\n return key\n elif key == self.data[key]:\n return key\n else:\n # reduce the depth of the set\n result = self.find(self.data[key])\n self.data[key] = result\n return result",
"def _resolve_dead(self, key):\n # pylint: disable=W0702; we want to ignore errors\n try:\n keysym, _ = SYMBOLS[CHARS[key.combining]]\n except:\n return None\n # pylint: enable=W0702\n\n if keysym not in self.keyboard_mapping:\n return None\n\n return keysym",
"def resolve(self, var, context):\r\n if var[0] in ('\"', \"'\") and var[-1] == var[0]:\r\n return var[1:-1]\r\n else:\r\n return Variable(var).resolve(context)",
"def resolve(self, container):\n try:\n mod_name, var_name = self.value_conf.rsplit('.', 1)\n except ValueError:\n # to many values to unpack. no . in it.\n return container.import_module(self.value_conf)\n else:\n mod = container.import_module(mod_name)\n return getattr(mod, var_name)",
"def _resolve_special(self, key):\n if not key.vk:\n return None\n\n return key.vk",
"def find(self, key):\n _, current, _ = self._linear_search(key)\n\n if current is None:\n value = None\n else:\n value = copy.deepcopy(current._value)\n return value",
"def resolve_variables(self, provided_variables):\n self.resolved_variables = {}\n defined_variables = self.defined_variables()\n variable_dict = dict((var.name, var) for var in provided_variables)\n for var_name, var_def in defined_variables.iteritems():\n value = resolve_variable(\n var_name,\n var_def,\n variable_dict.get(var_name),\n self.name\n )\n self.resolved_variables[var_name] = value",
"def resolve(self, text):\n\t\tprev_text = text\n\t\twhile True:\n\t\t\tfor k, v in self.vars.iteritems():\n\t\t\t\tif v is None:\n\t\t\t\t\tv = \"\"\n\t\t\t\ttext = text.replace(\"${%s}\" % k, v)\n\t\t\tif not XMLParser.RE_VAR.match(text) or text == prev_text:\n\t\t\t\tbreak\n\t\treturn text",
"def _resolve_normal(self, key):\n keysym = self._key_to_keysym(key)\n if keysym is None:\n return None\n\n if keysym not in self.keyboard_mapping:\n return None\n\n return keysym",
"def resolve(self, var, attribute):\n if attribute.startswith(\"'ID: \"):\n attribute = attribute.split(None, 1)[1][:-1]\n try:\n attribute = int(attribute)\n except Exception:\n return getattr(var, attribute, None)\n\n for v in var:\n if id(v) == attribute:\n return v\n \n return None",
"def __getitem__(self, key):\n for constraint in self.constraints:\n name = getattr(constraint, 'name', None)\n if name is not None and name == key:\n return constraint\n try:\n found = constraint[key]\n except (KeyError, TypeError):\n pass\n else:\n return found\n raise KeyError('Constraint {} not found'.format(key))",
"def refLookup(key):\n try:\n return pyformex.refcfg[key]\n except:\n pyformex.debug(\"!There is no key '%s' in the reference config!\"%key)\n return None",
"def resolve_var(frame, name: str):\n for ns in frame.f_locals, frame.f_globals, frame.f_builtins:\n try:\n return ns[name]\n except KeyError:\n pass\n raise NameError(name)",
"def resolve(self, data: Mapping[str | int | None, Any]) -> Mapping[str | int | None, Any]:\n base = data\n for key in self.keys:\n data = data[key]\n if isinstance(data, Alias):\n data = data.resolve(base)\n elif isinstance(data, tuple):\n alias, others = data\n data = alias.resolve(base)\n return data",
"def lookup(self, key):\n return self.filter(lambda x: x[0] == key).values().collect()",
"def _GetVar(vars, key_match):\n\n # Get the key.\n key = key_match.group(1)\n\n # Extract the encoding if one is specified.\n enc = ''\n m = re.match('^([^:]+):(.*)$', key)\n if m:\n enc = m.group(1).lower()\n key = m.group(2)\n\n # Raise an error for keys that don't exist.\n if key not in vars:\n raise Error('The value \"%s\" is not defined.' % key)\n\n # Get the value and apply any encodings.\n value = vars[key]\n if 'a' in enc: # Absolute path encoding.\n _LOGGER.info('Converting \"%s\" to absolute path.', key)\n value = os.path.abspath(value)\n if 'c' in enc: # C-style escaping.\n _LOGGER.info('C-escaping \"%s\" value.', key)\n value = json.dumps(value)[1:-1]\n\n return value",
"def find_key(self, key):\n if self.stack.is_empty():\n return self.global_scope\n # Look in the current frame\n scope = self.stack.curr_frame.find_key(key)\n if scope is not None:\n return scope\n # If nothing try in the global scope\n if key in self.global_scope:\n return self.global_scope\n # Semantic analysis should ensure the key exists, this should never happen\n raise RuntimeError(\"Failed to find {} in the current scope\".format(key))",
"def _parse_key(self, schemas, key, separator=\".\"):\n if not isinstance(key, str):\n # Early return out if the key isn't a string, this means it's likely\n # a hard-coded value that we can just use out of the box.\n return key\n\n key_parsed = None\n key = key.split(separator) if separator in key else [key]\n\n for schema in schemas:\n # If the schema isn't a dictionary, it must be a model as\n # defined in the existing default schemas.\n if not isinstance(schema, dict):\n # Bit of a hack, but gets us our normalized dict.\n schema = schema.__dict__\n\n for val in key:\n if not key_parsed and val in schema:\n key_parsed = schema.get(val)\n if key_parsed and isinstance(key_parsed, dict) and val in key_parsed:\n key_parsed = key_parsed.get(val)\n\n return key_parsed"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return whether the given variable in the given varset is a deferred variable.
|
def is_deferred(self, var_set, var):
return isinstance(self.variable_sets[var_set].data[var],
DeferredVariable)
|
[
"def _hasVarBeenDeclared(self, var_name, group=None):\n\n has_been_declared = False\n\n if isinstance(var_name, list) is not True:\n\n var_name = [var_name]\n\n if group is not None:\n\n where_to_look = self._equation_groups[group]\n\n else:\n\n where_to_look = self._equations_list\n\n for eq in where_to_look:\n\n #print(\"\\n======>Equation: \", eq._getSymbolicObject())\n\n try:\n\n equation_members_ = eq._getSymbolicObject().args\n\n except:\n\n equation_members_ = []\n\n for eq_i in equation_members_:\n\n #print(\"\\n\\t======>Member: \",eq_i)\n\n #print(\"\\n\\t\\t======>Has time_var_declared? : \", [ t_i in sp.srepr(eq_i) for t_i in var_name])\n\n #Will not trigger for 'Derivative' terms\n\n if any(var_i in sp.srepr(eq_i) and 'Derivative' not in sp.srepr(eq_i) for var_i in var_name):\n\n has_been_declared = True\n\n break\n\n if has_been_declared is True:\n\n break\n\n return has_been_declared",
"def has_variable(self, var: Variable) -> bool:\n return self._vars_by_name.get(var.name) == var",
"def _has_variable(self, variable):\n return variable in self",
"def has_variable(self, varname):\n return varname in self._file.variables",
"def freevar(self, var, expr):\n parsed = logic.Parser().parse(expr)\n variable = logic.Variable(var)\n return variable in parsed.free()",
"def _is_evaluated(self, variable):\n return variable in self._evaluated",
"def may_depend_on(self, var):\n for obj in self.get_content():\n if obj.may_depend_on(var):\n return True\n return False",
"def var_is_set(var, value=1):\n return {\n \"type\": \"variable_if\",\n \"name\": var,\n \"value\": value\n }",
"def contains_var(self, variable):\n return variable in self._map",
"def hasVar(self):\n return hasattr(self, 'v') and self.v is not None",
"def varExists(self, varName, autoCall=True):\n try:\n valueFromSearchList(self.searchList(),\n varName.replace('$', ''), autoCall)\n return True\n except NotFound:\n return False",
"def is_variable(term):\n return term is None or type(term) == int or term.is_var()",
"def variable_in_parent_scopes(self, variable_name):\n scope = self.parent\n\n while scope is not None:\n variables_set = set(use.name for use in scope.variable_uses\n if use.kind == VariableUsage.Kind.SET)\n if variable_name in variables_set:\n return True\n else:\n scope = scope.parent\n\n return False",
"def is_variable(obj):\n return isinstance(obj, Expr) and not obj.args and is_var_symbol(obj.op)",
"def contains(formula, var):\n for x in formula // (NodeType.REF, ):\n if x[Attr.NAME] == var:\n return True\n return False",
"def atom_attacks_variables(atom: Atom, var: AtomValue, q: ConjunctiveQuery) -> bool:\n n = Atom(\"N\", [var])\n q_new = q.add_atom(n, FunctionalDependencySet(), [True], False)\n g = gen_attack_graph(q_new)\n return g.has_edge(atom, n)",
"def has_deferred(cls, struct):\n\n if isinstance(struct, str):\n if '[\\x1b' in struct and '\\x1b]' in struct:\n return True\n else:\n return False\n elif isinstance(struct, list):\n return any([cls.has_deferred(val) for val in struct])\n elif isinstance(struct, dict):\n return any([cls.has_deferred(val) for val in struct.values()])\n else:\n raise RuntimeError(\"Config structure contains invalid data types:\"\n \"{}\".format(struct))",
"def contains_one_var(self, variables):\n for variable in variables:\n if variable in self._map:\n return True\n\n return False",
"def isVariable(self):\n return (len(self) == 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return True if the config structure contains any deferred variables.
|
def has_deferred(cls, struct):
if isinstance(struct, str):
if '[\x1b' in struct and '\x1b]' in struct:
return True
else:
return False
elif isinstance(struct, list):
return any([cls.has_deferred(val) for val in struct])
elif isinstance(struct, dict):
return any([cls.has_deferred(val) for val in struct.values()])
else:
raise RuntimeError("Config structure contains invalid data types:"
"{}".format(struct))
|
[
"def is_deferred(self, var_set, var):\n\n return isinstance(self.variable_sets[var_set].data[var],\n DeferredVariable)",
"def _needs_expansion(value):\n return Config.RE_HAS_VAR_REF.match(value) is not None",
"def has_variable(self, varname):\n return varname in self._file.variables",
"def all_vars_empty(self):\n\n return(all([v.is_empty() for v in self.vars.values()]))",
"def hasVar(self):\n return hasattr(self, 'v') and self.v is not None",
"def vars_inited(self):\n inited, init_sess = self._var_inited\n return inited and init_sess == self.session",
"def has_required_config(self):\n config = get_config()\n\n # the following options MUST be set by the user before FLACManager can\n # be used\n return (\n config[\"Organize\"].get(\"library_root\")\n and config[\"Gracenote\"].get(\"client_id\")\n and config[\"MusicBrainz\"].get(\"contact_url_or_email\")\n and config[\"MusicBrainz\"].get(\"libdiscid_location\")\n )",
"def check_vars_set():\n missing_vars = []\n\n for var in REQUIRED_VARS:\n if os.environ.get(var, None) is None:\n missing_vars.append(var)\n\n if len(missing_vars) > 0:\n print(\"Missing variables found: {missing_vars}\".format(\n missing_vars=missing_vars))\n\n return len(missing_vars) == 0",
"def HasVAR(self):\n return self.__has('VAR')",
"def is_configured(self, settings, required_keys):\n\t\tfor key in required_keys:\n\t\t\tif not settings.get(key, None) or \\\n\t\t\t\tnot settings.get(key):\n\t\t\t\treturn False\n\t\t\t\n\t\treturn True",
"def checkDefined(requiredItems, configItems):\n allReqsFound = True\n for i in requiredItems:\n if i not in configItems:\n print '\\033[91m' + i + ' not defined!' + '\\033[0m'\n allReqsFound = False\n return allReqsFound",
"def _validate_fp_settings(self):\n valid = True\n if self.fit:\n self.config.validate()\n else:\n log.info(\"No results available from fit.\")\n valid = False\n if \"flux-points\" not in self.settings:\n log.info(\"No values declared for the energy bins.\")\n valid = False\n elif \"fp_binning\" not in self.settings[\"flux-points\"]:\n log.info(\"No values declared for the energy bins.\")\n valid = False\n if not valid:\n log.info(\"Flux points calculation cannot be done.\")\n return valid",
"def __valid_settings(self):\n if not isinstance(self.settings, dict):\n return False\n else:\n # Check that each key in the defaults exists in the imported settings.\n for setting in self.defaults.keys():\n if setting not in self.settings.keys():\n return False\n else:\n return True",
"def is_density_known(self):\n return self._is_density_known",
"def contains_vars(self, variables):\n for variable in variables:\n if variable not in self._map:\n return False\n\n return True",
"def has_all_required_pref_keys(self):\n for key in self.REQUIRED_PREF_KEYS:\n if not key in self.prefs:\n return False\n return True",
"def _hasVarBeenDeclared(self, var_name, group=None):\n\n has_been_declared = False\n\n if isinstance(var_name, list) is not True:\n\n var_name = [var_name]\n\n if group is not None:\n\n where_to_look = self._equation_groups[group]\n\n else:\n\n where_to_look = self._equations_list\n\n for eq in where_to_look:\n\n #print(\"\\n======>Equation: \", eq._getSymbolicObject())\n\n try:\n\n equation_members_ = eq._getSymbolicObject().args\n\n except:\n\n equation_members_ = []\n\n for eq_i in equation_members_:\n\n #print(\"\\n\\t======>Member: \",eq_i)\n\n #print(\"\\n\\t\\t======>Has time_var_declared? : \", [ t_i in sp.srepr(eq_i) for t_i in var_name])\n\n #Will not trigger for 'Derivative' terms\n\n if any(var_i in sp.srepr(eq_i) and 'Derivative' not in sp.srepr(eq_i) for var_i in var_name):\n\n has_been_declared = True\n\n break\n\n if has_been_declared is True:\n\n break\n\n return has_been_declared",
"def has ( self, evar_name ):\n return evar_name in self._evars",
"def in_robot():\n try:\n BuiltIn().get_variables()\n return True\n except:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Traverse the given config structure and resolve any deferred variables found.
|
def resolve_deferred(self, struct):
if isinstance(struct, str):
return self.resolve_deferred_str(struct)
elif isinstance(struct, list):
for i in range(len(struct)):
            struct[i] = self.resolve_deferred(struct[i])
return struct
elif isinstance(struct, dict):
for key in struct.keys():
struct[key] = self.resolve_deferred(struct[key])
return struct
else:
raise RuntimeError("Config structure contains invalid data types:"
"{}".format(struct))
|
[
"def interpolate(self):\n # self.log.debug('Interpolating replacement strings')\n self.build_dependencies()\n while not self.resolved:\n # self.log.debug('CONFIG NOT RESOLVED, MAKING PASS')\n # Now we can actually perform any resolutions\n for ref, ref_data in self.dep_graph.items():\n # Has this config item already been resolved?\n is_resolved = ref_data['resolved']\n if not is_resolved:\n if 'ref_to' not in ref_data:\n # self.log.debug('NO REFERENCES, RESOLVING')\n # Before resolving all the places in the config\n # that where this reference occurs, we first need\n # to evaluate value at this path so we don't\n # resolve references to this path with a string\n # surrounded in backticks\n evaled = ref_data['evaluated']\n if not evaled:\n key_seq = ref.split('.')\n val = self.getfromseq(key_seq)\n res = self.eval_expr(val)\n self.setfromseq(key_seq, res)\n self.dep_graph[ref]['evaluated'] = True\n self._resolve(ref)\n self.dep_graph[ref]['resolved'] = True\n else:\n # If all the locations this reference points to are\n # resolved and evaluated, then we can go ahead and\n # resolve this one\n if self._check_resolved(ref_data['ref_to']) and self._check_evaled(ref_data['ref_to']):\n evaled = ref_data['evaluated']\n if not evaled:\n key_seq = ref.split('.')\n val = self.getfromseq(key_seq)\n res = self.eval_expr(val)\n self.setfromseq(key_seq, res)\n self.dep_graph[ref]['evaluated'] = True\n self._resolve(ref)\n self.dep_graph[ref]['resolved'] = True\n self.resolved = self._check_resolved(self.dep_graph.keys())",
"def _parse_config(self):\n for line in self.config_str.split('\\n'):\n line = line.strip()\n if line and line[0] != '#':\n key, value = line.split('=')\n key = key.strip()\n value = value.strip()\n \n if not Config.RE_HAS_VAR_REF.match(value):\n # no variable references e.g. ${var}, so we evaluate\n # the expression to get value into the correct \n # Python type\n try:\n value = eval(value.strip(), self._globals, self._locals)\n # we evaluated it successfully so record it as such\n self._evaluated.append(key)\n except SyntaxError:\n # if there is a syntax error we'll just use the \n # variable as a string\n value = value.strip()\n self[key.strip()] = value\n\n self._check_dependencies()\n\n # finished parsing lets evaluate each variable\n while len(self.keys()) != len(self._evaluated):\n for var_name in self.keys():\n self._get_var_value(var_name)",
"def resolve_variables(self, provided_variables):\n self.resolved_variables = {}\n defined_variables = self.defined_variables()\n variable_dict = dict((var.name, var) for var in provided_variables)\n for var_name, var_def in defined_variables.iteritems():\n value = resolve_variable(\n var_name,\n var_def,\n variable_dict.get(var_name),\n self.name\n )\n self.resolved_variables[var_name] = value",
"def _init_from_config(self, reserved_keys, value_dict):\n\n for key, value in value_dict.items():\n if key in reserved_keys:\n raise VariableError(\"Var name '{}' is reserved.\".format(key),\n var=key)\n\n if isinstance(value, DeferredVariable):\n self.data[key] = value\n else:\n try:\n self.data[key] = VariableList(values=value)\n except VariableError as err:\n err.var = key\n raise err",
"def resolve_override(config, rels=[], dmaap={}):\n # use deepcopy to make sure that config is not touched\n return _recurse(copy.deepcopy(config), rels, dmaap)",
"def resolve(self, container):\n try:\n mod_name, var_name = self.value_conf.rsplit('.', 1)\n except ValueError:\n # to many values to unpack. no . in it.\n return container.import_module(self.value_conf)\n else:\n mod = container.import_module(mod_name)\n return getattr(mod, var_name)",
"def build_dependencies(self):\n # First we find all the references and the exact location(s) in the config\n # that each reference ocurrs at\n self._find_references()\n # Next we determine if any of the things we refer to in the dependency\n # graph have backticks, meaning they must be evaluated before the\n # things that refer to them actually resolve their value\n for path in self.dep_graph.keys():\n key_seq = path.split('.')\n val = self.getfromseq(key_seq)\n if isinstance(val, str) and (val[0] == '`' and val[-1] == '`'):\n self.dep_graph[path].update({'evaluated': False})\n else:\n self.dep_graph[path].update({'evaluated': True})\n\n # Now we build out the \"refers_to\" entry for each reference to see if a\n # reference at one place in the table refers to some other value\n # For each reference we found\n for ref, data in self.dep_graph.items():\n # Loop through all the other references. If the above reference exists\n # in the \"ref_by\" table, we know the above reference refers to another\n # value and we need to resolve that value first. Note we also do this\n # for ref itself so we can catch circular references\n for other_ref, its_data in self.dep_graph.items():\n if ref in its_data['ref_by']:\n if other_ref == ref:\n raise ValueError('There is a circular reference in your'\n ' config file at %s' % ref)\n else:\n if 'ref_to' in data:\n data['ref_to'].append(other_ref)\n else:\n data['ref_to'] = [other_ref]\n # Nothing has been resolved at this poing\n data['resolved'] = False",
"def resolve(self, text):\n\t\tprev_text = text\n\t\twhile True:\n\t\t\tfor k, v in self.vars.iteritems():\n\t\t\t\tif v is None:\n\t\t\t\t\tv = \"\"\n\t\t\t\ttext = text.replace(\"${%s}\" % k, v)\n\t\t\tif not XMLParser.RE_VAR.match(text) or text == prev_text:\n\t\t\t\tbreak\n\t\treturn text",
"def resolve(self, context):\n for k, v in self._vars.iteritems():\n if isinstance(v, (list, tuple)):\n v = [self._resolve_single(context, x) for x in v]\n else:\n v = self._resolve_single(context, v)\n arg = self._args[k]\n v = arg.base_clean(v)\n try:\n tag_arg_clean = getattr(self, 'clean_%s' % (arg.name,))\n except AttributeError:\n pass\n else:\n v = tag_arg_clean(v)\n self.args[k] = v\n self.clean()",
"def resolve_lookups(variable, context, provider):\n resolved_lookups = {}\n for lookup in variable.lookups:\n try:\n handler = CFNGIN_LOOKUP_HANDLERS[lookup.type]\n except KeyError:\n raise UnknownLookupType(lookup)\n try:\n resolved_lookups[lookup] = handler(\n value=lookup.input,\n context=context,\n provider=provider,\n )\n except Exception as err:\n raise FailedVariableLookup(variable.name, lookup, err)\n return resolved_lookups",
"def substitute_vars(cfg):\n for k, v in cfg.items():\n if isinstance(v, str):\n cfg[k] = test_define_value(v)[0]\n elif isinstance(v, dict):\n substitute_vars(v)\n elif isinstance(v, list):\n new_list = []\n for lv in v:\n if isinstance(lv, dict):\n substitute_vars(lv)\n new_list.append(lv)\n elif isinstance(lv, str):\n new_list.append(test_define_value(lv)[0])\n else:\n new_list.append(lv)\n cfg[k] = new_list",
"def _recurse(config, rels, dmaap):\n if isinstance(config, list):\n return [_recurse(item, rels, dmaap) for item in config]\n if isinstance(config, dict):\n for key in config:\n config[key] = _recurse(config[key], rels, dmaap)\n return config\n if isinstance(config, six.string_types):\n return _replace_value(config, rels, dmaap)\n # not a dict, not a list, not a string, nothing to do.\n return config",
"def _expand_variables(self, config, hostname):\n\n if 'hostname' in config:\n config['hostname'] = config['hostname'].replace('%h', hostname)\n else:\n config['hostname'] = hostname\n\n if 'port' in config:\n port = config['port']\n else:\n port = SSH_PORT\n\n user = os.getenv('USER')\n if 'user' in config:\n remoteuser = config['user']\n else:\n remoteuser = user\n\n host = socket.gethostname().split('.')[0]\n fqdn = LazyFqdn(config, host)\n homedir = os.path.expanduser('~')\n replacements = {'controlpath':\n [\n ('%h', config['hostname']),\n ('%l', fqdn),\n ('%L', host),\n ('%n', hostname),\n ('%p', port),\n ('%r', remoteuser),\n ('%u', user)\n ],\n 'identityfile':\n [\n ('~', homedir),\n ('%d', homedir),\n ('%h', config['hostname']),\n ('%l', fqdn),\n ('%u', user),\n ('%r', remoteuser)\n ],\n 'proxycommand':\n [\n ('%h', config['hostname']),\n ('%p', port),\n ('%r', remoteuser)\n ]\n }\n\n for k in config:\n if k in replacements:\n for find, replace in replacements[k]:\n if isinstance(config[k], list):\n for item in range(len(config[k])):\n if find in config[k][item]:\n config[k][item] = config[k][item].\\\n replace(find, str(replace))\n else:\n if find in config[k]:\n config[k] = config[k].replace(find, str(replace))\n return config",
"def test_resolve_refs():\n\n # One level of nesting\n b = do_resolve(\"b.json\")\n assert b[\"properties\"] == {\"b_prop\": {\"c_prop\": {\"type\": \"string\"}}}\n\n # Two levels of nesting\n a = do_resolve(\"a.json\")\n assert a[\"properties\"] == {\"a_prop\": b[\"properties\"]}\n\n # Two levels of nesting across different directories\n one = do_resolve(\"1.json\")\n assert one[\"properties\"] == {\"1_prop\": {\n \"2_prop\": {\"3_prop\": {\"type\": \"string\"}}}}",
"def load_config(self):\n for local_var, config_var in self.from_config.items():\n value = flask.current_app.config.get(config_var)\n if value:\n if \".\" in local_var:\n # this is a dotpath -- needs special handling\n body, tail = local_var.rsplit(\".\", 1)\n obj = getattrd(self, body)\n setattr(obj, tail, value)\n else:\n # just use a normal setattr call\n setattr(self, local_var, value)",
"def process_assignments(config):\n items = {}\n to_interpolate = {}\n\n import re\n re_interpolation = re.compile(r'\\{([^}]+)\\}')\n\n for item in config:\n if 'name' in item:\n # This is a section. Nothing to interpolate\n continue\n assignment = item\n key = assignment[\"key\"]\n value = assignment[\"value\"]\n\n if re_interpolation.search(value):\n to_interpolate[key] = value\n continue\n\n items[key] = value\n\n for key, value in to_interpolate.items():\n value = re_interpolation.sub(lambda m: items[m.group(1)], value)\n items[key] = value\n\n config = Config()\n for key, value in items.items():\n config.__add_key_value__(key, value)\n\n return config",
"def fetch(project_name, config_name):\n # project = gcloud._helpers._determine_default_project()\n client = _create_client()\n\n variable_names = _list_variables(client, project_name, config_name)\n variables = _fetch_variable_values(client, variable_names)\n\n return variables",
"def ResolveReferences(self, var_dict, args):\n re_var = re.compile('(\\$\\{[-_a-z0-9A-Z]{1,}\\})')\n\n while True:\n m = re_var.search(args)\n if not m:\n break\n lookup = m.group(0)[2:-1]\n value = var_dict.get(lookup, '')\n args = args[:m.start(0)] + value + args[m.end(0):]\n return args",
"def env_loop(environment):\n def scan(vars, vals):\n \"\"\"\n scans variables in a frame\n \"\"\"\n if isNull(vars):\n return env_loop(enclosing_env(environment)) # 5-4 env -> environment\n return set_car(vals, val) #4-15\n else:\n return scan(cdr(vars), cdr(vals)) # 4-15\n if environment is the_empty_environment:\n raise UnboundLocalError(\"lookup_variable\")\n frame = first_frame(environment)\n return scan(frame_variables(frame), frame_values(frame)) # 4-15"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Resolve any deferred variables in the given string, and return the result.
|
def resolve_deferred_str(self, line):
resolved_line = []
offset = 0
match = self.DEFERRED_VAR_RE.search(line, offset)
# Walk through the line, and lookup the real value of
# each matched deferred variable.
while match is not None:
resolved_line.append(line[offset:match.start()])
offset = match.end()
var_name = match.groups()[0]
# This may raise a KeyError, which callers should
# expect.
resolved_line.append(self[var_name])
match = self.DEFERRED_VAR_RE.search(line, offset)
# Don't forget the remainder of the line.
resolved_line.append(line[offset:])
resolved_line = ''.join(resolved_line)
# Make sure all of our escape sequences are accounted for.
if '\x1e]' in resolved_line or '[\x1e' in resolved_line:
raise ValueError("Errant escape sequence '{}'"
.format(resolved_line))
return resolved_line
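A minimal sketch of the same scan-and-splice loop, with the regex and the variable lookup reduced to stand-ins (DEFERRED_VAR_RE's real pattern is not shown in this row, so the pattern below is an assumption):

import re

# Assumed pattern: a variable name wrapped in the '[\x1b' ... '\x1b]' markers.
DEFERRED_VAR_RE = re.compile('\\[\x1b([^\x1b]+)\x1b\\]')
variables = {'sys.host_name': 'node042'}  # stand-in for self[var_name]

def _resolve_deferred_str(line):
    parts, offset = [], 0
    for match in DEFERRED_VAR_RE.finditer(line):
        parts.append(line[offset:match.start()])  # text before the reference
        parts.append(variables[match.group(1)])   # resolved value (may KeyError)
        offset = match.end()
    parts.append(line[offset:])                   # remainder of the line
    return ''.join(parts)

print(_resolve_deferred_str('host is [\x1bsys.host_name\x1b] today'))
# -> 'host is node042 today'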
|
[
"def resolve(self, text):\n\t\tprev_text = text\n\t\twhile True:\n\t\t\tfor k, v in self.vars.iteritems():\n\t\t\t\tif v is None:\n\t\t\t\t\tv = \"\"\n\t\t\t\ttext = text.replace(\"${%s}\" % k, v)\n\t\t\tif not XMLParser.RE_VAR.match(text) or text == prev_text:\n\t\t\t\tbreak\n\t\treturn text",
"def resolve(self, var, context):\r\n if var[0] in ('\"', \"'\") and var[-1] == var[0]:\r\n return var[1:-1]\r\n else:\r\n return Variable(var).resolve(context)",
"def _evaluate_var(self, name):\n value = self[name]\n\n # this variable has dependencies which we need to resolve first\n if name in self._depends:\n depends = self._depends[name]\n # for each depends variable replace it's reference with \n # it's actual value\n for ref_name in depends:\n ref_value = self[ref_name]\n value = value.replace('${%s}' % ref_name, str(ref_value))\n\n # if all of the dependencies have been resolved get the correct type\n if not _needs_expansion(value):\n try:\n value = eval(value.strip(), self._globals, self._locals)\n except SyntaxError:\n # just use the stripped value if there's an error in \n # the expression\n value = value.strip()\n # mark this variable as being evaluated\n self._evaluated.append(name)\n # replace value\n self[name] = value\n return value",
"def resolve(self, text, mnamespace):\n if (text==None):\n return None\n if (not isinstance(text, basestring)):\n return text\n # ok resolve aliases -- for now we use a helper function to do the main work\n resolvedtext = misc.resolve_expand_string(text, self.aliases, mnamespace)\n # now we have a fully resolved string that may have contained some aliases\n return resolvedtext",
"def ResolveReferences(self, var_dict, args):\n re_var = re.compile('(\\$\\{[-_a-z0-9A-Z]{1,}\\})')\n\n while True:\n m = re_var.search(args)\n if not m:\n break\n lookup = m.group(0)[2:-1]\n value = var_dict.get(lookup, '')\n args = args[:m.start(0)] + value + args[m.end(0):]\n return args",
"def resolve_function(self, s, paths, context, merge):\n calls = function.findall(s)\n\n # If this is an alias, just replace it (doesn't require interpolation)\n if len(calls) == 1 and calls[0][0] == 'alias':\n if function.sub(\"\", s) != \"\":\n raise Exception(\"Alias can not be used for string interpolation: `{}`\".format(s))\n return self.get_key(calls[0][1], paths, context, None)\n\n # Iterate over all function calls and string interpolate their resolved values\n for call, arg in calls:\n if call == 'hiera':\n replace = self.get_key(arg, paths, context, None)\n elif call == 'scope':\n replace = context.get(arg)\n elif call == 'literal':\n replace = arg\n elif call == 'alias':\n raise Exception(\"Invalid alias function call: `{}`\".format(s))\n\n if not replace:\n raise Exception(\"Could not resolve value for function call: `{}`\".format(s))\n\n if not isinstance(replace, str):\n raise Exception(\"Resolved value is not a string for function call: `{}`\".format(s))\n\n # Replace only the current function call with our resolved value\n s = function.sub(replace, s, 1)\n\n return s",
"def resolve_deferred(self, struct):\n\n if isinstance(struct, str):\n return self.resolve_deferred_str(struct)\n elif isinstance(struct, list):\n for i in range(len(struct)):\n struct[i] = self.resolve_deferred(struct)\n return struct\n elif isinstance(struct, dict):\n for key in struct.keys():\n struct[key] = self.resolve_deferred(struct[key])\n return struct\n else:\n raise RuntimeError(\"Config structure contains invalid data types:\"\n \"{}\".format(struct))",
"def find_var(str):\n next_index = 0\n while next_index < len(str):\n if str[next_index].isspace() or str[next_index] in ('$', '\\'', '\\\"'):\n break\n next_index += 1\n var_name = str[0:next_index]\n str = str[next_index:]\n return var_name, str",
"def resolve_variables(self, provided_variables):\n self.resolved_variables = {}\n defined_variables = self.defined_variables()\n variable_dict = dict((var.name, var) for var in provided_variables)\n for var_name, var_def in defined_variables.iteritems():\n value = resolve_variable(\n var_name,\n var_def,\n variable_dict.get(var_name),\n self.name\n )\n self.resolved_variables[var_name] = value",
"def resolve_var(frame, name: str):\n for ns in frame.f_locals, frame.f_globals, frame.f_builtins:\n try:\n return ns[name]\n except KeyError:\n pass\n raise NameError(name)",
"def resolve_interpolates(self, s, context):\n interps = interpolate.findall(s)\n\n for i in interps:\n s = interpolate.sub(context.get(i), s, 1)\n\n return s",
"def resolve_vars(cmd, vars):\n from string import Template\n try:\n template = Template(cmd)\n convert_command = template.substitute(vars)\n except:\n print \"[ERROR] can't expand %s: %s\" % (vars, cmd)\n return None\n return convert_command",
"def resolve(self, container):\n try:\n mod_name, var_name = self.value_conf.rsplit('.', 1)\n except ValueError:\n # to many values to unpack. no . in it.\n return container.import_module(self.value_conf)\n else:\n mod = container.import_module(mod_name)\n return getattr(mod, var_name)",
"def can_resolve(self, s):\n if isinstance(s, string_types) and (function.findall(s) or interpolate.findall(s)):\n return True\n return False",
"def subst_vars (s, local_vars):\r\n check_environ()\r\n def _subst (match, local_vars=local_vars):\r\n var_name = match.group(1)\r\n if local_vars.has_key(var_name):\r\n return str(local_vars[var_name])\r\n else:\r\n return os.environ[var_name]\r\n\r\n try:\r\n return re.sub(r'\\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)\r\n except KeyError, var:\r\n raise ValueError, \"invalid variable '$%s'\" % var",
"def resolve(self, s, paths, context, merge):\n if isinstance(s, dict):\n return self.resolve_dict(s, paths, context, merge)\n elif isinstance(s, list):\n return list(self.resolve_list(s, paths, context, merge))\n elif not self.can_resolve(s):\n return s\n\n base = self.resolve_function(s, paths, context, merge)\n\n # If we can string interpolate the result, lets do that\n if isinstance(base, str):\n base = self.resolve_interpolates(base, context)\n\n return base",
"def evaluate_strings_with_given_variables(_strs_to_execute, _variable_dict=None):\n if _variable_dict is None:\n _variable_dict = {}\n if not isinstance(_strs_to_execute, list):\n _got_list_of_constraints = False\n _strs_to_execute = [_strs_to_execute]\n else:\n _got_list_of_constraints = True\n for _key, _value in _variable_dict.items():\n locals()[_key] = _value\n _ret = [eval(_elem) for _elem in _strs_to_execute]\n if _got_list_of_constraints:\n return _ret\n else:\n return _ret[0]",
"def expand(val, variables):\n while True:\n m = re.match(r'.*\\$(\\w+).*', val)\n if m is not None and m.lastindex is not None and m.lastindex >= 1:\n varname = m.group(1)\n try:\n v = variables[varname]\n except KeyError:\n v = os.getenv(varname)\n if v is None:\n print(\"Unknown variable '{0}'\".format(varname))\n exit(1)\n val = re.sub(r\"\\$\"+varname, v, val)\n else:\n break\n return val",
"def match_variable(string, idx, var_chars=variable_chars):\n def match_variable_helper(string, idx, var_chars, name, length):\n # check is there remaining string to process\n if idx >= len(string):\n return (name, length)\n\n first = string[idx]\n\n # check var_chars\n correct_char = False\n for var_str in var_chars:\n if first in var_str:\n correct_char = True\n break\n if not correct_char:\n return (name, length)\n\n return match_variable_helper(string, idx + 1, var_chars, name + first, length + 1)\n\n # check if the first character could be variable:\n if (string[idx] not in UNDERSCORE) and (string[idx] not in ALPHABETICAL):\n return (None, 0)\n\n v, l = match_variable_helper(string, idx, var_chars, \"\", 0)\n\n # no match\n if l == 0:\n return (None, 0)\n\n # variable cannot start with numeric\n if VARIABLE_NOT_START_NUMERIC and v[0] in NUMERICAL:\n return (None, 0)\n\n return ((VARIABLE, v), l)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return all the variable sets as a single dictionary. This is for testing and bug resolution, not production code.
|
def as_dict(self):
var_sets = {}
for var_set in self.variable_sets.values():
var_sets[var_set.name] = {}
for key in var_set.data.keys():
            var_sets[var_set.name][key] = []
            item = var_set.data[key]
            if isinstance(item, DeferredVariable):
                var_sets[var_set.name][key] = repr(item)
            else:
                for subitem in var_set.data[key].data:
                    var_sets[var_set.name][key].append(subitem.data)
return var_sets
|
[
"def get_mutable_vars() -> Dict:\n return {name: VarInfo(type_()) for name, type_ in MUTABLE_ENVIRONMENT_VARS.items()}",
"def variables(self):\n # Task 4.1\n var_set = set()\n var_set.update(self.conclusion.vars)\n for assumption in self.assumptions:\n var_set.update(assumption.vars)\n return var_set",
"def collect_bake_sets():\n set_name = 'bake_SET' # this is the name we're looking for!!\n obj_sets = cmds.ls(type='objectSet')\n bake_sets = []\n\n for obj_set in obj_sets:\n if set_name in obj_set:\n bake_sets.append(obj_set)\n\n bake_set_dicts = []\n\n for bake_set in bake_sets:\n members = cmds.sets(bake_set,q=True)\n [x.encode('UTF8') for x in members]\n\n # check for cache attrs\n step = 1\n if cmds.attributeQuery('step', node=bake_set, exists=1):\n step = cmds.getAttr(bake_set+'.step')\n static = 0\n if cmds.attributeQuery('static', node=bake_set, exists=1):\n static = cmds.getAttr(bake_set+'.static')\n\n bake_dict = {\n 'name': bake_set,\n 'members': members,\n 'step': step,\n 'static': static,\n }\n\n bake_set_dicts.append(bake_dict)\n\n return bake_set_dicts",
"def variables(self):\r\n return []",
"def _collect_vars(self):\n res = set()\n self.objective.collect_variables(res)\n for c in self.constraints:\n c.collect_variables(res)\n self.variables = list(res)\n self.var_slices = {}\n start = 0\n for var in self.variables:\n self.var_slices[var] = slice(start, start + var.size)\n start += var.size",
"def defined_variables(self):\n return getattr(self, \"VARIABLES\", {})",
"def variables(self):\r\n var_list = []\r\n for arg in self.args:\r\n var_list += arg.variables()\r\n # Remove duplicates.\r\n return list(set(var_list))",
"def variables(self):\n #We give preference to test specification variable lists.\n if self.test is not None:\n return self.test.variables\n elif self.group is not None:\n return self.group.variables\n else:\n return []",
"def __get_vars(self):\n\t\tself.__tvars = {}\n\t\tvar = self.dom.getElementsByTagName(\"vars\")[0]\n\t\tfor var_xml in var.getElementsByTagName(\"var\"):\n\t\t\tv_name = XmlHandler.get_label(\"name\", var_xml)\n\t\t\tv_value = XmlHandler.get_label(\"value\", var_xml)\n\t\t\tv_topo = XmlHandler.get_label(\"topology\", var_xml)\n\t\t\tif (v_topo is not None) and (v_topo.lower() == \"yes\"):\n\t\t\t\tself.__topovars[v_name] = v_value\n\t\t\telse: self.__compvars[v_name] = v_value",
"def get_all_variables(self):\n with self.graph.as_default():\n return [_from_proto_fn(var_def) for var_def in self.info.variables]",
"def __init__(self):\n\n self.variable_sets = {}\n\n self.reserved_keys = []\n self.reserved_keys.extend(self.VAR_SETS)",
"def get_variables(context):\n return sorted(set(_flatten(context.dicts)))",
"def variables(self):\n return odict([(k,n) for k,n in self._nodes.items()\n if isinstance(n, pr.BaseVariable) and not isinstance(n, pr.BaseCommand)])",
"def get_vars():\n thevars = {\n 'kernel_version': {\n 'desc': \"Exact version of the Linux kernel used in the OS\",\n 'label': 'Kernel Version',\n 'unit': '',\n 'parents': ['version']\n },\n 'user': {\n 'desc': \"User who compiled the kernel, host name where it happened\",\n 'label': 'Username, hostname',\n 'unit': '',\n 'parents': ['version']\n },\n 'gcc_version': {\n 'desc': \"Version of the GCC compiler used for building the kernel\",\n 'label': 'GCC Version',\n 'unit': '',\n 'parents': ['version']\n },\n 'os_version': {\n 'desc': \"OS version\",\n 'label': 'OS Version',\n 'unit': '',\n 'parents': ['version']\n },\n 'kernel_type': {\n 'desc': \"Type of the kernel. SMP indicates Symmetric MultiProcessing\",\n 'label': 'Kernel Type',\n 'unit': '',\n 'parents': ['version']\n },\n 'kernel_date': {\n 'desc': \"Date and time when the kernel was built\",\n 'label': 'Date of compilation',\n 'unit': '',\n 'parents': ['version']\n }\n }\n return thevars",
"def global_data(self):\n return {Res1D.get_data_set_name(gdat): gdat for gdat in self._data.GlobalData.DataItems}",
"def variables(e: Expression) -> Set[Variable]:\n return_value = set()\n\n def f(e: Expression):\n if isinstance(e, Variable):\n return_value.add(e)\n\n traversal.on_every_node(f, e)\n return return_value",
"def _collectSets(self, setType: Type[MeshSet] = None):\n elementSets = {}\n nodeSets = {}\n surfaceSets = {}\n\n # Iterate through all user defined sets\n for elSet in self._elementSets:\n elementSets[elSet.name] = elSet\n\n for nodeSet in self._nodeSets:\n nodeSets[nodeSet.name] = nodeSet\n\n for surfSet in self._surfaceSets:\n surfaceSets[surfSet.name] = surfSet\n\n # Iterate through all loadcases and boundary conditions.and find unique values. This is greedy so will override\n # any with same name.\n for loadcase in self.loadCases:\n\n # Collect result sets node and element sets automatically\n for resultSet in loadcase.resultSet:\n if isinstance(resultSet, ElementResult):\n elementSets[resultSet.elementSet.name] = resultSet.elementSet\n elif isinstance(resultSet, NodalResult):\n nodeSets[resultSet.nodeSet.name] = resultSet.nodeSet\n\n for bc in loadcase.boundaryConditions:\n if isinstance(bc.target, ElementSet):\n elementSets[bc.target.name] = bc.target\n\n if isinstance(bc.target, NodeSet):\n nodeSets[bc.target.name] = bc.target\n\n if isinstance(bc.target, SurfaceSet):\n surfaceSets[bc.target.name] = bc.target\n\n for con in self.connectors:\n nodeSets[con.nodeset.name] = con.nodeset\n\n if setType is ElementSet:\n return list(elementSets.values())\n elif setType is NodeSet:\n return list(nodeSets.values())\n elif setType is SurfaceSet:\n return list(surfaceSets.values())\n else:\n return list(elementSets.values()), list(nodeSets.values()), list(surfaceSets.values())",
"def getGlobals():\n # type: () -> Dict[String, Any]\n return {}",
"def build_output_sets(self, sets, traits, champs):\n\n set_map = {}\n for set_number, set_name, set_chars in sets:\n set_champs_pairs = [champs[name] for name in set_chars if name in champs]\n set_traits_paths = {h for p in set_champs_pairs for h in p[1]}\n set_map[set_number] = {\n \"name\": set_name,\n \"traits\": [traits[h] for h in set_traits_paths if h in traits],\n \"champions\": [p[0] for p in set_champs_pairs],\n }\n return set_map"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialize the variable set from a config dictionary.
|
def _init_from_config(self, reserved_keys, value_dict):
for key, value in value_dict.items():
if key in reserved_keys:
raise VariableError("Var name '{}' is reserved.".format(key),
var=key)
if isinstance(value, DeferredVariable):
self.data[key] = value
else:
try:
self.data[key] = VariableList(values=value)
except VariableError as err:
err.var = key
raise err
|
[
"def _init_from_config(self, values):\n\n sub_vars = None\n\n if not isinstance(values, list):\n values = [values]\n\n for idx in range(len(values)):\n value_pairs = values[idx]\n if not isinstance(value_pairs, dict):\n value_pairs = {None: value_pairs}\n\n if sub_vars is None:\n sub_vars = set(value_pairs.keys())\n elif set(value_pairs.keys()) != sub_vars:\n raise VariableError(\n \"Sub-keys do no match across variable values.\",\n index=str(idx))\n\n try:\n self.data.append(SubVariable(value_pairs))\n except VariableError as err:\n err.index = str(idx)\n raise err",
"def __init__(self):\n\n self.variable_sets = {}\n\n self.reserved_keys = []\n self.reserved_keys.extend(self.VAR_SETS)",
"def set_from_dict(config):\n if \"CACHE\" in config:\n class_ = config['CACHE'].pop(\"class\", None)\n set_defaults(class_=class_, **config['CACHE'])\n\n if \"SERIALIZER\" in config:\n class_ = config['SERIALIZER'].pop(\"class\", None)\n set_default_serializer(class_=class_, **config['SERIALIZER'])\n\n if \"PLUGINS\" in config:\n set_default_plugins(config=config['PLUGINS'])",
"def __init__(self):\n self.variables = {}\n self.values = {}",
"def __init__(self, config_dict: dict):\n assert isinstance(config_dict, collections.abc.Mapping)\n\n self._config = config_dict",
"def init_vars(self):\n pass",
"def _parse_config(self):\n for line in self.config_str.split('\\n'):\n line = line.strip()\n if line and line[0] != '#':\n key, value = line.split('=')\n key = key.strip()\n value = value.strip()\n \n if not Config.RE_HAS_VAR_REF.match(value):\n # no variable references e.g. ${var}, so we evaluate\n # the expression to get value into the correct \n # Python type\n try:\n value = eval(value.strip(), self._globals, self._locals)\n # we evaluated it successfully so record it as such\n self._evaluated.append(key)\n except SyntaxError:\n # if there is a syntax error we'll just use the \n # variable as a string\n value = value.strip()\n self[key.strip()] = value\n\n self._check_dependencies()\n\n # finished parsing lets evaluate each variable\n while len(self.keys()) != len(self._evaluated):\n for var_name in self.keys():\n self._get_var_value(var_name)",
"def __init__(self, analysis_sname, analysis_dict, var_dict, config):\n\n # Define logger on type and save analysis short name\n self.logger = logging.getLogger(analysis_sname)\n self.analysis_sname = analysis_sname\n\n # (1) Error check: analysis_dict keys are either \"datestrs\" or already in config\n # (\"datestrs\" required)\n if 'datestrs' not in analysis_dict:\n raise KeyError(\"'{}' must contain 'datestrs'\".format(analysis_sname))\n for config_key in analysis_dict:\n if config_key not in config and config_key != 'datestrs':\n raise KeyError(\"'{}' is not a valid key in '{}'\".format(config_key, analysis_sname))\n\n # (2) Define datestrs and _global_config\n self.datestrs = analysis_dict['datestrs']\n # - Force datestrs[data_source] to be a list\n for data_source in self.datestrs:\n if not isinstance(self.datestrs[data_source], list):\n self.datestrs[data_source] = [self.datestrs[data_source]]\n self._global_config = dict()\n for key in config:\n if key in analysis_dict:\n self._global_config[key] = analysis_dict[key]\n else:\n self._global_config[key] = config[key]\n\n # (3) Define _var_dict\n self._var_dict = var_dict\n\n # (4) Set up objects for plotting\n self.fig = dict()\n self.axs = dict()",
"def init(args):\n Configuration.load_config(vars(args).get(\"config\"))",
"def _initFromDict(self, dictIn):\n if \"parameters\" not in dictIn.keys():\n self.raiseAnError(IOError, 'No Parameters specified in \"dictIn\" dictionary !!!!')\n if \"name\" in dictIn.keys():\n self.name = dictIn[\"name\"]\n if type(dictIn[\"parameters\"]).__name__ == \"list\":\n self.parameters['targets'] = dictIn[\"parameters\"]\n else:\n self.parameters['targets'] = dictIn[\"parameters\"].split(\",\")\n if \"bounds\" in dictIn.keys():\n self.bounds = dictIn[\"bounds\"]\n if \"transformationMethods\" in dictIn.keys():\n self.transfMethods = dictIn[\"transformationMethods\"]\n if \"verbosity\" in dictIn.keys():\n self.verbosity = dictIn['verbosity']\n if \"side\" in dictIn.keys():\n self.lsSide = dictIn[\"side\"]\n if \"tolerance\" in dictIn.keys():\n self.tolerance = float(dictIn[\"tolerance\"])\n if self.lsSide not in [\"negative\", \"positive\", \"both\"]:\n self.raiseAnError(IOError, 'Computation side can be positive, negative, both only !!!!')",
"def load_vars(self, vars):\n\t\tpass",
"def __init__(self, config='config.json'):\n self.read_config(config)",
"def initialize_from_config(self):\n cfg = RawConfigParser()\n cfg.readfp(open('./Config/generation.conf'))\n self.height = cfg.getint(\"scene\", \"height\")\n self.width = cfg.getint(\"scene\", \"width\")\n self.num_objects = cfg.getint(\"scene\", \"num_objects\")\n self.save()",
"def _init_from_dict(self, settings: Settings) -> None:\n # The valid ivars and reasonable defaults.\n valid = dict(\n ignore_case=False,\n node_only=False,\n pattern_match=False,\n search_body=True,\n search_headline=True,\n suboutline_only=False, # Seems safest. # Was True !!!\n whole_word=True,\n )\n # Set ivars to reasonable defaults.\n for ivar in valid:\n setattr(self, ivar, valid.get(ivar))\n # Override ivars from settings.\n errors = 0\n for ivar in settings.keys():\n if ivar in valid:\n val = settings.get(ivar)\n if val in (True, False):\n setattr(self, ivar, val)\n else: # pragma: no cover\n g.trace(\"bad value: {ivar!r} = {val!r}\")\n errors += 1\n else: # pragma: no cover\n g.trace(f\"ignoring {ivar!r} setting\")\n errors += 1\n if errors: # pragma: no cover\n g.printObj(sorted(valid.keys()), tag='valid keys')",
"def _init_values(self) -> None:\n self._values: Mapping[str, configutils.Values] = {}\n for name, opt in configdata.DATA.items():\n self._values[name] = configutils.Values(opt)",
"def _set_instance_variables(self, data):\n self.start_configs = data['start_configs']\n self.waypt_configs = data['waypt_configs']\n self.start_speeds = data['start_speeds']\n self.spline_trajectories = data['spline_trajectories']\n self.horizons = data['horizons']\n self.lqr_trajectories = data['lqr_trajectories']\n self.K_nkfd = data['K_nkfd']\n self.k_nkf1 = data['k_nkf1']\n \n # Initialize variable tensor for waypoints in world coordinates\n dt = self.params.system_dynamics_params.dt\n self.waypt_configs_world = [SystemConfig(\n dt=dt, n=config.n, k=1, variable=True,\n track_trajectory_acceleration=self.params.track_trajectory_acceleration) for config in data['start_configs']]\n\n self.instance_variables_loaded = True\n\n if self.params.verbose:\n N = self.params.waypoint_params.n\n for v0, start_config in zip(self.start_velocities, self.start_configs):\n print('Velocity: {:.3f}, {:.3f}% of goals kept({:d}).'.format(v0, 100.*start_config.n/N,\n start_config.n))",
"def from_dict(cls, name, config):\r\n return cls(name=name, **config)",
"def __init__(self):\n\n\t\t# create ConfigParser() obj\n\t\tself.config = ConfigParser.ConfigParser()",
"def load_config(self):\n for local_var, config_var in self.from_config.items():\n value = flask.current_app.config.get(config_var)\n if value:\n if \".\" in local_var:\n # this is a dotpath -- needs special handling\n body, tail = local_var.rsplit(\".\", 1)\n obj = getattrd(self, body)\n setattr(obj, tail, value)\n else:\n # just use a normal setattr call\n setattr(self, local_var, value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the value of the var given the var name, index, and sub_var name.
|
def get(self, var, index, sub_var):
if var in self.data:
return self.data[var].get(index, sub_var)
else:
raise KeyError(
"Variable set '{}' does not contain a variable named '{}'. "
"Available variables are: {}"
.format(self.name, var, tuple(self.data.keys())))
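A minimal sketch of the delegation above, assuming a simplified stand-in for the per-variable list object (names and data are illustrative):

class _MiniList:
    """Stand-in for VariableList: items are {sub_var: value} dicts."""
    def __init__(self, items):
        self.items = items
    def get(self, index, sub_var):
        return self.items[0 if index is None else index][sub_var]

class _MiniSet:
    """Stand-in holding just enough state for the set-level get() to work."""
    def __init__(self, name, data):
        self.name, self.data = name, data
    def get(self, var, index, sub_var):
        if var in self.data:
            return self.data[var].get(index, sub_var)
        raise KeyError("Variable set '{}' has no variable '{}'."
                       .format(self.name, var))

vset = _MiniSet('var', {'mpi': _MiniList([{'procs': '4'}, {'procs': '8'}])})
print(vset.get('mpi', 1, 'procs'))   # -> '8'
# vset.get('missing', 0, None) would raise a KeyError naming the set.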
|
[
"def get(self, index, sub_var):\n\n if index is None:\n index = 0\n else:\n if not isinstance(index, int):\n raise KeyError(\"Non-integer index given: '{}'\".format(index))\n\n if not -len(self.data) <= index < len(self.data):\n raise KeyError(\n \"Index out of range. There are only {} items in this variable.\"\n .format(len(self.data)))\n\n return self.data[index].get(sub_var)",
"def getVar(self, name):\n\t\treturn self.vars[name]",
"def get_var(self, name):\n try:\n return self.final_context.data[name].get_literal_value()\n except KeyError:\n raise (\"Test seeking variable (%s) not found in final context.\" %\n name)",
"def get_variable(self, varname):\n return next((v for v in self.config.data_spec if v['name'] == varname))",
"def get_variable_by_name(self,name):\n if type(name)!=type(''): self.error(name,' should be a valid variable string name.')\n if name not in self.header: self.error(name,' is not a valid variable for this object.')\n return self.data[self.header.index[name]]",
"def variable_value(self, variable, index=0):\n return get_product_variable_value(self, variable, index)",
"def __getitem__(self,\n subaddr_idx: int) -> str:\n return self.m_subaddr[subaddr_idx]",
"def get_script_variable(a_variable_name):\n return get_prefixed_variable(\"s\", a_variable_name)",
"def GetValue(self, name):\n try:\n return self.vars.Get(name).GetValue()\n except:\n raise Exception(\"variable \" + name + \" not initialized\")",
"def get_var_by_index(self, n):\n return self.data[n,:,:]",
"def varsub(val):\n _dbg('varsub(): starting with val = %s' % val)\n if isinstance(val, None.__class__):\n return val\n if isinstance(val, int):\n return str(val)\n i = 0\n while i < 100:\n if MARKER == 'dollar':\n s = re.search(r'\\$\\{([^${}]+)\\}', val)\n else:\n s = re.search(r'\\~\\{([^~{}]+)\\}', val)\n try:\n s.group(1)\n except (IndexError, AttributeError):\n break\n _dbg('varsub(): s.group(0) = %s' % s.group(0))\n _dbg('varsub(): s.group(1) = %s' % s.group(1))\n needle = s.group(0).replace('$', r'\\$')\n val = re.sub(needle, __varsub(s.group(1)), val)\n i += 1\n if MARKER == 'dollar':\n # Un-escape $\\\\{ --> ${, and $\\\\\\{ --> $\\{\n val = val.replace(r'$\\\\{', r'${')\n val = val.replace(r'$\\\\\\{', r'$\\{')\n return val",
"def get_substitute_variable(val: str, var_dict: dict) -> Any:\n var_marker = 'var:'\n if val.startswith(var_marker):\n var_name = val[len(var_marker):]\n\n if var_name in var_dict:\n return var_dict[var_name]\n\n return None",
"def get_variable_of_table(self, var_name):\n variable = None\n # if in both\n if (var_name in self.symbol_table.variable_table.keys()) & \\\n (var_name in self.symbol_table.subroutine_table.keys()):\n variable = self.symbol_table.subroutine_table[var_name]\n elif var_name in self.symbol_table.variable_table.keys():\n variable = self.symbol_table.variable_table[var_name]\n elif var_name in self.symbol_table.subroutine_table.keys():\n variable = self.symbol_table.subroutine_table[var_name]\n return variable",
"def getVar(self, varName, default=Unspecified, autoCall=True):\n\n try:\n return valueFromSearchList(\n self.searchList(), varName.replace('$', ''), autoCall)\n except NotFound:\n if default is not Unspecified:\n return default\n else:\n raise",
"def _get_variable(self, varname):\n\n return NetcdfVariableScipy(self._file.variables[varname])",
"def robust_index(in_var):\r\n\r\n if isinstance(in_var, str):\r\n return in_var\r\n elif isinstance(in_var, Symbol):\r\n return in_var.name\r\n else:\r\n raise TypeError('Value should be of type str or sympy.Symbol')",
"def _get_variable(data, variable):\n try:\n xarr = data[variable]\n except KeyError:\n raise KeyError('variable \\'' + variable +\n '\\' not found') from None\n return xarr",
"def get_value(self, var, thread_id):\n return self._state(thread_id).get_value(self._var_map[var])",
"def get_variable_by_index(self, var, index):\n\n var = var[:]\n\n if isinstance(var, np.ma.MaskedArray) or isinstance(index, np.ma.MaskedArray):\n rv = np.ma.empty((index.shape[0], 4), dtype=np.float64)\n if index.mask is not np.bool_(): # because False is not False. Thanks numpy\n rv.mask = np.zeros_like(rv, dtype=bool)\n rv.mask[:] = index.mask[:, 0][:, np.newaxis]\n rv.harden_mask()\n else:\n rv = np.zeros((index.shape[0], 4), dtype=np.float64)\n\n raw = np.ravel_multi_index(index.T, var.shape, mode='clip')\n rv[:, 0] = np.take(var, raw)\n raw += np.array(var.shape[1], dtype=np.int32)\n rv[:, 1] = np.take(var, raw)\n raw += 1\n rv[:, 2] = np.take(var, raw)\n raw -= np.array(var.shape[1], dtype=np.int32)\n rv[:, 3] = np.take(var, raw)\n return rv"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialize the variable list from the given config values.
|
def _init_from_config(self, values):
sub_vars = None
if not isinstance(values, list):
values = [values]
for idx in range(len(values)):
value_pairs = values[idx]
if not isinstance(value_pairs, dict):
value_pairs = {None: value_pairs}
if sub_vars is None:
sub_vars = set(value_pairs.keys())
elif set(value_pairs.keys()) != sub_vars:
raise VariableError(
"Sub-keys do no match across variable values.",
index=str(idx))
try:
self.data.append(SubVariable(value_pairs))
except VariableError as err:
err.index = str(idx)
raise err
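A minimal sketch of the normalization the method above performs, with SubVariable and VariableError reduced to plain dicts and ValueError (all names are illustrative):

def _normalize(values):
    # Always end up with a list of {sub_key: value} dicts, one per item.
    if not isinstance(values, list):
        values = [values]
    out, sub_vars = [], None
    for idx, pairs in enumerate(values):
        if not isinstance(pairs, dict):
            pairs = {None: pairs}
        if sub_vars is None:
            sub_vars = set(pairs.keys())
        elif set(pairs.keys()) != sub_vars:
            raise ValueError("Sub-keys do not match at index {}.".format(idx))
        out.append(pairs)
    return out

print(_normalize('gcc'))           # [{None: 'gcc'}]
print(_normalize(['gcc', 'icc']))  # [{None: 'gcc'}, {None: 'icc'}]
print(_normalize([{'vendor': 'openmpi', 'procs': 4},
                  {'vendor': 'mpich', 'procs': 8}]))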
|
[
"def _init_from_config(self, reserved_keys, value_dict):\n\n for key, value in value_dict.items():\n if key in reserved_keys:\n raise VariableError(\"Var name '{}' is reserved.\".format(key),\n var=key)\n\n if isinstance(value, DeferredVariable):\n self.data[key] = value\n else:\n try:\n self.data[key] = VariableList(values=value)\n except VariableError as err:\n err.var = key\n raise err",
"def _collect_variables(self, vs):\n self.var_list.extend(vs)\n self.init_op = tf.variables_initializer(var_list=self.var_list)",
"def _init_values(self) -> None:\n self._values: Mapping[str, configutils.Values] = {}\n for name, opt in configdata.DATA.items():\n self._values[name] = configutils.Values(opt)",
"def init_vars(self):\n pass",
"def load_vars(self, vars):\n\t\tpass",
"def __init__(self):\n self.variables = {}\n self.values = {}",
"def SetVariableList(out, variable_name, values):\n if not values:\n return SetVariable(out, variable_name, \"\")\n if len(values) == 1:\n return SetVariable(out, variable_name, values[0])\n out.write('list(APPEND ')\n out.write(variable_name)\n out.write('\\n \"')\n out.write('\"\\n \"'.join([CMakeStringEscape(value) for value in values]))\n out.write('\")\\n')",
"def _init_state_variables(self) -> None:\n for name, type_info in self.STATE_VARIABLE_DEFINITIONS.items():\n self.create_state_var(name, type_info)",
"def _create_variables(self) -> None:\n if self.relaxed:\n kind = LpContinuous\n else:\n kind = LpInteger\n\n # List all combinations of apps and instances and workloads\n comb_res = cartesian_product(self.system.apps, self.cooked.instances_res)\n comb_dem = cartesian_product(\n self.system.apps, self.cooked.instances_dem, self.load_hist.keys()\n )\n map_res = LpVariable.dicts(\"Y\", comb_res, 0, None, kind)\n map_dem = LpVariable.dicts(\"X\", comb_dem, 0, None, kind)\n self.cooked = self.cooked._replace(map_res=map_res, map_dem=map_dem)",
"def _set_instance_variables(self, data):\n self.start_configs = data['start_configs']\n self.waypt_configs = data['waypt_configs']\n self.start_speeds = data['start_speeds']\n self.spline_trajectories = data['spline_trajectories']\n self.horizons = data['horizons']\n self.lqr_trajectories = data['lqr_trajectories']\n self.K_nkfd = data['K_nkfd']\n self.k_nkf1 = data['k_nkf1']\n \n # Initialize variable tensor for waypoints in world coordinates\n dt = self.params.system_dynamics_params.dt\n self.waypt_configs_world = [SystemConfig(\n dt=dt, n=config.n, k=1, variable=True,\n track_trajectory_acceleration=self.params.track_trajectory_acceleration) for config in data['start_configs']]\n\n self.instance_variables_loaded = True\n\n if self.params.verbose:\n N = self.params.waypoint_params.n\n for v0, start_config in zip(self.start_velocities, self.start_configs):\n print('Velocity: {:.3f}, {:.3f}% of goals kept({:d}).'.format(v0, 100.*start_config.n/N,\n start_config.n))",
"def __init__(self, listSection):\r\n self.listHeader = listSection[0].split('[')[0]\r\n self.afterList = listSection[-1].split(']')[1]\r\n listString = self.buildListString(listSection)\r\n self.values = [value.strip() for value in listString.split(',')]",
"def __init__(self, vals):\n\n self.__vals = vals\n self.__dct = {}\n for (name, val, _) in vals:\n self.__dct[name] = val",
"def initialize(self, init_values: Dict[int, int]):\n self._instruction_pointer = 0\n self._memory = [val for val in self._code]\n for index, value in init_values.items():\n self._memory[index] = value",
"def _initialize_track_vars(self):\n self.__log.call()\n\n track_vars = self.__track_vars = [\n None, # track vars use 1-based indexing\n ]\n\n aggregated_tracks_metadata = self.__aggregated_metadata[\"__tracks\"]\n last_track = len(aggregated_tracks_metadata) - 1\n # from_ will still be 0 here, and that's intended - it means that when\n # we invoke \"buttonup\" for the first time, it will increment the track\n # spinbox to 1, triggering a refresh of track 1's metadata\n track_number_editor = self.__metadata_editors[\"track_number\"]\n track_number_editor.config(to=last_track)\n track_number_editor.of_label.config(text=\"of %d\" % last_track)\n\n # tracks metadata also uses 1-based indexing\n for t in range(1, len(aggregated_tracks_metadata)):\n track_metadata = aggregated_tracks_metadata[t]\n\n # first initialize the individual track vars...\n varmap = {\n \"track_include\": BooleanVar(\n name=\"track_%d_include\" % t,\n value=track_metadata[\"track_include\"]),\n }\n for field in [\n \"title\",\n \"artist\",\n \"genre\",\n \"year\",\n ]:\n metadata_name = \"track_%s\" % field\n varmap[metadata_name] = StringVar(\n name=\"track_%d_%s\" % (t, field),\n value=track_metadata[metadata_name][0]\n if track_metadata[metadata_name] else \"\")\n\n track_vars.append(varmap)\n\n # ...then initialize the editors and editor vars by using the track\n # spinbox to trigger refreshes (but make sure this method is called\n # BEFORE the metadata editor is packed, otherwise the user will be\n # very disoriented and confused)\n track_number_editor.invoke(\"buttonup\")\n\n # now update the from_ to 1 and initialize the spinner to track #1 by\n # \"wrapping around\"\n track_number_editor.config(from_=1)\n track_number_editor.invoke(\"buttonup\")",
"def _init_run_vars(self):\n self._messages = []\n self._reservation = None\n self._instance = None",
"def __init__(self, name, val, nonConst = False):\n\n atom = ConfigAtom(name, val)\n term = ConfigTerm((atom,))\n vals = [val]\n if nonConst:\n # We force this variable to be non-constant by shoving\n # another (guaranteed unique) value into its value set.\n vals.append(NONCONST_NONCE)\n self.__init([term], None, {name: frozenset(vals)}, False)",
"def substitute_vars(cfg):\n for k, v in cfg.items():\n if isinstance(v, str):\n cfg[k] = test_define_value(v)[0]\n elif isinstance(v, dict):\n substitute_vars(v)\n elif isinstance(v, list):\n new_list = []\n for lv in v:\n if isinstance(lv, dict):\n substitute_vars(lv)\n new_list.append(lv)\n elif isinstance(lv, str):\n new_list.append(test_define_value(lv)[0])\n else:\n new_list.append(lv)\n cfg[k] = new_list",
"def _instantiate_input_states(self, context=None):\n num_values = len(self.monitored_values)\n values = [None] * num_values\n names = self.names or [None] * num_values\n\n # If default_input_value arg (assigned to variable in __init__) was used to specify the size of inputStates,\n # pass those values for use in instantiating inputStates\n if self.variable is not None:\n input_state_sizes = self.variable\n else:\n input_state_sizes = values\n for i, monitored_value, name in zip(range(num_values), self.monitored_values, names):\n values[i] = self._instantiate_input_state_for_monitored_value(input_state_sizes[i],\n monitored_value,\n name,\n context=context)\n\n # If self.variable was not specified, construct from values of inputStates\n if self.variable is None:\n # If all items of self.variable are numeric and of the same length, convert to ndarray\n dim_axis_0 = len(values)\n dim_axis_1 = len(values[0])\n if all((is_numeric(values[i]) and len(values[i])==dim_axis_1) for i in range(dim_axis_0)):\n self.variable = np.zeros((dim_axis_0,dim_axis_1), dtype=float)\n # Otherwise, just use list of values returned from instantiation above\n else:\n self.variable = values.copy()\n\n self.variableClassDefault = self.variable.copy()\n self.inputValue = list(self.variable)",
"def set_pipeline_env_var_list(self):\n\n #env_var_exclusive_dict\n env_var_exclusive_dict = {}\n\n #iterate env_var_dict\n for dcc_name, dcc_dict in self.env_var_dict.iteritems():\n\n #iterate dcc_dict\n for env_var_name, env_var_value_list in dcc_dict.iteritems():\n\n #convert to list if not of type list\n if not(type(env_var_value_list) is list):\n env_var_value_list = [env_var_value_list]\n\n #append\n #env var not in dict\n if not(env_var_name in env_var_exclusive_dict.keys()):\n env_var_exclusive_dict[env_var_name] = env_var_value_list\n #key already in there\n else:\n current_env_var_value_list = env_var_exclusive_dict[env_var_name]\n env_var_exclusive_dict[env_var_name] = list(set(current_env_var_value_list + env_var_value_list))\n\n #env_var_list\n env_var_list = []\n\n #iterate and append\n for env_var_name, env_var_value_list in env_var_exclusive_dict.iteritems():\n\n #append\n env_var_list.append([env_var_name, env_var_value_list])\n\n\n #env_var_list\n self.env_var_list = env_var_list"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the variable value at the given index and sub_var.
|
def get(self, index, sub_var):
if index is None:
index = 0
else:
if not isinstance(index, int):
raise KeyError("Non-integer index given: '{}'".format(index))
if not -len(self.data) <= index < len(self.data):
raise KeyError(
"Index out of range. There are only {} items in this variable."
.format(len(self.data)))
return self.data[index].get(sub_var)
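A minimal sketch of the index handling above, with self.data reduced to a plain list of dicts (sample values are illustrative):

data = [{'procs': '4'}, {'procs': '8'}, {'procs': '16'}]

def _get(index, sub_var):
    if index is None:
        index = 0                     # None means "the first value"
    elif not isinstance(index, int):
        raise KeyError("Non-integer index given: '{}'".format(index))
    if not -len(data) <= index < len(data):
        raise KeyError("Index out of range. There are only {} items in this "
                       "variable.".format(len(data)))
    return data[index][sub_var]

print(_get(None, 'procs'))   # -> '4'   (defaults to the first item)
print(_get(-1, 'procs'))     # -> '16'  (negative indexes count from the end)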
|
[
"def get(self, var, index, sub_var):\n\n if var in self.data:\n return self.data[var].get(index, sub_var)\n else:\n raise KeyError(\n \"Variable set '{}' does not contain a variable named '{}'. \"\n \"Available variables are: {}\"\n .format(self.name, var, tuple(self.data.keys())))",
"def __getitem__(self,\n subaddr_idx: int) -> str:\n return self.m_subaddr[subaddr_idx]",
"def variable_value(self, variable, index=0):\n return get_product_variable_value(self, variable, index)",
"def get_var_by_index(self, n):\n return self.data[n,:,:]",
"def varsub(val):\n _dbg('varsub(): starting with val = %s' % val)\n if isinstance(val, None.__class__):\n return val\n if isinstance(val, int):\n return str(val)\n i = 0\n while i < 100:\n if MARKER == 'dollar':\n s = re.search(r'\\$\\{([^${}]+)\\}', val)\n else:\n s = re.search(r'\\~\\{([^~{}]+)\\}', val)\n try:\n s.group(1)\n except (IndexError, AttributeError):\n break\n _dbg('varsub(): s.group(0) = %s' % s.group(0))\n _dbg('varsub(): s.group(1) = %s' % s.group(1))\n needle = s.group(0).replace('$', r'\\$')\n val = re.sub(needle, __varsub(s.group(1)), val)\n i += 1\n if MARKER == 'dollar':\n # Un-escape $\\\\{ --> ${, and $\\\\\\{ --> $\\{\n val = val.replace(r'$\\\\{', r'${')\n val = val.replace(r'$\\\\\\{', r'$\\{')\n return val",
"def _get_variable(data, variable):\n try:\n xarr = data[variable]\n except KeyError:\n raise KeyError('variable \\'' + variable +\n '\\' not found') from None\n return xarr",
"def get_variable_by_index(self, var, index):\n\n var = var[:]\n\n if isinstance(var, np.ma.MaskedArray) or isinstance(index, np.ma.MaskedArray):\n rv = np.ma.empty((index.shape[0], 4), dtype=np.float64)\n if index.mask is not np.bool_(): # because False is not False. Thanks numpy\n rv.mask = np.zeros_like(rv, dtype=bool)\n rv.mask[:] = index.mask[:, 0][:, np.newaxis]\n rv.harden_mask()\n else:\n rv = np.zeros((index.shape[0], 4), dtype=np.float64)\n\n raw = np.ravel_multi_index(index.T, var.shape, mode='clip')\n rv[:, 0] = np.take(var, raw)\n raw += np.array(var.shape[1], dtype=np.int32)\n rv[:, 1] = np.take(var, raw)\n raw += 1\n rv[:, 2] = np.take(var, raw)\n raw -= np.array(var.shape[1], dtype=np.int32)\n rv[:, 3] = np.take(var, raw)\n return rv",
"def get_variable(self, varname):\n return next((v for v in self.config.data_spec if v['name'] == varname))",
"def getVar(self, name):\n\t\treturn self.vars[name]",
"def _append_getitem_expression(\r\n self, index: Union[int, Int],\r\n value: T) -> None:\r\n from apysc import AnyValue\r\n from apysc.expression import expression_file_util\r\n from apysc.type import value_util\r\n value_: VariableNameInterface\r\n if not isinstance(value, VariableNameInterface):\r\n value_ = AnyValue(None)\r\n else:\r\n value_ = value\r\n index_str: str = value_util.get_value_str_for_expression(\r\n value=index)\r\n expression: str = (\r\n f'var {value_.variable_name} = '\r\n f'{self.variable_name}[{index_str}];'\r\n )\r\n expression_file_util.append_js_expression(expression=expression)",
"def get_var(self, name):\n try:\n return self.final_context.data[name].get_literal_value()\n except KeyError:\n raise (\"Test seeking variable (%s) not found in final context.\" %\n name)",
"def get_value(self, var, thread_id):\n return self._state(thread_id).get_value(self._var_map[var])",
"def get_script_variable(a_variable_name):\n return get_prefixed_variable(\"s\", a_variable_name)",
"def getVarnode(self) -> ghidra.program.model.pcode.Varnode:\n ...",
"def visit_Subscript(self, node):\n node = self.generic_visit(node)\n\n if (isinstance(node.slice, ast.Index) and\n isinstance(node.value, _resolved)):\n sliceVal = MISSING\n sliceRepr = ''\n if isinstance(node.slice.value, _resolved):\n # (a[b])[c]\n # will include `a[b]` in the extras.\n self.extras.append(node.slice.value)\n sliceVal = node.slice.value.value\n sliceRepr = node.slice.value.representation\n elif isinstance(node.slice.value, ast.Num):\n sliceVal = node.slice.value.n\n sliceRepr = repr(sliceVal)\n elif isinstance(node.slice.value, ast.Str):\n sliceVal = node.slice.value.s\n sliceRepr = repr(sliceVal)\n if sliceVal is not MISSING:\n node = _resolved(\n '%s[%s]' % (node.value.representation, sliceRepr),\n node.value.value[sliceVal])\n\n return node",
"def get_var(backend_model, var, dims=None, sparse=False, expr=False):\n try:\n var_container = getattr(backend_model, var)\n except AttributeError:\n raise exceptions.BackendError(\"Variable {} inexistent.\".format(var))\n\n if not dims:\n if var + \"_index\" == var_container.index_set().name:\n dims = [i.name for i in var_container.index_set().subsets()]\n else:\n dims = [var_container.index_set().name]\n\n if sparse and not expr:\n if invalid(var_container.default()):\n result = pd.Series(var_container._data).apply(\n lambda x: po.value(x) if not invalid(x) else np.nan\n )\n else:\n result = pd.Series(var_container.extract_values_sparse())\n else:\n if expr:\n result = pd.Series(var_container._data).apply(po.value)\n else:\n result = pd.Series(var_container.extract_values())\n if result.empty:\n raise exceptions.BackendError(\"Variable {} has no data.\".format(var))\n\n result = result.rename_axis(index=dims)\n\n return xr.DataArray.from_series(result)",
"def _get_variable(self, varname):\n\n return NetcdfVariableScipy(self._file.variables[varname])",
"def lookup(indexable, idx):\n return indexable[idx]",
"def test_getitem_root(self):\n x = IndexedVariable(name='x', index=1)\n self.assertIs(x[()], x)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns an AppointmentDatabase instance for accessing the database. If the database file does not yet exist, it creates a new database.
|
def get_db():
    # Cache the database handle on the application context so it is only created once.
    if not hasattr(g, 'apps_db'):
        g.apps_db = AppointmentDatabase(app.config['DATABASE'])
return g.apps_db
|
[
"def get_db():\n db = getattr(g, '_database', None)\n if db is None:\n with app.app_context():\n if app.config.get('TESTING'):\n db = g._database = sqlite3.connect(app.config['DATABASE'])\n db.row_factory = sqlite3.Row\n db.execute('PRAGMA foreign_keys = ON')\n elif app.config['PRODUCTION']:\n components = urlparse.urlparse(os.environ['DATABASE_URL'])\n db = g._database = psycopg2.connect(\n database=components.path[1:],\n user=components.username,\n password=components.password,\n host=components.hostname\n )\n else:\n db = g._database = psycopg2.connect(\n 'dbname={0} user={1} password={2}'.format(\n app.config['DATABASE'], app.config['USER'],\n app.config['PASSWORD']))\n return db",
"def get_database():\r\n dbpath = \"/\".join(__file__.split('/')[:-1] + ['samples.db'])\r\n return shelve.open(dbpath,protocol=2,writeback=True)",
"def get_instance():\r\n if Database._instance is None:\r\n Database._instance = Database()\r\n return Database._instance",
"def get_database(self, name=None):\n name = name or 'default'\n\n database = self._databases.get(name)\n\n if database:\n return database\n\n raise DatabaseNotFound(name)",
"def get_flask_database_manager(app, table=None):\n directory = os.path.join(app.root_path, 'migrations')\n database = app.config['DATABASE']\n return DatabaseManager(database, table_name=table, directory=directory)",
"def _database(folder, writable=False, refresh=False):\n if writable:\n if refresh:\n database = xapian.WritableDatabase(folder, xapian.DB_CREATE_OR_OVERWRITE)\n else:\n database = xapian.WritableDatabase(folder, xapian.DB_CREATE_OR_OPEN)\n else:\n try:\n database = xapian.Database(folder)\n except xapian.DatabaseOpeningError:\n raise InvalidIndexError(u'Unable to open index at %s' % folder)\n\n return database",
"def _get_database_directory():\n return get_database_directory()",
"def open(file, flag=None, mode=0666):\r\n # flag argument is currently ignored\r\n return _Database(file, mode)",
"def get_database(self, name='database'):\n try:\n return self._dbs[name]\n except KeyError:\n dbstring = self.get_config_string(name)\n self._dbs[name] = PostgresConnectionPool.for_url(dbstring)\n return self._dbs[name]",
"def test_create_db_file_if_not_exist(self):\n databasemanager.DatabaseManager(driftwood())",
"def open_db(self):\n path_exists = os.path.exists(self._db_path)\n\n self._db_conn = sqlite3.connect(self._db_path)\n\n #if the path didn't exist then we've created a new database file\n if not path_exists:\n self.createdb()\n\n return path_exists",
"def _create_database(self, name, url):\n uri = make_url(url)\n\n class_name = DATABASE_ALIASES.get(uri.drivername)\n\n if class_name is None:\n database_cls = Database\n else:\n database_cls = import_string(class_name)\n\n return database_cls(name, url, scope_func=self._scope_func)",
"def get(self, name, check=True):\n return Database(self, name, check=check)",
"def find_agenda_db(log):\n\n home = os.path.expanduser(\"~\")\n db_file = \"{0}{1}\".format(home, DB_LOCATION)\n if not os.path.isfile(db_file):\n log.debug(\n \"Agenda db not found at {0}\".format(db_file))\n\n log.debug(db_file)\n return db_file",
"def create_database(self, instance, name, character_set=None,\r\n collate=None):\r\n return instance.create_database(name, character_set=character_set,\r\n collate=collate)",
"def queue_db(self):\n db_name = os.path.join(self.dir, 'queue')\n db = Database(db_name)\n return db",
"def load_DB(self):\n\t\tstream = open(self.DB_file)\n\t\tself.DB = pickle.load(stream)\n\t\tstream.close()\n\t\treturn",
"def open_dumptruck(dbname):\n if os.path.isfile(dbname):\n # Check for the database file\n try:\n dt = dumptruck.DumpTruck(dbname, adapt_and_convert = False)\n except sqlite3.OperationalError, e:\n if e.message == 'unable to open database file':\n msg = e.message + ' (Check that the file exists and is readable by everyone.)'\n code = 500\n raise NotOK(code, msg)\n else:\n msg = 'Error: database file does not exist.'\n code = 500\n raise NotOK(code, msg)\n\n dt.connection.set_authorizer(_authorizer_readonly)\n return dt",
"def db(self):\n return self.session.db if not self.disable_db else None",
"def getDefaultDB():\n\n return _defaultDB"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a Response object containing the error message as JSON.
|
def to_response(self):
response = jsonify({'error': self.error_message})
    response.status_code = self.status_code
return response
|
[
"def _build_error_response(message, status_code, error_id, **kwargs):\n\n return make_response(\n jsonify({\n \"status_code\": status_code,\n \"error\": {\n \"message\": message,\n \"id\": error_id\n },\n **kwargs\n }), status_code\n )",
"def _create_rest_error_output(error_message, error_code):\r\n response = {\r\n \"success\": \"false\",\r\n \"data\": {},\r\n \"error\": {\r\n \"code\": error_code,\r\n \"message\": error_message\r\n }\r\n }\r\n return response",
"def create_error(code=400, msg='bad request'):\n return json.dumps({'status': code, 'error': msg}), code",
"def _render_error_response(self, code, title, message):\n\n if self._response_format == 'py':\n response = {'status': 'error',\n 'code': code,\n 'title': title,\n 'message': message}\n elif self._response_format == 'json':\n response = '{\"status\": \"error\", ' \\\n '\"code\": \"%s\", ' \\\n '\"title\": \"%s\", ' \\\n '\"message\": \"%s\"}' % (code, title, message)\n elif self._response_format == 'xml':\n response = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n' \\\n '<response>\\n' \\\n ' <status>error</status>\\n' \\\n ' <code>%s</code>\\n' \\\n ' <title>%s</title>\\n' \\\n ' <message>%s</message>\\n' \\\n '</response>' % (code, title, message)\n else:\n response = 'status: error\\n' \\\n 'code: %s\\n' \\\n 'title: %s\\n' \\\n 'message: %s' % (code, title, message)\n\n return response",
"def create_server_error_json_result(message):\n status = _StatusCode.SERVER_ERROR\n return_obj = {}\n return_obj[\"result\"] = []\n return_obj[\"status\"] = status\n return_obj[\"message\"] = message\n return (status, json.dumps(return_obj, sort_keys=True))",
"def jsonify_error(status, message, traceback, version): \\\r\n # pylint: disable=unused-argument\r\n\r\n cherrypy.response.headers['Content-Type'] = 'application/json'\r\n response_body = json.dumps(\r\n {\r\n 'error': {\r\n 'http_status': status,\r\n 'message': message,\r\n }\r\n })\r\n\r\n cherrypy.response.status = status\r\n\r\n return response_body",
"def json_error_context(self, msg):\n self.status_code = 500\n return {'error': msg}",
"def error(msg: str):\n return json.dumps({\"error\": msg})",
"def handle_custom_error(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response",
"def json_err(msg: str) -> Any:\n return jsonify({\"success\": False, \"error\": msg})",
"def error_to_json(exc):\n response = jsonify(message=str(exc))\n response.status_code = (exc.code if isinstance(exc, HTTPException) else 500)\n return response",
"def send_json_error(err, code):\n msg = str(err).split(': ')[1]\n context = {'error': msg}\n return make_response(jsonify(**context), code)",
"def render_JSON_Error(message, data={}):\n res = {\n 'status': 'Error',\n 'err': message,\n }\n res.update(data)\n return HttpResponse(json.dumps(res))",
"def format_exception(self, e, target, action):\n exception_cls = e.__class__.__name__\n if self.error_status.get(exception_cls):\n status = self.error_status.get(exception_cls)\n else: # pragma: no cover\n status = 500\n if exception_cls in ('BadRequest', 'Forbidden','Unauthorized'):\n e.message = e.description\n error = dict(action=action.upper(),\n status=\"failed\",\n status_code=status,\n target=target,\n exception_cls=exception_cls,\n exception_msg=str(e.message))\n return Response(json.dumps(error), status=status,\n mimetype='application/json')",
"def output_error_json(self, message):\n error = {\n 'result': 'error',\n 'error': [message]\n }\n self.write(json.dumps(error))",
"async def error_to_json(req: Request, res: Response, exc: HTTPError):\n res.status_code = exc.status_code\n res.json = exc.as_json()",
"def jsonify_error(status: str, message: str, **traceback: dict) -> str:\n # Take the response generation of cherrypy in case of error\n response = cherrypy.response\n\n # Add the JSON Header\n response.headers[\"Content-Type\"] = \"application/json\"\n\n # Return the JSON with all the information\n return json.dumps(\n {\n \"status\": \"Failure\",\n \"status_details\": {\"message\": status, \"description\": message},\n }\n )",
"def handle_invalid_api_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response",
"def build_errmsg(\n cls,\n response,\n msg: t.Optional[str] = None,\n exc: t.Optional[Exception] = None,\n ) -> str:\n from .tools import json_log\n\n url = response.url\n method = response.request.method\n code = response.status_code\n reason = response.reason\n out_len = len(response.request.body or \"\")\n in_len = len(response.text or \"\")\n\n msg = msg or \"Error in REST API response\"\n pre = [\n msg,\n get_exc_str(exc=exc),\n f\"URL: {url!r}, METHOD: {method}\",\n f\"CODE: {code!r}, REASON: {reason!r}, BYTES OUT: {out_len}, BYTES IN: {in_len}\",\n ]\n middle = [\n \"Request Object:\",\n json_log(obj=response.request.body),\n \"Response Object:\",\n json_log(obj=response.text),\n ]\n msgs = [*pre, \"\", *middle, \"\", *pre]\n return \"\\n\".join(msgs)",
"def handle_server_error_httpexception(err: ServerError) -> Response:\n content = json.dumps(err.error_body)\n status_code = err.code\n\n headers = {\n \"Content-Type\": \"application/json\"\n }\n return Response(content, status_code, headers)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a dictionary containing appointments indexed by doctor. The dictionary keys are doctor names, and the values are lists of appointments. Each appointment is represented by an sqlite3 Row object, which can be used like a dictionary.
|
def get_app_by_doctor():
cur = get_db().conn.cursor()
# By using an OrderedDict we will preserve alphabetical order of
# doctors
app_by_doctor = OrderedDict()
query = '''
SELECT doctors.doctor as doctor, patients.FirstN as FirstN,
patients.LastN as LastN, patients.gender as gender, patients.age as age,
patients.birth as birth, app.month as month, symptoms.symptom as symptom
FROM doctors, patients, app, symptoms
WHERE app.doctor_id = doctors.doctor_id
AND app.patient_id = patients.patient_id
AND app.symptom_id = symptoms.symptom_id
ORDER BY doctor, FirstN'''
for row in cur.execute(query):
doctor = row['doctor']
if doctor not in app_by_doctor:
app_by_doctor[doctor] = []
app_by_doctor[doctor].append(row)
return app_by_doctor
|
[
"def get_appointments(doc_id: int, cur) -> json:\n return cur.execute(\n \"SELECT appointment FROM Doctors where UID = ?;\", (doc_id,)\n ).fetchone()[0]",
"def populate_appointments(endpoint, doctor):\n date = timezone.now().strftime('%Y-%m-%d')\n\n appointments = endpoint.list({'doctor': doctor.id, 'date': date})\n for appointment_data in appointments:\n patient = Patient.objects.get(id=appointment_data['patient'])\n\n # simplify/clean statuses for project purposes\n status = appointment_data['status']\n if status not in ('Checked In', 'In Session', \n 'Complete', 'Cancelled'):\n status = ''\n\n\n data = {\n 'doctor': doctor,\n 'patient': patient,\n 'scheduled_time': appointment_data['scheduled_time'],\n 'duration': appointment_data['duration'],\n 'office': appointment_data['office'],\n 'exam_room': appointment_data['exam_room'],\n 'status': status,\n 'reason': appointment_data['reason']\n }\n\n appointment, created = Appointment.objects.update_or_create(\n defaults=data, pk=appointment_data['id'])",
"def retrieve_all_suitable_appointments() -> Dict[str, List[AppointmentMatch]]:\n all_appointments = {}\n for department in DEPARTMENTS:\n entry = f\"{DEPARTMENTS_TABLE[department]} ({department})\"\n all_appointments[entry] = find_centers_for_department(department)\n return all_appointments",
"def appointment_db_query(self):\n \n appointments = []\n sql = \"\"\"\n SELECT appt.id AS appt_id, appt.customer_id AS cust_id,\n DATE_FORMAT(appt.startDate, '%W %m/%d/%Y %l:%i %p') AS appt_lt,\n CONVERT_TZ(DATE_SUB(appt.startDate, INTERVAL 2 HOUR), pref.value, 'UTC') AS reminder_utc,\n comp.id AS comp_id, comp.name AS comp_name, comp.domain AS comp_domain, pref.value AS comp_timezone,\n (SELECT value from preferences AS p WHERE name='receipt header' AND p.company_id=comp.id) AS comp_address,\n cust.email AS cust_email, cust.firstname AS cust_fname, cust.surname AS cust_lname, cust.phone_num as cust_phone\n FROM appointments AS appt\n INNER JOIN companies as comp ON comp.id=appt.company_id\n INNER JOIN customers as cust ON cust.id=appt.customer_id\n INNER JOIN preferences AS pref ON pref.company_id=comp.id\n WHERE (DATE(appt.startDate)=DATE(CONVERT_TZ(CURDATE(), 'UTC', pref.value)) \n OR DATE(appt.startDate)=DATE(CONVERT_TZ(DATE_ADD(CURDATE(), INTERVAL 1 DAY), 'UTC', pref.value)))\n AND pref.name='timezone';\n \"\"\"\n db = self.db_connect()\n appointments_db_query = self.db_execute_query(db, sql)\n appointments = appointments_db_query\n return appointments",
"def get_appointments(self):\n if self.is_admin():\n return Appointment.objects\n\n elif self.is_doctor():\n return Appointment.objects.filter(doctor=self)\n\n return Appointment.objects.filter(patient=self)",
"def appointments(self):\n appointments = []\n if self.show == 'forms':\n appointments = [self.appointment]\n else:\n # or filter appointments for the current membership categories\n # schedule_group__membership_form\n codes = []\n for category in self.membership_form_category:\n codes.extend(MembershipForm.objects.codes_for_category(membership_form_category=category))\n appointments = Appointment.objects.filter(\n registered_subject=self.registered_subject,\n visit_definition__code__in=codes).order_by(\n 'visit_definition__time_point', 'visit_instance', 'appt_datetime')\n return appointments",
"def _get_doctor_list(self, form):\n available_doctors = []\n start_date = form.cleaned_up['start_date']\n no_of_days = form.cleaned_up['no_of_days']\n start_time = form.cleaned_up['start_time']\n am_pm = form.cleaned_up['am_pm']\n duration = form.cleaned_up['duration']\n start_time = int(self._convert_to_24_hour(start_time, am_pm))\n\n start_weekday_id = start_date.weekday() + 1 # +1 because weekday start from 0 till 6 but in db it is 1 to 7\n end_weekday_id = (start_weekday_id + no_of_days) % 7\n end_time = (start_time + duration) % 24\n\n end_date = start_date + timedelta(no_of_days)\n\n avail_days_obj = AvailableDay.get_days_by_ids(start_weekday_id, end_weekday_id)\n all_available_doctors = DoctorSchedule.get_doctorlist(avail_days_obj, start_time, end_time)\n\n if all_available_doctors:\n already_booked_doctors = BookingStatus.is_available(start_date, end_date, start_time, end_time)\n\n all_booked_doctor = set([booked.schedule.id for booked in already_booked_doctors])\n for available_doc in all_available_doctors:\n if all_booked_doctor and available_doc.id not in all_booked_doctor:\n available_doctors.apppend(available_doc)\n\n return available_doctors",
"def appointmentRates(conn):\n\n cursor = executeQuery(conn, \"SELECT * FROM AggregateAppointmentData\")\n appointmentDict = {}\n\n for row in cursor:\n # The date on which an appointment was made, not the appointment itself.\n schedule_date_str = row[\"ScheduleDate\"]\n # The date and time of the actual scheduled appointment.\n appointment_date_str = row[\"ActualApptDate\"]\n appointment_time_str = row[\"ActualApptTime\"]\n # Converts Date/Time strings extracted from table into DateTime type.\n schedule_date = datetime.strptime(schedule_date_str, '%Y-%m-%d')\n appointment_date = datetime.strptime(appointment_date_str, '%Y-%m-%d')\n appointment_time = datetime.strptime(appointment_time_str, '%H:%M')\n # Calculate lead time (days) between day of scheduling and appointment.\n lead_days = (appointment_date - schedule_date).days\n # Converts appointment time into 24-hour (hour:minute) notation for\n # readability.\n appointment_hour_min = str(appointment_time.hour) + \":\" + \\\n str('{:>02d}'.format(appointment_time.minute))\n # Creates a tuple (lead-days, appointment-time) to insert into\n # frequency distribution \"appointmentDict\".\n days_time_tuple = (lead_days, appointment_hour_min)\n if(days_time_tuple in appointmentDict):\n appointmentDict[days_time_tuple] += 1\n else:\n appointmentDict[days_time_tuple] = 1\n\n return appointmentDict",
"def AddAppointments( self, Appointments ):\n\t\tfor App in Appointments:\n\t\t\tevent = Event()\n\t\t\tif App.has_key( 'Class' ): \n\t\t\t\tevent.add('summary', App['Subject']+\" - \"+App['Class'])\n\t\t\telse:\n\t\t\t\tevent.add('summary', App['Subject'])\n\t\t\tevent.add('dtstart', App['Hours'][0])\n\t\t\tevent.add('dtend', App['Hours'][1])\n\t\t\t\n\t\t\tif App.has_key( 'Location' ): event.add( 'location', App['Location'] )\n\t\t\t\n\t\t\tself.cal.add_component(event)\n\t\t\t# print \"Event added\", App",
"def list_appointments(request, for_date: date, current_user_id=1):\n\n if request.method != 'GET':\n return HttpResponse(status=405)\n\n query_set = BookingService.get_appointments_for_range(current_user_id, for_date, timedelta(days=1) + for_date)\n return JsonResponse(status=200, data={\"appointments\": [model_to_dict(model) for model in query_set]})",
"def get_attendance(attendance:list):\n\tclass_name = attendance[1][1]\n\tstart_time = attendance[1][2]\n\tmeeting_host = attendance[1][4]\n\tmeeting_duration = attendance[1][5]\n\tattendants = {email:{'name':name, 'duration':int(duration)} for name,email,duration,_ in attendance[4:]}\n\treturn {'class':class_name,\n\t\t\t'start time':start_time,\n\t\t\t'host':meeting_host,\n\t\t\t'duration':int(meeting_duration),\n\t\t\t'attendants':attendants}",
"def get(self):\n return render_template(\"appointments.html\",\n apps=get_db().get_all_apps())",
"def get(self, patient_id):\n if not patientDAO.get_patient(db.con_pool, patient_id):\n abort(404, 'Patient id not found')\n\n return make_response(jsonify(\n appointmentDAO.get_appointments_current_by_patient(db.con_pool, patient_id)), 200)",
"def get_app_by_month():\n\n cur = get_db().conn.cursor()\n\n # By using an OrderedDict we will preserve alphabetical order of month\n app_by_month = OrderedDict()\n\n query = '''\n SELECT app.month as month, patients.FirstN as FirstN, patients.LastN as\n LastN, patients.gender as gender, patients.age as age,\n patients.birth as birth,\n doctors.doctor as doctor, symptoms.symptom as symptom\n FROM doctors, patients, app, symptoms\n WHERE app.doctor_id = doctors.doctor_id\n AND app.patient_id = patients.patient_id\n AND app.symptom_id = symptoms.symptom_id\n ORDER BY month, FirstN'''\n\n for row in cur.execute(query):\n month = row['month']\n\n if month not in app_by_month:\n app_by_month[month] = []\n\n app_by_month[month].append(row)\n\n return app_by_month",
"def get_doctors():\n all_doctors = schema.Doctor.query.all()\n result = schema.doctors_schema.dump(all_doctors)\n return jsonify(result.data)",
"def get_room_dictionary(specific_bookings: bool = False) -> Dict[int, Dict[int, List[str]]]:\n if specific_bookings:\n additional_rooms_file = settings.settings.paths.bookings_additional()\n if not additional_rooms_file:\n return {}\n all_rooms = import_rooms_from_csv(additional_rooms_file)\n else:\n all_rooms = Data().room_by_name.values()\n\n all_times: List[int] = settings.settings.times()\n bookings: Dict[int, Dict[int, List[str]]] = {}\n days: List[datetime.date] = settings.settings.days()\n for day in days:\n day_index = converter.date_to_day_index(day)\n bookings[day_index] = {}\n for hour in all_times:\n bookings[day_index][hour] = []\n for room in all_rooms:\n # if room.type == \"tutorial\":\n times = room.get_booked_times(day)\n for hour in times:\n bookings[day_index][hour].append(room.name)\n return bookings",
"def book_appointment(request, current_user_id=1):\n if request.method != 'POST':\n return JsonResponse(status=405, data={\"reasons\": ['Method Not Allowed']})\n payload = json.loads(request.body)\n doctor_id: int = payload['doctor_id']\n appointment_start: datetime = datetime.fromisoformat(payload['appointment_start'])\n appointment_finish: datetime = datetime.fromisoformat(payload['appointment_finish'])\n\n try:\n visit_time = VisitTime(appointment_start, appointment_finish)\n except ValueError as e:\n return JsonResponse(status=400, data={\"reasons\": [str(e)]})\n\n is_available, reasons = BookingService.check_appointment_time_availability(current_user_id, doctor_id, visit_time)\n if not is_available:\n return JsonResponse(status=409, data={\"reasons\": reasons})\n\n appointment = Appointment(\n patient_id=current_user_id,\n doctor_id=doctor_id,\n appointment_start=appointment_start,\n appointment_finish=appointment_finish,\n )\n appointment.save()\n return JsonResponse(status=201, data=model_to_dict(appointment))",
"def get_appointment(self):\n\n # Getting the real IP if the server is behind a reverse proxy\n remote_addr = get_remote_addr()\n\n # Check that data type and content are correct. Abort otherwise.\n try:\n request_data = get_request_data_json(request)\n\n except InvalidParameter as e:\n self.logger.info(\"Received invalid get_appointment request\", from_addr=\"{}\".format(remote_addr))\n return jsonify({\"error\": str(e), \"error_code\": errors.INVALID_REQUEST_FORMAT}), HTTP_BAD_REQUEST\n\n locator = request_data.get(\"locator\")\n\n try:\n self.inspector.check_locator(locator)\n self.logger.info(\"Received get_appointment request\", from_addr=\"{}\".format(remote_addr), locator=locator)\n\n r = self.stub.get_appointment(\n GetAppointmentRequest(locator=locator, signature=request_data.get(\"signature\"))\n )\n data = (\n r.appointment_data.appointment\n if r.appointment_data.WhichOneof(\"appointment_data\") == \"appointment\"\n else r.appointment_data.tracker\n )\n\n rcode = HTTP_OK\n response = {\n \"locator\": locator,\n \"status\": r.status,\n \"appointment\": json_format.MessageToDict(\n data, including_default_value_fields=True, preserving_proto_field_name=True\n ),\n }\n\n except (InspectionFailed, grpc.RpcError) as e:\n # Default, for InspectionFailed and not-found appointments\n rcode = HTTP_NOT_FOUND\n response = {\"locator\": locator, \"status\": AppointmentStatus.NOT_FOUND}\n\n if isinstance(e, grpc.RpcError):\n if e.code() == grpc.StatusCode.UNAUTHENTICATED:\n rcode = HTTP_BAD_REQUEST\n response = {\n \"error\": e.details(),\n \"error_code\": errors.APPOINTMENT_INVALID_SIGNATURE_OR_SUBSCRIPTION_ERROR,\n }\n elif e.code() == grpc.StatusCode.UNAVAILABLE:\n rcode = HTTP_SERVICE_UNAVAILABLE\n response = {\"error\": e.details()}\n\n return jsonify(response), rcode",
"def get_attendance(self):\r\n result = [self.attendance_id, self.employee_id, self.first_name, self.last_name,\r\n self.arrival_date, self.arrival_time,\r\n self.departure_date, self.departure_time]\r\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a dictionary containing appointments indexed by month. The dictionary keys are month names, and the values are lists of appointments. Each appointment is represented by an sqlite3 Row object, which can be used like a dictionary.
|
def get_app_by_month():
cur = get_db().conn.cursor()
# By using an OrderedDict we will preserve alphabetical order of month
app_by_month = OrderedDict()
query = '''
SELECT app.month as month, patients.FirstN as FirstN, patients.LastN as
LastN, patients.gender as gender, patients.age as age,
patients.birth as birth,
doctors.doctor as doctor, symptoms.symptom as symptom
FROM doctors, patients, app, symptoms
WHERE app.doctor_id = doctors.doctor_id
AND app.patient_id = patients.patient_id
AND app.symptom_id = symptoms.symptom_id
ORDER BY month, FirstN'''
for row in cur.execute(query):
month = row['month']
if month not in app_by_month:
app_by_month[month] = []
app_by_month[month].append(row)
return app_by_month
|
[
"def appointment_db_query(self):\n \n appointments = []\n sql = \"\"\"\n SELECT appt.id AS appt_id, appt.customer_id AS cust_id,\n DATE_FORMAT(appt.startDate, '%W %m/%d/%Y %l:%i %p') AS appt_lt,\n CONVERT_TZ(DATE_SUB(appt.startDate, INTERVAL 2 HOUR), pref.value, 'UTC') AS reminder_utc,\n comp.id AS comp_id, comp.name AS comp_name, comp.domain AS comp_domain, pref.value AS comp_timezone,\n (SELECT value from preferences AS p WHERE name='receipt header' AND p.company_id=comp.id) AS comp_address,\n cust.email AS cust_email, cust.firstname AS cust_fname, cust.surname AS cust_lname, cust.phone_num as cust_phone\n FROM appointments AS appt\n INNER JOIN companies as comp ON comp.id=appt.company_id\n INNER JOIN customers as cust ON cust.id=appt.customer_id\n INNER JOIN preferences AS pref ON pref.company_id=comp.id\n WHERE (DATE(appt.startDate)=DATE(CONVERT_TZ(CURDATE(), 'UTC', pref.value)) \n OR DATE(appt.startDate)=DATE(CONVERT_TZ(DATE_ADD(CURDATE(), INTERVAL 1 DAY), 'UTC', pref.value)))\n AND pref.name='timezone';\n \"\"\"\n db = self.db_connect()\n appointments_db_query = self.db_execute_query(db, sql)\n appointments = appointments_db_query\n return appointments",
"def appointmentRates(conn):\n\n cursor = executeQuery(conn, \"SELECT * FROM AggregateAppointmentData\")\n appointmentDict = {}\n\n for row in cursor:\n # The date on which an appointment was made, not the appointment itself.\n schedule_date_str = row[\"ScheduleDate\"]\n # The date and time of the actual scheduled appointment.\n appointment_date_str = row[\"ActualApptDate\"]\n appointment_time_str = row[\"ActualApptTime\"]\n # Converts Date/Time strings extracted from table into DateTime type.\n schedule_date = datetime.strptime(schedule_date_str, '%Y-%m-%d')\n appointment_date = datetime.strptime(appointment_date_str, '%Y-%m-%d')\n appointment_time = datetime.strptime(appointment_time_str, '%H:%M')\n # Calculate lead time (days) between day of scheduling and appointment.\n lead_days = (appointment_date - schedule_date).days\n # Converts appointment time into 24-hour (hour:minute) notation for\n # readability.\n appointment_hour_min = str(appointment_time.hour) + \":\" + \\\n str('{:>02d}'.format(appointment_time.minute))\n # Creates a tuple (lead-days, appointment-time) to insert into\n # frequency distribution \"appointmentDict\".\n days_time_tuple = (lead_days, appointment_hour_min)\n if(days_time_tuple in appointmentDict):\n appointmentDict[days_time_tuple] += 1\n else:\n appointmentDict[days_time_tuple] = 1\n\n return appointmentDict",
"def getMonthActivityByYear(self):\n monthActByYear = dict() \n for year in self.monthActivity:\n if year not in monthActByYear:\n monthList = [0,0,0,0,0,0,0,0,0,0,0,0]\n for month in self.monthActivity[year]:\n monthList[month-1] = self.monthActivity[year][month]\n monthActByYear[year] = monthList\n\n return monthActByYear",
"def create_month_entries(self):\r\n factories.Entry(**{\r\n 'user': self.user,\r\n 'start_time': self.month,\r\n 'end_time': self.month + relativedelta(hours=1)\r\n })\r\n factories.Entry(**{\r\n 'user': self.user,\r\n 'start_time': self.month + relativedelta(weeks=1),\r\n 'end_time': self.month + relativedelta(weeks=1, hours=1)\r\n })\r\n factories.Entry(**{\r\n 'user': self.user,\r\n 'start_time': self.month + relativedelta(weeks=2),\r\n 'end_time': self.month + relativedelta(weeks=2, hours=1)\r\n })\r\n factories.Entry(**{\r\n 'user': self.user,\r\n 'start_time': self.month + relativedelta(weeks=3),\r\n 'end_time': self.month + relativedelta(weeks=3, hours=1)\r\n })",
"def get_month(month):\n with open('/Users/thomsuykerbuyk/GitHub/AmphiaBot/Roosters/' + month + '/' + month + '.csv', 'r') as new_file:\n lines = new_file.readlines()\n lines = [line.split(',') for line in lines]\n\n agenda = {} # saving all information in dictionary called agenda\n for line in lines:\n agenda[line[0]] = line[1][:-1] # line[0] == date, line[1] == shift\n return agenda",
"def get_appointments(doc_id: int, cur) -> json:\n return cur.execute(\n \"SELECT appointment FROM Doctors where UID = ?;\", (doc_id,)\n ).fetchone()[0]",
"def retrieve_all_suitable_appointments() -> Dict[str, List[AppointmentMatch]]:\n all_appointments = {}\n for department in DEPARTMENTS:\n entry = f\"{DEPARTMENTS_TABLE[department]} ({department})\"\n all_appointments[entry] = find_centers_for_department(department)\n return all_appointments",
"def appointments(self):\n appointments = []\n if self.show == 'forms':\n appointments = [self.appointment]\n else:\n # or filter appointments for the current membership categories\n # schedule_group__membership_form\n codes = []\n for category in self.membership_form_category:\n codes.extend(MembershipForm.objects.codes_for_category(membership_form_category=category))\n appointments = Appointment.objects.filter(\n registered_subject=self.registered_subject,\n visit_definition__code__in=codes).order_by(\n 'visit_definition__time_point', 'visit_instance', 'appt_datetime')\n return appointments",
"def get_app_by_doctor():\n\n cur = get_db().conn.cursor()\n\n # By using an OrderedDict we will preserve alphabetical order of\n # doctors\n\n app_by_doctor = OrderedDict()\n\n query = '''\n SELECT doctors.doctor as doctor, patients.FirstN as FirstN,\n patients.LastN as LastN, patients.gender as gender, patients.age as age,\n patients.birth as birth, app.month as month, symptoms.symptom as symptom\n FROM doctors, patients, app, symptoms\n WHERE app.doctor_id = doctors.doctor_id\n AND app.patient_id = patients.patient_id\n AND app.symptom_id = symptoms.symptom_id\n ORDER BY doctor, FirstN'''\n\n for row in cur.execute(query):\n doctor = row['doctor']\n\n if doctor not in app_by_doctor:\n app_by_doctor[doctor] = []\n\n app_by_doctor[doctor].append(row)\n\n return app_by_doctor",
"def list_appointments(request, for_date: date, current_user_id=1):\n\n if request.method != 'GET':\n return HttpResponse(status=405)\n\n query_set = BookingService.get_appointments_for_range(current_user_id, for_date, timedelta(days=1) + for_date)\n return JsonResponse(status=200, data={\"appointments\": [model_to_dict(model) for model in query_set]})",
"def get_items_by_date(month, year):",
"def getMonthCalendar(self):\n res = []\n for dayline in calendar.monthcalendar(self.curr.year, self.curr.month):\n res_line = []\n for day in dayline:\n data = False\n total = 0\n abnormal = 0\n if day in self.curr.events:\n data = True\n if self.area == 0:\n a = filter_samples(day,self.curr.month,self.curr.year,0)\n else:\n a = filter_samples(day,self.curr.month,self.curr.year,self.area)\n total = a.count()\n #assigns td name to be used by css to put color\n abnormal = get_normality(day,self.curr.month,self.curr.year)\n res_line.append((day, data, total,abnormal))\n res.append(res_line)\n return res",
"def calendar_all(request, pYear, pMonth):\n lYear = int(pYear)\n lMonth = int(pMonth)\n lEvents = event_filter(Event.objects.all(),lYear,lMonth)\n lCalendar = EventCalendar(lEvents).formatmonth(lYear, lMonth)\n \n dict=make_dict(pYear,pMonth)\n dict['Calendar']=mark_safe(lCalendar)\n return render_to_response('cal/month.html',dict)",
"def specified_month_all(self, trans, **kwd):\n message = ''\n PageSpec = namedtuple('PageSpec', ['entries', 'offset', 'page', 'pages_found'])\n\n params = util.Params(kwd)\n monitor_email = params.get('monitor_email', 'monitor@bx.psu.edu')\n specs = sorter('date', kwd)\n offset = 0\n limit = 10\n sort_id = specs.sort_id\n order = specs.order\n arrow = specs.arrow\n _order = specs.exc_order\n\n if \"entries\" in kwd:\n entries = int(kwd.get('entries'))\n else:\n entries = 10\n limit = entries * 4\n\n if \"offset\" in kwd:\n offset = int(kwd.get('offset'))\n else:\n offset = 0\n\n if \"page\" in kwd:\n page = int(kwd.get('page'))\n else:\n page = 1\n\n # In case we don't know which is the monitor user we will query for all jobs\n monitor_user_id = get_monitor_id(trans, monitor_email)\n\n # If specified_date is not received, we'll default to the current month\n specified_date = kwd.get('specified_date', datetime.utcnow().strftime(\"%Y-%m-%d\"))\n specified_month = specified_date[:7]\n\n year, month = map(int, specified_month.split(\"-\"))\n start_date = date(year, month, 1)\n end_date = start_date + timedelta(days=calendar.monthrange(year, month)[1])\n month_label = start_date.strftime(\"%B\")\n year_label = start_date.strftime(\"%Y\")\n\n # Use to make the page table\n month_jobs = sa.select((sa.func.date(model.Job.table.c.create_time).label('date'),\n sa.func.count(model.Job.table.c.id).label('total_jobs')),\n whereclause=sa.and_(model.Job.table.c.user_id != monitor_user_id,\n model.Job.table.c.create_time >= start_date,\n model.Job.table.c.create_time < end_date),\n from_obj=[model.Job.table],\n group_by=['date'],\n order_by=[_order],\n offset=offset,\n limit=limit)\n\n # Use to make trendline\n all_jobs = sa.select((model.Job.table.c.create_time.label('date'), model.Job.table.c.id.label('id')),\n whereclause=sa.and_(model.Job.table.c.user_id != monitor_user_id,\n model.Job.table.c.create_time >= start_date,\n model.Job.table.c.create_time < end_date))\n\n trends = dict()\n for job in all_jobs.execute():\n job_hour = int(job.date.strftime(\"%-H\"))\n job_day = job.date.strftime(\"%d\")\n\n try:\n trends[job_day][job_hour] += 1\n except KeyError:\n trends[job_day] = [0] * 24\n trends[job_day][job_hour] += 1\n\n jobs = []\n for row in month_jobs.execute():\n row_dayname = row.date.strftime(\"%A\")\n row_day = row.date.strftime(\"%d\")\n\n jobs.append((row_dayname,\n row_day,\n row.total_jobs,\n row.date))\n\n pages_found = ceil(len(jobs) / float(entries))\n page_specs = PageSpec(entries, offset, page, pages_found)\n\n return trans.fill_template('/webapps/reports/jobs_specified_month_all.mako',\n order=order,\n arrow=arrow,\n sort_id=sort_id,\n month_label=month_label,\n year_label=year_label,\n month=month,\n page_specs=page_specs,\n jobs=jobs,\n trends=trends,\n is_user_jobs_only=monitor_user_id,\n message=message)",
"def populate_appointments(endpoint, doctor):\n date = timezone.now().strftime('%Y-%m-%d')\n\n appointments = endpoint.list({'doctor': doctor.id, 'date': date})\n for appointment_data in appointments:\n patient = Patient.objects.get(id=appointment_data['patient'])\n\n # simplify/clean statuses for project purposes\n status = appointment_data['status']\n if status not in ('Checked In', 'In Session', \n 'Complete', 'Cancelled'):\n status = ''\n\n\n data = {\n 'doctor': doctor,\n 'patient': patient,\n 'scheduled_time': appointment_data['scheduled_time'],\n 'duration': appointment_data['duration'],\n 'office': appointment_data['office'],\n 'exam_room': appointment_data['exam_room'],\n 'status': status,\n 'reason': appointment_data['reason']\n }\n\n appointment, created = Appointment.objects.update_or_create(\n defaults=data, pk=appointment_data['id'])",
"def article_months(category):\n months = Article.objects.filter(category=category).dates(\"publication_date\", \"month\")\n\n return {\n \"category\": category,\n \"months\": months,\n }",
"def add_monthly_availability():\n input_json = request.json\n year = input_json['year']\n month = input_json['month']\n doctor_id = input_json['doctor_id']\n\n print(\"Quick assigning monthly event for Doctor No.{} on {}-{}\".format(doctor_id,year,month))\n doctor_calendar.insertMonthlyEvents(int(year),int(month),int(doctor_id))\n\n return jsonify(input_json)",
"def calendar(request, year, month):\n year, month = int(year), int(month)\n\n jogging = Jogging.objects.filter(date__year=year, date__month=month)\n yoga = Yoga.objects.filter(date__year=year, date__month=month)\n weightlifting = (\n Weightlifting.objects.filter(date__year=year, date__month=month)\n )\n\n exercise_dates = [\n entry.date for entry in chain(jogging, yoga, weightlifting)\n ]\n\n exercise_calendar = Calendar(exercise_dates)\n\n return HttpResponse(exercise_calendar.formatmonth(year, month))",
"def get_month(self, year, month):\n path = self.get_path(year, month)\n\n if not os.path.exists(path):\n return []\n\n li = []\n\n with open(path, newline='') as f:\n reader = csv.reader(f, delimiter='\\t')\n for line in reader:\n try:\n entry = self._read_entry(line)\n except ValueError:\n message = 'Could not read the file for {}.{}'\n raise DatabaseError(message.format(year, month))\n else:\n li.append(entry)\n\n return list(sorted(li, key=lambda d: d['start']))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Serves a page which shows all the appointments in the database.
|
def get(self):
return render_template("appointments.html",
apps=get_db().get_all_apps())
|
[
"def my_appts():\n\n\tform = MyAppointmentSearch()\n\tappointments = Appointment.query.filter_by(date=str(datetime.date.today())).all()\n\n\tif form.validate_on_submit():\n\t\tselection = form.filter_menu.data\n\t\tif selection == '2':\n\t\t\tcurrent_date = date_tool.get_current_date()\n\t\t\tendof_week = date_tool.get_endof_week()\n\t\t\tappointments = db.session.query(Appointment).filter(Appointment.date.between(current_date, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t endof_week)).order_by(Appointment.date.asc())\n\t\telif selection == '3':\n\t\t\tstart_date, end_date = date_tool.getcurrent_beginend_ofmonth()\n\t\t\tappointments = db.session.query(Appointment).filter(Appointment.date.between(start_date, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t end_date)).order_by(Appointment.date.asc())\n\t\telse:\n\t\t\treturn redirect(url_for('my_appts'))\n\n\treturn render_template('my_appts.html', form=form, appointments=appointments, pretty_date=pretty_date)",
"def list_appointments(request, for_date: date, current_user_id=1):\n\n if request.method != 'GET':\n return HttpResponse(status=405)\n\n query_set = BookingService.get_appointments_for_range(current_user_id, for_date, timedelta(days=1) + for_date)\n return JsonResponse(status=200, data={\"appointments\": [model_to_dict(model) for model in query_set]})",
"def appointments(self):\n appointments = []\n if self.show == 'forms':\n appointments = [self.appointment]\n else:\n # or filter appointments for the current membership categories\n # schedule_group__membership_form\n codes = []\n for category in self.membership_form_category:\n codes.extend(MembershipForm.objects.codes_for_category(membership_form_category=category))\n appointments = Appointment.objects.filter(\n registered_subject=self.registered_subject,\n visit_definition__code__in=codes).order_by(\n 'visit_definition__time_point', 'visit_instance', 'appt_datetime')\n return appointments",
"def get(self, request):\n user = self.request.user\n\n if user.is_staff:\n appointments = Appointment.objects.all()\n else:\n appointments = Appointment.objects.filter(client=user)\n\n serializer = AppointmentSerializer(appointments, many=True)\n return Response(serializer.data)",
"def get_appointments(self):\n if self.is_admin():\n return Appointment.objects\n\n elif self.is_doctor():\n return Appointment.objects.filter(doctor=self)\n\n return Appointment.objects.filter(patient=self)",
"def appointment_db_query(self):\n \n appointments = []\n sql = \"\"\"\n SELECT appt.id AS appt_id, appt.customer_id AS cust_id,\n DATE_FORMAT(appt.startDate, '%W %m/%d/%Y %l:%i %p') AS appt_lt,\n CONVERT_TZ(DATE_SUB(appt.startDate, INTERVAL 2 HOUR), pref.value, 'UTC') AS reminder_utc,\n comp.id AS comp_id, comp.name AS comp_name, comp.domain AS comp_domain, pref.value AS comp_timezone,\n (SELECT value from preferences AS p WHERE name='receipt header' AND p.company_id=comp.id) AS comp_address,\n cust.email AS cust_email, cust.firstname AS cust_fname, cust.surname AS cust_lname, cust.phone_num as cust_phone\n FROM appointments AS appt\n INNER JOIN companies as comp ON comp.id=appt.company_id\n INNER JOIN customers as cust ON cust.id=appt.customer_id\n INNER JOIN preferences AS pref ON pref.company_id=comp.id\n WHERE (DATE(appt.startDate)=DATE(CONVERT_TZ(CURDATE(), 'UTC', pref.value)) \n OR DATE(appt.startDate)=DATE(CONVERT_TZ(DATE_ADD(CURDATE(), INTERVAL 1 DAY), 'UTC', pref.value)))\n AND pref.name='timezone';\n \"\"\"\n db = self.db_connect()\n appointments_db_query = self.db_execute_query(db, sql)\n appointments = appointments_db_query\n return appointments",
"def view_appointment(self):\n stage = 0\n while True:\n Parser.print_clean()\n while stage == 0:\n print(f\"Viewing confirmed appointments for GP {self.username}.\")\n user_input = Parser.selection_parser(options={\"T\": \"View today's appointments\", \"D\": \"Select by Date\",\n \"--back\": \"to go back\"})\n if user_input == \"T\":\n selected_date = datetime.datetime.today().date()\n print(str(selected_date))\n stage = 1\n elif user_input == \"--back\":\n print(\"\\n\")\n return\n else:\n selected_date = Parser.date_parser(question=\"Select a Date:\")\n if selected_date == \"--back\":\n return\n else:\n stage = 1\n while stage == 1:\n bookings_result = SQLQuery(\"SELECT visit.BookingNo, visit.Timeslot, visit.NHSNo, users.firstName, \"\n \"users.lastName, visit.Confirmed FROM visit INNER JOIN users ON \"\n \"visit.NHSNo = users.ID WHERE visit.StaffID = ? AND visit.Timeslot >= ? AND \"\n \"visit.Timeslot <= ? AND visit.Confirmed = 'T' ORDER BY visit.Timeslot ASC\")\\\n .fetch_all(decrypter=EncryptionHelper(), parameters=(self.ID, selected_date,\n selected_date + datetime.timedelta(days=1)))\n message = f\"for {selected_date.strftime('%Y-%m-%d')} (confirmed).\"\n booking_no = GP.print_select_bookings(bookings_result, message)\n if not booking_no:\n stage = 0\n else:\n GP.start_appointment(booking_no[1])",
"def viewAllEvents(request):\n\n # Access control - check user is logged in before displaying page\n try:\n user_id = request.session['user_id']\n except:\n return render(request, 'login.html')\n\n # Select all the events from the events table\n with connection.cursor() as cursor:\n cursor.execute(\"SELECT * FROM posts\")\n data = dictfetchall(cursor)\n\n # Get context to be displayed in template\n pic_url = getProfile(request)\n context = {'data': data, 'user_id': user_id,\n 'pfp': pic_url}\n\n return render(request, 'showevents.html', context)",
"def populate_appointments(endpoint, doctor):\n date = timezone.now().strftime('%Y-%m-%d')\n\n appointments = endpoint.list({'doctor': doctor.id, 'date': date})\n for appointment_data in appointments:\n patient = Patient.objects.get(id=appointment_data['patient'])\n\n # simplify/clean statuses for project purposes\n status = appointment_data['status']\n if status not in ('Checked In', 'In Session', \n 'Complete', 'Cancelled'):\n status = ''\n\n\n data = {\n 'doctor': doctor,\n 'patient': patient,\n 'scheduled_time': appointment_data['scheduled_time'],\n 'duration': appointment_data['duration'],\n 'office': appointment_data['office'],\n 'exam_room': appointment_data['exam_room'],\n 'status': status,\n 'reason': appointment_data['reason']\n }\n\n appointment, created = Appointment.objects.update_or_create(\n defaults=data, pk=appointment_data['id'])",
"def test_get_paginated_officer_appointments(caplog):\n TEST_OFFICER_ID: str = \"jPDjBFxindfhdgXh5IEu00ZGauA\"\n output = get_officer_appointments_data(\n TEST_OFFICER_ID,\n )\n assert output[\"total_results\"] == 59\n assert len(output[\"items\"]) == 59\n assert caplog.records == []",
"def show_all_employees():\n\n logger.debug('Function show_all_employees(). Routed to /employees')\n titles = ['Name', 'Birthday', 'In Department']\n employees = es.get_all()\n\n logger.info('Get list of employees, length = %i', len(employees))\n return render_template('employees.html',\n title='Employees',\n table_title='List of Employees',\n headers=titles,\n employees=employees)",
"def home() -> list:\r\n alarm_list = get_alarms()\r\n weather_now = update_weather()\r\n news = update_news()\r\n notification_list = update_notifications()\r\n return render_template('index.html', alarm_list=alarm_list,\r\n weather_now=weather_now, news=news,\r\n notification_list=notification_list)",
"def get_appointments(doc_id: int, cur) -> json:\n return cur.execute(\n \"SELECT appointment FROM Doctors where UID = ?;\", (doc_id,)\n ).fetchone()[0]",
"def get(self):\n return render_template(\"patients.html\",\n patients=get_db().get_all_patients())",
"def search_appt():\n\n\tform = SearchAppointmentForm()\n\tappt_results = db.session.query(Appointment).order_by(Appointment.id.desc()).limit(15)\n\n\tif form.validate_on_submit():\n\t\tselection = form.search_by.data\n\t\t\n\t\tif selection == '1':\n\t\t\tchars = '!@#$%^&*()_-+|\\\\}]{[;:/?.>,<`~='\n\t\t\tstate = True\n\t\t\tuser_input = form.search_field.data\n\n\t\t\twhile state:\n\t\t\t\tfor char in user_input:\n\t\t\t\t\tif char in chars or char.isalpha():\n\t\t\t\t\t\tstate = False\n\t\t\t\tbreak\n\n\t\t\tif state:\n\t\t\t\traw_results = Appointment.query.filter_by(marketer_id=int(form.search_field.data)).all()\n\t\t\t\tappt_results = list(reversed(raw_results))\n\t\t\telse:\n\t\t\t\tappt_results = db.session.query(Appointment).order_by(Appointment.id.desc()).limit(15)\n\t\t\t\tflash('Please enter only integers')\n\t\telif selection == '2':\n\t\t\tappt_results = Appointment.query.filter_by(client_first=form.search_field.data).all()\n\t\telif selection == '3':\n\t\t\tappt_results = Appointment.query.filter_by(client_last=form.search_field.data).all()\n\t\telif selection == '4':\n\t\t\tappt_results = Appointment.query.filter_by(date=form.search_field.data).all()\n\n\treturn render_template('search_appt.html', results=appt_results, form=form, pretty_date=pretty_date)",
"def AddAppointments( self, Appointments ):\n\t\tfor App in Appointments:\n\t\t\tevent = Event()\n\t\t\tif App.has_key( 'Class' ): \n\t\t\t\tevent.add('summary', App['Subject']+\" - \"+App['Class'])\n\t\t\telse:\n\t\t\t\tevent.add('summary', App['Subject'])\n\t\t\tevent.add('dtstart', App['Hours'][0])\n\t\t\tevent.add('dtend', App['Hours'][1])\n\t\t\t\n\t\t\tif App.has_key( 'Location' ): event.add( 'location', App['Location'] )\n\t\t\t\n\t\t\tself.cal.add_component(event)\n\t\t\t# print \"Event added\", App",
"def showAllEats():\n\n eats = session.query(Eats).all()\n return render_template('alleats.html', eats=eats,\n login_session=login_session)",
"def list_appointments(\n self,\n booking_business_id, # type: str\n orderby=None, # type: Optional[List[Union[str, \"models.Enum13\"]]]\n select=None, # type: Optional[List[Union[str, \"models.Enum14\"]]]\n expand=None, # type: Optional[List[str]]\n **kwargs # type: Any\n ):\n # type: (...) -> Iterable[\"models.CollectionOfBookingAppointment\"]\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.CollectionOfBookingAppointment\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n def prepare_request(next_link=None):\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n if not next_link:\n # Construct URL\n url = self.list_appointments.metadata['url'] # type: ignore\n path_format_arguments = {\n 'bookingBusiness-id': self._serialize.url(\"booking_business_id\", booking_business_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n if self._config.top is not None:\n query_parameters['$top'] = self._serialize.query(\"self._config.top\", self._config.top, 'int', minimum=0)\n if self._config.skip is not None:\n query_parameters['$skip'] = self._serialize.query(\"self._config.skip\", self._config.skip, 'int', minimum=0)\n if self._config.search is not None:\n query_parameters['$search'] = self._serialize.query(\"self._config.search\", self._config.search, 'str')\n if self._config.filter is not None:\n query_parameters['$filter'] = self._serialize.query(\"self._config.filter\", self._config.filter, 'str')\n if self._config.count is not None:\n query_parameters['$count'] = self._serialize.query(\"self._config.count\", self._config.count, 'bool')\n if orderby is not None:\n query_parameters['$orderby'] = self._serialize.query(\"orderby\", orderby, '[str]', div=',')\n if select is not None:\n query_parameters['$select'] = self._serialize.query(\"select\", select, '[str]', div=',')\n if expand is not None:\n query_parameters['$expand'] = self._serialize.query(\"expand\", expand, '[str]', div=',')\n\n request = self._client.get(url, query_parameters, header_parameters)\n else:\n url = next_link\n query_parameters = {} # type: Dict[str, Any]\n request = self._client.get(url, query_parameters, header_parameters)\n return request\n\n def extract_data(pipeline_response):\n deserialized = self._deserialize('CollectionOfBookingAppointment', pipeline_response)\n list_of_elem = deserialized.value\n if cls:\n list_of_elem = cls(list_of_elem)\n return deserialized.odata_next_link or None, iter(list_of_elem)\n\n def get_next(next_link=None):\n request = prepare_request(next_link)\n\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n error = self._deserialize(models.OdataError, response)\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n return pipeline_response\n\n return ItemPaged(\n get_next, extract_data\n )",
"def index(request):\n\n # Admin can see all, users can see theirs only\n if request.user.has_perm(\"admin\"):\n bookings = Booking.objects.all()\n else:\n bookings = Booking.objects.filter(user=request.user)\n\n context = {\n \"resources\": Resource.objects.all(),\n \"bookings\": bookings,\n \"resource_form\": ResourceForm(),\n \"booking_form\": BookingForm(),\n \"profile_form\": ProfileForm(instance=request.user.profile),\n \"now\": timezone.now(),\n }\n return render(request, \"booking/index.html\", context)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Serves a page which shows all doctors in the database.
|
def get(self):
return render_template("doctors.html",
doctors=get_db().get_all_doctors())
|
[
"def get(self):\n return make_response(jsonify(doctorDAO.get_doctors(db.con_pool)), 200)",
"def get_doctors():\n all_doctors = schema.Doctor.query.all()\n result = schema.doctors_schema.dump(all_doctors)\n return jsonify(result.data)",
"def doctor_main_page(request, doctor_id):\n doctor = get_object_or_404(Professor, pk = doctor_id)\n if request.method == 'GET':\n return render(request, 'doctor/main.html', {'doctor': doctor})",
"def doctors(self, city_id: int, service_id: int, clinic_id: int = None, from_date: date = None) -> Dict[int, str]:\n return self._mapped_visit_filters(\n 'Doctors', city_id=city_id, clinic_id=clinic_id, from_date=from_date, service_id=service_id)",
"def department_list():\n depts = Department.query.all()\n return render_template(\"depts.html\", depts=depts)",
"def get(self):\n return render_template(\"patients.html\",\n patients=get_db().get_all_patients())",
"def index():\n recipes = mongo.db.recipe.find()\n categories = mongo.db.category.find()\n cuisines = mongo.db.cuisine.find()\n\n return render_template(\"index.html\", recipes=recipes,\n categories=categories,\n cuisines=cuisines)",
"def get(self):\n elections = models.Election.gql('WHERE owner = :1', self.user).fetch(1000)\n self.response.out.write(render('elections',\n elections=elections,\n user=self.user))",
"def list_cities():\n states = storage.all(State).values()\n return render_template('8-cities_by_states.html', states=states)",
"def show_all_employees():\n\n logger.debug('Function show_all_employees(). Routed to /employees')\n titles = ['Name', 'Birthday', 'In Department']\n employees = es.get_all()\n\n logger.info('Get list of employees, length = %i', len(employees))\n return render_template('employees.html',\n title='Employees',\n table_title='List of Employees',\n headers=titles,\n employees=employees)",
"def show_all_movies():\n\n movies = crud.get_movies()\n\n return render_template('all_movies.html', movies = movies)",
"def _get_doctor_list(self, form):\n available_doctors = []\n start_date = form.cleaned_up['start_date']\n no_of_days = form.cleaned_up['no_of_days']\n start_time = form.cleaned_up['start_time']\n am_pm = form.cleaned_up['am_pm']\n duration = form.cleaned_up['duration']\n start_time = int(self._convert_to_24_hour(start_time, am_pm))\n\n start_weekday_id = start_date.weekday() + 1 # +1 because weekday start from 0 till 6 but in db it is 1 to 7\n end_weekday_id = (start_weekday_id + no_of_days) % 7\n end_time = (start_time + duration) % 24\n\n end_date = start_date + timedelta(no_of_days)\n\n avail_days_obj = AvailableDay.get_days_by_ids(start_weekday_id, end_weekday_id)\n all_available_doctors = DoctorSchedule.get_doctorlist(avail_days_obj, start_time, end_time)\n\n if all_available_doctors:\n already_booked_doctors = BookingStatus.is_available(start_date, end_date, start_time, end_time)\n\n all_booked_doctor = set([booked.schedule.id for booked in already_booked_doctors])\n for available_doc in all_available_doctors:\n if all_booked_doctor and available_doc.id not in all_booked_doctor:\n available_doctors.apppend(available_doc)\n\n return available_doctors",
"def customer_list(request):\n customer = Customer.objects.all()\n page = request.GET.get('page', 1)\n\n paginator = Paginator(customer, 25)\n try:\n customers = paginator.page(page)\n except PageNotAnInteger:\n customers = paginator.page(1)\n except EmptyPage:\n customers = paginator.page(paginator.num_pages)\n context = {\n 'title': 'Customer List',\n 'customers': customers,\n }\n return render(request, 'invoice/customers.html', context)",
"def all_courses():\r\n categories = db.session.query(Category).all()\r\n courses = db.session.query(Course).all()\r\n\r\n return render_template('course_list.html',\r\n categories=categories,\r\n courses=courses)",
"def listPersons():\n\n print 'Directory'\n sql.execute('SELECT * FROM persons ORDER BY name')\n allRows = sql.fetchall()\n for row in allRows:\n print row[0], row[1]",
"def accesos_list():\n accesos = Accesos().get_accesos()\n return render_template(\n 'access/list.html.jinja',\n accesos=accesos\n )",
"def index():\n results = models.Journal.select()\n return render_template('index.html', journals=results)",
"def plants_list():\n\n # Database call to retrieve *all* plants from the Mongo database's `plants` collection.\n plants_data = plants_collection.find({})\n\n context = {\n 'plants': plants_data,\n }\n return render_template('plants_list.html', **context)",
"def index(request):\n phones = Phone.objects.filter(domain=request.user.selected_domain).select_related(depth=3)\n return render_to_response(request, \"phone/phone_index.html\", {\"phones\": phones})"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Serves the page for showing all patients in the database.
|
def get(self):
return render_template("patients.html",
patients=get_db().get_all_patients())
|
[
"def get_all():\n return jsonify(patients.get_all())",
"def get_all_patient_detailed(request, *args, **kwargs):\n user = request.user\n patient_id = kwargs['id']\n if not patient_id: raise PermissionDenied({'detail': \"mention the patient\", \"error_code\": 609})\n panel = get_my_partner_panel(user, patient_id)\n patient = panel.patient\n panel_serializer = PanelSerializerWithoutDoctor(panel)\n drugs = Drug.objects.filter(doctor__user=user, patient=patient).order_by('-consuming_day')\n drug_serializer = DrugSerializerWithoutPatientAndDoctor(drugs, many=True)\n doctor_events = get_relevant_health_events_queryset(user)\n doctor_patient_events = doctor_events.filter(\n Q(owner=patient.user) | Q(invited_patients=patient)).order_by('-time')\n events_serializer = HealthEventSerializerJustIdAndNameForParticipates(doctor_patient_events, many=True)\n return Response({\"panel\": panel_serializer.data, 'drugs': drug_serializer.data, 'events': events_serializer.data})",
"def get(self):\n return render_template(\"doctors.html\",\n doctors=get_db().get_all_doctors())",
"def show_all_employees():\n\n logger.debug('Function show_all_employees(). Routed to /employees')\n titles = ['Name', 'Birthday', 'In Department']\n employees = es.get_all()\n\n logger.info('Get list of employees, length = %i', len(employees))\n return render_template('employees.html',\n title='Employees',\n table_title='List of Employees',\n headers=titles,\n employees=employees)",
"def index():\n results = models.Journal.select()\n return render_template('index.html', journals=results)",
"def list_patient(self):\r\n conn, c = self._connect()\r\n c.execute('''SELECT U.ID, U.FirstName, U.Surname, max(V.VisitDate) FROM Visit AS V, User AS U WHERE U.ID=V.PatientID AND V.Complete=1 GROUP BY V.PatientID''')\r\n res = c.fetchall()\r\n self._disconnect(conn, c)\r\n return res",
"def viewAllEvents(request):\n\n # Access control - check user is logged in before displaying page\n try:\n user_id = request.session['user_id']\n except:\n return render(request, 'login.html')\n\n # Select all the events from the events table\n with connection.cursor() as cursor:\n cursor.execute(\"SELECT * FROM posts\")\n data = dictfetchall(cursor)\n\n # Get context to be displayed in template\n pic_url = getProfile(request)\n context = {'data': data, 'user_id': user_id,\n 'pfp': pic_url}\n\n return render(request, 'showevents.html', context)",
"def get(self):\n return render_template(\"appointments.html\",\n apps=get_db().get_all_apps())",
"def plants_list():\n\n # Database call to retrieve *all* plants from the Mongo database's `plants` collection.\n plants_data = plants_collection.find({})\n\n context = {\n 'plants': plants_data,\n }\n return render_template('plants_list.html', **context)",
"def show_pets():\n\n pets = Pet.query.all()\n\n return render_template(\"pet-list.html\", pets=pets)",
"def animals(request):\n animals = Animal.objects.all()\n\n template = 'animals/animals.html'\n context = {\n 'animals': animals,\n }\n return render(request, template, context)",
"def index():\n sort = __get_sort_query_param()\n page = __get_page_query_param()\n name = __get_name_query_param()\n\n players = __get_base_query(name, sort).paginate(page, 15)\n\n return render_template('index.html', columns=columns.values(), players=players, name=name, sort=sort, page=page)",
"def showAllEats():\n\n eats = session.query(Eats).all()\n return render_template('alleats.html', eats=eats,\n login_session=login_session)",
"def list_pets():\n pets = Pet.query.all()\n return render_template('list.html', pets=pets)",
"def get_patients_list(self):\n return self.patients_list",
"def plants_list():\n return render_template('plants_list.html', plants=plants.find())",
"def transfer_patient_list(request):\n user_type = get_user_type(request.user)\n\n if user_type == \"doctor\":\n doctor = Doctor.objects.filter(user=request.user)[0]\n hospitals = Hospital.objects.filter(doctor=doctor)\n patients = Patient.objects.filter(transfer_to__in=hospitals) | Patient.objects.filter(admitted_to__in=hospitals)\n patients = patients.order_by('user__last_name')\n elif user_type == \"admin\":\n h_a = Hospital_Admin.objects.filter(user=request.user)[0]\n hospital = Hospital.objects.filter(hospital_admin=h_a)\n patients = Patient.objects.filter(transfer_to=hospital) | Patient.objects.filter(admitted_to=hospital)\n patients = patients.order_by('user__last_name')\n\n return render(request, 'HealthApps/transfer_patient_list.html', dict(patients=patients, user_type=user_type))",
"def get(self):\n\n try:\n session = self.acquire_sql_session()\n except:\n raise HTTPError(500, 'Could not acquire database connection')\n\n try:\n contests_running = ContestRepository.get_active_contests(session)\n contests_upcoming = ContestRepository.get_future_contests(session)\n contests_recent = ContestRepository.get_recent_contests(session)\n except:\n raise HTTPError(500, 'A database error has occured.')\n\n session.close()\n\n self.render(\"contest_list.html\",\n contests_running=contests_running,\n contests_upcoming=contests_upcoming,\n contests_recent=contests_recent,\n dateOf=dateOf,\n timeOf=timeOf)",
"def all_music():\n if notLoggedIn(): # check if logged in\n return redirect( url_for('index'))\n conn = dbi.connect()\n allMusic = music.getAllMusic(conn)\n return render_template('all-music.html',all_music =allMusic, title='All Titles')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Serves the page for showing all symptoms in the database.
|
def get(self):
return render_template("symptoms.html",
symptoms=get_db().get_all_symptoms())
|
[
"def show_isps():\n isps = db_session.query(ISP).order_by(ISP.name)\n return render_template(\n \"isps.html\",\n isps=isps,\n location=\"home\",\n title=\"ISPs\")",
"def all_music():\n if notLoggedIn(): # check if logged in\n return redirect( url_for('index'))\n conn = dbi.connect()\n allMusic = music.getAllMusic(conn)\n return render_template('all-music.html',all_music =allMusic, title='All Titles')",
"def test_get_all_symptoms(self):\n # hit the API endpoint\n response = self.client.get(reverse(ENDPOINT, kwargs={VERSION: API_VERSION_V1}))\n # fetch the data from db\n expected = Symptom.objects.all()\n serialized = SymptomSerializer(expected, many=True)\n self.assertEqual(response.data, serialized.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def show_searches():\n return render_template('show_searches.html', saved_searches=SavedSearch.query.all())",
"def index(request):\n return render(request, \"encyclopedia/index.html\", {\"entries\": util.list_entries()})",
"def all_tickets(request):\n tickets = Ticket.objects.all()\n return render(request, \"tickets.html\", {'tickets': tickets})",
"def get(self):\n return render_template(\"patients.html\",\n patients=get_db().get_all_patients())",
"def all_envelopes():\n return render_page(\"envelopes\", envelopes=db_session.query(Envelope))",
"def __callShowSubjects(self):\r\n subs = self.__table.showSubjects()\r\n if len(subs) == 0:\r\n print(\"There's no subjects.\")\r\n else:\r\n print(\"*List of subjects*\")\r\n for sub in subs:\r\n print(sub.getID()+\" | \"+sub.getName()+ \" : \"+ sub.getTeacher())",
"def get_hows():\n hows = list(mongo.db.hows.find())\n return render_template(\"get_hows.html\", hows=hows)",
"def show_all_employees():\n\n logger.debug('Function show_all_employees(). Routed to /employees')\n titles = ['Name', 'Birthday', 'In Department']\n employees = es.get_all()\n\n logger.info('Get list of employees, length = %i', len(employees))\n return render_template('employees.html',\n title='Employees',\n table_title='List of Employees',\n headers=titles,\n employees=employees)",
"def stories(request):\n stories = Story.objects.all() # retrieve all Story instances from database\n context = {\n 'stories': stories,\n 'page_title': 'Stories',\n }\n template = \"stories/stories.html\"\n return render(request, template, context)",
"def index():\n results = models.Journal.select()\n return render_template('index.html', journals=results)",
"def showAllEats():\n\n eats = session.query(Eats).all()\n return render_template('alleats.html', eats=eats,\n login_session=login_session)",
"def display_all_tickets(self):\n\n self.model.get_all_tickets()\n pass",
"def index(request):\n resepies_list = Resepi.objects.all()\n context = {'resepies_list': resepies_list}\n return render(request, 'myresepies/index.html', context)",
"def survey_page():\n return render_template(\n \"survey_start.html\",\n title = survey.title,\n instructions = survey.instructions\n )",
"def all_issues(request):\n\n\n allissues = Issue.objects.all().order_by('-created_at')\n comments = Comment.objects.all()\n\n # Pagination settings ##\n\n page = request.GET.get('page', 1)\n paginator = Paginator(allissues, 10)\n \n try:\n allissues = paginator.page(page)\n \n except PageNotAnInteger:\n \n allissues = paginator.page(1)\n \n except EmptyPage:\n \n allissues = paginator.page(paginator.num_pages)\n\n return render(request, 'issue/issues.html', {'issues': allissues}, {'comments': comments})",
"def render_surveys(self):\n return render_to_string('surveys.html', {'surveys': self.surveys, 'survey': self.survey})"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Turns the music on or off in the game.
|
def turn_music(self):
if self.config.getboolean('audio', 'music'):
self.config.set('audio', 'music', 'false')
pygame.mixer.music.stop()
self.speech.speak(self.phrases['music_off'])
else:
self.config.set('audio', 'music', 'true')
self.music_play()
self.speech.speak(self.phrases['music_on'])
with open('settings.ini', 'w') as config_file:
self.config.write(config_file)
|
[
"def play_menu_music(self):\n if not self.muted:\n self.menumusic.play(-1) # infinite loop",
"def setMusic(self, on = True, musicClass = None):\n\n command = 'SET MUSIC {}'.format(['OFF', 'ON'][on])\n if musicClass is not None:\n command += ' {}'.format(musicClass)\n\n d = self.sendCommand(command)\n d = d.addCallback(self.resultAsInt)\n return d",
"async def _sfx(self, ctx):\n #default on.\n server = ctx.message.server\n if server.id not in self.settings[\"SERVER_SFX_ON\"]:\n self.settings[\"SERVER_SFX_ON\"][server.id] = True\n else:\n self.settings[\"SERVER_SFX_ON\"][server.id] = not self.settings[\"SERVER_SFX_ON\"][server.id]\n #for a toggle, settings should save here in case bot fails to send message\n fileIO(\"data/audio/settings.json\", \"save\", self.settings)\n if self.settings[\"SERVER_SFX_ON\"][server.id]:\n await self.bot.say(\"Sound effects are now enabled on this server.\")\n else:\n await self.bot.say(\"Sound effects are now disabled on this server.\")",
"def mute_music(self):\n if self.ventana.muted: # Unmute the music\n mixer.music.set_volume(0.7)\n self.ventana.volumeBtn.configure(image=self.ventana.volumePhoto)\n self.ventana.scale.set(70)\n self.ventana.muted = FALSE\n else: # mute the music\n mixer.music.set_volume(0)\n self.ventana.volumeBtn.configure(image=self.ventana.mutePhoto)\n self.ventana.scale.set(0)\n self.ventana.muted = TRUE",
"def BACKGROUND_MUSIC(self): \n musicSound = Sound(source = 'ninja.wav')\n musicSound.play()",
"def play_bg_music(self):\n pygame.mixer.music.play(-1)",
"def play():\n p.mixer.music.play(loops = -1)",
"def stopmusic(cls) -> None:\n pygame.mixer.music.stop()",
"def music_update(self, level):\n if level.music_key == self.current_level.music_key: return\n music_fade_ms = DEFAULT_MUSIC_FADE_MS\n thread.start_new_thread( Player.switch_music, ( self, music_fade_ms, self.current_level.music_key ) )",
"async def player(self):\n self.settings[\"AVCONV\"] = not self.settings[\"AVCONV\"]\n if self.settings[\"AVCONV\"]:\n await self.bot.say(\"Player toggled. You're now using Avconv\")\n else:\n await self.bot.say(\"Player toggled. You're now using Ffmpeg\")\n fileIO(\"data/audio/settings.json\", \"save\", self.settings)",
"def setAudio(self, audio, mode):\n\t\tpass",
"def toggle_play_pause():\n if is_active():\n is_playing = sp.current_user_playing_track()['is_playing']\n if is_playing:\n sp.pause_playback()\n else:\n sp.start_playback()\n else:\n pass",
"def joystickOnOff(self, on):\r\n if on:\r\n self._command(\"J\")\r\n else:\r\n self._command(\"H\")",
"def switchPlayStatus(self):\n if self.signalPlayer.playStatus == AudioSignalPlayer.PLAYING or \\\n self.signalPlayer.playStatus == AudioSignalPlayer.RECORDING:\n\n self.pause()\n\n elif self.signalPlayer.playStatus == AudioSignalPlayer.PAUSED or \\\n self.signalPlayer.playStatus == AudioSignalPlayer.STOPPED:\n\n self.play()",
"def handle_playback_on_put():\n global playback\n playback.on = True",
"def volume(self, value):\r\n pygame.mixer_music.set_volume(value)",
"async def test_music_mode_service(hass: HomeAssistant) -> None:\n config_entry = MockConfigEntry(\n domain=DOMAIN,\n data={CONF_HOST: IP_ADDRESS, CONF_NAME: DEFAULT_ENTRY_TITLE},\n unique_id=MAC_ADDRESS,\n )\n config_entry.add_to_hass(hass)\n bulb = _mocked_bulb()\n bulb.raw_state = bulb.raw_state._replace(model_num=0xA3) # has music mode\n bulb.microphone = True\n with _patch_discovery(), _patch_wifibulb(device=bulb):\n await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})\n await hass.async_block_till_done()\n\n entity_id = \"light.bulb_rgbcw_ddeeff\"\n assert hass.states.get(entity_id)\n\n bulb.effect = MODE_MUSIC\n bulb.is_on = False\n await hass.services.async_call(\n DOMAIN,\n \"set_music_mode\",\n {\n ATTR_ENTITY_ID: entity_id,\n ATTR_EFFECT: 12,\n ATTR_LIGHT_SCREEN: True,\n ATTR_SENSITIVITY: 50,\n ATTR_BRIGHTNESS: 50,\n ATTR_FOREGROUND_COLOR: [255, 0, 0],\n ATTR_BACKGROUND_COLOR: [0, 255, 0],\n },\n blocking=True,\n )\n bulb.async_set_music_mode.assert_called_once_with(\n sensitivity=50,\n brightness=50,\n mode=MusicMode.LIGHT_SCREEN.value,\n effect=12,\n foreground_color=(255, 0, 0),\n background_color=(0, 255, 0),\n )",
"def check_music_settings(self):\n self.gstMusic = None\n self.gstSound = None\n if gst_media_imported:\n try: \n if self.music_enabled:\n self.gstMusic = gst_media.MusicPlayer()\n if self.sound_enabled:\n self.gstSound = gst_media.SoundPlayer()\n except:\n # Playbin object creation failed\n self.log_msg('Warning: Failed to create Music gstreamer objects','0')\n return\n # Check dir\n if not os.path.isdir(self.wahcade_ini.get('music_path')):\n self.log_msg('Error: Music Path [%s] does not exist' % (self.musicpath))\n return\n # Set dir\n tracks = self.gstMusic.set_directory(self.musicpath, MUSIC_FILESPEC)\n # Set volume\n self.gstMusic.set_volume(self.music_vol)\n # Play\n if len(tracks) > 0:\n self.gstMusic.load_playlist(\n playlist = tracks,\n play = True,\n shuffle = self.musicshuffle)",
"def pause_play():"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Toggles the language used for phrases between Russian and English.
|
def change_language(self):
if 'ru' == self.config.get('total', 'language'):
self.config.set('total', 'language', 'en')
with open('languages.dat', 'rb') as lang_file:
self.phrases = pickle.load(lang_file)['en']
else:
self.config.set('total', 'language', 'ru')
with open('languages.dat', 'rb') as lang_file:
self.phrases = pickle.load(lang_file)['ru']
self.player.phrases = self.phrases
self.speech.speak(self.phrases['language'])
with open('settings.ini', 'w') as config_file:
self.config.write(config_file)
|
[
"def change_lang(self, new_lang: str):\r\n self.lang = new_lang",
"def on_action_english_triggered(self):\n self.set_language('en_US')",
"def lang_change():\n global lang, current_lang\n if lang == 'english':\n lang = 'polish'\n elif lang == 'polish':\n lang = 'english'\n current_lang = language[lang]\n game.caption_change(current_lang['title'])",
"def lang(self, language):\r\n doc.lang = language",
"def setRobotLanguage(session, language):\n\n tts = session.service(\"ALTextToSpeech\")\n\n try:\n assert language in tts.getSupportedLanguages()\n tts.setLanguage(language)\n\n except AssertionError:\n if language.lower() == \"indian\":\n print language + \" is not supported by the robot, \" \\\n \"language set to English\"\n\n tts.setLanguage(\"English\")",
"def switch_language(request):\n if request.session[LANGUAGE_SESSION_KEY] == \"tl\":\n request.session[LANGUAGE_SESSION_KEY] = \"en\"\n translation.activate(\"en\")\n elif request.session[LANGUAGE_SESSION_KEY] == \"en\":\n request.session[LANGUAGE_SESSION_KEY] = \"tl\"\n translation.activate(\"tl\")\n\n return request",
"def change_language():\n\tglobal language_dict,k\n\thello.config(text=language_dict[k])\n\tk = (1 + k) % 6",
"async def lang(value):\n global LANG\n LANG = value.pattern_match.group(1)\n await value.edit(\"Default language changed to **\" + LANG + \"**\")\n if BOTLOG:\n await value.client.send_message(\n BOTLOG_CHATID, \"Default language changed to **\" + LANG + \"**\")",
"def languageChanged(self, language = None):\r\n self.createTextDictionary()\r\n\r\n if language:\r\n self.phoneLanguage = language\r\n else:\r\n self.phoneLanguage = self.getLanguage()",
"def _set_language(self, language):\n self.m_language = language",
"def change_ru_RU(self):\n self.language = 'ru_RU'\n self.save_settings_to_file()\n self.load_settings()\n self.start_setting()",
"def set_language_properties(self,iSurveyID,aSurveyLocaleData,sLanguage=None):",
"def change_en_US(self):\n self.language = 'en_US'\n self.save_settings_to_file()\n self.load_settings()\n self.start_setting()",
"def activate_lang(self):\n lang = self.lang if self.lang else settings.LANGUAGE_CODE\n old = translation.get_language()\n translation.activate(lang)\n yield\n translation.activate(old)",
"def update_translation(ctx, language=\"all\"):\n if language == \"all\":\n # -- CASE: Process/update all support languages (translations).\n DEFAULT_LANGUAGES = os.environ.get(\"SPHINXINTL_LANGUAGE\", None)\n if DEFAULT_LANGUAGES:\n # -- EXAMPLE: SPHINXINTL_LANGUAGE=\"de,ja\"\n DEFAULT_LANGUAGES = DEFAULT_LANGUAGES.split(\",\")\n languages = ctx.config.sphinx.languages or DEFAULT_LANGUAGES\n else:\n # -- CASE: Process only one language (translation use case).\n languages = [language]\n\n # -- STEP: Generate *.po/*.pot files w/ sphinx-build -b gettext\n build(ctx, builder=\"gettext\")\n\n # -- STEP: Update *.po/*.pot files w/ sphinx-intl\n if languages:\n gettext_build_dir = _sphinxdoc_get_destdir(ctx, \"gettext\").abspath()\n docs_sourcedir = ctx.config.sphinx.sourcedir\n languages_opts = \"-l \"+ \" -l \".join(languages)\n with ctx.cd(docs_sourcedir):\n ctx.run(\"sphinx-intl update -p {gettext_dir} {languages}\".format(\n gettext_dir=gettext_build_dir.relpath(docs_sourcedir),\n languages=languages_opts))\n else:\n print(\"OOPS: No languages specified (use: SPHINXINTL_LANGUAGE=...)\")",
"def translate_language(lang):\n english = ['en', 'eng', 'english']\n\n if lang.lower() not in english:\n if lang.lower() in LANGUAGES.keys():\n language = LANGUAGES[lang.lower()]\n else:\n language = lang.title()\n return language",
"def set_language(lang):\n # from django.utils.translation import activate\n # activate(lang)\n request.session['language'] = lang\n from .. import language\n language.set_language(lang)",
"def updateLanguage(self, lang):\n # if an unsupported language is requested default to English\n if lang in appC.supLang:\n selLang = appC.supLang[lang]\n else:\n selLang = wx.LANGUAGE_DEFAULT\n\n if self.locale:\n assert sys.getrefcount(self.locale) <= 2\n del self.locale\n\n # create a locale object for this language\n self.locale = wx.Locale(selLang)\n if self.locale.IsOk():\n self.locale.AddCatalog(appC.langDomain)\n # self.act_log.AppendText(\"updated\")\n else:\n self.locale = None",
"def set_languages(self, languages=list()):\n self._lang = languages",
"def make_language_keyboard():\n return telegram.make_keyboard(\n globalvars.lang.text('SUPPORTED_LANGUAGES'),\n 2,\n '')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Uses a players dictionary to create a countries dictionary in which each country is a key and a list of player names is the corresponding value.
|
def create_country_dict(player_dict):
country_dict = dict()
for chess_player, chess_player_data in player_dict.items():
country = chess_player_data[COUNTRY]
if country in country_dict:
name_list = country_dict[country]
name_list.append(chess_player)
else:
name_list = [chess_player]
country_dict[country] = name_list
return country_dict
|
[
"def build_player_lookup_table(self):\n players = {}\n for player in self.sport_player_model_class.objects.all():\n player_data = LookupItem(player).get_data()\n player_name = '%s %s' % (player.first_name, player.last_name)\n players[player_name] = player_data\n return players",
"def select(self, country_list):\n countries = dict()\n for c in country_list:\n tmp = self.get_country_data(c)\n if tmp is not None:\n countries[c] = tmp\n return countries",
"def _get_alternative_names_countries(self):\n names = dd(dict)\n pg.cur.execute(\"\"\"SELECT geonameid FROM countries\"\"\")\n for geonameid, in pg.cur.fetchall():\n pg.cur.execute(f\"\"\"SELECT name, full_name, population, country_geonameid, adm1_geonameid FROM geonames WHERE geonameid = {geonameid}\"\"\")\n res = pg.cur.fetchone()\n if res is None:\n continue\n name, full_name, population, country_geonameid, adm1_geonameid = res\n if name not in names:\n names[name] = {}\n\n geonameid_info = {\n 'type': 'country',\n 'abbreviations': [],\n \"toponym\": name,\n \"geonameid\": geonameid,\n \"population\": population,\n \"country_geonameid\": country_geonameid,\n \"adm1_geonameid\": adm1_geonameid\n }\n names[name][geonameid] = geonameid_info\n\n pg.cur.execute(f\"\"\"SELECT alternate_name, isolanguage, full_name FROM alternate_names WHERE geonameid = {geonameid}\"\"\")\n for name, isolanguage, full_name in pg.cur.fetchall():\n if name not in names:\n names[name] = {}\n if geonameid not in names[name]:\n names[name][geonameid] = geonameid_info\n if isolanguage == 'abbr':\n names[name][geonameid]['abbreviations'].append(full_name)\n return names",
"def list_products_countries(self, country_list):\r\n products_countries_dic = {}\r\n for country in country_list:\r\n products_countries_dic[country] = self.list_products_country(country)\r\n return products_countries_dic",
"def create_players(num_players: int) -> Dict[Name, Player]:\n num_fascists = int((num_players - 1) / 2)\n fascists = []\n players = {}\n names = list(range(num_players))\n\n for i in names:\n players[i] = Player(i, names, num_fascists)\n\n names = list(names)\n shuffle(names)\n hitler = names.pop()\n chosen_fascists = 1\n\n while chosen_fascists < num_fascists:\n fascists.append(names.pop())\n chosen_fascists += 1\n for player in names:\n players[player].set_roles(Role.LIBERAL, {Role.FASCIST: [], Role.HITLER: None})\n for player in fascists:\n known_roles = {Role.FASCIST: list(fascists), Role.HITLER: hitler}\n players[player].set_roles(Role.FASCIST, known_roles)\n\n h_fascists = fascists if num_players < 6 else []\n known_roles = {Role.FASCIST: h_fascists, Role.HITLER: hitler}\n players[hitler].set_roles(Role.HITLER, known_roles)\n return players",
"def _json_play_players(play, data):\r\n players = OrderedDict()\r\n for playerid, statcats in data.iteritems():\r\n if playerid == '0':\r\n continue\r\n for info in statcats:\r\n if info['statId'] not in nflgame.statmap.idmap:\r\n continue\r\n if playerid not in players:\r\n home = play.drive.game.is_home(info['clubcode'])\r\n if home:\r\n team_name = play.drive.game.home\r\n else:\r\n team_name = play.drive.game.away\r\n stats = nflgame.player.PlayPlayerStats(playerid,\r\n info['playerName'],\r\n home, team_name)\r\n players[playerid] = stats\r\n statvals = nflgame.statmap.values(info['statId'], info['yards'])\r\n players[playerid]._add_stats(statvals)\r\n return players",
"def create_name_code_dict():\n name_code_dict = {country.name: country.alpha_3 for country in pycountry.countries}\n dict_adjust = {'Czech Republic': 'CZE', 'Hong Kong SAR, China': 'HKG', 'Korea, Rep.': 'KOR',\n 'Macao SAR, China': 'MAC', 'OECD members': 'OED', 'Slovak Republic': 'SVK',\n 'China, Hong Kong Special Administrative Region': 'HKG', 'China, Macao Special Administrative Region': 'MAC',\n 'Republic of Korea': 'KOR', 'United Kingdom of Great Britain and Northern Ireland': 'GBR',\n 'United States of America': 'USA', 'OECD members': 'OAVG'}\n name_code_dict.update(dict_adjust)\n return name_code_dict",
"def player_names(self, live=False):\n if live:\n df = self._api_data.elements[[\"code\", \"first_name\", \"second_name\"]].copy()\n df[\"name\"] = df[\"first_name\"].str.cat(df[\"second_name\"], sep=\" \")\n return dict(zip(df[\"code\"], df[\"name\"]))\n else:\n fp = os.path.join(DIR_STRUCTURED_DATA, \"player_names.json\")\n with open(fp, \"r\") as f:\n return {int(k): v for k, v in json.load(f).items()}",
"def country_converter(name):\r\n converter = {'Curaçao': 'Curacao', 'Macao SAR': 'Macao', 'Czechia': 'Czech Republic',\r\n 'St Maarten': 'Sint Maarten', 'St Vincent and the Grenadines': 'Saint Vincent and the Grenadines',\r\n 'St Kitts and Nevis': 'Saint Kitts and Nevis', 'St Lucia': 'Saint Lucia',\r\n 'Virgin Islands, U.S.': 'US Virgin Islands', 'Hong Kong SAR': 'Hong Kong',\r\n 'Korea, South': 'South Korea', 'São Tomé and Príncipe': 'Sao Tome and Principe',\r\n 'Timor-Leste': 'East Timor', 'China (mainland)': 'China',\r\n 'Macedonia, North': 'Macedonia', 'United States': 'USA',\r\n 'United Kingdom': 'UK', 'Eswatini': 'Swaziland', 'Gambia, The': 'Gambia',\r\n 'United Arab Emirates': 'UAE', 'Congo, Republic of the': 'Congo',\r\n 'Korea, North': 'North Korea', 'Comoros': 'Comorros', 'Côte d\\'Ivoire': 'Cote d\\'Ivoire',\r\n 'Congo, the Democratic Republic of the': 'DR Congo', 'Palestinian Territory': 'Palestine',\r\n 'Central African Republic': 'CAR', 'Syria': 'Syrian Arab Republic'}\r\n if name in converter:\r\n return converter[name]\r\n else:\r\n return name",
"def _build_country_info(self):\n if not self.users_by_country:\n return {}\n\n country_data = {}\n for country in pycountry.countries:\n country_info = self.users_by_country.get(country.alpha_2)\n number_of_users = 0\n percentage_of_users = 0\n color_rgb = [247, 247, 247]\n if country_info is not None:\n if self.private:\n number_of_users = country_info[\"number_of_users\"] or 0\n percentage_of_users = country_info[\"percentage_of_users\"] or 0\n color_rgb = country_info[\"color_rgb\"] or [247, 247, 247]\n\n # Use common_name if available to be less political\n # offending (#310)\n try:\n country_name = country.common_name\n except AttributeError:\n country_name = country.name\n\n country_data[country.numeric] = {\n \"name\": country_name,\n \"code\": country.alpha_2,\n \"percentage_of_users\": percentage_of_users,\n \"color_rgb\": color_rgb,\n }\n\n if self.private:\n country_data[country.numeric][\n \"number_of_users\"\n ] = number_of_users\n\n return country_data",
"def get_countries(self):\n if self.db_connected:\n data = {}\n countries = self.cur.execute(\"SELECT id, key, name FROM countries ORDER BY name\")\n for country in countries.fetchall():\n data[country[0]] = {\n \"iso_id\" : country[1],\n \"name\" : country[2]\n }\n\n return data\n else:\n return False",
"def allCountries():",
"def player_mapping(player_id, players):\n player_color = 'w'\n player_nr = 'playerA'\n if players['playerA']['id'] != player_id:\n player_color = 'b'\n player_nr = 'playerB'\n return player_color, player_nr",
"def tag_country_basic(dict):\n\n from geotext import GeoText\n import pycountry\n\n places = GeoText(dict['full_text'])\n dict['cities'] = places.cities\n dict['nationalities'] = places.nationalities\n dict['countries_iso2'] = places.country_mentions\n\n dict['primary_country'] = \"\"\n if len(places.country_mentions) > 0:\n country = pycountry.countries.get(alpha_2=list(places.country_mentions)[0])\n dict['primary_country'] = [country.name, list(places.country_mentions)[0]]\n\n dict['countries'] = []\n while len(places.country_mentions) > 0:\n c = places.country_mentions.popitem(last=False)\n country = pycountry.countries.get(alpha_2=c[0])\n dict['countries'].append((country.name, c[0], c[1]))",
"def _load_country_2_continent(self):\n pg.cur.execute(\"SELECT geonameid, continents FROM countries\")\n return {\n country: [int(c) for c in continent.split(',')]\n for country, continent in pg.cur.fetchall()\n }",
"def get_id_dict(players):\r\n ids = {}\r\n for player in players:\r\n ids[player.id] = player\r\n return ids",
"def get_active_players(app_id: str) -> dict:\n query = {'application_id': app_id}\n host = 'https://api.worldoftanks'\n regions = {\n 'Russia': '.ru',\n 'Europe': '.eu',\n 'North America': '.com',\n 'Asia': '.asia'\n }\n end_point = '/wgn/servers/info/'\n\n player_count = {region: dict() for region in regions.keys()}\n for region, domain in regions.items():\n url = host + domain + end_point\n r = requests.get(url, params=query)\n for game, servers in r.json()['data'].items():\n player_count[region][game] = sum(server['players_online'] for server in servers)\n return player_count",
"def create_players(players):\n for player in players:\n player[\"height\"] = int(player[\"height\"][0:2])\n\n if player[\"experience\"] == \"YES\":\n player[\"experience\"] = True\n else:\n player[\"experience\"] = False\n\n guardians_split = player[\"guardians\"].split(\"and\")\n player[\"guardians\"] = [guardian.strip(\" \") for guardian in guardians_split]\n\n return players",
"def create_players(player_list):\n for i in range(draftkings_info.nrows):\n row = draftkings_info.row_values(i)\n if row[4] == 'UTIL':\n player_list.append(Player(row[2], row[7], row[5], 0))\n for p in player_list:\n for i in range(roto_info.nrows):\n row = roto_info.row_values(i)\n if p.get_name() == row[0]:\n p.set_proj_score(row[7])\n break\n return player_list",
"def set_country_populations_dict():\r\n\r\n pop2017dict = {}\r\n\r\n lines = country_pop.splitlines()[1:]\r\n for line in lines:\r\n data = line.split('\\t')\r\n pop2017dict.update({data[1] : (data[5], data[6])})\r\n return pop2017dict"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the average rating for the given players.
|
def get_average_rating(players, player_dict):
ratings = [player_dict[player][RATING] for player in players]
average = sum(ratings)/len(ratings)
return average
|
[
"def average_rating(self):\n ratings = GameRating.objects.filter(game=self)\n\n # Sum all of the ratings for the game\n total_rating = 0\n for rating in ratings:\n total_rating += rating.rating\n\n if len(ratings):\n return total_rating / len(ratings)\n\n # Calculate the averge and return it.\n # If you don't know how to calculate averge, Google it.",
"def player_stat_average(player_name, average_num_games):\n try:\n with open('players.json') as players_json:\n person_dict = json.load(players_json)\n player_id = search_player_id(person_dict, player_name)\n\n # player_id = players.find_players_by_full_name(player_name)[0]['id'] # hasnt been updated with rookies\n except:\n raise Exception(f'Failed to find player {player_name}')\n return\n try:\n player_gamelog = playergamelog.PlayerGameLog(player_id=str(player_id), season='2020',\n season_type_all_star='Regular Season')\n except:\n raise Exception(f'Failed to get data on player {player_name}')\n sleep(0.25)\n\n data = player_gamelog.get_data_frames()[0][required_stats]\n num_games_include = average_num_games if len(data.index) >= average_num_games else len(data.index)\n if num_games_include>0:\n data_points_mean = data.iloc[range(num_games_include), :].describe().loc[\n \"mean\"] # gets the category stats and finds mean from last x games\n else:\n data_points_mean = pd.Series(np.zeros(len(required_stats)),required_stats)\n data_points_mean = pd.concat([pd.Series({'Player_Name': player_name}), data_points_mean])\n\n return data_points_mean.rename(str(player_id)) # allows index to be player id in the dataframe",
"def _calculate_ratings_score(ratings):\n return round(mean(ratings), NUM_DECIMALS)",
"def calculateRatingMetrics(self) -> None:\r\n lineup = np.array([player.getRating() for player in self._players]) #NumPy array of each rating in the entire line-up\r\n defenders = np.array([player.getRating() for player in self._players if (player.getPosition() in self._positions['DEFENCE']) or\r\n (player.getPosition() in self._positions['GOALKEEPER'])]) #Subset of lineup, based on position\r\n midfielders = np.array([player.getRating() for player in self._players if player.getPosition() in self._positions['MIDFIELD']])\r\n forwards = np.array([player.getRating() for player in self._players if player.getPosition() in self._positions['FORWARD']])\r\n averages = []\r\n for player_group in [lineup, defenders, midfielders, forwards]:\r\n if not len(player_group): #When the length of the array is empty, 0 is added as a placeholder, this will take the average rating later\r\n averages.append(0)\r\n continue\r\n averages.append(np.mean(player_group)) #The mean is taken of each NumPy array\r\n self._average_ratings = averages",
"def average_age(players):\r\n# players = player_dates() \r\n all_teams = []\r\n for x in players:\r\n team = []\r\n for i in x:\r\n team_total_age = sum(float(i[-1]) for i in x)\r\n player_count = len(x)\r\n team_avg_age = team_total_age / player_count \r\n team.append(i[-2])\r\n team.append(team_avg_age)\r\n all_teams.append(team)\r\n# return(all_teams)\r\n teams_min_max_age(all_teams)",
"def calculate_mean(collection):\n ratings = []\n for game in collection['items']['item']:\n ratings.append(float(game['stats']['rating']['@value']))\n mean = sum(ratings)/len(ratings)\n return mean",
"def get_average_rating(self):\n connection = sqlite3.connect(\"db.sqlite3\")\n cursor = connection.cursor()\n\n cursor.execute(\"\"\"\n SELECT ROUND(AVG(vote_value), 1) FROM u_app_rating\n WHERE rated_doggo_id=?\"\"\", (self,))\n\n return cursor.fetchone()",
"def compute_average_user_ratings(user_ratings):\n\tave_ratings = {}\n\t# Your code here\n\n\tfor user in user_ratings:\n\t\tave_ratings[user] = np.mean(user_ratings[user].values())\n\n\treturn ave_ratings",
"def calc_mean_score(movies):\n ratings = [m.score for m in movies] # accessing the score values in list m\n mean = sum(ratings) / max(1, len(ratings)) # have used sum function to add all values of score; max will return the largest value\n # between the arg1 which is 1 and the len(list) which is higher than 1\n return round(mean, 1)",
"def calc_mean_score(movies):\n \n\n return round(sum(movie.score for movie in movies) /len(movies),1)",
"def compute_average_user_ratings(user_ratings):\n ave_ratings = coll.defaultdict(float)\n for key in user_ratings:\n ave_ratings[key] = np.mean(user_ratings[key].values())\n return ave_ratings",
"def get_avg_rating(self):\n if self.rated_show.all():\n return self.rated_show.aggregate(Avg('rating'))['rating__avg']\n else:\n return 0.00",
"def print_average_rating(params):\n info_list = get_info_from_dataset(['rating'], params)\n average = sum(float(loc[0]) for loc in info_list) / len(info_list)\n print(f'The average rating is {average}')\n print('-' * 80)",
"def print_average(players: list, averages_to_be_printed: list) -> None:\n # TODO: get this printing nicely with the players names\n if len(players) == len(averages_to_be_printed):\n i = 0\n print(\"Player\\tFree Throw Avg\")\n while i < len(players):\n print(players[i], \"\\t\", averages_to_be_printed[i])\n i += 1\n else:\n print(\"Data sets not compatible. Kick the author\")\n return",
"def average_review(self):\n reviews = ReviewRating.objects.filter(product=self, status=True).aggregate(average=Avg('rating'))\n avg = 0\n if reviews['average'] is not None:\n avg = float(reviews['average'])\n return avg",
"def calc_mean_score(movies):\r\n list_of_scores = []\r\n for m in movies:\r\n list_of_scores.append(m.score)\r\n return round(mean(list_of_scores), 1)",
"def Average(self):\n self.avgGPA = 0\n for stud in self.roster:\n self.avgGPA += stud.gpa()\n \n if len(self.roster) > 0 :\n GPA = ( (self.avgGPA) / (len(self.roster)) )\n \n return GPA",
"def get_elo_ratings(self):\n\n elo_ratings = dict()\n for _player in self._players:\n elo_ratings[_player] = _player.elo_rating\n return elo_ratings",
"def avg_support(self, fans):\n users = []\n for key in self.imdb:\n show = self.imdb[key]\n users.append(show.get(fans, 0))\n # get maximum no. of users\n max_users = max(users)\n\n for key in self.imdb:\n show = self.imdb[key]\n value = {fans: float(show.get(fans, 0)) / max_users * 10}\n self.update_grand_rating(key, value)",
"def get_average_rating(self, director_name):\n\n films = self.films_db_service.get_films(director_name)\n rating_sum = 0\n for film in films:\n rating_sum += film[\"rating\"]\n return Decimal(rating_sum/len(films)).quantize(Decimal('0.1'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prints information sorted by the keys of a_dict.
|
def print_sorted(a_dict, player_dict):
sorted_dict = sorted(a_dict.items())
for key, players in sorted_dict:
average_rating = get_average_rating(players, player_dict)
print("{} ({}) ({:.1f}):".format(key, len(players), average_rating))
for player in players:
rating = player_dict[player][RATING]
print("{:>40}{:>10d}".format(player, rating))
|
[
"def print_sorted_dictionary(dictionary):\n\n alpha_rest_list = sorted(dictionary.keys())\n\n for rest in alpha_rest_list:\n print \"%s is rated at %s.\" % (rest, dictionary[rest])",
"def sort_and_print(rating_dict):\n for key, value in sorted(rating_dict.items()):\n print(f\"{key} is rated at {value}.\")",
"def print_tag_dict(aDict):\n for pos in aDict:\n print(pos, \"words:\")\n for word in aDict[pos]:\n print(word, aDict[pos][word]) # print word and frequency\n print() # new line for spacing",
"def print_sorted_dictionary(my_dict):\n sorted_keys = sorted(my_dict)\n\n for k in sorted_keys:\n print(\"{}: {}\".format(k, my_dict[k]))",
"def pretty_print_key(self, key): \n data = json.dumps(self.data[key], sort_keys=True, indent=4 * ' ')\n print(\"{}\\n{}\".format(key, data))",
"def dict_prettyprint(dic, title):\n print title\n for k in dic.keys():\n print \"\\t%s : %s\" % (k, dic[k])\n print \"\"",
"def show_dictionary():",
"def print_dict(dictionary):\n for key in dictionary:\n print(f'{key}: {dictionary[key]}')",
"def display():\r\n\r\n print(f'\\n{\"State\":<20}{\"Capital\":<20}{\"Population\":<20}{\"Flower\":<20}')\r\n print()\r\n for state in sorted(state_info_dict):\r\n info_list = state_info_dict[state]\r\n capital = info_list[0]\r\n population = f'{info_list[1]:,}'\r\n flower = info_list[2]\r\n print(f'{state:<20}{capital:<20}{population:<20}{flower:<20}')",
"def pretty_print(arbi_dict):\n\tprint(json.dumps(arbi_dict,indent=4))",
"def print_dict_entries(dictionary, num=3):\n keys = list(dictionary.keys())[:num]\n\n for idx, key in enumerate(keys):\n print(str(idx+1) + ':\\t' + key)\n print(dictionary[key])\n print()",
"def pretty_print_dict(dictionary):\n for k, v in dictionary.items():\n res = ' {}: {}'.format(k, str(v)).replace('\\n', ',')\n print(res[:75] + '...' if len(res) > 74 else res)",
"def print_by_phone():\n print \"\\nSorted by numbers\"\n contact_values = sorted(contacts.values())\n for i in contact_values:\n for key, value in contacts.items():\n if value == i:\n print key + \" : \" + i",
"def print_key_pairs(v, title=\"Parameters\", print_function=None):\n items = v.items() if type(v) is dict else v\n print_function(\"=\" * 40)\n print_function(title)\n print_function(\"=\" * 40)\n for key,value in items:\n print_function(\"{:<15}: {:<10}\".format(key, value if value is not None else \"None\"))\n print_function(\"-\" * 40)",
"def sorted_dict(d):\n return \"{%s}\" % \", \".join(\"%s: %s\" % (repr(k),repr(v)) \\\n for k,v in sorted(d.iteritems()))",
"def feed_dict_debug_string(self, feed_dict):\n debug_str = 'feed_dict={\\n'\n feed_dict_plhs = [(plh, plh.name) for plh in feed_dict.keys()]\n feed_dict_plhs = sorted(feed_dict_plhs, key=lambda x: x[1])\n for plh, name in feed_dict_plhs:\n debug_str += '{}: \\n{}\\n'.format(plh, feed_dict[plh])\n debug_str += '}'\n return debug_str",
"def print_as_table(data: dict, *, capitalize: bool = False):\n # Get the largest key\n size = 0\n for key in data.keys():\n if len(key) > size:\n size = len(key)\n\n # Now, time to start printing\n for key, value in data.items():\n key = str(key)\n value = str(value)\n\n if capitalize:\n key = key[0].upper() + key[1:]\n\n print(key + \":\" + (\" \" * (size - len(key) + 3)) + \" \" + value)",
"def pretty_print_table(hashtable):\n for key,val in hashtable.items():\n values = [\",\".join(map(str, v)) for v in val]\n print(key + \"\\t\" + \"\\t\".join(values))",
"def get_anchor_results_keys_sorted(self):\r\n dict = self.get_tpchronosreverse_dict()\r\n keys_sorted = sorted(dict.keys())\r\n anchor_results_keys_sorted = []\r\n for key in keys_sorted:\r\n anchor_results_keys_sorted.append(dict[key])\r\n return anchor_results_keys_sorted",
"def print_keys(entry: dict, depth: int) -> None:\n for k, v in entry.items():\n print((' ' * depth) + k)\n if isinstance(v, dict):\n print_keys(entry[k], depth + 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute the expected value based on held_dice given that there are num_free_dice to be rolled, each with num_die_sides.
|
def expected_value(held_dice, num_die_sides, num_free_dice):
outcomes = range(1, num_die_sides + 1, 1)
all_free_dices = gen_all_sequences(outcomes, num_free_dice)
total_score = 0
for dummy_free_dice in all_free_dices:
total_score += score(held_dice + dummy_free_dice)
return float(total_score) / float(len(all_free_dices))
|
[
"def expected_value(held_dice, num_die_sides, num_free_dice):\n current_expected_value = 0\n # Generate possible sequences from free dice\n possible_sequences = gen_all_sequences(list(range(1, num_die_sides + 1)), num_free_dice)\n # Score every sequence with current hold dice\n for sequence in possible_sequences:\n full_hand = list(held_dice + sequence)\n hand_score = score(full_hand)\n current_expected_value += hand_score\n return current_expected_value / float(len(possible_sequences))",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n expected = 0\n outcomes = range(1, num_die_sides + 1)\n all_sequences = [held_dice + seq for seq in gen_all_sequences(outcomes, num_free_dice)]\n for item in all_sequences:\n expected += score(item) * (1 / float(num_die_sides ** num_free_dice))\n return expected",
"def strategy(hand, num_die_sides):\n ans = ()\n expected_val = 0\n held_set = gen_all_holds(hand)\n for held_dice in held_set:\n num_free_dice = len(hand) - len(held_dice)\n tmp_exp = expected_value(held_dice, num_die_sides, num_free_dice)\n if tmp_exp > expected_val:\n expected_val = tmp_exp\n ans = held_dice\n return (expected_val, ans)",
"def strategy(hand, num_die_sides):\n all_holds_set = gen_all_holds(hand)\n final_expected_value = 0\n final_held_dice = tuple()\n for held_dice in all_holds_set:\n temp_expected_value = expected_value(held_dice, num_die_sides, len(hand) - len(held_dice))\n if temp_expected_value > final_expected_value:\n final_expected_value = temp_expected_value\n final_held_dice = held_dice\n return (final_expected_value, final_held_dice)",
"def strategy(hand, num_die_sides):\n all_holds = gen_all_holds(hand)\n max_value = 0\n req_hold = None\n for each_hold in all_holds:\n value = expected_value(each_hold, num_die_sides, len(hand) - len(each_hold))\n if value > max_value:\n max_value = value\n req_hold = each_hold\n print max_value\n print req_hold\n return (max_value, req_hold)",
"def hope(dices):\n # with only 1 dice left, it should at least be 4\n if dices == 1:\n return 4\n # with only 2 dices left, they should at least be 5\n if dices == 2:\n return 5\n # you'd want 6 in any other case\n else:\n return 6",
"def _measure_probability(sides: int, target_val: int, n_trials: int = _default_rolls, **kwargs) -> float:\n\n # Using a functional notation to avoid storing the whole array\n hits = sum(map(lambda x: roll_die(sides, **kwargs)[0] == target_val, range(n_trials)))\n return hits / n_trials",
"def roll_die(num_sides):\r\n result = random.randrange(0, num_sides) + 1\r\n return result",
"def dice_roll(player_count: int, sides=6):\n\n if player_count < 1:\n return print('wrong players number')\n if sides < 2:\n return print('should be more sides on dice')\n\n list_of_num = [0] * player_count\n index_of_max = [i for i, j in enumerate(list_of_num)]\n not_max = []\n rounds = 0\n\n while len(index_of_max) != 1: #\n\n for i in range(0, len(not_max)):\n list_of_num[not_max[i]] = 0\n for i in range(0, len(index_of_max)):\n list_of_num[index_of_max[i]] = random.randint(1, sides)\n\n rounds += 1\n max_value = max(list_of_num)\n index_of_max = [i for i, j in enumerate(list_of_num) if j == max_value]\n not_max = [i for i, j in enumerate(list_of_num) if j != max_value]\n\n winner = 1 + index_of_max[0]\n\n print('Winner: Player № ' + str(winner))\n print('Rounds: ' + str(rounds))",
"def final_strategy(score, opponent_score):\n def E(n):\n \"\"\" Returns the expected score (without special rules applied) for rolling N six sided die\n \"\"\"\n return pow((5/6),n)*4*n\n\n def E_4(n):\n \"\"\" Returns the expected score (without special rules applied) for rolling N four sided die\n \"\"\"\n return pow((3/4),n)*3*n\n\n expected_scores = [] # array of expected values of scores. index refers to number of dice rolled\n d = select_dice(score,opponent_score) # which dice the current player will roll\n x = take_turn(0,opponent_score) # the points scored if the current player rolls 0 dice\n y = select_dice(x+score,opponent_score) # the dice the opponent must use if the current player rolls 0 dice\n z = num_allowed_dice(x+score,opponent_score) # the number of allowed dice the opponent will be allowed if the current player rolls 0 dice\n expected_scores.append(x) # simulate value of rolling zero dice and insert as first element of array\n # Fill in array of expected values\n for i in range(1,11):\n if d == six_sided_dice:\n expected_scores.append(floor(E(i)))\n else:\n expected_scores.append(floor(E_4(i)))\n\n m = max(expected_scores) # Find the maximum of the expected scores.\n\n if (x >= goal-score) or ((abs(score - opponent_score) < 12) and (y == four_sided_dice or z == 1)):\n return 0\n elif ((x >= 5) and (y == four_sided_dice or z == 1)):\n return 0\n elif ((opponent_score - score) >= 20) and (d == four_sided_dice):\n return 3\n elif (opponent_score - score) >= 20:\n return 8\n elif (score - opponent_score) >= 20:\n return 3\n else:\n return expected_scores.index(m) # Return the index of the maximum expected score.",
"def fives(dice):\n return dice_counts(dice)[5] * 5",
"def test_roll_dice(self):\n dice = Dice()\n exp = random.randint(dice.lowest, dice.highest)\n res = dice.lowest <= exp <= dice.highest\n self.assertIn(exp, self.faces)\n self.assertTrue(res)\n\n # another assertion\n dice.roll_dice()\n self.assertIn(dice.rolled_dice, self.faces)",
"def roll_die(self) -> None:\n self.face_value = random.randint(1, self.number_of_sides)",
"def WoundsDealt(self):\n saved = len([die for die in self.dice if die >= self.save])\n return len(self.dice) - saved",
"def test_fair():\n die = Die()\n \n # Set the number of rolls\n rolls = 1000000\n \n # Create a dictionary keep tally\n tally={}\n for i in range(1,7):\n tally[i] =0\n #Roll the dice 'rolls' times\n for i in range(0,rolls):\n tally[die.roll()]+=1\n \n # Assert that the probability is correct\n for i in range(1,7):\n assert tally[i]/rolls == pytest.approx(1/6, 1e-2)",
"def rollthedices(self):\n self.result = self.dices()\n print(\"Throwing the dices...\\nThe sum of the two dices is %s\" % self.result)\n\n # Are there winners?\n self.results = []\n for item in self.bets:\n if item == self.result:\n self.results.append(True)\n else:\n self.results.append(False)\n\n winners = self.results.count(True)\n if winners == 1:\n print(\"There is one winner.\")\n elif winners > 1:\n print(\"There are %s winners\" % winners)\n else:\n print(\"There is no winner.\")\n return self.results",
"def biased_die_roll(die_state, choice):\n p = list()\n die_total = sum(die_state)\n for i in die_state:\n p.append(i*1.0/die_total)\n return choice(a=die_state, p=p)",
"def fours(dice):\n return dice_counts(dice)[4] * 4",
"def setUpDiceFromUser():\n sixDie = sixSidedDie()\n tenDie = tenSidedDie()\n twentyDie = twentySidedDie()\n\n return sixDie, tenDie, twentyDie"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute the hold that maximizes the expected value when the discarded dice are rolled.
|
def strategy(hand, num_die_sides):
all_holds_set = gen_all_holds(hand)
final_expected_value = 0
final_held_dice = tuple()
for held_dice in all_holds_set:
temp_expected_value = expected_value(held_dice, num_die_sides, len(hand) - len(held_dice))
if temp_expected_value > final_expected_value:
final_expected_value = temp_expected_value
final_held_dice = held_dice
return (final_expected_value, final_held_dice)
|
[
"def strategy(hand, num_die_sides):\n all_holds = gen_all_holds(hand)\n max_value = 0\n req_hold = None\n for each_hold in all_holds:\n value = expected_value(each_hold, num_die_sides, len(hand) - len(each_hold))\n if value > max_value:\n max_value = value\n req_hold = each_hold\n print max_value\n print req_hold\n return (max_value, req_hold)",
"def strategy(hand, num_die_sides):\n ans = ()\n expected_val = 0\n held_set = gen_all_holds(hand)\n for held_dice in held_set:\n num_free_dice = len(hand) - len(held_dice)\n tmp_exp = expected_value(held_dice, num_die_sides, num_free_dice)\n if tmp_exp > expected_val:\n expected_val = tmp_exp\n ans = held_dice\n return (expected_val, ans)",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n current_expected_value = 0\n # Generate possible sequences from free dice\n possible_sequences = gen_all_sequences(list(range(1, num_die_sides + 1)), num_free_dice)\n # Score every sequence with current hold dice\n for sequence in possible_sequences:\n full_hand = list(held_dice + sequence)\n hand_score = score(full_hand)\n current_expected_value += hand_score\n return current_expected_value / float(len(possible_sequences))",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = range(1, num_die_sides + 1, 1)\n all_free_dices = gen_all_sequences(outcomes, num_free_dice)\n total_score = 0\n for dummy_free_dice in all_free_dices:\n total_score += score(held_dice + dummy_free_dice)\n return float(total_score) / float(len(all_free_dices))",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n expected = 0\n outcomes = range(1, num_die_sides + 1)\n all_sequences = [held_dice + seq for seq in gen_all_sequences(outcomes, num_free_dice)]\n for item in all_sequences:\n expected += score(item) * (1 / float(num_die_sides ** num_free_dice))\n return expected",
"def WoundsDealt(self):\n saved = len([die for die in self.dice if die >= self.save])\n return len(self.dice) - saved",
"def test_fair():\n die = Die()\n \n # Set the number of rolls\n rolls = 1000000\n \n # Create a dictionary keep tally\n tally={}\n for i in range(1,7):\n tally[i] =0\n #Roll the dice 'rolls' times\n for i in range(0,rolls):\n tally[die.roll()]+=1\n \n # Assert that the probability is correct\n for i in range(1,7):\n assert tally[i]/rolls == pytest.approx(1/6, 1e-2)",
"def compute_expected_return(self, max_bet=2., policy=None, ties=\"win\"):\n r = 0.\n num_hands = len(self.hands)\n if policy is None:\n for hand in self.hands:\n wp = self.hand_to_win_prob[hand]\n if wp > 0.5:\n if ties == \"win\":\n r += wp*max_bet - (1-wp)*max_bet\n elif hand[0] == hand[1]: # don't win against self \n r += (wp - 1./num_hands)*max_bet - (1-wp)*max_bet\n else: # don't win against self or other tie\n r += (wp - 2./num_hands)*max_bet - (1-wp)*max_bet\n\n else:\n for hand in self.hands:\n bet = policy[hand]\n wp = self.hand_to_win_prob[hand]\n r += wp*bet - (1-wp)*bet\n\n r /= len(self.hands)\n return r",
"def large_straight(dice):\n if sorted(dice) == [2, 3, 4, 5, 6]:\n return sum(dice)\n else:\n return 0",
"def _measure_probability(sides: int, target_val: int, n_trials: int = _default_rolls, **kwargs) -> float:\n\n # Using a functional notation to avoid storing the whole array\n hits = sum(map(lambda x: roll_die(sides, **kwargs)[0] == target_val, range(n_trials)))\n return hits / n_trials",
"def hope(dices):\n # with only 1 dice left, it should at least be 4\n if dices == 1:\n return 4\n # with only 2 dices left, they should at least be 5\n if dices == 2:\n return 5\n # you'd want 6 in any other case\n else:\n return 6",
"def final_strategy(score, opponent_score):\n def E(n):\n \"\"\" Returns the expected score (without special rules applied) for rolling N six sided die\n \"\"\"\n return pow((5/6),n)*4*n\n\n def E_4(n):\n \"\"\" Returns the expected score (without special rules applied) for rolling N four sided die\n \"\"\"\n return pow((3/4),n)*3*n\n\n expected_scores = [] # array of expected values of scores. index refers to number of dice rolled\n d = select_dice(score,opponent_score) # which dice the current player will roll\n x = take_turn(0,opponent_score) # the points scored if the current player rolls 0 dice\n y = select_dice(x+score,opponent_score) # the dice the opponent must use if the current player rolls 0 dice\n z = num_allowed_dice(x+score,opponent_score) # the number of allowed dice the opponent will be allowed if the current player rolls 0 dice\n expected_scores.append(x) # simulate value of rolling zero dice and insert as first element of array\n # Fill in array of expected values\n for i in range(1,11):\n if d == six_sided_dice:\n expected_scores.append(floor(E(i)))\n else:\n expected_scores.append(floor(E_4(i)))\n\n m = max(expected_scores) # Find the maximum of the expected scores.\n\n if (x >= goal-score) or ((abs(score - opponent_score) < 12) and (y == four_sided_dice or z == 1)):\n return 0\n elif ((x >= 5) and (y == four_sided_dice or z == 1)):\n return 0\n elif ((opponent_score - score) >= 20) and (d == four_sided_dice):\n return 3\n elif (opponent_score - score) >= 20:\n return 8\n elif (score - opponent_score) >= 20:\n return 3\n else:\n return expected_scores.index(m) # Return the index of the maximum expected score.",
"def biased_die_roll(die_state, choice):\n p = list()\n die_total = sum(die_state)\n for i in die_state:\n p.append(i*1.0/die_total)\n return choice(a=die_state, p=p)",
"def dice_roll(player_count: int, sides=6):\n\n if player_count < 1:\n return print('wrong players number')\n if sides < 2:\n return print('should be more sides on dice')\n\n list_of_num = [0] * player_count\n index_of_max = [i for i, j in enumerate(list_of_num)]\n not_max = []\n rounds = 0\n\n while len(index_of_max) != 1: #\n\n for i in range(0, len(not_max)):\n list_of_num[not_max[i]] = 0\n for i in range(0, len(index_of_max)):\n list_of_num[index_of_max[i]] = random.randint(1, sides)\n\n rounds += 1\n max_value = max(list_of_num)\n index_of_max = [i for i, j in enumerate(list_of_num) if j == max_value]\n not_max = [i for i, j in enumerate(list_of_num) if j != max_value]\n\n winner = 1 + index_of_max[0]\n\n print('Winner: Player № ' + str(winner))\n print('Rounds: ' + str(rounds))",
"def scoring_when_all_pins_down(game, result, frame, total_frames, max_pins_number, roll_index):\n if is_a_spare(game[roll_index]):\n result += get_value(game[roll_index+1])\n elif is_a_strike(game[roll_index]):\n result += get_value(game[roll_index+1])\n if is_a_spare(game[roll_index+2]):\n result += max_pins_number - get_value(game[roll_index+1])\n else:\n result += get_value(game[roll_index+2])\n return result",
"def most_probable_value(self) -> int:\n if self.count_0 >= self.count_1:\n return 0\n else:\n return 1",
"def get_longest_run(die, trial_rolls):\n unique_results = list(set(die.possibleVals[:]))\n\n if len(trial_rolls) == 0:\n return\n\n max_run = 0\n current_run = []\n max_roll = 0\n for r in trial_rolls:\n # compare r to last roll\n try:\n if r == current_run[-1]:\n current_run.append(r)\n else:\n current_run = [r]\n\n # nothing in current run gives an IndexError\n except IndexError:\n current_run.append(r)\n if len(current_run) > max_run:\n max_run = len(current_run)\n max_roll = r\n return max_roll, max_run",
"def test_roll_B(self):\n holding_value = self.game.angryDieB.currentValue\n for i in range(10):\n self.game.roll_the_dice(\"b\")\n if self.game.angryDieB.currentValue != holding_value:\n self.assertTrue(True)\n print(\"Die B can be rolled\")\n return\n self.assertTrue(False,\"Die value did not change from holding value for 10 rolls.\")",
"def solution(A):\n n = len(A)\n MAX_ROLL = 6 # 6 sided dice means we can look back a max of 6 spaces.\n exit_score = [A[0]] * n # Set all exit scores to start score\n for i in range(1, n):\n # At each point slice the previous 6 exit scores and choose the best\n spread = exit_score[max(i-MAX_ROLL, 0):i]\n # print(spread, max(spread))\n exit_score[i] = A[i] + max(spread)\n return exit_score[-1]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get elements inside a nested dict, based on a dict query. The query is a string of keys separated by '__'. traverse_dict(foo, 'a__b__c') is roughly equivalent to foo['a']['b']['c'] but short-circuits and returns None if anything along the query path is missing or None.
|
import typing as T
from collections.abc import Mapping

_T = T.TypeVar('_T')


def traverse_dict(obj: T.Mapping[str, _T], query: str) -> T.Optional[_T]:
    # Split the '__'-separated query into individual keys.
    query_split = query.split('__')
    cur_obj: T.Optional[T.Union[_T, T.Mapping[str, _T]]] = obj
    for name in query_split:
        assert isinstance(cur_obj, Mapping)  # help mypy
        cur_obj = cur_obj.get(name, None)
        # Short-circuit as soon as a key is missing or maps to None.
        if cur_obj is None:
            return None
    assert not isinstance(cur_obj, Mapping)  # help mypy
    return cur_obj
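
A minimal usage sketch of the function above; the `config` dict and its keys are hypothetical illustration data, not part of the original:

import_example = None  # assumes traverse_dict and its imports from the block above

config = {'db': {'primary': {'host': 'localhost'}, 'replica': None}}

print(traverse_dict(config, 'db__primary__host'))   # 'localhost'
print(traverse_dict(config, 'db__replica__host'))   # None (short-circuits at the None replica)
print(traverse_dict(config, 'db__missing__host'))   # None (missing key)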
|
[
"def access(dictionary, nested_keys):\r\n\r\n for index, key in enumerate(nested_keys):\r\n\r\n print index, key\r\n\r\n try:\r\n if dictionary.has_key(key):\r\n if nested_keys[index + 1:] != []:\r\n return access(dictionary[key], nested_keys[index + 1:])\r\n else:\r\n return dictionary[key]\r\n else:\r\n return False\r\n except AttributeError: # at this point, dictionary is a list, perhaps containing dictionaries\r\n if key < len(dictionary):\r\n if nested_keys[index + 1:] != []:\r\n return access(dictionary[key], nested_keys[index + 1:])\r\n else:\r\n return dictionary[key]\r\n else:\r\n return False",
"def navigate_by_keys(dictionary: dict, subkeys: list):\n subdict = dictionary\n value = None\n for key in subkeys:\n value = value_or_none(subdict, key)\n if value:\n subdict = value\n\n return value",
"def _nested_get(key: str, lookup_dict: Dict, remove_plusplusplus=True, raise_keyerror=True):\n result = lookup_dict\n for name in key.split(\".\"):\n try:\n result = result[name]\n except KeyError:\n if raise_keyerror:\n raise KeyError(f\"Error accessing {name} for key {key}\")\n return None\n\n if remove_plusplusplus and isinstance(result, collections.abc.Mapping):\n\n def do_remove_plusplusplus(option):\n if isinstance(option, collections.abc.Mapping):\n option.pop(\"+++\", None)\n for values in option.values():\n do_remove_plusplusplus(values)\n\n result = copy.deepcopy(result)\n do_remove_plusplusplus(result)\n\n return result",
"def nested_lookup(nested_key: Text,\n nested_dict: Dict[Text, Any],\n delimiter: Text = '/') -> tf.Tensor:\n # Parse the input string.\n keys = nested_key.split(delimiter)\n # Return the nested value.\n value = nested_dict\n for key in keys:\n try:\n value = value[key]\n except KeyError:\n raise KeyError(f'Key \\'{key}\\' as a part of nested key \\'{nested_key}\\' '\n 'not found during nested dictionary lookup, out of '\n f'available keys: {nested_keys(nested_dict)}')\n return value",
"def sub_dict(d:dict, paths:list, *, compl=False):\n# k = keys[0]\n# assert type(k) in {list, tuple}\n# res = nested_dict(k, fsl.utils.data.get_item(d, k))\n res = {}\n if compl:\n pp = []\n for p in get_paths(d):\n for q in paths:\n if q == p[:len(q)]:\n break\n else:\n pp.append(p)\n else:\n pp = paths\n\n for k in pp:\n # assert type(k) in {list, tuple}\n setitem(res, k, getitem(d, k))\n return res",
"def get_val_in_dict_dotted(field: str, dicto: Dict[str, Any]) -> Any:\n try:\n if \".\" not in field: # simple field; ex: \"logical_name\", \"sha512\"\n return dicto[field] # possible KeyError/TypeError\n\n # compound field; ex: \"checksum.sha512\"\n parent, child = field.split(\".\", maxsplit=1) # ex: \"checksum\" & \"sha512\"\n\n # ex: is \"sha512\" in \"checksum\"'s dict?\n # possible KeyError/TypeError\n return get_val_in_dict_dotted(child, dicto[parent])\n\n except (KeyError, TypeError) as e:\n raise DottedKeyError() from e",
"def test_second_level_retrieval(nested_dict):\n\n l = ['first', 'second']\n\n val = get_nested_value(d=nested_dict, keys=l)\n\n assert val == {'third': {'fourth': 'leaf', 'another': 'label'} }",
"def jsonpaths_in_dict(dic, path='$', *, notation='dot'):\n for k, v in dic.items():\n if notation == 'dot':\n json_path = f\"{path}.{k}\"\n elif notation == 'bracket':\n json_path = f\"{path}['{k}']\"\n else:\n json_path = None\n ValueError(f\"Notation: '{notation}' is not supported\")\n\n if isinstance(v, dict):\n for json_path_ in jsonpaths_in_dict(\n v, json_path, notation=notation):\n yield json_path_\n else:\n yield json_path",
"def get_deep_item(d, k, sep='.'):\n if not isinstance(k, basestring):\n raise KeyError('expected string, got {0}: {1}'.format(type(k).__name__, k))\n val = d\n # recursively look for dictionary values, then\n # return the last value\n for key in k.split(sep):\n if key and isinstance(val, Mapping) and key in val:\n val = val.__getitem__(key)\n else:\n raise KeyError(k)\n return val",
"def traverse_keys(d, include_keys=None, exclude_keys=None):\n include_keys = include_keys or []\n exclude_keys = exclude_keys or []\n\n def traverse_helper(d, keys):\n if isinstance(d, dict):\n for k in d.keys():\n yield from traverse_helper(d[k], keys + [k])\n elif isinstance(d, list):\n for i in d:\n yield from traverse_helper(i, keys)\n else:\n yield keys, d\n\n if include_keys:\n for k in include_keys:\n for val in key_value(d, k):\n if val:\n # only yield non-empty value\n # when val is None, it could be either:\n # 1. k is not found in d\n # 2. the value of k in d is indeed None\n # For now, we cannot tell which case, just skip it\n yield k, val\n else:\n for kl, val in traverse_helper(d, []):\n key = \".\".join(kl)\n if key not in exclude_keys:\n yield key, val",
"def getitem(d:dict, k:list):\n # retrieve from a nested dictionary\n # possible to use dict.get() or operator.getitem()\n return functools.reduce(dict.__getitem__, k, d)",
"def search_dict(partial: [dict, list], key):\n if isinstance(partial, dict):\n for k, v in partial.items():\n if k == key: yield v\n else: yield from search_dict(v, key)\n elif isinstance(partial, list):\n for item in partial: yield from search_dict(item, key)",
"def get_subdict(D, path_vec):\n if path_vec:\n try:\n return get_subdict(D[path_vec[0]], path_vec[1:])\n except:\n print(f'problem accessing subpath {path_vec} of dictionary in get_subdict')\n else:\n return D",
"def value_from_dict_recursively(dictionary: dict, key: str) -> str:\n for k, v in dictionary.items():\n if k == key:\n return v\n elif isinstance(v, dict):\n value = value_from_dict_recursively(v, key)\n if value is not None:\n return value\n else:\n continue\n else:\n continue",
"def deep_in(key_tup, dict_obj):\n d = dict_obj\n for k in key_tup:\n if isinstance(d, dict) and k in d:\n d = d[k]\n else:\n return False\n else:\n return True",
"def deep_get(target_dict, key_list):\n for key in key_list:\n if not isinstance(target_dict, dict) or key not in target_dict:\n return None\n target_dict = target_dict[key]\n return target_dict",
"def get_recursively(search_dict, field):\n fields_found = []\n\n for key, value in search_dict.iteritems():\n if field in key:\n fields_found.append(key)\n elif isinstance(value, dict):\n results = get_recursively(value, field)\n for result in results:\n fields_found.append(result)\n elif isinstance(value, list):\n for item in value:\n if isinstance(item, dict):\n more_results = get_recursively(item, field)\n for another_result in more_results:\n fields_found.append(another_result)\n return fields_found",
"def mapping_extract_value(mapping: Dict[str, Any], traverse: List[str]):\n return {\"value\": traverse_get(mapping, *traverse)}",
"def find(key, dictionary):\n if hasattr(dictionary, \"items\"):\n for k, v in dictionary.items():\n if k == key:\n yield v\n\n if isinstance(v, dict):\n for result in find(key, v):\n yield result\n\n elif isinstance(v, list):\n for d in v:\n for result in find(key, d):\n yield result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Scans the log folder for missing files
|
def scan_logfiles(self):
    import os
    import itertools

    def ranges(i):
        # Collapse a sorted list of indices into (start, end) runs of consecutive values.
        for a, b in itertools.groupby(enumerate(i), lambda x_y: x_y[1] - x_y[0]):
            b = list(b)
            yield b[0][1], b[-1][1]

    expected = list(range(1, self.njobs + 1))
    existing = os.listdir(self.folder_log)
    found = [idx for idx in expected if self.logfile(idx) in existing]
    found = list(ranges(found))
    missing = [idx for idx in expected if self.logfile(idx) not in existing]
    num_missing = len(missing)
    missing = list(ranges(missing))
    print('------------------------------')
    print('missing logfiles:')
    print(',\n'.join([
        '{:}-{:}'.format(*tup) if tup[0] != tup[1] else '{:}'.format(tup[0])
        for tup in missing
    ]))
    print('total missing files:', num_missing)
    print('------------------------------')
    return found, missing
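
The consecutive-run grouping used by ranges() is the standard itertools.groupby idiom (value minus position is constant within a run). A standalone sketch with hypothetical indices:

import itertools

def ranges(indices):
    # Group consecutive integers into (start, end) pairs.
    for _, run in itertools.groupby(enumerate(indices), lambda pair: pair[1] - pair[0]):
        run = list(run)
        yield run[0][1], run[-1][1]

print(list(ranges([1, 2, 3, 7, 8, 12])))  # [(1, 3), (7, 8), (12, 12)]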
|
[
"def _purge_useless_logs(self):\n for logf in os.listdir(self.resultsdir):\n\n log_path = os.path.join(self.resultsdir, logf)\n\n # Remove empty files\n if os.path.isfile(log_path) and os.path.getsize(log_path) == 0:\n os.remove(log_path)\n\n # Remove other files containing useless information\n elif logf.endswith(\"-srpm-stdout.log\"):\n with open(log_path) as f:\n data = f.read(4096)\n if re.match(\"Downloading [^\\n]*\\n\\n\\nWrote: [^\\n]\", data):\n os.remove(log_path)",
"def scan_logs(self):\n self.log.debug('Scan log files')\n pattern = self.cfg['remote'][self.url_key + '.log.pattern']\n log_dir = self.cfg['remote'][self.url_key + '.log.dir']\n log_files = self.cfg['remote'][self.url_key + '.log.files']\n if pattern is None or log_dir is None or log_files is None:\n self.log.info('No log file scan configured or incomplete')\n self.__add_to_ssh_message('No log file scan configured or incomplete')\n return\n cmd = 'grep -i \"{0}\" {1}/{2}'.format(pattern, log_dir, log_files)\n grep = self.__ssh_command(cmd)\n self.record['logs'] = grep",
"def ScanLogFiles(self):\n new_log_files = []\n paths = glob.glob(self.args.path)\n for path in paths:\n if path not in self.log_files:\n # Since we replace os.sep with '_' in the offset filename, it is\n # possible for two different paths to collide with the same offset\n # filename. For example:\n #\n # /some/file/a => _some_file_a\n # /some/file_a => _some_file_a\n #\n # We add a small CRC string (calculated from before the '_' replacement)\n # to (significantly) reduce the probability of offset filenames\n # colliding under this condition:\n #\n # /some/file/a => _some_file_a_0744b918\n # /some/file_a => _some_file_a_287bc0ee\n crc = '{:08x}'.format(abs(zlib.crc32(path.encode('utf-8'))))\n offset_file = '%s_%s' % (path.replace(os.sep, '_'), crc)\n log_file = LogFile(\n logger_name=self.logger.name,\n args=self.args,\n path=path,\n offset_path=os.path.join(self.GetDataDir(), offset_file),\n parse_and_emit_fn=self.ParseAndEmit)\n self.log_files[path] = log_file\n new_log_files.append(log_file)\n return new_log_files",
"def scan_for_new_files(self):\r\n\r\n self.db_manager.scan_for_new_files(TOP_LEVEL)",
"def test_directory_no_log(self):\n self.assertFalse(valet.view(self.test_subdir)\n .find(self.test_subdir + \"?log\") >= 0)",
"def _get_log_files(self):\n logs = glob.glob(os.path.join(self.md_dir, 'log*.lammps*'))\n logs.sort()\n if self.temper:\n logs.remove(os.path.join(self.md_dir, 'log.lammps'))\n logs = sorted(logs, key=lambda x: int(re.findall(r'\\d+', os.path.basename(x))[0]))\n log_data_store = []\n if len(logs) > 1:\n logs = sorted(logs, key=lambda x: int(re.findall(r'\\d+', os.path.basename(x))[0]))\n for log in logs:\n with open(os.path.join(self.md_dir, log), 'r') as log_file:\n lines = log_file.readlines()\n for i, line in enumerate(lines):\n if \"WARNING\" in line:\n del lines[i]\n log_data_store.append(lines)\n return log_data_store",
"def check_logfile_path(self):\n parrent_logdir = os.path.dirname(self.LOG_DIR)\n if not os.path.exists(parrent_logdir):\n logging.critical('No such directory: \"{}\"'.format(parrent_logdir))\n logging.critical(self.SD_MP_ERR)\n raise FileNotFoundError('No such directory: \"{}\"'.format(parrent_logdir))\n if not os.path.exists(self.LOG_DIR):\n os.mkdir(self.LOG_DIR)",
"def scanDirectory(self):\n\n file_path = self.file_path\n if file_path:\n for root, dirs, files in os.walk(file_path):\n if files:\n for file in files:\n temp_path = os.path.join(root, file)\n self.file_list.append(temp_path)\n\n if self.deepScan is False:\n break",
"def backup_failed_log(self, d):\n logs = [x for x in os.listdir(d) if x.endswith('.log')]\n for lg in logs:\n shutil.copy(os.path.join(d, lg), self.d_failed_nicad_logs)",
"def ScanLogFilesTask(self):\n self.debug('Scanning for log files after %d seconds elapsed...',\n self.args.new_file_poll_interval)\n new_log_files = self.ScanLogFiles()\n if new_log_files:\n self.info('Scanned for log files, %d new files detected',\n len(new_log_files))\n next_scan = time_utils.MonotonicTime() + self.args.new_file_poll_interval\n new_process_tasks = [\n (0, self.ProcessLogFileTask, [log_file]) for log_file in new_log_files]\n return [(next_scan, self.ScanLogFilesTask, [])] + new_process_tasks",
"def _clear_logs(self):\n logfile = \"%s/postmaster.log\" % self.checker._logs_dir\n if not os.path.exists(logfile):\n # here i didn't check it is a file or a dir\n return\n logger.info(\"Cleanup the existed postmaster log\")\n open(logfile, 'w').close()",
"def scan_dirs(self, dirs):\n self.ip2i.reset_statistics()\n\n # Locate monthly log archives and peek inside for log files.\n months = {}\n all_logs = []\n for dir in dirs:\n for zip_path in iglob(\"%s/access_log_*.zip\" % dir):\n st = os.stat(zip_path)\n m = re.match(r\".*/access_log_((\\d{4})(\\d\\d))\\.zip$\", zip_path)\n\n if not m:\n continue\n\n # Determine month date properties\n if m.group(1) not in months:\n year = nextyear = int(m.group(2))\n month = nextmonth = int(m.group(3))\n if month == 12:\n nextyear = year + 1\n nextmonth = 1\n else:\n nextmonth = month + 1\n\n # FIXME: use miscutils.timeseries() for arbitrary time units.\n month_start = timegm((year, month, 1, 0, 0, 0, 0, -1, -1))\n month_end = timegm((nextyear, nextmonth, 1, 0, 0, 0, 0, -1, -1))\n prev_day = month_start - self.time_unit * self.horizon\n next_day = month_end + self.time_unit * self.horizon\n months[m.group(1)] = (month_start, month_end,\n strftime(\"%Y%m%d\", gmtime(prev_day)),\n strftime(\"%Y%m%d\", gmtime(next_day)))\n\n zfile = ZipFile(zip_path, \"r\")\n for fi in zfile.infolist():\n n = re.match(r\"access_log_(\\d+)(?:\\.txt)?$\", fi.filename)\n if n:\n all_logs.append((m.group(1), n.group(1),\n fi.filename, fi.file_size, fi.CRC,\n \"%04d%02d%02dZ%02d%02d%02d\" % fi.date_time,\n zip_path, st[ST_SIZE], st[ST_MTIME]))\n\n # For each month build a list of log files to consider as input.\n # For any one month, we take files for one previous and one next\n # day to handle slightly out of order logging.\n monthly_logs = {}\n for month, lim in months.iteritems():\n logs = [l for l in all_logs if l[1] >= lim[2] and l[1] < lim[3]]\n monthly_logs[month] = sorted(logs)\n\n # Decide which months need to be reprocessed. For each month build\n # a list of log files we used for that months results, and compare\n # to the list we have saved (if any). Reprocess the month if the\n # the two lists aren't identical and the month isn't frozen.\n aggregators = []\n for month in sorted(months.keys(), reverse=True):\n lim = months[month]\n logs = monthly_logs[month]\n\n statfile = \"%s/stats-%s.txt\" % (self.statedir, month)\n dbfile = \"%s/stats-%s.db\" % (self.statedir, month)\n dbfrozen = \"%s/.frozen-%s\" % (self.statedir, month)\n mystamp = \"\".join(\"%s %s %s %s %s\\n\" %\n (f[4], f[3], f[5], f[2], f[6])\n for f in logs)\n\n try:\n oldstamp = os.access(statfile, os.R_OK) and open(dbfile).read()\n except EnvironmentError:\n oldstamp = None\n\n # Start the aggregator, and wait it to finish parsing.\n if mystamp != oldstamp and not os.path.exists(dbfrozen):\n agg = LogAggregator(self, lim, logs, statfile, dbfile, mystamp)\n agg.start()\n agg.completed_parse.get()\n aggregators.append(agg)\n\n # Reap any aggregators which have finished.\n i = 0\n while i < len(aggregators):\n if not aggregators[i].completed_all.empty():\n aggregators.pop(i).join()\n else:\n i += 1\n\n # Wait all remaining aggregators to exit.\n map(lambda agg: agg.join(), aggregators)",
"def _get_container_log_files(self, rootfs_path,\n options=defaults.DEFAULT_CRAWL_OPTIONS,\n ):\n\n # following files need to be ported to envionment modules\n # cloudsight, watson, alchemy etc.\n logs = self._parse_log_locations(\n var='LOG_LOCATIONS',\n isJson=False)\n self.log_file_list.extend(logs)\n\n logs = self._parse_log_locations(\n var='LOGS_CONFIG',\n isJson=True)\n\n self.log_file_list.extend(logs)\n\n # Finally, make sure that the paths are absolute\n\n for log in self.log_file_list:\n name = log['name']\n if not os.path.isabs(name) or '..' in name:\n self.log_file_list.remove(log)\n logger.warning(\n 'User provided a log file path that is not absolute: %s' %\n name)\n return self.log_file_list",
"def scanAll(self): \n\t\tself.scanFolder(Config().RootDirectory)",
"def get_log_files(self):\n\n if self.logs_startdir is not None:\n if self.logs_startdir == self.res_startdir:\n logs_execdir = self.results_execdir\n else:\n logs_execdir = file_Utils.createDir_addtimestamp(self.logs_startdir, self.nameonly)\n logfile = self.get_exec_file_by_type(\"Logs\", logs_execdir)\n\n elif self.logs_startdir is None:\n colocate = False\n logs_location = xml_Utils.getChildTextbyParentTag(self.filepath, 'Details', 'Logsdir')\n results_location = xml_Utils.getChildTextbyParentTag(self.filepath,\n 'Details', 'Resultsdir')\n #get default logs and results directory\n default_xml = Tools.__path__[0] + os.sep + 'w_settings.xml' \n default_logsdir = get_credentials(default_xml, 'def_dir',['Logsdir'], 'Setting')\n default_resultsdir = get_credentials(default_xml, 'def_dir',['Resultsdir'], 'Setting')\n #use the default directory if user didn't define it in test case/test suite/project\n if results_location is None or results_location is False :\n if default_resultsdir['Resultsdir'] is not None :\n results_location = default_resultsdir['Resultsdir']\n \n if logs_location is None or logs_location is False :\n if default_logsdir['Logsdir'] is not None :\n logs_location = default_logsdir['Logsdir']\n\n if logs_location is None or logs_location is False\\\n or str(logs_location).strip() == \"\":\n logs_execdir = self.create_def_exec_dir()\n logfile = self.get_exec_file_by_type('Logs', logs_execdir)\n\n elif logs_location is not None and logs_location is not False:\n logs_location_rel = str(logs_location).strip()\n logs_location = file_Utils.getAbsPath(logs_location_rel,\n os.path.dirname(self.filepath))\n results_location_rel = str(results_location).strip()\n results_location = file_Utils.getAbsPath(results_location_rel,\n os.path.dirname(self.filepath))\n if logs_location == results_location:\n colocate = True\n\n logfile, logs_execdir = self.checkdir_create_file(logs_location, 'Logs', colocate)\n\n # print \"printing logs_execdir: \", logs_execdir\n logsdir = os.path.dirname(logfile)\n return logfile, logsdir, logs_execdir",
"def filter_available_logs(self):\n text = self.available_logs_filter.text()\n str_regex = '.*' + text + '.*'\n\n self.available_logs.clear()\n\n if not text:\n for (log_file, log_location) in self.path_file_dict.items():\n if log_file not in self.removed_logs_dict:\n self.available_logs.addItem(self.create_list_item(log_file,log_location))\n else:\n for (log_file, log_location) in self.path_file_dict.items():\n if re.match(str_regex, log_file) and log_file not in self.removed_logs_dict:\n self.available_logs.addItem(self.create_list_item(log_file,log_location))",
"def get_log_files(self) -> [Path]:\n if self.log_dir is None:\n return []\n logfiles = list(self.log_dir.glob(\"*\"))\n logfiles = [f for f in logfiles if not f.name.startswith(\".\")]\n return logfiles",
"def test_search_with_missing_file(caplog: pytest.LogCaptureFixture) -> None:\n entry = Entry(\"Cao_2019\", EXAMPLE_ENTRY_DICT)\n entry.file = \"some_non_existent_file.txt\" # type: ignore\n _ = entry.search([\"Chemical\"], context=0)\n for source, level, message in caplog.record_tuples:\n if level != 30 or source != \"cobib.database.entry\":\n continue\n if message.startswith(\"The associated file\") and message.endswith(\n \"of entry Cao_2019 does not exist!\"\n ):\n break\n else:\n pytest.fail(\"Missing file was not logged.\")",
"def _check_files_in_src_folder(files):\r\n if len(files) > 0:\r\n log.warning(f\"Des fichiers ont ete detectes dans le repertoire de base -> {len(files)}\")\r\n for f in files:\r\n log.info(f\"| -- {f}\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Scans the output folder for missing files
|
def scan_output(self):
    import os
    import itertools

    def ranges(i):
        # Collapse a sorted list of indices into (start, end) runs of consecutive values.
        for a, b in itertools.groupby(enumerate(i), lambda x_y1: x_y1[1] - x_y1[0]):
            b = list(b)
            yield b[0][1], b[-1][1]

    expected = list(range(1, self.njobs + 1))
    existing = os.listdir(self.folder_out)
    found = [idx for idx in expected if self.outfile(idx) in existing]
    found = list(ranges(found))
    missing = [idx for idx in expected if self.outfile(idx) not in existing]
    num_missing = len(missing)
    missing = list(ranges(missing))
    print('------------------------------')
    print('missing output files:')
    print(',\n'.join([
        '{:}-{:}'.format(*tup) if tup[0] != tup[1] else '{:}'.format(tup[0])
        for tup in missing
    ]))
    print('total missing files:', num_missing)
    print('------------------------------')
    return found, missing
|
[
"def check_output_empty(self):\n # src_directory_list = os.listdir(self.src_directory)\n is_empty_output = True\n for root, dirs, files in os.walk(self.src_directory):\n if len(files) > 0:\n is_empty_output = False\n break\n if is_empty_output:\n self.error_exit_log('The output is empty:%s' % self.src_directory)",
"def missingoutputfiles(self):\n return self.getmissingoutputfiles(self.SlideID, **self.workflowkwargs)",
"def find_output_files(self):\n # find the base path to the files\n if self.input_uri.startswith(\"file\"):\n test_path = self.input_uri.split(\":\")[-1]\n if os.path.isdir(test_path):\n base_path = os.path.abspath(test_path)\n elif os.path.isdir(os.path.join(os.getcwd(), test_path)):\n base_path = os.path.join(os.getcwd(), test_path)\n else:\n raise ValueError(f\"output path {test_path} does not exist\")\n search_fits = f\"{base_path}/{self.ipppssoot.lower()[0:5]}*.fits\"\n # trailer files\n search_tra = f\"{base_path}/{self.ipppssoot.lower()[0:5]}*.tra\"\n # env file\n search_env = f\"{base_path}/{self.ipppssoot.lower()}_cal_env.txt\"\n\n else:\n base_path = os.getcwd()\n subfolder = os.path.join(base_path, \"inputs\", self.ipppssoot)\n search_fits = f\"{subfolder}/{self.ipppssoot.lower()[0:5]}*.fits\"\n search_tra = f\"{subfolder}/{self.ipppssoot.lower()[0:5]}*.tra\"\n search_env = f\"{subfolder}/{self.ipppssoot.lower()}_cal_env.txt\"\n\n self.divider(\"Finding output data for:\", repr(search_fits))\n files = glob.glob(search_fits)\n\n self.divider(\"Finding output trailers for:\", repr(search_tra))\n files.extend(glob.glob(search_tra))\n\n self.divider(\"Finding output cal env file for:\", repr(search_env))\n files.extend(glob.glob(search_env))\n\n return list(sorted(files))",
"def process_output(output):\n if os.path.isdir(output):\n\n dir_list = [directory for directory in os.listdir(output)\n if os.path.isdir(directory)]\n\n for directory in dir_list:\n\n print(\"Processing output in \" +\n os.path.join(directory, OUTPUT_FILE) +\n \"...\")\n out = nwchem.NwOutput(os.path.join(directory, OUTPUT_FILE))\n\n try:\n error = False\n for output in out.data:\n if output['has_error']:\n error = True\n\n if error:\n print(\"File: \" + os.path.join(directory, OUTPUT_FILE) +\n \" contains errors!\")\n\n elif out.data[-1]['task_time'] == 0:\n print('No timing information found in ' +\n os.path.join(directory, OUTPUT_FILE) + \".\")\n\n else:\n out.to_file(os.path.join(directory, 'data.json'))\n\n except NameError:\n\n print(\"No data found in file. \")\n\n except IndexError:\n\n print(\"Data is empty!\")\n\n else:\n\n output = os.path.abspath(output)\n print('Processing output in ' + output)\n\n try:\n out = nwchem.NwOutput(output)\n except:\n raise IOError('Could not find proper nwchem output file.')\n\n try:\n error = False\n for output in out.data:\n if output['has_error']:\n error = True\n\n if error:\n print(\"File: \" + output + \" contains errors!\")\n\n elif out.data[-1]['task_time'] == 0:\n print('No timing information found in ' + output + \".\")\n\n else:\n out.to_file(os.path.join(os.path.dirname(output),\n 'data.json'))\n\n except NameError:\n\n print(\"No data found in file. \")\n\n except IndexError:\n\n print(\"Data is empty!\")\n\n out.to_file(os.path.join(os.path.dirname(output), 'data.json'))",
"def test_output_exists():\n global out_dir\n assert_true(path.exists(path.join(out_dir, 'run.log')))\n assert_true(path.exists(path.join(out_dir, 'lsi.model')))\n assert_true(path.exists(path.join(out_dir, 'pre.model')))\n assert_true(path.exists(path.join(out_dir, 'lsi.model.npy')))",
"def has__no_valid_output_files(self):\r\n return not self.__has_valid_output_files",
"def prepareOutput():\r\n\r\n os.removedirs(\"output\")\r\n os.mkdir(\"output\")",
"def test_output_exists():\n global out_dir\n assert_true(path.exists(path.join(out_dir, 'run.log')))\n assert_true(path.exists(path.join(out_dir, 'info.pickle')))\n assert_true(path.exists(path.join(out_dir, 'articles.pickle')))",
"def clean_up_output():\n yield\n if os.path.isdir('output'):\n rmtree('output')",
"def setUpCrosswalk(self):\n if self.harvestInfo['xsl_file'] is not None and self.harvestInfo['xsl_file'] != '':\n self.storeFileExtension = 'tmp'\n # clean up previous crosswalk and import content\n self.outputDir = self.harvestInfo['data_store_path'] + str(self.harvestInfo['data_source_id'])\n self.outputDir = self.outputDir + os.sep + str(self.harvestInfo['batch_number'])\n for file in os.listdir(self.outputDir):\n if file.endswith(self.resultFileExtension) or \\\n file.endswith(self.resultFileExtension + \".validated\") or \\\n file.endswith(self.resultFileExtension + \".processed\"):\n try:\n if os.path.isfile(self.outputDir + os.sep + file):\n os.unlink(self.outputDir + os.sep + file)\n else:\n self.emptyDirectory(self.outputDir + os.sep + file)\n os.rmdir(self.outputDir + os.sep + file)\n except PermissionError as e:\n self.logger.logMessage(\"Unable to remove %s\" % (self.outputDir + os.sep + file), \"ERROR\")",
"def scan_build_files(self, base_path):",
"def get_output_directories(self):\r\n pass",
"def prune_out_dir(self):\n # do nothing if we haven't written a JSON file yet\n if not self._json_filename:\n return\n \n info('Prune output dir (remove previous files from today)')\n \n out_dir, current_filename = os.path.split(self._json_filename)\n date = self.date()\n for f in os.listdir(out_dir):\n # find and remove files from the same day but with a different timestamp\n if f.startswith(date) and f != current_filename:\n \n chat('Remove {}'.format(current_filename))\n \n os.remove(os.path.join(out_dir, f))",
"def scanDirectory(self):\n\n file_path = self.file_path\n if file_path:\n for root, dirs, files in os.walk(file_path):\n if files:\n for file in files:\n temp_path = os.path.join(root, file)\n self.file_list.append(temp_path)\n\n if self.deepScan is False:\n break",
"def clean_output_directory(project_directory: str) -> None:\n for base_folder in ['Species', 'rxns']:\n base_path = os.path.join(project_directory, 'output', base_folder)\n dir_names = list()\n for (_, dirs, _) in os.walk(base_path):\n dir_names.extend(dirs)\n break # don't continue to explore subdirectories\n for species_label in dir_names:\n species_path = os.path.join(base_path, species_label)\n file_names = list()\n for (_, _, files) in os.walk(species_path):\n file_names.extend(files)\n break # don't continue to explore subdirectories\n if any(['rotor' in file_name for file_name in file_names]) \\\n and not os.path.exists(os.path.join(species_path, 'rotors')):\n os.makedirs(os.path.join(species_path, 'rotors'))\n for file_name in file_names:\n if '_rotor' in file_name: # move to the rotor directory\n shutil.move(src=os.path.join(species_path, file_name),\n dst=os.path.join(species_path, 'rotors', file_name))",
"def check_output(output_files_expected,output_folder=None):\n \n for file in output_files_expected:\n if output_folder:\n expected_file = os.path.join(output_folder,file)\n else:\n expected_file = file\n # check the file exists\n yield (os.path.isfile(os.path.join(expected_file)), \"File does not exist: \" + file)\n \n # check the file is not empty\n yield (os.stat(expected_file).st_size > 0, \"File is empty: \" + file)",
"def outputExcludedFiles(self):\n outputFile = open(self.fileExcOutput,\"w\",-1,\"utf-8\")\n for file in self.filesExcluded:\n outputFile.write(str(file) + \"\\n\")\n outputFile.close()",
"def _check_files_in_src_folder(files):\r\n if len(files) > 0:\r\n log.warning(f\"Des fichiers ont ete detectes dans le repertoire de base -> {len(files)}\")\r\n for f in files:\r\n log.info(f\"| -- {f}\")",
"def scanAll(self): \n\t\tself.scanFolder(Config().RootDirectory)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Run the calculations for a subset of the parameter space
|
def run_subset(self, jobid, outputfile):
    import pickle

    # Runs the function supplied by the config on a fraction of the parameter space.
    # The size of the fraction depends on the total number of jobs.
    # (An earlier variant unpacked each permutation into keyword arguments before calling func.)
    setup = self.conf['setup_func']()
    func = self.conf['single_run_func']
    results = []
    for perm in self.perm_slice(jobid):
        perm = tuple(perm)
        results.append(func(setup, perm))
    # Save the list of results to a pickle file.
    with open(outputfile, "wb") as thefile:
        pickle.dump(results, thefile, protocol=pickle.HIGHEST_PROTOCOL)
    print('collected results dumped to', outputfile)
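
A minimal sketch of how the per-job pickle files could be collected afterwards; collect_results and the file names are hypothetical and not part of the original class:

import pickle

def collect_results(outputfiles):
    # Concatenate the per-job result lists in job order.
    results = []
    for path in outputfiles:
        with open(path, "rb") as fh:
            results.extend(pickle.load(fh))
    return results

# e.g. all_results = collect_results(['out_1.pkl', 'out_2.pkl'])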
|
[
"def executeParamStudy(self):\n\n # --- Create vectors of varying values for each respective parameter that was selected for the study\n if self.inputData.c['paramA']:\n aRange = np.linspace(self.inputData.c['aStart'], self.inputData.c['aEnd'], self.inputData.paramSteps)\n else:\n aRange = [self.inputData.c['a']]\n if self.inputData.c['paramB']:\n bRange = np.linspace(self.inputData.c['bStart'], self.inputData.c['bEnd'], self.inputData.paramSteps)\n else:\n bRange = [self.inputData.c['b']]\n\n # --- Run the parameter study for each combination of parameters\n i = 0\n for a in aRange:\n for b in bRange:\n print(\"\\nExecuting combination a: {0} and b: {1}\" .format(a, b))\n i = i + 1\n self.inputData.c['a'] = float(a)\n self.inputData.c['b'] = float(b)\n if i < 10:\n num_str = \"0\" + str(i)\n else:\n num_str = str(i)\n vtk_filename = self.inputData.paramFilename + \"_\" + num_str\n self.execute()\n self.exportVtk(vtk_filename)\n print(\"Successfully completed %i studies.\" % i)\n self.outputData.paramnum = i",
"def _run_simulations(self, *args, context=None):\n\n # Use the default variable for the function (control allocation), we don't need this for data fitting.\n variable = self.defaults.variable\n\n # Check that we have the proper number of arguments to map to the fitting parameters.\n if len(args) != len(self.fit_param_names):\n raise ValueError(\n f\"Expected {len(self.fit_param_names)} arguments, got {len(args)}\"\n )\n\n # Set the search space to the control allocation. The only thing evaluate is actually \"searching\" over is the\n # randomization dimension, which should be the last sample iterator in the search space list.\n search_space = self.parameters.search_space._get(context)\n for i, arg in enumerate(args):\n # Map the args in order of the fittable parameters\n len_search_space = (\n len(search_space)\n if self.owner.num_estimates is None\n else len(search_space) - 1\n )\n if i < len_search_space:\n assert search_space[i].num == 1, (\n \"Search space for this dimension must be a single value, during search \"\n \"we will change the value but not the shape.\"\n )\n\n # All of this code is required to set the value of the singleton search space without creating a new\n # object. It seems cleaner to just use search_space[i] = SampleIterator([arg]) but this seems to cause\n # problems for Jan in compilation. Need to confirm this, maybe its ok as long as size doesn't change.\n # We can protect against this with the above assert.\n search_space[i].specification = [arg]\n search_space[i].generator = search_space[i].specification\n search_space[i].start = arg\n else:\n raise ValueError(\"Too many arguments passed to run_simulations\")\n\n # Reset the search grid\n self.reset_grid()\n\n # Evaluate objective_function for each sample\n last_sample, last_value, all_samples, all_values = self._evaluate(\n variable=variable,\n context=context,\n params=None,\n fit_evaluate=True,\n )\n\n # We need to swap the simulation (randomization dimension) with the output dimension so things\n # are in the right order passing to the objective_function call signature.\n all_values = np.transpose(all_values, (0, 2, 1))\n\n return all_values",
"def compute_util_params(self, load_shape, nrg_charges, eta=-0.1,\n fit='saturated'):\n if fit == 'saturated':\n # Series of linear coefficients\n self._alpha = nrg_charges.loc[self._index] * (1 - 1/eta)\n # Series of quadratic coefficients\n self._beta = -1.0*nrg_charges.loc[self._index].divide(\n load_shape.loc[self._index]*eta, axis='index')\n elif fit == 'regression':\n df = pd.concat([nrg_charges, load_shape], axis=1)\n df = df.rename(columns={nrg_charges.name: 'p',\n load_shape.name: 'q'})\n df['month'] = df.index.month\n df['HoD'] = df.index.hour\n df['wknd'] = (df.index.dayofweek >= 5).astype(int)\n df['a_indiv'] = df['p'] * (1 - 1 / eta)\n df['b_indiv'] = -1.0 * df['p'] / (df['q'] * eta)\n y_a, X_a = dmatrices('a_indiv ~ -1 + C(month):C(HoD):C(wknd)',\n data=df.loc[self._index])\n y_b, X_b = dmatrices('b_indiv ~ -1 + C(month):C(HoD):C(wknd)',\n data=df.loc[self._index])\n # Note: the following weird syntax is necessary to convert patsy\n # DesignMatrix objects to np arrays - o/w this creates issues\n # when using multiprocessing since DesignMatrix objects cannot\n # be pickled (hopefully to be fixed in a later patsy version)\n _alpha = np.dot(np.asarray(X_a), np.linalg.lstsq(\n np.asarray(X_a), np.asarray(y_a).flatten())[0])\n self._alpha = pd.Series(_alpha, index=self._index)\n _beta = np.dot(np.asarray(X_b), np.linalg.lstsq(\n np.asarray(X_b), np.asarray(y_b).flatten())[0])\n self._beta = pd.Series(_beta, index=self._index)\n else:\n raise Exception('Unknown value for parameter \"fit\".')",
"def test_run_subset(self):\r\n training_data_dir = \"InputTestFilesSection3/TrainingSets\"\r\n tile_prom_region_path = training_data_dir + '/tile_prom_region'\r\n prom_unactive_path = training_data_dir + '/prom_unactive'\r\n other_act_reg_path = training_data_dir + '/other_active_region'\r\n training_dir_results = training_data_dir + '/training_results/'\r\n\r\n tissue_for_cell_name, cell_name_for_tissue = \\\r\n Utilities.cell_to_tissue_matches('InputTestFilesSection3/TissueCellMatches')\r\n motif_split_chr = \"InputTestFilesSection3/motifs_split_chr/\"\r\n tile_prom_region_data = WeightFeatures.get_trainings_data_dirs(tile_prom_region_path, cell_name_for_tissue,\r\n tissue_for_cell_name,\r\n motif_split_chr)\r\n prom_unactive_data = WeightFeatures.get_trainings_data_dirs(prom_unactive_path, cell_name_for_tissue,\r\n tissue_for_cell_name)\r\n other_act_reg_data = WeightFeatures.get_trainings_data_dirs(other_act_reg_path, cell_name_for_tissue,\r\n tissue_for_cell_name)\r\n motif_info_col_names = ['chr', 'motifstart', 'motifend', 'name', 'score', 'pval', 'strand']\r\n\r\n col_names_to_weight_param = ['ChromHMM'.lower(), 'DNase__seq'.lower(), 'FANTOM'.lower(),\r\n 'NumOtherTFBinding'.lower(), 'RepliDomain'.lower(), 'TFBinding'.lower(),\r\n 'TFExpr'.lower(), 'score'.lower(), 'footprints'.lower(), 'cCRE'.lower(),\r\n 'IndexDHS'.lower(), 'RegElem'.lower()]\r\n x = WeightFeatures.run_subset(tile_prom_region_data=tile_prom_region_data,\r\n prom_unactive_data=prom_unactive_data,\r\n other_act_reg_data=other_act_reg_data,\r\n col_names_to_weight_param=col_names_to_weight_param,\r\n db_name='funmotifsdb', db_user_name='mm99', cell_table='test_table',\r\n col_names=motif_info_col_names, training_dir_results=training_dir_results)\r\n d = {'score': [3.333, 1.111, 2.222], 'tfexpr': [0.1597, 1.9020, 67.1400],\r\n 'activity_score': [73.749, 0.0, 772.530134424223], 'cellname': ['liver', 'liver', 'liver']}\r\n df = pd.DataFrame(d)\r\n z = (x == df)\r\n for i in z.all():\r\n assert i is True\r\n return",
"def start_analysis(self):\n for point in self.parameter_space.flat:\n self._start_point_analysis(point)",
"def run_param_experiments(self, desc):\n timings = {}\n doc = Templater(desc[\"name\"])\n diameter = float(desc['ptCloud_diameter'])\n test_parameters = (par for par in desc[\"parameters\"] if len(par['values']) > 0)\n for par in test_parameters: # for each parameter listed in the descriptor file\n plot_rot = Plot(par[\"name\"] + \", rotation error\")\n plot_rot.set_axis_label(desc['dataset_variable'], 'Error (rad)')\n plot_tra = Plot(par[\"name\"] + \", translation error\")\n plot_tra.set_axis_label(\n desc['dataset_variable'], 'Error (% of diameter)')\n plot_rmse = Plot(par[\"name\"] + \", RMSE\")\n plot_rmse.set_axis_label(desc['dataset_variable'], 'RMSE')\n for val in par[\"values\"]: # for each value listed for that parameter \n values = []\n y_rot = []\n y_tra = []\n y_rmse = []\n for dataset in desc[\"dataset\"]: # for each dataset (X-axis)\n values.append(float(dataset['value']))\n rot_err_avg = []\n tra_err_avg = []\n rmse_avg = []\n for ptCloudIdx in range(0, len(dataset[\"P\"])):\n ptCloud_P = dataset[\"P\"][ptCloudIdx]\n ptCloud_Q = dataset[\"Q\"][ptCloudIdx]\n T_file = dataset[\"T\"][ptCloudIdx]\n cmd = self.base_cmd(desc) + self.dataset_args(ptCloud_P, ptCloud_Q) # create the command to run the alg. on the current dataset\n cmd = cmd + self.add_arg(par[\"flag\"], val) # add the current parameter with its tested value\n other_params = ( xpar for xpar in desc[\"parameters\"] if par[\"flag\"] != xpar[\"flag\"] )\n for xpar in other_params:\n cmd = cmd + self.add_arg(xpar[\"flag\"], xpar[\"nominal\"])\n cmd = cmd + \" -j \" + self.REPORT_FILENAME # set up the report\n self.run_cmd(cmd) # execute the algorithm\n # Read and analyze output\n try:\n report = self.parse_report(self.REPORT_FILENAME)\n except IOError:\n report = None\n print('\"'+cmd+'\" did not produce any result')\n except ValueError:\n report = None\n print('\"'+cmd+'\" returned invalid JSON')\n\n if report is not None and report['completed'] is True:\n T_gnd = self.read_ground_truth(T_file)\n T_est = np.matrix(report['transformation'])\n # RMSE\n rmse = float(report['RMSE'][-1])\n [rot_err, tra_err] = self.rot_and_trans_error(T_est, T_gnd)\n if np.isnan(rot_err) or np.isnan(tra_err):\n print('\"'+cmd+'\" returned nan errors')\n raise FloatingPointError('Errors cannot be NaN')\n rmse_avg.append(rmse)\n rot_err_avg.append(rot_err)\n tra_err_avg.append(tra_err / diameter * 100)\n # Timing\n for ti in report['timing']:\n if ti['tag'] in timings:\n timings[ti['tag']].append(float(ti['time']))\n else:\n timings[ti['tag']] = [float(ti['time'])]\n y_rmse.append(np.average(rmse_avg))\n y_rot.append(np.average(rot_err_avg))\n y_tra.append(np.average(tra_err_avg))\n # ... new dataset\n plot_rmse.add_datapoints(str(val), values, y_rmse)\n plot_rot.add_datapoints(str(val), values, y_rot)\n plot_tra.add_datapoints(str(val), values, y_tra)\n # .. new value\n doc.add_plot(plot_rmse)\n doc.add_plot(plot_rot)\n doc.add_plot(plot_tra)\n # .. new parameter\n\n timings_plot = BoxPlot(\"Timings\")\n timings_plot.set_axis_label('Seconds')\n timings_plot.add_datapoints(timings)\n doc.add_plot(timings_plot)\n\n self.remove_file(self.REPORT_FILENAME)\n return doc",
"def InputVariables(parameters_dict, n_option = \"random\", nmin = 0.1, nmax = 0.2, m = 0.03):\n nx, ny = parameters_dict['nx'], parameters_dict['ny'] #retrieve grid size\n dx = parameters_dict['dx']\n\n # set cell initial distribution based on function input\n while n_option not in ['uniform', 'random', 'linear', 'sinusoidal']:\n print(\"Invalid initial cell distribution choice made (can be 'uniform', 'random', 'linear' or 'sinusoidal')\")\n exit()\n\n if n_option in ['uniform']: #selects uniform distribution n = nmin \n n = nmin * np.ones((nx, ny))\n\n if n_option in ['random']: #selects distribution with random fluctuations between cmin and cmax\n np.random.seed(42)\n n = nmin + ((nmax - nmin) * np.random.rand(nx, ny))\n \n if n_option in ['linear']: #selects linear distribution between cmin and cmax\n n = np.zeros((nx, ny))\n for i in range(ny):\n n[i, :] = nmin + ((nmax - nmin) / (ny-1)) * (i)\n \n if n_option in ['sinusoidal']:\n n = (nmin + ((nmax - nmin) / 2)) * np.ones((nx, ny))\n for i in range(ny):\n n[i, :] += ((nmax - nmin) / 2) * np.sin(20 * np.pi * i * dx)\n\n # amount of free volume\n phi = 1 - m\n\n # water volume fraction dependent on cell distribution via no voids constraint (n + w + m = 1)\n w = phi - n \n\n # water velocity \n uw = np.zeros((nx, ny))\n vw = np.zeros((nx, ny))\n\n # create variables dictionary\n\n # update parameters dictionary \n parameters_dict[\"phi\"] = phi\n parameters_dict[\"m\"] = m \n\n return n, w, uw, vw, parameters_dict",
"def test_params_module():\n # Get the inputs required by the Scales object\n (profile, disp_phases, z0) = get_sim_data()\n\n\n # Test that the governing parameters are computed correctly\n # First, test a single dispersed phase\n model = params.Scales(profile, disp_phases[1])\n check_get_variables(model, z0, 0.15, 0.21724144538674975,\n 0.001724100901081246, 0.22611661456807244, 0.15)\n\n # Second, try a list of dispersed phases, where the dominant phase is\n # not the first one\n particles = [disp_phases[1], disp_phases[0], disp_phases[2]]\n model = params.Scales(profile, particles)\n check_get_variables(model, z0, 0.15, 1.1015134610748201,\n 0.001724100901081246, 0.33764577808309032, 0.15)\n\n # Third, make sure we get the same answer as the previous case if the\n # particles are in a different order (i.e., the original order)\n model = params.Scales(profile, disp_phases)\n check_get_variables(model, z0, 0.15, 1.1015134610748201,\n 0.001724100901081246, 0.33764577808309032, 0.15)\n\n # Using the latest Scales object, check that the other methods return\n # the correct results. Since these methods only depend on the values\n # of B, N, and us computed by the get_variables() method, only one case\n # needs to be tested\n assert_approx_equal(model.h_T(z0), 346.40139518559153, significant=6)\n assert_approx_equal(model.h_P(z0), 627.57408319500291, significant=6)\n assert_approx_equal(model.h_S(z0, 0.15), 295.45365120553163,\n significant=6)\n assert_approx_equal(model.lambda_1(z0, 0), 0.74523735215223819,\n significant=6)\n assert_approx_equal(model.u_inf_crit(z0), 0.063723667111426671,\n significant=6)",
"def search_params(param_item, powertrain_filter, size_filter):\n lang = session.get(\"language\", \"en\")\n parameters = [\n param\n for param in app.calc.load_params_file()\n if any(param_item.lower() in x.lower() for x in param)\n ]\n\n if lang == \"en\":\n powertrain_filter = [\n app.calc.d_pt_en[pt] for pt in powertrain_filter.split(\",\")\n ]\n size_filter = [app.calc.d_size_en[s] for s in size_filter.split(\",\")]\n\n if lang == \"de\":\n powertrain_filter = [\n app.calc.d_pt_de[pt] for pt in powertrain_filter.split(\",\")\n ]\n size_filter = [app.calc.d_size_de[s] for s in size_filter.split(\",\")]\n\n if lang == \"fr\":\n powertrain_filter = [\n app.calc.d_pt_fr[pt] for pt in powertrain_filter.split(\",\")\n ]\n size_filter = [app.calc.d_size_fr[s] for s in size_filter.split(\",\")]\n\n if lang == \"it\":\n powertrain_filter = [\n app.calc.d_pt_it[pt] for pt in powertrain_filter.split(\",\")\n ]\n size_filter = [app.calc.d_size_it[s] for s in size_filter.split(\",\")]\n\n response = []\n for a in parameters:\n if isinstance(a[4], str):\n a[4] = [p.strip() for p in a[4].split(\",\")]\n if isinstance(a[5], str):\n a[5] = [p.strip() for p in a[5].split(\",\")]\n if isinstance(a[6], str):\n a[6] = [s.strip() for s in a[6].split(\",\")]\n if list(set(a[5]).intersection(powertrain_filter)) and list(\n set(a[6]).intersection(size_filter)\n ):\n if lang == \"en\":\n a[5] = [\n app.calc.d_rev_pt_en[pt]\n for pt in a[5]\n if pt in app.calc.d_rev_pt_en\n ]\n a[6] = [\n app.calc.d_rev_size_en[s]\n for s in a[6]\n if s in app.calc.d_rev_size_en\n ]\n response.append(a)\n\n if lang == \"de\":\n a[5] = [\n app.calc.d_rev_pt_de[pt]\n for pt in a[5]\n if pt in app.calc.d_rev_pt_de\n ]\n a[6] = [\n app.calc.d_rev_size_de[s]\n for s in a[6]\n if s in app.calc.d_rev_size_de\n ]\n response.append(a)\n\n if lang == \"fr\":\n a[5] = [\n app.calc.d_rev_pt_fr[pt]\n for pt in a[5]\n if pt in app.calc.d_rev_pt_fr\n ]\n a[6] = [\n app.calc.d_rev_size_fr[s]\n for s in a[6]\n if s in app.calc.d_rev_size_fr\n ]\n response.append(a)\n\n if lang == \"it\":\n a[5] = [\n app.calc.d_rev_pt_it[pt]\n for pt in a[5]\n if pt in app.calc.d_rev_pt_it\n ]\n a[6] = [\n app.calc.d_rev_size_it[s]\n for s in a[6]\n if s in app.calc.d_rev_size_it\n ]\n response.append(a)\n\n return jsonify(response[:7])",
"def eval(self, n_iter=10, init_x_max=None):\n\n # get a random initial value for the incumbent from our search space if not specified\n if not init_x_max:\n x_max = self.search_space[np.random.randint(len(self.search_space))]\n x_max = x_max.item()\n else:\n x_max = init_x_max\n\n # for storing the best return and some parameters specifying it\n best_return = None\n best_return_x = None\n best_return_param = None\n\n for i in range(n_iter):\n\n # print some information\n print(\"\\nBO Iteration %d --> Chosen parameter: %f %s\" % (i, x_max,\n \"\" if (init_x_max or i != 0) else \"(randomly)\"))\n # evaluate the function\n y, param = self.f(x_max)\n\n # store if it is the best\n if not best_return or y > best_return:\n best_return = y\n best_return_x = x_max\n best_return_param = param\n\n # add the new sample to the dataset\n self.dataset.append([x_max, y])\n\n # get all the data samples in the dataset\n xs = np.array(self.dataset)[:, 0].reshape(-1, 1)\n ys = np.array(self.dataset)[:, 1].reshape(-1, 1)\n\n # fit the GP with the updated dataset\n if self.mode == \"linear\":\n self.gp.fit(xs, ys)\n else:\n self.gp.fit(np.log10(xs), ys)\n\n # calculate the maximum utilization and its position\n x_max, util_max, util = self._max_acq()\n\n # save the state for later plotting\n self.states.append({\"dataset\": self.dataset.copy(),\n \"util\": util,\n \"GP\": self.gp.predict(self.gp_search_space, return_std=True)})\n\n return best_return_x, best_return_param",
"def grid_search(self):\n # error handling\n if not len(self.optimization_defs) == 1:\n raise RuntimeError(\"grid search method only allowed when optimizing a single parameter\")\n if not 'grid_search_step_size' in self._params['optimizer_initialization'].keys():\n raise RuntimeError(\"Missing required parameter in experiment_yaml: 'grid_search_step_size'\")\n \n display_name, p_dict = list(self.optimization_defs.items())[0]\n rosparam_name = p_dict['rosparam_name']\n min_bound = p_dict['min_bound']\n max_bound = p_dict['max_bound']\n print(\"Performing grid search on parameter\", display_name, \"(aka\", rosparam_name, \").\")\n for step_size in self._params['optimizer_initialization']['grid_search_step_size']:\n print(\"Setting step size to\", step_size)\n paramspace_grid = {rosparam_name: np.arange(min_bound, max_bound, step_size)}\n print(\"Querying at\", len(paramspace_grid[rosparam_name]), \"locations:\")\n print(paramspace_grid)\n self.optimizer.explore(paramspace_grid)\n # just fit the gp hyperparams to the data given via explore, don't choose new samples\n self.optimizer.maximize(init_points=0, n_iter=0, kappa=0, **self.gpr_kwargs)\n self.plot_gpr_single_param(display_name.replace(\" \", \"_\") + \"_step_size_\" + str(step_size) + \".svg\", display_name)\n # this is kind of hacky, but will induce a string to identify which of the resulting plots and param files corresponds to which step_size\n self.iteration = step_size\n # another hacky piece of code to make the Bayesian Optimization output a maximum without having to do at least one iteration\n self.optimizer.res['max'] = {'max_val': self.optimizer.Y.max(),\n 'max_params': dict(zip(self.optimizer.keys,\n self.optimizer.X[self.optimizer.Y.argmax()]))\n }\n \n self.handle_new_best_parameters()\n # reset optimizer\n self.optimizer = BayesianOptimization(self.obj_function.evaluate, self.opt_bounds(self._params['normalize']), verbose=0)",
"def _take_data(self, qc_measurement_parameters: List[qc.Parameter]) -> int:\n meas = QC_Measurement()\n output = []\n output_dict: Dict[str, Optional[float]] = {}\n gate_parameters = []\n n_points_true = [0, 0]\n gates_to_sweep = self.setpoint_settings['gates_to_sweep']\n\n nt.set_database(self.data_settings['db_name'],\n db_folder=self.data_settings['db_folder'])\n\n nt_meta = self._prepare_nt_metadata()\n\n with self.set_up_gates_for_measurement():\n for gate in gates_to_sweep:\n meas.register_parameter(gate.dc_voltage)\n gate_parameters.append(gate.dc_voltage)\n\n for m_param in qc_measurement_parameters:\n _flush_buffers(m_param)\n meas.register_parameter(m_param, setpoints=gate_parameters)\n output.append([m_param, None])\n output_dict[m_param.full_name] = None\n\n start_time = time.time()\n done = False\n\n with meas.run() as datasaver:\n # Save some important metadata before we start measuring\n datasaver.dataset.add_metadata(nt.meta_tag, json.dumps(nt_meta))\n\n for set_point0 in self.current_setpoints[0]:\n gates_to_sweep[0].dc_voltage(set_point0)\n self.do_at_outer_setpoint(set_point0)\n n_points_true[0] += 1\n\n if len(gates_to_sweep) == 2:\n gates_to_sweep[1].use_ramp(True)\n start_voltage = self.current_setpoints[1][0]\n\n gates_to_sweep[1].dc_voltage(start_voltage)\n gates_to_sweep[1].use_ramp(False)\n\n for set_point1 in self.current_setpoints[1]:\n gates_to_sweep[1].dc_voltage(set_point1)\n n_points_true[1] += 1\n m_params = qc_measurement_parameters\n for p, parameter in enumerate(m_params):\n value = parameter.get()\n output[p][1] = value\n output_dict[parameter.full_name] = value\n\n paramx = gates_to_sweep[0].dc_voltage.full_name\n paramy = gates_to_sweep[1].dc_voltage.full_name\n datasaver.add_result(\n (paramx, set_point0),\n (paramy, set_point1),\n *output, # type: ignore\n )\n done = self.finish_early(output_dict) # type: ignore\n if done:\n break\n else:\n m_params = qc_measurement_parameters\n for p, parameter in enumerate(m_params):\n value = parameter.get()\n output[p][1] = value\n output_dict[parameter.full_name] = value\n\n paramx = gates_to_sweep[0].dc_voltage.full_name\n datasaver.add_result(\n (paramx, set_point0), *output # type: ignore\n )\n done = self.finish_early(output_dict) # type: ignore\n if done:\n break\n\n elapsed_time = time.time() - start_time\n minutes, seconds = divmod(elapsed_time, 60)\n msg = \"Elapsed time to take data: {:.0f} min, {:.2f} sec.\"\n logger.info(msg.format(minutes, seconds))\n\n # Add last bits of info to metadata\n nt_meta[\"n_points\"] = n_points_true\n nt_meta[\"elapsed_time\"] = round(float(elapsed_time), 2)\n\n datasaver.dataset.add_metadata(nt.meta_tag, json.dumps(nt_meta))\n\n return datasaver.run_id",
"def optimise_friction_results(testing_parameters, x_measured_set, cof_measured_set,\n RANGE_C, RANGE_K1, RANGE_K2, RANGE_K3, RANGE_LAMBDA1, RANGE_LAMBDA2,\n time_input=True, plot_results=False):\n fit_params = Parameters()\n\n ### Create the sets of fitting parameters, one for each set of data.\n for iy, dictionary in enumerate(testing_parameters, 1):\n # Each item in the list testing_parameters has a dictionary with test conditions.\n fit_params.add(f\"temperature_degC_{iy}\", value=dictionary[\"temperature_degC\"], vary=False)\n fit_params.add(f\"pressure_MPa_{iy}\", value=dictionary[\"pressure_MPa\"], vary=False)\n fit_params.add(f\"force_N_{iy}\", value=dictionary[\"force_N\"], vary=False)\n fit_params.add(f\"speed_mmpersecond_{iy}\", value=dictionary[\"speed_mmpersecond\"], vary=False)\n fit_params.add(f\"lubricant_thickness_{iy}\", value=dictionary[\"lubricant_thickness\"], vary=False)\n\n fit_params.add(f\"mu_lubricated_0_{iy}\", value=dictionary[\"mu0_lubricated\"], vary=False)\n fit_params.add(f\"Q_lubricated_{iy}\", value=dictionary[\"Q_lubricated\"], vary=False)\n fit_params.add(f\"mu_dry_0_1{iy}\", value=dictionary[\"mu0_dry\"], vary=False)\n fit_params.add(f\"Q_dry_{iy}\", value=dictionary[\"Q_dry\"], vary=False)\n fit_params.add(f\"eta_0_{iy}\", value=dictionary[\"eta_0\"], vary=False)\n fit_params.add(f\"Q_eta_{iy}\", value=dictionary[\"Q_eta\"], vary=False)\n\n # BACKUP LAMBDA2\n fit_params.add(f\"lambda_1_{iy}\", value=dictionary[\"lambda_1\"], min=RANGE_LAMBDA1[0], max=RANGE_LAMBDA1[1], vary=True)\n fit_params.add(f\"lambda_2_{iy}\", value=dictionary[\"lambda_2\"], min=RANGE_LAMBDA2[0], max=RANGE_LAMBDA2[1], vary=True)\n\n phys_constr = dictionary[\"blank_roughness_Ra\"]\n # fit_params.add(f\"delta_constraint_{iy}\", min=math.log(14.), max=math.log(16.))\n # fit_params.add(f\"lambda_2_{iy}\", expr=f'delta_constraint_{iy}/(log({phys_constr}*lambda_1_{iy}))')\n\n lambda_2 = dictionary[\"lambda_2\"]\n # Extra physical constraints\n # fit_params.add(f\"lambda_1_constraint_1_{iy}\", expr=f'({phys_constr}*lambda_1_{iy})**lambda_2_{iy}', min=0., max=8)\n # fit_params.add(f\"lambda_1_constraint_2_{iy}\", expr=f'({phys_constr}*lambda_1_{iy})**lambda_2_{iy}', min=16)\n\n fit_params.add(f\"c_{iy}\", value=dictionary[\"c\"], min=RANGE_C[0], max=RANGE_C[1])\n fit_params.add(f\"k_1_{iy}\", value=dictionary[\"k_1\"], min=RANGE_K1[0], max=RANGE_K1[1])\n fit_params.add(f\"k_2_{iy}\", value=dictionary[\"k_2\"], min=RANGE_K2[0], max=RANGE_K2[1])\n fit_params.add(f\"k_3_{iy}\", value=dictionary[\"k_3\"], min=RANGE_K3[0], max=RANGE_K3[1])\n\n # print(phys_constr)\n for iy in range((len(testing_parameters)-1)):\n # To enforce all values to be the same\n fit_params[f\"lambda_1_{iy+2}\"].expr = \"lambda_1_1\"\n fit_params[f\"lambda_2_{iy+2}\"].expr = \"lambda_2_1\"\n fit_params[f\"c_{iy+2}\"].expr = \"c_1\"\n fit_params[f\"k_1_{iy+2}\"].expr = \"k_1_1\"\n fit_params[f\"k_2_{iy+2}\"].expr = \"k_2_1\"\n fit_params[f\"k_3_{iy+2}\"].expr = \"k_3_1\"\n fit_params[f\"k_3_{iy+2}\"].expr = \"k_3_1\"\n # fit_params[f\"delta_constraint_{iy+2}\"].expr = \"delta_constraint_1\"\n\n t0 = time.time()\n print(\"Optimising...\")\n # https://groups.google.com/forum/#!topic/lmfit-py/M_t2W3Z6H50 - Customisation for maxiter.\n # https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.optimize.basinhopping.html\n\n # Options: Differential evolution, simulated annealing, basinhopping and more.\n # Necessary to use this method to enable further customisation of optimisation method.\n\n # 
https://www2.hawaii.edu/~jonghyun/classes/S18/CEE696/files/14_global_optimization.pdf\n # https://uk.mathworks.com/help/gads/how-simulated-annealing-works.html\n mini = lmfit.Minimizer(objective, fit_params, fcn_args=(x_measured_set, cof_measured_set, time_input), fcn_kws={})\n # result = mini.minimize(method='leastsq', maxfev=100)\n result = mini.minimize(method='basinhopping', niter=25, disp=True)\n\n t1 = time.time()\n optimisation_time = t1 - t0\n print(\"Finished Optimising!\\n\")\n print(\"Optimisation Results:\")\n print(f\" - Optimisation time: {round(optimisation_time, 3)} s\")\n print(f\" - Evaluations: {result.nfev}\\n\")\n\n # Now print the new values of the optimisation results.\n pretty_table = PrettyTable()\n pretty_table.field_names = [\"Parameter\", \"Old Value\", \"New Value\", \"Lower\", \"Upper\"]\n for optimisation_parameter in result.var_names:\n # Remove the _1 from the end of each parameter name.\n parameter_name = optimisation_parameter[:-2]\n old_value = round(fit_params[optimisation_parameter].value, 5)\n new_value = round(result.params[optimisation_parameter].value, 5)\n lower_bound = result.params[optimisation_parameter].min\n upper_bound = result.params[optimisation_parameter].max\n\n pretty_table.add_row([parameter_name, old_value, new_value,\n lower_bound, upper_bound])\n\n print(result.var_names)\n print(pretty_table)\n\n # Updating parameters is done this way to make it easy to read.\n for optimisation_parameter in result.var_names:\n parameter_name = optimisation_parameter[:-2]\n for dictionary in testing_parameters:\n dictionary[parameter_name] = result.params[optimisation_parameter].value\n\n if plot_results == True:\n # Plot until the maxmimum value of x given.\n list_zipped_results = (list(zip(testing_parameters, x_measured_set, cof_measured_set)))\n\n temp = []\n for array in x_measured_set:\n temp.append(np.max(array))\n\n max_sd_val = max(temp)\n\n plotting_range = np.linspace(0, max_sd_val, 100)\n base = testing_parameters[0]\n plot_graphs(plotting_range, base, list_zipped_results, time_input=time_input)\n\n print()\n return testing_parameters\n # Ctrl+Shift+L to select all of the same instance.",
"def __call__(self, individual):\n num_params = individual.get_number_local_optimization_params()\n c_0 = np.random.uniform(*self.options[\"param_init_bounds\"], num_params)\n params = self._run_method_for_optimization(\n self._sub_routine_for_obj_fn, individual, c_0)\n individual.set_local_optimization_params(params)",
"def test_multidim_parameter_study_uses_bounds():\n e = Experiment(method=\"multidim_parameter_study\")\n assert_is_not_none(e.variables.lower_bounds)\n assert_is_not_none(e.variables.upper_bounds)",
"def test_gen_plan_params(self):\n pp = PlanetPopulation(**self.spec)\n a, e, p, Rp = pp.gen_plan_params(self.nsamp)\n\n # expect e and p to be uniform\n for j, (param, param_range) in enumerate(zip([e, p], [pp.erange, pp.prange])):\n pval = scipy.stats.kstest(\n param,\n scipy.stats.uniform.cdf,\n args=(param_range[0], param_range[1] - param_range[0]),\n ).pvalue\n\n if pval <= self.kscrit:\n tmp = pp.gen_plan_params(self.nsamp)\n pval = scipy.stats.kstest(\n tmp[j + 1],\n scipy.stats.uniform.cdf,\n args=(param_range[0], param_range[1] - param_range[0]),\n ).pvalue\n\n self.assertGreater(\n pval,\n self.kscrit,\n \"{} does not appear uniform.\".format([\"eccentricity\", \"albedo\"][j]),\n )\n\n # expect a and Rp to be log-uniform\n for j, (param, param_range) in enumerate(\n zip([a.value, Rp.value], [pp.arange.value, pp.Rprange.value])\n ):\n pval = scipy.stats.kstest(\n param, scipy.stats.loguniform.cdf, args=tuple(param_range)\n ).pvalue\n\n if pval < self.kscrit:\n a2, _, _, R2 = pp.gen_plan_params(self.nsamp)\n pval = scipy.stats.kstest(\n [a2.value, R2.value][j],\n scipy.stats.loguniform.cdf,\n args=tuple(param_range),\n ).pvalue\n\n self.assertGreater(\n pval,\n self.kscrit,\n \"{} does not appear log-uniform.\".format([\"sma\", \"planet radius\"][j]),\n )",
"def set_scalar_parameters(self):\n for g, d in self.gb:\n\n a = self.aperture(g)\n specific_volumes = self.specific_volumes(g)\n\n # Define boundary conditions for flow\n bc = self.bc_type_scalar(g)\n # Set boundary condition values\n bc_values = self.bc_values_scalar(g)\n\n biot_coefficient = self.biot_alpha(g)\n compressibility = self.fluid.COMPRESSIBILITY\n\n mass_weight = compressibility * self.porosity(g)\n if g.dim == self.Nd:\n mass_weight += (\n biot_coefficient - self.porosity(g)\n ) / self.rock.BULK_MODULUS\n\n mass_weight *= self.scalar_scale * specific_volumes\n g_rho = (\n -pp.GRAVITY_ACCELERATION\n * self.density(g)\n / self.scalar_scale\n * self.length_scale\n )\n gravity = np.zeros((self.Nd, g.num_cells))\n gravity[self.Nd - 1, :] = g_rho\n pp.initialize_data(\n g,\n d,\n self.scalar_parameter_key,\n {\n \"bc\": bc,\n \"bc_values\": bc_values,\n \"mass_weight\": mass_weight,\n \"biot_alpha\": biot_coefficient,\n \"time_step\": self.time_step,\n \"ambient_dimension\": self.Nd,\n \"source\": self.source_scalar(g),\n # + self.dVdt_source(g, d, self.scalar_parameter_key),\n \"vector_source\": gravity.ravel(\"F\"),\n },\n )\n for e, data_edge in self.gb.edges():\n g_l, g_h = self.gb.nodes_of_edge(e)\n params_l = self.gb.node_props(g_l)[pp.PARAMETERS][self.scalar_parameter_key]\n mg = data_edge[\"mortar_grid\"]\n a = mg.slave_to_mortar_avg() * self.aperture(g_l)\n\n grho = (\n mg.slave_to_mortar_avg()\n * params_l[\"vector_source\"][self.Nd - 1 :: self.Nd]\n )\n\n gravity = np.zeros((self.Nd, mg.num_cells))\n gravity[self.Nd - 1, :] = grho * a / 2\n\n data_edge = pp.initialize_data(\n e,\n data_edge,\n self.scalar_parameter_key,\n {\"vector_source\": gravity.ravel(\"F\")},\n )\n self.set_permeability()",
"def optimise_friction_results(testing_parameters, x_measured_set, cof_measured_set, time_input, plot_results=False):\n fit_params = Parameters()\n\n ### Create the sets of fitting parameters, one for each set of data.\n for iy, dictionary in enumerate(testing_parameters, 1):\n # Each item in the list testing_parameters has a dictionary with test conditions.\n fit_params.add(f\"T_{iy}\", value=dictionary[\"T\"], vary=False)\n fit_params.add(f\"P_{iy}\", value=dictionary[\"P\"], vary=False)\n fit_params.add(f\"F_{iy}\", value=dictionary[\"F\"], vary=False)\n fit_params.add(f\"v_{iy}\", value=dictionary[\"v\"], vary=False)\n fit_params.add(f\"h0_{iy}\", value=dictionary[\"h0\"], vary=False)\n\n fit_params.add(f\"mu_lubricated_0_{iy}\", value=dictionary[\"mu0_lubricated\"], vary=False)\n fit_params.add(f\"Q_lubricated_{iy}\", value=dictionary[\"Q_lubricated\"], vary=False)\n fit_params.add(f\"mu_dry_0_1{iy}\", value=dictionary[\"mu0_dry\"], vary=False)\n fit_params.add(f\"Q_dry_{iy}\", value=dictionary[\"Q_dry\"], vary=False)\n fit_params.add(f\"eta_0_{iy}\", value=dictionary[\"eta_0\"], vary=False)\n fit_params.add(f\"Q_eta_{iy}\", value=dictionary[\"Q_eta\"], vary=False)\n\n # Constraints: (lambda1*0.5) ^ lambda2 > 16 or (lambda1*0.5) ^ lambda2 < 8.\n # Where 0.5 is the average Ra of the workpiece blank which in this case is 0.5 micrometres.\n # Numbers 16 and 8 relate to how strict the constraint to control how much\n # the CoF has increased when the film thickness decreases to 0.5 micrometres.\n\n # BACKUP LAMBDA2\n fit_params.add(f\"lambda_2_{iy}\", value=dictionary[\"lambda_2\"], min=0.1, max=3, vary=False)\n fit_params.add(f\"lambda_1_{iy}\", value=dictionary[\"lambda_1\"], min=0.1, max=42, vary=False)\n\n phys_constr = dictionary[\"blank_roughness\"]\n # fit_params.add(f\"delta_constraint_{iy}\", min=math.log(14.), max=math.log(16.))\n # fit_params.add(f\"lambda_2_{iy}\", expr=f'delta_constraint_{iy}/(log({phys_constr}*lambda_1_{iy}))')\n\n lambda_2 = dictionary[\"lambda_2\"]\n # Extra physical constraints\n # fit_params.add(f\"lambda_1_constraint_1_{iy}\", expr=f'({phys_constr}*lambda_1_{iy})**lambda_2_{iy}', min=0., max=8)\n # fit_params.add(f\"lambda_1_constraint_2_{iy}\", expr=f'({phys_constr}*lambda_1_{iy})**lambda_2_{iy}', min=16)\n\n fit_params.add(f\"c_{iy}\", value=dictionary[\"c\"], min=100, max=500)\n fit_params.add(f\"k_1_{iy}\", value=dictionary[\"k_1\"], min=1.0, max=5)\n fit_params.add(f\"k_2_{iy}\", value=dictionary[\"k_2\"], min=0.1, max=3.0)\n fit_params.add(f\"k_3_{iy}\", value=dictionary[\"k_3\"], min=2.0, max=6)\n\n # print(phys_constr)\n for iy in range((len(testing_parameters)-1)):\n # To enforce all values to be the same\n fit_params[f\"lambda_1_{iy+2}\"].expr = \"lambda_1_1\"\n fit_params[f\"lambda_2_{iy+2}\"].expr = \"lambda_2_1\"\n fit_params[f\"c_{iy+2}\"].expr = \"c_1\"\n fit_params[f\"k_1_{iy+2}\"].expr = \"k_1_1\"\n fit_params[f\"k_2_{iy+2}\"].expr = \"k_2_1\"\n fit_params[f\"k_3_{iy+2}\"].expr = \"k_3_1\"\n fit_params[f\"k_3_{iy+2}\"].expr = \"k_3_1\"\n # fit_params[f\"delta_constraint_{iy+2}\"].expr = \"delta_constraint_1\"\n\n t0 = time.time()\n print(\"Optimising...\")\n # https://groups.google.com/forum/#!topic/lmfit-py/M_t2W3Z6H50 - Customisation for maxiter.\n # https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.optimize.basinhopping.html\n\n # Options: Differential evolution, simulated annealing, basinhopping and more.\n # Necessary to use this method to enable further customisation of optimisation method.\n\n # 
https://www2.hawaii.edu/~jonghyun/classes/S18/CEE696/files/14_global_optimization.pdf\n # https://uk.mathworks.com/help/gads/how-simulated-annealing-works.html\n mini = lmfit.Minimizer(objective, fit_params, fcn_args=(x_measured_set, cof_measured_set, time_input), fcn_kws={})\n result = mini.minimize(method='leastsq', maxfev=100)\n # result = mini.minimize(method='basinhopping', niter=200, disp=True)\n\n t1 = time.time()\n optimisation_time = t1 - t0\n print(\"Finished Optimising!\\n\")\n print(\"Optimisation Results:\")\n print(f\" - Optimisation time: {round(optimisation_time, 3)} s\")\n print(f\" - Evaluations: {result.nfev}\\n\")\n\n # Now print the new values of the optimisation results.\n pretty_table = PrettyTable()\n pretty_table.field_names = [\"Parameter\", \"Old Value\", \"New Value\", \"Lower\", \"Upper\"]\n for optimisation_parameter in result.var_names:\n # Remove the _1 from the end of each parameter name.\n parameter_name = optimisation_parameter[:-2]\n old_value = round(fit_params[optimisation_parameter].value, 5)\n new_value = round(result.params[optimisation_parameter].value, 5)\n lower_bound = result.params[optimisation_parameter].min\n upper_bound = result.params[optimisation_parameter].max\n\n pretty_table.add_row([parameter_name, old_value, new_value,\n lower_bound, upper_bound])\n\n print(result.var_names)\n print(pretty_table)\n\n # Updating parameters is done this way to make it easy to read.\n for optimisation_parameter in result.var_names:\n parameter_name = optimisation_parameter[:-2]\n for dictionary in testing_parameters:\n dictionary[parameter_name] = result.params[optimisation_parameter].value\n\n if plot_results == True:\n # Plot until the maxmimum value of x given.\n list_zipped_results = (list(zip(testing_parameters, x_measured_set, cof_measured_set)))\n\n temp = []\n for array in x_measured_set:\n temp.append(np.max(array))\n\n max_sd_val = max(temp)\n\n plotting_range = np.linspace(0, max_sd_val, 100)\n base = testing_parameters[0]\n plot_graphs(plotting_range, base, list_zipped_results, time_input=time_input)\n\n print()\n return testing_parameters\n # Ctrl+Shift+L to select all of the same instance.",
"def multiple_fits(self):\n self.subtract_background()\n k = 1\n for key in self.fit_names:\n #get params for this fit\n #with new lmfit might not need to do this\n self.params = copy.deepcopy(self.all_params[key])\n\n results = minimize(self.fit_dict[self.fit_type], self.params,\n args = ())\n self.params = results.params\n\n #then if k > num_fits copy result values to params dictionary and fit\n if k < self.num_fits:\n #update parameters\n next_key = self.fit_names[k]\n for i in self.all_params[next_key].keys():\n self.all_params[next_key][i].value = self.params[i].value\n\n #move to next iteration\n k = k + 1\n\n self.fit_results = results"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Send genre then mood. e.g. !letschill action silly
|
def letschill(self, message, args):
movie_list = []
genre = args[0].lower()
try:
mood = args[1].lower()
        except IndexError:
mood = None
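        # No mood supplied: pick one at random; an unrecognised mood is rejected below.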
if not mood:
mood = random.choice(moods)
elif mood not in moods:
return "Please use the moods command to get list of available moods."
if genre not in genres:
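            # First argument was not a genre; if it is actually a mood, use it as the mood
            # and keep drawing random genres until one contains a movie with that mood.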
if genre in moods:
mood = genre
valid = None
while not valid:
genre = random.choice(genres)
for v in movies[genre].values():
if mood in v:
valid = True
else:
return "Please use the genres command to get list of available genres."
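        # Collect every movie in the chosen genre that is tagged with the chosen mood.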
for k, v in movies[genre].items():
if mood in v:
movie_list.append(k)
if len(movie_list) == 0:
return "Nothing matched your searches of {}/{}".format(genre, mood)
else:
return "You should watch {}!".format(random.choice(movie_list))
|
[
"def mood():",
"def genrephrase(self, category, genre):\n\n s = self.simplenlg.SPhraseSpec(self.simplenlg.nlgfactory)\n s.setSubject(self.pronoun)\n s.setVerb(\"like\")\n\n if category == 'books and literature':\n clause = self.simplenlg.nlgfactory.createClause()\n clause.setVerb(\"read\")\n clause.setFeature(self.simplenlg.Feature.FORM, self.simplenlg.Form.INFINITIVE)\n clause.setObject(genre)\n s.setObject(clause)\n\n elif category == 'movies and tv':\n clause = self.simplenlg.nlgfactory.createClause()\n clause.setVerb(\"watch\")\n clause.setFeature(self.simplenlg.Feature.FORM, self.simplenlg.Form.INFINITIVE)\n clause.setObject(genre)\n s.setObject(clause)\n\n elif category == 'music':\n clause = self.simplenlg.nlgfactory.createClause()\n clause.setVerb(\"listen\")\n clause.setFeature(self.simplenlg.Feature.FORM, self.simplenlg.Form.INFINITIVE)\n\n c = self.simplenlg.nlgfactory.createCoordinatedPhrase()\n\n for g in genre:\n c.addCoordinate(g)\n\n pp = self.simplenlg.PPPhraseSpec(self.simplenlg.nlgfactory)\n pp.addComplement(c)\n pp.setPreposition(\"to\")\n clause.setObject(pp)\n s.setObject(clause)\n\n\n return self.simplenlg.nlgfactory.createSentence(s)",
"async def send_random_by_genre(message):\n chat_id = message.from_user.id\n\n if IsSerial.bol:\n content = BOT_FUNCTIONS.select_random_by_genre(media='SERIALS', genre=message.text)\n else:\n content = BOT_FUNCTIONS.select_random_by_genre(media='FILMS', genre=message.text)\n\n message_text = f'👇Вот случайный тайтл этого жанра👇\\n{content_to_html(content)}'\n\n await bot.send_message(chat_id=chat_id, text=message_text, parse_mode='markdown', disable_web_page_preview=False)",
"async def _mood(self,ctx):\n mother = ctx.message.author\n\n if common.has_mon(str(mother)):\n pet = common.user_data['players'][str(mother)]['mon']\n await self.bot.say(\"{0}: {1}{2}\".format(mother.mention,\n pet['name'],\n common.mood_msg[pet['happy']]))",
"def append_action(targets, speech):\n actions_bag.append(\n {'speech': 'simon says '+speech, 'targets': targets})\n if random.random() > 0.7: # this is where we add in non-simon says tasks\n actions_bag.append(\n {'speech': speech, 'targets': targets})",
"async def romance_movie(self, ctx: commands.Context) -> None:\n await send_random_movie_embed(bot=self.bot, ctx=ctx, icon=\":sparkling_heart:\", genre_code=\"10749\", pages=20)",
"def set_genre(self, genre=UNKNOWN_GENRE):\n self.genre = genre",
"def xmms2_genre(self):\n self.writeCommand('xmms2_genre')\n return self",
"async def _love(self,ctx):\n mother = ctx.message.author\n\n if common.has_mon(str(mother)):\n pet = common.user_data['players'][str(mother)]['mon']\n\n love_msg = [\"*{0} glances at {1}, giving the desperate {2} just enough attention to feel something like love.*\\n{1} gained a little happiness.\".format(mother.mention, pet['name'], pet['type']),\n \"*{0} pats {1}, catching it off guard. It's not like {1} wanted to be patted, anyway...*\\n{1} gained a little happiness.\".format(mother.mention, pet['name'])]\n\n\n if common.add_hoh(pet, 'happy'):\n await self.bot.say(random.sample(love_msg,1)[0])\n else:\n await self.bot.say(pet['name'] + \" already knows how much you love them.<3\")",
"def radio(self, character):\n sounds = [\"'You will never escape...'\",\n \"'Are you trying to leave?'\",\n \"'Do you know what happened here??'\",\n \"'Can you discover my secret...'\"]\n \n # Checks whether the player has picked up the batteries.\n if character.batteries == False:\n print(\"The radio is powered by batteries.\")\n time.sleep(1)\n print(\"You need to find some batteries first.\")\n time.sleep(1)\n else:\n print(\"You turn on the radio.\")\n time.sleep(1)\n print(\"You hear a faint sound.\")\n time.sleep(1)\n print(random.choice(sounds))",
"def murderer_clue():\n if murderer_mc:\n in_or_with = \"in\" if search_type == Murder.ROOM else \"with\"\n _ = \"once\" if murderer_mc == 1 else f\"{murderer_mc} times\"\n print(f\"\\t{guess_murderer} killed {in_or_with} {search} {_}\")\n else:\n _ = {Murder.WEAPON: \"didn't use\", Murder.ROOM: \"didn't kill in\"}[search_type]\n print(f\"\\t{guess_murderer} {_} {search}\")",
"def special(self, game, player):\n player.gain_card(\"Copper\")\n player.gain_card(\"Gold\")\n player.output(\"Gained a Copper and a Gold\")",
"def reactions():\n pass",
"def detect_intent(self):\n self.grammars = \"builtin:speech/transcribe,builtin:dtmf/digits\" \n self.synth_and_recog()",
"def victim_clue():\n if search_type == Murder.WEAPON:\n _ = \"\" if victim_mc else \"nt\"\n print(f\"\\t{guess_weapon} was{_} used on {guess_victim}\")\n else: # elif search_type == Murder.ROOM\n _ = \"died\" if victim_mc else \"didn't die\"\n print(f\"\\t{guess_victim} {_} in {guess_room}\")",
"def mood(self):\r\n if self.sickness is True:\r\n print('\\033[91m'+\"I'm sick\" + '\\033[0m')\r\n return \"sick\"\r\n # The pet can't die just because the pet is old unless user play for\r\n # couple hours and it will print out \"...\" and exit the game or\r\n # users do nothing with their pet.\r\n # We intend to do that because the pet is not real one.\r\n if self.age == self.max_age:\r\n print('\\033[91m'+\"....\"+'\\033[0m')\r\n exit()\r\n return \"...\"\r\n if self.age >= self.max_age - 3:\r\n print('\\033[91m'+\"I'm getting old\"+'\\033[0m')\r\n return \"getting old\"\r\n if self.hungriness > self.hungriness_threshold:\r\n print('\\033[91m' + \"I'm hungry\" + '\\033[0m')\r\n return \"hungry\"\r\n elif self.tiredness > self.tiredness_threshold:\r\n print('\\033[91m'+\"I'm tired\"+'\\033[0m')\r\n return \"tired\"\r\n elif self.boredom > self.boredom_threshold:\r\n print('\\033[91m'+\"I'm bored\"+'\\033[0m')\r\n return \"bored\"\r\n else:\r\n print('\\033[91m'+\"I'm happy\"+'\\033[0m')\r\n return \"happy\"",
"def simon_says(new_def, process_step, check_side_seq, neutral):\n actions_list = []\n actions_list.append(\n {'speech': 'in simon says, I will tell you something to do and ' +\n 'show you how to do it, mirrored. If I say simon says, you ' +\n 'should do it with me. If I do not say simon says, you should ' +\n 'not do the action. Watch out, I may try to trick you. ' +\n 'After every movement return to a ready position'})\n actions_bag = []\n left = []\n right = []\n\n def append_action(targets, speech):\n \"\"\"Append a new action to the bag of actions, adding a non-simon\n says action with 30% frequency\n\n Args:\n targets: Target to move arms to\n speech: Speech to speak\n \"\"\"\n actions_bag.append(\n {'speech': 'simon says '+speech, 'targets': targets})\n if random.random() > 0.7: # this is where we add in non-simon says tasks\n actions_bag.append(\n {'speech': speech, 'targets': targets})\n\n sort_defs(new_def, left, right, process_step,\n check_side_seq, append_action)\n random.shuffle(left)\n random.shuffle(right)\n if new_def.bimanual:\n mix_bimanual(left, right, append_action)\n # If either we didn't run in bimanual mode or if there is just some left over in one arm:\n while left:\n left_act = left.pop()\n append_action(left_act['targets'], left_act['speech'])\n while right:\n right_act = right.pop()\n append_action(right_act['targets'], right_act['speech'])\n\n random.shuffle(actions_bag)\n actions_list += actions_bag\n\n actions_list.append(\n {'speech': 'that was a lot of fun, thanks for playing with me'})\n actions_list = list(chain.from_iterable(\n (neutral, at) for at in actions_list))\n return actions_list",
"def giveNewMilk(self, sender, reciever):\n\n self.mainComm.cast(\"{} received milk in the night!\".format(self.mainComm.getName(reciever.id)))",
"def _add_genre(self, genre):\n self.genres.add(genre)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Our goal is to split the edges into high/low groups; we do not care what the final orientation of the edges is. Each edge is either in its final orientation or not, so there are (2^12)/2 or 2048 possible permutations. The /2 is because the number of edges not in their final orientation cannot be odd.
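A minimal standalone sketch of that count (plain Python, independent of the solver below; the variable names are illustrative only): enumerate every 12-bit orientation mask and keep only those with an even number of flipped edges.

from itertools import product

# Each of the 12 edges is either flipped (1) or not (0); only even parity is reachable.
even_masks = [bits for bits in product((0, 1), repeat=12) if sum(bits) % 2 == 0]
assert len(even_masks) == 2 ** 12 // 2 == 2048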
|
def eo_edges(self):
permutations = []
original_state = self.state[:]
original_solution = self.solution[:]
tmp_solution_len = len(self.solution)
# Build a list of the wing strings at each midge
wing_strs = []
for (_, square_index, partner_index) in midges_recolor_tuples_555:
square_value = self.state[square_index]
partner_value = self.state[partner_index]
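            # Normalize the two sticker values into a canonical wing name via wing_str_map.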
wing_str = square_value + partner_value
wing_str = wing_str_map[square_value + partner_value]
wing_strs.append(wing_str)
# build a list of all possible EO permutations...an even number of edges must be high
for num in range(4096):
num = str(bin(num)).lstrip("0b").zfill(12)
if num.count("1") % 2 == 0:
permutations.append(list(map(int, num)))
# Put all 2048 starting states in a file and point ida-via-graph
# at the file so it can solve all of them and apply the one that is the shortest.
pt_states = []
for (index, permutation) in enumerate(permutations):
must_be_uppercase = []
must_be_lowercase = []
self.state = original_state[:]
for (wing_str, uppercase) in zip(wing_strs, permutation):
if uppercase:
must_be_uppercase.append(wing_str)
else:
must_be_lowercase.append(wing_str)
# log.info("%s: %s permutation %s" % (self, index, "".join(map(str, permutation))))
self.edges_flip_orientation(must_be_uppercase, must_be_lowercase)
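            # Record the lookup-table state indexes of this re-colored cube; solve_via_c
            # below evaluates every candidate and keeps the shortest solution.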
pt_states.append(
(
self.lt_phase3_lr_center_stage_eo_inner_orbit.state_index(),
self.lt_phase3_eo_outer_orbit.state_index(),
)
)
self.state = original_state[:]
self.solution = original_solution[:]
# When solve_via_c is passed pt_states (2048 lines of states in this case), it will try all 2048 of them
# to find the state that has the shortest solution.
self.lt_phase3.solve_via_c(pt_states=pt_states)
# re-color the cube so that the edges are oriented correctly so we can
# pair 4-edges then 8-edges. After all edge pairing is done we will uncolor
# the cube and re-apply the solution.
self.post_eo_state = self.state[:]
self.post_eo_solution = self.solution[:]
self.edges_flip_orientation(wing_strs, [])
self.highlow_edges_print()
self.print_cube()
log.info(
"%s: end of phase 3, edges EOed, %d steps in" % (self, self.get_solution_len_minus_rotates(self.solution))
)
self.solution.append(
"COMMENT_%d_steps_555_edges_EOed" % self.get_solution_len_minus_rotates(self.solution[tmp_solution_len:])
)
|
[
"def groupsizes(total, len):\n if len == 1:\n yield (total,)\n else:\n for i in range(1, total - len + 1 + 1):\n for perm in groupsizes(total - i, len - 1):\n yield (i,) + perm",
"def get_seed_chunks(\n graph: nx.Graph,\n num_chunks: int,\n num_dists: int,\n pop_target: Union[int, float],\n pop_col: str,\n epsilon: float,\n node_repeats: int = 1,\n method: Callable = partial(bipartition_tree_random, max_attempts=10000),\n) -> List[List[int]]:\n num_chunks_left = num_dists // num_chunks\n parts = range(num_chunks)\n new_epsilon = epsilon / (num_chunks_left * num_chunks)\n if num_chunks_left == 1:\n new_epsilon = epsilon\n\n chunk_pop = 0\n for node in graph.node_indices:\n chunk_pop += graph.nodes[node][pop_col]\n\n while True:\n epsilon = abs(epsilon)\n\n flips = {}\n remaining_nodes = set(graph.nodes)\n\n min_pop = pop_target * (1 - new_epsilon) * num_chunks_left\n max_pop = pop_target * (1 + new_epsilon) * num_chunks_left\n\n chunk_pop_target = chunk_pop / num_chunks\n\n diff = min(max_pop - chunk_pop_target, chunk_pop_target - min_pop)\n new_new_epsilon = diff / chunk_pop_target\n\n for i in range(len(parts[:-1])):\n part = parts[i]\n\n nodes = method(\n graph.subgraph(remaining_nodes),\n pop_col=pop_col,\n pop_target=chunk_pop_target,\n epsilon=new_new_epsilon,\n node_repeats=node_repeats,\n )\n\n if nodes is None:\n raise BalanceError()\n\n for node in nodes:\n flips[node] = part\n remaining_nodes -= nodes\n\n # All of the remaining nodes go in the last part\n for node in remaining_nodes:\n flips[node] = parts[-1]\n\n part_pop = 0\n for node in remaining_nodes:\n part_pop += graph.nodes[node][pop_col]\n part_pop_as_dist = part_pop / num_chunks_left\n fake_epsilon = epsilon\n if num_chunks_left != 1:\n fake_epsilon = epsilon / 2\n min_pop_as_dist = pop_target * (1 - fake_epsilon)\n max_pop_as_dist = pop_target * (1 + fake_epsilon)\n\n if part_pop_as_dist < min_pop_as_dist:\n new_epsilon = new_epsilon / 2\n elif part_pop_as_dist > max_pop_as_dist:\n new_epsilon = new_epsilon / 2\n else:\n break\n\n chunks: Dict[Any, List] = {}\n for key in flips.keys():\n if flips[key] not in chunks.keys():\n chunks[flips[key]] = []\n chunks[flips[key]].append(key)\n\n return list(chunks.values())",
"def test_with_remainder(self):\n data = range(21)\n grouped = util.make_even_groups(data, 5)\n self.assertEqual(len(grouped), 4)\n for group in grouped:\n self.assertEqual(len(group), 5)\n full = sorted(flatten(grouped))\n self.assertEqual(full, data[:-1])",
"def testNGroupSplit(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupSplitter(2)\n hs_reversed = NGroupSplitter(2, reverse=True)\n\n for isreversed, splitter in enumerate((hs, hs_reversed)):\n splits = list(splitter(self.data))\n self.failUnless(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.failUnless( len(p) == 2 )\n self.failUnless( p[0].nsamples == 50 )\n self.failUnless( p[1].nsamples == 50 )\n\n self.failUnless((splits[0][1-isreversed].uniquechunks == [0, 1, 2, 3, 4]).all())\n self.failUnless((splits[0][isreversed].uniquechunks == [5, 6, 7, 8, 9]).all())\n self.failUnless((splits[1][1-isreversed].uniquechunks == [5, 6, 7, 8, 9]).all())\n self.failUnless((splits[1][isreversed].uniquechunks == [0, 1, 2, 3, 4]).all())\n\n # check if it works on pure odd and even chunk ids\n moresplits = list(hs(splits[0][0]))\n\n for split in moresplits:\n self.failUnless(split[0] != None)\n self.failUnless(split[1] != None)\n\n # now test more groups\n s5 = NGroupSplitter(5)\n s5_reversed = NGroupSplitter(5, reverse=True)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5_reversed)):\n splits = list(s5splitter(self.data))\n\n # must have 10 splits\n self.failUnless(len(splits) == 5)\n\n # check split content\n self.failUnless((splits[0][1-isreversed].uniquechunks == [0, 1]).all())\n self.failUnless((splits[0][isreversed].uniquechunks == [2, 3, 4, 5, 6, 7, 8, 9]).all())\n self.failUnless((splits[1][1-isreversed].uniquechunks == [2, 3]).all())\n self.failUnless((splits[1][isreversed].uniquechunks == [0, 1, 4, 5, 6, 7, 8, 9]).all())\n # ...\n self.failUnless((splits[4][1-isreversed].uniquechunks == [8, 9]).all())\n self.failUnless((splits[4][isreversed].uniquechunks == [0, 1, 2, 3, 4, 5, 6, 7]).all())\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return [ (train, test) for (train, test) in spl(dat) ]\n s20 = NGroupSplitter(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)",
"def find_best_new_edges(graph, n = 15, k = 3):\n pairs = list(itl.combinations(graph.vs['name'], 2))\n edges_in_graph = [(x['name'], y['name']) for (x, y) in [e.vertex_tuple for e in graph.es]]\n for pair in edges_in_graph:\n pairs.remove(pair)\n \n best_pairs = []\n best_modularities = []\n \n i = 0\n while i < n:\n modularities = []\n for m in range(len(pairs)):\n new_graph = deepcopy(graph)\n new_graph.add_edge(pairs[m][0], pairs[m][1])\n modularities.append(louvain.find_partition(new_graph, louvain.ModularityVertexPartition).quality())\n if n - i > k:\n for _ in range(k):\n graph.add_edge(pairs[modularities.index(max(modularities))][0], pairs[modularities.index(max(modularities))][1])\n best_pairs.append(pairs[modularities.index(max(modularities))])\n best_modularities.append(louvain.find_partition(graph, louvain.ModularityVertexPartition).quality())\n pairs.remove(pairs[modularities.index(max(modularities))])\n modularities.remove(max(modularities))\n i += 1\n else:\n for _ in range(n - i):\n graph.add_edge(pairs[modularities.index(max(modularities))][0], pairs[modularities.index(max(modularities))][1])\n best_pairs.append(pairs[modularities.index(max(modularities))])\n best_modularities.append(louvain.find_partition(graph, louvain.ModularityVertexPartition).quality())\n pairs.remove(pairs[modularities.index(max(modularities))])\n modularities.remove(max(modularities))\n i += 1\n return (best_pairs, best_modularities, i)",
"def amr_subgraphs_optimized(g,n_min=1,n_max=None): # g: AMRGraph object\n output = defaultdict(list)\n # PROXY_AFP_ENG_20021112_0467.11 - a cyclic graph\n if not nx.is_directed_acyclic_graph(g):\n print('The input graph is not directed acyclic.')\n return output\n \n amr_root = list(g.successors('@'))[0]\n order = list(nx.dfs_preorder_nodes(g,amr_root))\n# print(order)\n if not n_max:\n n_max = len(g.nodes())\n # assumption: n_min < n_max\n for i in range(n_min,n_max+1):\n# print(i)\n for n in order:\n# pool = list(nx.dfs_preorder_nodes(g,'s',depth_limit=i-1))\n pool = set(y for v in nx.dfs_successors(g,n,depth_limit=i-1).values() \n for y in v)\n# print(n,pool)\n if len(pool)<i-1: \n continue\n for ns in itertools.combinations(pool,i-1):\n sg = g.subgraph((n,) + ns).copy()\n if nx.is_connected(sg.to_undirected()): \n amr_root = list(nx.topological_sort(sg))[0]\n sg.add_edge('@',amr_root,label='')\n sg = AMRGraph(sg)\n sg.meta = '# connected subgraph of {} nodes'.format(i)\n output[i].append(sg)\n return output",
"def singularity_polyedge_decomposition(self):\n\t\tpolyedges = [polyedge for polyedge in self.polyedges() if (self.is_vertex_singular(polyedge[0]) or self.is_vertex_singular(polyedge[-1])) and not self.is_edge_on_boundary(polyedge[0], polyedge[1])]\t\t\t\t\t\t\t\t\t\n\n\t\t# split boundaries\n\t\tall_splits = self.singularities()\n\t\tfor boundary in self.boundaries():\n\t\t\tsplits = [vkey for vkey in boundary if vkey in all_splits]\n\t\t\tnew_splits = []\n\n\t\t\tif len(splits) == 0:\n\t\t\t\tnew_splits += [vkey for vkey in list(itemgetter(0, int(floor(len(boundary) / 3)), int(floor(len(boundary) * 2 / 3)))(boundary))]\n\t\t\t\t\n\t\t\telif len(splits) == 1:\n\t\t\t\ti = boundary.index(splits[0])\n\t\t\t\tnew_splits += list(itemgetter(i - int(floor(len(boundary) * 2 / 3)), i - int(floor(len(boundary) / 3)))(boundary))\n\t\t\t\n\t\t\telif len(splits) == 2:\n\t\t\t\tone, two = list_split(boundary, [boundary.index(vkey) for vkey in splits])\n\t\t\t\thalf = one if len(one) > len(two) else two\n\t\t\t\tnew_splits.append(half[int(floor(len(half) / 2))])\t\n\n\t\t\tfor vkey in new_splits:\n\t\t\t\tfor nbr in self.vertex_neighbors(vkey):\n\t\t\t\t\tif not self.is_edge_on_boundary(vkey, nbr):\n\t\t\t\t\t\tnew_polyedge = self.polyedge(vkey, nbr)\n\t\t\t\t\t\tpolyedges.append(new_polyedge)\n\t\t\t\t\t\tall_splits = list(set(all_splits + new_polyedge))\n\t\t\t\t\t\tbreak\n\n\t\t# add boundaries\n\t\tpolyedges += [polyedge for polyedge in self.polyedges() if self.is_edge_on_boundary(polyedge[0], polyedge[1])]\n\n\t\t# get intersections between polyedges for split\n\t\tvertices = [vkey for polyedge in polyedges for vkey in set(polyedge)]\n\t\tsplit_vertices = [vkey for vkey in self.vertices() if vertices.count(vkey) > 1]\n\t\t\n\t\t# split singularity polyedges\n\t\treturn [split_polyedge for polyedge in polyedges for split_polyedge in list_split(polyedge, [polyedge.index(vkey) for vkey in split_vertices if vkey in polyedge])]",
"def detect_edges(gen):\n # Pairwise iteration\n a, b = itertools.tee(gen)\n next(b, None)\n for prev, curr in zip(a, b):\n yield curr - prev",
"def partition_by_segments(self):\n edges = set(self._edges)\n def remove(a, b):\n edges.discard((a,b))\n edges.discard((b,a))\n while len(edges) > 0:\n segment = edges.pop()\n while segment is not None:\n if self.degree(segment[0]) == 2:\n nhood = set(self.neighbours(segment[0]))\n assert len(nhood) == 2\n nhood.discard(segment[1])\n key = nhood.pop()\n remove(key, segment[0])\n segment = (key, ) + segment\n elif self.degree(segment[-1]) == 2:\n nhood = set(self.neighbours(segment[-1]))\n if not len(nhood) == 2:\n raise AssertionError(segment[-1])\n nhood.discard(segment[-2])\n key = nhood.pop()\n remove(key, segment[-1])\n segment = segment + (key, )\n else:\n yield segment\n segment = None",
"def auto_square_edges(containerSize): \n\n # initializing/reseting all the lists\n \n leftEdge = []\n rightEdge = []\n topEdge = []\n bottomEdge = []\n\n # the following for loop defines the edges (without corners) as lists of\n # tuples denoting their (columnID, rowID)\n\n for i in range(1,containerSize-1):\n \n topEdge.append((i, 0))\n \n bottomEdge.append((i, containerSize-1))\n \n leftEdge.append((0, i))\n\n rightEdge.append((containerSize - i % 2 - 1, i))\n \n \n return topEdge, bottomEdge, leftEdge, rightEdge",
"def get_split_tree(pwd, mutation_rate, accessible_size, generation_time=1,\n groupings=None, outgroup=None, prune_outgroup=True):\n\n\n individual_ne = get_ne(pwd, mutation_rate, accessible_size)\n if groupings is not None:\n for k,v in groupings.iteritems():\n if v == outgroup:\n del individual_ne[k]\n else:\n del individual_ne[outgroup]\n if min(individual_ne.values())*2 < max(individual_ne.values()): \n warnings.warn(\"Inferred effective population sizes differ by a factor more than 2.\"\n \" The assumptions used to infer split times are not met. \" \n \"The tree is likely far off from the truth. Branches with smallest Ne will be far too long. \"\n \"Here are the estimates: {}\".format(str(individual_ne)))\n ne = get_ne(pwd, mutation_rate, accessible_size, groupings=groupings)\n \n n_samples = get_samples_per_group(pwd, groupings=groupings, haploid=True)\n \n if groupings is not None:\n pwd = get_group_pwd(pwd, groupings)\n \n \n split_diff = get_split_diff(pwd)\n split_time = split_diff/(2.*mutation_rate*accessible_size/generation_time)\n\n #the factor 2 comes from the fact that the distance between two leafes is 2*split_time\n tree = tt.dm_to_tree(2*split_time, outgroup=outgroup, prune_outgroup=prune_outgroup)\n \n \n tree.add_property_to_nodes('ne',ne)\n tree.add_property_to_nodes('n_samples',n_samples)\n \n \n for node in tree.iter_descendants('postorder'):\n if not hasattr(node, 'ne'):\n l,r = node.get_children()\n nes = [l.ne, r.ne]\n for i,n in enumerate([l,r]):\n if n.is_leaf():\n nes[i] = pwd.loc[n.name, n.name]/(4.*mutation_rate*accessible_size)\n \n node.ne = sum(nes)/2. \n \n return tree",
"def pre_processing(self):\n while self.number_of_dmax() < 1:\n self.dmax -= 1\n __edges = self.current_edges()\n print('current edges =', __edges, ' expected edges =', self.edges)\n if __edges < self.edges:\n __temp = self.dmax\n __l = self.dmax\n self.dmax *= 2\n __r = self.dmax\n while self.number_of_dmax() >= 1 and __r < self.nodes:\n __l = __r\n self.dmax *= 2\n __r = self.dmax\n while __l < __r:\n self.dmax = int((__l + __r) / 2)\n if self.number_of_dmax() < 1:\n __r = self.dmax\n else:\n __l = self.dmax + 1\n self.dmax = __l - 1\n __edges = self.current_edges()\n if __edges > self.edges:\n __l = __temp\n __r = self.dmax\n while __l < __r:\n self.dmax = int((__l + __r) / 2)\n __edges = self.current_edges()\n if __edges > self.edges:\n __r = self.dmax\n else:\n __l = self.dmax + 1\n self.dmax = __l - 1\n print('adjust dmax =', self.dmax, ' edges =', int(__edges))\n elif __edges > self.edges:\n # __temp1 = [_ ** self.lmd for _ in range(self.dmin, self.dmax + 1)]\n # __temp2 = [_ * __ for _, __ in zip(__temp1, list(range(self.dmin, self.dmax+1)))]\n # c = self.edges / sum(__temp2)\n # n = c * sum(__temp1)\n # self.select_p = n / self.nodes\n # print('reduce select p =', self.select_p)\n __l, __r = self.dmin, self.dmax\n while __l < __r:\n self.dmax = int((__l + __r) / 2)\n __edges = self.current_edges()\n if __edges > self.edges:\n __r = self.dmax\n else:\n __l = self.dmax + 1\n self.dmax = __l - 1\n print('adjust dmax = ', self.dmax, ' edges = ', __edges)",
"def generate_ER(n, p):\n ordering = np.arange(n)\n np.random.shuffle(ordering) \n #each element ordering[i] is node indexd by i's place in the ordering\n\n dag = np.zeros((n,n))\n\n for i in range(n):\n for j in range(i, n):\n if i == j:\n continue\n if np.random.binomial(1, p) == 1:\n if ordering[i] > ordering[j]:\n dag[j,i] = 1\n else:\n dag[i,j] = 1\n\n return dag",
"def get_mode_groups(trans_mode: int) -> Iterator[Tuple[int, int]]:\n l_array = np.arange(0, trans_mode + 1, dtype=int)\n p_array = np.arange(0, (trans_mode + 1)/2, dtype=int)\n for p in p_array:\n for l in l_array:\n if trans_mode == 2*p + l:\n yield (p, l)",
"def split_n_range ( low , high , num ) :\n if high <= low or num < 1 : yield low , low\n elif 1 == num : yield low , high\n elif low < high and high <= num + low : yield low , high\n else : \n \n nn = high - low\n newn = nn // num\n for i in range ( 0 , num - 1 ) :\n nl = i * newn\n nh = nl + newn\n yield low + nl , low + nh \n yield low + num * newn - newn , high",
"def mk_ed_orient(ed_ids, ed_type, n, dim):\n #First we grab the list that gives the ids of each face on each edge\n\n\n #Next we generalize n a little bit\n nn = n\n if type(n) == list:\n if len(n) < 2:\n nn = [n[0], n[0]]\n if type(nn) == int:\n nn = [nn, nn]\n\n NN = nn[0]\n\n orient = []\n if dim == 1 or dim == 0:\n orient = [[[0]]]\n elif dim == 2:\n for ed in ed_ids:\n e_orient = []\n e_orient.append(ed)\n e_orient.append(ed[::-1])\n orient.append(e_orient)\n else:\n for i in range(len(ed_ids)):\n e_orient = []\n e_type = ed_type[i]\n if e_type == 0: #Triangles\n #I couldn't find an elegant way to do this, so on a case\n #by case basis, here we go:\n #Type where opposing face had vertices 012 compared to 012\n e_orient.append(ed_ids[i])\n #Type where opposing face had vertices 120 compared to 012\n e_o = []\n start = len(ed_ids[i]) - 1\n for m in range(NN + 1):\n prev = start\n e_o.append(ed_ids[i][start])\n for n in range(NN - m):\n e_o.append(ed_ids[i][prev - (2 + n + m)])\n prev += - (2 + n + m)\n start -= (1 + m)\n e_orient.append(e_o)\n #Type where opposing face had vertices 201 compared to 012\n e_o = []\n start = NN\n for m in range(NN + 1):\n prev = start\n e_o.append(ed_ids[i][start])\n for n in range(NN - m):\n e_o.append(ed_ids[i][prev + NN - n])\n prev += NN - n\n start -= 1\n e_orient.append(e_o)\n #now a spacer (so that the orientation numbers for triangles\n #and squares can be the same...):\n e_orient.append([])\n #Type where opposing face had vertices 021 compared to 012\n e_o = []\n start = 0\n for m in range(NN + 1):\n prev = start\n e_o.append(ed_ids[i][start])\n for n in range(NN - m):\n e_o.append(ed_ids[i][prev + (NN + 1 - n)])\n prev += (NN + 1 - n)\n start += 1\n e_orient.append(e_o)\n #Type where opposing face had vertices 102 compared to 012\n e_o = []\n start = NN\n for m in range(NN + 1):\n prev = start\n e_o.append(ed_ids[i][start])\n for n in range(NN - m):\n e_o.append(ed_ids[i][prev - 1])\n prev += -1\n start += NN - m\n e_orient.append(e_o)\n #FINALLY! Last one:\n #Type where opposing face had vertices 210 compared to 012\n e_o = []\n start = len(ed_ids[i]) - 1\n for m in range(NN + 1):\n prev = start\n e_o.append(ed_ids[i][start])\n for n in range(NN - m):\n e_o.append(ed_ids[i][prev - (1 + n + m)])\n prev += - (1 + n + m)\n start -= (2 + m)\n e_orient.append(e_o)\n\n elif (e_type == 1) or (e_type == 3): #Rectangles\n #handle the case where we have n as a list\n if (e_type == 1):\n e_n = [nn[0], nn[0]]\n else:\n e_n = nn\n\n #The rectangle can be done much more elegantly -- first we\n #make an ids list - then we flip and rotate it as necessary\n ids = arange(len(ed_ids[i]))\n ids = ids.reshape(e_n[0] + 1, e_n[1] + 1)\n for jj in range(2):\n for ii in range(4):\n e_orient.append(\\\n (array(ed_ids[i])[ids.ravel()]).tolist())\n ids = rot90(ids)\n ids = ids.T\n #we have to switch two of the entries\n tmp = e_orient[1]\n e_orient[1] = e_orient[3]\n e_orient[3] = tmp\n #And we have to reverse the order for the last 3\n e_orient[-3:] = e_orient[-1:-4:-1]\n\n orient.append(e_orient)\n return orient",
"def generate_graph(num_vert, percent_edges):\n graph = [[0 for i in range(num_vert)] for j in range(num_vert)]\n num_edges = 0\n while get_percentage_edges(num_edges, num_vert) < percent_edges:\n for i in range(num_vert):\n for j in range(num_vert):\n if j != i and graph[i][j] == 0:\n edge = random.randint(0, 1)\n graph[i][j] = edge\n graph[j][i] = edge\n num_edges += edge\n if get_percentage_edges(num_edges, num_vert) >= percent_edges:\n return graph\n return graph",
"def _generate_partial_permutations(self):\n # Avoid identity permutation\n perm_a = utils.generate_random_permutation(self._graph_size)\n\n # Generate sigma\n perm_b = utils.compose_permutations(self._tau, utils.invert(perm_a))\n\n return perm_a, perm_b",
"def _guess_orders(groups, rotor_class):\n max_order = len(groups[-1])\n if rotor_class[0] == \"spheric\":\n max_order = min(max_order, 5)\n return range(2, max_order + 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
phase5 requires a 4-edge combo where none of the edges are in the z-plane. phase4 will put a 4-edge combo into that state. There are 12!/(4!8!) or 495 different 4-edge combinations. Try them all and see which one has the lowest phase4 cost.
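As a quick standalone check of that count (plain Python, not part of the solver; names are illustrative): choosing 4 of the 12 edges gives C(12, 4) = 495 combinations, matching the loop below.

import math
from itertools import combinations

edge_slots = range(12)                       # the 12 edge positions
combos = list(combinations(edge_slots, 4))   # every way to pick a 4-edge group
assert len(combos) == math.comb(12, 4) == 495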
|
def find_first_four_edges_to_pair(self):
original_state = self.state[:]
original_solution = self.solution[:]
original_solution_len = len(self.solution)
results = []
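        # Track the shortest phase-4 solution seen so far and its high/low midge pair count.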
min_phase4_solution_len = 99
min_phase4_high_low_count = 0
for (wing_str_index, wing_str_combo) in enumerate(itertools.combinations(wing_strs_all, 4)):
wing_str_combo = sorted(wing_str_combo)
self.state = original_state[:]
self.solution = original_solution[:]
self.lt_phase4.wing_strs = wing_str_combo
if self.lt_phase4.solve():
phase4_solution = self.solution[original_solution_len:]
phase4_solution_len = len(phase4_solution)
high_edge_count = self.high_edge_midge_pair_count(self.lt_phase4.wing_strs)
low_edge_count = self.low_edge_midge_pair_count(self.lt_phase4.wing_strs)
high_low_count = high_edge_count + low_edge_count
results.append((phase4_solution_len, high_low_count, wing_str_combo))
if phase4_solution_len < min_phase4_solution_len:
min_phase4_solution_len = phase4_solution_len
min_phase4_high_low_count = high_low_count
log.info(
f"{wing_str_index+1}/495 {wing_str_combo} phase-4 solution length is {phase4_solution_len}, high/low count {high_low_count} (NEW MIN)"
)
elif phase4_solution_len == min_phase4_solution_len:
if high_low_count > min_phase4_high_low_count:
min_phase4_high_low_count = high_low_count
log.info(
f"{wing_str_index+1}/495 {wing_str_combo} phase-4 solution length is {phase4_solution_len}, high/low count {high_low_count} (NEW MIN)"
)
else:
log.info(
f"{wing_str_index+1}/495 {wing_str_combo} phase-4 solution length is {phase4_solution_len}, high/low count {high_low_count} (TIE)"
)
else:
log.debug(
f"{wing_str_index+1}/495 {wing_str_combo} phase-4 solution length is {phase4_solution_len}, high/low count {high_low_count}"
)
else:
log.debug(f"{wing_str_index+1}/495 {wing_str_combo} phase-4 solution length is >= 4 ")
self.state = original_state[:]
self.solution = original_solution[:]
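        # Shortest phase-4 solution first; ties broken by preferring more high/low pairs.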
results = sorted(results, key=lambda x: (x[0], -x[1]))
# log.info("\n" + "\n".join(map(str, results[0:20])))
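        # Return only the wing_str combos from the 20 best-ranked results.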
results = [x[2] for x in results[0:20]]
return results
|
[
"def stage_first_four_edges_555(self):\n\n # return if they are already staged\n if self.x_plane_edges_are_l4e():\n log.info(\"%s: first L4E group in x-plane\" % self)\n return\n\n if self.y_plane_edges_are_l4e():\n log.info(\"%s: first L4E group in y-plane, moving to x-plane\" % self)\n self.rotate(\"z\")\n return\n\n if self.z_plane_edges_are_l4e():\n log.info(\"%s: first L4E group in z-plane, moving to x-plane\" % self)\n self.rotate(\"x\")\n return\n\n min_solution_len = None\n min_solution_steps = None\n\n # The table for staging the 1st 4-edges would have 364,058,145 if built to completion.\n # Building that table the entire way is difficult though because this is a table where\n # the centers must be kept solved...so this involves building out a HUGE table and only\n # keeping the entries where the centers are solved. To build one deep enough to find\n # all 364,058,145 entries needed that also have solved centers would probably take a\n # few months and more drive space than I have access to.\n #\n # To avoid building such a massive table we only build the table out 10-deep which gives\n # us a few million entries. We then try all 495 permutations of 4-edges out of 12-edges\n # looking for one that does have a hit. Most of the time this is all that is needed and\n # we can find a hit. On the off chance that we cannot though we need a way to find a solution\n # so what we do is try all outer layer moves up to 3 moves deep and see if any of those\n # sequences put the cube in a state such that one of the 495 edge permutations does find\n # a hit. I have yet to find a cube that cannot be solved with this approach but if I did\n # the pre_steps_to_try could be expanded to 4-deep.\n\n # Remember what things looked like\n original_state = self.state[:]\n original_solution = self.solution[:]\n original_solution_len = len(self.solution)\n\n min_solution_len = None\n min_solution_steps = None\n\n for pre_steps in pre_steps_to_try:\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n for step in pre_steps:\n self.rotate(step)\n\n post_pre_steps_state = self.state[:]\n post_pre_steps_solution = self.solution[:]\n states_to_find = []\n\n for wing_strs in itertools.combinations(wing_strs_all, 4):\n states_to_find.append(self.lt_edges_stage_first_four.state(wing_strs))\n\n log.info(\"%s: %d states_to_find\" % (self, len(states_to_find)))\n results = self.lt_edges_stage_first_four.binary_search_multiple(states_to_find)\n len_results = len(results)\n log.info(\"%s: %d states found\" % (self, len(results)))\n\n # We sort the keys of the dict so that the order is the same everytime, this isn't\n # required but makes troubleshooting easier.\n for (line_number, key) in enumerate(sorted(results.keys())):\n steps = results[key]\n self.state = post_pre_steps_state[:]\n self.solution = post_pre_steps_solution[:]\n\n for step in steps.split():\n self.rotate(step)\n\n self.stage_final_four_edges_in_x_plane()\n solution_steps = self.solution[original_solution_len:]\n solution_len = self.get_solution_len_minus_rotates(solution_steps)\n\n # Technically we only need 4 edges to be pairable for the next phase but 5 is nice because it gives\n # the next phase some wiggle room...it can choose the best 4-edge tuple.\n if min_solution_len is None or solution_len < min_solution_len:\n log.info(\n \"%s: %d/%d 1st 4-edges can be staged in %d steps %s (NEW MIN)\"\n % (\n self,\n line_number + 1,\n len_results,\n solution_len,\n \" \".join(solution_steps),\n )\n )\n min_solution_len = solution_len\n 
min_solution_steps = solution_steps\n else:\n log.info(\n \"%s: %d/%d 1st 4-edges can be staged in %d steps\"\n % (self, line_number + 1, len_results, solution_len)\n )\n\n if min_solution_len is not None:\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n for step in min_solution_steps:\n self.rotate(step)\n break\n\n if not self.x_plane_edges_are_l4e():\n raise SolveError(\"There should be an L4E group in x-plane but there is not\")\n\n # self.print_cube()\n self.solution.append(\n \"COMMENT_%d_steps_555_first_L4E_edges_staged\"\n % self.get_solution_len_minus_rotates(self.solution[original_solution_len:])\n )\n log.info(\n \"%s: 1st 4-edges staged to x-plane, %d steps in\"\n % (self, self.get_solution_len_minus_rotates(self.solution))\n )",
"def stage_second_four_edges_555(self):\n\n # return if they are already staged\n if self.y_plane_edges_are_l4e() and self.z_plane_edges_are_l4e():\n return\n\n first_four_wing_strs = list(self.get_x_plane_wing_strs())\n wing_strs_for_second_four = []\n\n log.info(\"first_four_wing_strs %s\" % pformat(first_four_wing_strs))\n\n for wing_str in wing_strs_all:\n if wing_str not in first_four_wing_strs:\n wing_strs_for_second_four.append(wing_str)\n\n log.info(\"wing_strs_for_second_four %s\" % pformat(wing_strs_for_second_four))\n assert len(wing_strs_for_second_four) == 8\n\n # Remember what things looked like\n original_state = self.state[:]\n original_solution = self.solution[:]\n original_solution_len = len(self.solution)\n\n min_solution_len = None\n min_solution_steps = None\n\n for pre_steps in pre_steps_to_try:\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n for step in pre_steps:\n self.rotate(step)\n\n post_pre_steps_state = self.state[:]\n post_pre_steps_solution = self.solution[:]\n states_to_find = []\n\n for wing_strs in itertools.combinations(wing_strs_for_second_four, 4):\n states_to_find.append(self.lt_edges_stage_second_four.state(wing_strs))\n\n log.info(\"%s: %d states_to_find\" % (self, len(states_to_find)))\n results = self.lt_edges_stage_second_four.binary_search_multiple(\n states_to_find\n )\n len_results = len(results)\n # log.info(results)\n log.info(\"%s: %d states found\" % (self, len(results)))\n\n # We sort the keys of the dict so that the order is the same everytime, this isn't\n # required but makes troubleshooting easier.\n for (line_number, key) in enumerate(sorted(results.keys())):\n steps = results[key]\n self.state = post_pre_steps_state[:]\n self.solution = post_pre_steps_solution[:]\n\n for step in steps.split():\n self.rotate(step)\n\n solution_steps = self.solution[original_solution_len:]\n solution_len = len(solution_steps)\n\n if min_solution_len is None or solution_len < min_solution_len:\n log.info(\n \"%s: %d/%d 2nd 4-edges can be staged in %d steps %s (NEW MIN)\"\n % (\n self,\n line_number + 1,\n len_results,\n solution_len,\n \" \".join(solution_steps),\n )\n )\n min_solution_len = solution_len\n min_solution_steps = solution_steps\n else:\n log.info(\n \"%s: %d/%d 2nd 4-edges can be staged in %d steps\"\n % (self, line_number + 1, len_results, solution_len)\n )\n\n if min_solution_len is not None:\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n for step in min_solution_steps:\n self.rotate(step)\n break\n\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n if min_solution_len is None:\n raise SolveError(\"Could not find 4-edges to stage\")\n else:\n for step in min_solution_steps:\n self.rotate(step)\n\n self.solution.append(\n \"COMMENT_%d_steps_555_second_L4E_edges_staged\"\n % self.get_solution_len_minus_rotates(self.solution[original_solution_len:])\n )\n log.info(\n \"%s: 2nd 4-edges staged to x-plane, %d steps in\"\n % (self, self.get_solution_len_minus_rotates(self.solution))\n )",
"def try_phase():\n global init_simp, smp_trace,aigs\n n = n_phases()\n print 'Phases = %d'%n\n## if ((n == 1) or (n_ands() > 45000) or init_simp == 0):\n if ((n == 1) or (n_ands() > 60000)):\n return False\n## init_simp = 0\n res = a_trim()\n## print hist\n print 'Trying phase abstraction - Max phase = %d'%n\n abc('w %s_phase_temp.aig'%f_name)\n na = n_ands()\n nl = n_latches()\n ni = n_pis()\n no = n_pos()\n z = ok_phases(n) # factors n into prime factors\n print z,\n if len(z) == 1:\n return False\n #p = choose_phase()\n p = z[1]\n abc('phase -F %d'%p)\n if no == n_pos(): #nothing happened because p is not mod period\n print 'Phase %d is incompatible'%p\n abc('r %s_phase_temp.aig'%f_name)\n if len(z)< 3:\n return False\n else:\n p = z[2]\n #print 'Trying phase = %d: '%p,\n abc('phase -F %d'%p)\n if no == n_pos(): #nothing happened because p is not mod period\n print 'Phase %d is incompatible'%p\n abc('r %s_phase_temp.aig'%f_name)\n return False\n else:\n smp_trace = smp_trace + ['phase -F %d'%p]\n abc('r %s_phase_temp.aig'%f_name)\n abc('&get;&frames -o -F %d;&scl;&put'%p)\n else:\n abc('r %s_phase_temp.aig'%f_name)\n abc('&get;&frames -o -F %d;&scl;&put'%p)\n smp_trace = smp_trace + ['phase -F %d'%p]\n print 'Simplifying with %d phases: => '%p,\n smp_trace = smp_trace + ['simplify(1)']\n simplify(1)\n## res = a_trim() #maybe we don't need this because rel_cost uses n_real_inputs\n ps()\n cost = rel_cost([ni,nl,na])\n print 'New relative cost = %f'%(cost)\n if cost < -.01:\n abc('w %s_phase_temp.aig'%f_name)\n if ((n_latches() == 0) or (n_ands() == 0)):\n return True\n if n_phases() == 1: #this bombs out if no latches. Need to see if any more phases to be tried.\n aigs_pp('push','phase') #this code can be simplified - \n print 'n_phases = %d'%n_phases()\n return False\n else:\n aigs_pp('push','phase')\n result = try_phase()\n return result\n elif len(z)>2: #Try the next eligible phase.\n abc('r %s_phase_temp.aig'%f_name)\n if p == z[2]: #already tried this\n return False\n p = z[2]\n print 'Trying phase = %d: => '%p,\n abc('phase -F %d'%p)\n if no == n_pos(): #nothing happened because p is not mod period\n print 'Phase = %d is not compatible'%p\n return False\n abc('r %s_phase_temp.aig'%f_name)\n abc('&get;&frames -o -F %d;&scl;&put'%p)\n smp_trace = smp_trace + ['phase -F %d'%p]\n print 'Simplify with %d phases: '%p,\n simplify(1)\n## res =a_trim() #maybe we don't need this because rel_cost uses n_real_inputs\n cost = rel_cost([ni,nl,na])\n print 'New relative cost = %f'%(cost)\n if cost < -.01:\n print 'Phase abstraction with %d phases obtained:'%p,\n print_circuit_stats()\n abc('w %s_phase_temp.aig'%f_name)\n if ((n_latches() == 0) or (n_ands() == 0)):\n return True\n if n_phases() == 1: # this bombs out if no latches\n return True\n else:\n aigs_pp('push','phase')\n result = try_phase()\n return result\n else:\n smp_trace = smp_trace + ['de_phase']\n abc('r %s_phase_temp.aig'%f_name)\n return False",
"def eo_edges(self):\n permutations = []\n original_state = self.state[:]\n original_solution = self.solution[:]\n tmp_solution_len = len(self.solution)\n\n # Build a list of the wing strings at each midge\n wing_strs = []\n\n for (_, square_index, partner_index) in midges_recolor_tuples_555:\n square_value = self.state[square_index]\n partner_value = self.state[partner_index]\n wing_str = square_value + partner_value\n wing_str = wing_str_map[square_value + partner_value]\n wing_strs.append(wing_str)\n\n # build a list of all possible EO permutations...an even number of edges must be high\n for num in range(4096):\n num = str(bin(num)).lstrip(\"0b\").zfill(12)\n if num.count(\"1\") % 2 == 0:\n permutations.append(list(map(int, num)))\n\n # Put all 2048 starting states in a file and point ida-via-graph\n # at the file so it can solve all of them and apply the one that is the shortest.\n pt_states = []\n\n for (index, permutation) in enumerate(permutations):\n must_be_uppercase = []\n must_be_lowercase = []\n self.state = original_state[:]\n\n for (wing_str, uppercase) in zip(wing_strs, permutation):\n if uppercase:\n must_be_uppercase.append(wing_str)\n else:\n must_be_lowercase.append(wing_str)\n\n # log.info(\"%s: %s permutation %s\" % (self, index, \"\".join(map(str, permutation))))\n self.edges_flip_orientation(must_be_uppercase, must_be_lowercase)\n\n pt_states.append(\n (\n self.lt_phase3_lr_center_stage_eo_inner_orbit.state_index(),\n self.lt_phase3_eo_outer_orbit.state_index(),\n )\n )\n\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n # When solve_via_c is passed pt_states (2048 lines of states in this case), it will try all 2048 of them\n # to find the state that has the shortest solution.\n self.lt_phase3.solve_via_c(pt_states=pt_states)\n\n # re-color the cube so that the edges are oriented correctly so we can\n # pair 4-edges then 8-edges. After all edge pairing is done we will uncolor\n # the cube and re-apply the solution.\n self.post_eo_state = self.state[:]\n self.post_eo_solution = self.solution[:]\n self.edges_flip_orientation(wing_strs, [])\n\n self.highlow_edges_print()\n self.print_cube()\n log.info(\n \"%s: end of phase 3, edges EOed, %d steps in\" % (self, self.get_solution_len_minus_rotates(self.solution))\n )\n self.solution.append(\n \"COMMENT_%d_steps_555_edges_EOed\" % self.get_solution_len_minus_rotates(self.solution[tmp_solution_len:])\n )",
"def rule_of_five(mol):\n\n # Ro5 descriptors\n MW = Descriptors.MolWt(mol)\n HBA = Descriptors.NumHAcceptors(mol)\n HBD = Descriptors.NumHDonors(mol)\n LogP = Descriptors.MolLogP(mol)\n\n # Ro5 conditions\n conditions = [MW <= 500, HBA <= 10, HBD <= 5, LogP <= 5]\n\n # passes Ro5 if no more than one out of four conditions is violated\n pass_ro5 = conditions.count(True) >= 3\n\n ro5 = {\n \"MW\": MW,\n \"HBA\": HBA,\n \"HBD\": HBD,\n \"LogP\": LogP,\n \"Pass_Ro5\": pass_ro5,\n }\n\n return ro5",
"def fkine_ur5(q):\n\t# Longitudes (en metros)\n\tl1 = 0.089159\n\tl2 = 0.425\n\tl3 = 0.39225\n\tl4 = 0.09465\n\tm1 = 0.0823\n\tm2 = 0.10915\n\talpha1 = np.pi/2\n\talpha4 = np.pi/2\n\talpha5 = np.pi/2\n\t# Matrices DH (completar)\n\tT1 = dh(l1, q[0], 0, \talpha1 )\n\tT2 = dh(0, q[1]-np.pi/2, -l2,\t0 )\n\tT3 = dh(0, q[2], -l3,\t0 )\n\tT4 = dh(m2, q[3] - np.pi/2, 0, \talpha4 )\n\tT5 = dh(l4, q[4] + np.pi, 0, \talpha5 )\n\tT6 = dh(m1, q[5], 0, \t0 )\n\t'''\n\talpha1 = np.pi/2\n alpha4 = np.pi/2\n alpha5 = np.pi/2\n # Matrices DH (completar)\n T1 = dh( l1, q[0], 0, alpha1 )\n T2 = dh( 0, q[1]-np.pi/2, \t -l2, 0 ) \n T3 = dh( 0, q[2], -l3, 0 )\n T4 = dh( m2, q[3]-np.pi/2, \t 0, alpha4 ) \n T5 = dh( l4, q[4]+ np.pi, 0, alpha5 )\n T6 = dh( m1, q[5], 0, 0 )'''\n\n\n\t# Efector final con respecto a la base\n\tT12 = np.dot(T1, T2)\n\tT34 = np.dot(T3, T4)\n\tT56 = np.dot(T5, T6)\n\n\tT14 = np.dot(T12, T34)\n\tT = np.dot(T14, T56)\n\treturn T",
"def test_phase_estimated(self, phase):\n estimates = []\n wire_range = range(2, 10)\n\n for wires in wire_range:\n dev = qml.device(\"default.qubit\", wires=wires)\n m = qml.RX(phase, wires=0).matrix\n target_wires = [0]\n estimation_wires = range(1, wires)\n\n with qml.tape.QuantumTape() as tape:\n # We want to prepare an eigenstate of RX, in this case |+>\n qml.Hadamard(wires=target_wires)\n\n qml.templates.QuantumPhaseEstimation(\n m, target_wires=target_wires, estimation_wires=estimation_wires\n )\n qml.probs(estimation_wires)\n\n tape = tape.expand()\n res = tape.execute(dev).flatten()\n initial_estimate = np.argmax(res) / 2 ** (wires - 1)\n\n # We need to rescale because RX is exp(- i theta X / 2) and we expect a unitary of the\n # form exp(2 pi i theta X)\n rescaled_estimate = (1 - initial_estimate) * np.pi * 4\n estimates.append(rescaled_estimate)\n\n # Check that the error is monotonically decreasing\n for i in range(len(estimates) - 1):\n err1 = np.abs(estimates[i] - phase)\n err2 = np.abs(estimates[i + 1] - phase)\n assert err1 >= err2\n\n # This is quite a large error, but we'd need to push the qubit number up more to get it\n # lower\n assert np.allclose(estimates[-1], phase, rtol=1e-2)",
"def pattern_for_phase( p, size ):\n return [[0,1,0,-1][(c//(p+1))%4] for c in range(1,size+1)]",
"def test_poly_gcd5p(self):\n\n result = 0\n\n for v in range(1, max_variables+1):\n e1 = x\n for i in range(v):\n e1 *= y[i]\n\n d = e1 - 1;\n f = d * (e1 + 3)\n g = d * (e1 - 3)\n r = gcd(f.expand(), g.expand())\n if not (r-d).expand().is_zero():\n print \"case 5p, gcd(\",f,\",\",g,\") = \",r,\" (should be \",d,\")\"\n result += 1\n\n self.assertEqual(result,0)",
"def pick_phase(self, state):\n if self.cur_phase is not None:\n if self.phase_length < 3:\n self.phase_length += 1\n return self.cur_phase\n if self.cur_phase in [1, 3]:\n self.cur_phase = (self.cur_phase + 1) % 4\n self.phase_length = 0\n return self.cur_phase\n\n sum_north_south = 0\n sum_east_west = 0\n for lane, num_cars in state.items():\n if lane in self.north_south_lanes:\n sum_north_south += num_cars\n elif lane in self.east_west_lanes:\n sum_east_west += num_cars\n else:\n print(\"warning: got value for unregistered lane for LQF: %s\" % str(lane), file=sys.stderr)\n\n if sum_north_south > sum_east_west:\n if self.cur_phase == 0:\n self.phase_length += 1\n return self.cur_phase # north-south\n else:\n self.cur_phase = 3 # prepare transition\n self.phase_length = 0\n return self.cur_phase\n else:\n if self.cur_phase == 2:\n self.phase_length += 1\n return self.cur_phase # east-west\n else:\n self.cur_phase = 1 # prepare transition\n self.phase_length = 0\n return self.cur_phase",
"def A4(t,X):\n\treturn(c2*c3*dR1_dx1(X)*dX1_dt(X)*KT_1(X)*X[6] \\\n\t\t\t\t+ c2*c3*R1(X)*dKT_1_dx3(X)*dX3_dt(X)*X[6] \\\n\t\t\t\t\t+ c2*c3*R1(X)*KT_1(X)*(c5*X[2] - c6*F_PE1_1(X) - c7*X[6] + c8*X[6]**2/X[4]) \\\n\t\t\t+ c2*c4*dR2_dx1(X)*dX1_dt(X)*KT_2(X)*X[7] \\\n\t\t\t \t+ c2*c4*R2(X)*dKT_2_dx4(X)*dX4_dt(X)*X[7] \\\n\t\t\t\t\t+ c2*c4*R2(X)*KT_2(X)*(c9*X[3] - c10*F_PE1_2(X) - c11*X[7] + c12*X[7]**2/X[5]) \\\n\t\t\t- dA3(t,X) - Z3(t,X) + k4*Z4(t,X))",
"def CalculateChi5p(mol):\r\n return _CalculateChinp(mol, NumPath=5)",
"def test_ikfast_5d_case_1(self):\n i = 0\n for (initsol, qseed, T) in zip(self.qsols, self.qseeds, self.transformations):\n i += 1\n point = T[0:3, 3]\n direction = T[0:3, 2] / np.linalg.norm(T[0:3, 2])\n ikparam = orpy.IkParameterization(orpy.Ray(point, direction), iktype5D)\n with self.robot:\n self.robot.SetActiveDOFValues(qseed)\n ts = time.time()\n qsol = self.manip.FindIKSolution(ikparam, ikfilter_checkcollision)\n te = time.time()\n \n if qsol is not None:\n self.total_time += te - ts\n self.no_success += 1\n \n with self.robot:\n self.robot.SetActiveDOFValues(qsol)\n Tmanip = self.manip.GetTransform()\n\n # Check direction\n direction_actual = Tmanip[0:3, 2] / np.linalg.norm(Tmanip[0:3, 2])\n\n try:\n np.testing.assert_allclose(direction, direction_actual, \n rtol=1e-5, atol=1e-5)\n except:\n print 'initsol = np.' + repr(initsol)\n print 'qsol = np.' + repr(qsol)\n\n # Check position\n point_actual = Tmanip[0:3, 3]\n np.testing.assert_allclose(point_actual, point, \n rtol=1e-5, atol=1e-5)\n \n self.assertTrue((qsol <= self.q_max).all(), msg=\"Violate joint limits\")\n self.assertTrue((self.q_min <= qsol).all(), msg=\"Violate joint limits\")",
"def CalculateChi4p(mol):\r\n return _CalculateChinp(mol, NumPath=4)",
"def get_automaton_5():\n q0 = WeightedState(\"q0\", 1, 0.05)\n q1 = WeightedState(\"q1\", 0, 0.05)\n q2 = WeightedState(\"q2\", 0, 0.05)\n q3 = WeightedState(\"q3\", 0, 0.05)\n\n q0.add_transition(zero, q3, 0.665)\n q0.add_transition(one, q1, 0.285)\n q1.add_transition(zero, q2, 0.285)\n q1.add_transition(one, q0, 0.665)\n q2.add_transition(zero, q1, 0.285)\n q2.add_transition(one, q3, 0.665)\n q3.add_transition(zero, q0, 0.285)\n q3.add_transition(one, q2, 0.665)\n\n states = {q0, q1, q2, q3}\n comparator = WFAToleranceComparator()\n return ProbabilisticDeterministicFiniteAutomaton(binaryAlphabet, states, SymbolStr(\"$\"), comparator, \"WeightedTomitas5\")",
"def test_5q_circuit_20q_coupling(self):\n # ┌───┐\n # q_0: ──■───────┤ X ├───────────────\n # │ └─┬─┘┌───┐\n # q_1: ──┼────■────┼──┤ X ├───────■──\n # ┌─┴─┐ │ │ ├───┤┌───┐┌─┴─┐\n # q_2: ┤ X ├──┼────┼──┤ X ├┤ X ├┤ X ├\n # └───┘┌─┴─┐ │ └───┘└─┬─┘└───┘\n # q_3: ─────┤ X ├──■─────────┼───────\n # └───┘ │\n # q_4: ──────────────────────■───────\n qr = QuantumRegister(5, \"q\")\n circuit = QuantumCircuit(qr)\n circuit.cx(qr[0], qr[2])\n circuit.cx(qr[1], qr[3])\n circuit.cx(qr[3], qr[0])\n circuit.x(qr[2])\n circuit.cx(qr[4], qr[2])\n circuit.x(qr[1])\n circuit.cx(qr[1], qr[2])\n\n dag = circuit_to_dag(circuit)\n pass_ = SabreLayout(CouplingMap(self.cmap20), seed=0, swap_trials=32, layout_trials=32)\n pass_.run(dag)\n\n layout = pass_.property_set[\"layout\"]\n self.assertEqual([layout[q] for q in circuit.qubits], [18, 11, 13, 12, 14])",
"def check_four(hand):\n values = sorted([card_values[x[0]] for x in hand])\n\n if len(set(values[:4])) == 1:\n res = [8, max(values[:4]), 0, values[4:]]\n elif len(set(values[1:])) == 1:\n res = [8, max(values[1:]), 0, values[:1]]\n else:\n res = []\n\n return res",
"def pick_5v5(side):\n if side != 0 and side != 1:\n print(\"Error. Pick a side: A (0) or B (1) for player.\")\n return [], []\n\n AI_turn = side\n \n A_side = []\n B_side = []\n \n while len(B_side) < 4:\n A_chosen = turn_pick(AI_turn, A_side, B_side)\n A_side.append(A_chosen)\n \n AI_turn = (AI_turn+1)%2\n \n B_chosen = turn_pick(AI_turn, A_side, B_side)\n B_side.append(B_chosen)\n \n B_chosen = turn_pick(AI_turn, A_side, B_side)\n B_side.append(B_chosen)\n \n AI_turn = (AI_turn+1)%2\n \n A_chosen = turn_pick(AI_turn, A_side, B_side)\n A_side.append(A_chosen)\n \n A_chosen = turn_pick(AI_turn, A_side, B_side)\n A_side.append(A_chosen)\n \n AI_turn = (AI_turn+1)%2\n \n B_chosen = turn_pick(AI_turn, A_side, B_side)\n B_side.append(B_chosen)\n\n return [], [], A_side, B_side",
"def getPhase(self,nE,s):\n\n # Get initial phase values MUX and MUY from previous element\n # or start marker (if nE=0)\n if nE != 0:\n prevE = self.elems[nE-1]\n else:\n prevE = self.markers[0]\n\n if s == 0:\n return dct([('MUX', prevE.MUX),\n ('MUY', prevE.MUY)])\n\n para = self.getBeta(nE,s)\n\n # Copy element nE and change length to location of interest, s\n # Calculate transport matrix for this element assuming D=0\n # If nE is not DRIFT, QUADRUPOLE or DIPOLE, change element to\n # DRIFT and recalculate transport matrix\n e = dct(self.elems[nE])\n if e.L != 0:\n e.K1L = (e.K1L / e.L) * s\n e.ANGLE = (e.ANGLE / e.L) * s\n e.L = s\n m = matrixForElement(e, 6) # NOTE: Takes order 6\n if m == None:\n e.KEYWORD = \"DRIFT\"\n m = matrixForElement(e, 6)\n m = m(d=0)\n\n # Calculate cos(delta Phi) and sin(delta Phi) in x and y planes\n xy = m.item((0,1)) / math.sqrt(prevE.BETX * para.BETX)\n xx = (math.sqrt(prevE.BETX) * m.item((0,0)) / math.sqrt(para.BETX)) - (prevE.ALFX * xy)\n\n yy = m.item((2,3)) / math.sqrt(prevE.BETY * para.BETY)\n yx = (math.sqrt(prevE.BETY) * m.item((2,2)) / math.sqrt(para.BETY)) - (prevE.ALFY * yy)\n\n thetaX = math.atan2(xy,xx)\n thetaY = math.atan2(yy, yx)\n if thetaX < 0:\n thetaX =+ 2 * math.pi\n if thetaY < 0:\n thetaY =+ 2 * math.pi\n\n return dct([('MUX', thetaX / (2 * math.pi) + prevE.MUX),\n ('MUY', thetaY / (2 * math.pi) + prevE.MUY)])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the OID for the row
|
def oid(self):
if self.oid_field_ob:
return self.atts[self.oid_field_ob.name]
return None
|
[
"def get_nature_id(row):\n return row[\"NVRID\"]",
"def oid_column_name(self):\n raise NotImplementedError()",
"def _get_col_id(self):",
"def orcid_identity(self):\n from api.models.inspirehep import OrcidIdentity\n try:\n return OrcidIdentity.objects.get(orcid_value=self['value'])\n except OrcidIdentity.DoesNotExist:\n return None",
"def test_oid(self):\n from sqlalchemy.databases import postgres\n dialect = postgres.PGDialect()\n dialect.max_identifier_length = 30\n tt = table1.select(use_labels=True).alias('foo')\n x = select([tt], use_labels=True, order_by=tt.oid_column).compile(dialect=dialect)\n #print x\n # assert it doesnt end with \"ORDER BY foo.some_large_named_table_this_is_the_primarykey_column\"\n assert str(x).endswith(\"\"\"ORDER BY foo.some_large_named_table_t_2\"\"\")",
"def getOperonIDfromGene(self, geneobject):\n\t\traise NotImplementedError(\"Abstract Base Class\")",
"def getLastRowID(self): \n return self.lastRowID",
"def get_oid_val(rtr, oid):\n snmp_user = (a_user, auth_key, encrypt_key)\n return snmp_extract(snmp_get_oid_v3(rtr, snmp_user, oid=oid))",
"def row_number(self) -> int:\n return pulumi.get(self, \"row_number\")",
"def get_oid(cls, name):\n tail = \"\"\n match = rx_tailing_numbers.match(name)\n if match:\n name, tail = match.groups()\n # Search by primary name\n d = MIBData.objects.filter(name=name).first()\n if not d:\n # Search by aliases\n d = MIBData.objects.filter(aliases=name).first()\n if d:\n return d.oid + tail\n return None",
"def get_oid(self, username):\n # TODO: OID is cut to 30 chars due to django username limitation\n return 'MPASSOID.{user_hash}'.format(user_hash=hashlib.sha1('ad_oulu' + username).hexdigest())[:30]",
"def observation_id_col(self):\n return 'observation_id'",
"def extract_legacy_id(oid):\r\n\t\treturn struct.unpack('L', oid.binary[-4:])[0]",
"def get_excel_row_index(row: Union[str, int]) -> int:\n\treturn int(row)-1",
"def _get_current_row(cls):\n # https://unix.stackexchange.com/a/183121/181714\n # via http://stackoverflow.com/a/2575525\n script = r\"IFS=';' read -sdR -p $'\\E[6n' ROW COL;echo \\\"${ROW#*[}\\\"\"\n try:\n p = subprocess.Popen(script, shell=True, stdout=subprocess.PIPE)\n return int(p.communicate(timeout=1)[0].decode('utf-8').strip()) - 1\n except Exception: # noqa: PIE786\n return None",
"def generate_oid(object: persistent.Persistent) -> OID_TYPE:\n oid = getattr(object, \"_p_oid\")\n if oid:\n assert isinstance(oid, OID_TYPE)\n return oid\n\n id: Optional[str] = getattr(object, \"id\", None) or getattr(object, \"_id\", None)\n if id:\n return Database.hash_id(id)\n\n return Database.new_oid()",
"def get_next_row_id(self):\n if len(self.deleted_row_ids) > 0:\n # is there an existing deleted row?\n rowid = self.deleted_row_ids.pop()\n else:\n rowid = len(self.tuples)\n # append an empty row\n self.tuples.append([])\n return rowid",
"def _get_trapv1_oid(self, pmod, req_pdu):\n generic_trap = pmod.apiTrapPDU.getGenericTrap(req_pdu)\n if generic_trap == 6:\n # Enterprise trap <enterprise>.<specific>\n return '{}.{}'.format(\n pmod.apiTrapPDU.getEnterprise(req_pdu).prettyPrint(),\n pmod.apiTrapPDU.getSpecificTrap(req_pdu)\n )\n try:\n generic_trap_int = int(generic_trap) + 1\n except ValueError:\n return None\n else:\n return '1.3.6.1.6.3.1.1.5.{}'.format(generic_trap_int)",
"def getOID(self, digestType):\n if isinstance(digestType, tuple):\n return digestType\n if digestType in self.driver2OID:\n return self.driver2OID[digestType]\n return obj2oid(digestType)",
"def _get_oid_for_managed_object_name(self, name):\n oid, label, suffix = self.mib_view_controller.getNodeName(name)\n return oid + suffix"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Method to return a MapService object; supports wildcards
|
def get_MapService(self, name_or_wildcard):
full_path = self.get_service_url(name_or_wildcard)
if full_path:
return MapService(full_path, token=self.token)
|
[
"def service_mapping():\n return \"/foo/{anything}/bar\"",
"def get_map_search(self):\n return # osid.mapping.MapSearch",
"def _map_service_to_driver(self, service):\n\n if service in mapper:\n return mapper[service]\n return service",
"def request_map():\n rospy.loginfo(\"Requesting the map\")\n\n # rospy.wait_for_service('map')\n try:\n Imported = rospy.ServiceProxy('static_map', GetMap)\n resp1 = Imported()\n\n except rospy.ServiceException, e:\n print \"Service call failed: %s\" % e\n return None\n rospy.loginfo(\"Got map\")\n return resp1.map",
"def get_services(self, provider):\n if not self.mapping:\n self.read_data()\n return self.mapping[provider]",
"def serve_map(self, foo):\n if(self.phase == 1):\n rospy.loginfo(\"Requesting the map for map converter\")\n rospy.wait_for_service('dynamic_map')\n try:\n map_service = rospy.ServiceProxy('dynamic_map', GetMap)\n responce = map_service()\n return responce.map\n except rospy.ServiceException as e:\n rospy.loginfo(\"service call failed: %s\" %e)\n return None\n else:\n return self.static_map",
"def request_map():\n\n rospy.loginfo(\"Requesting the map\")\n rospy.wait_for_service('get_map')\n rospy.loginfo(\"STEP 1 ...................................\")\n\n try:\n rospy.loginfo(\"STEP 2 ..............................\")\n mapserver = rospy.ServiceProxy('get_map', nav_msgs / GetMap)\n rospy.loginfo(\"STEP 3 ..............................\")\n newmap = mapserver()\n\n return newmap.map\n\n except rospy.ServiceException, e:\n print \"expand_map service call unsuccessful: %s\" % e",
"def gen_svc_lookup():\n def fun(self):\n \"\"\"\n self is an instance of the tester class\n \"\"\"\n if not hasattr(self, \"_rimi_service_obj\"):\n\n # get service from container proc manager\n relevant_services = [\n item[1] for item in self.container.proc_manager.procs.items() \n if type(item[1]) == service_type\n ]\n\n assert (0 < len(relevant_services)), \\\n \"no services of type '%s' found running in container!\" % service_type\n \n\n service_itself = relevant_services[0]\n self._rimi_service_obj = service_itself\n assert(self._rimi_service_obj)\n\n return self._rimi_service_obj\n\n if not hasattr(self.tester_class, \"_rimi_getservice\"): \n add_new_method(\"_rimi_getservice\", \"Finds the embedded service\", fun)",
"def service2_mapping():\n return \"/ip/{anything}\"",
"def get_maps_by_query(self, map_query):\n return # osid.mapping.MapList",
"def _map_response(self, response: TextResponse) -> YelpService:\n return YelpService(name=self._extract_service_name(response),\n address=self._extract_service_address(response),\n phone=self._extract_service_phone(response))",
"def getComponentMaps(self, namePattern):\n filterSpec= self.__ConfServiceclient.factory.create(\"componentMapFilterSpecDataObj\")\n filterSpec.namePattern= namePattern\n try:\n return self.__ConfServiceclient.service.getComponentMaps(filterSpec)\n except suds.WebFault as detail:\n return detail",
"def _call_get_map_srv(self):\n rospy.wait_for_service('get_map')\n get_map = rospy.ServiceProxy('get_map', GetMap)\n try:\n self.static_map = get_map(self.map_nr).map\n return True\n except rospy.ServiceException:\n rospy.logerr(\"Costmap generator: Could not receive map from map server.\")\n return False",
"def get_maps_by_provider(self, resource_id):\n return # osid.mapping.MapList",
"def get_service():\n\n service = build(\"customsearch\", \"v1\",\n developerKey=api_key)\n return service",
"def get_map_query(self):\n return # osid.mapping.MapQuery",
"def service_any(service):\n service_type = service['type']\n service_port = service['data']['name']\n data = {\n \"type\":service_type,\n \"dst\":service_port\n }\n return data",
"def newOffsetMap(**kwds):\n # get the factory\n from .OffsetMap import OffsetMap\n # instantiate and return it\n return OffsetMap(**kwds)",
"def get_maps_by_ids(self, map_ids):\n return # osid.mapping.MapList",
"def get_service(dic):\n occurrence = dic['occurrence']\n event = occurrence.event\n service = Service.objects.get(pk=event.pk)\n\n return service"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the memory location as a string
|
def memory_location(value):
return hex(id(value))
|
[
"def string_read( self, mem_addr ):\n\t\tstorage_length = self.byte_read( mem_addr )\n\t\tbin = struct.unpack(\"%is\"%storage_length, self.read(mem_addr+1, storage_length) )[0]\n\t\treturn bin.decode(\"UTF-8\").rstrip('\\x00')",
"def string_get(self, ypos, xpos, length):\n # the screen's co-ordinates are 1 based, but the command is 0 based\n xpos -= 1\n ypos -= 1\n cmd = self.exec_command('ascii({0},{1},{2})'.format(ypos, xpos, length).encode(\"utf-8\"))\n # this usage of utf-8 should only return a single line of data\n assert len(cmd.data) == 1, cmd.data\n return cmd.data[0].decode(\"utf-8\")",
"def get_string(self, offset):\r\n table_offset = self['sh_offset']\r\n s = parse_cstring_from_stream(self.stream, table_offset + offset)\r\n return s",
"def get_char_from_mem(self, x):\n return self._sequence[x]",
"def get_address(self, offset):\n mode = self.modes[offset - 1]\n if mode == 0:\n return self.memory[self.iptr + offset]\n if mode == 1:\n return self.iptr + offset\n return self.memory[self.iptr + offset] + self.base",
"def get_string_at(self, _ea):\t\n\t\tif (_ea != BADADDR):\n\t\t\tstype = idc.GetStringType(_ea)\n\t\t\treturn idc.GetString(_ea, strtype=stype) \n\t\treturn \"\"",
"def parse_location(die):\n\n\n if LOC in die.attributes:\n loc = die.attributes[LOC]\n elif CVAL in die.attributes:\n return '$' + str(die.attributes[CVAL].value)\n else:\n return ''\n\n if loc.form != EXPR:\n print('Unrecognized location encoding:')\n print('\\t%s\\t%s' % (die.attributes[LOC].form, die.attributes[LOC].value))\n return '???'\n\n try:\n if hasattr(loc, 'value'):\n loc = loc.value\n\n # shitty hack\n if type(loc) is int:\n loc = [loc]\n\n if loc[0] == OP_CFA:\n if len(loc) > 1:\n # Indicates (signed) LEB128 offset from base pointer\n return get_leb128(loc[1:])\n else:\n # Not sure what this means, maybe just %rbp ?\n return '%rbp'\n\n if loc[0] >= OP_REG and loc[0] < OP_BREG:\n # Indicates in-register location\n\n # TODO: figure out size of operand and change register name accordingly\n result = regs[loc[0] - OP_REG]\n return '%' + result\n\n if loc[0] >= OP_BREG:\n if len(loc) > 1:\n # Get offset from register\n offset = get_leb128(loc[1:])\n else:\n offset = ''\n\n try:\n # Get register\n reg = regs[loc[0] - OP_BREG]\n\n return [offset, reg]\n except:\n return '???'\n\n except:\n print('Unable to resolve location: %s' % loc)\n try: print('\\t(decoded: %s)' % get_leb128(loc))\n except: pass\n raise",
"def get_string_from_table(self, offset):\r\n return parse_cstring_from_stream(self.debug_str_sec.stream, offset)",
"def locate(self, location: 'char const *') -> \"ScXMLDataObj *\":\n return _coin.ScXMLECMAScriptEvaluator_locate(self, location)",
"def get_position(position, entry):\n\n return entry.split(' ')[position]",
"def memory():\n return myself()[MEMORY]",
"def getLocationString(self, string: 'SbString') -> \"void\":\n return _coin.SoInput_getLocationString(self, string)",
"def locate(self, location: 'char const *') -> \"ScXMLDataObj *\":\n return _coin.ScXMLEvaluator_locate(self, location)",
"def readMem(addr): \n return mem[addr]",
"def GetPositionStr(self):\n return \"Line %i.%i\" % (self.lineNum, self.linePos)",
"def read(self, offset):\n addr = self.get_address(offset)\n return self.memory[addr] if addr in self.memory else 0",
"def location(s, (x,y)):\n\t\treturn s.matrix[x][y]",
"def Locate(self, string, string_1, string_2):\n ...",
"def fetch(address):\n return self.memory[address]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Downloads the quickdraw data for the supplied class_name
|
def download_examples_for_class(class_name, temp_dir):
class_url = class_name.replace('_', '%20')
download_url = QUICKDRAW_NUMPY_BASE_URL + f'{class_url}.npy'
download_filepath = os.path.join(temp_dir, f'{class_name}.npy')
file_already_exists = os.path.isfile(download_filepath)
if (not file_already_exists):
log(f'Downloading [{class_name}] training data from "{download_url}"')
urllib.request.urlretrieve(download_url, download_filepath)
else:
log(f'Data file for [{class_name}] already exists. Using existing file.')
return download_filepath
|
[
"def get_class_data(classname, fname=None, **kwargs):\n \n # options for filter and page size\n flt = kwargs.get(\"flt\", \"\")\n page_size = kwargs.get(\"page_size\", 75000)\n page = kwargs.get(\"page\", 0)\n if len(flt)>0: flt=\"&%s\" % flt\n if \"order-by\" not in flt: flt=\"&order-by=%s.dn%s\" % (classname, flt)\n \n if fname is not None:\n try:\n logging.debug(\"reading file %s\" % fname)\n with open(fname, \"r\") as f:\n jst = time.time()\n j = json.loads(f.read())\n logging.debug(\"json load time: %s\" % td(jst, time.time()))\n return j \n except Exception as e:\n logging.error(\"unabled to read %s: %s\" % (c,e))\n return {}\n except ValueError as e:\n logging.warning(\"failed to decode json for class %s\"%classname) \n return {} \n except TypeError as e:\n logging.warning(\"failed to decode json for class %s\"%classname) \n return {}\n else:\n # walk through pages until return count is less than page_size \n results = []\n while 1:\n cmd = \"icurl -s 'http://127.0.0.1:7777/api/class/\"\n cmd+= \"%s.json?page-size=%s&page=%s%s'\" % (classname,\n page_size, page, flt)\n cmd+= \" 2> /dev/null\"\n icurlst = time.time()\n data = online_get_cli(cmd)\n logging.debug(\"icurl time: %s\" % td(icurlst, time.time()))\n\n # failed to get data\n if data is None: \n logging.warning(\"failed to get data for class: %s\" % classname)\n return {}\n\n # parse json data\n try:\n jst = time.time()\n js = json.loads(data)\n logging.debug(\"json load time: %s\" % td(jst, time.time()))\n if \"imdata\" not in js or \"totalCount\" not in js:\n logging.error(\"invalid icurl result: %s\" % js)\n return {}\n results+=js[\"imdata\"]\n logging.debug(\"results count: %s/%s\" % (\n len(results),js[\"totalCount\"]))\n if len(js[\"imdata\"])<page_size or \\\n len(results)>=int(js[\"totalCount\"]):\n logging.debug(\"all pages received\")\n r = {\n \"imdata\": results,\n \"totalCount\": len(results)\n }\n return r\n page+= 1\n \n except ValueError as e:\n logging.warning(\"failed to decode json for class %s\"%classname)\n return {} \n except TypeError as e:\n logging.warning(\"failed to decode json for class %s\"%classname)\n return {}\n \n # some unknown error, return empty result\n logging.warning(\"unexpecedt error occurred when getting class %s\"%classname)\n return {}",
"def download_data(self):\r\n self.engine = self.connect_database()\r\n self.student_demographics_file = self.staff_demographics_file = ''\r\n if self.student_demographics_url:\r\n self.student_demographics_file = self.download_file(self.student_demographics_url)\r\n if self.staff_demographics_url:\r\n self.staff_demographics_file = self.download_file(self.staff_demographics_url)\r\n if self.school_geography_url:\r\n self.school_geography_file = self.download_file(self.school_geography_url)",
"def load_dataset(self):",
"def fetch_detailed_dataset(args=None):\n\n print(\"Fetching detailed Wikipaintings dataset by scraping artwork pages.\")\n\n if args is None:\n args = {\n 'force_dataset': False,\n 'force_basic': False,\n 'num_workers': 1, 'mem': 2000,\n 'cpus_per_task': 1, 'async': True,\n 'chunk_size': 10\n }\n\n basic_df = vislab.datasets.wikipaintings.get_basic_df(force=args.pop('force_basic'), args=args)\n\n db = vislab.util.get_mongodb_client()[vislab.datasets.wikipaintings.DB_NAME]\n collection = db['image_info']\n print(\"Old collection size: {}\".format(collection.count()))\n\n force = args['force_dataset']\n if not force:\n # Exclude ids that were already computed.\n image_ids = basic_df.index.tolist()\n image_ids = vislab.util.exclude_ids_in_collection(\n image_ids, collection)\n basic_df = basic_df.loc[image_ids]\n\n # Chunk up the rows.\n rows = [row.to_dict() for ind, row in basic_df.iterrows()]\n chunk_size = args['chunk_size']\n num_chunks = len(rows) / chunk_size\n if num_chunks == 0 and len(rows) > 0:\n num_chunks = 1\n\n if num_chunks == 0:\n chunks = []\n else:\n chunks = np.array_split(rows, num_chunks)\n args_list = [(chunk.tolist(), force) for chunk in chunks]\n\n # Work the jobs.\n vislab.utils.distributed.map_through_rq(\n vislab.datasets.wikiart_scraping._fetch_artwork_infos,\n args_list, 'wikipaintings_info',\n num_workers=args['num_workers'], mem=args['mem'],\n cpus_per_task=args['cpus_per_task'], async=args['async'])\n print(\"Final collection size: {}\".format(collection.count()))\n\n # Assemble into DataFrame to return.\n # Drop artworks without an image.\n orig_df = pd.DataFrame([doc for doc in collection.find()])\n df = orig_df.dropna(subset=['image']).copy()\n\n # Rename some columns and add an index.\n new_column_names = {'image': 'image_url',\n 'locationCreated': 'location_created',\n 'dateCreated': 'date'}\n df.columns = [new_column_names.pop(col, col) for col in df.columns]\n\n print '---- Columns: ', df.columns.values\n df.index = pd.Index(df['image_id'], name='image_id')\n\n # Only take useful columns.\n columns_to_take = [\n 'image_id', 'artist_slug', 'artwork_slug', 'date',\n 'genre', 'style', 'keywords', 'name',\n 'page_url', 'image_url', 'description',\n 'location_created', 'media', 'location'\n ]\n df = df[columns_to_take]\n print '---- Columns took: ', df.columns.values\n\n # NOTE: some image urls can be unicode!\n\n# # Drop artworks with messed up image urls\n# good_inds = []\n# for ind, row in df.iterrows():\n# try:\n# str(row['image_url'])\n# good_inds.append(ind)\n# except:\n# pass\n# df = df.ix[good_inds]\n# df['image_url'] = df['image_url'].apply(lambda x: str(x))\n\n return df",
"def print_class_details(self, fname: str, classname: str) -> None:\n fobj = open(fname, \"w\")\n fobj.write(self.header % (classname, self.style))\n\n fobj.write(\"<h1>%s</h1>\\n\" % (classname))\n\n sizes = [tobj.get_max_size() for tobj in self.index[classname]]\n total = 0\n for s in sizes:\n total += s\n data = {'cnt': len(self.index[classname]), 'cls': classname}\n data['avg'] = pp(total / len(sizes))\n data['max'] = pp(max(sizes))\n data['min'] = pp(min(sizes))\n fobj.write(self.class_summary % data)\n\n fobj.write(self.charts[classname])\n\n fobj.write(\"<h2>Coalesced Referents per Snapshot</h2>\\n\")\n for snapshot in self.snapshots:\n if snapshot.classes and classname in snapshot.classes:\n merged = snapshot.classes[classname]['merged']\n fobj.write(self.class_snapshot % {\n 'name': snapshot.desc,\n 'cls': classname,\n 'total': pp(merged.size),\n })\n if merged.refs:\n self._print_refs(fobj, merged.refs, merged.size)\n else:\n fobj.write('<p>No per-referent sizes recorded.</p>\\n')\n\n fobj.write(\"<h2>Instances</h2>\\n\")\n for tobj in self.index[classname]:\n fobj.write('<table id=\"tl\" width=\"100%\" rules=\"rows\">\\n')\n fobj.write('<tr><td id=\"hl\" width=\"140px\">Instance</td>' +\n '<td id=\"hl\">%s at 0x%08x</td></tr>\\n' %\n (tobj.name, tobj.id))\n if tobj.repr:\n fobj.write(\"<tr><td>Representation</td>\" +\n \"<td>%s </td></tr>\\n\" % tobj.repr)\n fobj.write(\"<tr><td>Lifetime</td><td>%s - %s</td></tr>\\n\" %\n (pp_timestamp(tobj.birth), pp_timestamp(tobj.death)))\n if tobj.trace:\n trace = \"<pre>%s</pre>\" % (_format_trace(tobj.trace))\n fobj.write(\"<tr><td>Instantiation</td><td>%s</td></tr>\\n\" %\n trace)\n for (timestamp, size) in tobj.snapshots:\n fobj.write(\"<tr><td>%s</td>\" % pp_timestamp(timestamp))\n if not size.refs:\n fobj.write(\"<td>%s</td></tr>\\n\" % pp(size.size))\n else:\n fobj.write(\"<td>%s\" % pp(size.size))\n self._print_refs(fobj, size.refs, size.size)\n fobj.write(\"</td></tr>\\n\")\n fobj.write(\"</table>\\n\")\n\n fobj.write(self.footer)\n fobj.close()",
"def download(args):\n print(\"Downloading classes from AudioSet.\")\n\n for class_name in args.classes:\n utils.download(class_name, args)",
"def download_show(self, url):",
"def _download_data(self):\n self.raw_data = requests.get(self.api_address).json()\n self.age = datetime.now()",
"def scrap_classes():\n\n config = load_config()\n session = requests.session()\n\n with session.post('https://myclass.apps.binus.ac.id/Auth/Login', data={\n 'Username': config['login']['username'],\n 'Password': config['login']['password'],\n 'btnSubmit': True\n }) as response:\n try:\n assert response.json()['Status']\n except:\n return print('Error: Failed to login to BINUS Classes site!')\n\n with session.get('https://myclass.apps.binus.ac.id/Home/GetViconSchedule') as response:\n result = response.json()\n\n for class_data in result:\n date = class_data['DisplayStartDate']\n time = class_data['StartTime'] + ' - ' + class_data['EndTime']\n\n code = class_data['ClassCode']\n delivery = class_data['DeliveryMode'] + ' - ' + class_data['SsrComponentDescription']\n course = class_data['CourseCode'] + ' - ' + class_data['CourseTitleEn']\n\n week = class_data['WeekSession']\n session = class_data['CourseSessionNumber']\n\n meeting_url = class_data['MeetingUrl']\n meeting_id = class_data['MeetingId']\n meeting_password = class_data['MeetingPassword']\n\n student_class = StudentClass(date, time, code, delivery, course, week, session)\n if meeting_url != '-':\n meeting = MeetingInfo(meeting_id, meeting_password, meeting_url)\n student_class.meeting = meeting\n\n student_classes.append(student_class)",
"def quick_load(name='quick_save', file_format='.pkl', pkg='pandas'):\n path = os.path.abspath(os.path.join(os.path.dirname('.'), \n 'quick_saves/' + name + file_format))\n df = load_df(path, file_format=file_format, pkg=pkg)\n return df",
"def download_dataset(self):\n data_dir = self.get_download_data_dir()\n cache_dir = self.get_download_cache_dir()\n self.download_dataset_files(data_dir, cache_dir)",
"def show_dataset(self, name, module_id=None):\n # Ensure that the specified file exists\n notebook = self.api.get_notebook()\n module = notebook.get_module(module_id)\n if not module is None and name in module.datasets:\n ds = notebook.fetch_dataset(\n dataset=notebook.workflow.datasets[module.datasets[name]]\n )\n header = ['[ID]'] + [col.name for col in ds.columns]\n rows = [header]\n for row in ds.fetch_rows():\n values = [str(row.identifier)] + [str(val) for val in row.values]\n rows.append(values)\n self.output(rows)\n else:\n print('unknown dataset \\'' + name + '\\'')\n return True",
"def run(self):\n file1 = os.path.join(self.raw, '00001.info.json')\n some_data = os.path.isfile(file1)\n if not some_data:\n if self.takeout is not None:\n self.download_data()\n else:\n self.deprecated_download_data_via_youtube_dl_login()\n some_data = os.path.isfile(file1)\n if some_data:\n self.start_analysis()\n else:\n print('No data was downloaded.')",
"def download_data(self):\n self.raw_df = pd.read_table(self.DATA_LINK)",
"def inception_v3_download():",
"def download_dataset(self):\n # Download the raw data\n data_dir = os.path.join(self.output_path, \"data\")\n url = \"https://raw.githubusercontent.com/sylinrl/TruthfulQA/main/TruthfulQA.csv\"\n ensure_directory_exists(data_dir)\n ensure_file_downloaded(source_url=url, target_path=os.path.join(data_dir, self.DATASET_FILE_NAME))",
"def _download_datasets():\n def filepath(*args):\n return abspath(join(dirname(__file__), *args))\n for name in DATASETS_TO_DOWNLOAD:\n data = Dataset(name)\n url = data.url\n filename = filepath(data.filename)\n print(\"retrieving data {0} -> {1}\".format(url, filename))\n urlretrieve(url, filename)\n with open(filepath('listing.txt'), 'w') as f:\n f.write('\\n'.join(DATASETS_TO_DOWNLOAD) + '\\n')",
"def download_dataset(self, path, user, name, version=None):\n\n if version == None:\n version = self.get_dataset_latest_version(user, name)\n version = version[\"uuid\"]\n\n super().download_dataset(path, user, name, version)",
"def project_data_download(project_key, safe_name):\n cu = CampaignUsers.query.filter(CampaignUsers.wms_key==project_key).first_or_404()\n project = Campaign.query.get(cu.campaign_id)\n user = User.query.get(cu.user_id)\n layer = ObservationLayer.query.filter_by(safe_name=safe_name, user_id=user.id, campaign_id=project.id).first_or_404()\n return jsonify(layer.as_featurecollection()),200"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Loads the quickdraw training data for the supplied class_name into a numpy array in mmap mode. The data will not be loaded into memory; instead it is read from disk, which allows reading a small set of examples without loading all the examples into memory.
|
def load_examples_for_class(class_name, examples_dir, mmap_mode='r'):
examples_filepath = os.path.join(examples_dir, f'{class_name}.npy')
return np.load(examples_filepath, mmap_mode=mmap_mode)
|
[
"def download_examples_for_class(class_name, temp_dir):\n class_url = class_name.replace('_', '%20')\n download_url = QUICKDRAW_NUMPY_BASE_URL + f'{class_url}.npy'\n download_filepath = os.path.join(temp_dir, f'{class_name}.npy')\n file_already_exists = os.path.isfile(download_filepath)\n\n if (not file_already_exists):\n log(f'Downloading [{class_name}] training data from \"{download_url}\"')\n urllib.request.urlretrieve(download_url, download_filepath)\n else:\n log(f'Data file for [{class_name}] already exists. Using existing file.')\n\n return download_filepath",
"def _loadData(self,\n root_dir,\n cls,\n min_score=-1000,\n loading_groundtruth=False):\n # construct objectDetections object to hold detection data\n t_data = tData()\n data = []\n eval_2d = True\n eval_3d = True\n\n seq_data = []\n n_trajectories = 0\n n_trajectories_seq = []\n for seq, s_name in enumerate(self.sequence_name):\n i = 0\n filename = os.path.join(root_dir, \"%s.txt\" % s_name)\n f = open(filename, \"r\")\n\n f_data = [\n [] for x in range(self.n_frames[seq])\n ] # current set has only 1059 entries, sufficient length is checked anyway\n ids = []\n n_in_seq = 0\n id_frame_cache = []\n for line in f:\n # KITTI tracking benchmark data format:\n # (frame,tracklet_id,objectType,truncation,occlusion,alpha,x1,y1,x2,y2,h,w,l,X,Y,Z,ry)\n line = line.strip()\n fields = line.split(\" \")\n # classes that should be loaded (ignored neighboring classes)\n if \"car\" in cls.lower():\n classes = [\"car\", \"van\"]\n elif \"pedestrian\" in cls.lower():\n classes = [\"pedestrian\", \"person_sitting\"]\n else:\n classes = [cls.lower()]\n classes += [\"dontcare\"]\n if not any([s for s in classes if s in fields[2].lower()]):\n continue\n # get fields from table\n t_data.frame = int(float(fields[0])) # frame\n t_data.track_id = int(float(fields[1])) # id\n t_data.obj_type = fields[\n 2].lower() # object type [car, pedestrian, cyclist, ...]\n t_data.truncation = int(\n float(fields[3])) # truncation [-1,0,1,2]\n t_data.occlusion = int(\n float(fields[4])) # occlusion [-1,0,1,2]\n t_data.obs_angle = float(fields[5]) # observation angle [rad]\n t_data.x1 = float(fields[6]) # left [px]\n t_data.y1 = float(fields[7]) # top [px]\n t_data.x2 = float(fields[8]) # right [px]\n t_data.y2 = float(fields[9]) # bottom [px]\n t_data.h = float(fields[10]) # height [m]\n t_data.w = float(fields[11]) # width [m]\n t_data.l = float(fields[12]) # length [m]\n t_data.X = float(fields[13]) # X [m]\n t_data.Y = float(fields[14]) # Y [m]\n t_data.Z = float(fields[15]) # Z [m]\n t_data.yaw = float(fields[16]) # yaw angle [rad]\n if not loading_groundtruth:\n if len(fields) == 17:\n t_data.score = -1\n elif len(fields) == 18:\n t_data.score = float(fields[17]) # detection score\n else:\n logger.info(\"file is not in KITTI format\")\n return\n\n # do not consider objects marked as invalid\n if t_data.track_id is -1 and t_data.obj_type != \"dontcare\":\n continue\n\n idx = t_data.frame\n # check if length for frame data is sufficient\n if idx >= len(f_data):\n print(\"extend f_data\", idx, len(f_data))\n f_data += [[] for x in range(max(500, idx - len(f_data)))]\n try:\n id_frame = (t_data.frame, t_data.track_id)\n if id_frame in id_frame_cache and not loading_groundtruth:\n logger.info(\n \"track ids are not unique for sequence %d: frame %d\"\n % (seq, t_data.frame))\n logger.info(\n \"track id %d occurred at least twice for this frame\"\n % t_data.track_id)\n logger.info(\"Exiting...\")\n #continue # this allows to evaluate non-unique result files\n return False\n id_frame_cache.append(id_frame)\n f_data[t_data.frame].append(copy.copy(t_data))\n except:\n print(len(f_data), idx)\n raise\n\n if t_data.track_id not in ids and t_data.obj_type != \"dontcare\":\n ids.append(t_data.track_id)\n n_trajectories += 1\n n_in_seq += 1\n\n # check if uploaded data provides information for 2D and 3D evaluation\n if not loading_groundtruth and eval_2d is True and (\n t_data.x1 == -1 or t_data.x2 == -1 or t_data.y1 == -1 or\n t_data.y2 == -1):\n eval_2d = False\n if not loading_groundtruth and eval_3d is True and (\n t_data.X == -1000 
or t_data.Y == -1000 or\n t_data.Z == -1000):\n eval_3d = False\n\n # only add existing frames\n n_trajectories_seq.append(n_in_seq)\n seq_data.append(f_data)\n f.close()\n\n if not loading_groundtruth:\n self.tracker = seq_data\n self.n_tr_trajectories = n_trajectories\n self.eval_2d = eval_2d\n self.eval_3d = eval_3d\n self.n_tr_seq = n_trajectories_seq\n if self.n_tr_trajectories == 0:\n return False\n else:\n # split ground truth and DontCare areas\n self.dcareas = []\n self.groundtruth = []\n for seq_idx in range(len(seq_data)):\n seq_gt = seq_data[seq_idx]\n s_g, s_dc = [], []\n for f in range(len(seq_gt)):\n all_gt = seq_gt[f]\n g, dc = [], []\n for gg in all_gt:\n if gg.obj_type == \"dontcare\":\n dc.append(gg)\n else:\n g.append(gg)\n s_g.append(g)\n s_dc.append(dc)\n self.dcareas.append(s_dc)\n self.groundtruth.append(s_g)\n self.n_gt_seq = n_trajectories_seq\n self.n_gt_trajectories = n_trajectories\n return True",
"def load_dataset(self):",
"def load(self):\n if os.path.exists(self.loaded_data):\n with open(self.loaded_data, 'rb') as f:\n preloaded_data = pickle.load(f)\n # Train part\n self.class2imgid = preloaded_data['class2imgid']\n self.path2class_sketch = preloaded_data['path2class_sketch']\n self.class2path_sketch = preloaded_data['class2path_sketch']\n self.path2class_image = preloaded_data['path2class_image']\n self.class2path_image = preloaded_data['class2path_image']\n self.id2path = preloaded_data['id2path']\n # Test part\n self.class2id = preloaded_data['class2id']\n self.id2class = TEST_CLASS\n self.class2imgid_test = preloaded_data['class2imgid_test']\n self.class2path_sketch_test = preloaded_data['class2path_sketch_test']\n self.class2path_image_test = preloaded_data['class2path_image_test']\n self.path2class_sketch_test = preloaded_data['path2class_sketch_test']\n self.path2class_image_test = preloaded_data['path2class_image_test']\n # Shared part\n self.loaded_image = preloaded_data['loaded_image']\n return\n self.id2class = TEST_CLASS\n self.class2id = dict()\n for idx, cls in enumerate(self.id2class):\n self.class2id[cls] = idx\n\n self.class2imgid, self.path2class_sketch, self.class2path_sketch, self.path2class_image, self.class2path_image = \\\n self.load_stats(self.stats_file_train, TRAIN_CLASS, self.sketch_files_train, self.image_files_train)\n \n self.class2imgid_test, self.path2class_sketch_test, self.class2path_sketch_test, self.path2class_image_test, self.class2path_image_test = \\\n self.load_stats(self.stats_file_test, TEST_CLASS, self.sketch_files_test, self.image_files_test)\n\n for path in self.path2class_sketch.keys():\n self.loaded_image[path] = self.load_each_image(path)\n self.id2path.append(path)\n\n for path in self.path2class_image.keys():\n self.loaded_image[path] = self.load_each_image(path)\n \n for path in self.path2class_sketch_test.keys():\n self.loaded_image[path] = self.load_each_image(path)\n\n for path in self.path2class_image_test.keys():\n self.loaded_image[path] = self.load_each_image(path)\n \n assert len(self.id2path) == len(self.path2class_sketch.keys())\n preloaded_data = dict()\n # Train part\n preloaded_data['class2imgid'] = self.class2imgid\n preloaded_data['path2class_sketch'] = self.path2class_sketch\n preloaded_data['class2path_sketch'] = self.class2path_sketch\n preloaded_data['path2class_image'] = self.path2class_image\n preloaded_data['class2path_image'] = self.class2path_image\n preloaded_data['id2path'] = self.id2path\n # Test part\n preloaded_data['class2id'] = self.class2id\n preloaded_data['class2imgid_test'] = self.class2imgid_test\n preloaded_data['class2path_sketch_test'] = self.class2path_sketch_test\n preloaded_data['class2path_image_test'] = self.class2path_image_test\n preloaded_data['path2class_sketch_test'] = self.path2class_sketch_test\n preloaded_data['path2class_image_test'] = self.path2class_image_test\n # Shared part\n preloaded_data['loaded_image'] = self.loaded_image\n \n with open(self.loaded_data, 'wb') as f:\n pickle.dump(preloaded_data, f)\n return",
"def _load_data(self) -> None:\n\n # load test split containing, for each class\n # the test filenames\n with open(\"scr_test_split.json\", \"r\") as f:\n test_split_dict = json.load(f)\n\n data = []\n targets = []\n for classname in self.classes:\n files = [el for el in os.listdir(os.path.join(self.root, classname))\n if el.endswith('.wav')]\n\n features = []\n for i, f in enumerate(files):\n # load appropriate files based on fixed split\n if self.split == 'test' and f not in test_split_dict[classname]:\n continue\n elif self.split == 'train' and f in test_split_dict[classname]:\n continue\n\n audio, sample_rate = torchaudio.load(os.path.join(self.root, classname, f))\n assert sample_rate == self.sample_rate\n features.append(self.mel_spectr(audio).permute(0, 2, 1))\n\n data.append(torch.cat(features, dim=0)) # batch-first sequence\n targets.append(torch.ones(data[-1].size(0)).long() * self.class_to_id[classname])\n\n self.data = torch.cat(data)\n self.targets = torch.cat(targets)",
"def loader(training_path, segmented_path, batch_size):",
"def _read_train(self):\n outfilename = str(self.pb.wd + \n \"out_\"+str(self.pb.conf_num)+\"_0.npz\")\n outfile = np.load(outfilename)\n self.train_predictions = outfile['train_predictions']\n self.hypotheses = outfile['hypotheses']",
"def load_class(self):\n\n path = os.path.join(self.train_path, 'class_info.pickle')\n file = open(path, 'rb')\n self.img_class = pickle.load(file)\n print('Class labels loaded for {} files'.format(len(self.img_class)))\n file.close()",
"def loadTrainingData(filename, images, facePoints=None, delim=None, offset=None): # real signature unknown; restored from __doc__\n pass",
"def load_class_grid(self,filepath):\r\n self.class_grid = np.load(filepath)",
"def create_splits(self):\n # Quickdraw is stored in a number of .npy files, one for every class\n # with each .npy file storing an array containing the images of that class.\n class_npy_files = sorted(tf.io.gfile.listdir(self.data_root))\n class_names = [fname[:fname.find('.')] for fname in class_npy_files]\n # Sort the class names, for reproducibility.\n class_names.sort()\n num_classes = len(class_npy_files)\n # Split into train, validation and test splits that have 70% / 15% / 15%\n # of the data, respectively.\n num_trainval_classes = int(0.85 * num_classes)\n num_train_classes = int(0.7 * num_classes)\n num_valid_classes = num_trainval_classes - num_train_classes\n num_test_classes = num_classes - num_trainval_classes\n\n train_inds, valid_inds, test_inds = gen_rand_split_inds(\n num_train_classes, num_valid_classes, num_test_classes)\n splits = {\n 'train': [class_names[i] for i in train_inds],\n 'valid': [class_names[i] for i in valid_inds],\n 'test': [class_names[i] for i in test_inds]\n }\n return splits",
"def mmap_feature_file():\n return numpy.memmap(\"features.np\", mode=\"r+\",\n dtype=numpy.uint8, shape=(1000000, 1 + 4 * 36))",
"def load_true(basedir, fname):\n return np.load(os.path.join(basedir, \"train_labels\", f\"{fname}.npy\"))",
"def read(self, epoch_number):\n super().read(epoch_number)\n packed_array = []\n for file in self._local_file_list:\n with np.load(file, allow_pickle=True) as data:\n rows = data['x']\n packed_array.append({\n 'dataset': rows,\n 'current_sample': 0,\n 'total_samples': rows.shape[2]\n })\n self._dataset = packed_array",
"def load_train(self):\n images, labels = self.load(os.path.join('mnist', 'train', 'images'),\n os.path.join('mnist', 'train', 'labels'))\n self.train_data = zip(images, labels)",
"def load_imagenet_scores(self, cls_file):\n cls = np.load(cls_file, allow_pickle=True).item()\n if 'kinetics' in cls_file:\n for idx in range(len(self.video_infos)):\n seq_name = self.video_infos[idx]['frame_dir'].split('/')[-1][:11]\n self.video_infos[idx]['imagenet_scores'] = cls[seq_name]\n else:\n for idx in range(len(self.video_infos)):\n seq_name = self.video_infos[idx]['frame_dir'].split('/')[-1]\n self.video_infos[idx]['imagenet_scores'] = cls[seq_name]",
"def load_train(self):\n images, labels = self.load(os.path.join('mnist', 'train', 'images'),\n os.path.join('mnist', 'train', 'labels'))\n self.train_data = list(zip(images, labels))",
"def load_data():\n module_path = os.getcwd()\n\n train = np.genfromtxt(module_path + '/datasets/data/fordA/FordA_TRAIN.tsv', delimiter=\"\\t\")\n test = np.genfromtxt(module_path + '/datasets/data/fordA/FordA_TEST.tsv', delimiter=\"\\t\")\n x_train = np.expand_dims(train[:,1:], axis=2)\n x_test = np.expand_dims(test[:,1:], axis=2)\n y_train = train[:,0]\n y_test = test[:,0]\n y_train[y_train == -1] = 0\n y_test[y_test == -1] = 0\n\n pic_train = __class_to_pic(y_train, module_path)\n pic_test = __class_to_pic(y_test, module_path)\n\n y_train = to_categorical(y_train, num_classes=2)\n y_test = to_categorical(y_test, num_classes=2)\n\n return (x_train, y_train, pic_train), (x_test, y_test, pic_test)",
"def train(self):\n\n for gesture in GESTURES:\n # read data file in data/\n datafile = 'data/{0}.npz'.format(gesture)\n # load training data\n data = np.load(datafile)\n self.known_gestures[gesture] = [data[k] for k in data]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the current transaction id stored in session, otherwise generate one.
|
def _get_transaction(self, request):
guid = request.session.get('transaction_id', None)
if not guid:
guid = str(uuid.uuid4())
request.session['transaction_id'] = guid
return guid
|
[
"def GetNextTransactionID():\r\n global TransactionID\r\n\r\n # Wrap the ID around.\r\n if TransactionID <= -32767:\r\n TransactionID = 0\r\n\r\n # Decrement it.\r\n TransactionID = TransactionID - 1\r\n\r\n return TransactionID",
"def _get_next_transaction_id(self):\r\n transaction_id = self._transaction_id\r\n self._transaction_id += 1\r\n if self._transaction_id > 8388607:\r\n self._transaction_id = 2\r\n return transaction_id",
"def generate_session_id() -> str:\n return generate_unique_id()",
"def tx_id(self) -> int:\n return self._tx_id",
"def _new_session_id(self):\n return os.urandom(32).encode('hex')",
"def get_last_transaction_id():\n try:\n last_id = models.AccountTransaction.objects.latest().transaction_id\n except models.AccountTransaction.DoesNotExist:\n last_id = 0\n # last_id = 14462590267\n return last_id",
"def __generate_session_id():\n session_id_generator = get_class_from_config(\n firenado.conf.session['id_generators'][\n firenado.conf.app['session']['id_generator']\n ], \"function\"\n )\n return session_id_generator()",
"def gen_id(namespace=''):\n global genid_prev_id\n with genid_prev_id_tl:\n prev = genid_prev_id\n if prev is None:\n prev = sha1(os.urandom(24)).hexdigest()\n else:\n entropy = str(time.clock()) + str(time.time()) + str(os.getpid())\n prev = sha1(prev + entropy).hexdigest()\n genid_prev_id = prev\n return namespace + prev",
"def tracking_id(request):\n if request.session.get(TRACKING_ID_SESSION_KEY, '') == '':\n request.session[TRACKING_ID_SESSION_KEY] = _generate_tracking_id()\n return request.session[TRACKING_ID_SESSION_KEY]",
"def generate_session_id():\n return str(secrets.randbits(32))",
"def make_session_id() -> str:\n return hashlib.sha1(\n (f'{time.time()}' + f'{random.randint(0, 1000)}').encode()\n ).hexdigest()",
"def gen_tx_id():\n fake = Faker()\n return fake.bothify(text='TXID??????????????????????????????')",
"def get_id(self):\n if self.integration_number is None:\n return '1'\n else:\n return str(self.integration_number + 1)",
"def get_current_invoiceID() -> str:\n return DATABASE.get('/Invoices/currentInvoiceID', None)",
"def get_id():\r\n new_id = ID_COUNTER.count\r\n ID_COUNTER.count += 1\r\n return new_id",
"def generate_tx_hash(self):\n self.id = get_transaction_id(self)",
"def current_user_id():\n if not hasattr(g, 'current_user_id'):\n try:\n id = int(request.headers.get(HEADER_CURRENT_USER_ID_KEY))\n except:\n id = 1\n if not id:\n id = 1\n setattr(g, 'current_user_id', id)\n return g.current_user_id",
"def generate_correlation_id():\n global last_id\n\n last_id += 1\n\n return last_id",
"def getSessionKey():\n return getFromCache(EMPTY_IDTYPELIST, SESSION_KEY)",
"def generate_transaction_id(stmt_line):\n return str(abs(hash((stmt_line.date,\n stmt_line.memo,\n stmt_line.amount))))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Does an initial upload of documents and gets the generated eFiling Hub url.
|
def upload(self, request, files, parties=None):
# Find the transaction id .. this will be a unique guid generated by eDivorce thats passed to Efiling Hub. We
# will tie it to the session.
transaction_id = self._get_transaction(request)
bce_id = self._get_bceid(request)
# if bce_id is None .. we basically have an anonymous user so raise an error
if bce_id is None:
raise PermissionDenied()
response = self._get_api(request, f'{self.api_base_url}/submission/documents', transaction_id, bce_id,
headers={}, files=files)
if response.status_code == 200:
response = json.loads(response.text)
if "submissionId" in response and response['submissionId'] != "":
# get the redirect url
headers = {
'Content-Type': 'application/json'
}
package_data = self._format_package(request, files, parties=parties)
url = f"{self.api_base_url}/submission/{response['submissionId']}/generateUrl"
response = self._get_api(request, url, transaction_id, bce_id, headers=headers,
data=json.dumps(package_data))
if response.status_code == 200:
response = json.loads(response.text)
return response['efilingUrl'], 'success'
response = json.loads(response.text)
return None, f"{response['error']} - {response['message']}"
return None, f'{response.status_code} - {response.text}'
|
[
"def get_blob_upload_url():\n\n return '{blob_api_path}/uploadblob'.format(blob_api_path=BLOB_API_COMMON_PATH)",
"def get_file_upload_url(self):\n res = self.instamojo_api_request(method='GET', path='offer/get_file_upload_url/')\n return res",
"def get_upload_url(self):\n context = aq_inner(self.context)\n folder_url = self.ploneview.getCurrentFolderUrl()\n return '%s/@@quick_upload' %folder_url",
"def file_upload_url(self):\n url = self.request.link(GeneralFileCollection(self.app), name='upload')\n return self.csrf_protected_url(url)",
"def get_file_upload_url(self):\n response = self.api_request(method='GET', path='offer/get_file_upload_url/')\n return response",
"def upload_url(self) -> str:\n return pulumi.get(self, \"upload_url\")",
"def upload_file_site():\n return render_template('upload.html')",
"def image_upload_url(self):\n url = self.request.link(ImageFileCollection(self.app), name='upload')\n return self.csrf_protected_url(url)",
"def create_url(self):\n headers = self.headers\n headers[\"upload-length\"] = str(self.file_size)\n headers[\"upload-metadata\"] = \",\".join(self.encode_metadata())\n resp = self._api_client.rest_client.POST(self.client.url, headers=headers)\n self.real_filename = resp.headers.get(\"Upload-Filename\")\n url = resp.headers.get(\"location\")\n if url is None:\n msg = \"Attempt to retrieve create file url with status {}\".format(resp.status_code)\n raise tus_uploader.TusCommunicationError(msg, resp.status_code, resp.content)\n return tus_uploader.urljoin(self.client.url, url)",
"def get_fs_url(self):",
"async def _get_photo_upload_uri(self):\n resp = await self._api.photos.getMessagesUploadServer(peer_id=self._bound_peer_id)\n self._upload_uri = resp['upload_url']\n self._log.debug(f\"Photo upload URI: {self._upload_uri}\")",
"def p2p_upload_start(self):\n # TODO: forse, questo metodo non serve, perche' le statistiche sono gia' elaborate in p2p_download_start\n pass",
"def home():\n return redirect(url_for('handle_upload'))",
"def upload():\n\n user_input = request.form\n url = user_input['url']\n\n requested_img = {'url': url}\n session['url'] = url\n\n return render_template(\"index.html\", requested_img=requested_img)",
"def get_file_upload_url(AssignmentId=None, QuestionIdentifier=None):\n pass",
"def _url_upload_format(self):\n return self.__url_root + self.__url_suffix_upload",
"def start_generation(self) -> None:\n debug_out.clear_output()\n self.generate_status_bar.value = \"Generating...\"\n print(\"generating....\")\n\n mydict = self.clean_file_upload.value\n if len(mydict) == 0:\n self.generate_status_bar.value = \"Error: there is no file uploaded\"\n else:\n bytes_val = mydict[next(iter(mydict.keys()))]['content']\n\n f = io.BytesIO(bytes_val)\n df = pd.read_csv(f)\n\n initials_generator = self.setup_initials_generators()\n initials = initials_generator.transform(df)\n doc_generator = self.setup_doc_generator(df)\n doc_generator.generate(df, initials['Initial'])\n\n self.generate_status_bar.value = \\\n \"Finished! The document was generated at %s\" % doc_generator.output_file_path",
"def process_document(self): \n filename = self.request.form[\"filename\"]\n fileURL = None\n \n # Get the item_files for this RepositoryItem \n repoItem_files = self.context.getItem_files()\n for rawfile in repoItem_files:\n if rawfile[\"filename\"] == filename:\n fileURL = self.convertFileToPdf(self.context, rawfile)\n \n return fileURL",
"def test_access_document_upload_form(self):\n\t\tc = Client()\n\t\tlog = c.login(username='bobby', password='bob')\n\t\tself.assertTrue(log)\n\t\tresponse = c.get(\"/documents/upload\")\n\t\tself.assertTrue('Add document' in response.content)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A factory for DetectBlanksWrapper classes.
|
def DetectBlanks(DriverType=None):
from xia2.Driver.DriverFactory import DriverFactory
DriverInstance = DriverFactory.Driver(DriverType)
class DetectBlanksWrapper(DriverInstance.__class__):
def __init__(self):
super(DetectBlanksWrapper, self).__init__()
self.set_executable("dials.detect_blanks")
self._sweep_filename = None
self._experiments_filename = None
self._reflections_filename = None
self._json_filename = None
self._phi_step = None
self._counts_fractional_loss = None
self._misigma_fractional_loss = None
self._results = None
def set_sweep_filename(self, sweep_filename):
self._sweep_filename = sweep_filename
def set_experiments_filename(self, experiments_filename):
self._experiments_filename = experiments_filename
def set_reflections_filename(self, reflections_filename):
self._reflections_filename = reflections_filename
def set_json_filename(self, json_filename):
self._json_filename = json_filename
def get_json_filename(self):
return self._json_filename
def set_phi_step(self, phi_step):
self._phi_step = phi_step
def set_counts_fractional_loss(self, counts_fractional_loss):
self._counts_fractional_loss = counts_fractional_loss
def set_misigma_fractional_loss(self, misigma_fractional_loss):
self._misigma_fractional_loss = misigma_fractional_loss
def get_results(self):
return self._results
def run(self):
self.clear_command_line()
if self._sweep_filename is not None:
self.add_command_line("%s" % self._sweep_filename)
if self._experiments_filename is not None:
self.add_command_line("%s" % self._experiments_filename)
assert self._reflections_filename is not None
self.add_command_line("%s" % self._reflections_filename)
if self._json_filename is None:
self._json_filename = os.path.join(
self.get_working_directory(), "%s_blanks.json" % self.get_xpid()
)
self.add_command_line("json=%s" % self._json_filename)
if self._phi_step is not None:
self.add_command_line("phi_step=%s" % self._phi_step)
if self._counts_fractional_loss is not None:
self.add_command_line(
"counts_fractional_loss=%s" % self._counts_fractional_loss
)
if self._misigma_fractional_loss is not None:
self.add_command_line(
"misigma_fractional_loss=%s" % self._misigma_fractional_loss
)
self.start()
self.close_wait()
self.check_for_errors()
assert os.path.exists(self._json_filename), self._json_filename
import json
with open(self._json_filename, "rb") as f:
self._results = json.load(f)
return DetectBlanksWrapper()
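# Minimal usage sketch (added for illustration; not part of the original factory).
# It assumes a working xia2/DIALS installation so that DriverFactory can supply a
# driver and the "dials.detect_blanks" program is on PATH; the input file names
# below are hypothetical outputs of an earlier indexing step.
if __name__ == "__main__":
    detect_blanks = DetectBlanks()
    detect_blanks.set_experiments_filename("indexed.expt")  # hypothetical input
    detect_blanks.set_reflections_filename("indexed.refl")  # hypothetical input
    detect_blanks.set_json_filename("blanks.json")  # hypothetical output path
    detect_blanks.set_phi_step(5)
    detect_blanks.run()
    print(detect_blanks.get_results())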
|
[
"def _create_wrapper(cls_spec, element_info, myself):\n # only use the meta class to find the wrapper for BaseWrapper\n # so allow users to force the wrapper if they want\n if cls_spec != myself:\n obj = object.__new__(cls_spec)\n obj.__init__(element_info)\n return obj\n\n new_class = cls_spec.find_wrapper(element_info)\n obj = object.__new__(new_class)\n\n obj.__init__(element_info)\n\n return obj",
"def __new__(cls, *conditions):\n return null_condition_binary_check(*conditions) or super().__new__(cls)",
"def __init__(self, num_samples,\n sparsity,\n mean,\n stddev, input_shape, dtype, drange, wrap):\n 1/0 # Unimplemented.\n assert(isinstance(parameters, ChannelThresholderParameters))\n self.args = args = parameters\n self.channel = ChannelEncoder(input_shape, args.num_samples, args.sparsity,\n dtype=dtype, drange=drange, wrap=wrap)\n self.output_shape = self.channel.output_shape\n self.thresholds = np.random.normal(args.mean, args.stddev, self.output_shape)\n self.thresholds = np.array(self.thresholds, dtype)",
"def createDetectorMatcher():\n #detector = cv2.BRISK_create()\n detector = cv2.AKAZE_create()\n matcher = cv2.BFMatcher(cv2.NORM_HAMMING)\n return (detector, matcher)",
"def New(*args, **kargs):\n obj = itkConstantPadImageFilterIRGBUS2IRGBUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def createLSDDetector() -> retval:\n ...",
"def New(*args, **kargs):\n obj = itkConstantPadImageFilterIRGBAUS2IRGBAUS2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def LBPHFaceRecognizer_create(radius=None, neighbors=None, grid_x=None, grid_y=None, threshold=None): # real signature unknown; restored from __doc__\n pass",
"def New(*args, **kargs):\n obj = itkConstantPadImageFilterIRGBAUS3IRGBAUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def brick(cls):\n return BrickDecorator(cls).create()",
"def New(*args, **kargs):\n obj = itkConstantPadImageFilterICVF33ICVF33_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkConstantPadImageFilterIRGBUS3IRGBUS3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def DummyDetector(index):\r\n class Dummy:\r\n def __init__(self, startingLine=None):\r\n pass\r\n \r\n def isStart(self, line):\r\n return line.lineIndex == index\r\n \r\n def isEnd(self, line):\r\n return line.lineIndex == index\r\n return Dummy",
"def BIF_create(num_bands=None, num_rotations=None): # real signature unknown; restored from __doc__\n pass",
"def test_default_class_initialization(self):\n\n wf = WordFilter.create_default_filter()\n\n self.assertEqual(len(wf.bloom_filter1), 1000)\n self.assertEqual(len(wf.bloom_filter2), 1000)\n self.assertEqual(len(wf.hash_table), 30)",
"def create(pChecker, color=..., thickness=...) -> retval:\n ...",
"def test_init(self):\n # pylint: disable=protected-access\n\n class Dummy(object):\n # pylint: disable=too-few-public-methods\n pass\n\n class DummySubclass(simple_wbd.IndicatorDataset):\n # pylint: disable=too-few-public-methods\n pass\n\n api = simple_wbd.IndicatorAPI()\n self.assertEqual(api._dataset_class, simple_wbd.IndicatorDataset)\n\n api = simple_wbd.IndicatorAPI(Dummy)\n self.assertEqual(api._dataset_class, simple_wbd.IndicatorDataset)\n\n api = simple_wbd.IndicatorAPI(\"bad data\")\n self.assertEqual(api._dataset_class, simple_wbd.IndicatorDataset)\n\n api = simple_wbd.IndicatorAPI(DummySubclass)\n self.assertEqual(api._dataset_class, DummySubclass)",
"def custom_indicator_class_factory(indicator_type, base_class, class_dict, value_fields):\n value_count = len(value_fields)\n\n def init_1(self, tcex, value1, xid, **kwargs): # pylint: disable=possibly-unused-variable\n \"\"\"Init method for Custom Indicator Types with one value\"\"\"\n summary = self.build_summary(value1) # build the indicator summary\n base_class.__init__(self, tcex, indicator_type, summary, xid, **kwargs)\n for k, v in class_dict.items():\n setattr(self, k, v)\n\n def init_2( # pylint: disable=possibly-unused-variable\n self, tcex, value1, value2, xid, **kwargs\n ):\n \"\"\"Init method for Custom Indicator Types with two values.\"\"\"\n summary = self.build_summary(value1, value2) # build the indicator summary\n base_class.__init__(self, tcex, indicator_type, summary, xid, **kwargs)\n for k, v in class_dict.items():\n setattr(self, k, v)\n\n def init_3( # pylint: disable=possibly-unused-variable\n self, tcex, value1, value2, value3, xid, **kwargs\n ):\n \"\"\"Init method for Custom Indicator Types with three values.\"\"\"\n summary = self.build_summary(value1, value2, value3) # build the indicator summary\n base_class.__init__(self, tcex, indicator_type, summary, xid, **kwargs)\n for k, v in class_dict.items():\n setattr(self, k, v)\n\n class_name = indicator_type.replace(' ', '')\n init_method = locals()[f'init_{value_count}']\n return type(str(class_name), (base_class,), {'__init__': init_method})",
"def make(cls, *args, **kwargs):\n\n # only flag overrides return flag\n to_annotate = copy.deepcopy(kwargs)\n return_object = kwargs.pop(\"return_object\", False)\n return_bins = kwargs.pop(\"return_bins\", False)\n return_counts = kwargs.pop(\"return_counts\", False)\n\n rolling = kwargs.pop(\"rolling\", False)\n if rolling:\n # just initialize a fake classifier\n data = list(range(10))\n cls_instance = cls(data, *args, **kwargs)\n # and empty it, since we'll be using the update\n cls_instance.y = np.array([])\n else:\n cls_instance = None\n\n # wrap init in a closure to make a consumer.\n # Qc Na: \"Objects/Closures are poor man's Closures/Objects\"\n def classifier(data, cls_instance=cls_instance):\n if rolling:\n cls_instance.update(data, inplace=True, **kwargs)\n yb = cls_instance.find_bin(data)\n else:\n cls_instance = cls(data, *args, **kwargs)\n yb = cls_instance.yb\n outs = [yb, None, None, None]\n outs[1] = cls_instance if return_object else None\n outs[2] = cls_instance.bins if return_bins else None\n outs[3] = cls_instance.counts if return_counts else None\n outs = [a for a in outs if a is not None]\n if len(outs) == 1:\n return outs[0]\n else:\n return outs\n\n # for debugging/jic, keep around the kwargs.\n # in future, we might want to make this a thin class, so that we can\n # set a custom repr. Call the class `Binner` or something, that's a\n # pre-configured Classifier that just consumes data, bins it, &\n # possibly updates the bins.\n classifier._options = to_annotate\n return classifier",
"def _element_constructor_nocheck(self, l):\n return self.element_class(self, l, check=False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
build wordnets from standard list
|
from nltk.corpus import wordnet as wn  # needed below for wn.synsets
def main():
    word_nets = []
    mood_list = open("mood_list.txt", "r").readlines()
    for m in mood_list:
        m = m.replace('\n', '')  # strip the trailing newline from the mood word
        raw_input()  # pause for a keypress before processing the next mood
        print "MOOD :", m
        # Look up the adjective synsets for this mood and record them
        synonyms = wn.synsets(m, pos=wn.ADJ)
        print synonyms
        word_nets.append((m, synonyms))
        for synonym in synonyms:
            for lem in synonym.lemmas:
                print lem.name
    return word_nets
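# A minimal Python 3 / NLTK 3 equivalent of the snippet above (added for
# illustration; the original targets Python 2 and the older attribute-style
# NLTK API). "mood_list.txt" is the same assumed input: one mood word per line.
from nltk.corpus import wordnet as wn3
def build_word_nets(mood_file="mood_list.txt"):
    word_nets = {}
    with open(mood_file) as f:
        moods = [line.strip() for line in f if line.strip()]
    for mood in moods:
        synsets = wn3.synsets(mood, pos=wn3.ADJ)
        # In NLTK 3, lemmas() and name() are methods rather than attributes.
        word_nets[mood] = sorted({lemma.name() for synset in synsets for lemma in synset.lemmas()})
    return word_nets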
|
[
"def make_vocab(word_list):\n vocab = {}\n for i, word in enumerate(word_list):\n vocab[word] = Vocab(count=1, index=i)\n return vocab",
"def getWordEmbeddings(self, sentence, train):\n \n for root in sentence:\n c = float(self.wordsCount.get(root.norm, 0))\n dropFlag = not train or (random.random() < (c/(0.25+c)))\n sys.stdout.flush()\n root.wordvec = self.wlookup[int(self.vocab.get(root.norm, 0)) if dropFlag else 0]\n root.cposvec = self.plookup[int(self.cpos.get(root.cpos,0))] if self.pdims > 0 else None\n\n #For word embeddings\n if self.external_embedding is not None:\n if root.form in self.external_embedding:\n root.evec = self.elookup[self.extrnd[root.form]]\n elif root.norm in self.external_embedding:\n root.evec = self.elookup[self.extrnd[root.norm]]\n else:\n if (self.oov_external_embedding is not None and root.form.replace(\" \",\"_\") in self.oov_external_embedding):\n root.evec = self.oov_elookup[self.oov_extrnd[root.form.replace(\" \",\"_\")]]\n else:\n root.evec = self.elookup[0]\n else:\n root.evec = None\n\n #For cpostag embeddings\n if self.cpos_external_embedding is not None:\n if root.cpos in self.cpos_external_embedding:\n root.cposevec = self.cpos_elookup[self.cpos_extrnd[root.cpos]]\n else:\n root.cposevec = self.cpos_elookup[0]\n else:\n root.cposevec = None\n \n #For postag embeddings\n if self.pos_external_embedding is not None:\n if root.pos in self.pos_external_embedding:\n root.posevec = self.pos_elookup[self.pos_extrnd[root.pos]]\n else:\n root.posevec = self.pos_elookup[0]\n else:\n root.posevec = None\n# \n #For feats embeddings\n if self.feats_external_embedding is not None:\n if root.feats in self.feats_external_embedding:\n root.featsevec = self.feats_elookup[self.feats_extrnd[root.feats]]\n else:\n root.featsevec = self.feats_elookup[0]\n else:\n root.featsevec = None\n \n \n #For lemmas embeddings\n# if self.lemmas_external_embedding is not None:\n# if root.lemma in self.lemmas_external_embedding:\n# root.lemmasevec = self.lemmas_elookup[self.lemmas_extrnd[root.lemma]]\n# else:\n# root.lemmasevec = self.lemmas_elookup[0]\n# else:\n# root.lemmasevec = None \n \n \n # root.ivec = concatenate(filter(None, [root.wordvec, root.cposvec, root.evec, root.cposevec, root.posevec, root.featsevec, root.lemmasevec]))\n root.ivec = concatenate(filter(None, [root.wordvec, root.cposvec, root.evec, root.cposevec, root.posevec, root.featsevec]))\n \n if self.blstmFlag:\n forward = self.surfaceBuilders[0].initial_state()\n backward = self.surfaceBuilders[1].initial_state()\n\n for froot, rroot in zip(sentence, reversed(sentence)):\n forward = forward.add_input( froot.ivec )\n backward = backward.add_input( rroot.ivec )\n froot.fvec = forward.output()\n rroot.bvec = backward.output()\n for root in sentence:\n root.vec = concatenate( [root.fvec, root.bvec] )\n\n if self.bibiFlag:\n bforward = self.bsurfaceBuilders[0].initial_state()\n bbackward = self.bsurfaceBuilders[1].initial_state()\n\n for froot, rroot in zip(sentence, reversed(sentence)):\n bforward = bforward.add_input( froot.vec )\n bbackward = bbackward.add_input( rroot.vec )\n froot.bfvec = bforward.output()\n rroot.bbvec = bbackward.output()\n for root in sentence:\n root.vec = concatenate( [root.bfvec, root.bbvec] )\n\n else:\n for root in sentence:\n root.ivec = (self.word2lstm.expr() * root.ivec) + self.word2lstmbias.expr()\n root.vec = tanh( root.ivec )",
"def _build_graph(self, word_list: List[str], begin_word: str) -> Dict[str, List[str]]:\n if not begin_word in word_list:\n word_list.append(begin_word)\n res = {w: [] for w in word_list}\n for w1 in word_list:\n for w2 in word_list:\n if (w1 != w2) and self._is_valid(w1, w2):\n res[w1].append(w2)\n\n return res",
"def _build_generalized(self, xs):\n terminal_gen = self._terminalSymbolsGenerator()\n\n _xs = [x + next(terminal_gen) for x in xs]\n self.words = _xs\n _xs = ''.join(_xs)\n self.word = _xs\n self._generalized_word_starts(xs)\n self._build(_xs)\n self.root._traverse(self._label_generalized)",
"def build_bag(data):\n\tbag = []\n\tfor sample in data:\n\n\t\tbag += [word.lower() for word in sample[0] if word not in bag and len(word) > 0]\n\n\t# Set the list to insure all dupes are removed\n\tbag = list(set(bag))\n\tbag.sort()\n\treturn bag",
"def bag_of_words(s, w):\n bow = [None] * (len(s) - w + 1)\n for i in range(0, len(s) - w + 1):\n bow[i] = s[i:i + w]\n return bow",
"def _prep_wordnet_synsets(gap, distractors):\n ref_tag = gap.pos_tags[-1]\n gap_syn = wn.synsets(gap.text.replace(' ', '_'), POS_TO_WN[ref_tag])\n gap_hypomeronyms = []\n candidates_syn = []\n for syn in gap_syn:\n gap_hypomeronyms += _get_hypomeronyms(syn)\n for cand, _ in distractors:\n candidates_syn.append(\n wn.synsets(cand.replace(\" \", \"_\"), POS_TO_WN[ref_tag]))\n return candidates_syn, gap_syn, gap_hypomeronyms",
"def _word_blocks(w):\n\n # Validate input\n if type(w) != str:\n return w\n\n # Split string into consonant/vowel/other clusters\n clusters = [x for x in re.split(\"([\"+_VOWELS+\"]+)|([\"+_CONSONANTS+\"]+)\",\n w, flags=re.IGNORECASE) if x]\n\n # Initialize return lists\n blocks = []\n cats = []\n\n # Check for single-block words\n if len(clusters) == 1 and clusters[0][0].lower() in _VOWELS:\n return ([w], [\"v_w\"])\n if (len(clusters) == 2 and clusters[0][0].lower() in _CONSONANTS\n and clusters[1][0].lower() in _VOWELS):\n return ([w], [\"cv_w\"])\n\n # Read through clusters in order\n i = -1 # current cluster index\n pbreak = False # whether the previous cluster is a break\n nbreak = False # whether the next cluster is a break\n c = \"\" # first character of current cluster\n while i < len(clusters) - 1:\n # Find current letter and whether it's against a break\n i += 1\n if i == 0 or clusters[i-1][0].isalpha() == False:\n pbreak = True\n else:\n pbreak = False\n if i == len(clusters) - 1 or clusters[i+1][0].isalpha() == False:\n nbreak = True\n else:\n nbreak = False\n c = clusters[i][0].lower()\n # Consonant\n if c in _CONSONANTS:\n # Consonant at beginning\n if pbreak == True:\n blocks.append(clusters[i])\n cats.append(\"c_b\")\n # Other consonant\n else:\n blocks.append(clusters[i])\n cats.append(\"c\")\n # Vowel\n elif c in _VOWELS:\n # Vowel followed by consonant\n if nbreak == False and clusters[i+1][0].lower() in _CONSONANTS:\n blocks.append(clusters[i] + clusters[i+1])\n cats.append(\"vc\")\n i += 1\n # Other vowel\n else:\n blocks.append(clusters[i])\n cats.append(\"v\")\n # Non-letter\n else:\n blocks.append(clusters[i])\n cats.append(\"n\")\n\n # Return lists\n return (blocks, cats)",
"def hard_words(word_list):\n dict_max = 0\n for word in word_list:\n if len(word) > dict_max:\n dict_max = len(word)\n\n length = random.randint(8, dict_max)\n constraints = init_constraint(length)\n return ''.join(constraints)",
"def init(self, trainfiles):\n for filepaths in trainfiles:\n\n # load files and tokenize words in sentences\n with open(filepaths, \"r\") as text:\n sent_list = tokenize_sentence(text.read())\n\n for sentences in sent_list:\n word_list = sentence_to_word(sentences)\n\n # check unknown words\n for index, words in enumerate(word_list):\n if words not in self.token_list:\n word_list[index] = \"<UNK>\"\n\n # add word to vocab\n self.token_list.append(words)\n\n word_list.insert(0, \"<s>\")\n word_list.append(\"</s>\")\n\n for i in range(len(word_list)-1):\n self.lang_model.append((word_list[i], word_list[i+1]))\n\n for (word1, word2) in self.lang_model:\n self.bigram_dict[(word1, word2)] += 1\n self.words_dict[word1] += 1",
"def buildDict(self, words):\n for w in words:\n self.add(w)",
"def doc_transform(doc_batch):\n docs = []\n for d in doc_batch:\n words = []\n for s in d:\n words += s\n docs.append(words)\n # nw = len(words)\n return docs",
"def gen_words(self, doc):\r\n doc = re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#”“¥:%……&*()]+\".decode(\"utf8\"),\r\n \"\".decode(\"utf8\"), doc.decode('utf8'))\r\n suffix_indexes = extract_cand_words(doc, self.max_word_len)\r\n word_cands = {}\r\n # compute frequency and neighbors\r\n for suf in suffix_indexes:\r\n word = doc[suf[0]:suf[1]]\r\n if word not in word_cands:\r\n word_cands[word] = GetWordInfo(word)\r\n word_cands[word].update_att(doc[suf[0]-1:suf[0]], doc[suf[1]:suf[1]+1])\r\n\r\n # compute the tf and info_entropy\r\n doc_lens = len(doc)\r\n for word in word_cands:\r\n word_cands[word].compute_indexes(doc_lens)\r\n\r\n # compute PMI for every word, if len(word)>1\r\n values = sorted(word_cands.values(), key=lambda x: len(x.text))\r\n\r\n for v in values:\r\n if len(v.text) == 1:\r\n continue\r\n v.compute_info_entropy(word_cands)\r\n return sorted(values, key=lambda v: v.freq, reverse=True)",
"def vrt2lists():\n corpus_folder = os.path.join('data', 'corpora', 'ylenews-sv-2012-2018-s-vrt',\n 'vrt')\n corpus = []\n tag_corpus = []\n files = list(os.walk(corpus_folder))[0][2]\n for file in files:\n with open(os.path.join(corpus_folder, file), encoding='utf8') as f:\n data = f.read().split('</sentence>')\n for sent in data:\n sentence = []\n tag_sentence = []\n items = [element.split('\\t') for element in sent.split('\\n')]\n for item in items:\n if len(item) == 8:\n word = item[0]\n tag = item[3]\n #sentence.append((word, tag))\n sentence.append(word)\n tag_sentence.append(tag)\n if len(sentence) > 1 and len(sentence) == len(tag_sentence):\n corpus.append(sentence)\n tag_corpus.append(tag_sentence)\n\n \n # Save the corpora\n with open(os.path.join('data','corpora','Yle_sv.pkl'), 'wb') as f:\n pickle.dump(corpus, f, 4)\n \n with open(os.path.join('data','corpora','Yle_sv_pos.pkl'), 'wb') as f:\n pickle.dump(tag_corpus, f, 4)\n\n #with open(os.path.join('data','corpora','Yle_sv_words_tags.pkl'), 'wb') as f:\n #pickle.dump(corpus, f, 4)",
"def create_training(self):\n train = []\n print()\n print(\"Creating Word2Vec Training Data...\")\n # Get list of English Stop-words from nltk.corpus\n stop_words = stopwords.words('english')\n # Remove punctiation symbols from Stop-words\n stop_words = [''.join(\" \" if i in string.punctuation else i for i in word) for word in stop_words]\n # Insert space before and after each word to be able to find and replace correctly\n stop_words = [\" \" + word + \" \" for word in stop_words]\n\n with open(self.processed_pth, 'r', encoding=\"utf8\") as f:\n f_content = f.readlines()\n for line in f_content:\n # Replace Stop-words with blank space in original sentence\n for stop_word_found in [w for w in stop_words if w in line]:\n line = line.replace(stop_word_found, \" \")\n final_sentence = line.split()\n\n # Append final version of sentence to training data\n train.append(line.split())\n\n print(\"Done!\")\n print(\"Lenth of Training Data is\", len(train))\n # Save Training data as a (.npy) file for the train.py to load and train\n np.save(self.train_data_pth, train)\n print(\"Saved Training Data as NPY!\")\n\n return train",
"def _make_bert_vocab_productions(vocab: Collection[str]) -> List[str]:\n\n productions = []\n\n productions.append('_W_ -> _Vw_ _W-_')\n productions.append('_W_ -> _Vw_')\n productions.append('_W-_ -> _Vsh_')\n productions.append('_W-_ -> _Vsh_ _W-_')\n\n for word in vocab:\n word = state_tree.NQStateTree.clean_escape_characters(word)\n if word.startswith('##'):\n productions.append(\"_Vsh_ -> '{}'\".format(word))\n else:\n # No point in having the agent generate these \"fake\" tokens.\n if word.startswith('[unused') or word in ('[pos]', '[neg]', '[contents]',\n '[title]', '[UNK]', '[PAD]',\n '[SEP]', '[CLS]', '[MASK]'):\n continue\n productions.append(\"_Vw_ -> '{}'\".format(word))\n\n return productions",
"def create_sets():\n train_labeled = []\n test_labeled = []\n train_lines, test_lines = read_files()\n word = []\n for line in train_lines:\n data, label, next_id = split_sample(line)\n if next_id == '-1':\n word.append((data, label))\n train_labeled.append(word)\n word = []\n else:\n word.append((data, label))\n word = []\n for line in test_lines:\n data, label, next_id = split_sample(line)\n if next_id == '-1':\n word.append((data, label))\n test_labeled.append(word)\n word = []\n else:\n word.append((data, label))\n\n return train_labeled, test_labeled",
"def conllu2list():\n data_file_1 = os.path.join('data','corpora','UD_Swedish-Talbanken','sv_talbanken-ud-train.conllu')\n data_file_2 = os.path.join('data','corpora','UD_Swedish-Talbanken','sv_talbanken-ud-test.conllu')\n data_file_3 = os.path.join('data','corpora','UD_Swedish-Talbanken','sv_talbanken-ud-dev.conllu')\n sentences = []\n corpus = []\n \n # Read conllu files\n with open(data_file_1, 'r', encoding='utf8') as f:\n data = f.read()\n sentences.extend(parse(data))\n with open(data_file_2, 'r', encoding='utf8') as f:\n data = f.read()\n sentences.extend(parse(data))\n with open(data_file_3, 'r', encoding='utf8') as f:\n data = f.read()\n sentences.extend(parse(data))\n \n # Extract tokens and POS tags\n for sentence in sentences:\n sent = []\n for token in sentence:\n sent.append((token['form'], token['upostag']))\n corpus.append(sent)\n \n # Save the corpus\n with open(os.path.join('data','corpora','UD_Swedish-Talbanken.pkl'), 'wb') as f:\n pickle.dump(corpus, f, 4)",
"def add_list_of_words(G,list_of_words):\n\timport itertools\n\t#G = Graph.copy()\n\twordset = set(list_of_words)\n\tif len(wordset)>0:\n\t\tcouples = itertools.combinations(wordset, 2)\n\t\t#G.add_edges_from(edge_list)\n\t\tfor edge in couples:\n\t\t\tif G.has_edge(edge[0],edge[1]):\n\t\t\t\t# we added this one before, just increase the weight by one\n\t\t\t\tG[edge[0]][edge[1]]['weight'] += 1\n\t\t\telse:\n\t\t\t\t# new edge. add with weight=1\n\t\t\t\tG.add_edge(edge[0], edge[1], weight=1)\n\treturn G"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parses the document for answers to the question why.
|
def extract(self, document):
    candidate_list = []
    # Walk the POS parse tree of every sentence and collect why-candidates,
    # keeping the index of the sentence each candidate came from.
    for i in range(document.length):
        for candidate in self._evaluate_tree(document.posTrees[i]):
            candidate_list.append([candidate[0], candidate[1], i])
    # Score the collected candidates, then attach them to the document as the
    # answer to the 'why' question.
    candidate_list = self._evaluate_candidates(document, candidate_list)
    self.answer(document, 'why', candidate_list)
    return document
|
[
"def parse_question_answers(self):\n output = {}\n zfile = zipfile.ZipFile(self.filename)\n form = zfile.read('word/document.xml')\n xmlroot = ET.fromstring(form)\n for field in xmlroot.getiterator():\n field_tag = field.find(self.TAG_FIELDPROP+'/'+self.TAG_FIELDTAG)\n if field_tag is None:\n continue\n tag = field_tag.get(self.ATTR_FIELDTAGVAL, None)\n value = self.parse_multiline_text(field.find(self.TAG_FIELD_CONTENT))\n _id = self.tag_to_id(tag)\n if _id and _id not in output:\n output[_id] = {}\n if self.is_question(tag):\n output[_id]['question'] = value\n elif self.is_answer(tag):\n if self.is_multianswer(tag):\n # Check if it's a checkbox\n field_checkbox = field_tag.getparent().find(self.TAG_FIELD_CHECKBOX)\n if field_checkbox is not None:\n output.pop(_id, None)\n parent_id = self.get_parent_id(_id)\n output[parent_id] = output[parent_id] if parent_id in output else {}\n output[parent_id]['answer'] = []\n output[parent_id]['options'] = output[parent_id]['options'] if 'options' in output[parent_id] else []\n output[parent_id]['options'].append(self.get_checkbox_label(field_checkbox))\n if self.is_checked(field_checkbox):\n output[parent_id]['answer'].append(self.get_checkbox_label(field_checkbox))\n else:\n # Must be additional text if no checkbox found\n output.pop(_id, None)\n parent_id = self.get_parent_id(_id)\n output[parent_id] = output[parent_id] if parent_id in output else {}\n output[parent_id]['additional_text'] = value\n else:\n # Single answer\n output[_id]['answer'] = value\n zfile.close()\n return self.dict_to_arr(output)",
"def process_query_reply(wisdom,reply):\n\n answer_elements= []\n if reply == \"\":\n return Answer(self)\n \n root= ''\n\n try:\n root= ET.fromstring(reply)\n except ET.ParseError:\n # If the answer is not well-formed, choose a default answer\n answer= Answer(wisdom)\n answer.set_question_ID(wisdom.ID + ':no_answer:' + str(answer_elements.__len__()) )\n answer.status= ''\n return answer \n\n qID= status= ''\n for child in root: \n if child.tag == 'qID':\n qID= child.text\n continue\n if child.tag == 'status':\n status= child.text\n continue\n\n text=''\n link=''\n drs=''\n weight=1\n pairs= []\n rules= []\n \n for c2 in child:\n if c2.tag == 'text':\n text= c2.text\n if c2.tag == 'link':\n link= c2.text\n if c2.tag == 'drs':\n drs= c2.text\n if c2.tag == 'weight':\n weight= c2.text\n if c2.tag == 'data':\n for c3 in c2: # <dataitem>\n WP= name= ''\n for c4 in c3:\n if c4.tag == 'WP':\n WP= c4.text\n if c4.tag == 'name':\n name= c4.text\n pairs.append( QPair(WP,name) )\n if c2.tag == 'rules':\n for c3 in c2: # <ruleitem>\n rule = Rule() \n for c4 in c3:\n if c4.tag == 'text':\n rule.text = c4.text\n if c4.tag == 'link':\n rule.description= c4.text\n rules.append( rule )\n\n answ= AnswerElement()\n answ.text = text\n answ.description = link\n answ.drs = drs\n answ.weight = weight\n answ.pairs = pairs\n answ.rules = rules\n answ.wisdom = wisdom\n answer_elements.append( answ )\n \n answer= Answer(wisdom)\n answer.set_elements(answer_elements)\n answer.set_question_ID(wisdom.ID + ':' + qID.rstrip().lstrip() + ':' + str(answer_elements.__len__()) )\n answer.status= status\n\n return answer",
"def docParse(self):\n text = self.text\n text = self.simplify(text)\n nlp = self.nlp\n full_doc = nlp(text)\n \n # Slit into sentences and find Simple sentences\n sent_doc_ls = list(sent for sent in full_doc.sents)\n spl_ls = self.simple_find(sent_doc_ls)\n doc_ls = list(nlp.pipe(spl_ls))\n\n print(\"Finding triples (Subject-Verb-Object) from your doc...\\n\")\n # Our triples will be (ent1, rel, ent2)\n triples = self.all_triples(doc_ls) \n return triples",
"def generateWhyQuestion(self, sentence):\n\n # identification for conjunctions at the beginning of the sentence\n if nltk.word_tokenize(sentence)[0].find(\"Because\") != -1 or nltk.word_tokenize(sentence)[0].find(\n \"Hence\") != -1 or nltk.word_tokenize(sentence)[0].find(\"Therefore\") != -1 or \\\n nltk.word_tokenize(sentence)[0].find(\"But\") != -1:\n wordDic = {'Because of': '', 'Because': '', 'Therefore,': '', 'Therefore': '', 'Hence,': '', 'But if': '',\n \"But\": '', 'Hence': ''}\n preProcess = PreProcess()\n sentence = preProcess.multipleReplace(sentence,\n wordDic) # replacing the conjunction according to the worddic\n if str(sentence).count(',') > 0:\n doc = nlp(sentence) # document representation of the sentence\n pos = '' # post tag\n for chunk in doc.noun_chunks:\n if chunk.root.dep_ == 'nsubjpass' or chunk.root.dep_ == 'nsubj':\n pos = nltk.pos_tag(nltk.word_tokenize(chunk.text))\n\n if str(pos).find('PRP') == -1:\n break;\n question = '';\n return question\n else:\n ## cheching for hverb\n hverb = ['is', 'are', 'can', 'was', 'were']\n words = nltk.word_tokenize(sentence); # word tokenizing\n tagged = nltk.pos_tag(words); # words tagging\n verb = []\n for x, y in enumerate(tagged):\n if y[1] == 'MD' or y[1] == 'VBZ' or y[1] == 'VBP': # identifying helping verbs using rule base approach\n verb.append(y[0])\n if verb[0] in hverb: # identifying whether the verb list contains a hverb defined above\n\n sentence = sentence.replace(verb[0], '')\n # return \"Why \" + sentence + \" ?\"\n # else:\n return 'Why is ' + sentence + ' ?' # question\n\n # identifying a conjunction is present in the middle of sentence\n elif str(sentence).find('because') != -1 or str(sentence).find('therefore') != -1 or str(sentence).find(\n 'although') != -1 or str(sentence).find('since') != -1:\n\n doc = nlp(sentence) # document representation of the sentence\n headword = [] # the head word of the pronoun\n pos = '' # pos tag\n for chunk in doc.noun_chunks:\n if chunk.root.dep_ == 'nsubjpass' or chunk.root.dep_ == 'nsubj':\n pos = nltk.pos_tag(nltk.word_tokenize(chunk.text))\n\n if str(pos).find('PRP') == -1:\n headword.append(chunk.text)\n headword = headword[0]; # replacing the pronoun with the headword\n helper = QuestionFormationHelper()\n sentencePart = helper.helperForWhyQuestions(sentence);\n\n if len(sent_tokenize(sentencePart)) <= 2:\n return 'Why ' + sentencePart + \" ?\"\n else:\n if str(sentencePart).count(',') > 0:\n sentencePart = helper.slicer1(sentencePart, ',');\n\n s = ''\n for word, pos in nltk.pos_tag(nltk.word_tokenize(str(sentencePart))):\n if (pos == 'PRP'):\n word = headword\n\n s = s + word + \" \"\n # s = ''.join(word)\n\n question = 'Why ' + s + ' ?';\n return question\n else:\n return ''",
"def analyze_doc(self, doc):\n # pre processing stage\n pp_doc = clean_emojis(doc)\n if self.translate:\n pp_doc = translate_doc(doc, src=self.src_lang, dest=\"en\")\n pp_doc = tokenize_and_remove_stop_words(text=pp_doc, join_words=True,\n language=self.language)\n # get polarity score from pre processed doc\n score = self.get_polarity_score(pp_doc)\n # determine polarity from score and thresholds\n if score < self.neu_inf_lim:\n predicted_sentiment = \"neg\"\n elif score < self.neu_sup_lim:\n predicted_sentiment = \"neu\"\n else:\n predicted_sentiment = \"pos\"\n return (doc, predicted_sentiment, score)",
"def _parse_question_description(question: Tag) -> str:\n description = \"\"\n directive_sections = question.find_all(\"directive\", recursive=False)\n if len(directive_sections) >= 1:\n description = \" \".join(\n [\n _get_clean_string(description)\n for description in directive_sections[0].find_all(\"text\")\n ]\n )\n if len(directive_sections) > 1:\n warn(\n f\"More than one 'directive' section provided for question {question}.\"\n \" Only the first one was used.\"\n )\n return description",
"def questionParse(self):\n text = self.text\n text = text.lower()\n nlp = self.nlp\n doc = nlp(text)\n print(\"Finding entities set and relations set...\\n\")\n ents_set = set(str(ent) for ent in doc.ents)\n rels_list = self.get_relation(doc)\n rels_set = set(str(rel[-1]) for rel in rels_list)\n return ents_set, rels_set",
"def create_questions(self):\n prev_node = self.root_node\n node_iter = PreOrderIter(prev_node)\n question_list = []\n\n terms = []\n definitions = []\n\n sentence_list = []\n question_starter_list = []\n\n for node in node_iter:\n # check if question is empty in node\n\n if node == self.root_node:\n continue\n\n question_starter = ''\n\n # checks if tree has valid document structure\n num_first_layer = len(self.root_node.children)\n num_below_first_layer = len(self.root_node.descendants) - len(self.root_node.children)\n if (len(self.root_node.descendants) > len(self.annotation_list) and num_below_first_layer > num_first_layer):\n question_starter = self.get_question_starter(node=node)\n # checks if prev node is not a sibling \n # Creates a property question based on subtopic here\n if len(node.ancestors) > 1 and prev_node is node.parent:\n node_layer = [sibling for sibling in node.siblings]\n node_layer.append(node)\n\n # if there is only one property, skip question\n if len(node_layer) <= 1:\n continue\n\n temp_term = \"%sWhat are the %s properties?\" % (question_starter, len(node_layer))\n temp_definition = '\\n'.join([ \"%s. \" % (i+1) + sibling.text for i, sibling in enumerate(node_layer) ])\n terms.append(temp_term)\n definitions.append(temp_definition)\n\n prev_node = node\n \n # Appends \n sentence_list.extend(node.sentences)\n question_starter_list.extend([question_starter] * len(node.sentences))\n\n for annotation in self.annotation_list:\n sentence_list.extend(annotation['sentences'])\n question_starter_list.extend([''] * len(annotation['sentences']))\n\n questions, question_starters = self.questions_from_sentlist(sentence_list=sentence_list, question_starter_list=question_starter_list)\n if not questions:\n print('Failed to score sentences')\n return terms, definitions\n\n temp_terms = [ q_starter+question.sentence.return_string() for question, q_starter in zip(questions, question_starters)]\n temp_definitions = [str(question.answer.content) for question in questions]\n\n # extend the question list\n temp_terms.extend(terms)\n temp_definitions.extend(definitions)\n\n return temp_terms, temp_definitions",
"def find_relevant_answers(self, model, q_tokens):\n # a = [('kaç', 'Adj'), ('kaçta', 'Adv'),\n # ('kaçıncı', 'Adj'), ('nasıl', 'Adj'), ('nasıl', 'Adv'),\n # ('ne', 'Adj'), ('ne', 'Pron'), ('neredeki', 'Pron')]\n\n rows, cols = model.document_term_matrix_tfidf().shape\n relevant_rows = list(range(rows))\n if any(adj in max(Properties.questionPOS[\"Num\"], q_tokens[0], key=len) for adj in\n min(Properties.questionPOS[\"Num\"], q_tokens[0], key=len)):\n relevant_rows = self.find_rows(model, looking_for=\"Num\")\n return relevant_rows\n # for row in relevant_rows:\n # print(self.real_doc_sentences[row])\n # exit(1)",
"def parse_document(self, response):\n #save_str_file(response.text, 'IEEE_document.json')\n\n # 取结果中的metadata部分(论文的元数据,其中包含需要的内容)\n pattern = re.compile('metadata={.*};')\n search_res = pattern.search(response.text)\n\n paper_item = IEEEPaperItem()\n \n # TODO: in what case doesn't the document contain metadata? \n if search_res:\n content = json.loads(search_res.group()[9:-1])\n required = ['title', 'authors', 'abstract',\n 'doi', 'publicationTitle', 'publicationYear', 'metrics',\n 'contentType', 'keywords']\n # contentType: conference, journal, book\n for i in required:\n paper_item[i] = content.get(i, None)\n\n paper_item['publication_number'] = response.meta['publication_number']\n paper_item['issue_number'] = response.meta['issue_number']\n\n # deal with reference\n yield scrapy.Request(\n url='https://ieeexplore.ieee.org/rest/document/{}/references'.format(content['articleNumber']),\n callback=self.parse_references,\n meta={'paper_item': paper_item}\n )\n else:\n yield None",
"def parseDocdata(ctx, rawDoc, check_permissions=False):\n\n # using this link no login required to get the list of nuage docs\n # but that list wont have links to HTML docs, only PDFs\n # though we can construct links to HTML by having doc_id only\n # https://infoproducts.alcatel-lucent.com/cgi-bin/get_doc_list.pl?&entry_id=1-0000000000662&srch_how=Full%20Text&srch_str=&release=4.0.R6.1\n\n # with this link you get both PDF and HTML links for nuage family\n # but it requires login and have no info about restricted status of docs\n # https://infoproducts.alcatel-lucent.com/aces/cgi-bin/au_get_doc_list.pl?&entry_id=1-0000000000662&srch_how=Full%20Text&srch_str=&release=4.0.R6.1\n\n doc_list = []\n td_contents_patt = re.compile(r'<td.+?>(.+?)</td>')\n\n # raw doc comes as html in a string without newlines\n # to parse doc data which is in each <tr> I am inserting newlines\n raw_doc_entries = rawDoc.replace('<tr ', '\\n <tr ').split('\\n')\n\n # switch to show notification about restriced documents\n show_restricted_docs_notification = True\n\n for raw_entry in raw_doc_entries:\n doc_data = {} # dict to hold doc data for one particular entry\n\n # this mysterious additional symbols in responce data appeared again\n # now they messed with </td> tag in responce for nuage-vsp rel 4.0.r5\n # refer to this output https://regex101.com/r/1KdJHJ/2\n # and look for matched result\n # when requesting data from browser I cant see any bogus symbols\n # same issue was the reason to create get_json_resp() function\n # TODO: this is not 100% reproducible. Analyze later and create an issue\n # as a workaround I will validate every </td>\n raw_entry = re.sub(r'</t\\S*d>', '</td>', raw_entry)\n\n # example: https://regex101.com/r/NhwnOp/1\n td_contents = td_contents_patt.findall(raw_entry)\n if td_contents:\n if (not ctx.obj['LOGGED_IN']) and ('a login is required for access' in td_contents[1]):\n if show_restricted_docs_notification:\n click.echo(\n ' The following documents are available to logged in users only. '\n 'They will not be included in the documentation set...')\n show_restricted_docs_notification = False\n click.echo(' ' + td_contents[0].strip())\n continue\n if len(td_contents) <= 1:\n continue\n # click.echo('Adding doc {}'.format(td_contents[0]))\n if check_permissions:\n docs_permissions.update(\n parse_td(raw_td=td_contents, check_permissions=True))\n else:\n doc_data.update(parse_td(raw_td=td_contents))\n\n # when dealing with combined product some docs might be in\n # both sections. Append only unique docs.\n if doc_data not in doc_list:\n doc_list.append(doc_data)\n # pprint(doc_list)\n # os.sys.exit()\n return doc_list",
"def why(msg):\n post_data = get_report_data(msg)\n if not post_data:\n raise CmdException(\"That's not a report.\")\n else:\n *post, _ = fetch_post_id_and_site_from_url(post_data[0])\n why_info = get_why(post[1], post[0])\n if why_info:\n return why_info\n else:\n raise CmdException(\"I don't have the `why` data for that post (anymore?). \"\n \"You should be able to find it on metasmoke.\")",
"def clean_conversations(self):\n\n print(\"Reading sample conversations...\")\n # Read agent's messages from sample_conversations\n conversations = pd.read_json(self.fname)\n messages = [i['Messages'] for i in [j for j in conversations['Issues']]]\n agent_messages_all = [[j['Text'] for j in i if not j['IsFromCustomer']] for i in messages]\n agent_messages_list = [item for sublist in [a for a in agent_messages_all if len(a) > 0] for item in sublist]\n agent_messages = [item for sublist in [nltk.sent_tokenize(a) for a in agent_messages_list] for item in sublist]\n\n print(\"Extracting frequently asked problems...\")\n # Get agent's questions from sample conversations\n # get messages which contain questions\n agent_questions_uncleaned = [text for text in agent_messages if \"?\" in text]\n # get the question sentense\n agent_questions_cleaned = [self.get_questions(text) for text in agent_questions_uncleaned]\n # correct spelling error\n print(\"Checking spelling...This will take for a while...\")\n agent_questions_corrected = agent_questions_cleaned\n # agent_questions_corrected = [str(TextBlob(i).correct()) for i in agent_questions_cleaned]\n # remove repeated questions\n questions = list(set(agent_questions_corrected))\n\n print(\"Done correcting, now analyzing the questions...\")\n # get ngrams from the questions\n frequencies = Counter()\n for question in questions:\n ngram = nltk.ngrams(question.split(), self.ngram_n)\n frequencies += Counter(ngram)\n # Map ngram to questions from low frequency to high frequency gram\n temp = []\n ngrams = []\n sorted_questions_all = []\n visited = set()\n for row, freq in frequencies.most_common()[::-1]:\n gram = ' '.join(row)\n for question in questions:\n if question not in visited:\n if gram in question:\n temp.append(question)\n visited.add(question)\n if (len(temp) > 0):\n sorted_questions_all.append(temp[:])\n ngrams.append(gram)\n temp = []\n # Get one question to represent a ngram\n sorted_questions = [s[0] for s in sorted_questions_all]\n self.ngram_dict = dict(zip(ngrams, sorted_questions))\n with open(\"ngram_dict.json\", 'w') as w:\n json.dump(self.ngram_dict, w)",
"def test_process_document(self) -> None:\n documents = (\n (\n ([(\"a\", 20), (\"b\", 10), (\"c\", 10), (\"d\", 10)]),\n {\"a\": 1.0, \"b\": 0.5, \"c\": 0.5, \"d\": 0.5},\n ),\n ([\"a\"], {\"a\": 1.0}),\n ([], {}),\n (\n [\"a\", \"b\", \"c\", \"d\", \"e\"],\n {\"a\": 1.0, \"b\": 1.0, \"c\": 1.0, \"d\": 1.0, \"e\": 1.0},\n ),\n )\n for document, expected in documents:\n document = self.convert_document(document)\n\n with self.subTest(document=document, expected=expected):\n tf_scores = self.tfidf.process_document(document)\n self.assertEqual(tf_scores, expected)",
"def _parse_question(self, question):\n # Remove any @mentions and #tags\n question = re.sub(r\"(?:\\.?@|#)\\w+\", \"\", question)\n \n parsed = {\n \"supplier\": [],\n \"org\": [],\n \"date\": [],\n }\n section = \"org\"\n for token in tokenise(question):\n if token in self.KEYWORDS:\n section = self.KEYWORDS[token]\n else:\n parsed[section].append(token)\n \n return parsed",
"def clean_document(self,document):\r\n # Remove all characters outside of Alpha Numeric\r\n # and some punctuation\r\n document = re.sub('[^A-Za-z .-]+', ' ', document)\r\n document = document.replace('-', '')\r\n document = document.replace('...', '')\r\n document = document.replace('Mr.', 'Mr').replace('Mrs.', 'Mrs')\r\n\r\n # Remove Ancronymns M.I.T. -> MIT\r\n # to help with sentence tokenizing\r\n document = self.merge_acronyms(document)\r\n\r\n # Remove extra whitespace\r\n document = ' '.join(document.split())\r\n return document",
"def get_doc_struct(self, doc):\n doc_struct = {}\n for section, section_content in doc.items():\n clauses = {}\n for clause, content in section_content.items():\n count_terms = 0\n for p in content['content']:\n if self.is_new_term(p):\n count_terms += 1\n\n clauses[clause] = {'num_para':count_terms, 'num': content['num']}\n\n doc_struct[section] = clauses\n\n return doc_struct",
"def leads_with_question(doc):\n return doc[0].text.lower() in question_words",
"def parse_parser_results(text):\n # print \"-----\"\n # print text\n # print \"-----\"\n state = 0\n tmp = {}\n coref_set = []\n results = { \"sentences\": [] }\n text = unidecode(text) # Force output conversion to ASCII to avoid RPC error\n print text\n for line in text.split(\"\\n\"):\n if line.startswith(\"Sentence #\"):\n state = 1\n if len(tmp.keys()) != 0:\n results[\"sentences\"].append(tmp)\n tmp = {}\n elif state == 1:\n tmp['text'] = line.strip()\n state = 2\n elif state == 2:\n if not line.startswith(\"[Text=\"):\n print line\n raise Exception(\"Parse error. Could not find [Text=\")\n tmp['words'] = [] \n exp = re.compile('\\[([^\\]]+)\\]')\n matches = exp.findall(line)\n for s in matches:\n tmp['words'].append(parse_bracketed(s))\n state = 3\n tmp['parsetree'] = []\n elif state == 3:\n if not (line.startswith(\" \") or line.startswith(\"(ROOT\")):\n state = 4\n tmp['parsetree'] = \" \".join(tmp['parsetree'])\n tmp['tuples'] = []\n else:\n tmp['parsetree'].append(line.strip())\n if state == 4:\n # dependency parse\n line = line.rstrip()\n if not line.startswith(\" \") and line.endswith(\")\"):\n split_entry = re.split(\"\\(|, \", line[:-1]) \n if len(split_entry) == 3:\n rel, left, right = map(lambda x: remove_id(x), split_entry)\n tmp['tuples'].append(tuple([rel,left,right]))\n elif \"Coreference set\" in line:\n state = 5\n coref_set = []\n elif state == 5:\n if \"Coreference set\" in line: # Create new coreference set if needed\n if len(coref_set) > 0:\n if results.has_key('coref'):\n results['coref'].append(coref_set)\n else:\n results['coref'] = [coref_set]\n coref_set = []\n else:\n # Updated for new coreference format\n crexp = re.compile(r\"\\((\\d*),(\\d)*,\\[(\\d*),(\\d*)\\)\\) -> \\((\\d*),(\\d)*,\\[(\\d*),(\\d*)\\)\\), that is: \\\"(.*)\\\" -> \\\"(.*)\\\"\")\n matches = crexp.findall(line)\n for src_i, src_pos, src_l, src_r, sink_i, sink_pos, sink_l, sink_r, src_word, sink_word in matches:\n src_i, src_pos, src_l, src_r = int(src_i)-1, int(src_pos)-1, int(src_l)-1, int(src_r)-1\n sink_i, sink_pos, sink_l, sink_r = int(sink_i)-1, int(sink_pos)-1, int(sink_l)-1, int(sink_r)-1\n print \"COREF MATCH\", src_i, sink_i \n coref_set.append(((src_word, src_i, src_pos, src_l, src_r), (sink_word, sink_i, sink_pos, sink_l, sink_r)))\n print \"CR\", line\n if len(tmp.keys()) != 0:\n results[\"sentences\"].append(tmp)\n if len(coref_set) > 0: # Add final coreference set if needed\n if results.has_key('coref'):\n results['coref'].append(coref_set)\n else:\n results['coref'] = [coref_set] \n return results"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Put each pulse in a separate bin, mimicking the first pass
|
import numpy as np
def prebin_pulses(pulses, readout_window):
edges = [readout_window.start]
charges = [0]
for p in pulses:
# Add one bin containing the pulse followed by
# a second that continues to the next pulse
edges.append(p.time)
charges.append(p.charge)
edges.append(p.time+p.width)
charges.append(0)
edges.append(readout_window.stop)
# Clean any zero or negative width bins resulting from pulse width round-off error.
lag = 0
for q in range(len(charges)):
if edges[q-lag+1] <= edges[q-lag]:
            assert charges[q-lag] == 0
            # Drop the degenerate bin by index (list.remove() searches by value).
            del edges[q-lag]
            del charges[q-lag]
            lag += 1
return np.asarray(charges), np.asarray(edges)
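# Minimal usage sketch (added for illustration; not part of the original function).
# The pulse and readout-window objects only need the attributes used above
# (.time, .charge, .width and .start, .stop), so they are mocked here with
# namedtuples and made-up numbers.
from collections import namedtuple
Pulse = namedtuple("Pulse", ["time", "charge", "width"])
ReadoutWindow = namedtuple("ReadoutWindow", ["start", "stop"])
if __name__ == "__main__":
    window = ReadoutWindow(start=0.0, stop=100.0)
    pulses = [Pulse(time=10.0, charge=1.5, width=3.2),
              Pulse(time=42.0, charge=0.7, width=3.2)]
    charges, edges = prebin_pulses(pulses, window)
    print(charges)  # one charge per bin
    print(edges)    # one more edge than charges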
|
[
"def pulse_simple(self):\r\n # All time are in us\r\n t1_laser = 100 # First time to turn ON the laser\r\n t2_laser = 650 # Last time to turn ON the laser\r\n dt_laser = 30 # Pulse duration of the laser \r\n dt_trig = 10 # Duration of the pulse for the trigger\r\n dt_pulse = 10 # Width of the RF pulse\r\n Ntime = 5 # Number of time the RF pulse is shifted (this defines the number of blocks within the sequence )\r\n \r\n # Define raise time of the RF pulse\r\n tmin = t1_laser + 2*dt_laser\r\n tmax = t2_laser - 2*dt_laser\r\n tlin = np.linspace(tmin, tmax, Ntime) #Linear spacing\r\n # Transform it to a log scale\r\n beta = 4/(tmax-tmin) # Factor for the logaritmic spacing (how squeezed will be the point near tmin) \r\n B_log = (tmax-tmin)/(np.exp(beta*tmax)-np.exp(beta*tmin))\r\n A_log = tmin - B_log*np.exp(beta*tmin) \r\n # The following is the list of all initial time for the pulse\r\n t0_pulse_s = A_log + B_log*np.exp(beta*tlin) #Lograritmic spacing \r\n\r\n #Initialize the sequence\r\n T1_sequence = Sequence(name='T1 sequence') \r\n \r\n # The channel laser and trigger never change in each block, so we \r\n # define them outside of the loop.\r\n # Channel pulse for the laser\r\n laser = ChannelPulses(channel=2, name='Laser nice')\r\n laser.add_pulses([t1_laser,t1_laser+dt_laser,\r\n t2_laser,t2_laser+dt_laser])\r\n # Channel pulse for the trigger\r\n trig = ChannelPulses(channel=7, name='Wonderful Trigger')\r\n trig.add_pulses([t2_laser+dt_laser, t2_laser+dt_laser+dt_trig]) \r\n # Create a block of pulse pattern for each raise time of the RF pulse\r\n for i, t0_pulse in enumerate(t0_pulse_s):\r\n # Channel pulse for the RF\r\n RF = ChannelPulses(channel=3, name='Super RF')\r\n RF.add_pulses([t0_pulse, t0_pulse+dt_pulse])\r\n # Create the block of pulse pattern\r\n T1_block = PulsePatternBlock(name='T1 block %d'%i)\r\n T1_block.add_channelEvents([laser, RF, trig])\r\n # Add this block in the sequence\r\n T1_sequence.add_block(T1_block)\r\n \r\n return T1_sequence",
"def servo_pulse( pin_nr, position ):\n\n # implementeer deze functie",
"def pulse_generator(self, chID, ping):\n f_s_ori=1.5*1e6;\n \n F1 = self.filters[chID+1*2-2]\n F2 = self.filters[chID+1*2-2+1]\n D_1 = F1['DecFac']\n D_2 = F2['DecFac']\n \n filt_1 = F1['Coeff'][0][0::2]+ 1j * F1['Coeff'][0][1::2]\n filt_2 = F2['Coeff'][0][0::2] + 1j * F2['Coeff'][0][1::2]\n \n \n para = pd.DataFrame(self.parameters).transpose()\n para = para[para['ChannelID']== self.CID[chID]].iloc[ping]\n \n f_s_sig = 1 / float(para['SampleInterval'])\n FreqStart = int(para['FrequencyStart'])\n FreqEnd = int(para['FrequencyEnd'])\n \n pulse_length = float(para['PulseDuration'])\n pulse_slope = float(para['Slope'])\n \n t_sim_pulse = np.transpose(np.linspace(0,pulse_length,int(pulse_length * f_s_ori)))\n \n n_p = len(t_sim_pulse)\n \n nwtx = 2 * np.floor( pulse_slope * n_p)\n \n wtxtmp = np.hanning(nwtx)\n \n nwtxh = int(np.ceil(nwtx/2))\n \n \n env_pulse = np.concatenate([wtxtmp[0:nwtxh], np.ones(int(n_p - nwtx)), wtxtmp[nwtxh:]])\n \n sim_pulse = env_pulse * chirp(t = t_sim_pulse,\n f0 = FreqStart,\n f1=FreqEnd,\n t1=t_sim_pulse[-1])\n \n #Filter simulated pulse to create match filter%\n f_s_dec = []\n f_s_dec.append(f_s_ori / D_1)\n f_s_dec.append(f_s_dec[0] / D_2)\n \n if abs(f_s_dec[1] - f_s_sig) >= 1:\n print('Decimated pulse sample rate not matching signal sampling rate')\n #make convolution function with padding\n def conv(u,v):\n npad = len(v) - 1\n u_padded = np.pad(u, (npad // 2, npad - npad // 2), mode='constant')\n return(np.convolve(u_padded, v, 'valid'))\n \n sim_pulse_1 = conv(sim_pulse / max(abs(sim_pulse)), filt_1) \n sim_pulse_1 = resample(sim_pulse_1,int(np.floor(len(sim_pulse)/D_1)))\n \n \n sim_pulse_2 = conv(sim_pulse_1 / max(abs(sim_pulse_1)),filt_2)\n sim_pulse_2 = resample(sim_pulse_2,int(np.floor(len(sim_pulse)/D_2)))\n \n sim_pulse_2 = sim_pulse_2 / max(sim_pulse_2)\n \n y_tx_matched = np.conj(np.flipud(sim_pulse_2))\n \n return(sim_pulse_2, y_tx_matched)",
"def pulse2(wait,bmedia,numpulses,pulse,blank,\n vol1,spd1,vol2,spd2):\n # SET UP LOGS\n global transferLog\n global timeLog\n transferLog = \"transferLog.txt\"\n timeLog = datetime.datetime.fromtimestamp(time.time()).strftime('%y%m%d_%H%M%S') + \"_timeLog.txt\"\n updatelog(\"PULSE EXPERIMENT\")\n # CALCULATE USEFUL VALUES\n trans_time1 = (round(vol1/spd1) + 1) # how long the fluid transfer takes\n trans_time2 = (round(vol2/spd2) + 1) # how long the fluid transfer takes\n # ESTABLISH BASELINE\n time.sleep(wait)\n # REMOVE INITIAL MEDIA\n updatelog(\"Removing initial media.\")\n drive.m2(0,bmedia,100);time.sleep(bmedia/100 + 1)\n # START THE PULSES\n for i in range(numpulses):\n # ligand pulse\n drive.m1(1,vol1,spd1);time.sleep(trans_time1)\n updatelog(\"Pulse \" + str(i+1) + \" START\")\n time.sleep(pulse)\n updatelog(\"Pulse \" + str(i+1) + \" END\")\n drive.m1(0,vol1,spd1);time.sleep(trans_time1)\n # blank media\n drive.m2(1,vol2,spd2);time.sleep(trans_time2)\n if i != (numpulses-1):\n updatelog(\"Blank \" + str(i+1) + \" START\")\n time.sleep(blank)\n updatelog(\"Blank \" + str(i+1) + \" END\")\n drive.m2(0,vol2,spd2);time.sleep(trans_time2)\n # if this is the last pulse, don't remove the blank media\n else:\n updatelog(\"EXPERIMENT COMPLETE\\n\")",
"def decode_pulse(self,pList):\n\n\t\tbitList = []\n\t\tsIndex = -1\n\n\t\t# convert the timespans in seconds to milli-seconds\n\t\t# look for the start of the IR remote signal\n\t\t\n\t\tfor p in range(0,len(pList)):\n\t\t\ttry:\n\t\t\t\tpList[p]=float(pList[p])*1000\n\t\t\t\tif self.verbose == True:\n\t\t\t\t\tprint(pList[p])\n\t\t\t\tif pList[p]<11:\n\t\t\t\t\tif sIndex == -1:\n\t\t\t\t\t\tsIndex = p\n\t\t\texcept: \n\t\t\t\tpass\n\n\t\t# if no acceptable start is found return -1\n\n\t\tif sIndex == -1:\n\t\t\treturn -1\n\n\t\tif sIndex+1 >= len(pList):\n\t\t\treturn -1\n\t\t\n\t\t#print(sIndex, pList[sIndex], pList[sIndex+1])\n\n\t\tif (pList[sIndex]<4 or pList[sIndex]>11):\n\t\t\treturn -1\n\n\t\tif (pList[sIndex+1]<2 or pList[sIndex+1]>6):\n\t\t\treturn -1\n\n\t\t\"\"\" pulses are made up of 2 parts, a fixed length low (approx 0.5-0.6ms)\n\t\tand a variable length high. The length of the high determines whether or\n\t\tnot a 0,1 or control pulse/bit is being sent. Highes of length approx 0.5-0.6ms\n\t\tindicate a 0, and length of approx 1.6-1.7 ms indicate a 1\"\"\" \n\t\t\n\t\t \n\t\tfor i in range(sIndex+2,len(pList),2):\n\t\t\tif i+1 < len(pList):\n\t\t\t\tif pList[i+1]< 0.9: \n\t\t\t\t\tbitList.append(0)\n\t\t\t\telif pList[i+1]< 2.5:\n\t\t\t\t\tbitList.append(1)\n\t\t\t\telif (pList[i+1]> 2.5 and pList[i+1]< 45):\n\t\t\t\t\t#print('end of data found')\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\n\t\tif self.verbose == True:\n\t\t\tprint(bitList)\n\n\t\t# convert the list of 1s and 0s into a\n\t\t# binary number\n\n\t\tpulse = 0\n\t\tbitShift = 0\n\n\t\tfor b in bitList: \n\t\t\tpulse = (pulse<<bitShift) + b\n\t\t\tbitShift = 1 \n\n\t\treturn pulse",
"def extract_cal_pulse(rs,constants):\n \n\n [dark_interval_end_time, laser_pulse_time, cal_pulse_end_time] = \\\n constants['apd_pulse_timing']\n bin_duration = constants['binwidth']\n s_bin = int(laser_pulse_time / bin_duration) # laser pulse bin number\n\n cw_i2scan='enable_cw_i2scan' in constants and constants['enable_cw_i2scan']\n #if a i2_scans generated with cw seedlaser\n #use entire record after end of normal cal pulse\n if cw_i2scan:\n dark_interval_end_bin = int(cal_pulse_end_time / bin_duration)- 1\n cal_pulse_end_bin = len(rs.molecular_counts)\n else: \n dark_interval_end_bin = int(dark_interval_end_time / bin_duration)- 1\n cal_pulse_end_bin = int(np.ceil(cal_pulse_end_time / bin_duration))\n \n if hasattr(rs,'molecular_counts'):\n rs.molecular_cal_pulse = np.sum(rs.molecular_counts[:\n ,dark_interval_end_bin:cal_pulse_end_bin], 1)\n if not cw_i2scan:\n rs.molecular_cal_pulse -= (cal_pulse_end_bin-dark_interval_end_bin) * rs.mol_dark_counts[:,0]\n rs.molecular_cal_pulse = \\\n hau.T_Array(rs.molecular_cal_pulse \\\n / (1.0* rs.seeded_shots))\n if hasattr(rs,'combined_hi_counts'): \n rs.combined_hi_cal_pulse = np.sum(rs.combined_hi_counts[:\n , dark_interval_end_bin:cal_pulse_end_bin], 1)\n if not cw_i2scan:\n rs.combined_hi_cal_pulse -= (cal_pulse_end_bin-dark_interval_end_bin) * rs.c_hi_dark_counts[:,0] \n rs.combined_hi_cal_pulse = \\\n hau.T_Array(rs.combined_hi_cal_pulse \\\n / (1.0* rs.seeded_shots))\n if hasattr(rs,'combined_lo_counts'):\n rs.combined_lo_cal_pulse = np.sum(rs.combined_lo_counts[:\n , dark_interval_end_bin:cal_pulse_end_bin], 1)\n if not cw_i2scan:\n rs.combined_lo_cal_pulse -= (cal_pulse_end_bin-dark_interval_end_bin) * rs.c_lo_dark_counts[:,0] \n rs.combined_lo_cal_pulse = \\\n hau.T_Array(rs.combined_lo_cal_pulse \\\n / (1.0* rs.seeded_shots)) \n if hasattr(rs,'molecular_i2a_counts'):\n rs.molecular_i2a_cal_pulse = np.sum(rs.molecular_i2a_counts[:\n , dark_interval_end_bin:cal_pulse_end_bin], 1)\n if not cw_i2scan:\n rs.molecular_i2a_cal_pulse -= (cal_pulse_end_bin-dark_interval_end_bin) * rs.mol_i2a_dark_counts[:,0] \n rs.molecular_i2a_cal_pulse = \\\n hau.T_Array(rs.molecular_i2a_cal_pulse \\\n / (1.0* rs.seeded_shots))\n if hasattr(rs,'combined_1064_counts'):\n rs.combined_1064_cal_pulse = np.sum(rs.combined_1064_counts[:\n , dark_interval_end_bin:cal_pulse_end_bin], 1) # \\\n #-(cal_pulse_end_bin-dark_interval_end_bin) * rs.combined_1064_dark_counts[:,0] \n rs.combined_1064_cal_pulse = \\\n hau.T_Array(rs.combined_1064_cal_pulse \\\n / (1.0* rs.seeded_shots))\n \n return",
"def bin_spikes(trials, spk_times, time_bin):\r\n angles_dict = Counter(trials[:,0]) # we get a dictionary of the values and their counts\r\n dir_rates = np.zeros( (len(angles_dict),2 ) )\r\n angles = angles_dict.items()\r\n index = 0\r\n # for each angle sum all the APs over all the trials. angle[0] contains the number of trials for that angle\r\n for angle in angles: # select a particular angle\r\n fire_cnt = 0\r\n for a in range(0,len(trials[:,0])):\r\n if(angle[0] == trials[a,0]):\r\n activity_time = trials[a,1]\r\n for api in range(0,len(spk_times)):\r\n if((spk_times[api] >= (activity_time - time_bin)) and (spk_times[api] <= (activity_time + time_bin)) ):\r\n fire_cnt = fire_cnt + 1\r\n #print \"Fire at activity:\" + str(activity_time) + \"AP Time: \" + str(spk_times[api]) + \"Angle:\" + str(angle[0])\r\n # Update the (angle, fire count) into the OP array\r\n # We need to divide by the nunmber of trials to get the average spike count per trial\r\n # Divide by 2*time_bin to convert the spike count to Firing rate in spikes / second\r\n dir_rates[index] = [angle[0], fire_cnt /(angle[1]* 2 * time_bin)]\r\n index = index + 1\r\n \r\n dir_rates = dir_rates[dir_rates[:,0].argsort()] # sort by angle\r\n # argsort() returns the indexes of the sorted elements\r\n print dir_rates\r\n\r\n # Now lets plot the data\r\n #plt.figure()\r\n width = 45\r\n ax = plt.subplot(2,2,1)\r\n rects1 = ax.bar(dir_rates[:,0] - width/2, dir_rates[:,1],width)\r\n ax.set_xlabel(\"Direction of Motion (degrees)\")\r\n ax.set_ylabel(\"Firing Rate (spikes/s)\")\r\n ax.set_title(\"Example Neuron Tuning Curve\")\r\n ax.set_xlim([-width/2,315 + width/2])\r\n ax.set_xticks(dir_rates[:,0])\r\n # derive the labels for the x-ticks\r\n label = []\r\n for i in range(0,len(dir_rates[:,0])):\r\n label.append(str(int(dir_rates[i,0])))\r\n \r\n ax.set_xticklabels(label)\r\n \r\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\r\n ax.get_xticklabels() + ax.get_yticklabels()):\r\n item.set_fontsize(11)\r\n \r\n # http://matplotlib.org/examples/pylab_examples/polar_demo.html\r\n # for the Polar plot, duplicate the first value into the value for 360\r\n #dir_rates = np.append(dir_rates, [360,dir_rates[0,1]])\r\n theta = np.append(dir_rates[:,0], 360)\r\n r = np.append(dir_rates[:,1], dir_rates[0,1])\r\n ax = plt.subplot(222,polar=True)\r\n ax.set_title(\"Example Neuron Tuning Curve\")\r\n ax.plot(np.deg2rad(theta),r,label=\"Firing Rate (spikes/s)\")\r\n ax.legend(loc=8,fontsize=7)\r\n\r\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\r\n ax.get_xticklabels() + ax.get_yticklabels()):\r\n item.set_fontsize(11)\r\n \r\n plt.show()\r\n\r\n \r\n \r\n return dir_rates",
"def PulseConv(\n signal, # const double * signal,\n pulseSize, # int pulseSize,\n):\n signalSize = len(signal)\n if pulseSize > signalSize:\n # ENH: handle this endcase\n raise ValueError(f\"pulseSize ({pulseSize}) > signalSize ({signalSize})\")\n pulseHeight = 1.0 / pulseSize\n\n # Circular padding init\n result = np.zeros(signalSize, dtype=np.float_)\n for k in range((pulseSize + 1) // 2):\n result[0] += signal[k]\n for k in range(pulseSize // 2):\n result[0] += signal[k]\n result[0] *= pulseHeight\n\n n = 1\n for k in range(pulseSize // 2, signalSize + (pulseSize // 2) - 1):\n tail = k - pulseSize\n if tail < 0:\n tail = -tail - 1\n head = k\n if head >= signalSize:\n head = signalSize - 1 - (head - signalSize)\n result[n] = result[n - 1] + ((signal[head] - signal[tail]) * pulseHeight)\n n += 1\n\n return result",
"def _pulse(self):\n self.u = dot(self.u_sfq, self.u)",
"def build_generator_signals(pulse_len, sample_rate, DRAG_params):\n A_pi = np.pi\n A_pi_half = np.pi/2\n \n # Arguments for envelopes\n pi_envelope_args = {\n 'A': A_pi,\n 'x_coeff': DRAG_params['x_coeff'],\n 'y_coeff': DRAG_params['y_coeff'],\n 'det_coeff': DRAG_params['det_coeff'],\n 'tg': pulse_len/2,\n 'tn': pulse_len/2,\n 'tsigma': pulse_len/4\n }\n \n pi_half_envelope_args = {\n 'A': A_pi_half,\n 'x_coeff': DRAG_params['x_coeff'],\n 'y_coeff': DRAG_params['y_coeff'],\n 'det_coeff': DRAG_params['det_coeff'],\n 'tg': pulse_len/2,\n 'tn': pulse_len/2,\n 'tsigma': pulse_len/4\n }\n \n times, pi_env, pi_deriv, pi_dets = \\\n DRAG_utils.create_ge_envelopes(sample_rate,\n pulse_len,\n pi_envelope_args)\n times, pi_half_env, pi_half_deriv, pi_half_dets = \\\n DRAG_utils.create_ge_envelopes(sample_rate, \n pulse_len, \n pi_half_envelope_args)\n identity = np.zeros(len(pi_env['r']))\n \n # Construct the pulse dictionary:\n pulse_dict = {\n 'identity': identity,\n 'pi': np.array(pi_env['r']),\n 'pi_derivative': np.array(pi_deriv['r']),\n 'pi_detuning': np.array(pi_dets['r']),\n 'pi_half': np.array(pi_half_env['r']),\n 'pi_half_derivative': np.array(pi_half_deriv['r']),\n 'pi_half_detuning': np.array(pi_half_dets['r']),\n }\n return pulse_dict",
"def normalize_bins(self):\n self.norm_bin = np.ones(self.nbins)\n for i in range(self.nbins):\n f = lambda z: self.raw_dndz_bin(z, i)\n\n norm = integrate.simps(f(np.linspace(self.z_min,self.z_max,1000)), x=np.linspace(self.z_min,self.z_max,1000))\n\n \n self.norm_bin[i] = 1.0/norm\n print(self.norm_bin[i])",
"def pulse(self):\n for _, star_system in self.star_systems.iteritems():\n star_system.pulse()",
"def processBinFile(OpenedFile):\n raw_data = np.fromfile(OpenedFile, dtype = np.uint8)\n bin_file_size = len(raw_data) \n ii = np.zeros((1,128), dtype=np.int)\n start_byte = 0\n rp_i = 0\n rp_locs = np.zeros(6240, dtype='int') \n for i in range(1, int(bin_file_size/32096) + 1):\n raw_fire_time = raw_data[start_byte + 24:start_byte + 32]\n roll_b = raw_data[start_byte + 16:start_byte + 18].view('int16')\n pitch_b = raw_data[start_byte + 18:start_byte + 20].view('int16')\n if((roll_b != 8224) | (pitch_b != 8224)):\n rp_locs[rp_i] = i\n ROLL_R[rp_i] = roll_b\n rp_i = rp_i + 1\n \n for k in range(0, 8):\n raw_signal = raw_data[start_byte + k * 4008 + 40 : start_byte + k * 4008 + 4040].view('uint16')\n raw_signal = np.float16((raw_signal.astype(\"double\")-32768)/32768)\n raw_signal = np.asmatrix(raw_signal)\n #raw_first_ref = raw_data[start_byte+k*4008+32:start_byte +k*4008+34]\n #first_ref = raw_first_ref.view('uint16')\n channel_index = raw_data[start_byte + k*4008 + 38].astype(\"int\")\n SIGNAL_MATRICES[channel_index, ii[0,channel_index], :] = raw_signal\n ii[0,channel_index] = ii[0,channel_index] + 1\n start_byte = start_byte +32096\n return SIGNAL_MATRICES, ROLL_R",
"def bin_info(b_axis, b_count):\n\n plt.figure()\n plt.plot(b_axis[0], b_count[0], \".-\")\n plt.plot(b_axis[0], b_count[1], \".-\") \n plt.plot(b_axis[0], b_count[2], \".-\")\n plt.plot(b_axis[0], b_count[3], \".-\") \n plt.title(\"Shots per fringe (4000 = 0)\")\n plt.xlabel(\"Fringe\")\n plt.ylabel(\"Shots per fringe\")\n\n plt.figure()\n plt.plot(numpy.bincount(numpy.array(b_count[0], dtype=numpy.int)))\n plt.plot(numpy.bincount(numpy.array(b_count[1], dtype=numpy.int)))\n plt.plot(numpy.bincount(numpy.array(b_count[2], dtype=numpy.int)))\n plt.plot(numpy.bincount(numpy.array(b_count[3], dtype=numpy.int)))\n plt.title(\"Bins with certain number of shots\")\n plt.xlabel(\"Number of shots\")\n plt.ylabel(\"Number of bins\")\n \n plt.show()",
"def single_pulse_to_fpga(self,ticks, DIOstates):\r\n #TODO USE bitwise stuff instead !!!!! That night speed up things. \r\n # Or keep it as it is if it is not more clear. \r\n #Put the number of ticks into the 32 bit number. \r\n x = int(ticks) #The first 16-bit is associated with the nb of ticks\r\n \r\n #The next 16 bits encode the state of the corresponding channel (0 = Off, 1 = On)\r\n for i in range(16, 32):\r\n #Each DIO state is either 0 or 1. So we write the next 16 bit in binary. \r\n x += DIOstates[i-16]*2**i \r\n #Return the 32 bit number.\r\n return x",
"def plotOverlappingBins(firetimes, numbins, time_period, settletime, bin_width_time):\n CAUSAL = True\n binlist = [0]*numbins\n firetimes = array(firetimes)\n ## MOOSE often inserts one or two spiketime = 0.0 entries\n ## when storing spikes, so discount those:\n firetimes = firetimes[ where(firetimes>0.0)[0] ]\n bindt = time_period/float(numbins)\n ## if CAUSAL, take spikes only to the left of bin centre_times.\n if CAUSAL: centre_times = arange(bindt, time_period+bindt/2.0, bindt)\n else: centre_times = arange(bindt/2, time_period, bindt)\n bin_half_t = bin_width_time/2.0\n rightmost_t = time_period\n for firetime in firetimes:\n ## The end bins will not show correct firing rate!\n if firetime>=settletime and firetime<(settletime+time_period):\n firetime -= settletime\n ## Each firetime is in multiple bins depending on bin_width_time\n for binnum,bin_centre_t in enumerate(centre_times):\n ## if CAUSAL, take spikes only to the left of bin centre_times.\n if CAUSAL:\n bin_left = bin_centre_t - bin_width_time\n bin_right = bin_centre_t\n else:\n bin_left = bin_centre_t - bin_half_t\n bin_right = bin_centre_t + bin_half_t\n if firetime >= bin_left and firetime < bin_right:\n binlist[binnum] += 1\n ## Next lines implement circularity of firetimes\n if bin_left < 0 and firetime >= (bin_left+rightmost_t):\n binlist[binnum] += 1\n if bin_right > rightmost_t and firetime < (bin_right-rightmost_t):\n binlist[binnum] += 1\n return [float(binspikes)/bin_width_time for binspikes in binlist] # return firing rate in Hz",
"def pulseOut(self, pinNum, pulse, count):\r\n if self.mcuType == 0:\r\n if pulse < 1 or pulse > 65535:\r\n sys.stderr.write('Pulse Value is Out of Range: [1-65535]\\n')\r\n return\r\n if count < 1 or count > 255:\r\n sys.stderr.write('Pulse Repeat Value is Out of Range: [1-255]\\n')\r\n return\r\n bL = pulse & 255\r\n bH = pulse >> 8\r\n self.mcuserial.write('p' + chr(pinNum) + chr(count))\r\n #time.sleep(0.1)\r\n self.mcuserial.write('p' + chr(bL) + chr(bH))\r\n pd = \"\"\r\n while pd != \"pd\":\r\n pd = self.mcuserial.read(2)\r\n else:\r\n sys.stderr.write('Your current pyMCU board does not support this feature.\\n')",
"def create_discrete_binning(policy_data, out):\n discretized_data = np.floor((policy_data - mins) / bin_sizes).astype(np.uint)\n # Stackoverflow #2004364\n np.add.at(out, tuple([discretized_data[:, d] for d in range(ndim)]), 1)\n out /= len(policy_data)",
"def binarize(i, bins):\n\n hist, edges = np.histogram(i, bins=bins, range=[10, 2000], normed=True)\n edges = (edges[:-1] + edges[1:])/2\n hist *= edges\n\n return hist"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Print crossword assignment to the terminal.
|
def print(self, assignment):
letters = self.letter_grid(assignment)
for i in range(self.crossword.height):
for j in range(self.crossword.width):
if self.crossword.structure[i][j]:
print(letters[i][j] or " ", end="")
else:
print("█", end="")
print()
|
[
"def echo() -> None:\n from efro.terminal import Clr\n clrnames = {n for n in dir(Clr) if n.isupper() and not n.startswith('_')}\n first = True\n out: List[str] = []\n for arg in sys.argv[2:]:\n if arg in clrnames:\n out.append(getattr(Clr, arg))\n else:\n if not first:\n out.append(' ')\n first = False\n out.append(arg)\n out.append(Clr.RST)\n print(''.join(out))",
"def panda(self):\n print\n print 32 * ' ' + \".;;.\"\n print 31 * ' ' + \"/;;;;\\ ___ .;;. \" + \\\n Fore.GREEN + \" |\\\\\" + Fore.RESET\n print 30 * ' ' + \"|;(;;;-\\\"\\\" `'-.,;;;;;\\\\ \" + \\\n Fore.GREEN + \" +-+\" + Fore.RESET\n print 31 * ' ' + \"\\;'\" + 12 * ' ' + \"';;;);/ \" + \\\n Fore.GREEN + \" |X|\" + Fore.RESET\n print 31 * ' ' + \"/\" + 16 * ' ' + \"\\;;' \" + \\\n Fore.GREEN + \" |X|\" + Fore.RESET\n print 30 * ' ' + \"/ .;. .;. \\\\ \" + \\\n Fore.GREEN + \" |X| ___\" + Fore.RESET\n print 30 * ' ' + \"| ;;o;; ;;o;; | \" + \\\n Fore.GREEN + \" +-+ /MMMMMA.\" + Fore.RESET\n print 30 * ' ' + \"; '\\\"-'` `'-\\\"' | \" + \\\n Fore.GREEN + \" |X| /____ \" + Fore.RESET\n print 30 * ' ' + \"/\\ ._. / \" + \\\n Fore.GREEN + \" |X| / `VMMMA.\" + Fore.RESET\n print 28 * ' ' + \";;;;;_ ,_Y_, _.' \" + \\\n Fore.GREEN + \" |X|/ \" + Fore.RESET\n print 27 * ' ' + \"/;;;;;\\`--.___.--;. \" + \\\n Fore.GREEN + \" +-+\" + Fore.RESET\n print 26 * ' ' + \"/|;;;;;;;.__.;;;. \\\\\\\\ \" + \\\n Fore.GREEN + \" |X|\" + Fore.RESET\n print 25 * ' ' + \"; \\;;;;;;;;;;;;;;\\ ;\\__ .;. \" + \\\n Fore.GREEN + \" |X|\" + Fore.RESET\n print 25 * ' ' + \"| ';;;;;;;;=;;;;' |-__;;;;/ \" + \\\n Fore.GREEN + \" |X|\" + Fore.RESET\n print 25 * ' ' + \"| `\\\"\\\"` .---._ /;/;;\\;;/ \" + \\\n Fore.GREEN + \" +-+\" + Fore.RESET\n print 24 * ' ' + \"/ ; /;;;;;;;-;/;;/|;/ \" + \\\n Fore.GREEN + \" |X|\" + Fore.RESET\n print 24 * ' ' + \"\\_,\\ |;;;;;;;;;;;;| | \" + \\\n Fore.GREEN + \" |X|\" + Fore.RESET\n print 28 * ' ' + \"'-...--';;;;;;;;;;;;\\/ \" + \\\n Fore.GREEN + \" |X|\" + Fore.RESET\n print 37 * ' ' + \"`\\\"\\\"\\\"` `\\\"` \"\n print\n print 30 * ' ' + \"Thanks for flying\" \n print Fore.GREEN + 27 * ' ' + \"B O N A P A R T I C L E\" + Fore.RESET\n print Fore.YELLOW + 27 * ' ' + \"-----------------------\" + Fore.RESET\n print",
"def generate_outprint():\n s = 'Switched to IPython ... defined variables:\\n\\n\\t'\n s += 'corpus, results, concordance, edited ...\\n\\n\\tType \"quit\" to return to corpkit environment'\n return s",
"def instructions(self):\n os.system('clear')\n print('\\n')\n print('{:^80}'.format('-----------Tic Tac Toe-----------'), end='\\n\\n')\n print('{:^80}'.format('Squares are numbered 1-9 starting'))\n print('{:^80}'.format('with the top left corner.'))",
"def print_space(self):\n print(\"The car has {0} doors and {1} seets\".format(self.doors, self.seets))",
"def show_in_console(self):\n print(' '+'_' * self.height * 2)\n for i in range(self.width):\n if i == 0:\n print(' .'+''.join(self.laby[i]))\n else:\n print(' '+'|'+''.join(self.laby[i]))",
"def printTerm4(self):\n vprint=[]\n counter=0\n for x in self.pl:\n if self.pl[x] != ['I']:\n counter=counter+1\n vprint += '\\sigma_'\n cosa=self.pl[x][0]\n vprint += self.pl[x]\n vprint += '^'\n vprint += str(x)\n vprint=''.join(vprint)\n return self.c,vprint,counter",
"def _l(self, paper, **_):\n print(\"=\" * 80)\n paper.format_term_long()\n print(\"=\" * 80)\n return None",
"def print_prompt(self):\n clear_term()\n\n print('Press \"w\", \"a\", \"s\", or \"d\" to move Up, Left, Down or Right respectively.')\n print('Enter \"p\" to quit.\\n')\n self.grid.draw_grid()\n print('\\nScore: ' + str(self.grid.score))",
"def print(self):\r\n # Iterate through the words in key value pairs\r\n for length in sorted(self.words):\r\n words = self.words[length]\r\n # Sort in place - that way, future sorts don't suffer in efficiency\r\n words.sort()\r\n # Print the length, and then the words delimited by \", \"\r\n print('{}: {}'.format(length, ', '.join(words)))",
"def print_machine(machine):\n\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Transition table:\\n\")\n sys.stdout.write(\"\\n\")\n\n TTable = machine.trans_table\n\n sys.stdout.write(\" \")\n for j in xrange(len(TTable[0])):\n sys.stdout.write(\"+-----\")\n sys.stdout.write(\"+\\n\")\n\n sys.stdout.write(\" \")\n for j in xrange(len(TTable[0])):\n sys.stdout.write(\"| %d \" % j)\n sys.stdout.write(\"|\\n\")\n\n sys.stdout.write(\" +---\")\n for j in xrange(len(TTable[0])):\n sys.stdout.write(\"+-----\")\n sys.stdout.write(\"+\\n\")\n\n for i in xrange(len(TTable)):\n sys.stdout.write(\" | %c \" % states[i])\n for j in xrange(len(TTable[i])):\n sys.stdout.write(\"| \")\n if TTable[i][j][0] == -1 and \\\n TTable[i][j][1] == -1 and \\\n TTable[i][j][2] == -1:\n sys.stdout.write(\"--- \")\n else:\n sys.stdout.write(\"%c\" % symbols[TTable[i][j][0]])\n sys.stdout.write(\"%c\" % dirs [TTable[i][j][1]])\n sys.stdout.write(\"%c \" % states [TTable[i][j][2]])\n sys.stdout.write(\"|\\n\")\n\n sys.stdout.write(\" +---\")\n for j in xrange(len(TTable[0])):\n sys.stdout.write(\"+-----\")\n sys.stdout.write(\"+\\n\")\n\n sys.stdout.write(\"\\n\")\n\n sys.stdout.flush()",
"async def ascii(self, ctx, word: str):\n display_text = '```' + text2art(text=word, font=\"random\", chr_ignore=True) + '```'\n await ctx.send(display_text)",
"def phits_print(self):\n\t\tx = \" \".join(str(i) for i in self.x)\n\t\ty = \" \".join(str(i) for i in self.y)\n\t\tz = \" \".join(str(i) for i in self.z)\n\t\ttxt = \\\n\t\t\tf\" {self.sn} {self.trn} \" + \\\n\t\t\tf\"{self.symbol} {x} {y} {z}\" + \\\n\t\t\tf\" $ name: '{self.name}' \" + \\\n\t\t\t\"(Rectangular solid) [x_min x_max] [y_min y_max] [z_min z_max]\"\n\n\t\tif self.trn != \"\":\n\t\t\ttxt += f\" with tr{self.trn}\"\n\t\treturn txt",
"def print_plosives():\n\n print(\"p\")\n print(\"t\")\n print(\"k\")\n print(\"b\")\n print(\"d\")\n print(\"g\")",
"def print_state(self):\n p1_board = self.board[0:6]\n p2_board = self.board[7:13]\n p2_board.reverse()\n p1_purse = self.board[6]\n p2_purse = self.board[13]\n\n print('\\n')\n print(\"Player 1 Score: {}\".format(self.p1_score))\n print(\"Player 2 Score: {}\".format(self.p2_score))\n print('\\n')\n print(\"Active Player: {}\".format(self.active_player))\n print(\"Actions: \", self.get_legal_actions())\n print(\"Game Over: {}\".format(self.is_over))\n print('\\n')\n print('\\t ' + ' '.join(map(str, p2_board)))\n print('\\t' + str(p2_purse) + '\\t\\t' + str(p1_purse))\n print('\\t ' + ' '.join(map(str, p1_board)))\n print('\\n')\n print(\"=\"*50)",
"def display(self):\r\n print(\"\\nCop name : \", self.cop_name)\r\n print(\"Cop age : \", self.cop_age)\r\n print(\"Cop work experience : \", self.work_exp)\r\n print(\"Cop designation : \", self.designation)",
"def print_output_caption(text: str) -> None:\n print('')\n print('=' * 50)\n print(text.upper())\n print('=' * 50)",
"def display1(self):\r\n print(\"The mission of the Cop : \", self.mission)",
"def print_tuple_current_guess(self):\n print(\"Current Guess: \", self.tuple_word)",
"def displayBoard(correct, missed, secret):\n \n currentDraw = SPACEMANDRAWINGS[len(missed)] #Missing = string of incorrect leters\n \n print(currentDraw)\n print(\"Letters Missed:\", end=\" \")\n for c in missed:\n print(c, end=\" \")\n print(\"\")\n\n blanks = \"_ \" * len(secret)\n for c in range(len(secret)):\n if secret[c] in correct: #if the letter at this secret index is in the list of correct letters\n blanks = blanks[:c*2] + secret[c] +\" \"+ blanks[(c*2):-2] #create blanks till the index of the correct letter place\n #the correct letter, then create blanks till the end\n print(\"Secret Word: {}\".format(blanks))\n print(\"\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Save crossword assignment to an image file.
|
def save(self, assignment, filename):
from PIL import Image, ImageDraw, ImageFont
cell_size = 100
cell_border = 2
interior_size = cell_size - 2 * cell_border
letters = self.letter_grid(assignment)
# Create a blank canvas
img = Image.new(
"RGBA",
(self.crossword.width * cell_size,
self.crossword.height * cell_size),
"black"
)
font = ImageFont.truetype("assets/fonts/OpenSans-Regular.ttf", 80)
draw = ImageDraw.Draw(img)
for i in range(self.crossword.height):
for j in range(self.crossword.width):
rect = [
(j * cell_size + cell_border,
i * cell_size + cell_border),
((j + 1) * cell_size - cell_border,
(i + 1) * cell_size - cell_border)
]
if self.crossword.structure[i][j]:
draw.rectangle(rect, fill="white")
if letters[i][j]:
w, h = draw.textsize(letters[i][j], font=font)
draw.text(
(rect[0][0] + ((interior_size - w) / 2),
rect[0][1] + ((interior_size - h) / 2) - 10),
letters[i][j], fill="black", font=font
)
img.save(filename)
|
[
"def save_image(img, filename):\n cv2.imwrite(filename, img)",
"def save(self, filename):\n assert(self.canvas is not None)\n self.canvas.update()\n self.canvas.postscript(file=f'{filename}.eps')\n img = Image.open(f'{filename}.eps')\n img.save(f'{filename}.png', 'png')",
"def save_image(image, file_name):\n imsave(file_name, image)",
"def save_image(input, output, target, filename):\n all_images = torch.cat((input, output, target))\n vutils.save_image(all_images, filename=\"saved_models/\" + filename, normalize=True)",
"def WriteImage(self, filename):\r\n cv2.imwrite(filename,self.img)",
"def save(self, fn):\n plt.imsave(fn, self.display)",
"def _save_button_clicked(self):\n\n fileName, _ = QtWidgets.QFileDialog.getSaveFileName(self,\"Save File\",UWBsim.BASE_DIR,\"All Files (*);;YAML files (*.yaml)\")\n \n yaml_dump = {}\n for i in range(len(self.anchor_positions)):\n key = str(i)\n yaml_dump[key] = {}\n yaml_dump[key]['x'] = str(self.anchorLineEdits[i][0].text())\n yaml_dump[key]['y'] = str(self.anchorLineEdits[i][1].text())\n yaml_dump[key]['z'] = str(self.anchorLineEdits[i][2].text())\n\n if not fileName.endswith('.yaml'):\n fileName = fileName + '.yaml'\n \n with open(fileName, 'w') as f:\n yaml.safe_dump(yaml_dump, f)",
"def save_as_file(self, data_filename=DATASETS_PATH+'data.pt', labels_filename=DATASETS_PATH+'labels.pt',\n ids_filename=DATASETS_PATH+'ids.pt'):\n print(\"Saving...\")\n torch.save([image for image in self.data], data_filename)\n torch.save([label for label in self.labels], labels_filename)\n torch.save([filename for filename in self.filenames], ids_filename)\n print(\"Done\")",
"def save_image(self,path):\n image = PIL.Image.new(\"RGB\",(self.width,self.height))\n image.putdata([piedit.colors.hex_to_rgb(p) for p in self.pixels])\n image.save(path, \"PNG\")\n self.message_handler.handle_message(\"FILE_SAVED\")\n self.set_current_file(path)\n self.set_changes_made(False)\n self.set_window_title(os.path.basename(path))",
"def save_control_shape_to_file(name, icon, curve, file_path):\n data = {\n \"name\": name,\n \"icon\": icon,\n \"sort\": 100,\n \"curves\": get_shape_data(curve),\n }\n with open(file_path, \"w\") as fp:\n yaml.dump(data, fp)",
"def write_image(image: Image, filename: str) -> None:\n image.save(filename)",
"def save_img(self, label):\n dataset_to_save = self.dataset\n # New images will be saved outside SOTA dataset if the line below is\n # uncommented\n # dataset_to_save = \"extra-dataset\"\n\n label_path = \"utils/datasets/{0}/{1}\".format(dataset_to_save, label)\n if not os.path.exists(label_path):\n os.makedirs(label_path)\n img_num = 0\n while os.path.exists(\"{0}/{1}{2}.png\".format(label_path, label, img_num)):\n img_num += 1\n\n img_path = \"{0}/{1}{2}.png\".format(label_path, label, img_num)\n\n cv2.imwrite(img_path, self.display_img)",
"def save_image(self):\n self.driving_env.save_image()",
"def save_image(window: tk.Tk) -> None:\r\n\r\n window.SaveFile()",
"def _save_annotation(annotation, filename):\n pil_image = Image.fromarray(annotation.astype(dtype=np.uint8))\n with tf.gfile.Open(filename, mode='w') as f:\n #NOTE: maybe this \n pil_image.save(f, 'PNG')",
"def save_box(_img, _p0, _p1, _dir_out):\r\n global opt_squared, img_org\r\n\r\n now = datetime.datetime.now()\r\n filename = now.strftime('%Y-%m-%d_%H-%M-%S')\r\n\r\n if opt_squared:\r\n _p0, _p1 = make_squared(_p0, _p1)\r\n\r\n x0 = int(min(_p0[0], _p1[0]) // resize_ratio)\r\n y0 = int(min(_p0[1], _p1[1]) // resize_ratio)\r\n x1 = int(max(_p0[0], _p1[0]) // resize_ratio)\r\n y1 = int(max(_p0[1], _p1[1]) // resize_ratio)\r\n\r\n img_boxed = img_org[y0:y1, x0:x1]\r\n cv2.imwrite(os.path.join(_dir_out, filename + '.png'), img_boxed)\r\n\r\n print('saved image x0:{0}, y0:{1}, x1:{2}, y1:{3}'.format(x0, y0, x1, y1))",
"def save_image(im_obj, filename):\n im_obj.save(filename + \".bmp\", \"BMP\")",
"def do_save_image(\n self,\n workspace,\n filename,\n pixels,\n pixel_type,\n c=0,\n z=0,\n t=0,\n size_c=1,\n size_z=1,\n size_t=1,\n channel_names=None,\n ):\n bioformats.formatwriter.write_image(\n filename,\n pixels,\n pixel_type,\n c=c,\n z=z,\n t=t,\n size_c=size_c,\n size_z=size_z,\n size_t=size_t,\n channel_names=channel_names,\n )",
"def save(self, filename):\n\t\tself.getZ().write(filename)",
"def saving_only_annotations(path,img ,xmin, xmax, ymin, ymax,name_damage, img_name):\n name = (path + '/'+ name_damage+\"_\"+img_name+ \"adionis_.jpg\")\n annotation = img[ymin:ymax, xmin:xmax]\n cv2.imwrite(name, annotation)\n print(\"saving image\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update `self.domains` such that each variable is nodeconsistent. (Remove any values that are inconsistent with a variable's unary constraints; in this case, the length of the word.)
|
def enforce_node_consistency(self):
# loop thru self.domains to access each variable
for var, domain in self.domains.items():
# remove words that do not fit the length of the space
inconsistent = []
for word in domain:
if len(word) != var.length:
inconsistent.append(word)
for word in inconsistent:
self.domains[var].remove(word)
|
[
"def preProcess(self, variables, domains, constraints, vconstraints):\n if len(variables) == 1:\n variable = variables[0]\n domain = domains[variable]\n for value in domain[:]:\n if not self(variables, domains, {variable: value}):\n domain.remove(value)\n constraints.remove((self, variables))\n vconstraints[variable].remove((self, variables))",
"def reconstrain(self):\n\n # test all solver splits\n subsolvers = self.state.se._solver.split()\n\n for solver in subsolvers:\n solver.timeout = 1000 * 10 # 10 seconds\n if not solver.satisfiable():\n for var in solver.variables:\n if var in self.variable_map:\n self.state.add_constraints(self.variable_map[var])\n else:\n l.warning(\"var %s not found in self.variable_map\", var)",
"def propagateConstraint(self):\n # compares assignments and determines if the assigment breaks the\n # constraints\n for var in self.vars:\n if not var.isAssigned():\n continue\n varAssignment = var.getAssignment()\n for otherVar in self.vars:\n if var == otherVar:\n continue\n if otherVar.size() == 1 and \\\n otherVar.getAssignment() == varAssignment:\n return False\n otherVar.removeValueFromDomain(varAssignment)\n return True",
"def assign(self, x, value):\n x.value = value\n\n modified = []\n\n # Actualizamos el dominio de los vecinos, eliminando este valor\n for var in x.vecinos:\n # Solo modificamos los dominios de variables sin asignar\n if var.value == 0 and value in var.domain:\n var.domain -= {value}\n modified.append(var)\n \n return modified",
"def __check_possible_domain(self, curr_variable, assignment, variables_copy):\n var_obj = variables_copy[curr_variable]\n # if the variable has an assignment already - do not check it!\n if var_obj.value is not None:\n return False\n copy_of_possible_domain = deepcopy(var_obj.get_possible_domain())\n previous_val = assignment[curr_variable]\n for domain_value in copy_of_possible_domain: # for d in possible domain.\n assignment[curr_variable] = domain_value\n constraints = var_obj.get_constraints()\n if not self.check_constraint_agreement(constraints, assignment):\n var_obj.remove_from_possible_domain(domain_value)\n # we should remove this value because there is at least one constraint who isn't happy about it.\n assignment[curr_variable] = previous_val\n if len(var_obj.get_possible_domain()) != 0:\n if len(var_obj.get_possible_domain()) == 1:\n assignment[curr_variable] = next(iter(var_obj.get_possible_domain()))\n return False # variable has at least one value in it's possible domain meaning still isn't empty.\n return True # curr_variable is wiped out.",
"def _generate_variables(self, names, domain):\n for i, name in enumerate(names):\n constraints = self.constraints.get_constraints_by_variable(name)\n var = Variable(name, domain[i], constraints)\n if var not in self.variables:\n self.variables[name] = var\n # adding neighbours to a variable:\n neighbours_names = set()\n for constraint in constraints:\n self.__add_neighbours_to_var(name, constraint.get_variables())\n # for neighbour in constraint.variables:\n # neighbours_names.add(neighbour)\n # if name in neighbours_names:\n # neighbours_names.remove(name) # remove self from neighbours.\n # self.variables[name].set_neighbours(neighbours_names) # give a reference to the set.\n else:\n raise Exception(\"Variable name repeats twice!\")",
"def forward_check(self, var, val):\n for v in self.neighbors[var]:\n if v not in self.assignment:\n changed_domain = self.domains[v].copy()\n for dom_val in self.domains[v]:\n if self.constraint_not_satisfied(var, val, v, dom_val):\n changed_domain.remove(dom_val)\n self.removed_domains[var].append((v, dom_val))\n self.domains[v] = changed_domain\n if len(self.domains[v]) == 0:\n return False\n return True",
"def test_consistency(self):\r\n import itertools\r\n num_solves = 4\r\n vars_lists = []\r\n ineqs_lists = []\r\n var_ids_order_created = []\r\n for k in range(num_solves):\r\n sum = 0\r\n constraints = []\r\n var_ids = []\r\n for i in range(100):\r\n var = Variable(name=str(i))\r\n var_ids.append(var.id)\r\n sum += var\r\n constraints.append(var >= i)\r\n var_ids_order_created.append(var_ids)\r\n obj = Minimize(sum)\r\n p = Problem(obj, constraints)\r\n objective, constr_map = p.canonicalize()\r\n all_ineq = itertools.chain(constr_map[s.EQ], constr_map[s.LEQ])\r\n var_offsets, var_sizes, x_length = p._get_var_offsets(objective, all_ineq)\r\n # Sort by offset.\r\n vars_ = sorted(var_offsets.items(), key=lambda (var_id, offset): offset)\r\n vars_ = [var_id for (var_id, offset) in vars_]\r\n vars_lists.append(vars_)\r\n ineqs_lists.append(constr_map[s.LEQ])\r\n\r\n # Verify order of variables is consistent.\r\n for i in range(num_solves):\r\n self.assertEqual(var_ids_order_created[i],\r\n vars_lists[i])\r\n for i in range(num_solves):\r\n for idx, constr in enumerate(ineqs_lists[i]):\r\n var_id, _ = lu.get_expr_vars(constr.expr)[0]\r\n self.assertEqual(var_ids_order_created[i][idx],\r\n var_id)",
"def consistance_noeuds(self):\n\n for c in self.contraintes:\n if c.dimension() == 1:\n # /!\\ iterer sur domaine[:], sinon on ne peut pas supprimer d'elements\n for v in c.variables[0].domaine[:]:\n if not c.est_valide(v):\n c.variables[0].domaine.remove(v)\n c.variables[0].label.remove(v)",
"def forward_check(self, x):\n changed = set()\n for (y, rel) in self.constr[x]:\n if self.assign[y] is None:\n to_remove = set()\n for v in self.var[y]:\n if (self.assign[x], v) not in rel:\n to_remove.add(v)\n if to_remove:\n self.remove_vals(y, to_remove)\n changed.add(y)\n return changed",
"def generate_powerlaw_var_constraints(\n num_var: int, domain_size: int, constraint_range: int\n) -> Tuple[Dict[str, Variable], Dict[str, Constraint], Domain]:\n\n # Use a barabasi powerlaw based constraints graph\n graph = nx.barabasi_albert_graph(num_var, 2)\n\n # import matplotlib.pyplot as plt\n # plt.subplot(121)\n # nx.draw(graph) # default spring_layout\n # plt.show()\n\n domain = Domain(\"d\", \"d\", range(domain_size))\n variables = {}\n for n in graph.nodes:\n v = Variable(var_name(n), domain)\n variables[v.name] = v\n logger.debug(\"Create var for node %s : %s\", n, v)\n\n constraints = {}\n for i, (n1, n2) in enumerate(graph.edges):\n v1 = variables[var_name(n1)]\n v2 = variables[var_name(n2)]\n values = random_assignment_matrix([v1, v2], range(constraint_range))\n c = NAryMatrixRelation([v1, v2], values, name=c_name(n1, n2))\n logger.debug(\"Create constraints for edge (%s, %s) : %s\", v1, v2, c)\n constraints[c.name] = c\n\n logger.info(\n \"Generates %s variables and %s constraints in a powerlaw\" \"network\",\n len(variables),\n len(constraints),\n )\n\n return variables, constraints, domain",
"def free_variables(self):\n # Task 7.6",
"def update(self):\n thermo_constraints = self._generate_constraints()\n\n for cons in thermo_constraints:\n if cons.name not in self.constraints:\n self.add_cons_vars([cons])\n logging.debug(\"Constraint {} added to the model\".format(cons.name))\n else:\n logging.warning(\n \"Constraint {} already in the model, removing previous entry\".format(\n cons.name\n )\n )\n self.solver.remove(cons.name)\n self.add_cons_vars([cons])",
"def _collect_vars(self):\n res = set()\n self.objective.collect_variables(res)\n for c in self.constraints:\n c.collect_variables(res)\n self.variables = list(res)\n self.var_slices = {}\n start = 0\n for var in self.variables:\n self.var_slices[var] = slice(start, start + var.size)\n start += var.size",
"def select_unassigned_variable(domains: Dict[Tuple[int, int],\r\n Set[int]])-> Tuple[int,int]:\r\n return min(filter(lambda cell: len(domains[cell])>1,domains.keys()), key = lambda cell: len(domains[cell]))",
"def _configure_nonlinear_variables(self, X_uniform):\n # determine number of variables and bins to use for KBinsDiscretizer.\n if self.ordering == \"pca\":\n self.nonlinear_indices_, self.residual_indices_ = self._configure_vars_pca(X_uniform)\n elif self.ordering == \"mi\":\n self.nonlinear_indices_, self.residual_indices_ = self._configure_vars_mi(X_uniform)\n elif self.ordering == \"phik\":\n self.nonlinear_indices_, self.residual_indices_ = self._configure_vars_phik(X_uniform)\n\n self.n_vars_ = len(self.nonlinear_indices_)\n self.n_resid_vars_ = len(self.residual_indices_)",
"def verify_domain_validity(self):\n self.component_count['domain'] = {}\n self.component_count['domain']['intents'] = len(self.domain.intents)\n self.component_count['domain']['utterances'] = len(self.domain.templates)\n self.component_count['domain']['actions'] = len(self.domain.user_actions)\n self.component_count['domain']['forms'] = len(self.domain.form_names)\n self.component_count['domain']['slots'] = len(self.domain.slots)\n self.component_count['domain']['entities'] = len(self.domain.entities)\n self.component_count['utterances'] = len(self.domain.templates)\n if self.domain.is_empty():\n self.summary['domain'] = [\"domain.yml is empty!\"]",
"def restrain(self):\n dofs = self.dofs\n for i in range(6):\n dofs[i] = BoundaryDof()",
"def eliminate_from_neighbors(csp, var) :\n\n constraints = csp.constraints_between(None, var)\n modified = []\n for con1 in constraints:\n variable2 = con1.var1\n domain1 = csp.get_domain(var)\n domain2 = csp.get_domain(variable2)\n\n remove = []\n for value1 in domain2:\n count = 0\n for value2 in domain1:\n if con1.check(value1,value2):\n break\n count += 1\n\n if count == len(domain1):\n remove.append(value1)\n\n if len(remove) != 0:\n for v in remove:\n domain2.remove(v)\n\n modified.append(variable2)\n if len(domain2) == 0:\n return None\n return sorted(modified)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Make variable `x` arc consistent with variable `y`. To do so, remove values from `self.domains[x]` for which there is no possible corresponding value for `y` in `self.domains[y]`. Return True if a revision was made to the domain of `x`; return False if no revision was made.
|
def revise(self, x, y):
revised = False
# binary constraint: neighbor overlap
# assess domain of x for consistency with domain of y (i.e. is overlap the same letter?)
if self.crossword.overlaps[x, y] is None:
# then no overlap between x and y, no revisions made
return revised
# get overlapping values between vars x and y, return value will be some pair (i, j)
i, j = self.crossword.overlaps[x, y]
removable = []
for x_word in self.domains[x]:
# flag resets for each x_word
flag = 0
for y_word in self.domains[y]:
if x_word[i] == y_word[j]:
# x is arc consistent with y if there is a value in the domain of y
# not editing y here, just looking for at least 1 matching word for each in x domain
flag = 1
# if flag not triggered, then none of the values in domain y match the current x word
if not flag:
removable.append(x_word)
revised = True
# removing from domain here, as removing during the loop above was triggering an error for changing the set during iteration
for x_word in removable:
self.domains[x].remove(x_word)
return revised
|
[
"def __check_possible_domain(self, curr_variable, assignment, variables_copy):\n var_obj = variables_copy[curr_variable]\n # if the variable has an assignment already - do not check it!\n if var_obj.value is not None:\n return False\n copy_of_possible_domain = deepcopy(var_obj.get_possible_domain())\n previous_val = assignment[curr_variable]\n for domain_value in copy_of_possible_domain: # for d in possible domain.\n assignment[curr_variable] = domain_value\n constraints = var_obj.get_constraints()\n if not self.check_constraint_agreement(constraints, assignment):\n var_obj.remove_from_possible_domain(domain_value)\n # we should remove this value because there is at least one constraint who isn't happy about it.\n assignment[curr_variable] = previous_val\n if len(var_obj.get_possible_domain()) != 0:\n if len(var_obj.get_possible_domain()) == 1:\n assignment[curr_variable] = next(iter(var_obj.get_possible_domain()))\n return False # variable has at least one value in it's possible domain meaning still isn't empty.\n return True # curr_variable is wiped out.",
"def isInDomain(xy):\n u = (xy[0]-x)/self.h\n return np.all((u >= self.domain[0]) & (u <= self.domain[1]))",
"def forward_check(self, var, val):\n for v in self.neighbors[var]:\n if v not in self.assignment:\n changed_domain = self.domains[v].copy()\n for dom_val in self.domains[v]:\n if self.constraint_not_satisfied(var, val, v, dom_val):\n changed_domain.remove(dom_val)\n self.removed_domains[var].append((v, dom_val))\n self.domains[v] = changed_domain\n if len(self.domains[v]) == 0:\n return False\n return True",
"def Revise(csp, Xi, Xj):\n revised = False\n for x in csp.domain[Xi]:\n canSatisfy = False\n for y in csp.domain[Xj]:\n if x != y:\n canSatisfy = True\n break\n if not canSatisfy:\n csp.domain[Xi].remove(x)\n revised = True\n return revised",
"def is_in_domain(px_ax: float, py_ax: float) -> bool:\n return 0.0 <= px_ax <= 1.0 and 0.0 <= py_ax <= 1.0",
"def _forward_compatibility_check(self, affected_var_name, affected_var_value):\n for var_name, var_domain in self.domain.items():\n if var_name not in self.affectation and (affected_var_name, var_name) in self.constraints:\n new_var_domain = [\n value\n for value in var_domain\n if self.constraints[(affected_var_name, var_name)](affected_var_value, value)\n ]\n if len(new_var_domain) == 0:\n # one of the non-assigned variable is no longer possible to assign\n return False\n if len(new_var_domain) < len(var_domain):\n self.domain_cache[affected_var_name][var_name] = var_domain\n self.domain[var_name] = new_var_domain\n\n return True",
"def dans_cercle(self,r,x,y):\r\n self.r_num(r)\r\n valid = (isinstance(x, int) or isinstance(x, float)) and \\\r\n (isinstance(y, int) or isinstance(y, float))\r\n if valid:\r\n if math.sqrt(x**2+y**2) < self.r:\r\n return True\r\n else:\r\n return False\r\n else:\r\n raise ValueError",
"def _admissible(self, x: np.ndarray) -> bool:\n return np.all(x <= self.ub) and np.all(x >= self.lb)",
"def consistent_with(self, other):\n for wcs1, wcs2 in zip(self.wcs, other.wcs):\n try:\n ra, dec = at.get_center_of_projection(wcs1)\n except TypeError: # if this returns None\n return False\n x, y = wcs1.invert(ra, dec)\n x2, y2 = wcs2.invert(ra, dec)\n dx = other.xoffset - self.xoffset\n dy = other.yoffset - self.yoffset\n distsq = dx * dx + dy * dy\n if distsq > 100 and (x-x2)**2 + (y-y2)**2 < 0.25 * distsq:\n return False\n return True",
"def _compatible(self, other):\n\n if not isinstance(other, Cuboid):\n return False\n \n if len(self._p_min) != len(other._p_min):\n return False\n \n for dom in set(self._domains.keys()) & set(other._domains.keys()):\n if self._domains[dom] != other._domains[dom]:\n return False\n \n dom_union = dict(self._domains)\n dom_union.update(other._domains)\n return all(dom in list(cs._domains.items()) for dom in list(dom_union.items()))",
"def ac3(self, arcs=None):\n # setup\n # queue = arcs\n if arcs is None:\n arcs = []\n # grab all neighbor pairs in the problem and add them to arcs\n for pair, overlaps in self.crossword.overlaps.items():\n # Crossword.overlaps is dict of ALL pairs, need just the ones w overlap\n if overlaps is not None:\n arcs.append(pair)\n\n # loop\n while len(arcs) != 0:\n # loop thru arcs until it is empty\n # grab one arc (pair of variables that are neighbors) & remove it from the queue as we are now considering it\n x, y = arcs.pop()\n\n # run it thru revise()\n # if false - nothing was changed do nothing, if true - x was changed\n if self.revise(x, y):\n if len(self.domains[x]) == 0:\n # check length of domain, if 0 then we cannot solve\n return False\n # otherwise, we need to re-review previous arcs now that there has been a change\n for pair, overlaps in self.crossword.overlaps.items():\n if overlaps is not None:\n if x in pair:\n # gather all arcs that include the changed x EXCEPT the current y cause that would be duplicative and add to queue\n if y in pair:\n continue\n # check (Z, X) because X is what changed - make sure Z is still arc consistent with this new X\n if x == pair[0]:\n continue\n arcs.append(pair)\n # if we made it thru all of that, congrats you did it\n return True",
"def assign(self, x, value):\n x.value = value\n\n modified = []\n\n # Actualizamos el dominio de los vecinos, eliminando este valor\n for var in x.vecinos:\n # Solo modificamos los dominios de variables sin asignar\n if var.value == 0 and value in var.domain:\n var.domain -= {value}\n modified.append(var)\n \n return modified",
"def constraint_not_satisfied(self, var, val, v, dom_val):\n if val == dom_val:\n return True\n col1 = int(var[1:]) - 1\n col2 = int(v[1:]) - 1\n if val == dom_val - abs(col2 - col1) or val == dom_val + abs(col2 - col1):\n return True\n return False",
"def is_correct_domain(domain=None):\n if domain is None:\n return False\n else:\n return True if is_correct_domain_pattern.match(domain) else False",
"def make_arc_consistent(cn):\n queue = list(cn.get_constraints())\n queue = list(set().union(queue,[tuple(reversed(x)) for x in queue]))\n while queue:\n (xi, xj) = queue.pop(0)\n if arc_reduce(cn, xi, xj): \n # if a cell has 0 possibilities, sudoku has no solution\n if len(cn.get_domain(xi)) == 0:\n return False\n for Xk in cn.get_vars_in_contraint_with(xi):\n if Xk != xi:\n queue.append((Xk, xi)) \n return True",
"def areDomainsIdentical(var1, var2):\n #check they have the same number of axis\n if len(var1.getAxisList()) != len(var2.getAxisList()):\n return False\n\n for i in range(len(var1.getAxisList())):\n ax1 = var1.getAxis(i)\n ax2 = var2.getAxis(i)\n #print ax1, ax2\n if axis_utils.areAxesIdentical(ax1, ax2) == False:\n return False\n\n return True",
"def solution_direction(f, x):\n\n if f(x) < 0:\n if derivative(f)(x) < 0:\n return False\n else:\n return True\n else:\n if derivative(f)(x) < 0:\n return True\n else:\n return False\n\n\n # inverse assumes that g is continuous and monotonic. ",
"def valid_path(self, unit, x, y):\n (a, b) = self.units[unit]\n if abs(x - a) <= 1 and abs(y - b) <= 1:\n return True\n elif type(unit) is Knight:\n return True\n else:\n if x == a:\n c = 0\n else:\n c = (x - a) // abs(x - a)\n if y == b:\n d = 0\n else:\n d = (y - b) // abs(y - b)\n while a != x or b != y:\n a += c\n b += d\n if self.board[a][b] is not None:\n return False\n return True",
"def is_solution(x:int, y:int) -> bool:\n\n # x and y are the values in a sequence of 15 terms of the following form:\n # xxxxyxxxxxyxxxx\n\n # x must be a positive integer\n if x <= 0:\n return False\n\n # y must be a negative integer\n if y >= 0:\n return False\n\n # a run of 6 consecutive terms must be positive\n if 5 * x + y <= 0:\n return False\n\n # a run of 11 consecutive terms must be negative\n if 9 * x + 2 * y >= 0:\n return False\n\n # x must be <= 16 or y must be >= 16\n return x <= 16 or y >= -16"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update `self.domains` such that each variable is arc consistent. If `arcs` is None, begin with initial list of all arcs in the problem. Otherwise, use `arcs` as the initial list of arcs to make consistent. Return True if arc consistency is enforced and no domains are empty; return False if one or more domains end up empty.
|
def ac3(self, arcs=None):
# setup
# queue = arcs
if arcs is None:
arcs = []
# grab all neighbor pairs in the problem and add them to arcs
for pair, overlaps in self.crossword.overlaps.items():
# Crossword.overlaps is dict of ALL pairs, need just the ones w overlap
if overlaps is not None:
arcs.append(pair)
# loop
while len(arcs) != 0:
# loop thru arcs until it is empty
# grab one arc (pair of variables that are neighbors) & remove it from the queue as we are now considering it
x, y = arcs.pop()
# run it thru revise()
# if false - nothing was changed do nothing, if true - x was changed
if self.revise(x, y):
if len(self.domains[x]) == 0:
# check length of domain, if 0 then we cannot solve
return False
# otherwise, we need to re-review previous arcs now that there has been a change
for pair, overlaps in self.crossword.overlaps.items():
if overlaps is not None:
if x in pair:
# gather all arcs that include the changed x EXCEPT the current y (that would be duplicative) and add them to the queue
if y in pair:
continue
# check (Z, X) because X is what changed - make sure Z is still arc consistent with this new X
if x == pair[0]:
continue
arcs.append(pair)
# if we made it thru all of that, congrats you did it
return True
|
[
"def ac3(csp, arcs=None):\n\n queue_arcs = deque(arcs if arcs is not None else csp.constraints.arcs())\n\n # TODO implement this\n while queue_arcs:\n var = queue_arcs.popleft()\n\n rev = False\n cs = csp.constraints[var[0]]\n\n for i in var[0].domain:\n satisfied = False\n for j in var[1].domain:\n if cs[0].is_satisfied(i,j):\n satisfied = True\n\n if not satisfied:\n var[0].domain.remove(i)\n rev = True\n\n if rev:\n if len(var[0].domain) == 0:\n return False\n\n for i in csp.constraints[var[0]]:\n if not (i.var2 == var[1]):\n queue_arcs.append(i.var2, var[0])\n\n return True",
"def make_arc_consistent(cn):\n queue = list(cn.get_constraints())\n queue = list(set().union(queue,[tuple(reversed(x)) for x in queue]))\n while queue:\n (xi, xj) = queue.pop(0)\n if arc_reduce(cn, xi, xj): \n # if a cell has 0 possibilities, sudoku has no solution\n if len(cn.get_domain(xi)) == 0:\n return False\n for Xk in cn.get_vars_in_contraint_with(xi):\n if Xk != xi:\n queue.append((Xk, xi)) \n return True",
"def set_arc_consistency(self):\n self.maintain_arc_consistency = True\n all_vars = list(self.var.keys())\n self.arc_consistency(all_vars)",
"def consistance_arcs(self):\n\n refaire = False\n for c in self.contraintes:\n if c.dimension() == 2 and c.reviser():\n refaire = True\n\n if refaire:\n self.consistance_arcs()",
"def is_obvious_visible_generating_set(self, input_arcs):\n did_something = True\n generated_arcs = deepcopy(input_arcs)\n while did_something:\n did_something = False\n for crossing in self.crossings:\n if crossing[0] in generated_arcs and (crossing[1] in generated_arcs or crossing[2] in generated_arcs) \\\n and not (crossing[1] in generated_arcs and crossing[2] in generated_arcs):\n did_something = True\n if crossing[1] in generated_arcs:\n generated_arcs.append(crossing[2])\n else:\n generated_arcs.append(crossing[1])\n\n return len(self.arcs) == len(generated_arcs)",
"def arc_consistency(self, Q):\n while Q:\n x = Q.pop()\n for (y, relation) in self.constr[x]:\n if self.assign[y] is None:\n if self.revise(x, y, relation):\n Q.add(y)",
"def solvable(self, domain, initial_state, goal_state):\n last_state = set([])\n reachable_literals = set(initial_state)\n positive_goals = set(goal_state[0])\n actions = domain\n\n positive_effects = set([])\n negative_effects = set([])\n for a in actions:\n positive_effects = positive_effects.union(set(a.add_effects))\n negative_effects = negative_effects.union(set(a.del_effects))\n # First check the obvious stuff\n for p in goal_state[0]:\n if p not in reachable_literals and p not in positive_effects:\n return False\n for p in goal_state[1]:\n if p in reachable_literals and p not in negative_effects:\n return False\n\n while last_state != reachable_literals:\n last_state = reachable_literals.copy()\n if positive_goals.issubset(reachable_literals):\n return True\n for a in actions:\n if a.applicable(reachable_literals):\n reachable_literals = reachable_literals.union(a.add_effects)\n\n return False",
"def has_arc(self) -> bool:\n if self.is_2d_polyline:\n return any(\n v.dxf.hasattr(\"bulge\") and bool(v.dxf.bulge) for v in self.vertices\n )\n else:\n return False",
"def valid_connection(self, component):\n\n # check the object type\n if type(component) == arc.Arc:\n # check if the origin and target are correct defined\n if (type(component.origin) == place.Place and type(component.target) == transition.Transition) or (type(component.origin) == transition.Transition and type(component.target) == place.Place):\n # iteration through all arcs to check if an arc with the same key already exists\n for key, value in self._arcs.items():\n if type(component) == type(value) and not value.key == \"new_comp\":\n if component.origin.is_equal(value.origin) and component.target.is_equal(value.target):\n return False\n return True\n else:\n # check the object type\n if type(component) == test_arc.TestArc or type(component) == inhibitory_arc.InhibitoryArc:\n # test and inhibitory arcs can only be connected from a place to a transition\n if (type(component.origin) == place.Place and type(component.target) == transition.Transition):\n # iteration through all arcs to check if an arc with the same key already exists\n for key, value in self._arcs.items():\n if type(component) == type(value) and not value.key == \"new_comp\":\n if component.origin.is_equal(value.origin) and component.target.is_equal(value.target):\n return False\n return True\n return False",
"def __check_possible_domain(self, curr_variable, assignment, variables_copy):\n var_obj = variables_copy[curr_variable]\n # if the variable has an assignment already - do not check it!\n if var_obj.value is not None:\n return False\n copy_of_possible_domain = deepcopy(var_obj.get_possible_domain())\n previous_val = assignment[curr_variable]\n for domain_value in copy_of_possible_domain: # for d in possible domain.\n assignment[curr_variable] = domain_value\n constraints = var_obj.get_constraints()\n if not self.check_constraint_agreement(constraints, assignment):\n var_obj.remove_from_possible_domain(domain_value)\n # we should remove this value because there is at least one constraint who isn't happy about it.\n assignment[curr_variable] = previous_val\n if len(var_obj.get_possible_domain()) != 0:\n if len(var_obj.get_possible_domain()) == 1:\n assignment[curr_variable] = next(iter(var_obj.get_possible_domain()))\n return False # variable has at least one value in it's possible domain meaning still isn't empty.\n return True # curr_variable is wiped out.",
"def propagateConstraint(self):\n # compares assignments and determines if the assigment breaks the\n # constraints\n for var in self.vars:\n if not var.isAssigned():\n continue\n varAssignment = var.getAssignment()\n for otherVar in self.vars:\n if var == otherVar:\n continue\n if otherVar.size() == 1 and \\\n otherVar.getAssignment() == varAssignment:\n return False\n otherVar.removeValueFromDomain(varAssignment)\n return True",
"def _compatible(self, other):\n\n if not isinstance(other, Cuboid):\n return False\n \n if len(self._p_min) != len(other._p_min):\n return False\n \n for dom in set(self._domains.keys()) & set(other._domains.keys()):\n if self._domains[dom] != other._domains[dom]:\n return False\n \n dom_union = dict(self._domains)\n dom_union.update(other._domains)\n return all(dom in list(cs._domains.items()) for dom in list(dom_union.items()))",
"def has_empty_domains(csp) :\n \n variables = csp.get_all_variables()\n for var in variables:\n if len(csp.get_domain(var)) == 0:\n return True\n return False",
"def limits_consistent(spaces: Iterable[zfit.Space]):\n try:\n _ = combine_spaces(*spaces)\n except LimitsIncompatibleError:\n return False\n else:\n return True",
"def valuesSatisfyConstraint(self, values):\n #start by sanity checking that there are enough values given\n if (len(values) != len(self.coordinates)):\n raise Exception(\"RCConstraint broken\")\n return len(set(values)) == len(values) #True IFF values has no duplicates",
"def types_vars_consistent(types: Sequence[Type],\n vars:Sequence[Variable]) -> bool:\n if len(types) != len(vars):\n return False\n else:\n return all((type_var_consistent(types[i], vars[i]) \\\n for i in range(len(types))))",
"def is_ap_solvable(assignments):\n if not isinstance(assignments, dict):\n raise TypeError\n if not all(isinstance(i, (frozenset, set)) for i in assignments.values()):\n raise TypeError\n\n all_ends = set().union(*assignments.values())\n\n assignment = {}\n\n for load_id, ends in sorted(assignments.items(), key=lambda x: len(x[-1])):\n options = set(ends).intersection(all_ends)\n if not options:\n return False\n selection = options.pop()\n all_ends.remove(selection)\n assignment[load_id] = selection\n return True",
"def is_satisfy_all_consts(self, item):\n if type(item) is not set:\n item = set([item])\n for it in item:\n for const in self.constraints:\n if not const.match(it):\n return False\n return True",
"def is_consistent(csp, variable, value):\n\n # TODO implement this\n for constraint in csp.constraints[variable]:\n if constraint.var2.domain == 1 and constraint.var2.is_assigned() == True:\n if constraint.is_satisfied(value, constraint.var2.value) == False:\n return False\n else:\n counter = 0\n for value2 in constraint.var2.domain:\n if constraint.is_satisfied(value, value2) == False:\n counter += 1\n if counter >= len(constraint.var2.domain):\n return False\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return True if `assignment` is complete (i.e., assigns a value to each crossword variable); return False otherwise.
|
def assignment_complete(self, assignment):
if len(assignment) == len(self.crossword.variables):
return True
return False
|
[
"def partial_assignment(self, assignment: Iterable) -> bool:\n for literal in assignment:\n # Remove corresponding variable from the unassigned set of the formula and add literal to assignment stack\n self.unassigned.remove(abs(literal))\n self.assignment_stack.append(literal)\n\n # For every clause in the adjacency list of this variable find out which\n # clauses become unit and which become unsatisfied in the current assignment\n for clause in self.adjacency_lists[abs(literal)]:\n if clause.is_unsatisfied(self.assignment_stack):\n return False\n\n if clause.is_unit(self.assignment_stack):\n self.unit_clauses_queue.append(clause)\n\n return True",
"def is_complete(self, assignment: dict):\n return len(assignment) == 2*self.grid_size",
"def can_evaluate(self, assignment):\n return all(v in assignment for v in self.variables)",
"def consistent(self, assignment):\n # check each assigned word for length, uniqueness, proper overlap\n # unique\n if list(assignment.values()) != list(set(assignment.values)):\n return False\n\n for var, word in assignment:\n # length\n if len(word) != var.length:\n return False\n\n # overlap\n # check set of var's neighbors\n for neigh in self.crossword.neighbors(self, var):\n # grab overlap\n i, j = self.crossword.overlaps[var, neigh]\n if word[i] != assignment[neigh][j]:\n return False\n\n return True",
"def contains(self, assignment):\n for variable in assignment.get_variables():\n if variable in self._map:\n value = assignment.get_value(variable)\n self_value = self._map[variable]\n if self_value is None and value is not None:\n return False\n elif value != self_value:\n return False\n else:\n return False\n\n return True",
"def is_assigned(self):\n return bool(self.current_property())",
"def evaluate(self, variable_assignment):\n variable_value = variable_assignment[self.get_variable()]\n return (not self.is_negation() and variable_value) or (self.is_negation() and not variable_value)",
"def consistent_with(self, assignment, sub_variables):\n for sub_variable in sub_variables:\n if assignment.get_value(sub_variable) is None:\n return False\n\n if self._map.get(sub_variable, None) is None:\n return False\n\n if assignment.get_value(sub_variable) != self._map[sub_variable]:\n return False\n\n return True",
"def is_consistent(self, value: int, assignment: dict):\n if self.is_col_occupied(value) or self.is_row_occupied(value) \\\n or self.is_block_occupied(value):\n return False\n\n for val in assignment.values():\n if self.are_adjacent(value, val):\n return False\n\n return True",
"def run_process_assignment(self):\n self.learner.process_assignment_until(None)\n self.run = False",
"def satisfying_assignment(formula):\n assignment = {}\n# for clause in formula:\n# for literal in clause: d \n# if literal[0] not in assignment:\n# assignment[literal[0]] = None\n# else:\n# continue \n if formula == []:\n# print('empty')\n return assignment\n \n # determine if there is any unit-length clause in formula\n def unit_clause(formula):\n for clause in formula:\n if len(clause) == 1:\n return (clause[0])\n return None\n \n # determine if there are clauses in formula that conflict with each other; if yes, no mapping exists, return None\n# def conflict_formula(formula, current):\n# record = {}\n# for clause in formula:\n## print(clause)\n# if len(clause) == 1 and current in clause[0]:\n## print('yes')\n# if current not in record:\n# record[current] = clause[0][1]\n# elif clause[0][1] != record[current]:\n## print('wow')\n# return True\n# else:\n# continue\n# return False\n \n # determine if there are literals in clause that conlict with each other; if yes, return True\n def conflict_clause(clause, current):\n record = {}\n for literal in clause:\n if current in literal:\n if current not in record:\n record[current] = literal[1]\n elif literal[1] != record[current]:\n return True\n else:\n continue\n return False\n \n # determine if the clause is satisfied\n# def determine_clause(clause, current):\n# for literal in clause:\n# if current[0] in literal and current[1] == literal[1]:\n## print('yes')\n# return True\n# return False\n \n # determine if the literal is satisfied\n def determine_literal(literal, current, value):\n if current != None:\n if literal[1] == value:\n return True\n else:\n return False\n else:\n return 'need_to_assign'\n \n # determine if the clause is satisfied \n def determine_clause(clause, assignment):\n result = 0\n for literal in clause:\n if literal[0] in assignment:\n current = literal[0]\n value = assignment[current]\n else:\n current = None\n value = None\n \n if determine_literal(literal, current, value) == True:\n return True\n elif determine_literal(literal, current, value) == 'need_to_assign':\n result += 1\n else:\n continue\n if result == 0:\n return False\n else:\n return 'continue'\n \n # determine if the formula is satisfied\n def determine_formula(formula, assignment):\n need = 0\n for clause in formula:\n if determine_clause(clause, assignment) == False:\n# print('pass')\n return False\n elif determine_clause(clause, assignment) == 'continue':\n# print('fail')\n need += 1\n continue\n else:\n continue\n if need == 0:\n return True\n else:\n return 'next_recursion'\n \n # formula processing\n def formula_process(formula, current, value):\n formula_new = []\n for clause in formula:\n# print(clause)\n# clause_new = clause[:]\n# clause_new = []\n# if conflict_clause(clause, current):\n## print('conflict clause')\n## assignment[current] = None\n# for literal in clause:\n# if current in literal:\n# clause_new.remove(literal)\n# formula_new.append(clause_new)\n# \n# else:\n# for literal in clause:\n## print('current new formula is: ')\n## print(formula_new)\n# if current in literal:\n# if value != literal[1]:\n## print('remove current literal')\n# clause_new.remove(literal)\n## print(clause_new)\n# else:\n## print('current clause is correct')\n# clause_new = []\n# break\n# else:\n# continue\n# if clause_new == []:\n# continue\n# else:\n# formula_new.append(clause_new)\n# return formula_new\n \n# clause_new = clause[:]\n clause_new = []\n# if conflict_clause(clause, current):\n## print('conflict clause')\n## assignment[current] = None\n# for literal 
in clause:\n# if current in literal:\n# clause_new.remove(literal)\n# formula_new.append(clause_new)\n \n# else:\n for literal in clause:\n# print('current new formula is: ')\n# print(formula_new)\n if current in literal:\n if value != literal[1]:\n# print('remove current literal')\n# clause_new.remove(literal)\n continue\n# print(clause_new)\n else:\n# print('current clause is correct')\n# clause_new = []\n clause_new = ['True']\n break\n# break\n else:\n# continue\n clause_new.append(literal)\n \n if clause_new == ['True']:\n continue\n elif clause_new == []:\n return None\n else:\n formula_new.append(clause_new)\n \n return formula_new\n \n # recursion function\n def recur_assign(formula, assignment):\n \n# print(formula)\n# print('current formula is: ')\n# print(formula)\n# print('assignmeng is: ')\n# print(assignment)\n # try to find unit-length clause\n unit = unit_clause(formula)\n# print(unit)\n \n # if yes, set current to variable in unit-length clause\n if unit != None:\n current = unit[0]\n value = unit[1]\n new_assignment = assignment.copy()\n new_assignment[current] = value\n# print(current)\n# if determine_formula(formula, assignment) == True:\n# return assignment\n# elif determine_formula(formula, assignment) == False:\n# return None\n# else:\n# formula_new = formula_process(formula, current, value)\n# return recur_assign(formula_new, assignment)\n formula_new = formula_process(formula, current, value)\n if formula_new == []:\n return new_assignment\n elif formula_new == None:\n return None\n else:\n# if recur_assign(formula_new, assignment) == None:\n# return None\n# else:\n return recur_assign(formula_new, new_assignment)\n # if no, set current to variable in first literal of first clause of the formula\n else:\n current = formula[0][0][0]\n new_assignment = assignment.copy()\n for value in [True, False]:\n new_assignment[current] = value\n# if determine_formula(formula, new_assignment) == True:\n# return new_assignment\n# elif determine_formula(formula, new_assignment) == False:\n# continue\n# else:\n# formula_new = formula_process(formula, current, value)\n# return recur_assign(formula_new, new_assignment)\n formula_new = formula_process(formula, current, value)\n if formula_new == []:\n return new_assignment\n elif formula_new == None:\n continue\n else:\n# if recur_assign(formula_new, new_assignment) == None:\n# continue\n# else:\n return recur_assign(formula_new, new_assignment)\n return None\n# return None\n# if all(formula):\n# print('yes')\n# assignment[current] = value\n# return assignment\n \n # determine if the current formula is satisfied; if yes, store the bool_value of current variable into dictionary\n# if determine_formula(formula, (current, value)):\n# print('all_true')\n# assignment[current] = value\n# print(assignment)\n# return assignment\n# \n# # if there are conflicting cluases in formula, return None\n# if conflict_formula(formula, current):\n# print('conflict')\n# return None\n## print(assignment)\n \n# print(assignment)\n# return assignment\n# print(recur_assign(formula, assignment))\n return recur_assign(formula, assignment)",
"def evaluates_to_true(truth_assignment: bool, positive: bool):\n return truth_assignment == positive",
"def is_filled_by(self, assignment):\n # TODO: check whether this function has a bug or not.\n for slot_key in self._slots:\n value = assignment.get_value(slot_key)\n if value == ValueFactory.none():\n return False\n\n return True",
"def test_assignment(self, default_assignment):\n assert self.state.assignment == default_assignment",
"def checkAssignment(claim, assignment):\n # Assigned references must match the claim's specs one-to-one.\n assert len(claim) == len(assignment.keys())\n\n # Check whether each assigned resource matches its spec.\n for spec in claim:\n assigned = assignment[spec.reference]\n assert spec.typeName == assigned.typeName\n assert spec.capabilities.issubset(assigned.capabilities)\n\n # Check whether all assigned resources are unique.\n usedIds = set()\n for resource in assignment.values():\n resId = resource.getId()\n assert resId not in usedIds\n usedIds.add(resId)\n\n # Check whether all assigned resources are available.\n for resource in assignment.values():\n assert not resource.isReserved()\n assert not resource.isSuspended()\n assert resource.getConnectionStatus() == ConnectionStatus.CONNECTED",
"def requires_operator_assign(self: Fdef) -> bool:\n self._resolve_if_needed()\n return self._requires_operator_assign",
"def is_ap_solvable(assignments):\n if not isinstance(assignments, dict):\n raise TypeError\n if not all(isinstance(i, (frozenset, set)) for i in assignments.values()):\n raise TypeError\n\n all_ends = set().union(*assignments.values())\n\n assignment = {}\n\n for load_id, ends in sorted(assignments.items(), key=lambda x: len(x[-1])):\n options = set(ends).intersection(all_ends)\n if not options:\n return False\n selection = options.pop()\n all_ends.remove(selection)\n assignment[load_id] = selection\n return True",
"def is_complete_assignment(self, new_cell, opened_cells, flags):\n for neighbor in self.get_cell_neighbors(new_cell):\n for cell in opened_cells:\n if cell == neighbor:\n if cell in self.opened_cells and not self.check_mines_consistency(self.open_information[cell],\n neighbor,\n flags, opened_cells):\n return False\n return True",
"def process_assign(self, node, state, *_):\n io_source = False\n is_function_call = False\n maybe_d_type_object_assign = False\n d_type_object_name = None\n # Get the GrFN element of the RHS side of the assignment which are\n # the variables involved in the assignment operations.\n sources = self.gen_grfn(node.value, state, \"assign\")\n\n node_name = node.targets[0].__repr__().split()[0][2:]\n if node_name == \"ast.Attribute\":\n node_value = node.targets[0].value\n attrib_ast = node_value.__repr__().split()[0][2:]\n if (\n attrib_ast == \"ast.Name\"\n and node_value.id in self.derived_type_objects\n ):\n maybe_d_type_object_assign = True\n d_type_object_name = node_value.id\n object_type = self.derived_type_objects[d_type_object_name]\n elif (\n attrib_ast == \"ast.Attribute\"\n and node_value.value.id in self.derived_type_objects\n ):\n maybe_d_type_object_assign = True\n d_type_object_name = node_value.value.id\n object_type = self.derived_type_objects[d_type_object_name]\n\n array_assignment = False\n is_d_type_obj_declaration = False\n # Detect assigns which are string initializations of the\n # following form: String(10). String initialization of the form\n # String(10, \"abcdef\") are valid assignments where the index of the\n # variables will be incremented but for the former case the index\n # will not be incremented and neither will its variable spec be\n # generated\n is_string_assign = False\n is_string_annotation = False\n if len(sources) > 0 and \"call\" in sources[0]:\n type_name = sources[0][\"call\"][\"function\"]\n if type_name == \"String\":\n is_string_assign = True\n # Check if it just an object initialization or initialization\n # with value assignment\n if len(sources[0][\"call\"][\"inputs\"]) == 1:\n # This is just an object initialization e.g. String(10)\n is_string_annotation = True\n elif type_name == \"Array\":\n array_assignment = True\n array_dimensions = []\n inputs = sources[0][\"call\"][\"inputs\"]\n\n # If the array type is string, the structure of inputs will\n # be a bit different than when it is int of float\n if \"call\" in inputs[0][0]:\n if inputs[0][0][\"call\"][\"function\"] == \"String\":\n array_type = \"string\"\n else:\n array_type = inputs[0][0][\"var\"][\"variable\"]\n self._get_array_dimension(sources, array_dimensions, inputs)\n elif type_name in self.derived_types:\n is_d_type_obj_declaration = True\n if isinstance(node.targets[0], ast.Name):\n variable_name = node.targets[0].id\n if variable_name not in self.module_variable_types:\n for program in self.mode_mapper[\"public_objects\"]:\n if (\n variable_name\n in self.mode_mapper[\"public_objects\"][program]\n ):\n self.module_variable_types[variable_name] = [\n program,\n type_name,\n ]\n else:\n pass\n else:\n pass\n\n # This reduce function is useful when a single assignment operation\n # has multiple targets (E.g: a = b = 5). Currently, the translated\n # python code does not appear in this way and only a single target\n # will be present.\n targets = reduce(\n (lambda x, y: x.append(y)),\n [\n self.gen_grfn(target, state, \"assign\")\n for target in node.targets\n ],\n )\n grfn = {\"functions\": [], \"variables\": [], \"containers\": []}\n # Again as above, only a single target appears in current version.\n # The `for` loop seems unnecessary but will be required when multiple\n # targets start appearing.\n target_names = []\n object_attr_num = 1\n for target in targets:\n # Bypass any assigns that have multiple targets.\n # E.g. 
(i[0], x[0], j[0], y[0],) = ...\n if \"list\" in target:\n return []\n target_names.append(target[\"var\"][\"variable\"])\n # Fill some data structures if this is a string\n # assignment/initialization\n if is_string_assign:\n state.variable_types[target_names[0]] = \"string\"\n state.string_assign_name = target_names[0]\n self.strings[target_names[0]] = {\n \"length\": sources[0][\"call\"][\"inputs\"][0][0][\"value\"]\n }\n if is_string_annotation:\n # If this is just a string initialization,\n # last_definition should not contain this string's index.\n # This happens only during assignments.\n del state.last_definitions[target_names[0]]\n self.strings[target_names[0]][\"annotation\"] = True\n self.strings[target_names[0]][\"annotation_assign\"] = False\n return []\n else:\n self.strings[target_names[0]][\"annotation\"] = False\n self.strings[target_names[0]][\"annotation_assign\"] = True\n\n # Pre-processing and removing certain Assigns which only pertain\n # to the Python code and do not relate to the FORTRAN code in any\n # way.\n io_match = self.check_io_variables(target_names[0])\n if io_match:\n self.exclude_list.append(target_names[0])\n return []\n\n # If the target is a list of variables, the grfn notation for the\n # target will be a list of variable names i.e. \"[a, b, c]\"\n # TODO: This does not seem right. Discuss with Clay and Paul\n # about what a proper notation for this would be\n if target.get(\"list\"):\n targets = \",\".join(\n [x[\"var\"][\"variable\"] for x in target[\"list\"]]\n )\n target = {\"var\": {\"variable\": targets, \"index\": 1}}\n\n if array_assignment:\n var_name = target[\"var\"][\"variable\"]\n state.array_assign_name = var_name\n # Just like the same reason as the variables\n # declared with annotation within function (not\n # function arguments) need to have index of zero.\n # Thus, these 3 lines of code fixes the index to\n # correct value from -1 to 0.\n if target[\"var\"][\"index\"] == -1:\n target[\"var\"][\"index\"] = 0\n state.last_definitions[target_names[0]] = 0\n is_mutable = False\n array_info = {\n \"index\": target[\"var\"][\"index\"],\n \"dimensions\": array_dimensions,\n \"elem_type\": array_type,\n \"mutable\": is_mutable,\n }\n self.arrays[var_name] = array_info\n state.array_types[var_name] = array_type\n if array_type == \"string\":\n length = inputs[0][0][\"call\"][\"inputs\"][0][0][\"value\"]\n self.strings[var_name] = {\n \"length\": length,\n \"annotation\": False,\n \"annotated_assign\": True,\n }\n\n if (\n maybe_d_type_object_assign\n and object_type\n and object_type in self.derived_types_attributes\n and target_names[0]\n in self.derived_types_attributes[object_type]\n ):\n self.current_d_object_name = d_type_object_name\n is_d_type_object_assignment = True\n\n # If targets holds more than 1 variable information and\n # it's greater than the object attribute number, then\n # the derived type object is referencing more than\n # 1 attribute (i.e. 
x.k.v).\n if len(targets) > 1 and len(targets) > object_attr_num:\n object_attr_num += 1\n # Therefore, we do not want to go any further before\n # collecting all the information of the attribute\n # information, so we need to simply return back to the\n # beginning of loop and restart the process\n continue\n else:\n is_d_type_object_assignment = False\n\n variable_spec = self.generate_variable_definition(\n target_names,\n d_type_object_name,\n is_d_type_object_assignment,\n state,\n )\n\n # Do not add the variable spec if this is a string annotation\n # since this can collide with the variable spec of the first\n # string assignment.\n if not is_string_annotation:\n grfn[\"variables\"].append(variable_spec)\n\n # Since a Python class (derived type) object declaration has syntax\n # is __object_name__ = __class_name__, it's considered as an\n # assignment that will create __assign__ function GrFN,\n # which should not. Thus, simply return the [grfn] here to avoid\n # generating __assign__ function.\n if is_d_type_obj_declaration:\n return [grfn]\n\n # TODO Hack to not print lambda function for IO assigns. Need a\n # proper method to handle IO moving on\n for src in sources:\n if \"call\" in src:\n if self.check_io_variables(src[\"call\"][\"function\"]):\n io_source = True\n function = src[\"call\"][\"function\"]\n # Check if the source is a function call by comparing its\n # value with the list of functions in our program (\n # obtained from the mode mapper)\n for program_functions in self.mode_mapper[\"subprograms\"]:\n if (\n function\n in self.mode_mapper[\"subprograms\"][\n program_functions\n ]\n ):\n is_function_call = True\n\n if is_function_call:\n container_name = self.generate_container_id_name(\n self.fortran_file, [\"@global\"], function\n )\n function_name = {\"name\": container_name, \"type\": \"container\"}\n else:\n function_name = self.generate_function_name(\n \"__assign__\", variable_spec[\"name\"], None\n )\n # If current assignment process is for a derived type object (i.e\n # x.k), then\n if is_d_type_object_assignment:\n # (1) we need to add derived type object as function input.\n src = [\n {\n \"var\": {\n \"variable\": d_type_object_name,\n \"index\": state.last_definitions[\n d_type_object_name\n ],\n }\n }\n ]\n sources.extend(src)\n\n # (2) Generate the object name + attributes variable name\n new_var_name = d_type_object_name\n for target_name in target_names:\n new_var_name += f\"_{target_name}\"\n self.current_d_object_attributes.append(target_name)\n\n # (3) we need to modify thee target to be \"objectName_attribute\"\n # For example, variable: x_k and index: __index_of_x_y__.\n target[\"var\"] = {\n \"variable\": new_var_name,\n \"index\": state.last_definitions[new_var_name],\n }\n\n fn = self.make_fn_dict(function_name, target, sources, state)\n if len(fn) == 0:\n return []\n\n source_list = self.make_source_list_dict(sources)\n\n if not io_source and not is_function_call:\n lambda_string = self.generate_lambda_function(\n node,\n function_name[\"name\"],\n True,\n array_assignment,\n is_string_assign,\n is_d_type_object_assignment,\n source_list,\n state,\n False,\n )\n state.lambda_strings.append(lambda_string)\n\n grfn[\"functions\"].append(fn)\n # We need to cleanup the object attribute tracking list.\n self.current_d_object_attributes = []\n return [grfn]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return True if `assignment` is consistent (i.e., words fit in crossword puzzle without conflicting characters); return False otherwise.
|
def consistent(self, assignment):
    # check each assigned word for length, uniqueness, and proper overlap
    # uniqueness: no word may be used for more than one variable
    words = list(assignment.values())
    if len(words) != len(set(words)):
        return False
    for var, word in assignment.items():
        # length: the word must exactly fill its slot
        if len(word) != var.length:
            return False
        # overlap: shared cells with already-assigned neighbors must agree
        for neigh in self.crossword.neighbors(var):
            if neigh not in assignment:
                continue
            overlap = self.crossword.overlaps[var, neigh]
            if overlap is None:
                continue
            i, j = overlap
            if word[i] != assignment[neigh][j]:
                return False
    return True
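For intuition, a minimal self-contained sketch of the same two checks (unique words and agreeing overlap letters) using plain dicts; the slot names, words, and overlap index below are invented for illustration and are not part of the crossword classes above.

assignment = {"A": "cat", "B": "ant"}   # hypothetical slot -> word mapping
overlaps = {("A", "B"): (1, 0)}         # A[1] must equal B[0]

# uniqueness: every slot must receive a distinct word
unique = len(set(assignment.values())) == len(assignment)
# overlap: the shared cell must hold the same letter in both words
i, j = overlaps[("A", "B")]
agree = assignment["A"][i] == assignment["B"][j]
print(unique and agree)  # True: 'a' == 'a' and the two words differ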
|
[
"def assignment_complete(self, assignment):\n\n if len(assignment) == len(self.crossword.variables):\n return True\n return False",
"def is_consistent(self, value: int, assignment: dict):\n if self.is_col_occupied(value) or self.is_row_occupied(value) \\\n or self.is_block_occupied(value):\n return False\n\n for val in assignment.values():\n if self.are_adjacent(value, val):\n return False\n\n return True",
"def partial_assignment(self, assignment: Iterable) -> bool:\n for literal in assignment:\n # Remove corresponding variable from the unassigned set of the formula and add literal to assignment stack\n self.unassigned.remove(abs(literal))\n self.assignment_stack.append(literal)\n\n # For every clause in the adjacency list of this variable find out which\n # clauses become unit and which become unsatisfied in the current assignment\n for clause in self.adjacency_lists[abs(literal)]:\n if clause.is_unsatisfied(self.assignment_stack):\n return False\n\n if clause.is_unit(self.assignment_stack):\n self.unit_clauses_queue.append(clause)\n\n return True",
"def is_complete(self, assignment: dict):\n return len(assignment) == 2*self.grid_size",
"def consistent_with(self, assignment, sub_variables):\n for sub_variable in sub_variables:\n if assignment.get_value(sub_variable) is None:\n return False\n\n if self._map.get(sub_variable, None) is None:\n return False\n\n if assignment.get_value(sub_variable) != self._map[sub_variable]:\n return False\n\n return True",
"def can_evaluate(self, assignment):\n return all(v in assignment for v in self.variables)",
"def contains(self, assignment):\n for variable in assignment.get_variables():\n if variable in self._map:\n value = assignment.get_value(variable)\n self_value = self._map[variable]\n if self_value is None and value is not None:\n return False\n elif value != self_value:\n return False\n else:\n return False\n\n return True",
"def _all_same(self, check, player_letter):\n return all(self.grid[x[0]][x[1]] == player_letter for x in check)",
"def is_ap_solvable(assignments):\n if not isinstance(assignments, dict):\n raise TypeError\n if not all(isinstance(i, (frozenset, set)) for i in assignments.values()):\n raise TypeError\n\n all_ends = set().union(*assignments.values())\n\n assignment = {}\n\n for load_id, ends in sorted(assignments.items(), key=lambda x: len(x[-1])):\n options = set(ends).intersection(all_ends)\n if not options:\n return False\n selection = options.pop()\n all_ends.remove(selection)\n assignment[load_id] = selection\n return True",
"def check_word(self, word):\r\n if len(word) != len(self) or self.has_word:\r\n return False\r\n # Overlap dictionary has the slots as keys and the index of the overlap\r\n # as values. Slot1.overlaps[Slot2] == index in Slot1 at which the\r\n # overlap occurs.\r\n for other, other_ind in self.overlaps.items():\r\n ind = other.overlaps[self]\r\n # If other has a letter in the overlap and it doesn't match the\r\n # letter which will be overlapping, the word won't fit\r\n if other[other_ind] and other[other_ind] != word[ind]:\r\n return False\r\n for cur, new in zip(self.word, word):\r\n if cur and cur != new:\r\n return False\r\n return True",
"def checkAssignment(claim, assignment):\n # Assigned references must match the claim's specs one-to-one.\n assert len(claim) == len(assignment.keys())\n\n # Check whether each assigned resource matches its spec.\n for spec in claim:\n assigned = assignment[spec.reference]\n assert spec.typeName == assigned.typeName\n assert spec.capabilities.issubset(assigned.capabilities)\n\n # Check whether all assigned resources are unique.\n usedIds = set()\n for resource in assignment.values():\n resId = resource.getId()\n assert resId not in usedIds\n usedIds.add(resId)\n\n # Check whether all assigned resources are available.\n for resource in assignment.values():\n assert not resource.isReserved()\n assert not resource.isSuspended()\n assert resource.getConnectionStatus() == ConnectionStatus.CONNECTED",
"def check_unique(self, alphabet):\n letters_set = set()\n for let in alphabet:\n if let in letters_set:\n return False\n else:\n letters_set.add(let)\n return True",
"def matches(assignment_1, variables_1, assignment_2, variables_2):\n matching_variables = list(set(variables_1) & set(variables_2))\n for var in matching_variables:\n assingment_1_var_index = variables_1.index(var)\n assingment_2_var_index = variables_2.index(var)\n if assignment_1[assingment_1_var_index] != assignment_2[assingment_2_var_index]:\n return False\n return True",
"def is_strict(self):\n for row in self:\n if any(row[i] == row[i+1] for i in range(len(row)-1)):\n return False\n return True",
"def puzzle_matches_key(self):\n for letter in self.hashed_puzzle:\n if letter.isalpha():\n if self.alpha_to_guesses[letter] == self.hash_to_alpha[letter]:\n continue\n else:\n return False\n return True",
"def is_consistent(self) -> bool:\n can_place = set()\n used = set()\n\n for tile in self.tiles:\n # One or more candidates\n if len(tile.candidates) == 0:\n return False\n # Checking for any duplicates\n if tile.value in used:\n return False\n elif tile.value != sdk_tile.UNKNOWN:\n used.add(tile.value)\n can_place = can_place | tile.candidates\n\n if can_place != set(sdk_tile.CHOICES):\n return False\n return True",
"def complete(self, pieces):\n return set(pieces) == set(self.pieces.values()) and len(pieces) == len(self.pieces.values())",
"def propagateConstraint(self):\n # compares assignments and determines if the assigment breaks the\n # constraints\n for var in self.vars:\n if not var.isAssigned():\n continue\n varAssignment = var.getAssignment()\n for otherVar in self.vars:\n if var == otherVar:\n continue\n if otherVar.size() == 1 and \\\n otherVar.getAssignment() == varAssignment:\n return False\n otherVar.removeValueFromDomain(varAssignment)\n return True",
"def is_contradiction(self) -> bool:\r\n return all((not _[1] for _ in self.truth_table))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return an unassigned variable not already part of `assignment`. Choose the variable with the minimum number of remaining values in its domain. If there is a tie, choose the variable with the highest degree. If there is a tie, any of the tied variables are acceptable return values.
|
def select_unassigned_variable(self, assignment):
    # setup: heuristics for every unassigned variable
    unassigned = [var for var in self.crossword.variables if var not in assignment]
    mrv_heuristic = {}
    degree_heuristic = {}
    # loop
    for var in unassigned:
        # minimum remaining values heuristic
        mrv_heuristic[var] = len(self.domains[var])
        # largest degree heuristic
        degree_heuristic[var] = len(self.crossword.neighbors(var))
    # order by fewest remaining values, breaking ties by highest degree
    ordered = sorted(unassigned, key=lambda var: (mrv_heuristic[var], -degree_heuristic[var]))
    return ordered[0]
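As a quick illustration of the combined sort key used above (MRV first, degree as the tie-breaker), here is a tiny standalone example; the variable names and counts are made up:

# hypothetical (remaining_values, degree) pairs per variable
stats = {"V1": (3, 2), "V2": (2, 1), "V3": (2, 4)}
order = sorted(stats, key=lambda v: (stats[v][0], -stats[v][1]))
print(order[0])  # 'V3': tied with V2 on 2 remaining values, but higher degree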
|
[
"def get_next_unassigned_var(self):\n # No heuristic\n if self.ordering_choice == 0: \n return self.unassigned_vars[0]\n \n # Heuristic 1\n if self.ordering_choice == 1:\n return self.get_most_constrained()\n \n # Heuristic 2\n if self.ordering_choice == 2:\n return self.get_most_constraining()\n \n # Hybrid of Heuristic 1 and Heuristic 2\n if self.ordering_choice == 3:\n return np.random.choice([self.get_most_constraining(),\n self.get_most_constrained()], p=[0.1, 0.9])",
"def select_unassigned_variable(domains: Dict[Tuple[int, int],\r\n Set[int]])-> Tuple[int,int]:\r\n return min(filter(lambda cell: len(domains[cell])>1,domains.keys()), key = lambda cell: len(domains[cell]))",
"def get_dl_variable_assignment(self, of_variable):\n if of_variable not in self.assignments or len(self.assignments[of_variable]) == 0:\n return (None, None)\n # Return the last assignment\n value_assigned = self.assignments[of_variable][-1]\n at_decision_level = self.decision_levels[of_variable]\n return (value_assigned, at_decision_level)",
"def min_remaining_value(board):\n chosen_var = None\n num_remaining = 10\n for cell in board.values():\n curr_len = len(cell.possible_values)\n if curr_len < num_remaining and not cell.is_given and not cell.is_assigned:\n chosen_var = cell\n num_remaining = curr_len\n if curr_len == 1:\n break\n return chosen_var",
"def get_most_constrained(self):\n most_constrained = self.unassigned_vars[0] # default choice\n smallest_domain = self.domains[most_constrained]\n for var in self.unassigned_vars:\n if len(smallest_domain) > len(self.domains[var]):\n # smaller domain found, update accordingly\n smallest_domain = self.domains[var]\n most_constrained = var\n return most_constrained",
"def pickbranching_variable_random(self, did_backtrack):\n variable = None\n if did_backtrack: # Select the variable at the current decision_level\n self.did_backtrack_divide_vsids()\n variable = list(filter(lambda x: x[1] == self.decision_level, self.decision_levels.items()))[0][0]\n else:\n for var, assignment in self.assignments.items():\n if len(assignment) == 2 or var in self.decision_levels:\n continue\n variable = var\n break\n if variable is None or len(self.assignments[variable]) == 2:\n return (None, None)\n if len(self.assignments[variable]) == 0:\n values = [True, False]\n random.shuffle(values)\n value = values[0]\n else:\n value = not self.assignments[variable][-1]\n return (variable, value)",
"def get_variable_assignment(self):\n final = {}\n for variable, assignments in self.assignments.items():\n if len(assignments) == 0:\n continue\n final[variable] = assignments[-1]\n return final",
"def pickbranching_variable_vsids(self, did_backtrack):\n if did_backtrack: # Select the variable at the current decision_level\n self.did_backtrack_divide_vsids()\n variable = list(filter(lambda x: x[1] == self.decision_level, self.decision_levels.items()))[0][0]\n value = not self.assignments[variable][-1]\n \n return (variable, value)\n else:\n # Predicate: variable must be unassigned at the current decision level\n predicate = lambda x: x[0].get_variable() not in self.decision_levels\n literal = max(filter(predicate, self.vsids.items()), key=lambda x: x[1])[0]\n variable = literal.get_variable()\n value = not literal.is_negation()\n return (variable, value)",
"def get_decision_literal(self) -> int:\n number_of_clauses = 0\n decision_literal = None\n for variable in self.unassigned:\n positive_clauses = 0\n negative_clauses = 0\n for clause in self.adjacency_lists[variable]:\n if not clause.is_satisfied(self.assignment_stack):\n unassigned = clause.partial_assignment(self.assignment_stack)\n if variable in unassigned:\n positive_clauses += 1\n\n if -variable in unassigned:\n negative_clauses += 1\n\n if positive_clauses > number_of_clauses and positive_clauses > negative_clauses:\n number_of_clauses = positive_clauses\n decision_literal = variable\n\n if negative_clauses > number_of_clauses:\n number_of_clauses = negative_clauses\n decision_literal = -variable\n\n return decision_literal",
"def local_search(self, max_variables):\n assignments = self.assignments.copy()\n\n best_var = None\n best_improvement = 0\n\n for _ in range (0, max_variables):\n for var in range(0, self.cnf.num_variables):\n self.assignments[:,var] = 1-self.assignments[:,var]\n score, _, __, ___ = self.cnf.evaluate(assignments)\n improvement = score - self.get_score()\n if improvement > 0 and improvement > best_improvement:\n best_improvement = improvement\n best_var = var\n\n self.assignments[:,var] = 1-self.assignments[:,var]\n\n if best_improvement > 0:\n self.assignments[:,best_var] = 1-self.assignments[:,best_var]\n\n self.assignments = assignments",
"def unassign_val(self, var: int, value: int, assignment: dict):\n self.safe_remove_dict(assignment, var) # safe removal to prevent failure\n row = (value - 1) // self.grid_size\n col = (value - 1) % self.grid_size\n self.row_occupancy[row] -= 1\n self.col_occupancy[col] -= 1\n block = self.cell_map[value]['block'] # the variable's block\n self.block_occupancy[block] -= 1 \n if self.ordering_choice == 2 or self.ordering_choice == 3:\n self.num_edge_list = self.last_num_edge_list[:]\n self.unassigned_vars.append(var)",
"def find_variable_assignment(self, name):\n for x in self.find_insts(cls=Assign):\n if x.target.name == name:\n return x\n return None",
"def partial_assignment(self, assignment: Iterable) -> bool:\n for literal in assignment:\n # Remove corresponding variable from the unassigned set of the formula and add literal to assignment stack\n self.unassigned.remove(abs(literal))\n self.assignment_stack.append(literal)\n\n # For every clause in the adjacency list of this variable find out which\n # clauses become unit and which become unsatisfied in the current assignment\n for clause in self.adjacency_lists[abs(literal)]:\n if clause.is_unsatisfied(self.assignment_stack):\n return False\n\n if clause.is_unit(self.assignment_stack):\n self.unit_clauses_queue.append(clause)\n\n return True",
"def get_unassigned(self):\n\t\tcells = list()\n\t\tfor row in self.board:\n\t\t\tfor cell in row:\n\t\t\t\tif cell.value == '_':\n\t\t\t\t\tcells.append(cell)\n\t\tif len(cells) == 0:\n\t\t\treturn None\n\t\treturn cells[randint(0, len(cells)-1)]",
"def undo_partial_assignment(self, decision_literal: int) -> None:\n self.unit_clauses_queue.clear()\n while self.assignment_stack:\n literal = self.assignment_stack.pop()\n self.unassigned.add(abs(literal))\n if literal == decision_literal:\n break",
"def get_pruned(self, variables):\n assignment = Assignment()\n\n for variable, value in self._map.items():\n if variable not in variables:\n assignment.add_pair(variable, value)\n\n assignment._cached_hash = 0\n return assignment",
"def get_assignment(AssignmentId=None):\n pass",
"def backtrack(csp: CSP) -> Assignment:\r\n\r\n def backtrackSearch(csp_i: CSP, assignment_i: Assignment = None) -> Optional[Assignment]:\r\n \"\"\"\r\n Executes backtracking search for a complete assignment of a csp\r\n :param csp_i: csp of interest\r\n :param assignment_i: eventual partial assignment to respect\r\n :return: assignment if it exist, None otherwise\r\n \"\"\"\r\n if assignment_i is None: # if it's the init call, we run AC-3 and we initialize an assignment\r\n if not AC3(csp_i):\r\n return None\r\n assignment_i = Assignment()\r\n\r\n if len(assignment_i.getAssignment()) == csp_i.countVariables(): # if the assignment is complete, we can return it\r\n return assignment_i\r\n\r\n var = orderVariables(csp_i, assignment_i)\r\n values = orderDomainValues(csp_i, assignment_i, var)\r\n\r\n for value in values:\r\n localAssignment = copy(assignment_i) # we try to assign a var in a local copy of assignment\r\n localAssignment.addVarAssigned(var, value)\r\n if MAC(csp_i, localAssignment, csp_i.getNeighbour(var)): # if it's possible to complete the assignment, we iterate...\r\n result = backtrackSearch(csp_i, localAssignment)\r\n if result is not None: # ... if it fails, we go back and propagate the None result\r\n return result # if the recursion arrive to a None, we don't want to propagate it, but we want to try next value\r\n return None\r\n\r\n assignment = backtrackSearch(csp)\r\n if assignment is None:\r\n nullAssignment = Assignment()\r\n nullAssignment.setNull()\r\n return nullAssignment\r\n return assignment",
"def _get_least_used(self):\n if self.idx < self.size:\n return self.priority[:self.idx].min(0)[1][0, 0]\n else:\n return self.priority.min(0)[1][0, 0]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the index of the nearest group (cluster center) for point `x`.
|
def group_idx(self, x):
    # distance from x to every cluster center
    centers = self.centers
    dist = np.array([self.dist_func(x, center) for center in centers])
    # index of the nearest center
    group = np.argmin(dist)
    return group
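A hedged usage sketch, assuming `dist_func` is Euclidean distance and `centers` holds three 2-D points (all values invented for illustration):

import numpy as np

centers = np.array([[0.0, 0.0], [1.0, 1.0], [5.0, 5.0]])
x = np.array([0.9, 1.1])
# same computation as group_idx: distance to each center, then argmin
dist = np.array([np.linalg.norm(x - c) for c in centers])
print(int(np.argmin(dist)))  # 1 -> x is closest to center (1.0, 1.0)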
|
[
"def nearest_clusteroid_index(idx, clusteroids):\n\n clusteroid_dists = clusteroids[:, idx]\n min_dist_val = np.nanmin(clusteroid_dists)\n min_dist_idxs = np.nonzero(clusteroid_dists == min_dist_val)[0]\n if not min_dist_idxs.any():\n return np.random.choice(clusteroids.shape[0])\n return np.nanmin(min_dist_idxs)",
"def nearest(geom, df,sindex): \n matches_idx = sindex.query(geom)\n nearest_geom = min(\n [df.iloc[match_idx] for match_idx in matches_idx],\n key=lambda match: shapely.measurement.distance(match.geometry,geom)\n )\n return nearest_geom",
"def get_index_nearest(array, value):\n return (np.abs(array - value)).argmin()",
"def find_nearest_element(array,value,index=False):\n\t\tidx = n.abs(array-value).argmin()\n\t\treturn (idx,array.flat[idx]) if index else array.flat[idx]",
"def nearest(array,value):\r\n array = np.asarray(array)\r\n idx = (np.abs(array - value)).argmin()\r\n return idx",
"def _nearest(self, i):\n\n # Need the second nearest neighbor of i since the nearest neighbor\n # will be itself. Using argpartition, the k-th nearest neighbor is\n # placed at index k.\n idx = list(self.mesh[self.cell_index(self.spheres[i])])\n dists = cdist([self.spheres[i]], self.spheres[idx])[0]\n if dists.size > 1:\n j = dists.argpartition(1)[1]\n return idx[j], dists[j]\n else:\n return None, None",
"def _find_nearest(array, value):\n idx = (np.abs(array - value)).argmin()\n if array[idx] > value:\n return idx - 1\n elif array[idx] <= value:\n return idx",
"def _find_nearest(array, value):\n idx = (np.abs(array - value)).argmin()\n if array[idx] > value:\n return idx - 1\n elif array[idx] <= value:\n return idx",
"def nearest_cluster_center(point, cluster_centers):\n\tdef sqr_distance_2D(a, b):\n\t\treturn (a.x - b.x) ** 2 + (a.y - b.y) ** 2\n\n\tmin_index = point.group\n\tmin_dist = 1e100\n\tfor i, cc in enumerate(cluster_centers):\n\t\td = sqr_distance_2D(cc, point)\n\t\tif min_dist > d:\n\t\t\tmin_dist = d\n\t\t\tmin_index = i\n\treturn (min_index, min_dist)",
"def getnearest(v, points, distance):\n bestmatch = 0\n for i in range(len(points)):\n d = distance(points[i], v)\n if d < distance(points[bestmatch], v): bestmatch = i\n return bestmatch",
"def nearest_node(point, nodes,sindex): \n return nearest(point, nodes,sindex)",
"def closest_to_index(field: LikelihoodField, ix: Tuple[int, int]) -> Optional[float]:\n (row, col) = ix\n\n if row < 0 or row >= field.height or col < 0 or col >= field.width:\n return None\n\n if (dist := field.field[row][col]) == DIST_UNKNOWN:\n return None\n\n return dist",
"def find_ref_group(groups, key):\n for index, group in enumerate(groups):\n if len(group) != 5:\n return -1\n if is_group_ref_group(group, key):\n return index\n\n return -1",
"def get_closest(point, allpoints):\n best_index = None\n best_distance = 999999999\n is_dupe = False\n\n for index, p in enumerate(allpoints):\n # if p == point:\n # continue\n dist = getdist(point, p)\n if dist <= best_distance:\n if dist == best_distance:\n is_dupe = True\n else:\n is_dupe = False\n best_distance = dist\n best_index = index\n\n if is_dupe:\n return None\n\n return best_index",
"def _find_nearest(self, array, value):\n \n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n \n return array[idx], idx",
"def nearest(k, peers, uid):\n # XXX: It only works with len(peers) < 10^6 more than that count\n # of peers and the time it takes to compute the nearest peers will\n # timeout after 5 seconds on the other side. See RPCProtocol and\n # Peer.peers.\n return nsmallest(k, peers, key=functools.partial(operator.xor, uid))",
"def get_sorted_ind(x):\n\td = dist.pdist(x)\n\tD = dist.squareform(d)\n\tY = sch.linkage(D, method='average', metric='cosine') \n\tZ = sch.dendrogram(Y)\n\tidx = Z['leaves'] \n\treturn idx",
"def keep_k_nearest(self):\n start = time.time()\n dist = self.compute_euclidean_distances()\n idx = dist.argsort()\n neighbours = idx[:, :self.knn + 1 ]\n dist_knn = np.zeros((self.n_data, self.n_data))\n for i in range(self.n_data):\n dist_knn[i, neighbours[i, :]] = dist[i, neighbours[i, :]]\n end = time.time()\n print(\"Compute keep k nearest: \" + \"{:.4f}\".format(end - start))\n return dist_knn",
"def nearest_unmasked(arr, use_indices=False):\n # Check the input\n if not isinstance(arr, np.ma.MaskedArray):\n raise TypeError('Must provide a numpy masked array.')\n if arr.ndim != 1:\n raise ValueError('Must be a 1D array.')\n if use_indices:\n return nearest_unmasked(np.ma.MaskedArray(np.arange(arr.size), mask=arr.mask.copy()))\n\n # Get the difference of each element with every other element\n nearest = np.absolute(arr[None,:]-arr.data[:,None])\n # Ignore the diagonal\n nearest[np.diag_indices(arr.size)] = np.ma.masked\n # Return the location of the minimum value ignoring the masked values\n return np.ma.argmin(nearest, axis=1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Show rate of notification by weekday.
|
def weekday_rate(self, disease=None, **kwargs):
data = self.region.pydemic.epidemic_curve(disease, diff=True)
data = trim_weeks(data)
return plt.weekday_rates(data, **kwargs)
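`trim_weeks` and `plt.weekday_rates` are project-specific helpers not shown here; as a rough generic sketch of the same idea, a daily notification curve can be grouped by weekday with pandas (synthetic data, invented for illustration):

import numpy as np
import pandas as pd

idx = pd.date_range("2020-03-01", periods=28, freq="D")   # four full weeks
daily_cases = pd.Series(np.random.poisson(100, size=len(idx)), index=idx)
weekday_rate = daily_cases.groupby(daily_cases.index.dayofweek).mean()
print(weekday_rate)  # average notifications per weekday (0 = Monday)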
|
[
"def week():",
"def weekday_chart(flts):\n global HTML_FILE\n if START_WITH.lower() in [\"sun\", \"sunday\", \"sonntag\", \"so\"]: # accept different entries for Sunday ...\n wd_order = WEEKDAY_DICT_SUN\n else:\n wd_order = WEEKDAY_DICT_MON\n flts_by_dow = flts[\"WEEKDAY\"].value_counts() # DayOfWeek\n\n # averaged by weeknumber\n wkdy_averaged = pd.DataFrame({\"Total\": flts_by_dow}, index=wd_order) # .fillna(0)\n wkdy_averaged[\"Weekday\"] = wkdy_averaged.index\n # replace NaN with 0 for weekdays which are in spec_wkdy, but have no flights (due to filtering)\n wkdy_averaged.loc[wkdy_averaged[\"Weekday\"].isin(spec_wkdy) & wkdy_averaged[\"Total\"].isna()] = 0\n wkdy_averaged[\"Weekday\"] = wkdy_averaged.index # again re-assign column, as it is also set to 0 in step above\n\n wkdy_raw = wkdy_averaged[\"Total\"].to_list()\n weeks_floor = DAY_DIFF // 7\n if weeks_floor < 1: # if less than 7 days in timeframe, weeks_floor will be one\n weeks_floor = 1\n wkdy_aver = []\n wkdy_factor = []\n day_range = len(spec_wkdy) if spec_wkdy else 7\n if WEEKDAYS_w_HIGHER_COUNT:\n if spec_wkdy:\n if any(i in spec_wkdy for i in WEEKDAYS_w_HIGHER_COUNT):\n for _ in wd_order:\n if _ in WEEKDAYS_w_HIGHER_COUNT:\n wkdy_factor.append(weeks_floor + 1)\n else:\n wkdy_factor.append(weeks_floor)\n for d in range(wd_order[spec_wkdy[0]], day_range + wd_order[spec_wkdy[0]]):\n wkdy_aver.append(round(wkdy_raw[d] / wkdy_factor[d], 2))\n else:\n for d in range(wd_order[spec_wkdy[0]], day_range + wd_order[spec_wkdy[0]]):\n wkdy_aver.append(round(wkdy_raw[d] / weeks_floor, 2))\n else:\n for _ in wd_order:\n if _ in WEEKDAYS_w_HIGHER_COUNT:\n wkdy_factor.append(weeks_floor + 1)\n else:\n wkdy_factor.append(weeks_floor)\n for d in range(day_range):\n wkdy_aver.append(round(wkdy_raw[d] / wkdy_factor[d], 2))\n\n if spec_wkdy:\n wkdy_averaged = wkdy_averaged.dropna()\n\n wkdy_averaged[\"Average Flights\"] = wkdy_aver\n\n else:\n if WEEKDAYS_w_HIGHER_COUNT:\n for _ in wd_order:\n if _ in WEEKDAYS_w_HIGHER_COUNT:\n wkdy_factor.append(weeks_floor + 1)\n else:\n wkdy_factor.append(weeks_floor)\n for d in range(wd_order[spec_wkdy[0]], day_range + wd_order[spec_wkdy[0]]):\n wkdy_aver.append(round(wkdy_raw[d] / wkdy_factor[d], 2))\n wkdy_averaged[\"Average Flights\"] = wkdy_aver\n else:\n wkdy_averaged[\"Average Flights\"] = round(wkdy_averaged[\"Total\"] / weeks_floor, 2)\n wkdy_averaged = wkdy_averaged[[\"Weekday\", \"Total\", \"Average Flights\"]]\n\n HTML_FILE += f\"<H4 style='text-align:left'>WEEKDAY BAR CHARTS</H4>\\n\"\n HTML_FILE += f\"<H4 style='text-align:left'>{len(flts)} {DEPARR}</H4>\\n\"\n sns.set_style(\"darkgrid\")\n plt.figure(figsize=(9, 5), dpi=100)\n ax = sns.countplot(data=flts, x='WEEKDAY', order=wd_order, color=TOPFIFTEEN_COLORS[6])\n ax.tick_params(axis='x', colors=TOPFIFTEEN_COLORS[0], labelsize=11)\n ax.tick_params(axis='y', colors=TOPFIFTEEN_COLORS[0], labelsize=11)\n\n plt.grid(True, axis=\"y\")\n title_line = f\"Total {DEPARR} per Weekday \"\n\n if spec_wkdy: # TODO SPECWKDY THINGY\n if WEEKDAYS_w_HIGHER_COUNT:\n if any(i in WEEKDAYS_w_HIGHER_COUNT for i in spec_wkdy):\n # check, if spec_wkdy contains any weekday from weekday with higher count, otherwise irrelevant\n days_in_both = [wd for wd in WEEKDAYS_w_HIGHER_COUNT if wd in spec_wkdy]\n if len(days_in_both) == 1: # correct grammar in title line, use \"has\" if single weekday in list\n title_line += f\"\\nNote: {list_items_to_string(days_in_both, ending=' and ')} has more occurences in \" \\\n f\"chosen timeframe, numbers are not representative,\\n\" \\\n f\"see next 
page for weekly averaged number of {DEPARR}\"\n else: # correct grammar in title line, use \"have\" if multiple weekdays in list\n title_line += f\"\\nNote: {list_items_to_string(days_in_both, ending=' and ')} have more occurences in \" \\\n f\"chosen timeframe, numbers are not representative,\\n\" \\\n f\"see next page for weekly averaged number of {DEPARR}\"\n else:\n title_line += f\"\\nNote: {list_items_to_string(spec_wkdy, ending=' and ')} have more occurences in \" \\\n f\"chosen timeframe, numbers are not representative,\\n\" \\\n f\"see next page for weekly averaged number of {DEPARR}\"\n elif WEEKDAYS_w_HIGHER_COUNT:\n # weekdays with higher count will distort numbers, so a note will be added to the plot title\n if len(WEEKDAYS_w_HIGHER_COUNT) == 1: # correct grammar in title line, use \"has\" if single weekday in list\n title_line += f\"\\nNote: {list_items_to_string(WEEKDAYS_w_HIGHER_COUNT, ending=' and ')} has more occurences \" \\\n f\"in chosen timeframe, numbers are not representative,\\n\" \\\n f\"see next page for weekly averaged number of {DEPARR}\"\n else: # correct grammar in title line, use \"have\" if multiple weekdays in list\n title_line += f\"\\nNote: {list_items_to_string(WEEKDAYS_w_HIGHER_COUNT, ending=' and ')} have more occurences \" \\\n f\"in chosen timeframe, numbers are not representative,\\n\" \\\n f\"see next page for weekly averaged number of {DEPARR}\"\n # print(title_line) # for testing purposes\n ax.set_title(title_line, fontsize=12)\n ax.set_xlabel(\"Weekday\", fontsize=12)\n ax.set_ylabel(f\"{DEPARR}\", fontsize=12)\n for p in ax.patches:\n ax.annotate(f'\\n{p.get_height()}', (p.get_x() + 0.2, p.get_height()), ha='left', va='top', color='white',\n size=12)\n filename = \"./plots/flts_dow_start_on_\" + START_WITH + \"_\" + FILENAMESNIP + \".png\"\n plt.savefig(filename)\n FILES_CREATED.append(filename)\n png_file = f\"file:///C:/Users/roman/Python/PyCharmProjects/BER_arr_dep/plots/\" \\\n f\"flts_dow_start_on_{START_WITH}_{FILENAMESNIP}.png\"\n HTML_FILE += f'<img src={png_file} alt=\"Top Destinations\" class=\"center\">'\n plt.figure(figsize=(9, 5), dpi=100)\n ax = sns.countplot(data=flts, x='WEEKDAY', order=flts.WEEKDAY.value_counts().index, color=TOPFIFTEEN_COLORS[6])\n ax.tick_params(axis='x', colors=TOPFIFTEEN_COLORS[0], labelsize=11)\n ax.tick_params(axis='y', colors=TOPFIFTEEN_COLORS[0], labelsize=11)\n\n plt.grid(True, axis=\"y\")\n title_line1 = title_line.split(\"Weekday\")[0] + f\"ordered by amount\" + title_line.split(\"Weekday\")[1]\n ax.set_title(title_line1, fontsize=12)\n ax.set_xlabel(\"Weekday\", fontsize=12)\n ax.set_ylabel(f\"{DEPARR}\", fontsize=12)\n for p in ax.patches:\n ax.annotate(f'\\n{p.get_height()}', (p.get_x() + 0.2, p.get_height()), ha='left', va='top', color='white',\n size=12)\n filename = \"./plots/flts_dow_ordered_valuecounts_start_on_\" + START_WITH + \"_\" + FILENAMESNIP + \".png\"\n plt.savefig(filename)\n FILES_CREATED.append(filename)\n\n png_file = f\"file:///C:/Users/roman/Python/PyCharmProjects/BER_arr_dep/plots/\" \\\n f\"flts_dow_ordered_valuecounts_start_on_{START_WITH}_{FILENAMESNIP}.png\"\n HTML_FILE += f'<img src={png_file} alt=\"Top Destinations\" class=\"center\">'\n # HTML_FILE += flts_by_dow.to_frame(\"Total\").to_html(index=True) # pointless as charts are self explanatory\n HTML_FILE += '<div style=\"page-break-after: always;\"></div>'\n\n plt.figure(figsize=(9, 5), dpi=100)\n ax = sns.barplot(data=wkdy_averaged, x=\"Weekday\", y=\"Average Flights\", color=TOPFIFTEEN_COLORS[6])\n 
ax.tick_params(axis='x', colors=TOPFIFTEEN_COLORS[0], labelsize=11)\n ax.tick_params(axis='y', colors=TOPFIFTEEN_COLORS[0], labelsize=11)\n\n for p in ax.patches:\n ax.annotate(f'\\n{round(p.get_height(), 1)}', (p.get_x() + 0.2, p.get_height()), ha='left', va='top',\n color='white', size=12)\n plt.grid(True, axis=\"y\")\n title_line = f\"Average Number of {DEPARR} per Week and Weekday\"\n if DAY_DIFF < 7:\n title_line += f\"\\nNote: Number of days in timeframe < seven days\"\n ax.set_title(title_line, fontsize=12)\n ax.set_xlabel(\"Weekday\", fontsize=12)\n ax.set_ylabel(f\"{DEPARR}\", fontsize=12)\n filename = \"./plots/flts_dow_averaged_start_on_\" + START_WITH + \"_\" + FILENAMESNIP + \".png\"\n plt.savefig(filename)\n FILES_CREATED.append(filename)\n png_file = f\"file:///C:/Users/roman/Python/PyCharmProjects/BER_arr_dep/plots/\" \\\n f\"flts_dow_averaged_start_on_{START_WITH}_{FILENAMESNIP}.png\"\n HTML_FILE += f'<img src={png_file} alt=\"Top Destinations\" class=\"center\">'\n HTML_FILE += f\"<H4 style='text-align:left'>Average {DEPARR} per week and weekday</H4>\\n\"\n\n wkdy_averaged = wkdy_averaged.dropna() # for display in HTML file\n print(wkdy_averaged)\n HTML_FILE += wkdy_averaged.to_html(index=False)\n # plt.show() # uncomment to see whilst running the code (e.g. fixing stuff)\n\n HTML_FILE += \"\\n<hr>\\n\"\n HTML_FILE += '<div style=\"page-break-after: always;\"></div>'",
"def ridership_by_day(df):\n df['day_of_week'] = df['pickup_datetime'].dt.dayofweek\n df['day_of_week'].plot.hist(bins=np.arange(8)-0.5,\n ec='black',\n ylim=(60000,75000))\n plt.xlabel('Day of Week (0=Monday, 6=Sunday)')\n plt.show()",
"def everyweek(self):\n\n self.ads.check_data()\n self.tr.check_data()\n\n # Telephony data\n self.tp.check_data()\n tp_data = self.tp.report_data(config.WEEK_REPORT_DATE, self.date_report)\n\n self.bar.update(60)\n\n # Calltouch data\n self.ct.check_data()\n ct_report = self.ct.report_data(config.WEEK_REPORT_DATE, self.date_report)\n ct_calls = ct_report.get('calls')\n ct_leads = ct_report.get('leads')\n\n self.bar.update(70)\n\n # Callbacks\n self.cb.get_data(self.date_report)\n callbacks = self.cb.report_data(config.WEEK_REPORT_DATE, self.date_report)\n num_lost_leads = callbacks.get('num_leads')\n lost_leads = callbacks.get('lost_leads')\n late_leads = callbacks.get('late_leads')\n\n self.bar.update(80)\n\n # Creating plots\n tp_plot = self.tp.plot_data()\n ct_plots = self.ct.plot_data()\n calls_plot = ct_plots.get('calls')\n leads_plot = ct_plots.get('leads')\n ads_plots = self.ads.plot_data()\n ctr_plot = ads_plots.get('ctr')\n cpc_plot = ads_plots.get('cpc')\n\n self.bar.update(90)\n\n # Creating dashboard\n link = self.pl.create_dashboard(tp_plot, calls_plot, leads_plot, ctr_plot, cpc_plot)\n\n self.bar.update(93)\n\n # Creating HTML data for email report\n html_data = self.er.html(tp_data, ct_calls, ct_leads, num_lost_leads, lost_leads, late_leads, link)\n subject = \"Отчет за период {} - {}\".format(config.WEEK_REPORT_DATE, self.date_report)\n\n self.bar.update(98)\n\n # Creating and sending email\n msg = self.er.create_mail(config.FROM_ADDR, config.TO_ADDR_DEBUG, subject, html_data)\n self.er.send_email(config.FROM_ADDR, config.TO_ADDR_DEBUG, msg)\n\n self.bar.finish()",
"def weekly_stats(ctx, tag=\"cn\", days=7.0, debug=False):\n\n if debug:\n settings.set_debug_mode()\n\n settings.set_steem_node(STEEM_API_NODES[4], condenser=True)\n\n day_of_the_week = datetime.datetime.today().weekday()\n\n # only Sunday\n if day_of_the_week == 6:\n logger.info(\"Create the weekly summary\")\n bot = CnHelloBot(tag=tag, days=days)\n bot.publish_weekly_stats()\n else:\n logger.info(\"Skip the weekly summary until its Sunday\")",
"def timedelta_arr_weekday(flts):\n global HTML_FILE\n HTML_FILE += f\"<H4 style='text-align:left'>PUNCTUALITY: AVERAGE TIME DELTA per WEEKDAY</H4>\\n\"\n if DEPARR_OPTION == \"DEP+ARR\":\n HTML_FILE += f\"<H5 style='text-align:left'>Only Arrivals will be evaluated, as data provided by BER website \" \\\n f\"on departures is inconsistent.\"\n combined = calculate_timedelta(flts)\n if type(combined) == str:\n HTML_FILE += f\"No arrivals found in filtered flights, for departures the data provided is insufficient for \" \\\n f\"evaluations.<p/>\\n\"\n HTML_FILE += \"\\n<hr>\\n\"\n return print(combined)\n\n if START_WITH.lower() in [\"sun\", \"sunday\", \"sonntag\", \"so\"]: # accept different entries for Sunday ...\n wd_order = WEEKDAY_DICT_SUN\n else:\n wd_order = WEEKDAY_DICT_MON # ... or set default start day to Monday\n\n # weekday - sort by START_WITH\n diff_wd = combined.groupby([\"WEEKDAY\"]).mean().sort_values([\"TIME_DIFF\"])\n for d in diff_wd.index: # replace weekday with numbers 0-6, specified on initialization\n if d in wd_order.keys():\n diff_wd.rename(index={d: wd_order[d]}, inplace=True)\n diff_wd.sort_index(axis=0, inplace=True) # sort Series by the number\n for k, v in wd_order.items(): # change the numbers back to weekday\n for d in diff_wd.index:\n if d in wd_order.values():\n diff_wd.rename(index={v: k}, inplace=True)\n max_y = diff_wd[\"TIME_DIFF\"].max() + 4\n min_y = diff_wd[\"TIME_DIFF\"].min() - 5\n sns.set_style(\"darkgrid\")\n sns.set_context(\"paper\", font_scale=0.8) # font scale lowered to avoid overlapping x-ticks\n plt.figure(figsize=(8, 5))\n plt.ylim(min_y, max_y)\n ax = sns.barplot(x=diff_wd.index, y=diff_wd[\"TIME_DIFF\"], color=TOPFIFTEEN_COLORS[10])\n title_line = f\"Average time differences STA - ATA of Arrivals in Minutes ordered by Weekday\"\n ax.set_title(title_line, fontsize=14)\n ax.tick_params(axis='x', colors=TOPFIFTEEN_COLORS[0], labelsize=11)\n ax.tick_params(axis='y', colors=TOPFIFTEEN_COLORS[0], labelsize=11)\n ax.set_xlabel(\"Weekday\", size=12)\n ax.set_ylabel(\"Average Minute Timedelta\", size=12)\n for p in ax.patches:\n ax.annotate(f'\\n{round(p.get_height(), 1)}', (p.get_x() + 0.2, p.get_height()), ha='left', va='top',\n color=TOPFIFTEEN_COLORS[1], size=12)\n filename = \"./plots/avrg_minute_wd_wd_\" + FILENAMESNIP + \".png\"\n FILES_CREATED.append(filename)\n plt.savefig(filename)\n png_file = f\"file:///C:/Users/roman/Python/PyCharmProjects/BER_arr_dep/plots/\" \\\n f\"avrg_minute_wd_wd_{FILENAMESNIP}.png\"\n HTML_FILE += f'<img src={png_file} alt=\"Late Arrivals\" class=\"center\">'\n\n diff_wd = combined.groupby([\"WEEKDAY\"]).mean().sort_values([\"TIME_DIFF\"])\n plt.figure(figsize=(8, 5))\n plt.ylim(min_y, max_y)\n ax = sns.barplot(x=diff_wd.index, y=diff_wd[\"TIME_DIFF\"], color=TOPFIFTEEN_COLORS[10])\n title_line1 = title_line.split(\" ordered by Weekday\")[0] + title_line.split(\" ordered by Weekday\")[1]\n ax.set_title(title_line1, fontsize=14)\n ax.tick_params(axis='x', colors=TOPFIFTEEN_COLORS[0], labelsize=11)\n ax.tick_params(axis='y', colors=TOPFIFTEEN_COLORS[0], labelsize=11)\n ax.set_xlabel(\"Weekday\", size=12)\n ax.set_ylabel(\"Average Minute Timedelta\", size=12)\n for p in ax.patches:\n ax.annotate(f'\\n{round(p.get_height(), 1)}', (p.get_x() + 0.2, p.get_height()), ha='left', va='top',\n color=TOPFIFTEEN_COLORS[1], size=12)\n filename = \"./plots/avrg_per_week_wd_total\" + FILENAMESNIP + \".png\"\n # plt.show() # uncomment to see whilst running the code (e.g. 
fixing stuff)\n plt.savefig(filename)\n FILES_CREATED.append(filename)\n png_file = f\"file:///C:/Users/roman/Python/PyCharmProjects/BER_arr_dep/plots/\" \\\n f\"avrg_per_week_wd_total{FILENAMESNIP}.png\"\n HTML_FILE += f'<img src={png_file} alt=\"Late Arrivals\" class=\"center\">'\n\n HTML_FILE += \"\\n<hr>\\n\"\n HTML_FILE += '<div style=\"page-break-after: always;\"></div>'",
"async def fpf(self, ctx):\n await ctx.send(f'Only {int(next_friday())} more seconds until the next fanny pack friday')",
"def independenceDayOfWeek():\n dayName = calendar.day_name[calendar.weekday(1776,7,4)]\n print \"The Declaration of Independence was signed on a %s.\" % dayName",
"def week_chart(request):\n now_ = now().replace(hour=0, minute=0, second=0)\n start = now_ - timedelta(days=now_.weekday() + 7 * 12) # 12 weeks\n activities = Activity.objects.filter(date__gte=start, user=request.user).order_by('date')\n\n return JsonResponse(_summarize_by_period(activities, start), safe=False)",
"def formatweekday(self, day):\r\n return '<th class=\"%s\">%s</th>' % (self.cssclasses[day], day_abbr[day])",
"def _get_weekday():\n python_weekday = datetime.datetime.now().weekday()\n api_dayorder = (python_weekday + 1) if python_weekday != 0 else 7\n return api_dayorder",
"def get_weekday(self):\n return self.data['week']",
"def daily_use(self,show=False):\r\n day_use = self.PERDAY * self.ERG\r\n\r\n if show == True:\r\n print('Daily use:',day_use,'\\t[J]')\r\n return day_use",
"def weekday_func(self):\n weekday = features.weekday_func(self.Index)\n self.X['WeekDay'] = weekday\n return weekday",
"def _display_by_week_on_list(self):\n today = str(dt.today())\n list_of_dates = self.expenses_tracker.get_week_dates(today)\n list_of_expenses = self._get_expenses_by_dates(list_of_dates)\n if list_of_expenses:\n self._set_total_label(list_of_expenses)\n self._display_expenses(list_of_expenses)",
"def test_get_weekly_feelings_of_unb(self):\n client = APIClient()\n response = client.get(\"/api/diagnosis/\")\n\n self.assertEqual(200, response.status_code)\n\n for day in WEEK_DAYS:\n post = Post.objects.filter(created_at=day[0]).first()\n self.assertEqual(post.id, response.data[day[1]][0]['id'])",
"def _d_weekly(self):\n _str = ''\n _dayseries = enum.ScheduleDays(self.obj.DaySpec_WeeklyDaysOfWeek).str_days\n _dayseries_daynames = [re.search('^.*_(\\w+)$', x).group(1)[:3] for x in _dayseries]\n _daystrings = ''.join(_dayseries_daynames)\n _interval = self.obj.DaySpec_WeeklyInterval\n _plural = 'Week' if _interval == 1 else 'Weeks'\n _interval = _interval if _interval != 1 else '' # EveryWeek instead of Every1Week - idk just felt like it\n _str = f'Every{_interval}{_plural}.{_daystrings}'\n\n return _str",
"def freq_per_hour_weekly(self):\n feat = [((int(log.split('\\t')[4]) - 1) * 24 + (int(log.split('\\t')[8]))) for log in self.userdata[1:]]\n freq = collections.Counter(feat)\n for i in range(168):\n if freq.has_key(i) is False:\n freq[i] = 0\n return freq",
"def type_working_calendar(self, cr, uid, resource_calendar_id, day, context=None):\n res = 0.0\n for working_day in resource_calendar_id.attendance_ids:\n if (int(working_day.dayofweek) + 1) == day.isoweekday():\n res += 0.5\n return res"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a plot of cases and deaths
|
def cases_and_deaths(self, disease=None, **kwargs):
curves = self.region.pydemic.epidemic_curve(disease)
kwargs.setdefault("tight_layout", True)
return plt.cases_and_deaths(curves, **kwargs)
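`epidemic_curve` and `plt.cases_and_deaths` are project helpers; assuming the curve is a DataFrame with "cases" and "deaths" columns, a bare-bones equivalent plot with pandas and matplotlib might look like this (synthetic numbers):

import pandas as pd
import matplotlib.pyplot as plt

idx = pd.date_range("2020-03-01", periods=60, freq="D")
curves = pd.DataFrame(
    {"cases": [10 * d for d in range(60)], "deaths": list(range(60))}, index=idx
)
ax = curves.plot(title="Cases and deaths")   # one line per column
ax.set_ylabel("cumulative count")
plt.tight_layout()
plt.show()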
|
[
"def death_and_cases_plot(cases_dataframe, death_dataframe, country_name, y_axis_type):\n # create a figure object with width and height\n death_and_cases_fig = figure(x_axis_type=\"datetime\", y_axis_type=y_axis_type,\n width=1000, height=400, sizing_mode='fixed')\n # creating columnDataSource object, for the dataframes\n cases_source = ColumnDataSource(cases_dataframe)\n death_sources = ColumnDataSource(death_dataframe)\n # not use scientific numbers on Y-axis\n death_and_cases_fig.yaxis.formatter = BasicTickFormatter(use_scientific=False)\n # add a line renderer using the cases_source's two columns with a label, color and line width to the figure object\n death_and_cases_fig.line(x='Date', y=country_name, source=cases_source, color='Blue',\n line_width=2, legend_label=\"Cases\")\n # add another line renderer using the death_source's two columns with a label, color and line width.\n death_and_cases_fig.line(x='Date', y=country_name, source=death_sources, color='Red',\n line_width=2, legend_label=\"Deaths\")\n # name and field pairs for the Hover tool\n tooltips = [('Date', '@Date{%F}'), (country_name, \"$y{int}\")]\n # formatting scheme of date column\n formatters = {'@Date': 'datetime'}\n # create a Hover tool for the figure with the tooltips and specify the formatting scheme\n death_and_cases_fig.add_tools(HoverTool(tooltips=tooltips, formatters=formatters))\n # get rid of the default toolbar\n death_and_cases_fig.toolbar_location = None\n death_and_cases_fig.title.text = 'Covid cases and deaths'\n death_and_cases_fig.title.text_color = \"midnightblue\"\n death_and_cases_fig.title.text_font_size = \"25px\"\n death_and_cases_fig.xaxis.axis_label = 'Date'\n death_and_cases_fig.yaxis.axis_label = 'Confirmed Cases'\n death_and_cases_fig.legend.location = \"top_left\"\n return death_and_cases_fig",
"def plot(covid_death_rate_data, args):\n if args.area:\n covid_death_rate_data = filter_by_area(covid_death_rate_data, args.area)\n\n covid_death_rate_data.plot(title=\"Death rate (running average over {} days)\".\n format(args.days), logy=args.log)\n plt.show()",
"def genGraphCases():\n genGraphic1(casesAmount,deathsAmount)",
"def graph_death_rate(total_us_deaths, us_death_rate):\n\n t = np.linspace(0, len(us_death_rate), len(us_death_rate))\n\n # Graph \n fig = plt.figure(figsize = (15, 24))\n\n # Set x labels\n ticks = [i * 7 for i in range(len(t) // 7 + 1)]\n labels = [us_death_rate.index[i] for i in ticks]\n\n # Graph # of deaths\n ax = fig.add_subplot(211, facecolor = '#ffffff')\n ax.plot(t, total_us_deaths)\n ax.set_title('Total US COVID Deaths')\n ax.set_ylabel('# of Deaths')\n ax.set_xlabel('Dates')\n\n ax.set_xticks(ticks)\n ax.set_xticklabels(labels = labels, rotation = 45)\n\n # Graph death rate\n ax = fig.add_subplot(212, facecolor = '#000000')\n ax.plot(t, us_death_rate)\n ax.set_title('Total US COVID Deaths')\n ax.set_ylabel('Daily # of Deaths')\n ax.set_xlabel('Dates')\n\n ax.set_xticks(ticks)\n ax.set_xticklabels(labels = labels, rotation = 45)\n\n return fig",
"def display_deaths(df):\n df_kuolemat = df.copy() #let us copy the dataframe so sorting it won't mess up the original\n kuolemat = df_kuolemat[[\"Country/Region\",\"Deaths\"]]\n kuolemat = kuolemat.sort_values(by = \"Deaths\", ascending = True)\n kuolemattail = kuolemat.tail(5)\n ax = plt.gca()\n plt.title(\"Top 5 countries by covid deaths\")\n kuolemattail.plot(kind='bar',x='Country/Region',y='Deaths',ax=ax)\n plt.xticks(rotation = 15)\n plt.show()",
"def gender_survival_breakdown(df):\n\n df = df.drop([\"Ticket\", \"Cabin\"], axis=1)\n # Remove NaN values\n df = df.dropna()\n\n fig = plt.figure(figsize=(18, 6))\n\n # create a plot of two subsets, male and female, of the survived variable.\n # After we do that we call value_counts() so it can be easily plotted as a bar graph.\n # 'barh' is just a horizontal bar graph\n df_male = df.Survived[df.Sex == \"male\"].value_counts().sort_index()\n df_female = df.Survived[df.Sex == \"female\"].value_counts().sort_index()\n\n ax1 = fig.add_subplot(121)\n df_male.plot(kind=\"barh\", label=\"Male\", alpha=0.55)\n df_female.plot(kind=\"barh\", color=\"#FA2379\", label=\"Female\", alpha=0.55)\n plt.title(\"Who Survived? with respect to Gender, (raw value counts) \")\n plt.legend(loc=\"best\")\n ax1.set_ylim(-1, 2)\n\n # adjust graph to display the proportions of survival by gender\n ax2 = fig.add_subplot(122)\n (df_male / float(df_male.sum())).plot(kind=\"barh\", label=\"Male\", alpha=0.55)\n (df_female / float(df_female.sum())).plot(\n kind=\"barh\", color=\"#FA2379\", label=\"Female\", alpha=0.55\n )\n plt.title(\"Who Survived proportionally? with respect to Gender\")\n plt.legend(loc=\"best\")\n\n ax2.set_ylim(-1, 2)\n\n return fig",
"def EstSurvivalDecade(groups, **opt):\r\n thinkplot.PrePlot(len(groups))\r\n\r\n for _, group in groups:\r\n _, sf = EstSurvival(group)\r\n thinkplot.Plot(sf, **opt)",
"def PlotDeathFitness(self, df, path):\n plt.figure(figsize=(14, 6))\n ax = plt.gca()\n ax.tick_params(width=1)\n for axis in [\"top\", \"bottom\", \"left\", \"right\"]:\n ax.spines[axis].set_linewidth(1)\n cmap = plt.get_cmap(\"coolwarm\")\n columns = [label + \"__DeathFitness\" for label in self.init_label_list]\n df_copy = df[columns].copy()\n df_copy.columns = self.init_label_list\n df_copy.plot(linewidth=1.5, ax=ax, cmap=cmap)\n plt.xlabel(\"Generation\", size=14)\n plt.ylabel(\"Death Fitness\", size=14)\n ax.tick_params(axis=\"both\", which=\"major\", labelsize=12)\n ax.legend(loc=4, fontsize=20)\n plt.savefig(fname=path, dpi=300)",
"def show_covid_error():\n time = covid_error()[0]\n errfirst = covid_error()[1]\n errsecond = covid_error()[2]\n plt.loglog(time, errfirst, 'b-', time, errsecond, 'r-')\n plt.figlegend(('First Order', 'Second Order'))\n plt.xlabel('Delta t (days)')\n plt.ylabel('Error (cases)')\n plt.show()",
"def create_picture():\n result = Data.query.all()\n\n date = []\n cases = []\n death = []\n for r in result:\n date.append(str(r.date))\n cases.append(r.cases)\n death.append(r.death)\n\n pic_path = 'daily_report_{}.png'.format(date[-1])\n\n fig = plt.figure(figsize=(100, 50))\n\n ax1 = fig.add_subplot(111)\n ax1.set_title(\"Covid-19 Daily Report\", fontsize=70)\n ax1.set_ylabel('Cases', fontsize=40)\n\n plt.xticks(rotation=270, fontsize=40)\n plt.yticks(fontsize=50)\n plot1 = ax1.plot(date, cases, '-*', color='r', label='cases')\n\n ax2 = ax1.twinx() # this is the important function\n\n plot2 = ax2.plot(date, death, '-o', color='g', label='death')\n lines = plot1 + plot2\n\n ax2.set_ylabel('Death', fontsize=40)\n ax2.set_xlabel('Date', fontsize=70)\n ax2.tick_params(axis='y', labelsize=50)\n\n plt.gca().xaxis.set_major_locator(ticker.MultipleLocator(12))\n ax1.legend(lines, [l.get_label() for l in lines], fontsize=50)\n\n plt.savefig(pic_path)\n plt.cla()\n plt.clf()\n plt.close()\n return pic_path",
"def outcomes_pregnant_vs_nonpregnant_histogram():\n plt.figure(figsize=(7,7))\n plt.subplot(211)\n outcomes = [\"ICU Admission\", \"Invasive Ventilation\",\"Maternal Death\"]\n # each array in the list represent a collection of each population group for each of the outcomes\n values = [np.array([10.5, 2.9, 1.5]), np.array([9.1, 2.3, 1.2]), np.array([19.4, 6.5, 4.2]), np.array([58.5,23.4,14.1]), np.array([42.8,19.7,23.0])]\n n = len(values) # Number of bars to plot\n w = 0.15 # With of each column\n x = np.arange(0, len(outcomes)) # Center position of group on x axis\n labels = [\"Overall\", \"Age 25-34\", \"Age 35-44\", \"Underlying diabetes\", \"Underlying CVD\"]\n for i, value, label in zip(range(5), values, labels):\n position = x + (w*(1-n)/2) + i*w\n plt.bar(position, value, width=w, label=label)\n\n plt.xticks(x, outcomes, size=8)\n plt.ylabel('Count per 1000 cases')\n plt.title(\"Outcomes in pregnant women with SARS-CoV-2\", size=8)\n plt.legend(fontsize=8)\n\n plt.subplot(212)\n # each array in the list represent a collection of each population group for each of the outcomes\n values = [np.array([3.9, 1.1, 1.2]), np.array([3.5, 0.9, 0.9]), np.array([6.4,1.8,2.3]), np.array([44.8,16.0,12.7]), np.array([32.1,10.6,11.6])]\n for i, value, label in zip(range(5), values, labels):\n position = x + (w*(1-n)/2) + i*w\n plt.bar(position, value, width=w, label=label)\n\n plt.xticks(x, outcomes, size=8)\n plt.ylabel('Count per 1000 cases')\n plt.title(\"Outcomes in non-pregnant women with SARS-CoV-2\", size=8)\n plt.legend(fontsize=8)\n\n plt.show()",
"def show_covid(alpha, delta_t):\n real_t = np.arange(0, 5.01, 0.05)\n real_covid = math.e**(1*real_t)\n plt.plot(*est_covid_first_order(alpha, delta_t), 'r-',\n *est_covid_second_order(alpha, delta_t), 'b-',\n real_t, real_covid, 'k-')\n plt.figlegend(('First Order', 'Second Order', 'Actual'))\n plt.xlabel('Time (days)')\n plt.ylabel('Cases')\n plt.show()",
"def preg_women_hist():\n plt.subplot(211)\n outcomes = [\"Maternal outcomes\"]\n values = [np.array([18.58]), np.array([1.47]), np.array([2.85])]\n upper_cf = np.array([np.array([45.82]),np.array([1.91]),np.array([7.52])])-values\n lower_cf = values-np.array([np.array([7.53]),np.array([1.14]),np.array([1.08])])\n tot_cf = np.array([lower_cf, upper_cf])\n n = len(values) # Number of bars to plot\n w = .15 # With of each column\n x = np.arange(0, len(outcomes)) # Center position of group on x axis\n labels = [\"ICU admission\", \"Preterm birth <37 weeks\", \"All cause mortality\"]\n\n for i, value, label in zip(range(3), values, labels):\n position = x + (w*(1-n)/2) + i*w\n plt.bar(position, value, width=w, label=label, yerr=tot_cf[:,i], capsize=2)\n\n plt.xticks(x, outcomes)\n plt.ylabel(\"Odds ratio\")\n plt.title(\"Odds ratios for various outcomes of pregnant women:\\n SARS-CoV-2 infected vs non-infected with 95% confidence interval.\")\n plt.xlim([-0.5,0.5])\n plt.legend()\n\n plt.subplot(212)\n outcomes = [\"Perinatal outcomes\"]\n values = (2.84)\n cf_tot = np.array([values-np.array([1.25]), np.array([6.45])-values])\n plt.bar(0, values, width=0.15, label=\"Stillbirth\", yerr=cf_tot, capsize=2)\n plt.xticks(np.arange(0, len(outcomes)), outcomes)\n plt.ylabel(\"Odds ratio\")\n plt.xlim([-0.5,0.5])\n plt.legend()\n plt.show()",
"def consecutive_victory_plot():\n\thistories = fetch_all_user_history()\n\tdata = []\n\tfor row in histories:\n\t\ttier = row['tier']\n\t\tprevious_affinity = 0\n\t\tsame_picks = 0\n\t\twin_picks = 0\n\t\tloss_picks = 0 \n\t\tmatches = row['matchlist']['matches']\n\t\tdivisor = len(matches) - 1\n\t\tfor i in range(len(matches)-1):\n\t\t\tresult_pick = matches[i]['champion']\n\t\t\tprev_pick = matches[i+1]['champion']\n\t\t\tif not 'win' in matches[i+1]:\n\t\t\t\tcontinue\n\t\t\tprev_win = matches[i+1]['win']\n\t\t\tif prev_pick == result_pick:\n\t\t\t\tsame_picks += 1\n\t\t\t\tif prev_win:\n\t\t\t\t\twin_picks += 1\n\t\t\t\telse:\n\t\t\t\t\tloss_picks += 1\n\t\tuserinfo = {}\n\t\tuserinfo['same_pick'] = same_picks / divisor\n\t\tuserinfo['win_pick'] = win_picks / divisor\n\t\tuserinfo['loss_pick'] = loss_picks / divisor\n\t\tuserinfo['tier'] = tier\n\t\tdata.append(userinfo)\n\n\t#now draw plot\n\tplt.title = \"same_picks\"\n\tplt.xlabel('probablity of re-picking previous pick')\n\tplt.ylabel('previous pick won/lost')\n\tx = [user['same_pick'] for user in data]\n\ty1 = [user['win_pick'] for user in data]\n\ty2 = [user['loss_pick'] for user in data]\n\tplt.plot(x, y1, 'r.')\n\tplt.plot(x, y2, 'b.')\n\tdraw_regression(x, y1)\n\tdraw_regression(x, y2)\n\n\tplt.show()",
"def plot_log_detected(X, w, y):\r\n x = X['day_num']\r\n plt.figure(figsize=(8, 5))\r\n plt.scatter(x, y, label=\"(day number,log detected)\")\r\n prediction = np.dot(X, w)\r\n plt.plot(x, prediction, label=\"prediction\", color=\"orange\")\r\n plt.title(\"Log the number of cases as a function of the number of days\")\r\n plt.ylabel(\"Log(number of cases)\")\r\n plt.xlabel(\"Number of days\")\r\n plt.legend()\r\n plt.show()",
"def visualization_two(cleaned_data, input_vars=None,\n output_image_name=\"hypothesis_two\"):\n\n cleaned_data = cleaned_data.loc[cleaned_data.chamber == 'house']\n comparison_groups = f.sample_of_means_percent_yay(cleaned_data)\n rep_samples = [\"Republican\", comparison_groups[0], \"r\"]\n dem_samples = [\"Democratic\", comparison_groups[1], \"b\"]\n input_vars = [rep_samples, dem_samples]\n overlapping_density(input_vars)\n ###\n\n # Starter code for labeling the image\n plt.xlabel('Percentage Voting \"Yay\"')\n plt.ylabel(\"Probability Density\")\n plt.title('Comparison of Parties Voting \"Yay\" by Percentage in House')\n plt.legend()\n\n plt.savefig(f'img/{output_image_name}.png', transparent=True)",
"def generate_birth_death_process_data(tau, end, lam, mu):\n\n def get_next_state(current_state):\n birth_probability = 0\n death_probability = 0\n\n # Special: state 0 can only birth\n if(current_state == 0):\n birth_probability = lam * tau\n death_probability = 0\n else:\n birth_probability = lam * tau\n death_probability = mu * tau\n \n random = numpy.random.random()\n # [0, birth_probability, 1-death_probability, 1]\n if random < birth_probability:\n return current_state + 1\n elif random < 1 - death_probability:\n return current_state\n else:\n return current_state - 1\n \n # X[i] is current state\n # time[i] is current time\n i = 0\n X = numpy.array([0])\n time = numpy.array([0])\n while time[i] < end:\n X = numpy.append(X, get_next_state(X[i]))\n time = numpy.append(time, time[i]+tau)\n\n i += 1\n\n pyplot.rcParams['font.family'] = 'sans-serif'\n pyplot.rcParams['font.sans-serif'] = ['SimHei', 'Helvetica', 'Calibri']\n pyplot.xlabel('时间 t')\n pyplot.ylabel('状态 X(t)')\n pyplot.title(f'生灭过程模拟 \\n $\\\\tau = {tau}, end\\ time = {end}$ \\n' f'$\\lambda = {lam}, \\mu = {mu}$')\n pyplot.plot(time, X)\n\n # Save to image\n pyplot.savefig(f'../simulation_results/birth-death-lambda={lam}&mu={mu}&tau={tau}.png', bbox_inches='tight')\n pyplot.close()",
"def plot_transitions(x: np.ndarray, y: np.ndarray, Z: np.ndarray, transitions: List[dict]):\n fig0,(ax0,ax1) = plt.subplots(1, 2, figsize=[13,4])\n fig0.suptitle('Transition Identification', fontsize=16, fontweight='semibold')\n\n ax0.pcolormesh(x, y, Z, cmap='hot')\n ax0.set_xlabel('Fast Gate Voltage (V)', fontsize=14)\n ax0.set_ylabel('TG Voltage (V)', fontsize=14)\n ax0.set_title('Source scan', fontsize=16)\n\n ax1.pcolormesh(x, y, Z, cmap='hot')\n ax1.set_xlabel('Fast Gate Voltage (V)', fontsize=14)\n ax1.set_title('Transitions Identified', fontsize=16)\n\n yvals = ax1.get_ylim()\n for transition in transitions:\n x_base = transition['location']\n if (type(x_base) is int) : x_base = x[x_base]\n\n xvals = [x_base, x_base]\n xvals[1] += (yvals[1] - yvals[0]) / transition['gradient']\n ax1.plot(xvals, yvals, '-', linewidth=4)\n plt.show()",
"def visualize_progression(progression: DataFrame, country_region: str, province_state: str):\n deaths_dataframe = progression[progression[constants.CASE_TYPE] == constants.DEATHS]\n deaths_dataframe = deaths_dataframe[constants.NEGATIVE_NUMBER_OF_RECORDS_TO_SHOW:]\n plt.subplot(*constants.FIRST_SUB_PLOT_LOCATION)\n plt.plot(deaths_dataframe[constants.DATE], deaths_dataframe[constants.CASES], constants.LINE_TYPE)\n plt.title(constants.TITLE.format(case_type=constants.DEATHS, country_region=country_region,\n province_state=province_state))\n plt.xlabel(constants.DATE)\n plt.ylabel(constants.CASES)\n\n confirmed_dataframe = progression[progression[constants.CASE_TYPE] == constants.CONFIRMED]\n confirmed_dataframe = confirmed_dataframe[constants.NEGATIVE_NUMBER_OF_RECORDS_TO_SHOW:]\n plt.subplot(*constants.SECOND_SUB_PLOT_LOCATION)\n plt.plot(confirmed_dataframe[constants.DATE], confirmed_dataframe[constants.CASES], constants.LINE_TYPE)\n plt.title(constants.TITLE.format(case_type=constants.CONFIRMED, country_region=country_region,\n province_state=province_state))\n plt.xlabel(constants.DATE)\n plt.ylabel(constants.CASES)\n\n plt.show()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|