Columns:
    query: string (length 9 to 9.05k)
    document: string (length 10 to 222k)
    negatives: list (length 19 to 20)
    metadata: dict
Make a string from an int. Hexadecimal representation will be used if the input value is greater than 'max_dec'.
def int2str(val, max_dec=1024):
    if val > max_dec:
        return "0x%x" % val
    else:
        return "%d" % val
[ "def try_int_to_str(val, max_dec=1024):\n if isinstance(val, int):\n if val > max_dec:\n return \"0x%x\" % val\n else:\n return \"%d\" % val\n else:\n return val", "def IntStr( num, dec=None ):\n num = int(num)\n if not dec: return str(num)\n if dec <= len(str(num)):\n return str(num)\n ans = \"0\" * (dec - (len(str(num)))) + str(num)\n return ans", "def convert_int(n: int) -> str:\n\n return str(n)", "def get_digit_string(num, base):\n remainder = num % base\n if base == 16 or base == 32:\n return to_char(remainder)\n else:\n return str(remainder)", "def int_to_tag_hex(value: int) -> str:\n return format(value, \"x\").zfill(4)", "def int_to_string( x ):\n assert x >= 0\n if x == 0: return b('\\0')\n result = []\n while x:\n ordinal = x & 0xFF\n result.append(int2byte(ordinal))\n x >>= 8\n\n result.reverse()\n return b('').join(result)", "def convert_to_string(num_int, base=10):\n base_chars = BASES[base-1]\n if num_int >= base:\n mod = num_int % base\n return convert_to_string(num_int // base, base=base) + base_chars[mod]\n else:\n return base_chars[num_int]", "def int2hex(i: int) -> str:\n chars = '0123456789abcdef'\n out = ''\n out_ = ''\n while i > 0:\n out_ += chars[i % 16]\n i //= 16\n # Output string must be reversed.\n for x in range(len(out_)-1, -1, -1):\n out += out_[x]\n # Pad so all hex values are two characters.\n if len(out) < 2:\n out = '0' + out\n return out", "def inttohex(int_):\n\tif int_ >= 0:\n\t\treturn (\"{0:0>4s}\".format(hex(int_ % (1 << 16))[2:])).upper()\n\telse:\n\t\treturn (hex((int_ + (1 << 16)) % (1 << 16)).upper()[2:]).upper()", "def inttohex(x):\n x = '%x' % x\n return ('0' if len(x) % 2 else '') + x", "def int_to_hex(num, length):\n return \"{:x}\".format(num).zfill(length)", "def inttohexstring(val, width=4):\n s = hex(val)[2:]\n return '{:0>{w}}'.format(s, w=width)", "def int2ascii(i: int) -> str:\n if i > 127:\n raise ValueError('The passed integer value must be <= 127.')\n return chr(i)", "def to_str(number, base):\n convert_string = \"0123456789ABCDEF\"\n if number < base:\n return convert_string[number]\n else:\n return to_str(number // base, base) + convert_string[number % base]", "def convertToBase7(self, num: int) -> str:\n base = 7\n out = []\n sign = 1\n if num < 0:\n sign = -1\n num *= sign\n while num >= base:\n x = num // base\n rest = num % (x * base)\n out.insert(0, str(rest))\n num = x\n out.insert(0, str(num))\n if sign < 0:\n out.insert(0, '-')\n return ''.join(out)", "def int_to_7char_str(i):\n #the pins always have 7 digits\n pin = str(i)\n l = len(pin)\n if (l < 7):\n zeros = \"\"\n for j in range(7-l):\n zeros += \"0\"\n pin = zeros + pin\n return pin", "def convert_to_two_char_string(number):\n\tif number < 10:\n\t\treturn '0%s' % number\n\telse:\n\t\treturn '%s' % number", "def convert_base_10_to_any_base(x: int, base: int) -> str:\n assert(x >= 0)\n assert(1< base < 37)\n r = ''\n import string\n while x > 0:\n r = string.printable[x % base] + r\n x //= base\n return r", "def int_to_string( long_int, padto=None ):\n if long_int > 0:\n octet_string = \"\"\n while long_int > 0:\n long_int, r = divmod( long_int, 256 )\n octet_string = chr( r ) + octet_string\n elif long_int == 0:\n octet_string = chr(0)\n else:\n raise ValueError('int_to-string unable to convert negative numbers')\n \n if padto:\n padlen = padto - len(octet_string)\n assert padlen >= 0\n octet_string = padlen*chr(0) + octet_string\n return octet_string" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if value is a non-negative integer
def is_non_neg_int(val): return isinstance(val, int) and val >= 0
[ "def is_int_neg(x):\n return True if is_int(x) and x < 0 else False", "def is_int_not_neg(x):\n return True if is_int(x) and x >= 0 else False", "def is_Negative(x):\n if x < 0:\n return True\n return False", "def _isNotNegative(self, value):\n\n isNotNegative = False\n if (value >= 0):\n isNotNegative = True\n else:\n raise ValueError(\"The input value should be >= 0.\")\n\n return isNotNegative", "def is_negative(i):\n pass", "def isinteger(value):\n try:\n return value == int(value)\n except TypeError:\n return False", "def isInt(cls, value):\n return cls.asInt(value) is not None", "def is_positive(x: int) -> bool:\n return x > 0", "def _unsigned_int(number):\n try:\n return int(number) >= 0\n except ValueError:\n return False", "def is_positive(number):\n if number > 0:\n return True\n return None", "def _is_int(val):\n try:\n int(val)\n return True\n except ValueError:\n return False", "def is_integer(self):\n return type(self.value) == int", "def unsigned_int(number):\n try:\n return int(number) >= 0\n except ValueError:\n return False", "def check_valid_integer(value):\n if value is None:\n return\n check_type(integer_types, value)", "def is_positive_integer(obj, name: str) -> None:\n\n if type(obj) != int:\n raise ValueError(f\"{name} should be an integer.\")\n elif obj <= 0:\n raise ValueError(f\"{name} should be an positive integer.\")", "def checkIfInt(value):\n if not isinstance(value, int):\n raise TypeError((\"value is not an int. \"\n \"value = {0}\").format(value))", "def validation(value):\n\n if isinstance(value, int) and value > 0:\n return True\n else:\n print(HELP_MSG)\n return False", "def _is_integer(num):\n try:\n out = int(float(num)) == float(num)\n except ValueError:\n out = False\n return out", "def is_int(x):\n return type(x) == int" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if value is a list
def is_list(val): return isinstance(val, list)
[ "def _is_valid_value(value: List) -> bool:\n return isinstance(value, list)", "def isListLike(value):\r\n\r\n return isinstance(value, (list, tuple, set))", "def _is_list(e):\n return isinstance(e, LIST_TYPE)", "def is_list(x):\n return type(x) == list", "def _check_is_list(obj):\n return isinstance(obj, (list, List))", "def is_list(obj):\n return isinstance(obj, list)", "def islist(tok):\n return type(tok) == list", "def is_list(type_value):\n\n if not is_typing_type(type_value):\n return False\n\n try:\n if sys.version_info < (3, 7):\n return type_value.__origin__ == typing.List\n return type_value.__origin__ == list\n except AttributeError:\n return False", "def isList(obj):\n\treturn type(obj)==list", "def _is_list(cls, annotation: Any) -> bool:\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin == list", "def is_list_like ( v ) :\n return isinstance ( v , listlike_type ) and not isinstance ( v , string_types )", "def _is_array(v):\n return isinstance(v, list)", "def is_string_list(val):\r\n if type(val) != list:\r\n return False\r\n\r\n for i in val:\r\n if type(i) != str:\r\n return False\r\n\r\n return True", "def is_list(self) -> bool:\n return self.restrictions.is_list", "def is_pj_lot(value):\n return isinstance(value, list)", "def is_list(node):\r\n return (isinstance(node, Node)\r\n and len(node.children) > 1\r\n and isinstance(node.children[0], Leaf)\r\n and isinstance(node.children[-1], Leaf)\r\n and node.children[0].value == \"[\"\r\n and node.children[-1].value == \"]\")", "def test_is_list(self):\n self.assertEqual(type(self.randomcode),list, 'Code not a list')", "def _is_chromosome_list(self, chromosome):\n try:\n list(chromosome)\n return True # it is a list\n except TypeError:\n return False # it is a single number", "def is_listlike(item):\n try:\n [x for x in item]\n return not is_stringlike(item)\n except TypeError:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if string starts with a letter
def is_first_letter(val): return ord(val[0].lower()) in range(ord('a'), ord('z') + 1)
[ "def contains_at_least_one_letter(string):\n first_letter = string[0]\n for letter in string[1:]:\n if first_letter == letter:\n return True\n else:\n first_letter = letter\n return False", "def IsNameStartChar(c):\n if c <= u\"z\":\n if c >= u\"a\":\n return True\n elif c <= u\"Z\":\n if c >= u\"A\":\n return True\n else:\n return c == u\":\"\n else:\n return c == u\"_\"\n else:\n return NameStartCharClass.test(c)", "def first_letter(self, letter):\n return self[0] == letter", "def is_alphabetic(word_str):\n return re.match(r'^[a-zA-Z]+$', word_str) is not None", "def contains_letters(string):\n return bool(re.search(r'[a-z]', string, re.IGNORECASE))", "def is_letter(character: str) -> bool:\n return ord('a') <= ord(character) <= ord('z')", "def single_letter(word):\n\tif len(word)==1 and word!='a' and word!='I':\n\t\treturn True\n\treturn False", "def str_starts_with(s, val, start=0):\n return s.startswith(val, clamp_str_index(s, start))", "def not_letter(character: str) -> bool:\n return character not in LETTERS", "def custom_startswith(string, incomplete):\n if os.environ.get(\"_CLICK_COMPLETION_COMMAND_CASE_INSENSITIVE_COMPLETE\"):\n string = string.lower()\n incomplete = incomplete.lower()\n return string.startswith(incomplete)", "def my_isalpha(s):\n registry_1 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n registry_2 = registry_1.lower()\n alpha = True\n if len(s) > 0:\n for i in range(0, len(s)):\n if s[i] not in registry_1 or s[i] not in registry_2:\n alpha = False\n return(alpha)", "def isBaseNameStartChar(c: 'char const') -> \"SbBool\":\n return _coin.SbName_isBaseNameStartChar(c)", "def _single_prefix_char(token: str, parser: argparse.ArgumentParser) -> bool:\n return len(token) == 1 and token[0] in parser.prefix_chars", "def SbName_isBaseNameStartChar(c: 'char const') -> \"SbBool\":\n return _coin.SbName_isBaseNameStartChar(c)", "def contains_lowercase(s):\n return contain_lower_regexp.search(s) is not None", "def IsLetter(c):\n return IsBaseChar(c) or IsIdeographic(c)", "def first_character(self, from_end: bool = False) -> \"Regex\":", "def covers_alphabet(sentence: str) -> bool:\n # greater than or equal to include , ; ! etc.\n return set(sentence.lower()) >= set(\"abcdefghijklmnopqrstuvwxyz\")", "def is_prefixed_with(string, prefix):\n return string.find(prefix) == 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate simple register map template
def create_template_simple():
    rmap = RegisterMap()
    rmap.add_registers(Register('DATA', 'Data register', 0x0).add_bitfields(
        BitField(width=32, access='rw', hardware='ioe')))
    rmap.add_registers(Register('CTRL', 'Control register', 0x4).add_bitfields(
        BitField(width=16, access='rw', reset=0x0100, hardware='o')))
    rmap.add_registers(Register('STATUS', 'Status register', 0x8).add_bitfields(
        BitField(width=8, access='ro', hardware='i')))
    rmap.add_registers(Register('START', 'Start register', 0x100).add_bitfields(
        BitField(width=1, access='wosc', hardware='o')))
    return rmap
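A brief usage sketch (not part of the dataset row), assuming the RegisterMap, Register, and BitField classes referenced above are importable from whatever register-map library this document targets:

    rmap = create_template_simple()
    # The returned map holds four registers: DATA at 0x0, CTRL at 0x4,
    # STATUS at 0x8, and START at 0x100.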
[ "def create_template():\n # register map\n rmap = RegisterMap()\n\n rmap.add_registers(Register('DATA', 'Data register', 0x4).add_bitfields([\n BitField(\"FIFO\", \"Write to push value to TX FIFO, read to get data from RX FIFO\",\n width=8, lsb=0, access='rw', hardware='q'),\n BitField(\"FERR\", \"Frame error flag. Read to clear.\", width=1, lsb=16, access='rolh', hardware='i'),\n BitField(\"PERR\", \"Parity error flag. Read to clear.\", width=1, lsb=17, access='rolh', hardware='i'),\n ]))\n\n rmap.add_registers(Register('STAT', 'Status register', 0xC).add_bitfields([\n BitField(\"BUSY\", \"Transciever is busy\", width=1, lsb=2, access='ro', hardware='ie'),\n BitField(\"RXE\", \"RX FIFO is empty\", width=1, lsb=4, access='ro', hardware='i'),\n BitField(\"TXF\", \"TX FIFO is full\", width=1, lsb=8, access='ro', hardware='i'),\n ]))\n\n rmap.add_registers(Register('CTRL', 'Control register', 0x10).add_bitfields([\n BitField(\"BAUD\", \"Baudrate value\", width=2, lsb=0, access='rw', hardware='o').add_enums([\n EnumValue(\"B9600\", 0, \"9600 baud\"),\n EnumValue(\"B38400\", 1, \"38400 baud\"),\n EnumValue(\"B115200\", 2, \"115200 baud\"),\n ]),\n BitField(\"TXEN\", \"Transmitter enable. Can be disabled by hardware on error.\",\n width=1, lsb=4, access='rw', hardware='oie'),\n BitField(\"RXEN\", \"Receiver enable. Can be disabled by hardware on error.\",\n width=1, lsb=5, access='rw', hardware='oie'),\n BitField(\"TXST\", \"Force transmission start\", width=1, lsb=6, access='wosc', hardware='o'),\n ]))\n\n rmap.add_registers(Register('LPMODE', 'Low power mode control', 0x14).add_bitfields([\n BitField(\"DIV\", \"Clock divider in low power mode\", width=8, lsb=0, access='rw', hardware='o'),\n BitField(\"EN\", \"Low power mode enable\", width=1, lsb=31, access='rw', hardware='o'),\n ]))\n\n rmap.add_registers(Register('INTSTAT', 'Interrupt status register', 0x20).add_bitfields([\n BitField(\"TX\", \"Transmitter interrupt flag. Write 1 to clear.\", width=1, lsb=0, access='rw1c', hardware='s'),\n BitField(\"RX\", \"Receiver interrupt. 
Write 1 to clear.\", width=1, lsb=1, access='rw1c', hardware='s'),\n ]))\n\n rmap.add_registers(Register('ID', 'IP-core ID register', 0x40).add_bitfields([\n BitField(\"UID\", \"Unique ID\", width=32, lsb=0, access='ro', hardware='f', reset=0xcafe0666),\n ]))\n\n return rmap", "def _reg_map(self, reg_name):\n\n reg_map = None\n\n if self.architecture == uc.UC_ARCH_MIPS:\n reg_map = {\n \"zero\" : UC_MIPS_REG_ZERO, #=2\n \"at\" : UC_MIPS_REG_AT, #=3\n \"v0\" : UC_MIPS_REG_V0, #=4\n \"v1\" : UC_MIPS_REG_V1, #=5\n \"a0\" : UC_MIPS_REG_A0, #=6\n \"a1\" : UC_MIPS_REG_A1, #=7\n \"a2\" : UC_MIPS_REG_A2, #=8\n \"a3\" : UC_MIPS_REG_A3, #=9\n \"t0\" : UC_MIPS_REG_T0, #=10\n \"t1\" : UC_MIPS_REG_T1, #=11\n \"t2\" : UC_MIPS_REG_T2, #=12\n \"t3\" : UC_MIPS_REG_T3, #=13\n \"t4\" : UC_MIPS_REG_T4, #=14\n \"t5\" : UC_MIPS_REG_T5, #=15\n \"t6\" : UC_MIPS_REG_T6, #=16\n \"t7\" : UC_MIPS_REG_T7, #=17\n \"s0\" : UC_MIPS_REG_S0, #=18\n \"s1\" : UC_MIPS_REG_S1, #=19\n \"s2\" : UC_MIPS_REG_S2, #=20\n \"s3\" : UC_MIPS_REG_S3, #=21\n \"s4\" : UC_MIPS_REG_S4, #=22\n \"s5\" : UC_MIPS_REG_S5, #=23\n \"s6\" : UC_MIPS_REG_S6, #=24\n \"s7\" : UC_MIPS_REG_S7, #=25\n \"t8\" : UC_MIPS_REG_T8, #=26\n \"t9\" : UC_MIPS_REG_T9, #=27\n \"k0\" : UC_MIPS_REG_K0, #=28\n \"k1\" : UC_MIPS_REG_K1, #=29\n \"gp\" : UC_MIPS_REG_GP, #=30\n \"sp\" : UC_MIPS_REG_SP, #=31\n \"fp\" : UC_MIPS_REG_FP, #=32\n #\"s8\" : UC_MIPS_REG_S8, #=32\n \"ra\" : UC_MIPS_REG_RA, #=33\n \"hi\" : UC_MIPS_REG_HI, #= 129\n \"lo\" : UC_MIPS_REG_LO, #= 130\n \"pc\" : UC_MIPS_REG_PC, #= 1\n #UC_MIPS_REG_HI0, #=45\n #UC_MIPS_REG_HI1, #=46\n #UC_MIPS_REG_HI2, #=47\n #UC_MIPS_REG_HI3, #=48\n #UC_MIPS_REG_LO0, #=45\n #UC_MIPS_REG_LO1, #=46\n #UC_MIPS_REG_LO2, #=47\n #UC_MIPS_REG_LO3, #=48\n }\n\n elif self.architecture == uc.UC_ARCH_ARM64:\n reg_map = {\n \"r0\" : UC_ARM64_REG_X0, #= 199\n \"r1\" : UC_ARM64_REG_X1, #= 200\n \"r2\" : UC_ARM64_REG_X2, #= 201\n \"r3\" : UC_ARM64_REG_X3, #= 202\n \"r4\" : UC_ARM64_REG_X4, #= 203\n \"r5\" : UC_ARM64_REG_X5, #= 204\n \"r6\" : UC_ARM64_REG_X6, #= 205\n \"r7\" : UC_ARM64_REG_X7, #= 206\n \"r8\" : UC_ARM64_REG_X8, #= 207\n \"r9\" : UC_ARM64_REG_X9, #= 208\n \"r10\" : UC_ARM64_REG_X10, #= 209\n \"r11\" : UC_ARM64_REG_X11, #= 210\n \"r12\" : UC_ARM64_REG_X12, #= 211\n \"r13\" : UC_ARM64_REG_X13, #= 212\n \"r14\" : UC_ARM64_REG_X14, #= 213\n \"r15\" : UC_ARM64_REG_X15, #= 214\n \"r16\" : UC_ARM64_REG_X16, #= 215\n \"r17\" : UC_ARM64_REG_X17, #= 216\n \"r18\" : UC_ARM64_REG_X18, #= 217\n \"r19\" : UC_ARM64_REG_X19, #= 218\n \"r20\" : UC_ARM64_REG_X20, #= 219\n \"r21\" : UC_ARM64_REG_X21, #= 220\n \"r22\" : UC_ARM64_REG_X22, #= 221\n \"r23\" : UC_ARM64_REG_X23, #= 222\n \"r24\" : UC_ARM64_REG_X24, #= 223\n \"r25\" : UC_ARM64_REG_X25, #= 224\n \"r26\" : UC_ARM64_REG_X26, #= 225\n \"r27\" : UC_ARM64_REG_X27, #= 226\n \"r28\" : UC_ARM64_REG_X28, #= 227\n\n \"r29\" : UC_ARM64_REG_X29, #= 1\n \"r30\" : UC_ARM64_REG_X30, #= 2\n\n \"r31\" : UC_ARM64_REG_SP, #= 4\n \"sp\" : UC_ARM64_REG_SP, #= 4\n #\"xzr\" : UC_ARM64_REG_XZR, #= 7\n\n \"pc\" : UC_ARM64_REG_PC, #= 260\n }\n\n elif self.architecture == uc.UC_ARCH_X86:\n if self.mode == uc.UC_MODE_16:\n raise Exception(\"Register map not implemented\")\n elif self.mode == uc.UC_MODE_32:\n raise Exception(\"Register map not implemented\")\n elif self.mode == uc.UC_MODE_64:\n reg_map = {\n \"rax\" : UC_X86_REG_RAX,\n \"rbx\" : UC_X86_REG_RBX,\n \"rcx\" : UC_X86_REG_RCX,\n \"rdx\" : UC_X86_REG_RDX,\n \"rdi\" : UC_X86_REG_RSI,\n \"rsi\" : UC_X86_REG_RDI,\n \"rbp\" : UC_X86_REG_RBP,\n \"rsp\" : 
UC_X86_REG_RSP,\n \"rip\" : UC_X86_REG_RIP,\n \"r8\" : UC_X86_REG_R8,\n \"r9\" : UC_X86_REG_R9,\n \"r10\" : UC_X86_REG_R10,\n \"r11\" : UC_X86_REG_R11,\n \"r12\" : UC_X86_REG_R12,\n \"r13\" : UC_X86_REG_R13,\n \"r14\" : UC_X86_REG_R14,\n \"r15\" : UC_X86_REG_R15,\n }\n\n elif self.architecture == uc.UC_ARCH_ARM:\n if self.mode == uc.UC_MODE_ARM:\n reg_map = {\n \"r0\" : UC_ARM64_REG_W0, #= 199\n \"r1\" : UC_ARM64_REG_W1, #= 200\n \"r2\" : UC_ARM64_REG_W2, #= 201\n \"r3\" : UC_ARM64_REG_W3, #= 202\n \"r4\" : UC_ARM64_REG_W4, #= 203\n \"r5\" : UC_ARM64_REG_W5, #= 204\n \"r6\" : UC_ARM64_REG_W6, #= 205\n \"r7\" : UC_ARM64_REG_W7, #= 206\n \"r8\" : UC_ARM64_REG_W8, #= 207\n \"r9\" : UC_ARM64_REG_W9, #= 208\n \"r10\" : UC_ARM64_REG_W10, #= 209\n \"r11\" : UC_ARM64_REG_W11, #= 210\n \"r12\" : UC_ARM64_REG_W12, #= 211\n \"r13\" : UC_ARM64_REG_W13, #= 212\n \"r14\" : UC_ARM64_REG_W14, #= 213\n \"r15\" : UC_ARM64_REG_W15, #= 214\n \"r16\" : UC_ARM64_REG_W16, #= 215\n \"r17\" : UC_ARM64_REG_W17, #= 216\n \"r18\" : UC_ARM64_REG_W18, #= 217\n \"r19\" : UC_ARM64_REG_W19, #= 218\n \"r20\" : UC_ARM64_REG_W20, #= 219\n \"r21\" : UC_ARM64_REG_W21, #= 220\n \"r22\" : UC_ARM64_REG_W22, #= 221\n \"r23\" : UC_ARM64_REG_W23, #= 222\n \"r24\" : UC_ARM64_REG_W24, #= 223\n \"r25\" : UC_ARM64_REG_W25, #= 224\n \"r26\" : UC_ARM64_REG_W26, #= 225\n \"r27\" : UC_ARM64_REG_W27, #= 226\n \"r28\" : UC_ARM64_REG_W28, #= 227\n\n \"r29\" : UC_ARM64_REG_W29, #= 1\n \"r30\" : UC_ARM64_REG_W30, #= 2\n\n \"r31\" : UC_ARM64_REG_SP, #= 4\n \"sp\" : UC_ARM64_REG_SP, #= 4\n #\"xzr\" : UC_ARM64_REG_XZR, #= 7\n\n \"pc\" : UC_ARM64_REG_PC, #= 260\n }\n elif self.mode == uc.UC_MODE_THUMB:\n raise Exception(\"Register map for ARM thumb-mode not implemented\")\n\n else:\n raise Exception(\"Register map not implemented\")\n\n return reg_map.get(reg_name, 0x11223344)", "def custom_template_map(self):\n return dict(self.custom_template_items)", "def vpp_show_lisp_map_register(node):\n\n vat = VatExecutor()\n vat.execute_script_json_out('lisp/show_lisp_map_register.vat', node)\n return JsonParser().parse_data(vat.get_script_stdout())", "def generateRegisterMacro(per, reg, desc, size, offset, acc):\n\tret = generateInfoHeader(desc)\n\tret += generateDefine(\"%s_%s_OFFSET\" % (per, reg), \"(%su)\" % (format(offset, \"#3x\")))\n\tret += generateDefine(\"%s_%s_TYPE\" % (per, reg), generateType(size, acc))\n\tret += generateDefine(\"%s_%s_ADDRESS\" % (per, reg), \"(%s_BASE + %s_%s_OFFSET)\" % (per, per, reg))\n\tret += generateDefine(\"%s_%s\" % (per, reg), \"*((%s_%s_TYPE *)%s_%s_ADDRESS)\\n\" % (per, reg, per, reg))\n\treturn ret", "def register():\n \n return render_template('register.html')", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('SAM').get('abstractTypes')\n exolinks = globalMap.get('SAM').get('exolinks')\n\n # DataType AmountUnit\n currentMap = {}\n abstractTypes['AmountUnit'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00006'] = currentMap\n loadMaps['SAM.AmountUnit'] = currentMap\n currentMap['tag'] = 'SAM.AmountUnit'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00006'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # DataType ConcentrationUnit\n currentMap = {}\n abstractTypes['ConcentrationUnit'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00005'] = currentMap\n loadMaps['SAM.ConcentrationUnit'] = currentMap\n currentMap['tag'] = 'SAM.ConcentrationUnit'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00005'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # DataType Solvent\n currentMap = {}\n abstractTypes['Solvent'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2009-11-19-14:50:32_00005'] = currentMap\n loadMaps['SAM.Solvent'] = currentMap\n currentMap['tag'] = 'SAM.Solvent'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2009-11-19-14:50:32_00005'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class AbstractSample\n currentMap = {}\n abstractTypes['AbstractSample'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00007'] = currentMap\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00007'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'abstractSamples'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Sample.AbstractSample\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AbstractSample.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AbstractSample.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00009'] = currentMap\n loadMaps['SAM.AbstractSample.details'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00009'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00035')\n\n # Attribute AbstractSample.ionicStrength\n currentMap = {}\n contentMap['ionicStrength'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00006'] = currentMap\n loadMaps['SAM.AbstractSample.ionicStrength'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.ionicStrength'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00006'\n currentMap['name'] = 'ionicStrength'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute AbstractSample.isActive\n currentMap = {}\n contentMap['isActive'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00008'] = currentMap\n loadMaps['SAM.AbstractSample.isActive'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.isActive'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00008'\n currentMap['name'] = 'isActive'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AbstractSample.isHazard\n currentMap = {}\n contentMap['isHazard'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00007'] = currentMap\n loadMaps['SAM.AbstractSample.isHazard'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.isHazard'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00007'\n currentMap['name'] = 'isHazard'\n currentMap['hicard'] = 1\n 
currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AbstractSample.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00004'] = currentMap\n loadMaps['SAM.AbstractSample.name'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00004'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AbstractSample.ph\n currentMap = {}\n contentMap['ph'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00005'] = currentMap\n loadMaps['SAM.AbstractSample.ph'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.ph'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00005'\n currentMap['name'] = 'ph'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute AbstractSample.solvent\n currentMap = {}\n contentMap['solvent'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2012-03-28-17:22:44_00014'] = currentMap\n loadMaps['SAM.AbstractSample.solvent'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.solvent'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2012-03-28-17:22:44_00014'\n currentMap['name'] = 'solvent'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2009-11-19-14:50:32_00005')\n\n # Role AbstractSample.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AbstractSample.hazardPhrases\n currentMap = {}\n contentMap['hazardPhrases'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00013'] = currentMap\n loadMaps['SAM.AbstractSample.hazardPhrases'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.hazardPhrases'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00013'\n currentMap['name'] = 'hazardPhrases'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('CLAS').get('exolinks')\n\n # Role AbstractSample.sampleCategories\n currentMap = {}\n contentMap['sampleCategories'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00003'] = currentMap\n loadMaps['SAM.AbstractSample.sampleCategories'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.sampleCategories'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00003'\n currentMap['name'] = 'sampleCategories'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('CLAS').get('exolinks')\n\n # Role AbstractSample.sampleComponents\n currentMap = {}\n contentMap['sampleComponents'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00001'] = currentMap\n loadMaps['SAM.AbstractSample.sampleComponents'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.sampleComponents'\n currentMap['type'] = 'child'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00001'\n currentMap['name'] = 'sampleComponents'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('SAM').get('abstractTypes')\n # End of AbstractSample\n\n currentMap = abstractTypes.get('AbstractSample')\n aList = ['ionicStrength', 'isActive', 'isHazard', 'ph']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'name', 'solvent']\n currentMap['simpleAttrs'] = aList\n aList = ['sampleComponents', 'sampleCategories', 'hazardPhrases', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['sampleComponents']\n currentMap['children'] = aList\n\n # Class RefSampleSource\n currentMap = {}\n abstractTypes['RefSampleSource'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00008'] = currentMap\n loadMaps['SAM.RefSampleSource'] = currentMap\n currentMap['tag'] = 'SAM.RefSampleSource'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00008'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'refSampleSources'\n currentMap['objkey'] = 'catalogNum'\n currentMap['class'] = ccp.api.lims.Sample.RefSampleSource\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefSampleSource.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefSampleSource.catalogNum\n currentMap = {}\n contentMap['catalogNum'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00017'] = currentMap\n loadMaps['SAM.RefSampleSource.catalogNum'] = currentMap\n currentMap['tag'] = 'SAM.RefSampleSource.catalogNum'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00017'\n currentMap['name'] = 'catalogNum'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute RefSampleSource.dataPageUrl\n currentMap = {}\n contentMap['dataPageUrl'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00018'] = currentMap\n loadMaps['SAM.RefSampleSource.dataPageUrl'] = currentMap\n currentMap['tag'] = 'SAM.RefSampleSource.dataPageUrl'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00018'\n currentMap['name'] = 'dataPageUrl'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Role RefSampleSource.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role RefSampleSource.supplier\n currentMap = {}\n contentMap['supplier'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00016'] = currentMap\n loadMaps['SAM.RefSampleSource.supplier'] = currentMap\n currentMap['tag'] = 'SAM.RefSampleSource.supplier'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00016'\n currentMap['name'] = 'supplier'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('AFFI').get('exolinks')\n # End of RefSampleSource\n\n currentMap = abstractTypes.get('RefSampleSource')\n aList = ['catalogNum', 'dataPageUrl']\n currentMap['simpleAttrs'] = aList\n aList = ['supplier', 'access', 
'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class SampleComponent\n currentMap = {}\n abstractTypes['SampleComponent'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00002'] = currentMap\n loadMaps['SAM.SampleComponent'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00002'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'sampleComponents'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccp.api.lims.Sample.SampleComponent\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute SampleComponent.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute SampleComponent.concDisplayUnit\n currentMap = {}\n contentMap['concDisplayUnit'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00004'] = currentMap\n loadMaps['SAM.SampleComponent.concDisplayUnit'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.concDisplayUnit'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00004'\n currentMap['name'] = 'concDisplayUnit'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute SampleComponent.concentration\n currentMap = {}\n contentMap['concentration'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00001'] = currentMap\n loadMaps['SAM.SampleComponent.concentration'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.concentration'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00001'\n currentMap['name'] = 'concentration'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute SampleComponent.concentrationError\n currentMap = {}\n contentMap['concentrationError'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00002'] = currentMap\n loadMaps['SAM.SampleComponent.concentrationError'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.concentrationError'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00002'\n currentMap['name'] = 'concentrationError'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute SampleComponent.concentrationUnit\n currentMap = {}\n contentMap['concentrationUnit'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00003'] = currentMap\n loadMaps['SAM.SampleComponent.concentrationUnit'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.concentrationUnit'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00003'\n currentMap['name'] = 'concentrationUnit'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00005')\n\n # Attribute SampleComponent.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00006'] = currentMap\n loadMaps['SAM.SampleComponent.details'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00006'\n 
currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00035')\n\n # Attribute SampleComponent.purity\n currentMap = {}\n contentMap['purity'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00005'] = currentMap\n loadMaps['SAM.SampleComponent.purity'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.purity'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00005'\n currentMap['name'] = 'purity'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute SampleComponent.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:43_00001'] = currentMap\n loadMaps['SAM.SampleComponent.serial'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:43_00001'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Role SampleComponent.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role SampleComponent.container\n currentMap = {}\n contentMap['container'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00004'] = currentMap\n loadMaps['SAM.SampleComponent.container'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.container'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00004'\n currentMap['name'] = 'container'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role SampleComponent.contents\n currentMap = {}\n contentMap['contents'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00003'] = currentMap\n loadMaps['SAM.SampleComponent.contents'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.contents'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00003'\n currentMap['name'] = 'contents'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n\n # Role SampleComponent.refComponent\n currentMap = {}\n contentMap['refComponent'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00002'] = currentMap\n loadMaps['SAM.SampleComponent.refComponent'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.refComponent'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00002'\n currentMap['name'] = 'refComponent'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('REFS').get('exolinks')\n # End of SampleComponent\n\n currentMap = abstractTypes.get('SampleComponent')\n aList = ['concDisplayUnit', 'concentration', 'concentrationError', 'concentrationUnit', 'purity', 'serial']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'contents']\n currentMap['simpleAttrs'] = aList\n aList = ['container']\n currentMap['optLinks'] = aList\n aList = ['refComponent', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class SampleStore\n 
currentMap = {}\n abstractTypes['SampleStore'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-05-12:37:24_00008'] = currentMap\n loadMaps['SAM.SampleStore'] = currentMap\n currentMap['tag'] = 'SAM.SampleStore'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-05-12:37:24_00008'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'sampleStores'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Sample.SampleStore\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute SampleStore.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute SampleStore.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute SampleStore.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute SampleStore.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute SampleStore.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute SampleStore.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-05-12:37:28_00006'] = currentMap\n loadMaps['SAM.SampleStore.name'] = currentMap\n currentMap['tag'] = 'SAM.SampleStore.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-05-12:37:28_00006'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role SampleStore.abstractSamples\n currentMap = {}\n contentMap['abstractSamples'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-05-12:37:28_00005'] = currentMap\n loadMaps['SAM.SampleStore.abstractSamples'] = currentMap\n currentMap['tag'] = 'SAM.SampleStore.abstractSamples'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-05-12:37:28_00005'\n currentMap['name'] = 'abstractSamples'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('SAM').get('abstractTypes')\n\n # Role SampleStore.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of SampleStore\n\n currentMap = abstractTypes.get('SampleStore')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['name']\n currentMap['simpleAttrs'] = aList\n aList = ['abstractSamples', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['abstractSamples']\n currentMap['children'] = aList\n\n # Class RefSample\n currentMap = {}\n abstractTypes['RefSample'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00003'] = currentMap\n loadMaps['SAM.RefSample'] = currentMap\n currentMap['tag'] = 'SAM.RefSample'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00003'\n 
currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'abstractSamples'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Sample.RefSample\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefSample.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefSample.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00009')\n\n # Attribute RefSample.ionicStrength\n contentMap['ionicStrength'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00006')\n\n # Attribute RefSample.isActive\n contentMap['isActive'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00008')\n\n # Attribute RefSample.isHazard\n contentMap['isHazard'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00007')\n\n # Attribute RefSample.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00004')\n\n # Attribute RefSample.ph\n contentMap['ph'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00005')\n\n # Attribute RefSample.solvent\n contentMap['solvent'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2012-03-28-17:22:44_00014')\n\n # Role RefSample.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role RefSample.conformings\n currentMap = {}\n contentMap['conformings'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00002'] = currentMap\n loadMaps['SAM.RefSample.conformings'] = currentMap\n currentMap['tag'] = 'SAM.RefSample.conformings'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00002'\n currentMap['name'] = 'conformings'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n\n # Role RefSample.hazardPhrases\n contentMap['hazardPhrases'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00013')\n\n # Role RefSample.refSamplePositions\n currentMap = {}\n contentMap['refSamplePositions'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00010'] = currentMap\n loadMaps['SAM.RefSample.refSamplePositions'] = currentMap\n currentMap['tag'] = 'SAM.RefSample.refSamplePositions'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00010'\n currentMap['name'] = 'refSamplePositions'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = False\n currentMap['content'] = globalMap.get('HOLD').get('exolinks')\n\n # Role RefSample.refSampleSources\n currentMap = {}\n contentMap['refSampleSources'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00008'] = currentMap\n loadMaps['SAM.RefSample.refSampleSources'] = currentMap\n currentMap['tag'] = 'SAM.RefSample.refSampleSources'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00008'\n currentMap['name'] = 'refSampleSources'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('SAM').get('abstractTypes')\n\n # Role RefSample.sampleCategories\n contentMap['sampleCategories'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00003')\n\n # Role RefSample.sampleComponents\n contentMap['sampleComponents'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00001')\n # End of RefSample\n\n currentMap = abstractTypes.get('RefSample')\n aList = 
['ionicStrength', 'isActive', 'isHazard', 'ph']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'name', 'solvent', 'conformings']\n currentMap['simpleAttrs'] = aList\n aList = ['sampleComponents', 'refSampleSources', 'sampleCategories', 'refSamplePositions', 'hazardPhrases', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['refSampleSources', 'sampleComponents']\n currentMap['children'] = aList\n\n # Class Sample\n currentMap = {}\n abstractTypes['Sample'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00001'] = currentMap\n loadMaps['SAM.Sample'] = currentMap\n currentMap['tag'] = 'SAM.Sample'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'abstractSamples'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Sample.Sample\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Sample.amountDisplayUnit\n currentMap = {}\n contentMap['amountDisplayUnit'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00011'] = currentMap\n loadMaps['SAM.Sample.amountDisplayUnit'] = currentMap\n currentMap['tag'] = 'SAM.Sample.amountDisplayUnit'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00011'\n currentMap['name'] = 'amountDisplayUnit'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Sample.amountUnit\n currentMap = {}\n contentMap['amountUnit'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00010'] = currentMap\n loadMaps['SAM.Sample.amountUnit'] = currentMap\n currentMap['tag'] = 'SAM.Sample.amountUnit'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00010'\n currentMap['name'] = 'amountUnit'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00006')\n\n # Attribute Sample.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Sample.batchNum\n currentMap = {}\n contentMap['batchNum'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00013'] = currentMap\n loadMaps['SAM.Sample.batchNum'] = currentMap\n currentMap['tag'] = 'SAM.Sample.batchNum'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00013'\n currentMap['name'] = 'batchNum'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Sample.colPosition\n currentMap = {}\n contentMap['colPosition'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00005'] = currentMap\n loadMaps['SAM.Sample.colPosition'] = currentMap\n currentMap['tag'] = 'SAM.Sample.colPosition'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00005'\n currentMap['name'] = 'colPosition'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Sample.currentAmount\n currentMap = {}\n contentMap['currentAmount'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00009'] = currentMap\n loadMaps['SAM.Sample.currentAmount'] = 
currentMap\n currentMap['tag'] = 'SAM.Sample.currentAmount'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00009'\n currentMap['name'] = 'currentAmount'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Sample.currentAmountFlag\n currentMap = {}\n contentMap['currentAmountFlag'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00012'] = currentMap\n loadMaps['SAM.Sample.currentAmountFlag'] = currentMap\n currentMap['tag'] = 'SAM.Sample.currentAmountFlag'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00012'\n currentMap['name'] = 'currentAmountFlag'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Sample.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00009')\n\n # Attribute Sample.initialAmount\n currentMap = {}\n contentMap['initialAmount'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00008'] = currentMap\n loadMaps['SAM.Sample.initialAmount'] = currentMap\n currentMap['tag'] = 'SAM.Sample.initialAmount'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00008'\n currentMap['name'] = 'initialAmount'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Sample.ionicStrength\n contentMap['ionicStrength'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00006')\n\n # Attribute Sample.isActive\n contentMap['isActive'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00008')\n\n # Attribute Sample.isHazard\n contentMap['isHazard'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00007')\n\n # Attribute Sample.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00004')\n\n # Attribute Sample.ph\n contentMap['ph'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00005')\n\n # Attribute Sample.rowPosition\n currentMap = {}\n contentMap['rowPosition'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00004'] = currentMap\n loadMaps['SAM.Sample.rowPosition'] = currentMap\n currentMap['tag'] = 'SAM.Sample.rowPosition'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00004'\n currentMap['name'] = 'rowPosition'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Sample.solvent\n contentMap['solvent'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2012-03-28-17:22:44_00014')\n\n # Attribute Sample.subPosition\n currentMap = {}\n contentMap['subPosition'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00006'] = currentMap\n loadMaps['SAM.Sample.subPosition'] = currentMap\n currentMap['tag'] = 'SAM.Sample.subPosition'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00006'\n currentMap['name'] = 'subPosition'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Role Sample.access\n contentMap['access'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Sample.hazardPhrases\n contentMap['hazardPhrases'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00013')\n\n # Role Sample.holder\n currentMap = {}\n contentMap['holder'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:27_00003'] = currentMap\n loadMaps['SAM.Sample.holder'] = currentMap\n currentMap['tag'] = 'SAM.Sample.holder'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:27_00003'\n currentMap['name'] = 'holder'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('HOLD').get('exolinks')\n\n # Role Sample.refSample\n currentMap = {}\n contentMap['refSample'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00003'] = currentMap\n loadMaps['SAM.Sample.refSample'] = currentMap\n currentMap['tag'] = 'SAM.Sample.refSample'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00003'\n currentMap['name'] = 'refSample'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role Sample.sampleCategories\n contentMap['sampleCategories'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00003')\n\n # Role Sample.sampleComponents\n contentMap['sampleComponents'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00001')\n # End of Sample\n\n currentMap = abstractTypes.get('Sample')\n aList = ['amountDisplayUnit', 'amountUnit', 'batchNum', 'colPosition', 'currentAmount', 'currentAmountFlag', 'initialAmount', 'ionicStrength', 'isActive', 'isHazard', 'ph', 'rowPosition', 'subPosition']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'name', 'solvent']\n currentMap['simpleAttrs'] = aList\n aList = ['refSample']\n currentMap['optLinks'] = aList\n aList = ['sampleComponents', 'sampleCategories', 'holder', 'hazardPhrases', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['sampleComponents']\n currentMap['children'] = aList\n\n # Class CrystalSample\n currentMap = {}\n abstractTypes['CrystalSample'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00004'] = currentMap\n loadMaps['SAM.CrystalSample'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'abstractSamples'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Sample.CrystalSample\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute CrystalSample.a\n currentMap = {}\n contentMap['a'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00018'] = currentMap\n loadMaps['SAM.CrystalSample.a'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.a'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00018'\n currentMap['name'] = 'a'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.alpha\n currentMap = {}\n contentMap['alpha'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00003'] = currentMap\n loadMaps['SAM.CrystalSample.alpha'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.alpha'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00003'\n currentMap['name'] = 'alpha'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.amountDisplayUnit\n contentMap['amountDisplayUnit'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00011')\n\n # Attribute CrystalSample.amountUnit\n contentMap['amountUnit'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00010')\n\n # Attribute CrystalSample.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute CrystalSample.b\n currentMap = {}\n contentMap['b'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00001'] = currentMap\n loadMaps['SAM.CrystalSample.b'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.b'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00001'\n currentMap['name'] = 'b'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.batchNum\n contentMap['batchNum'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00013')\n\n # Attribute CrystalSample.beta\n currentMap = {}\n contentMap['beta'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00004'] = currentMap\n loadMaps['SAM.CrystalSample.beta'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.beta'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00004'\n currentMap['name'] = 'beta'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.c\n currentMap = {}\n contentMap['c'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00002'] = currentMap\n loadMaps['SAM.CrystalSample.c'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.c'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00002'\n currentMap['name'] = 'c'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.colPosition\n contentMap['colPosition'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00005')\n\n # Attribute CrystalSample.colour\n currentMap = {}\n contentMap['colour'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00014'] = currentMap\n loadMaps['SAM.CrystalSample.colour'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.colour'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00014'\n currentMap['name'] = 'colour'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute CrystalSample.crystalType\n currentMap = {}\n contentMap['crystalType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00013'] = currentMap\n loadMaps['SAM.CrystalSample.crystalType'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.crystalType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00013'\n currentMap['name'] = 'crystalType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 
'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute CrystalSample.currentAmount\n contentMap['currentAmount'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00009')\n\n # Attribute CrystalSample.currentAmountFlag\n contentMap['currentAmountFlag'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00012')\n\n # Attribute CrystalSample.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00009')\n\n # Attribute CrystalSample.gamma\n currentMap = {}\n contentMap['gamma'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00005'] = currentMap\n loadMaps['SAM.CrystalSample.gamma'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.gamma'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00005'\n currentMap['name'] = 'gamma'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.initialAmount\n contentMap['initialAmount'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00008')\n\n # Attribute CrystalSample.ionicStrength\n contentMap['ionicStrength'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00006')\n\n # Attribute CrystalSample.isActive\n contentMap['isActive'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00008')\n\n # Attribute CrystalSample.isHazard\n contentMap['isHazard'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00007')\n\n # Attribute CrystalSample.morphology\n currentMap = {}\n contentMap['morphology'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00012'] = currentMap\n loadMaps['SAM.CrystalSample.morphology'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.morphology'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00012'\n currentMap['name'] = 'morphology'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute CrystalSample.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00004')\n\n # Attribute CrystalSample.ph\n contentMap['ph'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00005')\n\n # Attribute CrystalSample.rowPosition\n contentMap['rowPosition'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00004')\n\n # Attribute CrystalSample.solvent\n contentMap['solvent'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2012-03-28-17:22:44_00014')\n\n # Attribute CrystalSample.spaceGroup\n currentMap = {}\n contentMap['spaceGroup'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00011'] = currentMap\n loadMaps['SAM.CrystalSample.spaceGroup'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.spaceGroup'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00011'\n currentMap['name'] = 'spaceGroup'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute CrystalSample.subPosition\n contentMap['subPosition'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00006')\n\n # Attribute CrystalSample.x\n currentMap = {}\n contentMap['x'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00015'] = currentMap\n 
loadMaps['SAM.CrystalSample.x'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.x'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00015'\n currentMap['name'] = 'x'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.y\n currentMap = {}\n contentMap['y'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00016'] = currentMap\n loadMaps['SAM.CrystalSample.y'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.y'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00016'\n currentMap['name'] = 'y'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.z\n currentMap = {}\n contentMap['z'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00017'] = currentMap\n loadMaps['SAM.CrystalSample.z'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.z'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00017'\n currentMap['name'] = 'z'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Role CrystalSample.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role CrystalSample.hazardPhrases\n contentMap['hazardPhrases'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00013')\n\n # Role CrystalSample.holder\n contentMap['holder'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:23:27_00003')\n\n # Role CrystalSample.refSample\n contentMap['refSample'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00003')\n\n # Role CrystalSample.sampleCategories\n contentMap['sampleCategories'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00003')\n\n # Role CrystalSample.sampleComponents\n contentMap['sampleComponents'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00001')\n # End of CrystalSample\n\n currentMap = abstractTypes.get('CrystalSample')\n aList = ['a', 'alpha', 'amountDisplayUnit', 'amountUnit', 'b', 'batchNum', 'beta', 'c', 'colPosition', 'currentAmount', 'currentAmountFlag', 'gamma', 'initialAmount', 'ionicStrength', 'isActive', 'isHazard', 'ph', 'rowPosition', 'spaceGroup', 'subPosition', 'x', 'y', 'z']\n currentMap['headerAttrs'] = aList\n aList = ['colour', 'crystalType', 'details', 'morphology', 'name', 'solvent']\n currentMap['simpleAttrs'] = aList\n aList = ['refSample']\n currentMap['optLinks'] = aList\n aList = ['sampleComponents', 'sampleCategories', 'holder', 'hazardPhrases', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['sampleComponents']\n currentMap['children'] = aList\n\n # Out-of-package link to RefSampleSource\n currentMap = {}\n exolinks['RefSampleSource'] = currentMap\n loadMaps['SAM.exo-RefSampleSource'] = currentMap\n currentMap['tag'] = 'SAM.exo-RefSampleSource'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00008'\n currentMap['name'] = 'RefSampleSource'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.RefSampleSource\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n 
aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to SampleComponent\n currentMap = {}\n exolinks['SampleComponent'] = currentMap\n loadMaps['SAM.exo-SampleComponent'] = currentMap\n currentMap['tag'] = 'SAM.exo-SampleComponent'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00002'\n currentMap['name'] = 'SampleComponent'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.SampleComponent\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to SampleStore\n currentMap = {}\n exolinks['SampleStore'] = currentMap\n loadMaps['SAM.exo-SampleStore'] = currentMap\n currentMap['tag'] = 'SAM.exo-SampleStore'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-05-12:37:24_00008'\n currentMap['name'] = 'SampleStore'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.SampleStore\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to RefSample\n currentMap = {}\n exolinks['RefSample'] = currentMap\n loadMaps['SAM.exo-RefSample'] = currentMap\n currentMap['tag'] = 'SAM.exo-RefSample'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00003'\n currentMap['name'] = 'RefSample'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.RefSample\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to Sample\n currentMap = {}\n exolinks['Sample'] = currentMap\n loadMaps['SAM.exo-Sample'] = currentMap\n currentMap['tag'] = 'SAM.exo-Sample'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00001'\n currentMap['name'] = 'Sample'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.Sample\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to CrystalSample\n currentMap = {}\n exolinks['CrystalSample'] = currentMap\n loadMaps['SAM.exo-CrystalSample'] = currentMap\n currentMap['tag'] = 'SAM.exo-CrystalSample'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00004'\n currentMap['name'] = 'CrystalSample'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.CrystalSample\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))", "def printRegTranslation():\n print \"[-] Register translation :\"\n for initId in regNamesTable.keys():\n print \"\\t\" + initId + \" <---> R\" + str(regNamesTable[initId])\n print \"\\n\"", "def MakeFunctionMap():\r\n\treturn ExtendFunctionMap({})", "def _generate_dynamic_values(map, 
prefix, index_start, index_end, value_start):\r\n for index in range(index_start, index_end + 1):\r\n name = '%s%s' % (prefix, index)\r\n value = value_start + index - index_start\r\n map[name] = value", "def build_template(self):\n map22_file = os.path.join(self.input_dir, \"data\", \"maps\", \"shipping\", \"map22\", \"map22.bin\")\n character_folder = os.path.join(self.input_dir, \"data\", \"characters\")\n\n map22 = BinFile(map22_file)\n\n character_names = self.parse_character_names(map22)\n sets = self.parse_sets(map22, character_names)\n traits = self.parse_traits(map22)\n champs = self.parse_champs(map22, traits, character_folder)\n output_sets = self.build_output_sets(sets, traits, champs)\n items = self.parse_items(map22)\n\n return {\"sets\": output_sets, \"items\": items}", "def gen_mapping(args, service, weight=None, labels={}):\n prefix = args.prefix\n mapping = {\n \"apiVersion\": \"getambassador.io/v1\",\n \"kind\": \"Mapping\",\n \"metadata\": {\n \"name\": f\"mapping-for-{service}\"\n },\n \"spec\": {\n \"prefix\": prefix,\n \"service\": service\n }\n }\n\n if args.namespace:\n mapping[\"metadata\"][\"namespace\"] = args.namespace\n\n if len(labels) > 0:\n mapping[\"metadata\"][\"labels\"] = labels\n\n if weight:\n mapping[\"spec\"][\"weight\"] = weight\n\n return mapping", "def _gen_registers(self) -> None:\n self.qregisters[\"data\"] = QuantumRegister(\n self.params[\"num_data\"], name=self.name + \"_data\"\n )\n self.qregisters[\"mz\"] = QuantumRegister(\n self.params[\"num_syn\"], name=self.name + \"_mp\"\n )\n self.qregisters[\"ancilla\"] = QuantumRegister(1, name=self.name + \"_ancilla\")", "def registry_key_mapping(self, stix_data):\n\n mapper = {\n 'type': 'Registry Key',\n 'Key Name': '@.key',\n 'confidence': '@.confidence',\n }\n if not stix_data.get('values'):\n return mapper\n\n for i in range(len(stix_data.get('values'))):\n mapper['Value Name'] = f'@.values[{i}].name'\n mapper['Value Type'] = f'@.values[{i}].data_type'\n mapper.setdefault('attribute', []).append(\n {'type': 'Value Data', 'value': f'@.values[{i}].data'}\n )\n return mapper", "def create_add_map_def(n):\n\tdic = {}\n\tfor i in range(n):\n\t\tdic['address_+' + str(i) + \"]\"] = \"[Subnet+\" + str(i)+ \"]\"\n\t\tdic['address_+' + str(i) + \"/mm]\"] = \"[Subnet+\" + str(i)+ \"/mm]\"\n\treturn dic", "def generate_config_map(env, name):\n data = yaml.load(\"\"\"\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: \"\"\ndata: {}\n\"\"\")\n data['metadata']['name'] = name\n data['data'] = env\n return data", "def template_outputs_map(stackname):\n with open(core.stack_path(stackname), 'r') as fh:\n stack = json.load(fh)\n output_map = stack.get('Outputs', [])\n return {output_key: output['Value'] for output_key, output in output_map.items()}", "def register():\n return render_template('dashboard/register.html', tagname = 'register')", "def get_template_map(self, template: PulseTemplate) -> Dict[str, Expression]:\n return self.__get_template_map(template).copy()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate register map template
def create_template(): # register map rmap = RegisterMap() rmap.add_registers(Register('DATA', 'Data register', 0x4).add_bitfields([ BitField("FIFO", "Write to push value to TX FIFO, read to get data from RX FIFO", width=8, lsb=0, access='rw', hardware='q'), BitField("FERR", "Frame error flag. Read to clear.", width=1, lsb=16, access='rolh', hardware='i'), BitField("PERR", "Parity error flag. Read to clear.", width=1, lsb=17, access='rolh', hardware='i'), ])) rmap.add_registers(Register('STAT', 'Status register', 0xC).add_bitfields([ BitField("BUSY", "Transceiver is busy", width=1, lsb=2, access='ro', hardware='ie'), BitField("RXE", "RX FIFO is empty", width=1, lsb=4, access='ro', hardware='i'), BitField("TXF", "TX FIFO is full", width=1, lsb=8, access='ro', hardware='i'), ])) rmap.add_registers(Register('CTRL', 'Control register', 0x10).add_bitfields([ BitField("BAUD", "Baudrate value", width=2, lsb=0, access='rw', hardware='o').add_enums([ EnumValue("B9600", 0, "9600 baud"), EnumValue("B38400", 1, "38400 baud"), EnumValue("B115200", 2, "115200 baud"), ]), BitField("TXEN", "Transmitter enable. Can be disabled by hardware on error.", width=1, lsb=4, access='rw', hardware='oie'), BitField("RXEN", "Receiver enable. Can be disabled by hardware on error.", width=1, lsb=5, access='rw', hardware='oie'), BitField("TXST", "Force transmission start", width=1, lsb=6, access='wosc', hardware='o'), ])) rmap.add_registers(Register('LPMODE', 'Low power mode control', 0x14).add_bitfields([ BitField("DIV", "Clock divider in low power mode", width=8, lsb=0, access='rw', hardware='o'), BitField("EN", "Low power mode enable", width=1, lsb=31, access='rw', hardware='o'), ])) rmap.add_registers(Register('INTSTAT', 'Interrupt status register', 0x20).add_bitfields([ BitField("TX", "Transmitter interrupt flag. Write 1 to clear.", width=1, lsb=0, access='rw1c', hardware='s'), BitField("RX", "Receiver interrupt. Write 1 to clear.", width=1, lsb=1, access='rw1c', hardware='s'), ])) rmap.add_registers(Register('ID', 'IP-core ID register', 0x40).add_bitfields([ BitField("UID", "Unique ID", width=32, lsb=0, access='ro', hardware='f', reset=0xcafe0666), ])) return rmap
[ "def create_template_simple():\n rmap = RegisterMap()\n\n rmap.add_registers(Register('DATA', 'Data register', 0x0).add_bitfields(\n BitField(width=32, access='rw', hardware='ioe')))\n\n rmap.add_registers(Register('CTRL', 'Control register', 0x4).add_bitfields(\n BitField(width=16, access='rw', reset=0x0100, hardware='o')))\n\n rmap.add_registers(Register('STATUS', 'Status register', 0x8).add_bitfields(\n BitField(width=8, access='ro', hardware='i')))\n\n rmap.add_registers(Register('START', 'Start register', 0x100).add_bitfields(\n BitField(width=1, access='wosc', hardware='o')))\n\n return rmap", "def custom_template_map(self):\n return dict(self.custom_template_items)", "def generateRegisterMacro(per, reg, desc, size, offset, acc):\n\tret = generateInfoHeader(desc)\n\tret += generateDefine(\"%s_%s_OFFSET\" % (per, reg), \"(%su)\" % (format(offset, \"#3x\")))\n\tret += generateDefine(\"%s_%s_TYPE\" % (per, reg), generateType(size, acc))\n\tret += generateDefine(\"%s_%s_ADDRESS\" % (per, reg), \"(%s_BASE + %s_%s_OFFSET)\" % (per, per, reg))\n\tret += generateDefine(\"%s_%s\" % (per, reg), \"*((%s_%s_TYPE *)%s_%s_ADDRESS)\\n\" % (per, reg, per, reg))\n\treturn ret", "def register():\n \n return render_template('register.html')", "def build_template(self):\n map22_file = os.path.join(self.input_dir, \"data\", \"maps\", \"shipping\", \"map22\", \"map22.bin\")\n character_folder = os.path.join(self.input_dir, \"data\", \"characters\")\n\n map22 = BinFile(map22_file)\n\n character_names = self.parse_character_names(map22)\n sets = self.parse_sets(map22, character_names)\n traits = self.parse_traits(map22)\n champs = self.parse_champs(map22, traits, character_folder)\n output_sets = self.build_output_sets(sets, traits, champs)\n items = self.parse_items(map22)\n\n return {\"sets\": output_sets, \"items\": items}", "def vpp_show_lisp_map_register(node):\n\n vat = VatExecutor()\n vat.execute_script_json_out('lisp/show_lisp_map_register.vat', node)\n return JsonParser().parse_data(vat.get_script_stdout())", "def _reg_map(self, reg_name):\n\n reg_map = None\n\n if self.architecture == uc.UC_ARCH_MIPS:\n reg_map = {\n \"zero\" : UC_MIPS_REG_ZERO, #=2\n \"at\" : UC_MIPS_REG_AT, #=3\n \"v0\" : UC_MIPS_REG_V0, #=4\n \"v1\" : UC_MIPS_REG_V1, #=5\n \"a0\" : UC_MIPS_REG_A0, #=6\n \"a1\" : UC_MIPS_REG_A1, #=7\n \"a2\" : UC_MIPS_REG_A2, #=8\n \"a3\" : UC_MIPS_REG_A3, #=9\n \"t0\" : UC_MIPS_REG_T0, #=10\n \"t1\" : UC_MIPS_REG_T1, #=11\n \"t2\" : UC_MIPS_REG_T2, #=12\n \"t3\" : UC_MIPS_REG_T3, #=13\n \"t4\" : UC_MIPS_REG_T4, #=14\n \"t5\" : UC_MIPS_REG_T5, #=15\n \"t6\" : UC_MIPS_REG_T6, #=16\n \"t7\" : UC_MIPS_REG_T7, #=17\n \"s0\" : UC_MIPS_REG_S0, #=18\n \"s1\" : UC_MIPS_REG_S1, #=19\n \"s2\" : UC_MIPS_REG_S2, #=20\n \"s3\" : UC_MIPS_REG_S3, #=21\n \"s4\" : UC_MIPS_REG_S4, #=22\n \"s5\" : UC_MIPS_REG_S5, #=23\n \"s6\" : UC_MIPS_REG_S6, #=24\n \"s7\" : UC_MIPS_REG_S7, #=25\n \"t8\" : UC_MIPS_REG_T8, #=26\n \"t9\" : UC_MIPS_REG_T9, #=27\n \"k0\" : UC_MIPS_REG_K0, #=28\n \"k1\" : UC_MIPS_REG_K1, #=29\n \"gp\" : UC_MIPS_REG_GP, #=30\n \"sp\" : UC_MIPS_REG_SP, #=31\n \"fp\" : UC_MIPS_REG_FP, #=32\n #\"s8\" : UC_MIPS_REG_S8, #=32\n \"ra\" : UC_MIPS_REG_RA, #=33\n \"hi\" : UC_MIPS_REG_HI, #= 129\n \"lo\" : UC_MIPS_REG_LO, #= 130\n \"pc\" : UC_MIPS_REG_PC, #= 1\n #UC_MIPS_REG_HI0, #=45\n #UC_MIPS_REG_HI1, #=46\n #UC_MIPS_REG_HI2, #=47\n #UC_MIPS_REG_HI3, #=48\n #UC_MIPS_REG_LO0, #=45\n #UC_MIPS_REG_LO1, #=46\n #UC_MIPS_REG_LO2, #=47\n #UC_MIPS_REG_LO3, #=48\n }\n\n elif self.architecture == uc.UC_ARCH_ARM64:\n 
reg_map = {\n \"r0\" : UC_ARM64_REG_X0, #= 199\n \"r1\" : UC_ARM64_REG_X1, #= 200\n \"r2\" : UC_ARM64_REG_X2, #= 201\n \"r3\" : UC_ARM64_REG_X3, #= 202\n \"r4\" : UC_ARM64_REG_X4, #= 203\n \"r5\" : UC_ARM64_REG_X5, #= 204\n \"r6\" : UC_ARM64_REG_X6, #= 205\n \"r7\" : UC_ARM64_REG_X7, #= 206\n \"r8\" : UC_ARM64_REG_X8, #= 207\n \"r9\" : UC_ARM64_REG_X9, #= 208\n \"r10\" : UC_ARM64_REG_X10, #= 209\n \"r11\" : UC_ARM64_REG_X11, #= 210\n \"r12\" : UC_ARM64_REG_X12, #= 211\n \"r13\" : UC_ARM64_REG_X13, #= 212\n \"r14\" : UC_ARM64_REG_X14, #= 213\n \"r15\" : UC_ARM64_REG_X15, #= 214\n \"r16\" : UC_ARM64_REG_X16, #= 215\n \"r17\" : UC_ARM64_REG_X17, #= 216\n \"r18\" : UC_ARM64_REG_X18, #= 217\n \"r19\" : UC_ARM64_REG_X19, #= 218\n \"r20\" : UC_ARM64_REG_X20, #= 219\n \"r21\" : UC_ARM64_REG_X21, #= 220\n \"r22\" : UC_ARM64_REG_X22, #= 221\n \"r23\" : UC_ARM64_REG_X23, #= 222\n \"r24\" : UC_ARM64_REG_X24, #= 223\n \"r25\" : UC_ARM64_REG_X25, #= 224\n \"r26\" : UC_ARM64_REG_X26, #= 225\n \"r27\" : UC_ARM64_REG_X27, #= 226\n \"r28\" : UC_ARM64_REG_X28, #= 227\n\n \"r29\" : UC_ARM64_REG_X29, #= 1\n \"r30\" : UC_ARM64_REG_X30, #= 2\n\n \"r31\" : UC_ARM64_REG_SP, #= 4\n \"sp\" : UC_ARM64_REG_SP, #= 4\n #\"xzr\" : UC_ARM64_REG_XZR, #= 7\n\n \"pc\" : UC_ARM64_REG_PC, #= 260\n }\n\n elif self.architecture == uc.UC_ARCH_X86:\n if self.mode == uc.UC_MODE_16:\n raise Exception(\"Register map not implemented\")\n elif self.mode == uc.UC_MODE_32:\n raise Exception(\"Register map not implemented\")\n elif self.mode == uc.UC_MODE_64:\n reg_map = {\n \"rax\" : UC_X86_REG_RAX,\n \"rbx\" : UC_X86_REG_RBX,\n \"rcx\" : UC_X86_REG_RCX,\n \"rdx\" : UC_X86_REG_RDX,\n \"rdi\" : UC_X86_REG_RSI,\n \"rsi\" : UC_X86_REG_RDI,\n \"rbp\" : UC_X86_REG_RBP,\n \"rsp\" : UC_X86_REG_RSP,\n \"rip\" : UC_X86_REG_RIP,\n \"r8\" : UC_X86_REG_R8,\n \"r9\" : UC_X86_REG_R9,\n \"r10\" : UC_X86_REG_R10,\n \"r11\" : UC_X86_REG_R11,\n \"r12\" : UC_X86_REG_R12,\n \"r13\" : UC_X86_REG_R13,\n \"r14\" : UC_X86_REG_R14,\n \"r15\" : UC_X86_REG_R15,\n }\n\n elif self.architecture == uc.UC_ARCH_ARM:\n if self.mode == uc.UC_MODE_ARM:\n reg_map = {\n \"r0\" : UC_ARM64_REG_W0, #= 199\n \"r1\" : UC_ARM64_REG_W1, #= 200\n \"r2\" : UC_ARM64_REG_W2, #= 201\n \"r3\" : UC_ARM64_REG_W3, #= 202\n \"r4\" : UC_ARM64_REG_W4, #= 203\n \"r5\" : UC_ARM64_REG_W5, #= 204\n \"r6\" : UC_ARM64_REG_W6, #= 205\n \"r7\" : UC_ARM64_REG_W7, #= 206\n \"r8\" : UC_ARM64_REG_W8, #= 207\n \"r9\" : UC_ARM64_REG_W9, #= 208\n \"r10\" : UC_ARM64_REG_W10, #= 209\n \"r11\" : UC_ARM64_REG_W11, #= 210\n \"r12\" : UC_ARM64_REG_W12, #= 211\n \"r13\" : UC_ARM64_REG_W13, #= 212\n \"r14\" : UC_ARM64_REG_W14, #= 213\n \"r15\" : UC_ARM64_REG_W15, #= 214\n \"r16\" : UC_ARM64_REG_W16, #= 215\n \"r17\" : UC_ARM64_REG_W17, #= 216\n \"r18\" : UC_ARM64_REG_W18, #= 217\n \"r19\" : UC_ARM64_REG_W19, #= 218\n \"r20\" : UC_ARM64_REG_W20, #= 219\n \"r21\" : UC_ARM64_REG_W21, #= 220\n \"r22\" : UC_ARM64_REG_W22, #= 221\n \"r23\" : UC_ARM64_REG_W23, #= 222\n \"r24\" : UC_ARM64_REG_W24, #= 223\n \"r25\" : UC_ARM64_REG_W25, #= 224\n \"r26\" : UC_ARM64_REG_W26, #= 225\n \"r27\" : UC_ARM64_REG_W27, #= 226\n \"r28\" : UC_ARM64_REG_W28, #= 227\n\n \"r29\" : UC_ARM64_REG_W29, #= 1\n \"r30\" : UC_ARM64_REG_W30, #= 2\n\n \"r31\" : UC_ARM64_REG_SP, #= 4\n \"sp\" : UC_ARM64_REG_SP, #= 4\n #\"xzr\" : UC_ARM64_REG_XZR, #= 7\n\n \"pc\" : UC_ARM64_REG_PC, #= 260\n }\n elif self.mode == uc.UC_MODE_THUMB:\n raise Exception(\"Register map for ARM thumb-mode not implemented\")\n\n else:\n raise Exception(\"Register map not 
implemented\")\n\n return reg_map.get(reg_name, 0x11223344)", "def _gen_registers(self) -> None:\n self.qregisters[\"data\"] = QuantumRegister(\n self.params[\"num_data\"], name=self.name + \"_data\"\n )\n self.qregisters[\"mz\"] = QuantumRegister(\n self.params[\"num_syn\"], name=self.name + \"_mp\"\n )\n self.qregisters[\"ancilla\"] = QuantumRegister(1, name=self.name + \"_ancilla\")", "def _generate_dynamic_values(map, prefix, index_start, index_end, value_start):\r\n for index in range(index_start, index_end + 1):\r\n name = '%s%s' % (prefix, index)\r\n value = value_start + index - index_start\r\n map[name] = value", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('SAM').get('abstractTypes')\n exolinks = globalMap.get('SAM').get('exolinks')\n\n # DataType AmountUnit\n currentMap = {}\n abstractTypes['AmountUnit'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00006'] = currentMap\n loadMaps['SAM.AmountUnit'] = currentMap\n currentMap['tag'] = 'SAM.AmountUnit'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00006'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # DataType ConcentrationUnit\n currentMap = {}\n abstractTypes['ConcentrationUnit'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00005'] = currentMap\n loadMaps['SAM.ConcentrationUnit'] = currentMap\n currentMap['tag'] = 'SAM.ConcentrationUnit'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00005'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # DataType Solvent\n currentMap = {}\n abstractTypes['Solvent'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2009-11-19-14:50:32_00005'] = currentMap\n loadMaps['SAM.Solvent'] = currentMap\n currentMap['tag'] = 'SAM.Solvent'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2009-11-19-14:50:32_00005'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class AbstractSample\n currentMap = {}\n abstractTypes['AbstractSample'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00007'] = currentMap\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00007'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'abstractSamples'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Sample.AbstractSample\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AbstractSample.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AbstractSample.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00009'] = currentMap\n loadMaps['SAM.AbstractSample.details'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00009'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00035')\n\n # Attribute AbstractSample.ionicStrength\n currentMap = {}\n contentMap['ionicStrength'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00006'] = currentMap\n loadMaps['SAM.AbstractSample.ionicStrength'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.ionicStrength'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00006'\n currentMap['name'] = 'ionicStrength'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute AbstractSample.isActive\n currentMap = {}\n contentMap['isActive'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00008'] = currentMap\n loadMaps['SAM.AbstractSample.isActive'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.isActive'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00008'\n currentMap['name'] = 'isActive'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AbstractSample.isHazard\n currentMap = {}\n contentMap['isHazard'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00007'] = currentMap\n loadMaps['SAM.AbstractSample.isHazard'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.isHazard'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00007'\n currentMap['name'] = 'isHazard'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AbstractSample.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00004'] = currentMap\n loadMaps['SAM.AbstractSample.name'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00004'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AbstractSample.ph\n currentMap = {}\n contentMap['ph'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00005'] = currentMap\n loadMaps['SAM.AbstractSample.ph'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.ph'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00005'\n currentMap['name'] = 'ph'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute AbstractSample.solvent\n currentMap = {}\n contentMap['solvent'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2012-03-28-17:22:44_00014'] = currentMap\n loadMaps['SAM.AbstractSample.solvent'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.solvent'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2012-03-28-17:22:44_00014'\n currentMap['name'] = 'solvent'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2009-11-19-14:50:32_00005')\n\n # Role AbstractSample.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AbstractSample.hazardPhrases\n currentMap = {}\n contentMap['hazardPhrases'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00013'] = currentMap\n 
loadMaps['SAM.AbstractSample.hazardPhrases'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.hazardPhrases'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00013'\n currentMap['name'] = 'hazardPhrases'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('CLAS').get('exolinks')\n\n # Role AbstractSample.sampleCategories\n currentMap = {}\n contentMap['sampleCategories'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00003'] = currentMap\n loadMaps['SAM.AbstractSample.sampleCategories'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.sampleCategories'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00003'\n currentMap['name'] = 'sampleCategories'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('CLAS').get('exolinks')\n\n # Role AbstractSample.sampleComponents\n currentMap = {}\n contentMap['sampleComponents'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00001'] = currentMap\n loadMaps['SAM.AbstractSample.sampleComponents'] = currentMap\n currentMap['tag'] = 'SAM.AbstractSample.sampleComponents'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00001'\n currentMap['name'] = 'sampleComponents'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('SAM').get('abstractTypes')\n # End of AbstractSample\n\n currentMap = abstractTypes.get('AbstractSample')\n aList = ['ionicStrength', 'isActive', 'isHazard', 'ph']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'name', 'solvent']\n currentMap['simpleAttrs'] = aList\n aList = ['sampleComponents', 'sampleCategories', 'hazardPhrases', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['sampleComponents']\n currentMap['children'] = aList\n\n # Class RefSampleSource\n currentMap = {}\n abstractTypes['RefSampleSource'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00008'] = currentMap\n loadMaps['SAM.RefSampleSource'] = currentMap\n currentMap['tag'] = 'SAM.RefSampleSource'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00008'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'refSampleSources'\n currentMap['objkey'] = 'catalogNum'\n currentMap['class'] = ccp.api.lims.Sample.RefSampleSource\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefSampleSource.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefSampleSource.catalogNum\n currentMap = {}\n contentMap['catalogNum'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00017'] = currentMap\n loadMaps['SAM.RefSampleSource.catalogNum'] = currentMap\n currentMap['tag'] = 'SAM.RefSampleSource.catalogNum'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00017'\n currentMap['name'] = 'catalogNum'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute RefSampleSource.dataPageUrl\n currentMap = {}\n 
contentMap['dataPageUrl'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00018'] = currentMap\n loadMaps['SAM.RefSampleSource.dataPageUrl'] = currentMap\n currentMap['tag'] = 'SAM.RefSampleSource.dataPageUrl'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00018'\n currentMap['name'] = 'dataPageUrl'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Role RefSampleSource.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role RefSampleSource.supplier\n currentMap = {}\n contentMap['supplier'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00016'] = currentMap\n loadMaps['SAM.RefSampleSource.supplier'] = currentMap\n currentMap['tag'] = 'SAM.RefSampleSource.supplier'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00016'\n currentMap['name'] = 'supplier'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('AFFI').get('exolinks')\n # End of RefSampleSource\n\n currentMap = abstractTypes.get('RefSampleSource')\n aList = ['catalogNum', 'dataPageUrl']\n currentMap['simpleAttrs'] = aList\n aList = ['supplier', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class SampleComponent\n currentMap = {}\n abstractTypes['SampleComponent'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00002'] = currentMap\n loadMaps['SAM.SampleComponent'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00002'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'sampleComponents'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccp.api.lims.Sample.SampleComponent\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute SampleComponent.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute SampleComponent.concDisplayUnit\n currentMap = {}\n contentMap['concDisplayUnit'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00004'] = currentMap\n loadMaps['SAM.SampleComponent.concDisplayUnit'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.concDisplayUnit'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00004'\n currentMap['name'] = 'concDisplayUnit'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute SampleComponent.concentration\n currentMap = {}\n contentMap['concentration'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00001'] = currentMap\n loadMaps['SAM.SampleComponent.concentration'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.concentration'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00001'\n currentMap['name'] = 'concentration'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute SampleComponent.concentrationError\n currentMap = {}\n contentMap['concentrationError'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00002'] = currentMap\n loadMaps['SAM.SampleComponent.concentrationError'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.concentrationError'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00002'\n currentMap['name'] = 'concentrationError'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute SampleComponent.concentrationUnit\n currentMap = {}\n contentMap['concentrationUnit'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00003'] = currentMap\n loadMaps['SAM.SampleComponent.concentrationUnit'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.concentrationUnit'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00003'\n currentMap['name'] = 'concentrationUnit'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00005')\n\n # Attribute SampleComponent.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00006'] = currentMap\n loadMaps['SAM.SampleComponent.details'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00006'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00035')\n\n # Attribute SampleComponent.purity\n currentMap = {}\n contentMap['purity'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00005'] = currentMap\n loadMaps['SAM.SampleComponent.purity'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.purity'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00005'\n currentMap['name'] = 'purity'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute SampleComponent.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:43_00001'] = currentMap\n loadMaps['SAM.SampleComponent.serial'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:43_00001'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Role SampleComponent.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role SampleComponent.container\n currentMap = {}\n contentMap['container'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00004'] = currentMap\n loadMaps['SAM.SampleComponent.container'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.container'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00004'\n currentMap['name'] = 'container'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role SampleComponent.contents\n currentMap = {}\n contentMap['contents'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00003'] = currentMap\n loadMaps['SAM.SampleComponent.contents'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.contents'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00003'\n currentMap['name'] = 'contents'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n\n # Role SampleComponent.refComponent\n currentMap = {}\n contentMap['refComponent'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00002'] = currentMap\n loadMaps['SAM.SampleComponent.refComponent'] = currentMap\n currentMap['tag'] = 'SAM.SampleComponent.refComponent'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:42_00002'\n currentMap['name'] = 'refComponent'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('REFS').get('exolinks')\n # End of SampleComponent\n\n currentMap = abstractTypes.get('SampleComponent')\n aList = ['concDisplayUnit', 'concentration', 'concentrationError', 'concentrationUnit', 'purity', 'serial']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'contents']\n currentMap['simpleAttrs'] = aList\n aList = ['container']\n currentMap['optLinks'] = aList\n aList = ['refComponent', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class SampleStore\n currentMap = {}\n abstractTypes['SampleStore'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-05-12:37:24_00008'] = currentMap\n loadMaps['SAM.SampleStore'] = currentMap\n currentMap['tag'] = 'SAM.SampleStore'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-05-12:37:24_00008'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'sampleStores'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Sample.SampleStore\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute SampleStore.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute SampleStore.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute SampleStore.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute SampleStore.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute SampleStore.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute SampleStore.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-05-12:37:28_00006'] = currentMap\n loadMaps['SAM.SampleStore.name'] = currentMap\n currentMap['tag'] = 'SAM.SampleStore.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-05-12:37:28_00006'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role SampleStore.abstractSamples\n currentMap = {}\n contentMap['abstractSamples'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-05-12:37:28_00005'] = currentMap\n loadMaps['SAM.SampleStore.abstractSamples'] = currentMap\n currentMap['tag'] = 'SAM.SampleStore.abstractSamples'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-05-12:37:28_00005'\n currentMap['name'] = 'abstractSamples'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('SAM').get('abstractTypes')\n\n # Role SampleStore.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of SampleStore\n\n currentMap = abstractTypes.get('SampleStore')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['name']\n currentMap['simpleAttrs'] = aList\n aList = ['abstractSamples', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['abstractSamples']\n currentMap['children'] = aList\n\n # Class RefSample\n currentMap = {}\n abstractTypes['RefSample'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00003'] = currentMap\n loadMaps['SAM.RefSample'] = currentMap\n currentMap['tag'] = 'SAM.RefSample'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00003'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'abstractSamples'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Sample.RefSample\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefSample.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefSample.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00009')\n\n # Attribute RefSample.ionicStrength\n contentMap['ionicStrength'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00006')\n\n # Attribute RefSample.isActive\n contentMap['isActive'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00008')\n\n # Attribute RefSample.isHazard\n contentMap['isHazard'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00007')\n\n # Attribute RefSample.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00004')\n\n # Attribute RefSample.ph\n contentMap['ph'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00005')\n\n # Attribute RefSample.solvent\n contentMap['solvent'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2012-03-28-17:22:44_00014')\n\n # Role RefSample.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role RefSample.conformings\n currentMap = {}\n contentMap['conformings'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00002'] = currentMap\n loadMaps['SAM.RefSample.conformings'] = currentMap\n currentMap['tag'] = 'SAM.RefSample.conformings'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00002'\n currentMap['name'] = 'conformings'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n\n # Role RefSample.hazardPhrases\n contentMap['hazardPhrases'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00013')\n\n # Role RefSample.refSamplePositions\n 
currentMap = {}\n contentMap['refSamplePositions'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00010'] = currentMap\n loadMaps['SAM.RefSample.refSamplePositions'] = currentMap\n currentMap['tag'] = 'SAM.RefSample.refSamplePositions'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00010'\n currentMap['name'] = 'refSamplePositions'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = False\n currentMap['content'] = globalMap.get('HOLD').get('exolinks')\n\n # Role RefSample.refSampleSources\n currentMap = {}\n contentMap['refSampleSources'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00008'] = currentMap\n loadMaps['SAM.RefSample.refSampleSources'] = currentMap\n currentMap['tag'] = 'SAM.RefSample.refSampleSources'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00008'\n currentMap['name'] = 'refSampleSources'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('SAM').get('abstractTypes')\n\n # Role RefSample.sampleCategories\n contentMap['sampleCategories'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00003')\n\n # Role RefSample.sampleComponents\n contentMap['sampleComponents'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00001')\n # End of RefSample\n\n currentMap = abstractTypes.get('RefSample')\n aList = ['ionicStrength', 'isActive', 'isHazard', 'ph']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'name', 'solvent', 'conformings']\n currentMap['simpleAttrs'] = aList\n aList = ['sampleComponents', 'refSampleSources', 'sampleCategories', 'refSamplePositions', 'hazardPhrases', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['refSampleSources', 'sampleComponents']\n currentMap['children'] = aList\n\n # Class Sample\n currentMap = {}\n abstractTypes['Sample'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00001'] = currentMap\n loadMaps['SAM.Sample'] = currentMap\n currentMap['tag'] = 'SAM.Sample'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'abstractSamples'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Sample.Sample\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Sample.amountDisplayUnit\n currentMap = {}\n contentMap['amountDisplayUnit'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00011'] = currentMap\n loadMaps['SAM.Sample.amountDisplayUnit'] = currentMap\n currentMap['tag'] = 'SAM.Sample.amountDisplayUnit'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00011'\n currentMap['name'] = 'amountDisplayUnit'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Sample.amountUnit\n currentMap = {}\n contentMap['amountUnit'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00010'] = currentMap\n loadMaps['SAM.Sample.amountUnit'] = currentMap\n currentMap['tag'] = 'SAM.Sample.amountUnit'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00010'\n currentMap['name'] = 'amountUnit'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n 
currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00006')\n\n # Attribute Sample.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Sample.batchNum\n currentMap = {}\n contentMap['batchNum'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00013'] = currentMap\n loadMaps['SAM.Sample.batchNum'] = currentMap\n currentMap['tag'] = 'SAM.Sample.batchNum'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00013'\n currentMap['name'] = 'batchNum'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Sample.colPosition\n currentMap = {}\n contentMap['colPosition'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00005'] = currentMap\n loadMaps['SAM.Sample.colPosition'] = currentMap\n currentMap['tag'] = 'SAM.Sample.colPosition'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00005'\n currentMap['name'] = 'colPosition'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Sample.currentAmount\n currentMap = {}\n contentMap['currentAmount'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00009'] = currentMap\n loadMaps['SAM.Sample.currentAmount'] = currentMap\n currentMap['tag'] = 'SAM.Sample.currentAmount'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00009'\n currentMap['name'] = 'currentAmount'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Sample.currentAmountFlag\n currentMap = {}\n contentMap['currentAmountFlag'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00012'] = currentMap\n loadMaps['SAM.Sample.currentAmountFlag'] = currentMap\n currentMap['tag'] = 'SAM.Sample.currentAmountFlag'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00012'\n currentMap['name'] = 'currentAmountFlag'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Sample.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00009')\n\n # Attribute Sample.initialAmount\n currentMap = {}\n contentMap['initialAmount'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00008'] = currentMap\n loadMaps['SAM.Sample.initialAmount'] = currentMap\n currentMap['tag'] = 'SAM.Sample.initialAmount'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00008'\n currentMap['name'] = 'initialAmount'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Sample.ionicStrength\n contentMap['ionicStrength'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00006')\n\n # Attribute Sample.isActive\n contentMap['isActive'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00008')\n\n # Attribute Sample.isHazard\n contentMap['isHazard'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00007')\n\n # Attribute Sample.name\n 
contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00004')\n\n # Attribute Sample.ph\n contentMap['ph'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00005')\n\n # Attribute Sample.rowPosition\n currentMap = {}\n contentMap['rowPosition'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00004'] = currentMap\n loadMaps['SAM.Sample.rowPosition'] = currentMap\n currentMap['tag'] = 'SAM.Sample.rowPosition'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00004'\n currentMap['name'] = 'rowPosition'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Sample.solvent\n contentMap['solvent'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2012-03-28-17:22:44_00014')\n\n # Attribute Sample.subPosition\n currentMap = {}\n contentMap['subPosition'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00006'] = currentMap\n loadMaps['SAM.Sample.subPosition'] = currentMap\n currentMap['tag'] = 'SAM.Sample.subPosition'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00006'\n currentMap['name'] = 'subPosition'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Role Sample.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Sample.hazardPhrases\n contentMap['hazardPhrases'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00013')\n\n # Role Sample.holder\n currentMap = {}\n contentMap['holder'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:27_00003'] = currentMap\n loadMaps['SAM.Sample.holder'] = currentMap\n currentMap['tag'] = 'SAM.Sample.holder'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:27_00003'\n currentMap['name'] = 'holder'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('HOLD').get('exolinks')\n\n # Role Sample.refSample\n currentMap = {}\n contentMap['refSample'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00003'] = currentMap\n loadMaps['SAM.Sample.refSample'] = currentMap\n currentMap['tag'] = 'SAM.Sample.refSample'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00003'\n currentMap['name'] = 'refSample'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role Sample.sampleCategories\n contentMap['sampleCategories'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00003')\n\n # Role Sample.sampleComponents\n contentMap['sampleComponents'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00001')\n # End of Sample\n\n currentMap = abstractTypes.get('Sample')\n aList = ['amountDisplayUnit', 'amountUnit', 'batchNum', 'colPosition', 'currentAmount', 'currentAmountFlag', 'initialAmount', 'ionicStrength', 'isActive', 'isHazard', 'ph', 'rowPosition', 'subPosition']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'name', 'solvent']\n currentMap['simpleAttrs'] = aList\n aList = ['refSample']\n currentMap['optLinks'] = aList\n aList = ['sampleComponents', 'sampleCategories', 'holder', 'hazardPhrases', 'access', 
'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['sampleComponents']\n currentMap['children'] = aList\n\n # Class CrystalSample\n currentMap = {}\n abstractTypes['CrystalSample'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00004'] = currentMap\n loadMaps['SAM.CrystalSample'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'abstractSamples'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Sample.CrystalSample\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute CrystalSample.a\n currentMap = {}\n contentMap['a'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00018'] = currentMap\n loadMaps['SAM.CrystalSample.a'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.a'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00018'\n currentMap['name'] = 'a'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.alpha\n currentMap = {}\n contentMap['alpha'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00003'] = currentMap\n loadMaps['SAM.CrystalSample.alpha'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.alpha'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00003'\n currentMap['name'] = 'alpha'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.amountDisplayUnit\n contentMap['amountDisplayUnit'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00011')\n\n # Attribute CrystalSample.amountUnit\n contentMap['amountUnit'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00010')\n\n # Attribute CrystalSample.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute CrystalSample.b\n currentMap = {}\n contentMap['b'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00001'] = currentMap\n loadMaps['SAM.CrystalSample.b'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.b'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00001'\n currentMap['name'] = 'b'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.batchNum\n contentMap['batchNum'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00013')\n\n # Attribute CrystalSample.beta\n currentMap = {}\n contentMap['beta'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00004'] = currentMap\n loadMaps['SAM.CrystalSample.beta'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.beta'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00004'\n currentMap['name'] = 'beta'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.c\n currentMap = {}\n contentMap['c'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00002'] = currentMap\n loadMaps['SAM.CrystalSample.c'] = currentMap\n 
currentMap['tag'] = 'SAM.CrystalSample.c'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00002'\n currentMap['name'] = 'c'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.colPosition\n contentMap['colPosition'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00005')\n\n # Attribute CrystalSample.colour\n currentMap = {}\n contentMap['colour'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00014'] = currentMap\n loadMaps['SAM.CrystalSample.colour'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.colour'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00014'\n currentMap['name'] = 'colour'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute CrystalSample.crystalType\n currentMap = {}\n contentMap['crystalType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00013'] = currentMap\n loadMaps['SAM.CrystalSample.crystalType'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.crystalType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00013'\n currentMap['name'] = 'crystalType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute CrystalSample.currentAmount\n contentMap['currentAmount'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00009')\n\n # Attribute CrystalSample.currentAmountFlag\n contentMap['currentAmountFlag'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00012')\n\n # Attribute CrystalSample.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00009')\n\n # Attribute CrystalSample.gamma\n currentMap = {}\n contentMap['gamma'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00005'] = currentMap\n loadMaps['SAM.CrystalSample.gamma'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.gamma'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:45_00005'\n currentMap['name'] = 'gamma'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.initialAmount\n contentMap['initialAmount'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00008')\n\n # Attribute CrystalSample.ionicStrength\n contentMap['ionicStrength'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00006')\n\n # Attribute CrystalSample.isActive\n contentMap['isActive'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00008')\n\n # Attribute CrystalSample.isHazard\n contentMap['isHazard'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00007')\n\n # Attribute CrystalSample.morphology\n currentMap = {}\n contentMap['morphology'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00012'] = currentMap\n loadMaps['SAM.CrystalSample.morphology'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.morphology'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00012'\n currentMap['name'] = 'morphology'\n currentMap['hicard'] = 
1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute CrystalSample.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00004')\n\n # Attribute CrystalSample.ph\n contentMap['ph'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00005')\n\n # Attribute CrystalSample.rowPosition\n contentMap['rowPosition'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00004')\n\n # Attribute CrystalSample.solvent\n contentMap['solvent'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2012-03-28-17:22:44_00014')\n\n # Attribute CrystalSample.spaceGroup\n currentMap = {}\n contentMap['spaceGroup'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00011'] = currentMap\n loadMaps['SAM.CrystalSample.spaceGroup'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.spaceGroup'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00011'\n currentMap['name'] = 'spaceGroup'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute CrystalSample.subPosition\n contentMap['subPosition'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00006')\n\n # Attribute CrystalSample.x\n currentMap = {}\n contentMap['x'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00015'] = currentMap\n loadMaps['SAM.CrystalSample.x'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.x'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00015'\n currentMap['name'] = 'x'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.y\n currentMap = {}\n contentMap['y'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00016'] = currentMap\n loadMaps['SAM.CrystalSample.y'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.y'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00016'\n currentMap['name'] = 'y'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute CrystalSample.z\n currentMap = {}\n contentMap['z'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00017'] = currentMap\n loadMaps['SAM.CrystalSample.z'] = currentMap\n currentMap['tag'] = 'SAM.CrystalSample.z'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:44_00017'\n currentMap['name'] = 'z'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Role CrystalSample.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role CrystalSample.hazardPhrases\n contentMap['hazardPhrases'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00013')\n\n # Role CrystalSample.holder\n contentMap['holder'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:23:27_00003')\n\n # Role CrystalSample.refSample\n contentMap['refSample'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:41_00003')\n\n # Role CrystalSample.sampleCategories\n contentMap['sampleCategories'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00003')\n\n # Role 
CrystalSample.sampleComponents\n contentMap['sampleComponents'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-18:22:46_00001')\n # End of CrystalSample\n\n currentMap = abstractTypes.get('CrystalSample')\n aList = ['a', 'alpha', 'amountDisplayUnit', 'amountUnit', 'b', 'batchNum', 'beta', 'c', 'colPosition', 'currentAmount', 'currentAmountFlag', 'gamma', 'initialAmount', 'ionicStrength', 'isActive', 'isHazard', 'ph', 'rowPosition', 'spaceGroup', 'subPosition', 'x', 'y', 'z']\n currentMap['headerAttrs'] = aList\n aList = ['colour', 'crystalType', 'details', 'morphology', 'name', 'solvent']\n currentMap['simpleAttrs'] = aList\n aList = ['refSample']\n currentMap['optLinks'] = aList\n aList = ['sampleComponents', 'sampleCategories', 'holder', 'hazardPhrases', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['sampleComponents']\n currentMap['children'] = aList\n\n # Out-of-package link to RefSampleSource\n currentMap = {}\n exolinks['RefSampleSource'] = currentMap\n loadMaps['SAM.exo-RefSampleSource'] = currentMap\n currentMap['tag'] = 'SAM.exo-RefSampleSource'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00008'\n currentMap['name'] = 'RefSampleSource'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.RefSampleSource\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to SampleComponent\n currentMap = {}\n exolinks['SampleComponent'] = currentMap\n loadMaps['SAM.exo-SampleComponent'] = currentMap\n currentMap['tag'] = 'SAM.exo-SampleComponent'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00002'\n currentMap['name'] = 'SampleComponent'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.SampleComponent\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to SampleStore\n currentMap = {}\n exolinks['SampleStore'] = currentMap\n loadMaps['SAM.exo-SampleStore'] = currentMap\n currentMap['tag'] = 'SAM.exo-SampleStore'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-05-12:37:24_00008'\n currentMap['name'] = 'SampleStore'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.SampleStore\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to RefSample\n currentMap = {}\n exolinks['RefSample'] = currentMap\n loadMaps['SAM.exo-RefSample'] = currentMap\n currentMap['tag'] = 'SAM.exo-RefSample'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00003'\n currentMap['name'] = 'RefSample'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.RefSample\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to Sample\n currentMap = 
{}\n exolinks['Sample'] = currentMap\n loadMaps['SAM.exo-Sample'] = currentMap\n currentMap['tag'] = 'SAM.exo-Sample'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00001'\n currentMap['name'] = 'Sample'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.Sample\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to CrystalSample\n currentMap = {}\n exolinks['CrystalSample'] = currentMap\n loadMaps['SAM.exo-CrystalSample'] = currentMap\n currentMap['tag'] = 'SAM.exo-CrystalSample'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00004'\n currentMap['name'] = 'CrystalSample'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Sample.CrystalSample\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))", "def template_outputs_map(stackname):\n with open(core.stack_path(stackname), 'r') as fh:\n stack = json.load(fh)\n output_map = stack.get('Outputs', [])\n return {output_key: output['Value'] for output_key, output in output_map.items()}", "def register():\n return render_template('dashboard/register.html', tagname = 'register')", "def printRegTranslation():\n print \"[-] Register translation :\"\n for initId in regNamesTable.keys():\n print \"\\t\" + initId + \" <---> R\" + str(regNamesTable[initId])\n print \"\\n\"", "def generate_config_map(env, name):\n data = yaml.load(\"\"\"\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: \"\"\ndata: {}\n\"\"\")\n data['metadata']['name'] = name\n data['data'] = env\n return data", "def writeToMap(self):\n pass", "def registry_key_mapping(self, stix_data):\n\n mapper = {\n 'type': 'Registry Key',\n 'Key Name': '@.key',\n 'confidence': '@.confidence',\n }\n if not stix_data.get('values'):\n return mapper\n\n for i in range(len(stix_data.get('values'))):\n mapper['Value Name'] = f'@.values[{i}].name'\n mapper['Value Type'] = f'@.values[{i}].data_type'\n mapper.setdefault('attribute', []).append(\n {'type': 'Value Data', 'value': f'@.values[{i}].data'}\n )\n return mapper", "def gen_mapping(args, service, weight=None, labels={}):\n prefix = args.prefix\n mapping = {\n \"apiVersion\": \"getambassador.io/v1\",\n \"kind\": \"Mapping\",\n \"metadata\": {\n \"name\": f\"mapping-for-{service}\"\n },\n \"spec\": {\n \"prefix\": prefix,\n \"service\": service\n }\n }\n\n if args.namespace:\n mapping[\"metadata\"][\"namespace\"] = args.namespace\n\n if len(labels) > 0:\n mapping[\"metadata\"][\"labels\"] = labels\n\n if weight:\n mapping[\"spec\"][\"weight\"] = weight\n\n return mapping", "def get_template_map(self, template: PulseTemplate) -> Dict[str, Expression]:\n return self.__get_template_map(template).copy()", "def create_control_map(self, inputs):\n for name, info in inputs.items():\n topic = '/'.join([info['topic'], info['field']])\n self.control_topic_map[topic] = name\n self.control_map[name] = {\n 'value': 0,\n 'nextSampleTime': 1,\n 'enable': False\n }\n self.controls_list_master = set(self.control_map.keys())\n self.controls_list = list(self.controls_list_master)\n log.debug('Control map %s', self.control_map)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return CDD superfamilies (clans)
def get_clans(cddid: str, fam2supfam: str) -> list[Clan]: superfamilies = {} families = set() if cddid.lower().endswith(".gz"): fh = gzip.open(cddid, "rt") else: fh = open(cddid, "rt") for line in fh: fields = line.rstrip().split("\t") accession = fields[1] name = fields[2] descr = fields[3].lstrip("N/A. ") if re.match(r"cl\d+", accession): superfamilies[accession] = Clan(accession, name, descr) elif re.match(r"cd\d+", accession): families.add(accession) fh.close() with open(fam2supfam, "rt") as fh: for line in fh: line = line.rstrip() if not line: continue fields = line.split("\t") family_acc = fields[0] supfam_acc = fields[2] if supfam_acc in superfamilies and family_acc in families: superfamilies[supfam_acc].members.append({ "accession": family_acc, "score": 1 }) return list(superfamilies.values())
[ "def super_categories(self):\n return [BasesOfQSymOrNCSF(self.base()).Commutative()]", "def super_concepts_of(self, cnl, direct=False):\n return cognipy_call(self._uid, \"GetSuperConceptsOf\", cnl, direct)", "def table_father_not_concordant():\n pass", "def _direct_superclasses(self):\n return self._directly_connected(rdflib.RDFS.subPropertyOf,\n blacklist=BLACKLIST)", "def sub_family(self):\n return self._sub_family", "def get_domain_for_each_sf(sccs_list, scop, astral):\n \n # Bio.SCOP actually doesn't seem to have a facility to look up by\n # sccs so we'll build a dictionary ourselves of all superfamilies\n # keyed by sccs\n all_superfamilies = scop.getRoot().getDescendents('sf')\n sccs_dict = dict([(sf.sccs, sf) for sf in all_superfamilies])\n\n domain_sids = []\n for sccs in sccs_list:\n sf = sccs_dict[sccs]\n domain_list = [ dom for dom in sf.getDescendents('domain')\n if astral.isDomainInId(dom, 95) ]\n# sys.stderr.write('xxx ' + str(domain_list))\n if len(domain_list) > 0:\n domain = random.choice(domain_list)\n domain_sids.append(domain.sid)\n \n return domain_sids", "def get_clade_distribution(self):\n counts = defaultdict(lambda: defaultdict(lambda: 0))\n\n for family in self.families:\n if family.clade is not None:\n for s in family.sequences:\n counts[s.species_id][family.clade_id] += 1\n\n return counts", "def agglomerative_hierarchical(self):\r\n self.hac = sc_alg.AgglomerativeClustering(n_clusters=3, affinity='euclidean', \r\n linkage='ward', compute_full_tree=False).fit(self.val_hist)\r\n\r\n \r\n self.visualization_clusters(self._hac)\r\n \r\n print(\"labels from HAC:\\n{0}\".format(self.hac.labels_)) \r\n \r\n return self.kmeans.labels_", "def to_coco(self):\n for cid, node in self.id_to_node.items():\n # Skip if background already added\n cat = {\n 'id': cid,\n 'name': node,\n }\n parents = list(self.graph.predecessors(node))\n if len(parents) == 1:\n cat['supercategory'] = parents[0]\n else:\n if len(parents) > 1:\n raise Exception('not a tree')\n yield cat", "def _get_merged_child_type_cdfs(self, da):\n # get all nodes occurring in training data items containing the DAIs from the current DA\n merged_counts = defaultdict(Counter)\n for dai in da:\n try:\n for parent_id in self.child_type_counts[dai]:\n merged_counts[parent_id].update(self.child_type_counts[dai][parent_id])\n except KeyError:\n log_warn('DAI ' + unicode(dai) + ' unknown, adding nothing to CDF.')\n\n# log_info('Node types: %d' % sum(len(c.keys()) for c in merged_counts.values()))\n\n # remove nodes that are not compatible with the current DA (their list of\n # minimum compatibility DAIs is not similar to the current DA)\n for _, counts in merged_counts.items():\n for node in counts.keys():\n if not self._compatible(da, NodeData(t_lemma=node[1], formeme=node[0])):\n del counts[node]\n\n# log_info('Node types after pruning: %d' % sum(len(c.keys()) for c in merged_counts.values()))\n# log_info('Compatible lemmas: %s' % ' '.join(set([n[1] for c in merged_counts.values()\n# for n in c.keys()])))\n\n return self.cdfs_from_counts(merged_counts)", "def get_bbs_by_organicity(self):\n logger.info('Collapse clusters in grouped superclusters')\n if not hasattr(self,'cluster_organicity'):\n self.detect_organicity()\n stop = False\n while not stop:\n stop = True\n # find external bonds\n cluster_conn = self.cluster_conn()\n # find out if organic clusters are bonded to other organic clusters\n for i, c in enumerate(cluster_conn):\n if self.cluster_organicity[i] == True:\n for b in c:\n if self.cluster_organicity[b] 
== True:\n # and if they are, create new clusters which contain everything the first clusters contained\n self.clusters.append(self.clusters[i] + self.clusters[b])\n # and then remove those clusters\n if i > b:\n del self.clusters[i]\n del self.clusters[b]\n if i < b:\n del self.clusters[i]\n stop = False\n # recompute organicity, because of the changed indices\n self.detect_organicity()\n break\n if not stop:\n break\n return self.clusters", "def hierachical_clustering(df, distanceCut = 2):\n\n # distance matrix\n # print (df.values[:2, 1:5])\n # Y = pdist(df.values[:, 1:], 'correlation')\n Y = pdist(df.values, 'correlation')\n print(df.shape, Y.shape)\n\n # linkage matrix\n Z = linkage(Y, method='ward')\n Clus = fcluster(Z, distanceCut, criterion='distance')\n\n print(Clus) # This is cluster number for each row in df\n\n number_features, number_clusters = len(Clus), len(set(list(Clus)))\n print(\"number of features: \", number_features)\n print(\"number of communities: \", number_clusters)\n\n # Compile clusters\n ClusDict = {}\n for ii in range(number_features):\n # if ClusDict.has_key(Clus[ii]):\n if Clus[ii] in ClusDict:\n ClusDict[ Clus[ii] ].append(ii)\n else:\n ClusDict[ Clus[ii] ] = [ii]\n\n #print(ClusDict.items()[:3]) # This organizes cluster, members\n return Clus, ClusDict", "def construct_superclass_mapping(fine_labels, super_labels):\n pairs = set(zip(fine_labels, super_labels))\n class_mapping = [s_label for _, s_label in sorted(pairs, key=lambda x: x[0])]\n return class_mapping", "def get_basis_family_names(self):\n from aiida.orm import Group\n\n return [\n _.name for _ in Group.query(\n nodes=self, type_string=self.basisfamily_type_string)\n ]", "def make_coco_categories():\n cats = []\n for i, bdd_class in enumerate(BDD_CLASSES):\n cat = {\n \"supercategory\": 'none',\n \"id\": i + 1,\n \"name\": bdd_class\n }\n cats.append(cat)\n return cats", "def get_category_via_superclass(graph: nx.MultiDiGraph, curie: str, load_ontology: bool = True) -> Set[str]:\n logging.debug(\"curie: {}\".format(curie))\n new_categories = []\n toolkit = get_toolkit()\n if PrefixManager.is_curie(curie):\n ancestors = get_ancestors(graph, curie, relations=['subclass_of'])\n if len(ancestors) == 0 and load_ontology:\n cls = get_curie_lookup_service()\n ontology_graph = cls.ontology_graph\n new_categories += [x for x in get_category_via_superclass(ontology_graph, curie, False)]\n logging.debug(\"Ancestors for CURIE {} via subClassOf: {}\".format(curie, ancestors))\n seen = []\n for anc in ancestors:\n mapping = toolkit.get_by_mapping(anc)\n seen.append(anc)\n if mapping:\n # there is direct mapping to BioLink Model\n logging.debug(\"Ancestor {} mapped to {}\".format(anc, mapping))\n seen_labels = [graph.nodes[x]['name'] for x in seen if 'name' in graph.nodes[x]]\n new_categories += [x for x in seen_labels]\n new_categories += [x for x in toolkit.ancestors(mapping)]\n break\n return set(new_categories)", "def _apply_hierarchy(self, cc_dct, age, sex):\n cc_lst_all = []\n for dx, cc_lst in cc_dct.items():\n cc_lst_all += [cc for cc in cc_lst if cc != \"HCCNA\"]\n cc_cnt = Counter(set(cc_lst_all))\n \n for k, v in self.hier.items():\n if k in cc_cnt:\n for v_i in v:\n cc_cnt[v_i] -= 1\n cc_lst_unique = [k for k, v in cc_cnt.items() if v > 0]\n return cc_lst_unique", "def get_classification_hierarchy_levels(self):\n return [l.name for l in self.class_hr]", "def get_superclass(wikidata_pageID):\n query = \"\"\"\n SELECT ?descendant ?descendantLabel\n WHERE\n {\n wd:\"\"\" + wikidata_pageID + \"\"\" 
wdt:P279 ?descendant.\n SERVICE wikibase:label { bd:serviceParam wikibase:language \"[AUTO_LANGUAGE]\". }\n }\n \"\"\"\n DATA = wikidata_query(query)\n superclass_IDs = [DATA['results']['bindings'][i]['descendantLabel']['value'] for i in range(len(DATA['results']['bindings']))]\n\n return [get_enLabel(super_id) for super_id in superclass_IDs]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test display at origin
def test_display_at_origin(self): Base._Base__nb_object = 0 r1 = Square(2) my_stdout = StringIO() sys.stdout = my_stdout r1.display() sys.stdout = sys.__stdout__ expected = "##\n##\n" self.assertEqual(expected, my_stdout.getvalue())
[ "def test_display_not_at_origin(self):\n Base._Base__nb_object = 0\n r1 = Square(2, 1, 1, 2)\n my_stdout = StringIO()\n sys.stdout = my_stdout\n r1.display()\n sys.stdout = sys.__stdout__\n expected = \"\\n ##\\n ##\\n\"\n self.assertEqual(expected, my_stdout.getvalue())", "def GetDisplayPoint(self):\n ...", "def test_display_player_location(self):\n self.Player1.character = \"Miss Scarlet\"\n self.board.display_player_location(self.Player1)", "def test_display_coords(self):\n with patch('sys.stdout', new=StringIO()) as fake_out:\n r1 = Square(2, 1, 1)\n r1.display()\n self.assertEqual(fake_out.getvalue(), \"\\n ##\\n ##\\n\")\n with patch('sys.stdout', new=StringIO()) as fake_out:\n r2 = Square(1, 4, 4)\n r2.display()\n self.assertEqual(fake_out.getvalue(), \"\\n\\n\\n\\n #\\n\")", "def in_screen(self, coord):\n\t\treturn coord.x >= 0 and coord.x < self.width and coord.y >= 0 and coord.y < self.height", "def test_display_method_w_coordinates(self):\n output = io.StringIO()\n sys.stdout = output\n sq9 = Square(2, x=1, y=1)\n sq9.display()\n sys.stdout = sys.__stdout__\n self.assertEqual(output.getvalue(), \"\\n ##\\n ##\\n\")", "def test_display_nocoords(self):\n with patch('sys.stdout', new=StringIO()) as fake_out:\n r1 = Square(2)\n r1.display()\n self.assertEqual(fake_out.getvalue(), \"##\\n##\\n\")\n with patch('sys.stdout', new=StringIO()) as fake_out:\n r2 = Square(1)\n r2.display()\n self.assertEqual(fake_out.getvalue(), \"#\\n\")", "def test_top_from_point(self):\n dlg_wrapper = self.dlg.set_focus()\n x, y = self.combo_fixed.rectangle().mid_point()\n dlg_from_point = self.dlg.top_from_point(x, y)\n self.assertEqual(dlg_from_point, dlg_wrapper)\n\n dlg2_from_point = Desktop(backend=\"uia\").top_from_point(x, y)\n self.assertEqual(dlg2_from_point, dlg_wrapper)", "def _is_visible(self, obj):\n # TODO: FINISH THIS\n window_w = SCREEN_W\n window_h = SCREEN_H\n return obj.right >= 0 and obj.left <= window_w and obj.top >= 0 and obj.bottom <= window_h", "def isinview(self):\n term = getsession().terminal\n return (self.xloc > 0 and self.xloc +self.width -1 <= term.width\n and self.yloc > 0 and self.yloc +self.height -1 <= term.height)", "def SetDisplayFromWebTest():\n\n res = WaitForFilePath(\"/tmp/.X11-unix\", 60)\n assert res\n\n pattern = \"/tmp/.X11-unix/X*\"\n res = WaitForFilePath(pattern, 60)\n assert res\n\n # If we find \"/tmp/.X11-unix/X1\", then we will set DISPLAY to be \":1\".\n display = \":\" + res[0][len(pattern) - 1 :]\n os.environ[\"DISPLAY\"] = display\n logging.info(\"Set DISPLAY=%s\", display)", "def visible(self):\n return -PipePair.WIDTH < self.x < WIN_WIDTH", "def handle_xdisploc(self, xdisploc):\n self.log.debug('X Display is {}'.format(xdisploc))", "def is_onscreen(self):\n x,y = self.loc\n w,h = get_screen_size()\n\n screen = Rect(0, 0, w, h)\n actor = Rect(x, y, self.width, self.height)\n\n if screen.colliderect(actor): return True\n else: return False", "def display(self):\n return self.position.display(self.image)", "def is_onscreen(self):\n x, y = self.loc\n w, h = get_screen_size()\n\n screen = Rect(0, 0, w, h)\n actor = Rect(x, y, self.width, self.height)\n\n if screen.colliderect(actor):\n return True\n else:\n return False", "def is_evolved_screen(self):\n if not self.is_in_battle():\n return False\n\n address = 0x4bb1\n values = [164, 181, 174, 171, 181, 164, 163, 127, 168, 173, 179, 174, 79]\n\n for (index, value) in enumerate(values):\n if self.emulator.vba.read_memory_at(address + index) != value:\n return False\n else:\n return True", "def 
verify_display_on_screen(self, contents, **kwargs):\n try:\n if self.phone.verifyInDisplayResponses(contents):\n return True\n except:\n #08-11-2019: Phone sends incomplete screen info after a screen reset.To overcome vol down key is pressed once, which will make the phone to send complete info\n self.press_key(\"DecreaseVolume\")\n try:\n if self.phone.verifyInDisplayResponses(contents):\n return True\n except:\n #self.capture_screenshot()\n self.get_all_screen_content()\n if self.phone.phoneModel in [\"Mitel6910\"]:\n logger.error(\"Contents : %s \\n\"%(\" \".join(self.phone_display_contentscreen.values())))\n elif self.phone.phoneModel in [\"Mitel6867i\"]:\n logger.error(\"Contents : %s \\n\"%(self.phone_display_contents))\n else:\n logger.error(\"Expected Message : '%s'\"%contents)\n logger.error(\"Available Contents in Phone %s are below \\n\" %self.phone.extensionNumber)\n logger.error(\"Banner : %s\" % (self.phone_display_banner))\n logger.error(\"Programmable Keys: %s\"%(\", \".join(self.phone_display_programmablekeys.values())))\n logger.error(\"Bottom Soft Keys : %s\"%(\", \".join(self.phone_display_foxkeys.values())))\n #logger.error(\"oldBuffer : %s \\n\" % (self.phone_display_contents))\n #logger.error(\"Contents in Secondary Display Buffer: %s\"%(self.secondaryBuffer.keys()))\n return False\n # except Exception as err:\n # fn = sys._getframe().f_code.co_name\n # raise Exception('func \"%s\" - err: \"%s\"!' % (fn, err))", "def is_visible() -> bool:\n return win.winfo_ismapped()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test display not at origin
def test_display_not_at_origin(self): Base._Base__nb_object = 0 r1 = Square(2, 1, 1, 2) my_stdout = StringIO() sys.stdout = my_stdout r1.display() sys.stdout = sys.__stdout__ expected = "\n ##\n ##\n" self.assertEqual(expected, my_stdout.getvalue())
[ "def test_display_at_origin(self):\n Base._Base__nb_object = 0\n r1 = Square(2)\n my_stdout = StringIO()\n sys.stdout = my_stdout\n r1.display()\n sys.stdout = sys.__stdout__\n expected = \"##\\n##\\n\"\n self.assertEqual(expected, my_stdout.getvalue())", "def test_display_nocoords(self):\n with patch('sys.stdout', new=StringIO()) as fake_out:\n r1 = Square(2)\n r1.display()\n self.assertEqual(fake_out.getvalue(), \"##\\n##\\n\")\n with patch('sys.stdout', new=StringIO()) as fake_out:\n r2 = Square(1)\n r2.display()\n self.assertEqual(fake_out.getvalue(), \"#\\n\")", "def GetDisplayPoint(self):\n ...", "def is_visible() -> bool:\n return win.winfo_ismapped()", "def in_screen(self, coord):\n\t\treturn coord.x >= 0 and coord.x < self.width and coord.y >= 0 and coord.y < self.height", "def checkScreen(self):\r\n if not window.screen == self.screen:\r\n window.setScreen(self.screen)", "def off_screen(self):\n # Note: this will be used for testing, but not used in the final version of the code for the sake of simplicity.\n # TODO 13: Return True if the y position of this Raindrop is greater than 800.\n pass", "def _is_visible(self, obj):\n # TODO: FINISH THIS\n window_w = SCREEN_W\n window_h = SCREEN_H\n return obj.right >= 0 and obj.left <= window_w and obj.top >= 0 and obj.bottom <= window_h", "def visible(self):\n return -PipePair.WIDTH < self.x < WIN_WIDTH", "def handle_xdisploc(self, xdisploc):\n self.log.debug('X Display is {}'.format(xdisploc))", "def is_absolute(self):\n return False", "def verify_display_on_screen(self, contents, **kwargs):\n try:\n if self.phone.verifyInDisplayResponses(contents):\n return True\n except:\n #08-11-2019: Phone sends incomplete screen info after a screen reset.To overcome vol down key is pressed once, which will make the phone to send complete info\n self.press_key(\"DecreaseVolume\")\n try:\n if self.phone.verifyInDisplayResponses(contents):\n return True\n except:\n #self.capture_screenshot()\n self.get_all_screen_content()\n if self.phone.phoneModel in [\"Mitel6910\"]:\n logger.error(\"Contents : %s \\n\"%(\" \".join(self.phone_display_contentscreen.values())))\n elif self.phone.phoneModel in [\"Mitel6867i\"]:\n logger.error(\"Contents : %s \\n\"%(self.phone_display_contents))\n else:\n logger.error(\"Expected Message : '%s'\"%contents)\n logger.error(\"Available Contents in Phone %s are below \\n\" %self.phone.extensionNumber)\n logger.error(\"Banner : %s\" % (self.phone_display_banner))\n logger.error(\"Programmable Keys: %s\"%(\", \".join(self.phone_display_programmablekeys.values())))\n logger.error(\"Bottom Soft Keys : %s\"%(\", \".join(self.phone_display_foxkeys.values())))\n #logger.error(\"oldBuffer : %s \\n\" % (self.phone_display_contents))\n #logger.error(\"Contents in Secondary Display Buffer: %s\"%(self.secondaryBuffer.keys()))\n return False\n # except Exception as err:\n # fn = sys._getframe().f_code.co_name\n # raise Exception('func \"%s\" - err: \"%s\"!' 
% (fn, err))", "def test_invalid_cut_coords_with_display_mode(\n plot_func,\n display_mode,\n cut_coords,\n img_3d_mni,\n expected_error_message,\n):\n if plot_func == plot_glass_brain and display_mode != \"ortho\":\n return\n with pytest.raises(ValueError, match=expected_error_message):\n plot_func(\n img_3d_mni,\n display_mode=display_mode,\n cut_coords=cut_coords,\n )", "def test_display_coords(self):\n with patch('sys.stdout', new=StringIO()) as fake_out:\n r1 = Square(2, 1, 1)\n r1.display()\n self.assertEqual(fake_out.getvalue(), \"\\n ##\\n ##\\n\")\n with patch('sys.stdout', new=StringIO()) as fake_out:\n r2 = Square(1, 4, 4)\n r2.display()\n self.assertEqual(fake_out.getvalue(), \"\\n\\n\\n\\n #\\n\")", "def is_display_active(self):\n return self.op is not None", "def SetDisplayFromWebTest():\n\n res = WaitForFilePath(\"/tmp/.X11-unix\", 60)\n assert res\n\n pattern = \"/tmp/.X11-unix/X*\"\n res = WaitForFilePath(pattern, 60)\n assert res\n\n # If we find \"/tmp/.X11-unix/X1\", then we will set DISPLAY to be \":1\".\n display = \":\" + res[0][len(pattern) - 1 :]\n os.environ[\"DISPLAY\"] = display\n logging.info(\"Set DISPLAY=%s\", display)", "def test_display_method_w_coordinates(self):\n output = io.StringIO()\n sys.stdout = output\n sq9 = Square(2, x=1, y=1)\n sq9.display()\n sys.stdout = sys.__stdout__\n self.assertEqual(output.getvalue(), \"\\n ##\\n ##\\n\")", "def is_evolved_screen(self):\n if not self.is_in_battle():\n return False\n\n address = 0x4bb1\n values = [164, 181, 174, 171, 181, 164, 163, 127, 168, 173, 179, 174, 79]\n\n for (index, value) in enumerate(values):\n if self.emulator.vba.read_memory_at(address + index) != value:\n return False\n else:\n return True", "def test_display_player_location(self):\n self.Player1.character = \"Miss Scarlet\"\n self.board.display_player_location(self.Player1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if a .wit directory exists in any parent directory.
def is_wit_exists(abs_path): parent_dir = os.path.dirname(abs_path) drive = os.path.join(os.path.splitdrive(abs_path)[0], os.sep) while parent_dir != drive: wit_path = os.path.join(parent_dir, ".wit") is_exists = os.path.exists(wit_path) if is_exists: return parent_dir parent_dir = os.path.dirname(parent_dir) raise WitDirNotFoundError( f"'.wit' directory doesn't exist in any parent-directory of {abs_path}.")
[ "def test_subdirs_exist(self):\n for dirname in template_directories:\n self.assertTrue(\n os.path.isdir(os.path.join(self.builtdir, dirname))\n )", "def test_check_dir_existence_sub_dir_not_found(self):\n self.assertFalse(self.existing_dirs.append('unexpected_dir'))", "def is_in_rootdir():\r\n return os.path.isdir('test') and os.path.isdir('elftools')", "def _check_directories(self):\n mode = os.F_OK | os.R_OK | os.W_OK | os.X_OK\n for attr in ('data_dir', 'data_underlay_dir'):\n path = getattr(self, attr)\n \n # allow an empty underlay path or None\n if attr == 'data_underlay_dir' and not path:\n continue\n\n path_pages = os.path.join(path, \"pages\")\n if not (os.path.isdir(path_pages) and os.access(path_pages, mode)):\n msg = '''\n\"%(attr)s\" does not exists at \"%(path)s\", or has incorrect ownership and\npermissions.\n\nMake sure the directory and the subdirectory pages are owned by the web server and are readable,\nwritable and executable by the web server user and group.\n\nIt is recommended to use absolute paths and not relative paths. Check\nalso the spelling of the directory name.\n''' % {'attr': attr, 'path': path,}\n raise error.ConfigurationError(msg)", "def inDir(fil):\n currentdir = os.listdir('.')\n if fil in currentdir :\n return False\n else :\n return True", "def wantDirectory(self, dirname):\n if self.options.test_env:\n return in_dir(dirname, self.options.test_env)\n return False", "def folder_exists(L, p):\n for f in L:\n if f.path == p:\n return True\n return False", "def test_check_dir_existence_root_is_wrong(self):\n self.assertFalse(check_dir_existence('/some/wrong/path', self.existing_dirs))", "def dir_exists(self, path=''):\n if path == '':\n return True\n else:\n return False", "def test_templates_exist(self):\n index_path = os.path.join(self.panda_templates_dir, 'homepage.html')\n\n self.assertTrue(os.path.isfile(index_path), f\"{FAILURE_HEADER}Your index.html template does not exist, or is in the wrong location.{FAILURE_FOOTER}\")", "def check_dirs():\n for app_dir in {app.config[\"UPLOAD_FOLDER\"], app.config[\"CURRENT_TEMPLATE_DIR\"]}:\n if not os.path.exists(app_dir):\n os.makedirs(app_dir)", "def test_template_lookup_path(self):\n lookup_list = settings.TEMPLATES[0]['DIRS']\n found_path = False\n \n for entry in lookup_list:\n entry_normalised = os.path.normpath(entry)\n \n if entry_normalised == os.path.normpath(settings.TEMPLATE_DIR):\n found_path = True\n \n self.assertTrue(found_path, f\"{FAILURE_HEADER}Your project's templates directory is not listed in the TEMPLATES>DIRS lookup list. Check your settings.py module.{FAILURE_FOOTER}\")", "def directory_exists(path):\n return os.path.isdir(path) and os.path.exists(path)", "def check_path():\n root = os.path.abspath(os.path.curdir)\n assert os.path.basename(root) == \"treelite\", \"Must be run on project root.\"", "def test_files_exist(self):\n for filename in template_files:\n print(filename)\n self.assertTrue(\n os.path.exists(os.path.join(self.builtdir, filename))\n )", "def test_restricted_template_exists(self):\n template_base_path = os.path.join(settings.TEMPLATE_DIR, 'rango')\n template_path = os.path.join(template_base_path, 'restricted.html')\n self.assertTrue(os.path.exists(template_path), f\"{FAILURE_HEADER}We couldn't find the 'restricted.html' template in the 'templates/rango/' directory. Did you put it in the right place? 
Did you complete the exercises?{FAILURE_FOOTER}\")", "def is_work_tree(path):\n\treturn path and \".git\" in os.listdir(path)", "def test_directory_no_log(self):\n self.assertFalse(valet.view(self.test_subdir)\n .find(self.test_subdir + \"?log\") >= 0)", "def does_project_exist(slug):\n return isdir(project_dir(slug))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy a file or directory to the staging area. The path's parent directories, up to the root directory (the one containing the '.wit' dir), are recreated inside the staging area. A directory is copied with all of its content.
def add(path): abs_path = os.path.abspath(path) root = is_wit_exists(abs_path) staging_area = os.path.join(os.path.join(root, '.wit'), 'staging_area') destination = os.path.join(staging_area, os.path.relpath(abs_path, start=root)) if os.path.isfile(abs_path): if not os.path.exists(os.path.dirname(destination)): os.makedirs(os.path.dirname(destination)) shutil.copy2(abs_path, destination) else: shutil.copytree(abs_path, destination)
[ "def copytree(src, dst):\n if os.path.isfile(src):\n shutil.copy2(src, dst)\n else:\n shutil.copytree(src, dst)", "def copy(self, coderoot, testroot, case, compiler=None, stagedir=None):\n source = self.path(coderoot, case, compiler, stagedir)\n if source is not None:\n copyfile(source, testroot)", "def copy_content(src, dest):\n for item in os.listdir(src):\n full_path = os.path.join(src, item)\n if os.path.isdir(full_path):\n full_dest = os.path.join(dest, item)\n if os.path.exists(full_dest):\n rmtree(full_dest)\n copytree(full_path, full_dest)\n else:\n copy(full_path, dest)", "def copy(self, dest):\n if os.path.isdir(self.path):\n shutil.copytree(self.path, dest, symlinks=False, ignore=None)\n else:\n shutil.copy2(self.path, dest)", "def copy_folder_or_file(self, path_from, path_to):\n url = self._base_url + \"/resources/copy\"\n\n payload = {'path': path_to, 'from': path_from}\n r = requests.post(url, headers=self.base_headers, params=payload)\n self._check_code(r)", "def _CopyBundle(self, source_path, full_source_path, output_path):\n self._PrintVerbose('Copying %s to %s' % (source_path, output_path))\n try:\n shutil.copytree(full_source_path, output_path)\n except OSError as e:\n self._PrintError('Copy failed. %s' % e)\n return 650\n return 0", "def copytree(src, dst, uid, gid):\n fl = os.listdir(src)\n os.mkdir(dst)\n shutil.copymode(src, dst)\n os.lchown(dst, uid, gid)\n for f in fl:\n srcF = os.path.join(src, f)\n dstF = os.path.join(dst, f)\n if os.path.isdir(srcF):\n copytree(srcF, dstF, uid, gid)\n else:\n shutil.copyfile(srcF, dstF)\n shutil.copymode(srcF, dstF)\n os.lchown(dstF, uid, gid)", "def copy_tree_content(source_dir, target_dir):\n fs_childs = os.listdir(source_dir)\n for fs_child in fs_childs:\n source_item = os.path.join(source_dir, fs_child)\n copytree(source_item, os.path.join(target_dir, fs_child))", "def copy(input_path, output_path):\n _check_output_path(output_path)\n _makedirs(output_path)\n try:\n shutil.copy2(input_path, output_path)\n except FileNotFoundError:\n raise DoxhooksFileSystemError(\"Cannot find file:\", input_path)\n except OSError as error:\n raise DoxhooksFileSystemError(\n \"Cannot copy file from {!r} to {!r}.\"\n .format(input_path, output_path)) from error", "def copyFolder(self, Src, Dest, Overwrite=False):\n # Remove the existed folder.\n if os.path.exists(Dest) and Overwrite:\n shutil.rmtree(Dest)\n self.logger.warning('The following folder will be overwrited. {}'\\\n .format(Dest))\n try:\n shutil.copytree(Src, Dest, \\\n ignore=shutil.ignore_patterns('*.control', 'year.txt'))\n except OSError as e:\n # If the error was caused because the source wasn't a directory \n if e.errno == errno.ENOTDIR:\n shutil.copy(Src, Dest)\n else:\n self.logger.error('PathError Directory not copied. 
'+\\\n 'Error: %s' % e)\n return None", "def cp_tree(srcdir, dstdir, ignore=None, ignore_permissions=False):\n LOG.debug(\"> console:cp -r %s %s\", srcdir, dstdir)\n _copytree(srcdir, dstdir,\n ignore=ignore,\n ignore_permissions=ignore_permissions)", "def copy_dir(self, *args, **kw):\n self._run_fill_method('copy_dir', *args, **kw)", "def copy_files_recursive(source_paths, dest_path):\n for path in source_paths:\n distutils.dir_util.copy_tree(path, dest_path)", "def copy_in(filename, dirname):\n cp(filename, os.path.join(dirname, os.path.basename(filename)))", "def CopyTreeIfChanged(source, target):\n if os.path.isfile(source):\n return CopyFileIfChanged(source, target)\n if not os.path.isdir(target):\n os.makedirs(target)\n for name in os.listdir(source):\n CopyTreeIfChanged(\n os.path.join(source, name),\n os.path.join(target, name))", "def transfer_recursive(self, src_path, dest_path, isdir_fct, listdir_fct, mkdir_fct, cp_fct):\n\n # a simple copy now if the source is a file...\n if not isdir_fct(src_path):\n cp_fct(src_path, dest_path)\n return\n # ...otherwise we src_path for relative paths\n\n stack = [src_path]\n while stack:\n path = stack.pop()\n src_relpath = os.path.relpath(path, src_path)\n dest_fullpath = os.path.join(dest_path, src_relpath)\n if isdir_fct(path):\n # For src_path, relpath() gives '.', so dest_fullpath ends \n # with \"/.\". mkdir doesn't like this, so we handle src_path \n # as a special case.\n if path == src_path:\n mkdir_fct(dest_path)\n else:\n mkdir_fct(dest_fullpath)\n stack.extend(os.path.join(path, p) for p in listdir_fct(path))\n else:\n cp_fct(path, dest_fullpath)\n return", "def copy_helper(self, app_dir, directory):\n template_dir = self.find_template_dir(directory)\n if template_dir:\n if directory == \"vendor\":\n app_dir = os.path.join(app_dir, directory)\n try:\n os.makedirs(app_dir)\n except:\n pass\n \n for root, dirs, files in os.walk(template_dir):\n rel = relpath(root, template_dir)\n if rel == \".\":\n rel = \"\"\n target_path = os.path.join(app_dir, rel)\n for d in dirs:\n try:\n os.makedirs(os.path.join(target_path, d))\n except:\n continue\n for f in files:\n shutil.copy2(os.path.join(root, f), os.path.join(target_path, f)) \n else:\n raise AppError(\"Can't create a CouchApp in %s: default template not found.\" % (\n app_dir))", "def copy(self, target_dir):\n\n c = self.context\n\n if os.path.isdir(target_dir):\n if c.options.prevent_overwrite:\n return\n else:\n try:\n shutil.rmtree(target_dir)\n except:\n return c.error('Cannot copy app to existing directory %s (cannot delete directory first)' % self)\n\n #try:\n # os.makedirs(target_dir)\n #except:\n # return c.error('There was a problem creating the directory %s - cannot copy app to appengine directory.' 
% target_dir)\n\n path = self.svn_path()\n\n if not path:\n return c.error('Cannot copy %s to %s - svn checkout failed' % (self, target_dir))\n\n shutil.copytree(self.svn_path(), target_dir)\n\n def remove_unwanted_directories(directory = ''):\n \"\"\"\n Removes any directories under the app directory tree that start with a .\n For example, lib/.svn, lib/routing/.svn etc...\n This will prevent all these unwanted files from being bundled with appengine\n during a deployment with local files.\n \"\"\"\n for f in os.listdir(os.path.join(target_dir, directory)):\n if f.startswith('.') or (self.context.action == \"deploy_acre\" and f in [\"js\", \"css\", \"mjt\"]):\n shutil.rmtree(os.path.join(target_dir, directory, f))\n continue\n\n if os.path.isdir(os.path.join(target_dir, directory, f)):\n remove_unwanted_directories(os.path.join(directory, f))\n\n remove_unwanted_directories()\n\n return True", "def merge_tree(src, dst):\n for path, dirs, files in os.walk(src):\n relpath = path[len(src) + 1 :]\n # Copy files over\n for file in files:\n fullsrcpath = os.path.join(path, file)\n fulldstpath = os.path.join(dst, relpath, file)\n shutil.copy2(fullsrcpath, fulldstpath)\n # Ensure the subdirectories exist\n for dir in dirs:\n fulldstpath = os.path.join(dst, relpath, dir)\n if not os.path.exists(fulldstpath):\n print(\"Making \", fulldstpath)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calls a coroutine function with the given parameters, if it is defined.
async def try_call(self, func, *opts): if inspect.iscoroutinefunction(func): await func(*opts)
[ "async def _invoke(callback: Callable, *params: object) -> Any:\n _rich_traceback_guard = True\n parameter_count = count_parameters(callback)\n result = callback(*params[:parameter_count])\n if isawaitable(result):\n result = await result\n return result", "async def call_with_args(self, fn, request):\n\n sig = signature(fn)\n # delete undeclared parameters\n if request:\n params = {k:v for k, v in request.body.data.items() if k in sig.parameters}\n else:\n params = {}\n\n if 'request' in sig.parameters.keys():\n bn = sig.bind_partial(request=request, **params)\n else:\n bn = sig.bind_partial(**params)\n\n res = fn(*bn.args, **bn.kwargs)\n\n if iscoroutine(res):\n return await res\n else:\n return res", "def _execute_function_without_arguments(fun):\n return fun()", "def coroutine(func):\n @wraps(func)\n def start(*args, **kw):\n coro = func(*args, **kw)\n coro.next()\n return coro\n return start", "def invokeAsynchronous(\n function, # type: Callable\n args=None, # type: Optional[Iterable[Any]]\n kwargs=None, # type: Optional[Dict[String, Any]]\n description=None, # type: Optional[String]\n):\n # type: (...) -> Thread\n print(function, args, kwargs, description)\n return Thread()", "async def call_hook(self, hook_name: str, *args, **kwargs):\n coroutines = self.hooks.get(hook_name)\n if coroutines is None:\n self.logger.warning(\n f\"{self.name}: calling the {hook_name!r} hook, but \"\n \"this hook hasn't been registered.\"\n )\n\n for coroutine in coroutines:\n try:\n await coroutine(*args, **kwargs)\n except asyncio.CancelledError:\n return\n except Exception:\n self.logger.exception(f\"hook {hook_name!r}: an exception occurred while executing a coroutine:\")", "def call(fn, args=(), kwargs={}):\r\n return fn(*args, **kwargs)", "def loop_apply_coroutine(loop, func: types.FunctionType, *args, **kwargs) -> object:\n if asyncio.iscoroutinefunction(func):\n future = asyncio.ensure_future(\n func(*args, **kwargs), loop=loop)\n\n loop.run_until_complete(future)\n return future.result()\n else:\n return func(*args, **kwargs)", "def execute_func(self, *args):\n self.start_value = self.function(self.start_value)\n self.function_call_back(self.start_value)", "async def _exec(self, oid, fn, args, kwargs):\n with tracing.Event(\n self.tracer, f\"_exec(oid=0x{oid:x},fn={fn},args={args},kwargs={kwargs})\"\n ) as event_tracer:\n msg_type = self.MsgTypes.REPLY_RESULT\n\n try:\n try:\n obj_item = self.objects[oid]\n except KeyError:\n raise ServiceObjectNotFoundError(f\"0x{oid:x} not found\")\n\n def r():\n try:\n return getattr(obj_item.obj, fn)(*args, **kwargs)\n except BaseException as exc:\n event_tracer.exception(exc)\n raise\n finally:\n event_tracer.debug(\"executed\")\n\n if fn == \"__getattribute__\":\n with tracing.Event(event_tracer, f\"getattr\"):\n r = getattr(obj_item.obj, *args)\n elif fn == \"__setattribute__\":\n with tracing.Event(event_tracer, f\"setattr\"):\n r = setattr(obj_item.obj, *args)\n elif fn == \"__incref__\":\n with tracing.Event(event_tracer, f\"__incref__\"):\n r = self.__incref__(oid, obj_item)\n elif fn == \"__decref__\":\n with tracing.Event(event_tracer, f\"__decref__\"):\n r = self.__decref__(oid, obj_item)\n else:\n with tracing.Event(\n event_tracer, f\"run_in_executor({self.executor})\"\n ):\n r = await self.loop.run_in_executor(self.executor, r)\n\n if asyncio.iscoroutine(r):\n with tracing.Event(event_tracer, \"result is coroutine\"):\n r = await r\n\n except BaseException as e:\n event_tracer.exception(\"executed, got exception={e}\")\n msg_type = 
self.MsgTypes.REPLY_EXCEPTION\n exc_type, exc_value, exc_tb = sys.exc_info()\n r = exc_type(\n str(exc_value)\n + \"\\n\\nService Traceback (most recent call last):\\n\"\n + \"\".join(traceback.format_tb(exc_tb)).rstrip()\n )\n\n event_tracer.debug(f\"executed, result={r}\")\n return msg_type, r", "def coroutine(gen_func):\n def wrapped(*args, **kwargs):\n coro = gen_func(*args, **kwargs)\n return loop.create_task(coro)\n return wrapped", "def async_func(self, *args, **kwargs):\n del args\n task = TaskRunner(run_function=func, obj=self, kwargs=kwargs)\n ret = task.start()\n return ret", "def call_as_future(f, loop, *args, **kwargs):\n if not asyncio.iscoroutinefunction(f):\n f = asyncio.coroutine(f)\n\n return asyncio.ensure_future(f(*args, **kwargs), loop=loop)", "def maybe_run(*args, **kwargs):\n if args[1] is None:\n return None\n args = iter(args)\n return next(args)(*args, **kwargs)", "def wrapper(self, *args, **kw):\n\n def call():\n \"\"\"Calls function on loop thread\"\"\"\n try:\n func(self, *args, **kw)\n except Exception:\n logger.exception(\n \"failed to call async [%r] with [%r] [%r]\", func, args, kw\n )\n\n self.loop.call_soon_threadsafe(call)", "async def run(self, func: Callable[..., T], /, *args: Any, **kwargs: Any) -> T:\n # TODO: How to make sure that we can get the same event loop at instance creation ?\n _loop = get_event_loop()\n _func = partial(func, *args, **kwargs)\n return await _loop.run_in_executor(self, _func)", "def function_runner(f):\n\n f()", "async def invoke_async(self, enum_type: ComponentTypes, method: str, *arugs, **kwargs) -> Any:\n component = self.get_component(enum_type)\n func = getattr(component, method, None)\n if callable(func):\n if iscoroutinefunction(func):\n return await func(*arugs, **kwargs)", "def call_on_main_thread(self, function, *args, **kwargs):\n self.waiting_calls.put((function, args, kwargs))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Establishes a connection to the websocket endpoint and calls the on_connected callback.
async def connect(self): self.websocket = await websockets.connect(self.url, **self.kwargs) await self.try_call(self.on_connected)
[ "async def websocket_connected(ws):\n await ws.send_str(json.dumps({\"subject\": Subject.websocket.value, \"event\": Event.connected.value}))\n logger.debug(\"websocket: new connection from user %s\", ws.cirrina.web_session.get(\"username\"))", "def __connect(self):\n \n self.ws = websocket.WebSocketApp(self.ws_url,\n on_message=self.__on_message,\n on_close=self.__on_close,\n on_open=self.__on_open,\n on_error=self.__on_error,\n header=self.__get_auth()\n )\n self.wst = threading.Thread(target=lambda: self.ws.run_forever())\n self.wst.start()\n self.logger.info('ws thread start')", "def onOpen(self):\n logger.info(\"WebSocket connection open.\")\n\n msg = {\n 'id': str(uuid.uuid4()),\n 'type': 'authorization',\n 'data': { 'token': 'Bearer ' + self.access_token }\n }\n self.sendMessage(json.dumps(msg).encode('utf8'))\n self.on_connected()", "def on_open_websocket(self):\n\n self.log('Websocket open')", "def onOpen(self):\n self.factory.register_client(self, self.platform)\n logger.info(\"WebSocket connection open.\")", "def on_open(_):\n\n print(\"WebSocket successfully connected!\")\n global web_socket_open\n web_socket_open = True\n send_login_request(sts_token, False)", "async def connect(self):\n print('Connecting...')\n self.connection = await websockets.client.connect('wss://ws.mjrlegends.com:2096')\n if self.connection.open:\n print('Connection established. Client correctly connected')\n json_message = json.dumps({\n \"type\": \"LISTEN\",\n \"nonce\": \"4jgUaUv0zdxBMe2tN6YSZaCROCwkO92baSaFzgT50sWFySI15ErkVpoIqfqLwoZ6\",\n \"channel_id\": '32907202',\n \"topics\": [\"channel_points_reward_redeem\"],\n \"token\": self.auth_token\n })\n await self.send_message(json_message)\n return self.connection", "async def test_websocket_connect(self):\n with _patch_local_sources_watcher(), self._patch_app_session():\n await self.server.start()\n\n self.assertFalse(self.server.browser_is_connected)\n\n # Open a websocket connection\n ws_client = await self.ws_connect()\n self.assertTrue(self.server.browser_is_connected)\n\n # Get this client's SessionInfo object\n self.assertEqual(1, self.server._runtime._session_mgr.num_active_sessions())\n session_info = self.server._runtime._session_mgr.list_active_sessions()[0]\n\n # Close the connection\n ws_client.close()\n await asyncio.sleep(0.1)\n self.assertFalse(self.server.browser_is_connected)\n\n # Ensure AppSession.disconnect_file_watchers() was called, and that our\n # session exists but is no longer active.\n session_info.session.disconnect_file_watchers.assert_called_once()\n self.assertEqual(0, self.server._runtime._session_mgr.num_active_sessions())\n self.assertEqual(1, self.server._runtime._session_mgr.num_sessions())", "def on_connected(self):\n log.debug('on_connected called.')", "def _connect() -> NoReturn:\n websocket.enableTrace(False)\n ws = websocket.WebSocketApp(\n f\"ws://{cfg.MCZ_IP}:{cfg.MCZ_PORT}\",\n on_open=on_open,\n on_message=on_message,\n on_error=on_error,\n )\n while True:\n try:\n ws.run_forever(ping_interval=5, ping_timeout=2)\n except KeyboardInterrupt:\n log.info(\"Connection interrupted by user\")\n break\n except:\n pass", "async def register(websocket):\n USERS.add(websocket)\n logger.info('New client connected')\n await send_state_data()", "def connect(self):\n\t\tif not self.session:\n\t\t\traise Exception() # TODO: better exception\n\t\tr = requests.post(self.query_endpoint + '/request/chatauth',\n\t\t\tparams={\n\t\t\t\t'session': self.session\n\t\t\t}\n\t\t)\n\t\tself.token = r.json()['result']\n\n\t\tself.ws 
= websocket.WebSocketApp(\n\t\t\t'ws://{}:{}/chatserver'.format(self.chat_host, self.chat_port),\n\t\t\ton_message=self.ws_message,\n\t\t\ton_open=self.ws_open,\n\t\t\t# on_error=debug,\n\t\t\t# on_close=debug\n\t\t) # TODO: handle ws disconnect\n\n\t\tthread = threading.Thread(target=self.ws.run_forever)\n\t\tthread.daemon = True\n\t\tthread.start()", "def pusher_connected(self, data):\n # Inform user that pusher is done connecting\n self.logger.info(\"Pusherclient connected\")\n\n # Bind the events we want to listen to\n self.callback_client.bind(\"payment_authorized\",\n self.payment_authorized)\n self.callback_client.bind(\"shortlink_scanned\",\n self.shortlink_scanned)", "async def _connect(self, url: str) -> WebSocketClientProtocol:\n try:\n logger.debug('Starting new websocket connection: %s', url)\n websocket = await connect(url)\n\n # Isolate specific exceptions, so they are not retried in `get_job_status`.\n except (SSLError, InvalidURI) as ex:\n raise ex\n\n # pylint: disable=broad-except\n except Exception as ex:\n exception_to_raise = WebsocketError('Failed to connect to the server.')\n\n logger.info('An exception occurred. Raising \"%s\" from \"%s\"',\n repr(exception_to_raise), repr(ex))\n raise exception_to_raise from ex\n\n try:\n # Authenticate against the server.\n auth_request = self._authentication_message()\n await websocket.send(auth_request.as_json())\n\n # Verify that the server acknowledged our authentication.\n auth_response_raw = await websocket.recv()\n\n auth_response = WebsocketResponseMethod.from_bytes(\n auth_response_raw) # type: ignore[arg-type]\n\n if auth_response.type_ != 'authenticated':\n raise WebsocketIBMQProtocolError('Failed to authenticate against the server: {}'\n .format(auth_response.as_json()))\n except ConnectionClosed as ex:\n await websocket.close()\n exception_to_raise = WebsocketAuthenticationError(\n 'Unexpected error occurred when authenticating against the server.')\n\n logger.info('An exception occurred. Raising \"%s\" from \"%s\"',\n repr(exception_to_raise), repr(ex))\n raise exception_to_raise from ex\n\n return websocket", "def start_websocket(self):\n print 'Starting web socket...'\n self.ws.run_forever()", "async def listen(self) -> None:\n self.logger.info(\"Starting listening to showdown websocket\")\n coroutines = []\n try:\n async with websockets.connect(self.websocket_url) as websocket:\n self._websocket = websocket\n while True:\n message = str(await websocket.recv())\n self.logger.info(\"<<< %s\", message)\n coroutines.append(ensure_future(self._handle_message(message)))\n except websockets.exceptions.ConnectionClosedOK:\n self.logger.warning(\n \"Websocket connection with %s closed\", self.websocket_url\n )\n except (CancelledError, RuntimeError) as e:\n self.logger.critical(\"Listen interrupted by %s\", e)\n finally:\n for coroutine in coroutines:\n coroutine.cancel()", "def on_open(ws):\n\n print('Connected. 
Server generated request ID = ', ws.sock.headers['x-requestid'])\n\n def run(*args):\n \"\"\"Background task which streams audio.\"\"\"\n # Send WAVE header to provide audio format information\n data = get_wave_header(16000)\n ws.send(data, websocket.ABNF.OPCODE_BINARY)\n # Stream pyAudio Microphone Audio\n while True:\n #sys.stdout.write('.')\n data = stream.read(CHUNK)\n ws.send(data, websocket.ABNF.OPCODE_BINARY)\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n ws.close()\n print('Background thread terminating...')\n\n thread.start_new_thread(run, ())", "def on_open(ws):\n def run(*args):\n # subscribe to blocks\n BlockchainInfoWebSocketAPI.subscribe_to_blocks(ws)\n\n # ping every 25 seconds to prevent remote server from disconnecting\n while 1:\n log.debug(\"BlockchainInfoWebSocketAPI: doing heartbeat ping to blockchain.info\")\n ws.send(\"\")\n time.sleep(25)\n\n # run the \"run\" method in a new thread\n thread.start_new_thread(run, ())", "def on_connect():\n\n print('User connected')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert this rotation string into a classification
def rotclass(val):
    # if we have seven years of either corn and soy
    if val.count("B") + val.count("C") > 6:
        return "Ag"
    return "Non Ag"
[ "def rn_classification(array):\n _class = ['', '', '']\n\n # integer / fraction\n # den length == 1 and den = [1, 1, 1]\n if len(array[1]) == 1 and (array[1][0] == 1).all():\n _class[0] = 'integer'\n else:\n _class[0] = 'fraction'\n\n # rational / irrational for each linear\n for p, lin in enumerate(array):\n if (lin[:, 1:] == 1).all():\n _class[p + 1] = 'rational'\n elif len(lin) == 1 and p == 0:\n # den cannot be simple irrational\n _class[p + 1] = 'simple irrational'\n elif lin.dtype == int_:\n _class[p + 1] = 'mixed irrational'\n else:\n _class[p + 1] = 'composed irrational'\n # if first-type class is 'integer', won't parse for denominator\n if _class[0] == 'integer':\n break\n return _class", "def _get_rotated_all_label(stem: str) -> Classification:\n class_name, original_image_number, patch_number, rotated_degrees = stem.split(\"-\", 3)\n attributes = {\n \"original image sample number\": original_image_number,\n \"patch number\": int(patch_number[1:]),\n \"rotated degrees\": int(rotated_degrees[1:]),\n }\n return Classification(category=class_name, attributes=attributes)", "def reconstructOrient(self, s):\r\n self.orient = np.fromstring(s, dtype = np.float64, sep = \",\").reshape(3, 3)\r\n return self.orient", "def classify(self, image: np.ndarray) -> str:\n cropped_image = self.image_preprocessor.preprocess(image)\n image_for_model = cropped_image.reshape((1, *cropped_image.shape))\n predicted_class = self.image_classifier.classify(image_for_model)\n return predicted_class", "def _transformer_classify(self, label: str) -> str:\n\n result = self.classifier(label, OrganismLabelClassifier.zeroshot_categories)\n\n labels, scores = result[\"labels\"], result[\"scores\"]\n\n # logger.debug(f\"label {label} transformer classification:\\nlabels:\\t {labels}\\nscores:\\t{scores}\")\n\n top_label = labels[0]\n return top_label # type: ignore", "def classify(self, sText):\n sumPos = 0.0\n sumNeg = 0\n sumAll = 0.0\n sText = sText.lower()\n tokens = self.tokenize(sText)\n strPos = self.pos.keys()\n for i in range (len(self.pos)):\n sumAll += self.pos[strPos[i]]+self.neg[strPos[i]]\n for i in range (len(tokens)):\n if self.pos.has_key(tokens[i])==False:\n self.pos[tokens[i]]=0\n self.neg[tokens[i]]=0\n sumPos += math.log(float(self.pos[tokens[i]]+1)/float(sumAll))\n sumNeg += math.log(float(self.neg[tokens[i]]+1)/float(sumAll))\n print sumPos\n print sumNeg\n if sumPos >= sumNeg:\n return \"Positive\"\n else:\n return \"Negtive\"", "def fromString(self, str: 'SbString') -> \"SbBool\":\n return _coin.SbRotation_fromString(self, str)", "def __init__(self, motif_string):\n self.motif = []\n rule = \"is\"\n seq = \"\"\n for c in motif_string:\n if c == '{':\n rule = \"not\"\n seq = \"\"\n elif c == '}':\n self.motif.append((rule, seq))\n rule = \"is\"\n seq = \"\"\n elif c == '[':\n rule = \"or\"\n seq = \"\"\n elif c == ']':\n self.motif.append((rule, seq))\n rule = \"is\"\n seq = \"\"\n else:\n if rule != \"is\":\n seq += c\n else:\n self.motif.append((rule, c))", "def classify(self):\n\n if self.annotation_type == None or self.annotation_type not in self._annotation_classifications:\n return None\n\n try:\n classification = self._annotation_classifications[self.annotation_type][self.attributes['assertion']]\n except KeyError:\n classification = self.annotation_type\n if self.attributes['temporality'] != 'current':\n classification += ' - {}'.format(self.attributes['temporality'].title())\n\n # Exclude any annotations of a surgical site infection that doesn't have anatomy\n # TODO: Implement 
coreference resolution in `from_markup()`\n\n self._classification = classification\n return classification", "def predict(self, text):", "def convert_strings_to_one_hot(Y, classes):\n return label_binarize(Y, classes=classes)", "def _get_without_rotate_all_label(stem: str) -> Classification:\n class_name, original_image_number, patch_number = stem.split(\"-\", 2)\n attributes = {\n \"original image sample number\": original_image_number,\n \"patch number\": int(patch_number[1:]),\n \"rotated degrees\": 0,\n }\n return Classification(category=class_name, attributes=attributes)", "def data_classification(self, data=[]):\n data_type = ''\n self.logger.info('Attempting to classify: {0}'.format(data))\n #This section classifies an input as heartbeat, expecting integer\n if len(data) == 1:\n try:\n value = data[0]\n int(value)\n self.instruction_list.append(self.heartbeat._make(data))\n data_type = 'heartbeat'\n except ValueError as input_error:\n self.logger.error('{0}, expecting heartbeat with epoch timestamp'.format(input_error))\n #This section classifies the input as a bid\n if len(data) == 5:\n is_bid_syntax_valid = self.validate_bid_format(data)\n if is_bid_syntax_valid:\n self.instruction_list.append(self.bid._make(data))\n data_type = 'bid'\n else:\n self.logger.error('Invalid syntax for classifying object as a bid: {0}'.format(data))\n # This section classifies the input as a user listing\n if len(data) == 6:\n is_listing_syntax_valid = self.validate_listing_format(data)\n if is_listing_syntax_valid:\n self.instruction_list.append(self.user_listing._make(data))\n data_type = 'user_listing'\n else:\n self.logger.error('Invalid syntax for classifying object as a user listing: {0}'.format(data))\n \n if data_type:\n self.logger.info('Successfully classified {0} as {1}'.format(self.instruction_list[-1], data_type))\n else:\n self.logger.debug('Unable to classify instruction: {0}'.format(data))\n return data_type", "def classification_type(self) -> ClassificationType:\n return ClassificationType.AMINO_ACID_SUBSTITUTION", "def convert_train_data(text_item):\n planes = []\n for plane in range(0, 16):\n # 360 first bits are 90 hex chars\n hex_string = text_item[plane][0:90]\n integer = int(hex_string, 16)\n as_str = format(integer, '0>360b')\n # remaining bit that didn't fit\n last_digit = text_item[plane][90]\n assert last_digit == \"0\" or last_digit == \"1\"\n as_str += last_digit\n assert len(as_str) == 361\n plane = [0.0 if digit == \"0\" else 1.0 for digit in as_str]\n planes.append(plane)\n stm = text_item[16][0]\n assert stm == \"0\" or stm == \"1\"\n if stm == \"0\":\n planes.append([1.0] * 361)\n planes.append([0.0] * 361)\n else:\n planes.append([0.0] * 361)\n planes.append([1.0] * 361)\n assert len(planes) == 18\n probabilities = []\n for val in text_item[17].split():\n float_val = float(val)\n # Work around a bug in leela-zero v0.3\n if math.isnan(float_val):\n return False, None\n probabilities.append(float_val)\n assert len(probabilities) == 362\n winner = float(text_item[18])\n assert winner == 1.0 or winner == -1.0\n # Get one of 8 symmetries\n symmetry = random.randrange(8)\n sym_planes = [apply_symmetry(plane, symmetry) for plane in planes]\n sym_probabilities = apply_symmetry(probabilities, symmetry)\n return True, (sym_planes, sym_probabilities, [winner])", "def classifying_func(features):\n return classify_line(features, model, encoder)", "def convert_to_verse_classification(data):\n\t\n\tnicknames = get_nicknames()\n\tnew_data = []\n\n\tfor dictio in data:\n\t\tartist = 
dictio['artist']\n\t\tlyrics = dictio['lyrics']\n\t\tlyrics = re.sub(\"\\n\",\"___\",lyrics) # replace by ___ to preserse the location of the newline\n\t\tverses = re.findall(\"\\[.+?\\].+?\\[\",lyrics,overlapped=True) # [...] indicates the start of a new verse\n\t\tverses = [re.sub(\"___\",\"\\n\",verse) for verse in verses] # reinsert the newlines\n\t\tverses = [re.sub(\"\\n+\\[\",\"\",verse) for verse in verses] # remove a remaining [\n\t\tfor verse in verses:\n\t\t\tif isinstance(dictio['featuring'],float): # if the entire song is by the same artist, simply add each verse to the data\n\t\t\t\tverse = re.sub(\"\\[.+?\\]\",\"\",verse)\n\t\t\t\tif len(verse.split()) > 20:\n\t\t\t\t\tnew_dictio = dictio.copy()\n\t\t\t\t\tnew_dictio['lyrics'] = verse.strip()\n\t\t\t\t\tif new_dictio not in new_data:\n\t\t\t\t\t\tnew_data.append(new_dictio)\n\t\t\t\t\t\tall_verses.append(verse.strip())\n\t\t\telse: # if the song in by multiple artists, check the artist of each verse\n\t\t\t\theader = re.findall(\"\\[.+?\\]\",verse) # header of a verse, as in [..]\n\t\t\t\tif header != []:\n\t\t\t\t\theader = header[0].lower()\n\t\t\t\t\theader = header.split(':')\n\t\t\t\t\tif len(header) > 1:\n\t\t\t\t\t\theader = header[1].strip()[:-1]\n\t\t\t\t\tfor nickname in nicknames[dictio['artist']]:\n\t\t\t\t\t\tif header == nickname.lower():\n\t\t\t\t\t\t\tverse = re.sub(\"\\[.+?\\]\",\"\",verse)\n\t\t\t\t\t\t\tif len(verse.split()) > 20:\n\t\t\t\t\t\t\t\tnew_dictio = dictio.copy()\n\t\t\t\t\t\t\t\tnew_dictio['lyrics'] = verse.strip()\n\t\t\t\t\t\t \n\t\t\t\t\t\t\t\tif new_dictio not in new_data:\n\t\t\t\t\t\t\t\t\tnew_data.append(new_dictio)\n \n\tsong_titles = [dictio['song_title'] for dictio in new_data] # add song titles to the data to be able to trace the original song of each verse\n\n\treturn new_data", "def classify(img):\n # possible TODO: call the imagekit script on the input image\n # decodes from base64 to an image type suitable for prediction model\n imgDecoded = base64.b64decode(img)\n image = Image.open(BytesIO(imgDecoded))\n image = image.convert(\"RGB\")\n target_size = (IMG_WIDTH, IMG_HEIGHT)\n np_img = image.resize(target_size)\n np_img = img_to_array(np_img) # (224, 224, 3)\n np_img = np.expand_dims(np_img, axis=0)\n datagen = ImageDataGenerator(rescale=1./255).flow(\n np_img, \n batch_size=BATCH_SIZE\n )\n \n # make prediction\n # predict_classes will return the label, which is an integer (0, 1, 2... for\n # each animal in alphabetical order)\n # convert this to a string name, then return\n with graph.as_default():\n bottleneck_features_web = vgg16_model.predict_generator(datagen, BATCH_SIZE)\n prediction = model.predict_classes(bottleneck_features_web, batch_size=BATCH_SIZE)[0]\n animal = ANIMALS[prediction]\n fun_fact = getFunFacts(animal)\n\n article = \"an \" if fun_fact[0].lower() in ['a','e','i','o','u'] else \"a \"\n # text = \"This is \" + article + animal + \"!\\nDid you know? \\n\" + fun_fact + \"\\n\\n\"\n return json.dumps({'result': article + animal, 'fun': fun_fact})", "def deserialize(self, str):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print current ffmpeg status
def do_status(self):
    return "Waiting for {0.prefill_in} frames; Streaming from ffmpeg: {0.ffmpeg_ready}".format(self)
[ "def commandline(self): \n return self._ffmpeg_commandline()", "def status(dev):\n dev.print_status()", "def ffmpeg_parse_infos(self,filename, print_infos=False, check_duration=True):\n\n\n # open the file in a pipe, provoke an error, read output\n is_GIF = filename.endswith('.gif')\n cmd = [get_setting(\"FFMPEG_BINARY\"), \"-i\", filename]\n if is_GIF:\n cmd += [\"-f\", \"null\", \"/dev/null\"]\n\n popen_params = {\"bufsize\": 10**5,\n \"stdout\": sp.PIPE,\n \"stderr\": sp.PIPE,\n \"stdin\": DEVNULL}\n\n if os.name == \"nt\":\n popen_params[\"creationflags\"] = 0x08000000\n\n proc = sp.Popen(cmd, **popen_params)\n\n proc.stdout.readline()\n proc.terminate()\n infos = proc.stderr.read().decode('utf8')\n del proc\n\n if print_infos:\n # print the whole info text returned by FFMPEG\n print( infos )\n\n\n lines = infos.splitlines()\n if \"No such file or directory\" in lines[-1]:\n raise IOError((\"MoviePy error: the file %s could not be found !\\n\"\n \"Please check that you entered the correct \"\n \"path.\")%filename)\n\n result = dict()\n\n\n # get duration (in seconds)\n result['duration'] = None\n\n if check_duration:\n try:\n keyword = ('frame=' if is_GIF else 'Duration: ')\n line = [l for l in lines if keyword in l][0]\n match = re.findall(\"([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])\", line)[0]\n result['duration'] = cvsecs(match)\n except:\n raise IOError((\"MoviePy error: failed to read the duration of file %s.\\n\"\n \"Here are the file infos returned by ffmpeg:\\n\\n%s\")%(\n filename, infos))\n\n # get the output line that speaks about video\n lines_video = [l for l in lines if ' Video: ' in l and re.search('\\d+x\\d+', l)]\n\n result['video_found'] = ( lines_video != [] )\n\n if result['video_found']:\n\n\n try:\n line = lines_video[0]\n\n # get the size, of the form 460x320 (w x h)\n match = re.search(\" [0-9]*x[0-9]*(,| )\", line)\n s = list(map(int, line[match.start():match.end()-1].split('x')))\n result['video_size'] = s\n except:\n raise IOError((\"MoviePy error: failed to read video dimensions in file %s.\\n\"\n \"Here are the file infos returned by ffmpeg:\\n\\n%s\")%(\n filename, infos))\n\n\n # get the frame rate. Sometimes it's 'tbr', sometimes 'fps', sometimes\n # tbc, and sometimes tbc/2...\n # Current policy: Trust tbr first, then fps. 
If result is near from x*1000/1001\n # where x is 23,24,25,50, replace by x*1000/1001 (very common case for the fps).\n\n try:\n match = re.search(\"( [0-9]*.| )[0-9]* tbr\", line)\n tbr = float(line[match.start():match.end()].split(' ')[1])\n result['video_fps'] = tbr\n\n except:\n match = re.search(\"( [0-9]*.| )[0-9]* fps\", line)\n result['video_fps'] = float(line[match.start():match.end()].split(' ')[1])\n\n\n # It is known that a fps of 24 is often written as 24000/1001\n # but then ffmpeg nicely rounds it to 23.98, which we hate.\n coef = 1000.0/1001.0\n fps = result['video_fps']\n for x in [23,24,25,30,50]:\n if (fps!=x) and abs(fps - x*coef) < .01:\n result['video_fps'] = x*coef\n\n if check_duration:\n result['video_nframes'] = int(result['duration']*result['video_fps'])+1\n result['video_duration'] = result['duration']\n else:\n result['video_nframes'] = 1\n result['video_duration'] = None\n # We could have also recomputed the duration from the number\n # of frames, as follows:\n # >>> result['video_duration'] = result['video_nframes'] / result['video_fps']\n\n\n lines_audio = [l for l in lines if ' Audio: ' in l]\n\n result['audio_found'] = lines_audio != []\n\n if result['audio_found']:\n line = lines_audio[0]\n try:\n match = re.search(\" [0-9]* Hz\", line)\n result['audio_fps'] = int(line[match.start()+1:match.end()-3])\n except:\n result['audio_fps'] = 'unknown'\n\n return result", "def run(self):\n args = self.ffmpeg_location + \" -hide_banner -loglevel panic \"\n if self.ss:\n args += '-ss ' + self.ss + \" \"\n args += \"-i \" + self.infile + \" \"\n if self.filter:\n args += '-filter:v \"' + self.filter + '\" '\n if self.pix_fmt:\n args += '-pix_fmt ' + self.pix_fmt + \" \"\n if self.vcodec:\n args += '-vcodec ' + self.vcodec + \" \"\n if self.width:\n args += '-vf scale=' + str(self.width) + ':-1 '\n if self.f:\n args += '-f ' + self.f + \" \"\n if self.vframes:\n args += '-vframes ' + self.vframes + \" \"\n args += self.outfile\n print(\"running ffmpeg with:\")\n print(args)\n d = subprocess.run(args, shell=True)\n return d", "def run_ffmpeg(command):\n # Run command\n complete_process = subprocess.run(command, shell=True, capture_output=True)\n # Check for failure and stop\n complete_process.check_returncode()\n # Grab the results\n lines = complete_process.stderr.decode('utf-8').split('\\n')\n lines_iter = peek_iter(lines)\n # Put into results object\n results = FFMPEGResults()\n results.return_code = complete_process.returncode\n while lines_iter.has_next():\n line = next(lines_iter)\n if line.startswith('ffmpeg version'):\n results.build_info = capture_build_info(line, lines_iter)\n elif line.startswith('Input #'):\n results.input_info.append(capture_info(line, lines_iter))\n elif line.startswith('Output #'):\n results.output_info.append(capture_info(line, lines_iter))\n elif line.startswith('Stream mapping:'):\n results.stream_info.append(capture_info(line, lines_iter, first_line=False))\n elif line.startswith('Press'):\n continue\n else:\n results.details.append(line)\n return results", "def print_info():\n try:\n print_version()\n media = player.get_media()\n print('State: %s' % player.get_state())\n print('Media: %s' % bytes_to_str(media.get_mrl()))\n print('Track: %s/%s' % (player.video_get_track(), player.video_get_track_count()))\n print('Current time: %s/%s' % (player.get_time(), media.get_duration()))\n print('Position: %s' % player.get_position())\n print('FPS: %s (%d ms)' % (player.get_fps(), mspf()))\n print('Rate: %s' % player.get_rate())\n 
print('Video size: %s' % str(player.video_get_size(0))) # num=0\n print('Scale: %s' % player.video_get_scale())\n print('Aspect ratio: %s' % player.video_get_aspect_ratio())\n #print('Window:' % player.get_hwnd()\n except Exception:\n print('Error: %s' % sys.exc_info()[1])", "def ffmpeg_parse_infos(filename, print_infos=False, check_duration=True):\n\n # open the file in a pipe, provoke an error, read output\n cmd = [get_setting(\"FFMPEG_BINARY\"), \"-i\", filename]\n\n popen_params = {\"bufsize\": 10 ** 5,\n \"stdout\": sp.PIPE,\n \"stderr\": sp.PIPE,\n \"stdin\": DEVNULL}\n\n if os.name == \"nt\":\n popen_params[\"creationflags\"] = 0x08000000\n\n proc = sp.Popen(cmd, **popen_params)\n\n proc.stdout.readline()\n proc.terminate()\n infos = proc.stderr.read().decode('utf8')\n del proc\n\n if print_infos:\n # print the whole info text returned by FFMPEG\n print(infos)\n\n lines = infos.splitlines()\n if \"No such file or directory\" in lines[-1]:\n raise IOError((\"MoviePy error: the file %s could not be found !\\n\"\n \"Please check that you entered the correct \"\n \"path.\") % filename)\n\n result = dict()\n\n # get duration (in seconds)\n result['duration'] = None\n\n if check_duration:\n try:\n keyword = 'Duration: '\n line = [l for l in lines if keyword in l][0]\n match = re.findall(\"([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])\", line)[0]\n result['duration'] = cvsecs(match)\n except:\n raise IOError((\"MoviePy error: failed to read the duration of file %s.\\n\"\n \"Here are the file infos returned by ffmpeg:\\n\\n%s\") % (\n filename, infos))\n\n # get the output line that speaks about video\n lines_video = [l for l in lines if ' Video: ' in l]\n\n result['video_found'] = (lines_video != [])\n\n if result['video_found']:\n\n try:\n line = lines_video[0]\n\n # get the size, of the form 460x320 (w x h)\n match = re.search(\" [0-9]*x[0-9]*(,| )\", line)\n s = list(map(int, line[match.start():match.end() - 1].split('x')))\n result['video_size'] = s\n except:\n raise ((\"MoviePy error: failed to read video dimensions in file %s.\\n\"\n \"Here are the file infos returned by ffmpeg:\\n\\n%s\") % (\n filename, infos))\n\n # get the frame rate. Sometimes it's 'tbr', sometimes 'fps', sometimes\n # tbc, and sometimes tbc/2...\n # Current policy: Trust tbr first, then fps. 
If result is near from x*1000/1001\n # where x is 23,24,25,50, replace by x*1000/1001 (very common case for the fps).\n\n try:\n match = re.search(\"( [0-9]*.| )[0-9]* tbr\", line)\n tbr = float(line[match.start():match.end()].split(' ')[1])\n result['video_fps'] = tbr\n\n except:\n match = re.search(\"( [0-9]*.| )[0-9]* fps\", line)\n result['video_fps'] = float(line[match.start():match.end()].split(' ')[1])\n\n # It is known that a fps of 24 is often written as 24000/1001\n # but then ffmpeg nicely rounds it to 23.98, which we hate.\n coef = 1000.0 / 1001.0\n fps = result['video_fps']\n for x in [23, 24, 25, 30, 50]:\n if (fps != x) and abs(fps - x * coef) < .01:\n result['video_fps'] = x * coef\n\n if check_duration:\n result['video_nframes'] = int(result['duration'] * result['video_fps']) + 1\n result['video_duration'] = result['duration']\n else:\n result['video_nframes'] = 1\n result['video_duration'] = None\n # We could have also recomputed the duration from the number\n # of frames, as follows:\n # >>> result['video_duration'] = result['video_nframes'] / result['video_fps']\n\n lines_audio = [l for l in lines if ' Audio: ' in l]\n\n result['audio_found'] = lines_audio != []\n\n if result['audio_found']:\n line = lines_audio[0]\n try:\n match = re.search(\" [0-9]* Hz\", line)\n result['audio_fps'] = int(line[match.start() + 1:match.end()])\n except:\n result['audio_fps'] = 'unknown'\n\n return result", "def run_ffmpeg_command(self):\n\n total_dur = None\n\n cmd_with_progress = [self._cmd[0]] + [\"-progress\", \"-\", \"-nostats\"] + self._cmd_[1:]\n print(cmd_with_progress)\n stderr = []\n\n p = subprocess.Popen(\n cmd_with_progress,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=False\n )\n\n # for line in iter(p.stderr):\n while True:\n line = p.stdout.readline().decode(\"utf8\", errors='replace').strip()\n if line == '' and p.poll() is not None:\n break\n stderr.append(line.strip())\n self.output = \"\\n\".join(stderr)\n\n if not total_dur and DUR_REGEX.search(line):\n total_dur = DUR_REGEX.search(line).groupdict()\n total_dur = to_ms(**total_dur)\n continue\n if total_dur:\n result = TIME_REGEX.search(line)\n if result:\n elapsed_time = to_ms(**result.groupdict())\n yield int(elapsed_time / total_dur * 100)\n\n if p.returncode != 0:\n raise RuntimeError(\"Error running command {}: {}\".format(self.cmd, str(\"\\n\".join(stderr))))\n\n yield 100", "def help_status(self):\n print(help_msg.cmds['status'])", "def printStatus(status):\n\n print(statusInterpreter(status))", "def print_status(numcodes, totalNum, msg): #progress indicator\n print('Record: {} / {} {:>20}\\r'.format(numcodes, totalNum, msg), end='\\r'),\n sys.stdout.flush()", "def status(text):\n if SHOW_UI:\n pygame.display.set_caption(text)\n stdout.write('\\r%s' % text)\n stdout.flush()", "def check_ffmpeg():\n try:\n subprocess.call(['ffmpeg'], stderr=subprocess.DEVNULL)\n\n except FileNotFoundError:\n return False\n\n return True", "def display_status() -> None:\n nord_output = subprocess.Popen([\"nordvpn\", \"status\"],\n stdout=subprocess.PIPE)\n status = nord_output.communicate()[0].decode(\"utf-8\")\n print(status)", "def do_show_status (self, line):\n print self.trex.get_running_status()\n print termstyle.green(\"*** End of T-Rex status prompt ***\")", "def show_playing(self):\r\n\r\n if self.current_playing == \"\":\r\n print(\"No video is currently playing\")\r\n else:\r\n v = self._video_library.get_video(self.current_playing)\r\n paused_status = \"\" if self.fl_playing_video else \" 
- PAUSED\"\r\n tags = \"[\" + ' '.join(v.tags) + \"]\"\r\n video_info = v.title + \" (\" + v.video_id + \") \" + tags\r\n print(\"Currently playing: \" + video_info + paused_status)", "def print_media():\n for i, media in enumerate(shelf):\n print(f'\\33[35m{i + 1}) {media.__class__.__name__}\\33[0m '\n f'{media} \\n\\33[34mStatus => {media.status}\\33[0m')", "def print_status(status: str):\n print(status, file=sys.stderr)", "def show_streaming():\n return _run_speedify_cmd([\"show\", \"streaming\"])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the current time skew from the source video (dropped frames).
def do_skew(self):
    for conn_id, conn in self.connections.items():
        return "{} - Skew: {} frames".format(conn_id, conn["time_skew"] / 3600)
[ "def currentTime(snap):", "def demostrating_video_stab(filename, new_size=(320, 240), tracking_mode=True):\n if tracking_mode:\n from .curve import tracking\n\n def decorator(func):\n funcs = {}\n for i in range(4):\n @tracking(track_len=20, detect_interval=5)\n def f(prev, cur):\n return func(prev, cur)\n funcs[i] = f\n return funcs\n\n @decorator\n def tracked(prev, cur):\n return get_grey_images(prev, cur)\n\n print('Video ' + filename + ' processing')\n R = get_cov_from_video(filename, new_size)*1e-2\n Q, P = np.diag([1e-8, 1e-7, 4e-3, 1e-7, 1e-8, 4e-3]), np.eye(6)\n F, H = np.eye(6), np.eye(6)\n X = np.zeros((6, 1))\n kf_6 = KalmanFilterND(X, F, H, P, Q, R)\n # -----------------------------------------------------------------\n R = np.ones((2, 2))*1e-6\n Q, P = np.diag([1e-3, 1e-3]), np.eye(2)\n H = np.eye(2)\n F = np.eye(2)\n X = np.zeros((2, 1))\n kf_2 = KalmanFilterND(X, F, H, P, Q, R)\n # ------------------------------------------------------------------\n R = np.ones((3, 3))*1e-6\n F = np.eye(3)\n H = np.eye(3)\n X = np.zeros(3)\n P = np.ones(3)\n Q = np.diag([4e-3, 4e-3, 1e-7])\n kf_3 = KalmanFilterND(X, F, H, P, Q, R)\n # ------------------------------------------------------------------\n cap, n_frames, fps, prev = video_open(filename, new_size)\n\n old, smoothed_affine, smoothed_translational, smoothed_similarity = [], [], [], []\n # video writer args\n fourcc = cv2.VideoWriter_fourcc(*'H264')\n fps = cap.get(5)\n video_stab = filename[:-4] + 'stab.mp4'\n out = cv2.VideoWriter(video_stab, fourcc, fps, new_size)\n cumulative_transform = np.insert(np.array([[1, 0], [0, 1]]), [2], [0], axis=1)\n last_affine = cumulative_transform.copy()\n cumulative_smoothed1 = cumulative_transform.copy()\n cumulative_smoothed2 = cumulative_transform.copy()\n cumulative_smoothed3 = cumulative_transform.copy()\n for i in range(n_frames-1):\n # read frames\n ret2, cur = cap.read()\n cur = cv2.resize(cur, new_size, cv2.INTER_AREA)\n # get affine transform between frames\n affine = cv2.estimateRigidTransform(prev, cur, False)\n # Sometimes there is no Affine transform between frames, so we use the last\n if not np.all(affine):\n affine = last_affine\n last_affine = affine\n # Accumulated frame to frame original transform\n cumulative_transform = sum_2_affine(cumulative_transform, affine)\n # save original affine for comparing with stabilized\n old.append(cumulative_transform)\n z = np.array([affine.ravel()]).T # (a1, a2, b1, a3, a4, b2)^T\n z1 = affine[:2, 2:] # b1, b2\n z2 = affine[0][2], affine[1][2], math.atan2(affine[1][0], affine[0][0]) # (b1, b2, a)\n # predict new vector and update\n x1 = kf_6.predict_and_update(z)\n x2 = kf_2.predict_and_update(z1)\n x3 = kf_3.predict_and_update(z2)\n\n # create new Affine transform\n\n smoothed_affine_motion = np.float32(x1.reshape(2, 3))\n affine_motion = compensating_transform(smoothed_affine_motion, cumulative_transform)\n\n a11, a22 = math.cos(x3[2]), math.sin(x3[2])\n smoothed_similarity_motion = np.array([[a11, -a22, x3[0]], [a22, a11, x3[1]]])\n similarity_motion = compensating_transform(smoothed_similarity_motion, cumulative_transform)\n\n smoothed_translational_motion = np.array([[1, 0, x2[0]], [0, 1, x2[1]]])\n translational_motion = compensating_transform(smoothed_translational_motion, cumulative_transform)\n\n # get stabilized frame\n cur1 = warp(cur, affine_motion, new_size)\n cur2 = warp(cur, translational_motion, new_size)\n cur3 = warp(cur, similarity_motion, new_size)\n if i > 1 and tracking_mode:\n tr1, tr2 = tracked[0](prev, cur), 
tracked[1](prev1, cur1)\n tr3, tr4 = tracked[2](prev2, cur2), tracked[3](prev3, cur3)\n else:\n tr1, tr2, tr3, tr4 = cur, cur1, cur2, cur3\n # Accumulated frame to frame smoothed transform\n # smoothed cumulative transform affine model\n cumulative_smoothed1 = sum_2_affine(cumulative_smoothed1, smoothed_affine_motion)\n smoothed_affine.append(cumulative_smoothed1)\n # smoothed cumulative transform similarity model\n cumulative_smoothed2 = sum_2_affine(cumulative_smoothed2, smoothed_similarity_motion)\n smoothed_similarity.append(cumulative_smoothed2)\n # smoothed cumulative transform translational model\n cumulative_smoothed3 = sum_2_affine(cumulative_smoothed3, smoothed_translational_motion)\n smoothed_translational.append(cumulative_smoothed3)\n # concatenate original and stabilized frames\n result = concatenate_n_images(tr1, tr2, tr3, tr4)\n cv2.imshow('Original/smoothed', result)\n out.write(tr2)\n prev, prev1 = tr1, tr2\n prev, prev1, prev2, prev3 = tr1, tr2, tr3, tr4\n if cv2.waitKey(np.int(1000//fps)) & 0xFF == ord('q'):\n break\n\n cap.release()\n out.release()\n cv2.destroyAllWindows()\n\n # plot affine transform params trajectories\n trajectory(old, 'r')\n trajectory(smoothed_affine, 'g')\n trajectory(smoothed_similarity, 'b')\n trajectory(smoothed_translational, 'y')\n\n plt.show()", "def GC_skew(seq, window=...): # -> list[Unknown]:\n ...", "def write_time(self):\n time_now = str(datetime.now())\n loc = (self.WIDTH - 4*len(time_now), self.HEIGHT - 10)\n\n cv2.putText(self.img_data, time_now, loc, self.FONT, 0.24, self.RED, 1, cv2.LINE_AA)\n return None", "def _rtp_maxskew_ptime(self):\n for Id, fs in self.frames.iteritems():\n if Id not in self.streams:\n continue\n payload = self.streams[Id].RFC2833Payload\n mediaframes = [f for f in fs if f.rtp_p_type != payload]\n max_skew, ptime = self._get_maxskew_ptime(mediaframes)\n self.streams[Id].MaxSkew = max_skew\n self.streams[Id].Ptime = ptime", "def describe_video_scene():\n # print moving, still\n\n # Describe still, background in the video\n begining = \"In the background, there is \" # since we are describing the still objects\n sentenceStill = \"\"\n\n for key in still:\n if still[key][8]>0.25: # activation > threshold\n\n object = still[key][7]\n # print object\n cx = (still[key][5]+still[key][3])/2\n cy = (still[key][6]+still[key][4])/2\n # print key, still[key][5], still[key][3], still[key][6], still[key][4], cx, cy\n\n if (0 < cx < width/3) and (0 < cy < height/3):\n position = \"the upper left, \"\n elif (width/3 < cx < 2*width/3) and (0 < cy < height/3):\n position = \"the upper middle, \"\n elif (2*width/3 < cx < width) and (0 < cy < height/3):\n position = \"the upper right, \"\n elif (0 < cx < width/3) and (height/3 < cy < 2*height/3):\n position = \"the middle left, \"\n elif (width/3 < cx < 2*width/3) and (height/3 < cy < 2*height/3):\n position = \"the center, \"\n elif (2*width/3 < cx < width) and (height/3 < cy < 2*height/3):\n position = \"the middle right, \"\n elif (0 < cx < width/3) and (2*height/3 < cy < height):\n position = \"the lower left, \"\n elif (width/3 < cx < 2*width/3) and (2*height/3 < cy < height):\n position = \"the lower middle, \"\n elif (2*width/3 < cx < width) and (2*height/3 < cy < height):\n position = \"the middle right, \"\n\n sentenceStill += \"a \" + object + \" in \" + position\n sentenceStill = begining + sentenceStill[:-2] + \".\" # add fullstop, remove trailing comma\n print sentenceStill\n\n\n # Describe moving, foreground in the video\n sentenceMotion = \"In the foreground, 
\"\n moving_right = []\n moving_left = []\n moving_lower_edge = []\n moving_upper_edge = []\n\n for key in moving:\n object = moving[key][7]\n # print object\n if moving[key][12] in [\"towards right\"]:\n # print moving[key][12]\n moving_right.append(key)\n elif moving[key][12] in [\"towards left\"]:\n # print moving[key][12]\n moving_left.append(key)\n elif moving[key][12] in [\"towards upper edge\"]:\n # print moving[key][12]\n moving_upper_edge.append(key)\n elif moving[key][12] in [\"towards lower edge\"]:\n # print moving[key][12]\n moving_lower_edge.append(key)\n\n # print moving_right, moving_left, moving_upper_edge, moving_lower_edge\n\n if moving_right and moving_left: # if not empty\n rightMotion = [\"a \" + moving[k][7] + \" (moving \" + moving[k][12] + \" \" + moving[k][13] + \") \" for k in moving_right]\n # print rightMotion\n leftMotion = [\"a \" + moving[k][7] + \" (moving \" + moving[k][12] + \" \" + moving[k][13] + \") \" for k in moving_left]\n # print leftMotion\n sentenceMotion += ', '.join(rightMotion) + \"AND \" + ', '.join(leftMotion) + \"are moving in opposite direction.\"\n\n elif moving_right or moving_left: # if not empty\n rightMotion = [\"a \" + moving[k][7] + \" (moving \" + moving[k][12] + \" \" + moving[k][13] + \") \" for k in moving_right]\n # print rightMotion\n leftMotion = [\"a \" + moving[k][7] + \" (moving \" + moving[k][12] + \" \" + moving[k][13] + \") \" for k in moving_left]\n # print leftMotion\n sentenceMotion += ', '.join(leftMotion) + \"\" + ', '.join(rightMotion) + \"are moving in same direction.\"\n\n else:\n a = 1\n # print \"\"\n\n if moving_upper_edge and moving_upper_edge: # if not empty\n upMotion = [\"a \" + moving[k][7] + \" (moving \" + moving[k][12] + \" \" + moving[k][13] + \") \" for k in moving_upper_edge]\n # print rightMotion\n downMotion = [\"a \" + moving[k][7] + \" (moving \" + moving[k][12] + \" \" + moving[k][13] + \") \" for k in moving_lower_edge]\n # print leftMotion\n sentenceMotion += ', '.join(upMotion) + \"AND \" + ', '.join(downMotion) + \"are moving in opposite direction.\"\n\n elif moving_upper_edge or moving_upper_edge: # if not empty\n upMotion = [\"a \" + moving[k][7] + \" (moving \" + moving[k][12] + \" \" + moving[k][13] + \") \" for k in moving_upper_edge]\n # print rightMotion\n downMotion = [\"a \" + moving[k][7] + \" (moving \" + moving[k][12] + \" \" + moving[k][13] + \") \" for k in moving_lower_edge]\n # print leftMotion\n sentenceMotion += ', '.join(upMotion) + \"\" + ', '.join(downMotion) + \"are moving in same direction.\"\n\n else:\n a = 1\n # print \"\"\n\n print sentenceMotion", "def video_stabilizer(self, video=None):\r\n \r\n if video is None:\r\n video = self.video_buffer\r\n stab_video = np.zeros_like(video, dtpye=np.uint8)\r\n roi = self.get_roi(video=video, window_name='Draw ROI to stabilize the video around it')\r\n\r\n # params for ShiTomasi corner detection\r\n feature_params = dict(maxCorners=100,\r\n qualityLevel=0.1,\r\n minDistance=5,\r\n blockSize=7)\r\n \r\n # Parameters for lucas kanade optical flow\r\n lk_params = dict(winSize=(15, 15),\r\n maxLevel=8,\r\n criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\r\n \r\n m_dx, m_dy = 0, 0\r\n \r\n # Take first frame and find corners in it\r\n old_frame = video[0]\r\n \r\n rows, cols, depth = old_frame.shape\r\n old_roi = old_frame[roi['x1']: roi['x2'], roi['y1']: roi['y2']]\r\n old_gray = cv2.cvtColor(old_roi, cv2.COLOR_BGR2GRAY)\r\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\r\n p_start = 
p0.copy()\r\n \r\n for idx in range(self.length):\r\n \r\n # Get next frame\r\n frame = self.video_buffer[idx]\r\n roi = frame[roi['x1']: roi['x2'], roi['y1']: roi['y2']]\r\n frame_gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)\r\n \r\n # calculate optical flow\r\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\r\n \r\n # if the number of good features is < original fertues / 2 get new features\r\n if p1[np.where(st == 1)].shape[0] <= p_start.shape[0]/2:\r\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\r\n p_start = p0.copy()\r\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\r\n # Select good points\r\n try:\r\n good_cur = p1[np.where(st == 1)]\r\n good_old = p0[np.where(st == 1)]\r\n except TypeError as e:\r\n print('TypeError, no good points are avaliabole, error: {0}'.format(e))\r\n print('Exit video stabilizer at frame {0} out of {1}'.format(idx, self.length))\r\n break\r\n dx = []\r\n dy = [] \r\n \r\n # Draw points and calculate\r\n for i, (cur, old) in enumerate(zip(good_cur, good_old)):\r\n a, b = cur.ravel()\r\n c, d = old.ravel()\r\n dx.append(c - a)\r\n dy.append(d - b)\r\n \r\n m_dx += np.mean(dx)\r\n m_dy += np.mean(dy)\r\n \r\n M = np.float32([[1, 0, m_dx], [0, 1, m_dy]])\r\n \r\n stab_video[:] = cv2.warpAffine(frame, M, (cols, rows), \r\n cv2.INTER_NEAREST|cv2.WARP_INVERSE_MAP, \r\n cv2.BORDER_CONSTANT).copy()\r\n\r\n # Update the previous frame and previous points\r\n old_gray = frame_gray.copy()\r\n p0 = good_cur.reshape(-1, 1, 2)\r\n \r\n return stab_video", "def timestamp():\n debug(0,'Time elapsed since start: ', time_string(elapsed_time()) )", "def _get_maxskew_ptime(mediaframes):\n fst_frametime = mediaframes[0].time_relative\n fst_rtptime = mediaframes[0].rtp_timestamp\n ptimes = []\n skews = []\n\n for i, x in enumerate(mediaframes[1:], start=1):\n frametime = x.time_relative\n try:\n prev_frametime = mediaframes[i-1].time_relative\n except IndexError:\n prev_frametime = frametime\n exp = (x.rtp_timestamp - fst_rtptime) / 8000.0\n real = frametime - fst_frametime\n skews.append(round((exp - real) * 1000, 2))\n ptimes.append(\"{0:.2f}\".format(round(frametime - prev_frametime, 2)))\n\n try:\n ptime = str(int(float(Counter(ptimes).most_common(1)[0][0]) * 1000))\n except IndexError:\n ptime = \"?\"\n\n try:\n left_max = sorted(skews)[0]\n right_max = sorted(skews)[-1]\n if abs(left_max) > abs(right_max):\n max_skew = \"{0:.2f}\".format(left_max)\n else:\n max_skew = \"{0:.2f}\".format(right_max)\n except IndexError:\n max_skew = \"?\"\n\n return max_skew, ptime", "def manual_video_inspect(videofilepath): \n def get_selected_frame(cap, show_frame):\n cap.set(1, show_frame)\n ret, frame = cap.read() # read the first frame\n return frame\n # Open text file to save selected frames\n fold, name = os.path.split(videofilepath)\n frames_file = open(os.path.join(fold, name.split('.')[0])+\".txt\",\"w+\")\n cap = cv2.VideoCapture(videofilepath)\n if not cap.isOpened():\n raise FileNotFoundError('Couldnt load the file')\n print(\"\"\" Instructions\n - d: advance to next frame\n - a: go back to previous frame\n - s: select frame\n - f: save frame number\n - q: quit\n \"\"\")\n number_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n # Initialise showing the first frame\n #show_frame = 26351\n frame = get_selected_frame(cap, show_frame)\n while True:\n cv2.imshow('frame', frame)\n k = cv2.waitKey(25)\n if k == ord('d'):\n # Display next frame\n if show_frame < number_of_frames:\n 
show_frame += 1\n elif k == ord('a'):\n # Display the previous frame\n if show_frame > 1:\n show_frame -= 1\n elif k ==ord('s'):\n selected_frame = int(input('Enter frame number: '))\n if selected_frame > number_of_frames or selected_frame < 0:\n print(selected_frame, ' is an invalid option')\n show_frame = int(selected_frame)\n elif k == ord('f'): \n print('Saving frame to text')\n frames_file.write('\\n'+str(show_frame))\n elif k == ord('q'):\n frames_file.close()\n sys.exit()\n try:\n frame = get_selected_frame(cap, show_frame)\n print('Showing frame {} of {}'.format(show_frame, number_of_frames))\n except:\n raise ValueError('Could not display frame ', show_frame)", "def create_black_screen(ref_fp, dur, out_fp):\n subprocess.call([\"ffmpeg\", \"-loglevel\", \"fatal\",\\\n \"-y\",\\\n \"-i\", ref_fp,\\\n \"-preset\", \"ultrafast\",\\\n \"-vf\", \"geq=0:128:128\",\\\n \"-t\", str(round(dur, 3)),\\\n \"-video_track_timescale\", \"90k\",\\\n out_fp])", "def time_track_print():\n\tglobal _time_track_dict\n#\tif not _time_track_dict.values(): return\n\tmax_time = max(_time_track_dict.values())\n\ttupel_list = [(fn_name, \"%.2f%%\" % (100*exe_time/max_time), \"%fs\" % exe_time) for (fn_name, exe_time) in sorted(_time_track_dict.items(), key=operator.itemgetter(1), reverse=True)]\n\tmax_len_item_1 = max([len(x) for (x,_,_) in tupel_list])\n\tmax_len_item_2 = max([len(x) for (_,x,_) in tupel_list])\n\tmax_len_item_3 = max([len(x) for (_,_,x) in tupel_list])\n\tfor (x,y,z) in tupel_list:\n\t\tprint x.ljust(max_len_item_1 + 3), y.rjust(max_len_item_2), z.rjust(max_len_item_3 + 3)", "def gp_extract(filename, gp_timezone = 'US/Eastern'):\r\n global gopro_df\r\n frames = []\r\n path = 'frames/'\r\n cap = cv2.VideoCapture(filename)\r\n total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\r\n est = datetime.timedelta(seconds=(.0503074*total_frames))\r\n print('Estimated processing time: '+str(est))\r\n sys.stdout.flush()\r\n pbar = tqdm(total=total_frames, unit='frames',desc='Writing '+str(total_frames)+' frames from ' + filename + ' to '+ path)\r\n i=0\r\n while(cap.isOpened()):\r\n ret, frame = cap.read()\r\n if ret == False:\r\n break\r\n frames.append(filename+'_'+str(i)+'.jpg')\r\n cv2.imwrite(path+filename+'_'+ str(i)+'.jpg',frame)\r\n i+=1\r\n pbar.update(1)\r\n pbar.close()\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n \r\n #add timestamps to each frame\r\n subprocess.Popen([r'C:\\Users\\beck\\Documents\\CSCR\\gpmf-extract\\forallTimeExtraction.bat'])\r\n time.sleep(3)\r\n filename.replace('mp4','MP4')\r\n gp_telem = pd.read_csv(filename+'.csv')\r\n i = 0\r\n sys.stdout.flush()\r\n for date in tqdm(gp_telem['date'],desc='Converting gopro timestamps',unit='timestamps'):\r\n gp_telem.loc[i,'date'] = datetime.datetime.strptime(gp_telem['date'][i][:-1],'%Y-%m-%dT%H:%M:%S.%f').replace(tzinfo=pytz.UTC)\r\n i+=1\r\n gopro_df = pd.DataFrame()\r\n gopro_df['frame'] = frames[:len(gp_telem['date'])]\r\n gopro_df['timestamp'] = gp_telem['date']\r\n return gopro_df", "def split_video(self):\n # Normalises video\n sys(f\"docker run -v /home/ubuntu/data:/data --rm --runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=0 \"\n f\"darrengebler/weko-openpose-aws ffmpeg -i ../data/{self.filename} -filter:v fps=fps=24 ../data/output.mp4\")\n\n # Gets video duration\n video = subprocess.check_output(f\"docker run -v /home/ubuntu/data:/data --rm --runtime=nvidia -e \"\n f\"NVIDIA_VISIBLE_DEVICES=0 darrengebler/weko-openpose-aws ffprobe -v error \"\n f\"-select_streams v:0 -show_entries stream=duration -of \"\n 
f\"default=noprint_wrappers=1:nokey=1 ../data/output.mp4\", shell=True)\n video_duration = float(video.rstrip().decode())\n video_duration = int(video_duration)\n\n # Splits video into 2 second segements\n for i in range(1, video_duration - 2):\n sys(f\"docker run -v /home/ubuntu/data:/data --rm --runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=0 \"\n f\"darrengebler/weko-openpose-aws ffmpeg -i ../data/output.mp4 -ss 00:00:{i} -t 00:00:02 \"\n f\"../data/output_{i}.mp4\")\n sys(f\"mkdir /home/ubuntu/data/output/poses/{self.file}/{i}\")\n\n # Removes original file from EC2 instance\n sys(f\"rm /home/ubuntu/data/{self.filename}\")\n\n self.num_splits = video_duration", "def make_frame(t):\n mlab.view(azimuth=(360*t/duration), distance=85) # camera angle\n return mlab.screenshot(antialiased=True) # return a RGB image", "def main():\n video = 'data/011100.mp4'\n out_folder = 'data'\n filename = '/home/lea/Stage/DATA/videos/011100.mp4.txt'\n sadMotion = read_txt(filename)\n #print (str(sadMotion))\n filename = 'datatest/011100.lab'\n sadSpeech = read_lab_getAll(filename)\n print (\"len speech get all \" + str(len(sadSpeech)))\n sadSpeech = concatenate(sadSpeech, 0.5)\n sadSpeech = deleteByLength(sadSpeech, 5)\n print (\"len speech concat + delete \" + str(len(sadSpeech)))\n print (\"len motion \" + str(len(sadMotion)))\n sadMotion = concatenate(sadMotion, 0.5)\n print (\"len motion concat \" + str(len(sadMotion)))\n sadMotion = deleteByLength(sadMotion, 5)\n print (\"len motion delete \" + str(len(sadMotion)))\n print (\"motion : \" + str(sadMotion))\n print ('-------------')\n sad = timeMatch(sadMotion,sadSpeech)\n print (str(sad))\n #cut_video(sad, video, out_folder)", "def get_movie_timestamps(movie_file: PathType):\n cap = cv2.VideoCapture(str(movie_file))\n timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]\n success, frame = cap.read()\n while success:\n timestamps.append(cap.get(cv2.CAP_PROP_POS_MSEC))\n success, frame = cap.read()\n cap.release()\n return np.array(timestamps)", "def get_frames(self):\n with open(\"{}-list.txt\".format(self.outfile), \"w\") as frame_list:\n clip = VideoFileClip(\"{}-trimmed.mp4\".format(self.outfile))\n clip = clip.fx(resize, height=180)\n for i, t in enumerate(np.arange(0, self.duration, 1/self.fps)):\n frame_filename = \"{0}/{0}_{1}.png\".format(self.outfile, i)\n clip.save_frame(frame_filename, t)\n # ending timestamp in millisecond\n timestamp = (1/self.fps + t) * 1000\n\n if self.adj_timestamps:\n timestamp *= 3/self.duration # 3-second duration\n frame_list.write(\"{} {}\\n\".format(\n frame_filename, int(timestamp)))", "def make_twist_msg(v, w, frame_id, time=0.0):\n msg = TwistStamped()\n msg.header.stamp = rospy.Time.from_sec(time)\n msg.header.frame_id = frame_id\n msg.twist.linear = Vector3(*v)\n msg.twist.angular = Vector3(*w)\n\n return msg" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will perform a search for a given word horizontally along a given direction from a starting point. If the word is found it will return true, otherwise it will return false
def look_horizontal(cls, word_search, word, x, y):
    wrong_count = 0
    found = True
    found_direction = 0
    # looking both directions
    for direction in range(1, -2, -2):
        found = True
        wrong_count = 0
        found_direction = direction
        for i in range(1, len(word)):  # for each letter in the word
            if x + (i * direction) >= len(word_search[0]) or x + (i * direction) < 0:
                # off the grid
                found = False
                break
            # if the next letter in the grid is not the next letter of the word
            if word_search[y][x + (i * direction)] != (FoundWord.letter_to_int(word[i])):
                wrong_count += 1
                if wrong_count > cls.error_tolerance:
                    found = False
                    break
        if found:
            break
    if found:
        last_x = x + ((len(word) - 1) * found_direction)
        return FoundWord(x, y, last_x, y, word, wrong_count)  # horizontal word therefore y stays the same
    else:
        return None
[ "def _search_in_direction(self, word, options={'pos': 0, 'direction':0, 'x': 0, 'y': 0}):\n\t\tpos = options['pos']\n\t\tdirection = options['direction']\n\n\t\t# Comptes the next position\n\t\tx, y = self.get_next_pos(word, options)\n\n\t\t# Returns False if out of bounds\n\t\tif x < 0 or x >= self.wid or y<0 or y >= self.hgt:\n\t\t\treturn {'success':False, 'pos': -1, 'x': -1, 'y': -1, 'direction': direction}\n\n\t\t# Returns False if positional alphabet doesn't match\n\t\tif word[pos] != self.data[y*self.wid + x]:\n\t\t\treturn {'success':False, 'pos': -1, 'x': -1, 'y': -1, 'direction': direction}\n\n\t\t# If all alphabets match return True\n\t\tif pos == len(word)-1:\n\t\t\treturn {'success':True, 'pos': pos, 'x': x, 'y': y, 'direction': direction}\n\n\t\treturn self._search_in_direction(word, {'pos': pos+1, 'direction':direction, 'x': x, 'y': y})", "def word_search():\n\n # TODO Indicate which words at found in the puzzle and which ones are not\n pass", "def search(grid, word):\n word_len = len(word)\n for i, j, dir in product(range(len(grid)), range(len(grid[0])), DIRECTIONS):\n if word == extract(grid, i, j, dir, word_len):\n return i, j, dir\n return None", "def is_present(self, word: str) -> bool:\n if (\n len(self._column_major_grid)\n == 0 # check if the column major grid has been generated\n or self._column_major_grid_row_length\n != self.ROW_LENGTH # check if row length has been changed\n ):\n self._generate_column_major_grid() # generate the column major grid\n if len(word) > self.ROW_LENGTH: # check if the word is too long\n return False\n # check if the word is present in both grid forms\n for current_grid in (self._grid, self._column_major_grid):\n current_index = 0 # where to start the search from\n # this condition is only needed if a potential word found by find() ends at the very end of the grid\n while current_index < len(current_grid):\n found_index = current_grid.find(word.encode(\"ascii\"), current_index)\n if found_index == -1: # no word is found in the grid\n break\n # get the index of the last letter of the word\n end_index = found_index + len(word) - 1\n # check if the first letter is on the same row as the last letter\n if found_index // self.ROW_LENGTH == end_index // self.ROW_LENGTH:\n return True\n current_index = found_index + 1 # start from after the failed word\n return False", "def start_word_search_puzzle(word: HiddenWord) -> None:\n puzzle: SearchPuzzle = SearchWordPuzzle(word.board)\n coordinates: Iterable[str] = puzzle.coordinates(word.value)\n if not coordinates:\n _logger.info(f'\"{word}\" word is absent in a grid')\n else:\n _logger.info(\n f'Found \"{word}\" word coordinates in a grid: {coordinates}'\n )", "def search(self, word: str) -> bool:\n # Traverse through the Trie from root. 
If we don't find a character, return False.\n curNode = self.root\n for char in word:\n if char in curNode.children:\n curNode = curNode.children[char]\n else:\n return False\n # Finally if we reach the end of word, we will retuen True if isWord was set to True otherwise False\n return curNode.isWord", "def search(self, word: str) -> bool:\n # return if complete word is in the trie, not as a prefix, but \n # if the last char of the word is the Trie leaf\n node = self.searchPrefix(word)\n return node is not None and node.checkIsEnd()", "def search_keyword(motor, input_text):\n important_words = motor.hearing.get_words(input_text)\n for word in important_words:\n word_match = motor.check_word(word)\n if word_match:\n return word_match", "def look_vertical(cls, word_search, word, x, y):\n\n wrong_count = 0\n found = True\n found_direction = 0\n\n for direction in range(1, -2, -2):\n found = True\n wrong_count = 0\n found_direction = direction\n for i in range(1, len(word)):\n if y + (i * direction) >= len(word_search) or y + (i * direction) < 0:\n # off the grid\n found = False\n break\n # if the next letter in the grid is not the next letter of the word\n if word_search[y + (i * direction)][x] != (FoundWord.letter_to_int(word[i])):\n wrong_count += 1\n if wrong_count > cls.error_tolerance:\n found = False\n break\n if found:\n break\n\n if found:\n last_y = y + ((len(word) - 1) * found_direction)\n return FoundWord(x, y, x, last_y, word, wrong_count) # vertical word therefore x stays the same\n else:\n return None", "def search(self, word):\n words = self.len2words[len(word)]\n for i, char in enumerate(word):\n words = [w for w in words if char in (\".\", w[i])]\n if not words: return False\n return True", "def _find_word(words_list, search_list, start=0):\n for index, word in enumerate(words_list[start:]):\n if word in search_list:\n return index+start\n return None", "def is_word_in(text):\r\n # translation table for conversion\r\n table = string.maketrans(\"\",\"\")\r\n # parse text to remove formatting\r\n text = text.lower().translate(table, string.punctuation)\r\n # iterate each word in text and check if word is there\r\n for words in text:\r\n if word.lower() in text:\r\n## print \"word:\", word\r\n## print True\r\n return True\r\n return False", "def is_word_in_text(what_word, where_word) -> bool:\n return where_word.find(what_word) != -1", "def contains(self, word, matchPrefix=False):\n\n if len(word) == 0:\n return False\n currentNode = self.root\n for character in word.lower():\n if character in currentNode.children:\n currentNode = currentNode.children[character]\n else:\n return False\n return matchPrefix or currentNode.isEnd", "def search(self, word: str) -> bool:\n for w in self.data[len(word)]:\n if word == w or \".\" in word and re.match(word, w):\n return True\n return False", "def is_triangle_word(word):\n\tword_val = word_value(word)\n\ti = 0\n\twhile tn(i) < word_val :\n\t\ti += 1\n\tif tn(i) == word_val :\n\t\treturn True\n\treturn False", "def checkWord(self, word, sR, sC, dR, dC):\n cR = sR\n cC = sC\n # Check if we're going out of bounds\n if ((cR + (len(word) * dR)) < 0 or\n (cC + (len(word) * dC)) < 0 or\n (cR + (len(word) * dR)) > self.rows or\n (cC + (len(word) * dC)) > self.cols):\n return\n # Check if we fit\n for c in word:\n # Bad overlap\n if (self.grid[cR][cC] != c and \n self.grid[cR][cC] != '.'):\n return False\n cR += dR\n cC += dC\n return True", "def searchword(T, w):\n \n #FIXME\n pass", "def matchWord(self, *args):\r\n return 
_osgDB.Field_matchWord(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will perform a search for a given word vertically along a given direction from a starting point. If the word is found it will return true, otherwise it will return false
def look_vertical(cls, word_search, word, x, y):
    wrong_count = 0
    found = True
    found_direction = 0
    for direction in range(1, -2, -2):
        found = True
        wrong_count = 0
        found_direction = direction
        for i in range(1, len(word)):
            if y + (i * direction) >= len(word_search) or y + (i * direction) < 0:
                # off the grid
                found = False
                break
            # if the next letter in the grid is not the next letter of the word
            if word_search[y + (i * direction)][x] != (FoundWord.letter_to_int(word[i])):
                wrong_count += 1
                if wrong_count > cls.error_tolerance:
                    found = False
                    break
        if found:
            break
    if found:
        last_y = y + ((len(word) - 1) * found_direction)
        return FoundWord(x, y, x, last_y, word, wrong_count)  # vertical word therefore x stays the same
    else:
        return None
[ "def _search_in_direction(self, word, options={'pos': 0, 'direction':0, 'x': 0, 'y': 0}):\n\t\tpos = options['pos']\n\t\tdirection = options['direction']\n\n\t\t# Comptes the next position\n\t\tx, y = self.get_next_pos(word, options)\n\n\t\t# Returns False if out of bounds\n\t\tif x < 0 or x >= self.wid or y<0 or y >= self.hgt:\n\t\t\treturn {'success':False, 'pos': -1, 'x': -1, 'y': -1, 'direction': direction}\n\n\t\t# Returns False if positional alphabet doesn't match\n\t\tif word[pos] != self.data[y*self.wid + x]:\n\t\t\treturn {'success':False, 'pos': -1, 'x': -1, 'y': -1, 'direction': direction}\n\n\t\t# If all alphabets match return True\n\t\tif pos == len(word)-1:\n\t\t\treturn {'success':True, 'pos': pos, 'x': x, 'y': y, 'direction': direction}\n\n\t\treturn self._search_in_direction(word, {'pos': pos+1, 'direction':direction, 'x': x, 'y': y})", "def search(grid, word):\n word_len = len(word)\n for i, j, dir in product(range(len(grid)), range(len(grid[0])), DIRECTIONS):\n if word == extract(grid, i, j, dir, word_len):\n return i, j, dir\n return None", "def word_search():\n\n # TODO Indicate which words at found in the puzzle and which ones are not\n pass", "def look_horizontal(cls, word_search, word, x, y):\n\n wrong_count = 0\n found = True\n found_direction = 0\n\n # looking both directions\n for direction in range(1, -2, -2):\n found = True\n wrong_count = 0\n found_direction = direction\n for i in range(1, len(word)): # for each letter in the word\n if x + (i * direction) >= len(word_search[0]) or x + (i * direction) < 0:\n # off the grid\n found = False\n break\n # if the next letter in the grid is not the next letter of the word\n if word_search[y][x + (i * direction)] != (FoundWord.letter_to_int(word[i])):\n wrong_count += 1\n if wrong_count > cls.error_tolerance:\n found = False\n break\n if found:\n break\n\n if found:\n last_x = x + ((len(word) - 1) * found_direction)\n return FoundWord(x, y, last_x, y, word, wrong_count) # horizontal word therefore y stays the same\n else:\n return None", "def is_present(self, word: str) -> bool:\n if (\n len(self._column_major_grid)\n == 0 # check if the column major grid has been generated\n or self._column_major_grid_row_length\n != self.ROW_LENGTH # check if row length has been changed\n ):\n self._generate_column_major_grid() # generate the column major grid\n if len(word) > self.ROW_LENGTH: # check if the word is too long\n return False\n # check if the word is present in both grid forms\n for current_grid in (self._grid, self._column_major_grid):\n current_index = 0 # where to start the search from\n # this condition is only needed if a potential word found by find() ends at the very end of the grid\n while current_index < len(current_grid):\n found_index = current_grid.find(word.encode(\"ascii\"), current_index)\n if found_index == -1: # no word is found in the grid\n break\n # get the index of the last letter of the word\n end_index = found_index + len(word) - 1\n # check if the first letter is on the same row as the last letter\n if found_index // self.ROW_LENGTH == end_index // self.ROW_LENGTH:\n return True\n current_index = found_index + 1 # start from after the failed word\n return False", "def search(self, word: str) -> bool:\n # Traverse through the Trie from root. 
If we don't find a character, return False.\n curNode = self.root\n for char in word:\n if char in curNode.children:\n curNode = curNode.children[char]\n else:\n return False\n # Finally if we reach the end of word, we will retuen True if isWord was set to True otherwise False\n return curNode.isWord", "def search_keyword(motor, input_text):\n important_words = motor.hearing.get_words(input_text)\n for word in important_words:\n word_match = motor.check_word(word)\n if word_match:\n return word_match", "def is_word_in_text(what_word, where_word) -> bool:\n return where_word.find(what_word) != -1", "def search(self, word: str) -> bool:\n # return if complete word is in the trie, not as a prefix, but \n # if the last char of the word is the Trie leaf\n node = self.searchPrefix(word)\n return node is not None and node.checkIsEnd()", "def wordfind(myGrid, words):\r\n \r\n # (For you to implement)\r\n count = 0\r\n left = [0,-1]\r\n right = [0,1]\r\n up = [-1,0]\r\n down = [1,0]\r\n upright = [-1,1]\r\n upleft = [-1,-1]\r\n downright = [1,1]\r\n downleft = [1,-1]\r\n directions = [left,right,up,down,upright,downright,upleft,downleft]\r\n\r\n for numberOfword in range(len(words)):\r\n all_possible_letters =[]\r\n letters_capitalize = []\r\n for letter in range(0,len(words[numberOfword])):\r\n coordinates_letter = []\r\n for y in range(len(myGrid)):\r\n for x in range(len(myGrid[y])):\r\n if myGrid[y][x] == words[numberOfword][letter]\\\r\n or myGrid[y][x] == words[numberOfword][letter].upper():\r\n coordinates_letter.append([y,x])\r\n all_possible_letters.append(coordinates_letter)\r\n\r\n\r\n for direction in directions:\r\n if findLetters(all_possible_letters,direction):\r\n if len(findLetters(all_possible_letters,direction)) == len(all_possible_letters):\r\n letters_capitalize = findLetters(all_possible_letters,direction)\r\n\r\n if letters_capitalize != []:\r\n count += 1\r\n captalize(myGrid,letters_capitalize)\r\n return count", "def search_trie(word: str) -> bool:\n global trie\n lv = trie\n for letter in word:\n lv = lv.get(letter)\n if lv is None:\n return False\n if lv.get('end'):\n return True\n else:\n return False", "def start_word_search_puzzle(word: HiddenWord) -> None:\n puzzle: SearchPuzzle = SearchWordPuzzle(word.board)\n coordinates: Iterable[str] = puzzle.coordinates(word.value)\n if not coordinates:\n _logger.info(f'\"{word}\" word is absent in a grid')\n else:\n _logger.info(\n f'Found \"{word}\" word coordinates in a grid: {coordinates}'\n )", "def search(self, word):\n words = self.len2words[len(word)]\n for i, char in enumerate(word):\n words = [w for w in words if char in (\".\", w[i])]\n if not words: return False\n return True", "def is_word_in(text):\r\n # translation table for conversion\r\n table = string.maketrans(\"\",\"\")\r\n # parse text to remove formatting\r\n text = text.lower().translate(table, string.punctuation)\r\n # iterate each word in text and check if word is there\r\n for words in text:\r\n if word.lower() in text:\r\n## print \"word:\", word\r\n## print True\r\n return True\r\n return False", "def search(self, word: str) -> bool:\n for w in self.data[len(word)]:\n if word == w or \".\" in word and re.match(word, w):\n return True\n return False", "def _find_word(words_list, search_list, start=0):\n for index, word in enumerate(words_list[start:]):\n if word in search_list:\n return index+start\n return None", "def checkWord(self, word, sR, sC, dR, dC):\n cR = sR\n cC = sC\n # Check if we're going out of bounds\n if ((cR + (len(word) * dR)) < 0 
or\n (cC + (len(word) * dC)) < 0 or\n (cR + (len(word) * dR)) > self.rows or\n (cC + (len(word) * dC)) > self.cols):\n return\n # Check if we fit\n for c in word:\n # Bad overlap\n if (self.grid[cR][cC] != c and \n self.grid[cR][cC] != '.'):\n return False\n cR += dR\n cC += dC\n return True", "def _is_valid_location(self, x, y, d, word):\n if d == 'acrs':\n for i, v in enumerate(range(x, x + len(word))):\n if self.grid[y][v] is not None and self.grid[y][v] != word[i]:\n return False\n\n return True\n\n elif d == 'down':\n for i, v in enumerate(range(y, y + len(word))):\n if self.grid[v][x] is not None and self.grid[v][x] != word[i]:\n return False\n\n return True\n\n else: # 'diag'\n for yv in range(y, y + len(word)):\n for i, xv in enumerate(range(x, x + len(word))):\n if self.grid[yv][xv] is not None \\\n and self.grid[yv][xv] != word[i]:\n return False\n\n return True", "def matchWord(self, *args):\r\n return _osgDB.Field_matchWord(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter out the script tags so we can parse the XML.
def _filter_script_tags(input_xml):
    output_lines = []
    in_script = False
    for line in input_xml.splitlines():
        if "<script>" in line:
            in_script = True
        if not in_script:
            output_lines.append(line)
        if "</script>" in line:
            in_script = False
    return '\n'.join(output_lines)
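A minimal usage sketch for the helper above (editorial illustration, not part of the source record; the sample markup and the ElementTree call are assumptions):

import xml.etree.ElementTree as ET

page = """<root>
<script>
var broken = 1 < 2;  // a raw '<' here would break the XML parser
</script>
<item>hello</item>
</root>"""

# Strip the <script> block first, then parse the remaining well-formed XML.
root = ET.fromstring(_filter_script_tags(page))
print(root.find("item").text)  # -> hello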
[ "def script(self):\n if 'Suppress-Script' in self.data['record']:\n return Subtag(self.data['record']['Suppress-Script'], 'script')\n return None", "def filterHtml(self, body):\n output = ''\n soup = BeautifulSoup(body, \"html.parser\")\n for script in soup([\"script\", \"style\"]):\n script.extract()\n text = soup.find_all(text=True)\n for t in text:\n if t == \"\\\\n\":\n continue\n if len(t) > 2:\n # als er nog blacklisted elements in zitten, haal ze eruit.\n if t.parent.name not in self.blacklist:\n output += '{} '.format(t.strip())\n try:\n t = t.replace(\"\\\\n\", \"\")\n t = t.replace(\"\\\\t\", \"\")\n except:\n ctx.log.error(\"stripping failed\")\n\n return output", "def script(self):\n return [\n p.text.strip()\n for p in self.xml.findall('p')\n if p.text and p.text.strip() and not _is_technical_note(p)\n ]", "def remove_script_filters(wf, data):\n ids = set()\n for k, d in data['uidata'].items():\n if 'colorindex' not in d:\n ids.add(k)\n\n keep = []\n delete = []\n\n for obj in data['objects']:\n if obj['uid'] in ids and \\\n obj['type'] == 'alfred.workflow.input.scriptfilter':\n log.info('Removed Script Filter \"%s\" (%s)',\n obj['config']['title'], obj['uid'])\n delete.append(obj['uid'])\n continue\n keep.append(obj)\n\n data['objects'] = keep\n\n # Remove connections and uidata\n for uid in delete:\n del data['connections'][uid]\n del data['uidata'][uid]", "def _reset_script_filters(self):\n plistpath = self.wf.workflowfile('info.plist')\n\n # backup info.plist\n with open(plistpath, 'rb') as infile:\n with open(self.wf.workflowfile('info.plist.bak'), 'wb') as outfile:\n outfile.write(infile.read())\n\n script_filters = {}\n plist = readPlist(plistpath)\n\n count = 0\n keep = []\n uids = set()\n for obj in plist['objects']:\n if obj.get('type') != 'alfred.workflow.input.scriptfilter':\n keep.append(obj)\n continue\n if obj.get('keyword') in RESERVED_KEYWORDS:\n keep.append(obj)\n continue\n\n script = obj.get('config', {}).get('script', '')\n log.debug('script: %r', script)\n m = SCRIPT_SEARCH(script)\n if not m:\n keep.append(obj)\n continue\n\n count += 1\n uids.add(obj['uid'])\n\n # Overwrite objects minus script filters\n plist['objects'] = keep\n\n # Delete positioning data\n keep = {}\n uidata = plist['uidata']\n for uid in uidata:\n if uid not in uids:\n keep[uid] = uidata[uid]\n\n # Overwrite without script filter positions\n plist['uidata'] = keep\n\n # Remove connections\n keep = {}\n connections = plist['connections']\n for uid in connections:\n if uid not in uids:\n keep[uid] = connections[uid]\n\n # Overwrite without script filter connections\n plist['connections'] = keep\n\n # Re-write info.plist without script filters\n\n writePlist(plist, plistpath)\n\n log.debug('%d Script Filters deleted from info.plist', count)\n return script_filters", "def removeIncludes(self, lines):\n\t\t\n\t\tfor index, line in enumerate(lines):\n\t\t\t\n\t\t\tif re.match('^\\+(STIL|TEMA|JAVASCRIPT)', line):\n\t\t\t\tlines[index] = ''\n\t\t\t\t\t\n\t\treturn lines", "def filter_input(self, forced=False):\n content = []\n for hunk in self.hunks(forced):\n # If a file ends with a function call, say, console.log()\n # but doesn't have a semicolon, and the next file starts with\n # a (, the individual files are ok, but when combined you get an\n # error like TypeError...\n # Forcing a semicolon in between fixes it.\n if settings.COMPRESS_ENABLED or forced:\n hunk += \";\"\n content.append(hunk)\n return content", "def clean_script_files(self, remove_template=True):\n # Remove last 
script file\n self.Script.clean_script_file()\n # Remove template file\n if remove_template and self.inp_script.exists():\n print('Removing {} ...'.format(str(self.inp_script)))\n os.remove(self.inp_script)\n return", "def exclude(text):\n\n unwanted = [\"\"\"\"\"\"]", "def _exerpt_body_content(self):\n root = self.parsed_xml.getroot()\n body = root.find('body')\n results = []\n for lookfor in ['article-markup', 'galley-files', 'supplemental-files']:\n node = body.find(lookfor)\n if node is not None:\n body.remove(node)\n results.append(node)\n return results", "def removeScript(self, state: 'ScXMLScriptElt') -> \"void\":\n return _coin.ScXMLScxmlElt_removeScript(self, state)", "def source_file_filter(input_api):\n files_to_skip = list(input_api.DEFAULT_FILES_TO_SKIP) + [\n r'.+/bootstrap/.*', # third party\n r'.+/jquery/.*', # third party\n r'.+/pb\\.discovery\\.go$',\n r'.+/pb\\.discovery_test\\.go$',\n r'.+\\.pb\\.go$',\n r'.+\\.pb\\.validate\\.go$',\n r'.+\\.pb_test\\.go$',\n r'.+_dec\\.go$',\n r'.+_mux\\.go$',\n r'.+_string\\.go$',\n r'.+gae\\.py$', # symlinks from outside\n r'common/api/internal/gensupport/.*', # third party\n r'common/goroutine/goroutine_id.go',\n r'common/terminal/.*', # third party\n r'server/static/bower_components/.*', # third party\n r'server/static/upload/bower_components/.*', # third party\n ]\n files_to_check = list(input_api.DEFAULT_FILES_TO_CHECK) + [\n r'.+\\.go$',\n ]\n return lambda x: input_api.FilterSourceFile(\n x, files_to_check=files_to_check, files_to_skip=files_to_skip)", "def filter_empties(text_blocks, _config):\n return [tb for tb in text_blocks if tb and tb['blockText']]", "def get_scripts(self):\n return []", "def remove_JS(string):\n return re.sub('<script.*?</script>', '', string, re.S)", "def get_scripts(text):\n\tstart = text.find(\"OTHER SCRIPTS\")\n\tend = text.find(\"\\n\", start)\n\treturn text[start:end].strip()", "def filter_hidden(file):\n return file if os.path.basename(os.path.normpath(file.name))[0] != \".\" else None", "def parseXMLScript(commandScript):\n\n\t#Import the XML Script File\n\ttree = ETProc.parse(commandScript)\n\tif tree is None:\n\t\tprint(\"Import UnSuccessful.\")\n\treturn tree", "def _remove_non_text_nodes(self, t):\n return re.sub(r'(?u)\\((CODE|ID|CODING|META)[^)]*\\)', '', t)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save username and password to config file. Entering nothing keeps the current credentials. Returns whether or not the credentials changed.
def update_credentials():
    # Read old credentials
    config = read_config()
    try:
        old_email = config.get(ConfigParser.DEFAULTSECT, 'email')
    except ConfigParser.NoOptionError:
        old_email = ''
    try:
        old_password = config.get(ConfigParser.DEFAULTSECT, 'password')
    except ConfigParser.NoOptionError:
        old_password = ''

    # Prompt new credentials
    email = raw_input("Venmo email [{}]: "
                      .format(old_email if old_email else None))
    password = getpass.getpass(prompt="Venmo password [{}]: "
                               .format("*"*10 if old_password else None))
    email = email or old_email
    password = password or old_password
    noop = email == old_email and password == old_password
    incomplete = not email or not password
    if noop:
        print "WARN: credentials unchanged"
        return False
    if incomplete:
        print "WARN: credentials incomplete"
        return False

    # Write new credentials
    if email:
        config.set(ConfigParser.DEFAULTSECT, 'email', email)
    if password:
        config.set(ConfigParser.DEFAULTSECT, 'password', password)
    write_config(config)
    return True
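The read_config/write_config helpers referenced above are not part of this record; a plausible sketch in the same Python 2 style (the config path is an assumption):

import os
import ConfigParser

CONFIG_PATH = os.path.expanduser('~/.venmo.cfg')  # assumed location

def read_config():
    # Load the existing config file (missing file yields an empty parser)
    config = ConfigParser.ConfigParser()
    config.read(CONFIG_PATH)
    return config

def write_config(config):
    # Persist the parser state back to disk
    with open(CONFIG_PATH, 'w') as f:
        config.write(f)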
[ "def check_cred_login_and_save(self, server, user, pw):\n try:\n Api.login(server, user, pw)\n except LoginException as e:\n log.error(f\"Could not log in.\\nPW: {pw}\\nError: {e}\")\n answer = sg.popup_yes_no(\n f\"Login failed\\n\\nServer: {server}\\nUser: {user}\\nPassword: {pw}\\n\\n\"\n f\"Message: {e}\\n\\n\"\n \"Still save your credentials to the config file?\",\n title=f\"{Config.APP_NAME}\",\n )\n if answer == \"Yes\":\n log.debug(\"Still saving creds\")\n Config.write_config(server, user, pw)\n return True\n else:\n log.debug(\"Retry to login\")\n return False\n else:\n log.debug(\"Logged in. Writing config\")\n Config.write_config(server, user, pw)\n return True", "def save_the_credentials(credentials):\n credentials.save_credentials()", "def save_credentials(credentials):\n credentials. save_credential()", "def check_cred_register_and_save(self, server, user, pw):\n try:\n Api.register(server, user, pw)\n except RegisterException as e:\n log.error(f\"Could not register.\\nPW: {pw}\\nError: {e}\")\n answer = sg.popup_yes_no(\n f\"Registration failed\\n\\nServer: {server}\\nUser: {user}\\nPassword: {pw}\\n\\n\"\n f\"Message: {e}\\n\\n\"\n \"Still save your credentials to the config file?\",\n title=f\"{Config.APP_NAME}\",\n )\n if answer == \"Yes\":\n log.debug(\"Still saving creds\")\n Config.write_config(server, user, pw)\n return True\n else:\n log.debug(\"Retry to register\")\n return False\n else:\n log.debug(\"Registration OK. Writing config\")\n Config.write_config(server, user, pw)\n return True", "def save(self):\n with open(self.filename, 'w') as ciphertext:\n ciphertext.write(self.__gpg.encrypt(\n self.__encoder.encode(self.__accounts).encode(),\n sign=False,\n passphrase=getpass(\"Password: \"),\n )[0].decode())\n print(\"Credentials saved.\", file=sys.stderr)", "def set_account(which, username=None, password=None):\n import base64\n import getpass\n config = ConfigParser()\n config.read( _ENCRYPT_FILE )\n if username is None:\n if _PYTHON3:\n username = input('Enter your %s login: '%which)\n else:\n username = raw_input('Enter your %s login: '%which)\n \n if password is None: password = getpass.getpass()\n \n password_ = base64.b64encode( password.encode(\"utf-8\") )\n if _PYTHON3:\n config[which.lower()] = {\"username\":username, \"password\": password_ } \n else:\n section = which.lower()\n if section not in config.sections():\n config.add_section(section)\n config.set(section,\"username\",username)\n config.set(section,\"password\",password_)\n\n \n with open( _ENCRYPT_FILE , 'w') as configfile:\n config.write(configfile)", "def test_save_and_has_credentials(self):\n descriptor = 'unit-test'\n\n # Verify Credentials don't already exist\n assert_is_none(self._ssm_driver.load_credentials(descriptor))\n\n credentials = Credentials('CREDS', is_encrypted=False)\n self._ssm_driver.save_credentials(descriptor, credentials, KMS_ALIAS)\n\n # Verify they saved correctly\n result = self._ssm_driver.load_credentials(descriptor)\n assert_is_not_none(result)", "def _save_pass(self, password):\n keyring.set_password('PyBox', self.cfg['user'], password)", "def save_password():\n\n # Pull user info from entry forms and format into a dictionary\n site = website_entry.get()\n login = username_entry.get()\n pw = password_entry.get()\n new_data = {\n site: {\n 'email': login,\n 'password': pw,\n }\n }\n\n if len(site) == 0 or len(login) == 0 or len(pw) == 0: # Verify fields are populated\n messagebox.showwarning(title='Oops!', message='Please don\"t leave any fields empty!')\n else: 
# Delete site and password, update file\n\n try:\n # Try to open JSON file\n with open('data.json', 'r') as file:\n data = json.load(file)\n except FileNotFoundError:\n # Create file if it does not exist\n with open('data.json', 'w') as file:\n json.dump(new_data, file, indent=4)\n else:\n # Update JSON file if it existed\n data.update(new_data)\n\n with open('data.json', 'w') as file:\n json.dump(data, file, indent=4)\n finally:\n # Delete entry fields in app\n website_entry.delete(0, 'end')\n password_entry.delete(0, 'end')\n messagebox.showinfo(title='Success!', message='Login data saved successfully.')", "def _store_pypirc(self, username, password):\n rc = self._get_rc_file()\n with os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f:\n f.write(DEFAULT_PYPIRC % (username, password))", "def change_credentials(app):\n input_credentials()\n # Re-login\n try:\n duolingo_login(app)\n app.logged_in = True\n except LoginFailed:\n app.logged_in = False\n update_menu(app)", "def set_credentials(self, username, password):\n self.credentials = (username, password,)", "def test_save_and_load_credentials(self):\n raw_credentials = 'aaaa'\n descriptor = 'descriptor'\n\n encrypted_raw_credentials = encrypt_with_kms(raw_credentials, REGION, KMS_ALIAS)\n\n credentials = Credentials(encrypted_raw_credentials, True, REGION)\n assert_true(self._fs_driver.save_credentials(descriptor, credentials))\n\n loaded_credentials = self._fs_driver.load_credentials(descriptor)\n\n assert_is_not_none(loaded_credentials)\n assert_true(loaded_credentials.is_encrypted())\n assert_equal(loaded_credentials.get_data_kms_decrypted(), raw_credentials.encode())", "def _SaveAuthentication(self):\n auth_file = self._AuthFilePath()\n try:\n dir = os.path.dirname(auth_file)\n if not os.path.exists(dir):\n os.makedirs(dir)\n fh = open(auth_file, 'w')\n fh.write(self._user_cookie)\n fh.close()\n except:\n logging.fatal('Failed to save authorization file %s', auth_file, exc_info=True)\n raise ScenarioLoginError('Error saving auth file for client %s.' 
% self.name)", "def set_credentials(self):\n addon = self.utils.get_addon()\n user = self.dialogs.show_email_dialog()\n password = self.dialogs.show_password_dialog()\n _mail = self.encode(user) if user != '' else user\n _password = self.encode(password) if password != '' else password\n addon.setSetting('email', _mail)\n addon.setSetting('password', _password)\n return (user, password)", "def save(self):\n with open(\"config.py\", 'w') as configfile:\n self.config.write(configfile)\n pass\n pass", "def upload_credentials(project_context):\n file = request.files['cfgfile']\n credentials_path = project_context[\"credentials\"]\n try:\n file.save(credentials_path)\n flash('file uploaded to {}'.format(credentials_path))\n return True\n except Exception as e:\n flask.flash(\"Failed to upload credentials file; unknown problem\")\n flask.flash(\"Encountered this exception: {}\".format(e))\n return False", "def set_config(self, ignoreAdmins=True):\n if consts.USERNAME in consts.REGISTERED_ADMINS and ignoreAdmins:\n return True # do nothing\n\n with open(consts.CREWSHEET, \"r\") as f:\n data = json.load(f)\n\n hideUntilUpdate = self.hideUntilUpdate_cb.isChecked()\n\n if CHANGELOG_KEY in data[consts.USERNAME]:\n hideUntilUpdateConf = data[consts.USERNAME][CHANGELOG_KEY]\n else:\n hideUntilUpdateConf = False\n\n if hideUntilUpdateConf == hideUntilUpdate:\n # No change required\n return True\n\n data[consts.USERNAME][CHANGELOG_KEY] = hideUntilUpdate\n with open(consts.CREWSHEET, \"w\") as f:\n json.dump(data, f, indent=4)\n return True", "def store_admin_password(self):\n os.environ[\"XDG_DATA_HOME\"] = \"/tmp\"\n keyring.set_password(\"CGCS\", self.admin_username, self.admin_password)\n del os.environ[\"XDG_DATA_HOME\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print each word from a text document fetched from a URL.
def main(url):
    words = fetch_words(url)
    print_items(words)
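fetch_words and print_items are assumed to be defined alongside main; a hedged sketch of what they might look like (Python 3, urllib-based — an illustration, not the source implementation):

from urllib.request import urlopen

def fetch_words(url):
    # Download the document and split every line into words
    words = []
    with urlopen(url) as story:
        for line in story:
            words.extend(line.decode('utf-8').split())
    return words

def print_items(items):
    # Print one item per line
    for item in items:
        print(item)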
[ "def main(url):\n words = fetch_words(url )\n print_items(words)", "def fetch_words(url):\n # This is function docstring which is documentation for function, modules and scripts\n story= urlopen(url)\n story_words= []\n\n for line in story:\n line_words = line.decode('utf8').split()\n for word in line_words:\n story_words.append(word)\n\n story.close()\n return story_words", "def fetch_words(url):\n with open(url) as story:\n story_words = []\n for line in story:\n line_words = line.split()\n for word in line_words:\n story_words.append(word)\n story.close()\n return story_words", "def fetch_words(url):\n with urlopen(url) as content:\n fetched_words = []\n for line in content:\n line_words = line.decode('utf-8').split()\n for word in line_words:\n fetched_words.append(word)\n return fetched_words", "def urlread( word ):\n search_url='http://dictionary.reference.com/browse/'\n html = urllib2.urlopen( search_url + word + '?' ).read()\n return html", "def print_word_count(url):\n\twc = {}\n\twith urlopen(url) as story:\n\t\tfor line in story:\n\t\t\tline_words = line.decode('utf-8').split()\n\t\t\tfor word in line_words:\n\t\t\t\twc.setdefault(word, 0)\n\t\t\t\twc[word] += 1\n\t\n\tmost_used = 0\n\tmost_used_word = ''\n\tfor word_key in wc:\n\t\tif(wc[word_key] > most_used):\n\t\t\tmost_used = wc[word_key]\n\t\t\tmost_used_word = word_key\n\n\tprint('{} is used {} times'.format(most_used_word, wc[most_used_word]))", "def url_wordcount(url):\n\n f = urllib.urlopen(url)\n text = f.read()\n text = remove_html_tags(text)\n words = extract_words(text)\n worddict = count_words(words)\n wordcount100 = top100words(worddict)\n return wordcount100", "def url_text_extractor(url):\n article = Article(url)\n article.download()\n article.parse()\n\n return article.text", "def openPage(self,words):\r\n openURL(getURL(self.ddict,self.getMatch(words)))", "def download_wordlist():\n\n print(\"\t\\r\\n\tChoose the section you want to download:\\r\\n\")\n\n print(\" 1 Moby 14 french 27 places\")\n print(\" 2 afrikaans 15 german 28 polish\")\n print(\" 3 american 16 hindi 29 random\")\n print(\" 4 aussie 17 hungarian 30 religion\")\n print(\" 5 chinese 18 italian 31 russian\")\n print(\" 6 computer 19 japanese 32 science\")\n print(\" 7 croatian 20 latin 33 spanish\")\n print(\" 8 czech 21 literature 34 swahili\")\n print(\" 9 danish 22 movieTV 35 swedish\")\n print(\" 10 databases 23 music 36 turkish\")\n print(\" 11 dictionaries 24 names 37 yiddish\")\n print(\" 12 dutch 25 net 38 exit program\")\n print(\" 13 finnish 26 norwegian \\r\\n\")\n print(\n \"\t\\r\\n\tFiles will be downloaded from \"\n + CONFIG[\"global\"][\"dicturl\"]\n + \" repository\"\n )\n print(\n \"\t\\r\\n\tTip: After downloading wordlist, you can improve it with -w option\\r\\n\"\n )\n\n filedown = input(\"> Enter number: \")\n filedown.isdigit()\n while filedown.isdigit() == 0:\n print(\"\\r\\n[-] Wrong choice. \")\n filedown = input(\"> Enter number: \")\n filedown = str(filedown)\n while int(filedown) > 38 or int(filedown) < 0:\n print(\"\\r\\n[-] Wrong choice. 
\")\n filedown = input(\"> Enter number: \")\n filedown = str(filedown)\n\n download_wordlist_http(filedown)\n return filedown", "def parse_sentences():\n with open('questions.hyp', 'r') as rf, open('webpages.txt', 'w') as wf: # open input and output files\n for line in rf: # iterate over lines in input file (questions.hyp has the language model transcriptions)\n # get the relevant info from the line\n ls = line.split('(')\n question = ls[0][:-1]\n wavfile = ls[1].split(' ')[0]\n webpage, url = get_webpage(question) # get webpage and url\n wf.write(f'{wavfile}\\t{webpage}\\t{url}\\n') # print to file\n print('URLs written to webpages.txt')", "def get_list_of_words(page):\n soup = BeautifulSoup(page, 'html.parser')\n return soup.find_all('p')", "def main():\n \n URL = \"http://tldp.org/guides.html\"\n res = requests.get(URL)\n soup = bs(res.text, 'html.parser')\n base_url = 'https://tldp.org/'\n links = (base_url + link.a['href'] for link in soup.find_all('li') if link.a.text.strip() == \"PDF\")\n for link in links:\n os.system(f\"wget -nc {link}\") #using wget to download", "def retrieve_file(url):\n doc = urlopen(url)\n lines = doc.read().decode()\n doc.close()\n return lines", "def download_sitting(urls, file_name):\n\ttext_sitting_string = \"\"\n\ttext_sitting_list = (my_pool.map(clean_debate, urls))\n\tfor text_topic in text_sitting_list:\n\t\tif text_topic:\n\t\t\ttext_sitting_string = f\"{text_sitting_string}{text_topic}\"\n\tfile_name = f\"{file_name}.txt\"\n\twith open(file_name, 'w', encoding = \"utf8\") as f:\n\t\tf.write(text_sitting_string)", "def guessLinks(url, words, toprint):\n count = 0\n\n newURL = url\n if not url.endswith('/'):\n newURL = url + '/'\n\n for word in words:\n currURL = newURL + word\n\n response = requests.get(currURL)\n if (response.status_code == 200):\n count += 1\n pages.append(currURL)\n addLinkToDict(currURL, pageurlparam, pageforminput)\n if toprint:\n print(currURL)\n\n for ext in extensions:\n currURL = newURL + word + ext\n response = requests.get(currURL)\n if (response.status_code == 200):\n count += 1\n pages.append(currURL)\n addLinkToDict(currURL, pageurlparam, pageforminput)\n if toprint:\n print(currURL)\n return count", "def page_from_word(word):\n ...", "def print_text(txt):\n print(txt)", "def main():\n response = requests.get('https://en.wikipedia.org/wiki/List_of_sovereign_states')\n soup = bs4.BeautifulSoup(response.text, \"lxml\")\n fIn = open('countries.txt', 'w') # Open file for writing.\n listCountries(fIn, soup)\n fIn.close() # Close the file.", "def formatWordUrl(inputWord):\n url = 'https://www.thesaurus.com/browse/'\n url = url + inputWord.strip().lower().replace(' ', '%20')\n return url" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Put a host/service in downtime.
def post(self, dt):
    data = dt.as_dict()
    data.update({'action': 'add'})

    requests.post(
        pecan.request.ws_arbiter_url + "/downtime",
        data=data
    )

    return info.Info(message='Downtime received.')
[ "def delete(self, dt):\n\n data = dt.as_dict()\n data.update({'action': 'delete'})\n\n requests.post(\n pecan.request.ws_arbiter_url + \"/downtime\",\n data=data\n )\n\n return info.Info(message='Downtime received.')", "def set_host_into_maintenance_mode(session, host_mor):\n\n try:\n task_ref = session._call_method(\n session._get_vim(), \"EnterMaintenanceMode_Task\", host_mor,\n timeout=0, evacuatePoweredOffVms=False)\n session.wait_for_task(task_ref)\n except Exception as e:\n LOG.exception(_LE(\"%s\"), e)\n raise Exception(e)", "def maintenance_mode(self, mode):\n service = self._fetch_service_config(self.id)\n old_service = service.copy() # in case anything fails for rollback\n\n try:\n service['metadata']['annotations']['router.deis.io/maintenance'] = str(mode).lower()\n self._scheduler.svc.update(self.id, self.id, data=service)\n except KubeException as e:\n self._scheduler.svc.update(self.id, self.id, data=old_service)\n raise ServiceUnavailable(str(e)) from e", "def downtime(self, downtime):\n if downtime is None:\n raise ValueError(\"Invalid value for `downtime`, must not be `None`\") # noqa: E501\n\n self._downtime = downtime", "def upstart_ensure(name):\n with fabric.api.settings(warn_only=True):\n status = sudo(\"service {0} status\".format(name))\n if status.failed:\n sudo(\"service {0} start\".format(name))\n else:\n sudo(\"service {0} restart\".format(name))", "def restart_honeycomb_on_dut(node):\n\n logger.console(\n \"\\n(re)Starting Honeycomb service on node {0}\".format(node[\"host\"]))\n\n cmd = \"sudo service honeycomb restart\"\n\n ssh = SSH()\n ssh.connect(node)\n (ret_code, _, _) = ssh.exec_command_sudo(cmd)\n if int(ret_code) != 0:\n raise HoneycombError('Node {0} failed to restart Honeycomb.'.\n format(node['host']))\n else:\n logger.info(\n \"Honeycomb service restart is in progress on node {0}\".format(\n node['host']))", "def add_oh_downtime(session, tree):\n settings = configparser.ConfigParser()\n settings.read(\"settings.ini\")\n if settings.has_section(\"general\"):\n pad = settings[\"general\"].getint(\"Oh_down_extra_min\", 5)\n else:\n pad = 5\n path = settings[\"general\"].get(\"Oh_down\")\n if not path or not os.path.exists(path):\n Message.addMessage(\"WARNING: OH down time file \\\"{}\\\" not found!\".format(path))\n return\n\n s_start = session[\"date\"]\n s_end = session[\"date\"] + datetime.timedelta(hours=session[\"duration\"])\n pattern = re.compile(\n r'(APPROVED|SCHEDULED|PLANNED).*(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}).(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2})')\n\n with open(path) as f:\n for l in f:\n g = pattern.search(l)\n if g is not None:\n start = datetime.datetime.strptime(g.group(2), \"%Y-%m-%dT%H:%M\") - datetime.timedelta(minutes=pad)\n end = datetime.datetime.strptime(g.group(3), \"%Y-%m-%dT%H:%M\") + datetime.timedelta(minutes=pad)\n\n insert_station_setup_with_time(start, end, s_start, s_end, session, tree, \"OHIGGINS\", \"down\",\n \"satellite observation\")", "def add_custom_downtime(session, tree):\n read_parameter_change_from_text_file(session, tree, \"./downtime.txt\", \"down\")", "def set_host_status(h, s):\n db = get_db()\n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n db.execute('insert or replace into all_hosts (hostname, status, timestamp) values (?,?,?)', (h, s, st))\n db.commit()", "def test_usage_05_restart_mysql(self):\n tc_name = \"Usage 05 tests\"\n tc_num = 05\n instance_id = self.instance_id_list[0]\n self.client.instances.restart(instance_id)\n 
self.assertEqual(str(testutil.get_last_response_code(self.client)), \"202\",\n \"Error: Resize instance. Unexpected resp code: %r != %r\"\n % (str(testutil.get_last_response_code(self.client)), \"202\"))\n # check interim status of REBOOT\n testutil.wait_for_status(self.client, instance_id, \"REBOOT\")\n # wait for active, ensure time elapsed, record the duration\n status, elapsed_time = testutil.waitForActive(self.client,\n instanceId=instance_id)\n self.fixture_log.debug(\"Inst: %r is: %r after: %r seconds\" %\n (instance_id, status, elapsed_time))\n running_time = (datetime.utcnow() - self.starttime_list[tc_num]).seconds\n if 10 * self.ONEMIN > running_time:\n time.sleep((10 * self.ONEMIN) - running_time)\n # delete the ACTIVE instance\n if testutil.getInstanceStatus(self.client, instance_id) == \"ACTIVE\":\n self.client.instances.get(instance_id).delete()\n self.instance_id_list.remove(instance_id)\n duration = datetime.utcnow() - self.starttime_list[tc_num]\n rootAction = \"reddwarf.instance.delete\"\n # AH Event Sent - Check AH data data AFTER the DELETE\n time.sleep(self.AHDELAY)\n AHEventsList = self.dbaas_atomhopper_provider.events_by_resourceId(instance_id)\n single_event = [event for event in AHEventsList\n if event.rootAction == rootAction].pop()\n self.assertEqual(single_event.resourceId, instance_id,\n 'AH resourceID:%r != created instanceID:%r'\n % (single_event.resourceId, instance_id))\n testutil.valid_duration(duration, single_event, self.dateFormat)", "def add_uptime(self, uptime):\n query = f\"INSERT INTO {self._schema}.uptime VALUES (%s)\"\n self.execute(query, [uptime])", "def makeHostDeployed(self, name):\n host = (name, )\n self.cursor.execute(\"UPDATE hosts SET status = 2 WHERE name=?\", host)\n self.database.commit()", "def makeHostExpired(self, name):\n host = (name, )\n self.cursor.execute(\"UPDATE hosts SET status = 3 WHERE name=?\", host)\n self.database.commit()", "def host_swact_failed(self, host):\n if self._sw_update is not None:\n self._sw_update.handle_event(\n strategy.STRATEGY_EVENT.HOST_SWACT_FAILED, host)", "def install(self):\n super(SystemD, self).install()\n\n self.deploy_service_file(self.svc_file_path, self.svc_file_dest)\n self.deploy_service_file(self.env_file_path, self.env_file_dest)\n sh.systemctl.enable(self.name)\n sh.systemctl('daemon-reload')", "def start_maintenance(ServerName=None, EngineAttributes=None):\n pass", "def add_server(self, host_ips):\t\t\n\t\tself.swarm_manager.add_server(host_ips)", "def freeze_host(self, host):\n body = {\"host\": host}\n return self._update(\"/os-services/freeze\", body)", "def test_he_vm_restart(self):\n self.stop_service_and_check_he_vm(service_name=conf.POSTGRESQL_SERVICE)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete a host/service downtime.
def delete(self, dt):
    data = dt.as_dict()
    data.update({'action': 'delete'})

    requests.post(
        pecan.request.ws_arbiter_url + "/downtime",
        data=data
    )

    return info.Info(message='Downtime received.')
[ "def test_remove_scheduled_delete(self):\n cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')\n cli.remove_scheduled_delete(1)", "def host_delete(context, host_name, session=None):\n if session is None:\n session = nova_db_sa_api.get_session()\n with session.begin(subtransactions=True):\n nwkasn_list = network_association_find_all(context, host_name,\n session=session)\n for nwkasn in nwkasn_list:\n nwkasn.delete(context, session=session)\n # Delete dependents before host: VioServers\n vios_list = vio_server_find_all(context, host_name, session=session)\n for vios in vios_list:\n vios.delete(context, session=session)\n # Also need to clean up the entry in the HMC Hosts DB Table\n hmc_query = model_query(\n context, pvc_models.HmcHostsDTO, session=session)\n hmc_query = hmc_query.filter_by(host_name=host_name)\n hmc_query.soft_delete(synchronize_session=False)\n # Need to query the Service based on the Host to know what to delete\n query = model_query(context, nova_db_sa_models.Service,\n session=session)\n svc = query.filter_by(host=host_name).filter_by(topic='compute').\\\n first()\n # If the Service did exist, then we will delete it from the Database\n if svc is not None:\n query = model_query(\n context, nova_db_sa_models.ComputeNode, session=session)\n compute_node = query.filter_by(service_id=svc.id).first()\n # If the Compute Node exists, then we will delete it from the DB\n if compute_node is not None:\n nova_db_api.compute_node_delete(context, compute_node.id)\n # Clean up the Service and Compute Host entries from the Database\n nova_db_api.service_destroy(context, svc.id)", "def delete(self, *args, **kwargs):\n try:\n self.terminate_task()\n self.periodic_task.delete()\n except:\n pass\n return super(ShoalScrapeTask, self).delete(*args, **kwargs)", "def hmc_host_delete(context, host_name, session=None):\n # If we weren't given a session, then we need to create a new one\n if not session:\n session = nova_db_sa_api.get_session()\n # Create a Transaction around the delete in the Database\n with session.begin():\n query = model_query(\n context, pvc_models.HmcHostsDTO, session=session)\n query = query.filter_by(host_name=host_name)\n query.soft_delete(synchronize_session=False)", "def test_create_scheduled_delete(self):\n cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')\n cli.create_scheduled_delete([])", "def delete_VM(self, host):\n action = self.rmc.resource_groups.delete(group_name(host))\n action.wait()", "def delete_program_timings(prog_name) :\n\n db.delete_program_timings(prog_name)", "def delete_system_instance(id=None):\n pass", "def DeleteUptimeCheckConfig(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def delete_ehost(self, name):\n return self.execution_host_manager.delete_object(name)", "def delete(self):\n if not self.has('id'):\n raise Exception(\"Time entry must have an id to be deleted.\")\n\n url = \"%s/time_entries/%s\" % (TOGGL_URL, self.get('id'))\n httpexec(url, 'delete')", "def test_usage_05_restart_mysql(self):\n tc_name = \"Usage 05 tests\"\n tc_num = 05\n instance_id = self.instance_id_list[0]\n self.client.instances.restart(instance_id)\n self.assertEqual(str(testutil.get_last_response_code(self.client)), \"202\",\n \"Error: Resize instance. 
Unexpected resp code: %r != %r\"\n % (str(testutil.get_last_response_code(self.client)), \"202\"))\n # check interim status of REBOOT\n testutil.wait_for_status(self.client, instance_id, \"REBOOT\")\n # wait for active, ensure time elapsed, record the duration\n status, elapsed_time = testutil.waitForActive(self.client,\n instanceId=instance_id)\n self.fixture_log.debug(\"Inst: %r is: %r after: %r seconds\" %\n (instance_id, status, elapsed_time))\n running_time = (datetime.utcnow() - self.starttime_list[tc_num]).seconds\n if 10 * self.ONEMIN > running_time:\n time.sleep((10 * self.ONEMIN) - running_time)\n # delete the ACTIVE instance\n if testutil.getInstanceStatus(self.client, instance_id) == \"ACTIVE\":\n self.client.instances.get(instance_id).delete()\n self.instance_id_list.remove(instance_id)\n duration = datetime.utcnow() - self.starttime_list[tc_num]\n rootAction = \"reddwarf.instance.delete\"\n # AH Event Sent - Check AH data data AFTER the DELETE\n time.sleep(self.AHDELAY)\n AHEventsList = self.dbaas_atomhopper_provider.events_by_resourceId(instance_id)\n single_event = [event for event in AHEventsList\n if event.rootAction == rootAction].pop()\n self.assertEqual(single_event.resourceId, instance_id,\n 'AH resourceID:%r != created instanceID:%r'\n % (single_event.resourceId, instance_id))\n testutil.valid_duration(duration, single_event, self.dateFormat)", "def do_delete_monitor(self, args):\n lb = self.findlb(args.loadbalancer, readonly=False)\n monitor = lb.healthmonitor()\n monitor.delete()", "def delete(self, *args, **kwargs):\n if self.virtual_machines.all():\n children = [vm.hostname for vm in self.virtual_machines.all()]\n raise RuntimeError('cannot delete host until its VMs have been reassigned: {}'.format(children))\n super(Host, self).delete(*args, **kwargs)", "def delete_health_monitor(self, context, health_monitor, service):\n try:\n service_pending = \\\n self.lbdriver.delete_health_monitor(health_monitor, service)\n self.cache.put(service, self.agent_host)\n if service_pending:\n self.needs_resync = True\n except q_exception.NeutronException as exc:\n LOG.error(\"delete_health_monitor: NeutronException: %s\" % exc.msg)\n except Exception as exc:\n LOG.error(\"delete_health_monitor: Exception: %s\" % exc.message)", "def remove_uptime(self, end):\n query = f\"DELETE FROM {self._schema}.uptime WHERE time < %s\"\n self.execute(query, [end])", "def cancelMaintenance(cls, api_client, id):\n\n cmd = {'id': id}\n return api_client.cancelHostMaintenance(**cmd)", "def test_remove(self):\n meeting = MeetingFactory(type_id='ietf')\n url = urlreverse('ietf.meeting.views_proceedings.edit_meetinghosts', kwargs=dict(num=meeting.number))\n\n # create via UI so we don't have to deal with creating storage paths\n self.client.login(username='secretary', password='secretary+password')\n logo = logo_file()\n r = self._create_first_host(meeting, logo, url)\n self.assertRedirects(r, urlreverse('ietf.meeting.views.materials', kwargs=dict(num=meeting.number)))\n self.assertEqual(meeting.meetinghosts.count(), 1)\n host = meeting.meetinghosts.first()\n self.assertEqual(host.name, 'Some Sponsor, Inc.')\n logopath = Path(host.logo.path)\n self._assertMatch(logopath.name, r'logo-[a-z]+.png')\n self.assertTrue(logopath.exists())\n\n # now delete\n r = self.client.post(\n url,\n {\n 'meetinghosts-TOTAL_FORMS': '3',\n 'meetinghosts-INITIAL_FORMS': '1',\n 'meetinghosts-MIN_NUM_FORMS': '0',\n 'meetinghosts-MAX_NUM_FORMS': '1000',\n 'meetinghosts-0-id': str(host.pk),\n 'meetinghosts-0-meeting': 
str(meeting.pk),\n 'meetinghosts-0-name': 'Modified Sponsor, Ltd.',\n 'meetinghosts-0-DELETE': 'on',\n 'meetinghosts-1-id':'',\n 'meetinghosts-1-meeting': str(meeting.pk),\n 'meetinghosts-1-name': '',\n 'meetinghosts-2-id':'',\n 'meetinghosts-2-meeting': str(meeting.pk),\n 'meetinghosts-2-name': '',\n },\n )\n self.assertRedirects(r, urlreverse('ietf.meeting.views.materials', kwargs=dict(num=meeting.number)))\n self.assertEqual(meeting.meetinghosts.count(), 0)\n self.assertFalse(logopath.exists())", "def deleteEndpoints(msw, start=None):\n global cliTimeout\n\n # 21196\n mswinfo = msw.context['mswinfo']\n ver_Check = mswinfo.compareiServerVersion('4.2')\n \n if (ver_Check >= 0):\n if ( ( start is None) or (start == \"\") ) :\n msw.assertCommand(\"\"\" for reg in $(cli iedge list |grep \"Registration ID\" | awk '{print $3}' | uniq); do for port in $(cli iedge lkup $reg | grep -w Port | grep -v NAT | awk '{print $2}'); do cli iedge delete $reg $port; done done\"\"\" ,timeout=cliTimeout)\n else :\n msw.assertCommand(\"\"\" for reg in $(cli iedge list |grep \"Registration ID\" |grep \"%s\" |awk '{print $3}' | uniq); do for port in $(cli iedge lkup $reg | grep -w Port | grep -v NAT | awk '{print $2}'); do cli iedge delete $reg $port; done done\"\"\" %start ,timeout=cliTimeout)\n elif ((ver_Check < 0) and (ver_Check != -99)):\n if ( ( start is None) or (start == \"\") ) :\n msw.assertCommand(\"\"\" for reg in $(cli iedge list |grep Registration|awk '{print $3}');do for port in $(cli iedge lkup $reg | grep -w Port | grep -v NAT | awk '{print $2}'); do cli iedge delete $reg $port; done done\"\"\" ,timeout=cliTimeout)\n\n else :\n msw.assertCommand(\"\"\" for reg in $(cli iedge list |grep Registration|grep \"%s\" |awk '{print $3}');do for port in $(cli iedge lkup $reg | grep -w Port | grep -v NAT | awk '{print $2}'); do cli iedge delete $reg $port; done done\"\"\" %start ,timeout=cliTimeout)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert zone climate state.
def _assert_zone_state(hass, mode, hvac, current_temp, target_temp, preset, action):
    state = hass.states.get("climate.zone_1")
    assert hass.states.is_state("climate.zone_1", hvac)
    assert state.attributes["current_temperature"] == current_temp
    assert state.attributes["max_temp"] == Zone.MAX_TARGET_TEMP
    assert state.attributes["min_temp"] == Zone.MIN_TARGET_HEATING_TEMP
    assert state.attributes["temperature"] == target_temp
    assert state.attributes["hvac_action"] == action
    assert state.attributes["preset_mode"] == preset

    expected_modes = {HVACMode.OFF, HVACMode.AUTO, HVACMode.FAN_ONLY}
    zone = SystemManagerMock.data.get("get_zones")[0]
    if zone.cooling:
        expected_modes.update({HVACMode.COOL})
    assert set(state.attributes["hvac_modes"]) == expected_modes
[ "def test_ZoneStats(self):\n zone_list = self.xml_obj.stats.zone_stats\n self.assertEqual(zone_list['dom1.example.org']['_default']['serial'], 266)\n self.assertEqual(zone_list['dom1.example.org']['_default']['qrysuccess']['value'], 11508)", "def test_ExpectedZoneCount(self):\n self.assertEqual(len(self.xml_obj.stats.zone_stats.keys()), 4)", "def test_isc_zone_name_passing(self):\n test_data = [\n 'red',\n 'red_zone',\n 'red_zone-dmz',\n ]\n result = zone_name.runTests(test_data, failureTests=False)\n self.assertTrue(result[0])", "def test_get_zone(self):\n\n zone = \"America/Inuvik\" if not HAS_TZLOCAL else \"America/Denver\"\n\n # first set the zone\n assert self.run_function(\"timezone.set_zone\", [zone])\n\n # check it set the correct zone\n ret = self.run_function(\"timezone.get_zone\")\n assert zone in ret\n\n # compare zones\n assert self.run_function(\"timezone.zone_compare\", [zone])", "def test_zone_valid(zone_name):\n magic = get_magic(zone_name)\n assert magic == b\"TZif\"", "def _validate_test_data_for_zone(\n self,\n zone_name: str,\n items: List[TestItem],\n ) -> int:\n zone_info = self.zone_infos[zone_name]\n zone_specifier = ZoneSpecifier(\n zone_info_data=zone_info,\n viewing_months=self.viewing_months,\n debug=self.debug_specifier,\n in_place_transitions=self.in_place_transitions,\n optimize_candidates=self.optimize_candidates)\n\n num_errors = 0\n for item in items:\n if self.year is not None and self.year != item.y:\n continue\n\n # Print out diagnostics if mismatch detected or if debug flag given\n unix_seconds = item.epoch + SECONDS_SINCE_UNIX_EPOCH\n ldt = datetime.utcfromtimestamp(unix_seconds)\n header = (\n f'======== Testing {zone_name}; '\n f'at {_test_item_to_string(item)}w; '\n f'utc {ldt}; '\n f'epoch {item.epoch}; '\n f'unix {unix_seconds}'\n )\n\n if self.debug_specifier:\n logging.info(header)\n\n info = zone_specifier.get_timezone_info_for_seconds(item.epoch)\n if not info:\n logging.info(\"timezone info not found\")\n continue\n\n is_matched = info.total_offset == item.total_offset\n status = '**Matched**' if is_matched else '**Mismatched**'\n ace_time_string = to_utc_string(info.utc_offset, info.dst_offset)\n utc_string = to_utc_string(\n item.total_offset - item.dst_offset,\n item.dst_offset\n )\n body = (\n f'{status}: '\n f'AceTime({ace_time_string}); '\n f'Expected({utc_string})'\n )\n if is_matched:\n if self.debug_specifier:\n logging.info(body)\n zone_specifier.print_matches_and_transitions()\n else:\n num_errors += 1\n if not self.debug_specifier:\n logging.error(header)\n logging.error(body)\n zone_specifier.print_matches_and_transitions()\n\n return num_errors", "def test_tz(host):\n actual_output = host.run('date +\"%Z %z\"').stdout\n assert 'AEST' in actual_output", "def test_isStandardTimezone(self):\n\n data = (\n (\"America/New_York\", True),\n (\"America/Los_Angeles\", True),\n (\"America/Cupertino\", False),\n (\"America/FooBar\", False),\n )\n\n for tzid, result in data:\n self.assertEqual(TimezoneDatabase.isStandardTimezone(tzid), result, \"Failed {}\".format(tzid))", "def istargetzone(self):\n flag = False\n cntr=0\n position = self.sim.pose[:3] \n \n #Set upper bound and lower bound for target zone\n target_bounds = 40 \n lower_bounds = np.array([-target_bounds / 2, -target_bounds / 2, 0])\n upper_bounds = np.array([ target_bounds / 2, target_bounds / 2, target_bounds])\n \n #Set boundary conditions\n lower_pos = (self.target_pos + lower_bounds)\n upper_pos = (self.target_pos + upper_bounds)\n \n \n #Check whether the copter has 
landed with the boundaries of target zone\n for j in range(3): \n \n #Check for the boundary conditions\n if (lower_pos[j] <= position[j] and position[j] < upper_pos[j]):\n cntr = cntr + 1 \n \n #Check if all 3 conditions have been satisfied\n if cntr==3:\n flag = True\n \n return flag", "def verify_zone(self, device, **kwargs):\n return_value = self._common_search_processing(\n device=device,\n previous_entry_list_keyword=\"zone_entry_list\",\n get_entry_method=self.get_zone,\n kwargs=kwargs,\n )\n device.log(message=\"{} return value: {}\".format(self.tool.get_current_function_name(), return_value))\n return return_value", "def check_zone(self, name):\n if not self.has_zone(name):\n raise MissingZoneError(name)", "def test_isc_zone_name_dict_passing(self):\n test_data = 'white-lab.example.net'\n expected_result = {'zone_name': 'white-lab.example.net'}\n assertParserResultDictTrue(zone_name,\n test_data,\n expected_result)", "def test_get_zonedata_tostring_correct(self):\n got_var, const_bzone = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_basezone')\n if not got_var:\n print 'Couldn\\'t get variable. Test will not run.'\n sys.exit()\n got_var, const_zone = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_zone')\n if not got_var:\n print 'Couldn\\'t get variable. Test will not run.'\n sys.exit()\n # Checks base zonedata\n bzone = rapid_zonedata.get_zonedata_tostring(const_bzone)\n self.assertEqual(bzone, 'Base zonedata: z0 ([False,0.3,0.3,0.3,0.03,0.3,0.03])')\n # Checks zonedata\n zone = rapid_zonedata.get_zonedata_tostring(const_zone)\n self.assertEqual(zone, 'Zonedata: [False,0.3,0.3,0.3,0.5,0.3,0.5]')", "def _test_zone(self, zone_name, filename):\n self.kwargs.update({\n 'zone': zone_name,\n 'path': os.path.join(TEST_DIR, 'zone_files', filename),\n 'export': os.path.join(TEST_DIR, 'zone_files', filename + '_export.txt')\n })\n # Import from zone file\n self.cmd('network dns zone import -n {zone} -g {rg} --file-name \"{path}\"')\n records1 = self.cmd('network dns record-set list -g {rg} -z {zone}').get_output_in_json()\n\n # Export zone file and delete the zone\n self.cmd('network dns zone export -g {rg} -n {zone} --file-name \"{export}\"')\n self.cmd('network dns zone delete -g {rg} -n {zone} -y')\n\n # Reimport zone file and verify both record sets are equivalent\n self.cmd('network dns zone import -n {zone} -g {rg} --file-name \"{export}\"')\n records2 = self.cmd('network dns record-set list -g {rg} -z {zone}').get_output_in_json()\n\n # verify that each record in the original import is unchanged after export/re-import\n self._check_records(records1, records2)", "def assertStateOK(root):", "def state(self):\n return self._def[ATTR_VALUE_FN](self._zone)", "def test_zone_get_function():\n response = zone.get(domain_name='example.com')\n assert response.success\n\n payload = response.payload\n assert payload['url'] == 'https://api.cloudns.net/dns/get-zone-info.json'\n assert payload['params']['domain-name'] == 'example.com'", "def test_diag_stategy(self):\n # Add access strategies to the care plan intervention\n cp = INTERVENTION.CARE_PLAN\n cp.public_access = False # turn off public access to force strategy\n cp_id = cp.id\n\n with SessionScope(db):\n d = {'function': 'observation_check',\n 'kwargs': [{'name': 'display', 'value':\n CC.PCaDIAG.codings[0].display},\n {'name': 'boolean_value', 'value': 'true'}]}\n strat = AccessStrategy(\n name=\"has PCa diagnosis\",\n intervention_id=cp_id,\n 
function_details=json.dumps(d))\n db.session.add(strat)\n db.session.commit()\n cp = INTERVENTION.CARE_PLAN\n user = db.session.merge(self.test_user)\n\n # Prior to PCa dx, user shouldn't have access\n self.assertFalse(cp.display_for_user(user).access)\n\n # Bless the test user with PCa diagnosis\n self.login()\n user.save_constrained_observation(\n codeable_concept=CC.PCaDIAG, value_quantity=CC.TRUE_VALUE,\n audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID))\n with SessionScope(db):\n db.session.commit()\n user, cp = map(db.session.merge, (user, cp))\n\n self.assertTrue(cp.display_for_user(user).access)", "def named_checkzone(self, zone_file, root_domain):\n # Make sure we have the write tools to do the job\n command_str = \"test -f {0}\".format(NAMED_CHECKZONE)\n stdout, stderr, returncode = self.shell_out(command_str)\n if returncode != 0:\n raise BuildError(\"Couldn't find named-checkzone.\")\n\n # Check the zone file.\n command_str = \"{0} {1} {2} {3}\".format(\n NAMED_CHECKZONE, self.NAMED_CHECKZONE_OPTS,\n root_domain.name, zone_file)\n self.log(\n \"Calling `{0} {1} {2}`\".\n format(NAMED_CHECKZONE, root_domain.name, zone_file),\n root_domain=root_domain\n )\n stdout, stderr, returncode = self.shell_out(command_str)\n if returncode != 0:\n raise BuildError(\"\\nnamed-checkzone failed on zone {0}. \"\n \"\\ncommand: {1}\\nstdout: {2}\\nstderr:{3}\\n\".\n format(root_domain.name, command_str, stdout,\n stderr))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructs a Datastore key for a User entity. We use the user's email as the key.
def user_key(id):
    return ndb.Key(User, id)
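Hypothetical usage with the App Engine ndb client (the User model shown here is an assumption; the real application defines its own):

from google.appengine.ext import ndb

class User(ndb.Model):
    name = ndb.StringProperty()

key = user_key('alice@example.com')  # the email string becomes the entity id
user = key.get()                     # returns the stored User entity, or None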
[ "def user_key(user_number=DEFAULT_USER_NUMBER):\n\treturn ndb.Key('User', user_number)", "def build_key(cls, user_id):\n key = ndb.Key(cls, user_id)\n return key", "def load_user_key(client, user_id):\n key = None\n key = client.key(config.USER_ENTITY_TYPE, user_id)\n return key", "def _user_key(self, email):\n logging.info(\"Trying to encrypt for %s\", email)\n\n # Explicit matching of email and uid.email necessary.\n # Otherwise gpg.keylist will return a list of keys\n # for searches like \"n\"\n for key in self.gpg.keylist(email):\n for uid in key.uids:\n if uid.email == email:\n return key.subkeys[0].keyid\n\n return None", "def identity(user, domain, keypair) -> Identity:\n identity = Identity.objects.create(\n actor_uri=\"https://example.com/@test@example.com/\",\n inbox_uri=\"https://example.com/@test@example.com/inbox/\",\n private_key=keypair[\"private_key\"],\n public_key=keypair[\"public_key\"],\n username=\"test\",\n domain=domain,\n name=\"Test User\",\n local=True,\n )\n identity.users.set([user])\n return identity", "def _create_key(self):\n return uuid.uuid4().hex", "def key(self, domain, username):\n userpath = self.path.child(domain).child(username + \".info\")\n if userpath.exists():\n with userpath.open() as f:\n data = parseString(f.read())[0]\n return data['key']", "def generate_and_upload_device_signing_key(\n self, user_id: str, device_id: str\n ) -> SigningKey:\n sk = key.generate_signing_key(device_id)\n\n device_dict = build_device_dict(user_id, device_id, sk)\n\n self.get_success(\n self.hs.get_e2e_keys_handler().upload_keys_for_user(\n user_id,\n device_id,\n {\"device_keys\": device_dict},\n )\n )\n return sk", "def generate_key(self):\n try:\n return self.proto.genuid()\n except ValueError:\n return uuid.uuid4()", "def _newProfileNDBKey(old_key):\n return ndb.Key(\n user_model.User._get_kind(), old_key.parent().id(),\n profile_model.Profile._get_kind(), old_key.id())", "def generate_confirmation_key(user_email):\n try:\n salt = hashlib.sha1(str(random()).encode('utf-8')).hexdigest()\n email = user_email\n confirmation_key = hashlib.sha1((salt + email).encode('utf-8')).hexdigest()\n return confirmation_key\n except Exception as e:\n raise HTTP_400_BAD_REQUEST", "def googleplus_user_id(self):\n return self.key.string_id()", "def entity_key(entity):\n key = entity.key or entity.string\n return ':'.join([entity.resource.path, key])", "def create_user(dct):\n return User.dict2user(dct)", "def generate_user_keys(cookies_disabled=False) -> dict:\n if cookies_disabled:\n return app.default_key_set\n\n # Generate/regenerate unique key per user\n return {\n 'element_key': Fernet.generate_key(),\n 'text_key': Fernet.generate_key()\n }", "def __init__(self, *args, **kwargs):\r\n record.Record.__init__(self, *args, **kwargs)\r\n self.key = UserKey()", "def create_key(self, email):\n key = (\n self.resource.projects()\n .serviceAccounts()\n .keys()\n .create(name=f\"projects/-/serviceAccounts/{email}\", body={})\n .execute()\n )\n bucket_name = os.environ[\"KEY_FILES_BUCKET\"]\n bucket_gs = f\"gs://{bucket_name}/keys\"\n key_file = f\"{key['name']}.json\"\n with SimpleStorage(bucket_gs) as storage:\n storage.put_file(\n file_path=key_file,\n content=base64.b64decode(key[\"privateKeyData\"]),\n compress=None,\n cache_control=\"no-cache\",\n )\n\n url = utils.generate_signed_url(bucket_name, f\"keys/{key_file}\")\n msg = f\"Key created `{key['name'].split('/')[-1]}`.\"\n msg = f\"{msg}\\nAvailable <{url}|here> (link valid for\"\n return f\"{msg} 
{int(os.environ['KEY_LINK_EXPIRATION'])/60}m).\"", "def _get_key(\n self, requester: Optional[Requester], key: Optional[Hashable]\n ) -> Hashable:\n if key is None:\n if not requester:\n raise ValueError(\"Must supply at least one of `requester` or `key`\")\n\n key = requester.user.to_string()\n return key", "def get_identity(self, processor, user, sp_config):\n sp_mapping = sp_config.get('attribute_mapping', {'username': 'username'})\n return processor.create_identity(user, sp_mapping)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This test ensures a product is correctly updated when imported data differs from stored data.
def test_pies_product_update(updated_test_brand_data, test_brand_record):
    PiesDataStorage(updated_test_brand_data).store_brand_data()
    compare_products_to_db(updated_test_brand_data, test_brand_record)
[ "def test_product_ca_import_update(self):\n self.import_file(\"custom_attribute_tests.csv\")\n self.import_file(\"custom_attribute_update_tests.csv\")\n prod_0 = Product.query.filter(Product.slug == \"prod0\").first()\n prod_0_expected = {\n u\"normal text\": u\"edited normal text\",\n u\"man text\": u\"edited man text\",\n u\"normal RT\": u\"some <br> edited rich <br> text\",\n u\"man RT\": u\"other edited <br> rich text\",\n u\"normal Date\": u\"2017-09-14 00:00:00\",\n u\"man Date\": u\"2018-01-17 00:00:00\",\n u\"normal CH\": u\"1\",\n u\"man CH\": u\"0\",\n u\"normal select\": u\"a\",\n u\"man select\": u\"f\",\n u\"normal person\": u\"Person\",\n u\"man person\": u\"Person\",\n }\n prod_0_new = {c.custom_attribute.title: c.attribute_value\n for c in prod_0.custom_attribute_values}\n self.assertEqual(prod_0_expected, prod_0_new)", "def test_changing_products(self):\n r = ApprovedRevisionFactory()\n d = r.document\n prod_desktop = ProductFactory(title=\"desktop\")\n prod_mobile = ProductFactory(title=\"mobile\")\n\n data = new_document_data()\n data.update(\n {\n \"products\": [prod_desktop.id, prod_mobile.id],\n \"title\": d.title,\n \"slug\": d.slug,\n \"form\": \"doc\",\n }\n )\n self.client.post(reverse(\"wiki.edit_document_metadata\", args=[d.slug]), data)\n\n self.assertEqual(\n sorted(Document.objects.get(id=d.id).products.values_list(\"id\", flat=True)),\n sorted([prod.id for prod in [prod_desktop, prod_mobile]]),\n )\n\n data.update({\"products\": [prod_desktop.id], \"form\": \"doc\"})\n self.client.post(reverse(\"wiki.edit_document_metadata\", args=[data[\"slug\"]]), data)\n self.assertEqual(\n sorted(Document.objects.get(id=d.id).products.values_list(\"id\", flat=True)),\n sorted([prod.id for prod in [prod_desktop]]),\n )", "def test_product_creation(self):\n\n old_products = Product.objects.count()\n create_products(self.products['products'])\n new_products = Product.objects.count()\n self.assertNotEqual(old_products, new_products)", "def test_update_without_product_id(self):\n self.existing.pop('product_id')\n self.existing['name'] = 'new_name'\n id = self.existing.pop('id')\n response = self.client.put(reverse('contentdeliveryrepos-detail', args=[1]), self.existing, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.existing['product_id'] = None\n self.existing['id'] = id\n self.assertDictEqual(dict(response.data), self.existing)\n self.assertNumChanges([1])", "def test_update(self, record):", "def test_catalog_special_price_storage_v1_update_post(self):\n pass", "def test_products_are_products(self):\r\n prd1 = prd.Product.objects.get(\r\n code='0000000000001',\r\n name='product 001',\r\n generic_name='product prd 001',\r\n brands='Brand of prd 001',\r\n stores='stores001',\r\n url='url001')\r\n\r\n prd2 = prd.Product.objects.get(\r\n code='0000000000002',\r\n name='product 002',\r\n generic_name='product prd 002',\r\n brands='Brand of prd 002',\r\n stores='stores002',\r\n url='url002')\r\n\r\n self.assertEqual(prd1.code, '0000000000001')\r\n self.assertEqual(prd2.code, '0000000000002')", "def test_product_update(self):\n httpretty.register_uri(\n httpretty.PUT,\n self.endpoint_url(\"/product/5499\"),\n content_type='text/json',\n body='{\"status\": true, \"message\": \"Products retrieved\", \"data\":[{}]}',\n status=201,\n )\n\n response = Product.update(product_id=5499, name=\"Product pypaystack test\",\n description=\"my test description\", price=500000000,\n currency=\"USD\"\n )\n self.assertEqual(response['status'], True)", "def 
test_update(self):\n # this is really tested graphically, no unit test here\n pass", "def test_pricing_unchanged_if_update_unrelated(self):\n order = OrderFactory()\n pre_update_pricing = get_pricing_from_order(order)\n\n order.description = 'updated description'\n order.save()\n\n order.refresh_from_db()\n post_update_pricing = get_pricing_from_order(order)\n\n assert pre_update_pricing == post_update_pricing", "def test_multiple_purchases_update_product_price(self):\n\n # Generate timestamps for correct timing of purchases and updates\n t1 = datetime.datetime.now() - datetime.timedelta(seconds=30)\n t2 = datetime.datetime.now() - datetime.timedelta(seconds=25)\n t3 = datetime.datetime.now() - datetime.timedelta(seconds=20)\n t4 = datetime.datetime.now() - datetime.timedelta(seconds=15)\n t5 = datetime.datetime.now() - datetime.timedelta(seconds=10)\n t6 = datetime.datetime.now() - datetime.timedelta(seconds=5)\n # Update product price\n pp = ProductPrice(product_id=1, price=300, admin_id=1, timestamp=t1)\n db.session.add(pp)\n db.session.commit()\n # Get the first product price\n product = Product.query.filter_by(id=1).first()\n pr_1 = copy(product.price)\n # Do first purchase\n purchase = Purchase(user_id=1, product_id=1, amount=1, timestamp=t2)\n db.session.add(purchase)\n db.session.commit()\n # Update product price\n pp = ProductPrice(product_id=1, price=100, admin_id=1, timestamp=t3)\n db.session.add(pp)\n db.session.commit()\n # Get the second product price\n product = Product.query.filter_by(id=1).first()\n pr_2 = copy(product.price)\n # Do second purchase\n purchase = Purchase(user_id=1, product_id=1, amount=1, timestamp=t4)\n db.session.add(purchase)\n # Update product price\n pp = ProductPrice(product_id=1, price=600, admin_id=1, timestamp=t5)\n db.session.add(pp)\n db.session.commit()\n # Get the third product price\n product = Product.query.filter_by(id=1).first()\n pr_3 = copy(product.price)\n # Do third purchase\n purchase = Purchase(user_id=1, product_id=1, amount=1, timestamp=t6)\n db.session.add(purchase)\n db.session.commit()\n\n # Check the product prices\n self.assertEqual(pr_1, 300)\n self.assertEqual(pr_2, 100)\n self.assertEqual(pr_3, 600)\n\n # Check user credit\n user = User.query.filter_by(id=1).first()\n self.assertEqual(len(user.purchases.all()), 3)\n self.assertEqual(user.credit, -(pr_1 + pr_2 + pr_3))\n\n # Check purchase prices\n purchases = Purchase.query.all()\n self.assertEqual(purchases[0].price, 300)\n self.assertEqual(purchases[1].price, 100)\n self.assertEqual(purchases[2].price, 600)", "def test_update_user_inventory_behavior_data(self):\n pass", "def test_bulk_update_ifc_property(self):\n pass", "def test_update_escalation(self):\n pass", "def test_product_product_strengths_incremental_with_new_impressions_two_new_products(self):\n # Saves two new, identical products. Initially, no users will have impressions on them.\n id_twin_product_1 = \"p_tec_TWIN_1\"\n id_twin_product_2 = \"p_tec_TWIN_2\"\n\n date = self.session_context.get_present_date() - dt.timedelta(days=2)\n\n twin_product_1 = {\"external_id\": id_twin_product_1,\n \"language\": \"english\",\n \"date\": date,\n \"resources\": {\"title\": \"Whatever Gets You Through The Night\"},\n \"full_content\": \"\"\"Begin. Technology. Technology. This is all we got. 
End.\"\"\",\n \"category\": \"Nonsense\"}\n\n twin_product_2 = {\"external_id\": id_twin_product_2,\n \"language\": \"english\",\n \"date\": date,\n \"resources\": {\"title\": \"Whatever Gets You Through The Night\"},\n \"full_content\": \"\"\"Begin. Technology. Technology. This is all we got. End.\"\"\",\n \"category\": \"Nonsense\"}\n\n self.db_proxy.insert_product(twin_product_1)\n self.db_proxy.insert_product(twin_product_2)\n\n user1 = \"u_eco_1\"\n user2 = \"u_eco_2\"\n activity_type = self.session_context.activities_by_rating[5][0]\n\n # Saves an impression on just one of the new products\n date = pytz.utc.localize(dateutil.parser.parse(\"1988-11-06 9:00:00\"))\n self.db_proxy.increment_impression_summary(user_id=user1, product_id=id_twin_product_1,\n date=date, anonymous=False)\n\n # Saves a couple of activities for another user using the new products\n\n activity = {\"external_user_id\": user2,\n \"external_product_id\": id_twin_product_1,\n \"activity\": activity_type,\n \"created_at\": self.session_context.get_present_date()}\n pt.update_templates(self.session_context, activity)\n tasks.update_summaries(self.session_context, activity)\n\n self.compare_incremental_vs_from_scratch()\n\n activity = {\"external_user_id\": user2,\n \"external_product_id\": id_twin_product_2,\n \"activity\": activity_type,\n \"created_at\": self.session_context.get_present_date()}\n pt.update_templates(self.session_context, activity)\n tasks.update_summaries(self.session_context, activity)\n\n self.compare_incremental_vs_from_scratch()", "def test_bulk_full_update_ifc_property(self):\n pass", "def test_update(self):\n product_id = None\n product = Product.create(name=\"apple\", price=2)\n product_id = product.id\n\n # Try to change the price.\n with self.db.transaction:\n product.price = 3\n\n # Check that the product has been updated.\n product = Product.get(id=product_id)\n self.assertEqual(product.price, 3)\n\n # Now try a new update, but make sure it fails.\n try:\n with self.db.transaction:\n product.price = 4\n raise InterruptedError\n except InterruptedError:\n pass\n\n # There should have been a rollback, so the price\n # shouldn't have changed.\n self.assertEqual(product.price, 3)\n product = Product.get(id=product_id)\n self.assertEqual(product.price, 3)\n\n # Try again, to make sure double-transactions don't cause a crash.\n try:\n with self.db.transaction:\n product.price = 4\n raise InterruptedError\n except InterruptedError:\n pass\n\n # There should have been a rollback, so the price\n # shouldn't have changed.\n self.assertEqual(product.price, 3)\n product = Product.get(id=product_id)\n self.assertEqual(product.price, 3)", "def test_added_product_exists(self):\n product = Product.objects.get(unitprice=4.1)\n self.assertEqual(product.productname, \"Hillo\")", "def test_02_update_variant_archive_2_value(self):\n Product = self.env['product.product']\n\n def unlink(slef):\n raise Exception('just')\n Product._patch_method('unlink', unlink)\n\n variants_2x2 = self.template.product_variant_ids\n self._assert_2color_x_2size()\n archived_variants = self._get_archived_variants()\n self.assertFalse(archived_variants)\n\n # CASE remove one attribute line (going from 2*2 to 2*1)\n # Since they can't be unlinked, existing variants should be archived.\n self._remove_ptal_size()\n variants_2x0 = self.template.product_variant_ids\n self._assert_2color_x_0size()\n archived_variants = self._get_archived_variants()\n self.assertEqual(archived_variants, variants_2x2)\n 
self._assert_2color_x_2size(archived_variants)\n\n # Add the line just removed, so get back the previous variants.\n # Since they can't be unlinked, existing variants should be archived.\n self._add_ptal_size_s_m()\n self.assertEqual(self.template.product_variant_ids, variants_2x2)\n self._assert_2color_x_2size()\n archived_variants = self._get_archived_variants()\n self.assertEqual(archived_variants, variants_2x0)\n self._assert_2color_x_0size(archived_variants)\n\n # we redo the whole remove/read to check\n self._remove_ptal_size()\n self.assertEqual(self.template.product_variant_ids, variants_2x0)\n self._assert_2color_x_0size()\n archived_variants = self._get_archived_variants()\n self.assertEqual(archived_variants, variants_2x2)\n self._assert_2color_x_2size(archived_variants)\n\n self._add_ptal_size_s_m()\n self.assertEqual(self.template.product_variant_ids, variants_2x2)\n self._assert_2color_x_2size()\n archived_variants = self._get_archived_variants()\n self.assertEqual(archived_variants, variants_2x0)\n self._assert_2color_x_0size(archived_variants)\n\n self._remove_ptal_size()\n self.assertEqual(self.template.product_variant_ids, variants_2x0)\n self._assert_2color_x_0size()\n archived_variants = self._get_archived_variants()\n self.assertEqual(archived_variants, variants_2x2)\n self._assert_2color_x_2size(archived_variants)\n\n # This time we only add one of the two attributes we've been removing.\n # This is a single value line, so the value is simply added to existing\n # variants.\n self._add_ptal_size_s()\n self.assertEqual(self.template.product_variant_ids, variants_2x0)\n self._assert_2color_x_1size()\n self.assertEqual(archived_variants, variants_2x2)\n self._assert_2color_x_2size(archived_variants)\n\n Product._revert_method('unlink')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract host from endpoint.
def _get_host(endpoint): if endpoint.startswith('http://'): return endpoint[7:].strip() if endpoint.startswith('https://'): return endpoint[8:].strip() return endpoint.strip()
[ "def getHostFrom(fromHost):", "def get_host(email):\n host=email.split('@').__getitem__(1).split('.').__getitem__(0)\n return host", "def get_hostname (url):\n reg = re.search('[^:]+:(/)*([^:/]+)(:[0-9]+)?(/)?.*', url)\n host = ''\n try:\n host = reg.group(2)\n except:\n pass\n \n return host", "def host(self):\n return self.parsed_prefix.host", "def getHost(self):\n return self[SipViaHeader.PARAM_HOST] if SipViaHeader.PARAM_HOST in self else None", "def get_host(environ):\n scheme = environ.get(\"wsgi.url_scheme\")\n if \"HTTP_X_FORWARDED_HOST\" in environ:\n result = environ[\"HTTP_X_FORWARDED_HOST\"]\n elif \"HTTP_HOST\" in environ:\n result = environ[\"HTTP_HOST\"]\n else:\n result = environ[\"SERVER_NAME\"]\n if (scheme, str(environ[\"SERVER_PORT\"])) not in ((\"https\", \"443\"), (\"http\", \"80\")):\n result += \":\" + environ[\"SERVER_PORT\"]\n if result.endswith(\":80\") and scheme == \"http\":\n result = result[:-3]\n elif result.endswith(\":443\") and scheme == \"https\":\n result = result[:-4]\n return result", "def _get_hostname(url):\n\n if url.find('http://') == 0:\n url = url.replace('http://', '')\n if url.find('https://') == 0:\n url = url.replace('https://', '')\n\n hostname = url.split('/')[0]\n\n #is a user-defined port specified?\n port_parts = url.split(':')\n if len(port_parts) > 1:\n hostname = port_parts[0]\n\n return hostname", "def getHost(anHTTPmsg):\n try:\n for line in anHTTPmsg.splitlines():\n words = line.split()\n if (words[0] == \"Host:\") and (len(words)>1):\n return words[1]\n raise ValueError, \"cannot find 'Host:' keyword in HTTP message\"\n except Exception:\n raise ValueError, \"cannot find host in HTTP message\"", "def get_host(self):\n return self._host", "def hostname(url):\n netloc = urlparse.urlparse(url)[1]\n if netloc == '':\n return ''\n\n return netloc.split(':', 1)[0]", "def get_canonical_host(self):\n host = self.host.lower()\n if self.port is not None:\n host = \"%s:%s\" % (host, self.port)\n return host", "def hostname(self):\n return self.__urlsplit.hostname", "def _parse_host(id):\n host_name = None\n r = re.match(r\"^(.*);<host>$\", id)\n\n if r:\n host_name = r.group(1)\n\n return host_name", "def ex_get_hypervisor_hostname(self):\r\n hostname = self.connection.getHostname()\r\n return hostname", "def parseURI(url):\n\thostport = url.split(':')\n\thost = hostport[0] if hostport[0] != 'localhost' else socket.gethostname()\n\treturn host, hostport[1] if len(hostport) > 1 else '80'", "def get_host_port(uri):\n match = HostPortHelper.pattern.search(uri)\n if not match:\n raise ValueError(\"Bad uri string %s\" % uri)\n host, option, port = match.groups()\n return host, port", "def _get_endpoint_in_http_headers(response: requests.Response) -> Optional[str]:\n try:\n header_link = response.headers.get('Link').split(',')[0]\n if 'webmention' in header_link:\n log.debug('webmention endpoint found in http header')\n endpoint = re.match(\n r'<(?P<url>.*)>[; ]*.rel=[\\'\"]?webmention[\\'\"]?',\n header_link).group(1)\n return endpoint\n except Exception as e:\n log.debug(f'Error reading http headers: {e}')", "def get_weblog_host(self):\n return urllib.splithost(urllib.splittype(self.get_weblog_url())[1])[0].split(':')[0]", "def host(self):\n return self.socket.getsockname()[0]", "def dig_get_host_name():\n dig_args = [\"+short\", \"myip.opendns.com\", \"@resolver1.opendns.com\"]\n addr = dig(dig_args)\n\n # i've had cases of cmd running w/o error, but\n # returning blank str\n if not addr:\n raise ValueError\n\n return addr" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete the specified function.
def delete_function(self, serviceName, functionName, etag=None, traceId=None): method = 'DELETE' path = '/{0}/services/{1}/functions/{2}'.format(self.api_version, serviceName, functionName) headers = self._build_common_headers() if etag: headers['if-match'] = etag if traceId: headers['x-fc-trace-id'] = traceId # Sign the request and set the signature to headers. headers['authorization'] = self.auth.sign_request(method, path, headers) self._do_request(method, path, headers)
[ "def delete(self):\n self._transformation_function_engine.delete(self)", "def delete(self, func_to_reset, *args, **kwargs):\n name = func_to_reset if isinstance(\n func_to_reset, str) else func_to_reset.__name__\n task = self.steps[name]()\n\n path = task._getpath([])\n for f in path.parent.glob('*'):\n f.unlink()", "def delete(func: Callable, allowed_exceptions: List = None,\n title: str = None, req_obj_type: Callable = None) -> HTTPMethod:\n return HTTPMethod('delete', func, allowed_exceptions=allowed_exceptions,\n title=title, req_obj_type=req_obj_type)", "def del_func(pfn):\n\n # convert all contents into globals\n for l, r in function.chunks(pfn):\n for ea in database.address.iterate(l, database.address.prev(r)):\n for k in database.tag(ea):\n internal.comment.contents.dec(ea, k, target=interface.range.start(pfn))\n internal.comment.globals.inc(ea, k)\n logging.debug(u\"{:s}.del_func({:#x}) : Exchanging (increasing) reference count for global tag {!s} and (decreasing) reference count for contents tag {!s}.\".format(__name__, interface.range.start(pfn), utils.string.repr(k), utils.string.repr(k)))\n continue\n continue\n\n # remove all function tags\n for k in function.tag(interface.range.start(pfn)):\n internal.comment.globals.dec(interface.range.start(pfn), k)\n logging.debug(u\"{:s}.del_func({:#x}) : Removing (global) tag {!s} from function.\".format(__name__, interface.range.start(pfn), utils.string.repr(k)))\n return", "def delete(self, api_function):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return requests.delete('https://{0}/wapi/{1}/{2}'\n .format(self.url, self.vers, api_function),\n headers={\n 'Authorization': 'Basic {0}'\n .format(self.creds)\n },\n verify=False)", "def remove_lambda(self, funcName):\n\n try:\n response = self._lambda.delete_function(\n FunctionName = funcName\n )\n except Exception as e:\n response = {}\n\n return response", "def remove_function(self, func: Callable) -> None:\n if self._find_index_in_pipeline(func) is not None:\n self._all_functions.remove(func)\n self._CACHE.SET(self._name_in_cache, self._all_functions)\n self._CACHE.SAVE()", "def setDeleteCallback(self, function: 'SoSensorCB *', data: 'void *'=None) -> \"void\":\n return _coin.SoDataSensor_setDeleteCallback(self, function, data)", "def gen_test_delete():\n name = make_name(\"resource_impl_delete\")\n doc = make_doc(\"Deleting a %s resource\" % impl_instance.iontype)\n add_test_method(name, doc, test_delete_fun)", "def registerPreDelete(preDeleteFn):", "def deleteAfterUse(*args, **kwargs):\n \n pass", "def visit_drop_function(element, compiler, **kw):\n opt_if_exists = \"IF EXISTS\" if element.if_exists else None\n opt_drop_behavior = \"CASCADE\" if element.cascade else None\n function_name = element.function.build_quoted_identifier(quoter=compiler.preparer.quote)\n return _join_tokens(\"DROP FUNCTION\", opt_if_exists,\n function_name, opt_drop_behavior)", "def delete_table(self) -> Callable[[metastore.DeleteTableRequest], metastore.Table]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_table\" not in self._stubs:\n self._stubs[\"delete_table\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.bigquery.biglake.v1.MetastoreService/DeleteTable\",\n request_serializer=metastore.DeleteTableRequest.serialize,\n response_deserializer=metastore.Table.deserialize,\n )\n return 
self._stubs[\"delete_table\"]", "def method(self) -> str:\n return \"delete\"", "def delete(obj):", "def _del_original_func(obj):\n _original_funcs.pop(obj.__name__, None)\n if torch.cuda.is_available(): # clean up the cached function\n torch.cuda.synchronize()\n torch.cuda.empty_cache()", "def destructor(func: Callable) -> Callable:\n if iscoroutinefunction(func):\n raise TypeError(\"target functions must not be a coroutine function.\")\n\n func.__cog_unload_cb__ = True\n return func", "def Unregister(\n self,\n func: Callable[..., Any],\n extra_args: Sequence[object] = _EXTRA_ARGS_CONSTANT,\n ) -> None:\n key = self._GetKey(func, extra_args)\n self._UnregisterByKey(key)", "def delete_request():" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List the functions of the specified service.
def list_functions(self, serviceName, limit=None, nextToken=None, prefix=None, startKey=None, traceId=None): method = 'GET' path = '/{0}/services/{1}/functions'.format(self.api_version, serviceName) headers = self._build_common_headers() if traceId: headers['x-fc-trace-id'] = traceId # Sign the request and set the signature to headers. headers['authorization'] = self.auth.sign_request(method, path, headers) params = {} if limit: params['limit'] = limit if prefix: params['prefix'] = prefix if nextToken: params['nextToken'] = nextToken if startKey: params['startKey'] = startKey return self._do_request(method, path, headers, params=params).json()
[ "def list_functions(self):\n request = self.functions.list(location=self.parent)\n return self.__execute_request(request)", "def list_services(ctx):\n\n ctx.respond(ctx._(\"I am running: {services}\").format(\n services=\", \".join(ctx.bot.services))\n )", "def list(service_template_name,\n sort_by,\n descending,\n model_storage,\n logger):\n if service_template_name:\n logger.info('Listing services for service template {0}...'.format(\n service_template_name))\n service_template = model_storage.service_template.get_by_name(service_template_name)\n filters = dict(service_template=service_template)\n else:\n logger.info('Listing all services...')\n filters = {}\n\n services_list = model_storage.service.list(\n sort=utils.storage_sort_param(sort_by=sort_by, descending=descending),\n filters=filters)\n table.print_data(SERVICE_COLUMNS, services_list, 'Services:')", "def do_service_list(cs, args):\r\n result = cs.services.list(host=args.host, binary=args.binary)\r\n columns = [\"Binary\", \"Host\", \"Zone\", \"Status\", \"State\", \"Updated_at\"]\r\n # NOTE(jay-lau-513): we check if the response has disabled_reason\r\n # so as not to add the column when the extended ext is not enabled.\r\n if result and hasattr(result[0], 'disabled_reason'):\r\n columns.append(\"Disabled Reason\")\r\n if result:\r\n print 'OKKKKKKKKK'\r\n utils.print_list(result, columns)", "def get_all_services(limit=None, columns=None, extra_filter=None):\n return query(\"GET services\\n\", limit=limit, columns=columns, \n item_type=\"services\" , extra_filter=extra_filter)", "def services_all(ctx):\n ctx.run(KUBERNETES_GET_SERVICES_ALL_CMD)", "def test_services_list(self):\n pass", "def services():\n dbSession = current_app.config['DBSESSION'] # get the db session\n # get the list of services from the db\n serv = dbSession.query(Service).all()\n return render_template('org/services.html', services=serv)", "def listar_funcionarios():\n check_admin()\n\n funcionarios = Funcionario.query.all()\n\n return render_template('admin/funcionarios/funcionarios.html',\n funcionarios=funcionarios, title=\"Funcionarios\")", "def list(service_name,\n sort_by,\n descending,\n model_storage,\n logger):\n if service_name:\n logger.info('Listing executions for service {0}...'.format(\n service_name))\n service = model_storage.service.get_by_name(service_name)\n filters = dict(service=service)\n else:\n logger.info('Listing all executions...')\n filters = {}\n\n executions_list = model_storage.execution.list(\n filters=filters,\n sort=utils.storage_sort_param(sort_by, descending)).items\n\n table.print_data(EXECUTION_COLUMNS, executions_list, 'Executions:')", "def services(self):\n _log.debug('get service list')\n result = self._requestJSON('services', '')\n return self._getKey(result, 'name')", "def list(sort_by, descending, model_storage, logger):\n\n logger.info('Listing all service templates...')\n service_templates_list = model_storage.service_template.list(\n sort=utils.storage_sort_param(sort_by, descending))\n\n column_formatters = \\\n dict(description=table.trim_formatter_generator(DESCRIPTION_FIELD_LENGTH_LIMIT))\n table.print_data(SERVICE_TEMPLATE_COLUMNS, service_templates_list, 'Service templates:',\n column_formatters=column_formatters)", "def get_all(self):\n\n return self.func_mgr.list_functions(self.project_id)", "def get_actions_for_service(db_session, service):\n results = []\n rows = db_session.query(ActionTable.service, ActionTable.name).filter(\n ActionTable.service.like(service))\n for row in rows:\n action = 
row.service + ':' + row.name\n if action not in results:\n results.append(action)\n return results", "def hs_list(args):\n for hs in get_hidden_services():\n print args.fmt.replace(r'\\t', '\\t') % hs", "def watch_service_list(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_service_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/api/v1/watch/services'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def _all_services(type_, *args, **kwargs):\n return all_srvs[type_]", "def all_services_view(request):\n\tservices = Service.objects.filter(is_active=True)\n\treturn render(request, 'bundles_app/all_services.html', {'services': services} )", "def ListFunc(self):\n return self.api.firewalls.list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new site directory and init Mambo
def create_site(sitename): title('Create new site') mambo_conf = os.path.join(CWD, Mambo.config_yml) if os.path.isfile(mambo_conf): error_exit("Can't create new site in a directory that contain 'mambo.yml'") sitepath = os.path.join(CWD, sitename) if os.path.isdir(sitepath): error_exit("Site directory '%s' exists already!" % sitename) else: info("Creating site: %s..." % sitename) os.makedirs(sitepath) copy_resource("skel/", sitepath) stamp_mambo_current_version(sitepath) info("Site created successfully!") info("CD into '%s' and run 'mambo serve' to view the site" % sitename) done()
[ "def init():\n title(\"Init Mambo...\")\n mambo_conf = os.path.join(CWD, Mambo.config_yml)\n if os.path.isfile(mambo_conf):\n error_exit(\"Mambo is already initialized in '%s'. Or delete 'mambo.yml' if it's a mistake \" % CWD)\n else:\n copy_resource(\"skel/\", CWD)\n stamp_mambo_current_version(CWD)\n info(\"Mambo init successfully!\")\n info(\"Run 'mambo serve' to view the site\")\n done()", "def init():\n main_backup_dir = '.wit'\n parent_dir = os.getcwd()\n new_dir = pathlib.Path() / parent_dir / main_backup_dir / 'images' #Changed syntax according to notes on submission\n new_dir.mkdir(parents=True, exist_ok=True)\n new_dir = pathlib.Path() / parent_dir / main_backup_dir / 'staging_area'\n new_dir.mkdir(parents=True, exist_ok=True)", "def fill_site(self):\n self.init_command.copy_sample_site(self.target_dir)\n self.init_command.create_configuration(self.target_dir)", "def setup_directories():\r\n # First set up the system paths for the server/services\r\n run('mkdir -p %(app_path)s' % env)\r\n\r\n #----\r\n # Server paths (for web/other servers)\r\n #----\r\n for path in env.system_paths:\r\n env.temp_var = path\r\n run('mkdir -p %(app_path)s/%(temp_var)s' % env)\r\n # Change ownership of paths.\r\n sudo_as('chgrp -R %(webserver_group)s %(app_path)s/%(temp_var)s; chmod -R g+w %(app_path)s/%(temp_var)s;' % env)", "def create_site():\n if os.path.exists(instfolder):\n\n app = Flask('threecolor', instance_path=instfolder, instance_relative_config=True)\n\n # configure flask app from default settings, then overide with settings.cfg\n app.config.from_object('threecolor.configs.default_settings')\n app.config.from_pyfile('settings.cfg')\n\n # configure paths and folders according to instance path\n app.config['FLATPAGES_ROOT'] = os.path.join(app.instance_path, 'content')\n app.config['IMAGE_DIR'] = os.path.join(app.instance_path, 'images')\n app.config['FREEZER_DESTINATION'] = os.path.join(app.instance_path, app.config['BUILD_DIR'])\n\n from .site.coolviews import site, pages, freezer\n app.register_blueprint(site)\n pages.init_app(app)\n freezer.init_app(app)\n\n return app\n\n else:\n # app = Flask('threecolor')\n #\n # # configure flask app from default settings, then overide with settings.cfg\n # app.config.from_object('threecolor.configs.default_settings')\n\n misc.make_home(APP_ROOT)\n\n return app", "def import_site_init(name):\n #If the directory exists, just use that.\n path = os.path.join(os.path.split(__file__)[0], name)\n if os.path.isdir(path):\n logger.info(\"Initializing site from directory: \" + path)\n for root, dirs, files in os.walk(path):\n for fn in files:\n fn = os.path.join(root, fn)\n dst_fn = fn.replace(path + os.path.sep,\"\")\n dst_dir = os.path.split(dst_fn)[0]\n util.mkdir(dst_dir)\n shutil.copyfile(fn, dst_fn)\n #If a .py file exists, run with that:\n elif os.path.isfile(path) and path.endswith(\".py\"):\n mod = imp.load_source(\"mod\", path)\n mod.do_init()\n #Otherwise, load it from the zip file\n else:\n logger.info(\"Initializing site from zip file\")\n zip_data = pkgutil.get_data(\"blogofile.site_init\", name + \".zip\")\n zip_file = zipfile.ZipFile(StringIO.StringIO(zip_data))\n for name in zip_file.namelist():\n if name.endswith('/'):\n util.mkdir(name)\n else:\n util.mkdir(os.path.split(name)[0])\n f = open(name, 'wb')\n f.write(zip_file.read(name))\n f.close()", "def setup_application():\r\n require('branch')\r\n\r\n setup_directories()\r\n deploy_configurations()", "def makeuserdirs(self):\n dirlist = 
self.mewlosite.settings.get_value(mconst.DEF_SETTINGSEC_make_dirs, [])\n for dirpath in dirlist:\n dirpath = self.resolve(dirpath,None)\n #print \"ATTN:DEBUG site wants us to create dir '{0}'.\".format(dirpath)\n misc.makedirectorypath(dirpath)", "def _InstallBaseSites(self):\n dirname = os.path.dirname(self.sites_file)\n if not os.path.isdir(dirname):\n print '.. no uweb data directory; creating %r' % dirname\n os.mkdir(os.path.dirname(self.sites_file))\n with file(self.sites_file, 'w') as sites:\n print '.. creating %r with default sites' % self.sites_file\n sites.write(simplejson.dumps(self.SITES_BASE))\n print ''", "def setup_staticfiles():\n print yellow(stage_msg('Creating static files directories…'))\n with cd(env.config['directory']):\n run('mkdir -p public/{media,static}')", "def create_home_dir_structure():\r\n for directory in (HOME_NINJA_PATH, EXTENSIONS_PATH, PLUGINS, EDITOR_SKINS,\r\n LANGS, NINJA_THEME_DOWNLOAD, NINJA_KNOWLEDGE_PATH):\r\n if not os.path.isdir(directory):\r\n os.mkdir(directory)", "def init(project_path):\n src = os.path.join(os.path.dirname(__file__), \"template\")\n dst = os.path.join(project_path, \".deploy\")\n if os.path.exists(dst):\n msg = \"%r already exists\" % dst\n raise ValueError(msg)\n else:\n shutil.copytree(src, dst)\n\n versions_dir = os.path.join(dst, \"versions\")\n if not os.path.exists(versions_dir):\n os.mkdir(versions_dir)", "def create_dir_env_www():\n if not exists('/home/{}/env'.format(USER)):\n run('mkdir /home/{}/env'.format(USER))\n\n if not exists('/home/{}/www'.format(USER)):\n run('mkdir /home/{}/www'.format(USER))", "def setup(self):\n self.site = SiteFactory(is_default_site=True)", "def zip_site_init(): #pragma: no cover .. only used by setuptools\n try: \n curdir = os.getcwd()\n root = os.path.join(curdir, \"blogofile\", \"site_init\")\n for d in os.listdir(root):\n d = os.path.join(root, d)\n if os.path.isdir(d):\n os.chdir(root)\n zf = d + \".zip\"\n z = zipfile.ZipFile(zf, \"w\")\n os.chdir(d)\n for dirpath, dirnames, filenames in os.walk(os.curdir):\n if len(filenames) == 0:\n #This is an empty directory, add it anyway:\n z.writestr(zipfile.ZipInfo(dirpath+\"/\"), '')\n for fn in filenames:\n z.write(os.path.join(dirpath, fn))\n z.close()\n finally:\n os.chdir(curdir)", "def createSite(self, site: str) -> bool:\n sitePath = os.path.join(self.timePath, site)\n chk = checkDirExistence(sitePath)\n if chk:\n self.printWarning(\n \"Site {} already exists in project time data folder\".format(site)\n )\n return False\n checkAndMakeDir(sitePath)\n return True", "def init() -> None:\n\n path = os.path.join(os.getcwd(), '.wit')\n try:\n os.mkdir(path)\n os.mkdir(os.path.join(path, 'images'))\n os.mkdir(os.path.join(path, 'staging_area'))\n\n except FileExistsError:\n logging.error(FileExistsError('Wit already exists.'))\n\n except OSError as error:\n logging.error(OSError(f'Can not create folder:\\n{error}'))\n\n else:\n _write_base_files(path)\n _set_logger(os.path.relpath(path, os.getcwd()))\n logging.info('wit was successfully initialized.')", "def create_directory_structure():\n if not os.path.exists(APP_SAVE_DIR):\n os.mkdir(APP_SAVE_DIR)\n\n if not os.path.exists(APP_TEMP_DIR):\n os.mkdir(APP_TEMP_DIR)", "def create_project_dirs() -> None:\n create_folder('cache/data')\n create_folder('cache/models')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize Mambo in the current directory
def init(): title("Init Mambo...") mambo_conf = os.path.join(CWD, Mambo.config_yml) if os.path.isfile(mambo_conf): error_exit("Mambo is already initialized in '%s'. Or delete 'mambo.yml' if it's a mistake " % CWD) else: copy_resource("skel/", CWD) stamp_mambo_current_version(CWD) info("Mambo init successfully!") info("Run 'mambo serve' to view the site") done()
[ "def init_manager(basedir, dbconnect):\n # Test if the base directory exists and is empty.\n basedir = basedir if basedir is not None else config.BASEDIR()\n if os.path.isdir(basedir):\n if os.listdir(basedir):\n click.echo('Not an empty directory {}.'.format(basedir))\n sys.exit(-1)\n # Create instance of persistent archive manager to setup directories and\n # files.\n PersistentArchiveManager(\n basedir=basedir,\n dbconnect=dbconnect,\n create=True\n )\n click.echo(\"Initialized in {}.\".format(os.path.abspath(basedir)))", "def init(name):\n create_goblet_dir(name)\n click.echo(\"created .goblet/json.config\")\n click.echo(\"created requirements.txt\")\n click.echo(\"created main.py\")\n click.echo(\"created README.md\")", "def __init__(self):\n self.db = self._read_db()\n self._setup_dirs()", "def setup(self):\n self.orig_dir = os.getcwd()\n os.chdir(\"demos/bartlett1932\")", "def _initialize_directory(self):\n if os.path.exists(self.location):\n sys.exit(\"WARNING: %s already exists, exiting\" % self.location)\n self._generate_settings()\n self._print_initialization_message()\n self._create_directories()\n self._create_general_config_file()\n self._create_default_pipeline_config_file()\n self._create_filelist()\n print", "def init_jb():\n dict_path = os.path.dirname(os.path.split(os.path.realpath(__file__))[0]) + '/resources/QAattrdic.txt'\n jieba.load_userdict(dict_path)\n jieba.initialize()", "def run(args):\n _set_development_path()\n from mabot import run\n run(args)", "def initDirectory(self):\n self.datasetDirectory = {\"datasets\":[]}\n self.forecastDict = {\"PredictorPool\":{},\"EquationPools\":{},\"Options\":{}}\n\n return", "def init() -> None:\n\n path = os.path.join(os.getcwd(), '.wit')\n try:\n os.mkdir(path)\n os.mkdir(os.path.join(path, 'images'))\n os.mkdir(os.path.join(path, 'staging_area'))\n\n except FileExistsError:\n logging.error(FileExistsError('Wit already exists.'))\n\n except OSError as error:\n logging.error(OSError(f'Can not create folder:\\n{error}'))\n\n else:\n _write_base_files(path)\n _set_logger(os.path.relpath(path, os.getcwd()))\n logging.info('wit was successfully initialized.')", "def init(cls):\n\n os.makedirs(VideoDir.path(), exist_ok=True)", "def __init__(self):\n self.wlbt = WalabotAPI\n self.wlbt.Init()\n self.wlbt.SetSettingsFolder()", "def init():\n main_backup_dir = '.wit'\n parent_dir = os.getcwd()\n new_dir = pathlib.Path() / parent_dir / main_backup_dir / 'images' #Changed syntax according to notes on submission\n new_dir.mkdir(parents=True, exist_ok=True)\n new_dir = pathlib.Path() / parent_dir / main_backup_dir / 'staging_area'\n new_dir.mkdir(parents=True, exist_ok=True)", "def __init__(self):\n self.open = False\n self.labpygame = LabPygame()\n self.maze = Maze(\"./LabMac/resources/map/map.txt\")\n self.maze_view = MazeView(maze=self.maze)\n self.hero_view = HeroView(hero=self.maze.hero, maze=self.maze)\n self.bar = Bar(hero=self.maze.hero)", "def initialize(self):\n try:\n if not os.path.exists(self.sync_dir):\n self._logger.debug('Initializing and creating/checking data dir: %s' % (self.sync_dir))\n fs_util.createpath(self.sync_dir, mode=0755, exists_ok=True)\n except Exception:\n self._logger.exception('Failed initializing analysis metadata directory')\n raise", "def init():\n global memId0\n global open_files\n if dafBase:\n memId0 = dafBase.Citizen.getNextMemId() # used by MemoryTestCase\n # Reset the list of open files\n open_files = _get_open_files()", "def _SetupAndStart(self):\n self._SetupEnvVars()\n\n # Sometimes goma is 
lingering around if something went bad on a previous\n # run. Stop it before starting a new process. Can ignore the return code\n # since it will return an error if it wasn't running.\n self._Stop()\n\n if subprocess.call([self._abs_path_to_goma_file, 'start']):\n raise RuntimeError('GOMA failed to start.')", "def init():\n\n require('repo_path', 'venv_path')\n\n # Create the virtualenv\n make_virtualenv(env.venv_path, system_site_packages=False)\n\n # Create the git repo\n git_init(env.repo_path)", "def setUp(self):\n self._file_upto = 0 # How many files we have created\n self.tmpdir = tempfile.mkdtemp()\n self.output = cros_output.Output()\n self.tools = Tools(self.output)\n self.tools.PrepareOutputDir(None)\n self.bundle = Bundle(self.tools, self.output)\n self.uboot_fname = self.MakeRandomFile(500 * 1024)\n self.bmpblk_fname = os.path.abspath('bin/bmpblk.bin')\n self.bct_fname = os.path.abspath('bin/board.bct')\n self.bundle.SetDirs('##/usr/share/vboot/devkeys')\n self.bundle.SetOptions(False, None)", "def setup_application():\r\n require('branch')\r\n\r\n setup_directories()\r\n deploy_configurations()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clean the build dir
def clean(): title("Cleaning build dir...") Mambo(CWD).clean_build_dir() done()
[ "def clean():\n rm_rf(cwd/'_build')", "def clean_build():\r\n env.clean_build = True", "def clean_builds(self, _args):\n ctx = self.ctx\n rmdir(ctx.build_dir)\n rmdir(ctx.python_installs_dir)\n libs_dir = join(self.ctx.build_dir, 'libs_collections')\n rmdir(libs_dir)", "def CleanBuildOutputDirectory(self):\n PrintStatus('Removing '+ self.BuildOutputRootDir())\n if os.path.isdir(self.BuildOutputRootDir()):\n _SmartDeleteDirectory(self.BuildOutputRootDir())", "def cleanup_files(base_dir, builder):\n builder.run_root('rm -rf /build')", "def clean(deep=False):\n printer.header(\"Cleaning...\")\n\n rm_dir(\"build\")\n rm_dir(\"dist\")\n rm_dir(\"static\")\n rm_dir(f\"{SRC_PATH}/app/build\")\n rm_dir(f\"{SRC_PATH}/website/static/build\")\n rm_dir(\".mypy_cache\")\n rm_dir(\".pytest_cache\")\n rm_dir(\".ruff_cache\")\n\n pycache_dirs = tuple(Path.cwd().glob(\"__pycache__\"))\n if pycache_dirs:\n count = len(pycache_dirs)\n noun = \"directory\" if count == 1 else \"directories\"\n printer.info(f\"removing {count} __pycache__ {noun}\")\n for d in pycache_dirs:\n rm_dir(d, True)\n\n if deep:\n printer.warning(\"Deep cleaning...\")\n rm_dir(\".venv\")\n rm_dir(\"node_modules\")", "def CleanGeneratorDirectory(self):\n PrintStatus('Removing ' + GYP_PROJECTS_DIR)\n if os.path.isdir(GYP_PROJECTS_DIR):\n _SmartDeleteDirectory(GYP_PROJECTS_DIR)", "def clean_project():\n pass", "def run(self):\n call('rm -vrf ./build ./dist ./*.pyc ./*.egg-info', shell=True)\n call('make -C docs clean', shell=True)", "def clean_bootstrap_builds(self, _args):\n rmdir(join(self.ctx.build_dir, 'bootstrap_builds'))\n # for bs in Bootstrap.all_bootstraps():\n # bs = Bootstrap.get_bootstrap(bs, self.ctx)\n # if bs.build_dir and exists(bs.build_dir):\n # info('Cleaning build for {} bootstrap.'.format(bs.name))\n # rmdir(bs.build_dir)", "def CleanAll(config):\n\n def rmtree(path):\n util.Log('removing %s' % path)\n util.RemoveTree(path)\n\n rmtree(paths.STAMP_DIR)\n rmtree(paths.BUILD_ROOT)\n rmtree(paths.PUBLISH_ROOT)\n rmtree(paths.PACKAGES_ROOT)\n rmtree(util.GetInstallStampRoot(config))\n rmtree(util.GetInstallRoot(config))", "def clean_tmp(builddpath: str):\n tmpdpath = os.path.join(builddpath, \"tmp\")\n if os.path.isdir(tmpdpath):\n shutil.rmtree(tmpdpath)", "def clear_old_dist(self) -> None: # pragma: no cover\n info('<<lightyellow>>Removing old dist folders... ', newline=False)\n shutil.rmtree(self.folder / 'build', ignore_errors=True)\n shutil.rmtree(self.folder / 'dist', ignore_errors=True)\n shutil.rmtree(self.folder / 'pywikibot.egg-info', ignore_errors=True)\n info('<<lightyellow>>done')", "def clean_up() -> None:\n rmtree(TEMP)\n Path.unlink(ROOT_DIR.joinpath(ZIP_NAME))", "def clean_casedir(self):\n self._saver._clean_casedir()", "def clean_packmol_dir(envpath):\r\n # copy resulting .xyz to project dir\r\n try:\r\n os.replace(\"./PR_initcell.xyz\", f\"{envpath}/initcell.xyz\")\r\n except OSError:\r\n print(\"!!!!!Can't copy resulting .xyz file! 
Check packmol.log!!!!!\")\r\n exit()\r\n\r\n # clear the packmol directory of temporary .xyz and .inp files\r\n for i in glob.glob(f\"{PATH}/packmol/*.xyz\"):\r\n os.remove(i)\r\n for i in glob.glob(f\"{PATH}/packmol/*.inp\"):\r\n os.remove(i)", "def cleanup():\n if len(env.releases) > 3:\n directories = env.releases\n directories.reverse()\n del directories[:3]\n env.directories = ' '.join(\n [\"%(releases_path)s/%(release)s\" % {'releases_path': env.releases_path, 'release': release} for release in\n directories])\n run(\"rm -rf %(directories)s\" % {'directories': env.directories})", "def clean_test(context):\n context.run(\"rm -fr .tox/\")\n context.run(\"rm -f .coverage\")\n context.run(\"rm -fr htmlcov/\")", "def clean():\n for f in OUTPUT_FILES:\n os.remove(f)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Affinity is a group of affinity scheduling rules.
def affinity(self) -> Optional['outputs.InfinispanSpecAffinity']: return pulumi.get(self, "affinity")
[ "def combine_affinity(affinities):\n affinity = {f: list() for f in affinities[0].keys()}\n for k in affinity.keys():\n if \"cpus\" in k:\n for aff in affinities:\n affinity[k].extend(aff[k])\n elif \"torch_threads\" in k:\n num = 0\n for aff in affinities:\n num += aff[k]\n affinity[k] = num\n elif \"cuda_idx\" == k:\n for aff in affinities:\n affinity[k].append(aff[k])\n else:\n # should be \"alternating\" and \"set_affinity\" keys\n affinity[k] = False\n for aff in affinities:\n affinity[k] |= aff[k]\n return affinity", "def additional_affinity_or_anti_affinity_rule(self) -> List[AffinityOrAntiAffinityRuleIm]:\n return self._additional_affinity_or_anti_affinity_rule", "def vm_affinity_rule(\n name,\n affinity,\n vm_names,\n cluster_name,\n datacenter_name,\n enabled=True,\n mandatory=None,\n service_instance=None,\n):\n log.debug(f\"Configuring a vm to vm DRS rule {name} on cluster {cluster_name}.\")\n if service_instance is None:\n service_instance = get_service_instance(opts=__opts__, pillar=__pillar__)\n dc_ref = utils_common.get_datacenter(service_instance, datacenter_name)\n cluster_ref = utils_cluster.get_cluster(dc_ref, cluster_name)\n vm_refs = []\n missing_vms = []\n for vm_name in vm_names:\n vm_ref = utils_common.get_mor_by_property(service_instance, vim.VirtualMachine, vm_name)\n if not vm_ref:\n missing_vms.append(vm_name)\n vm_refs.append(vm_ref)\n if missing_vms:\n raise salt.exceptions.VMwareApiError({f\"Could not find virtual machines {missing_vms}\"})\n rules = cluster_ref.configuration.rule\n rule_ref = None\n if rules:\n for rule in rules:\n if rule.name == name:\n rule_info = utils_cluster.drs_rule_info(rule)\n if utils_cluster.check_affinity(rule) != affinity:\n return {\n \"updated\": False,\n \"message\": f\"Existing rule of name {name} has an affinity of {not affinity} and cannot be changed, make new rule.\",\n }\n if (\n rule_info[\"vms\"] == vm_names\n and rule_info[\"enabled\"] == enabled\n and rule_info[\"mandatory\"] == mandatory\n ):\n return {\n \"updated\": True,\n \"message\": \"Exact rule already exists.\",\n }\n rule_ref = rule\n\n if rule_ref:\n utils_cluster.update_drs_rule(rule_ref, vm_refs, enabled, mandatory, cluster_ref)\n return {\"updated\": True}\n else:\n utils_cluster.create_drs_rule(name, affinity, vm_refs, enabled, mandatory, cluster_ref)\n return {\"created\": True}", "def set_affinity(\n gpu_id,\n nproc_per_node,\n *,\n mode=\"unique_contiguous\",\n scope=\"node\",\n cores=\"all_logical\",\n balanced=True,\n min_cores=1,\n max_cores=None,\n):\n pynvml.nvmlInit()\n\n if mode == \"all\":\n affinity = get_all(nproc_per_node, scope, cores, min_cores, max_cores)\n elif mode == \"single\":\n affinity = get_single(nproc_per_node, scope, cores)\n elif mode == \"single_unique\":\n affinity = get_single_unique(nproc_per_node, scope, cores)\n elif mode == \"unique_interleaved\" or mode == \"unique_contiguous\":\n affinity = get_unique(\n nproc_per_node,\n scope,\n cores,\n mode,\n min_cores,\n max_cores,\n balanced,\n )\n else:\n raise RuntimeError(\"Unknown affinity mode\")\n\n os.sched_setaffinity(0, affinity[gpu_id])\n set_affinity = os.sched_getaffinity(0)\n return set_affinity", "def get_affinity_strategy(self, affinity):\n if affinity not in self.affinity_strategies:\n self.set_affinity_strategy(affinity, Bucket._default_strategy_t)\n\n return self.affinity_strategies[affinity]", "def runAffinityPropogation(self):\n af = AffinityPropagation(preference=-2000000).fit(self.data_points)\n self.labels_ap = af.labels_\n 
pickle.dump(af.labels_,open('affinity_prop.bn','wb'))\n return 0", "def set_affinity(irqId,affinity):\n attributes = {'cpu':affinity}\n req = requests.put(_url('/irqsummary/'+str(irqId)),data={'cpu' : affinity})\n if 'old affinity' in req.json():\n print(\"Affinity for IRQ %s changed from %s to %s\" % (irqId, req.json()['old affinity'],req.json()['new affinity']))\n if 'message' in req.json():\n print(req.json()['message'])", "def affinities(self):\n return self.to_dataframe()[\"affinity\"].values", "def set_affinity_strategy(self, affinity, strategy_t):\n self.affinity_strategies[affinity] = strategy_t(self)", "def affinity_list(self, is_geopmctl):\n app_rank_per_node = self.num_app_mask\n\n # The number of application logical CPUs per compute node.\n app_cpu_per_node = app_rank_per_node * self.cpu_per_rank\n # Total number of cores per node\n core_per_node = self.core_per_socket * self.num_socket\n\n # Number of application ranks per socket (floored)\n rank_per_socket = app_rank_per_node // self.num_socket\n rank_per_socket_remainder = app_rank_per_node % self.num_socket\n\n app_thread_per_core = 1\n while app_thread_per_core * core_per_node < app_cpu_per_node:\n app_thread_per_core += 1\n\n if app_thread_per_core > self.thread_per_core:\n err_fmt = '<geopm> geopmpy.launcher: Cannot oversubscribe hardware threads; requested threads per core: {}, hardware threads per core: {}.'\n raise RuntimeError(err_fmt.format(app_thread_per_core, self.thread_per_core))\n if app_rank_per_node > core_per_node:\n raise RuntimeError('<geopm> geopmpy.launcher: Cores cannot be shared between MPI ranks')\n if not self.config.allow_ht_pinning and app_thread_per_core > 1:\n raise RuntimeError('<geopm> geopmpy.launcher: Hyperthreads needed to satisfy ranks/threads configuration, but forbidden by'\n ' --geopm-hyperthreads-disable.')\n if app_cpu_per_node > self.num_linux_cpu:\n raise RuntimeError('<geopm> geopmpy.launcher: Requested more application threads per node than the number of Linux logical CPUs')\n\n app_core_per_rank = self.cpu_per_rank // app_thread_per_core\n if self.cpu_per_rank % app_thread_per_core > 0:\n app_core_per_rank += 1\n\n if app_core_per_rank * app_rank_per_node > core_per_node:\n raise RuntimeError('<geopm> geopmpy.launcher: Cores cannot be shared between MPI ranks')\n\n result = []\n core_index = core_per_node - 1\n\n if rank_per_socket_remainder == 0:\n socket_boundary = self.core_per_socket * (self.num_socket - 1)\n for socket in range(self.num_socket - 1, -1, -1):\n for rank in range(rank_per_socket - 1, -1, -1): # Start assigning ranks to cores from the highest rank/core backwards\n base_cores = list(range(core_index, core_index - app_core_per_rank, -1))\n cpu_range = set()\n for ht in range(app_thread_per_core):\n cpu_range.update({bc + ht * core_per_node for bc in base_cores})\n\n if not is_geopmctl:\n result.insert(0, cpu_range)\n core_index -= app_core_per_rank\n if not (rank == 0 and socket == 0):\n # Reset to highest core in the next socket when crossing the socket boundary\n # unless we've pinned the last rank on the last socket\n core_index = socket_boundary - 1\n\n socket_boundary -= self.core_per_socket\n else:\n for rank in range(app_rank_per_node - 1, -1, -1): # Start assigning ranks to cores from the highest rank/core backwards\n base_cores = list(range(core_index, core_index - app_core_per_rank, -1))\n cpu_range = set()\n for ht in range(app_thread_per_core):\n cpu_range.update({bc + ht * core_per_node for bc in base_cores})\n if not is_geopmctl:\n result.insert(0, 
cpu_range)\n core_index -= app_core_per_rank\n\n if core_index <= 0:\n if self.config.allow_ht_pinning and core_per_node * app_thread_per_core < self.num_linux_cpu:\n # Run controller on the lowest hyperthread that is not\n # occupied by the application\n geopm_ctl_cpu = core_per_node * app_thread_per_core\n else:\n # Oversubscribe Linux CPU 0, no better solution\n geopm_ctl_cpu = 0\n else:\n geopm_ctl_cpu = 1\n\n if self.config.get_ctl() == 'process' or is_geopmctl:\n result.insert(0, {geopm_ctl_cpu})\n\n return result", "def update_affinity_group(self, api_client, affinitygroupids=None,\n affinitygroupnames=None):\n cmd = {'id': self.id}\n\n if affinitygroupids:\n cmd['affinitygroupids'] = affinitygroupids\n\n if affinitygroupnames:\n cmd['affinitygroupnames'] = affinitygroupnames\n\n return api_client.updateVMAffinityGroup(**cmd)", "def affinity_option(self, is_geopmctl):\n result = []\n if self.is_geopm_enabled:\n # Disable other affinity mechanisms\n self.environ_ext['KMP_AFFINITY'] = 'disabled'\n self.environ_ext['MV2_ENABLE_AFFINITY'] = '0'\n self.environ_ext['KMP_WARNINGS'] = 'FALSE'\n\n aff_list = self.affinity_list(is_geopmctl)\n pid = subprocess.Popen(['srun', '--help'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n help_msg, err = pid.communicate()\n if help_msg.find(b'--mpibind') != -1:\n result.append('--mpibind=off')\n if help_msg.find(b'--cpu_bind') != -1:\n bind_cmd = '--cpu_bind'\n elif help_msg.find(b'--cpu-bind') != -1:\n bind_cmd = '--cpu-bind'\n else:\n raise RuntimeError('<geopm> geopmpy.launcher: SLURM\\'s cpubind plugin was not detected. Unable to affinitize ranks.')\n\n mask_zero = ['0' for ii in range(self.num_linux_cpu)]\n mask_list = []\n for cpu_set in aff_list:\n mask = list(mask_zero)\n for cpu in cpu_set:\n mask[self.num_linux_cpu - 1 - cpu] = '1'\n mask = '0x{:x}'.format(int(''.join(mask), 2))\n mask_list.append(mask)\n result.append(bind_cmd)\n if self.quiet:\n result.append('mask_cpu:' + ','.join(mask_list))\n else:\n result.append('v,mask_cpu:' + ','.join(mask_list))\n\n if self.config.get_ctl() == 'application':\n result.append('--overlap')\n\n return result", "def config_numa_affinity(self, session, vmname, vmnic):\n if self.verify_numa_affinity(session, vmname, vmnic):\n return True\n else:\n data = vmUtil.read_vmx(session, vmname)\n _LOGGER.debug(f' Executing command : cat vmfs/volumes/{vmUtil.get_datastore(session, vmname)}/{vmname}/{vmname}.vmx | grep \"numa.nodeAffinity =\" -i')\n stdin, stdout, stderr = session.exec_command(f'cat vmfs/volumes/{vmUtil.get_datastore(session, vmname)}/{vmname}/{vmname}.vmx | grep \"numa.nodeAffinity =\" -i')\n a = stdout.read().decode()\n e = vmUtil.get_numa_node(session, vmname)\n if a:\n _LOGGER.debug(f'Adding : numa.nodeAffinity = \"{e}\" in vmx file ')\n data += f'numa.nodeAffinity = \"{e}\"'\n data = data.replace('\"', '\\\\\"')\n _LOGGER.debug(f'Adding changes for NUMA affinity in vmx file.')\n stdin, stdout, stderr = session.exec_command(f'echo \"{data}\" > vmfs/volumes/{vmUtil.get_datastore(session, vmname)}/{vmname}/{vmname}.vmx')\n return False if stderr.read() else True\n # return True\n else:\n _LOGGER.debug(f'Executing command : vsish -e get /net/pNics/{vmnic}/properties | grep NUMA ')\n stdin, stdout, stderr = session.exec_command(f'vsish -e get /net/pNics/{vmnic}/properties | grep NUMA')\n r = stdout.read().decode()\n st = re.search('\\d', r)\n if st:\n numa = st.group()\n old = vmUtil.get_numa_node(session, vmname)\n _LOGGER.debug(f'Replacing : numa.nodeAffinity = \\\"{old}\\\" to 
numa.nodeAffinity = \\\"{numa}\\\"')\n data = data.replace(f'numa.nodeAffinity = \"{old}\"', f'numa.nodeAffinity = \"{numa}\"')\n data = data.replace('\"', '\\\\\"')\n _LOGGER.debug(f'Adding changes for NUMA affinity in vmx file.')\n stdin, stdout, stderr = session.exec_command(f'echo \"{data}\" > vmfs/volumes/{vmUtil.get_datastore(session, vmname)}/{vmname}/{vmname}.vmx')\n return False if stderr.read() else True\n else:\n _LOGGER.error(f'unable to configure node affinity for vmnic : {vmnic}')", "def increment_affinity(self, counters):\n self.affinity_counters.update(counters)\n if self.parent:\n self.parent.increment_affinity(counters)", "def calculate_affinity(user1, user2, **kws): # pragma: no cover\n return Aniffinity(base_user=user1, **kws).calculate_affinity(user2)", "def __init__(__self__, *,\n pod_affinity_term: 'outputs.InfinispanSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',\n weight: int):\n pulumi.set(__self__, \"pod_affinity_term\", pod_affinity_term)\n pulumi.set(__self__, \"weight\", weight)", "def _add_auto_scaling(self):\n auto_scaling_group = self.fargate_service.service.auto_scale_task_count(\n min_capacity=2,\n max_capacity=10\n )\n auto_scaling_group.scale_on_cpu_utilization(\n 'CpuScaling',\n target_utilization_percent=50,\n scale_in_cooldown=core.Duration.seconds(60),\n scale_out_cooldown=core.Duration.seconds(60)\n )", "def qemu_set_affinity(self, *host_cpus):\n for _ in range(3):\n try:\n qemu_cpus = self.get_qemu_pids()\n\n if len(qemu_cpus) != len(host_cpus):\n sleep(1)\n continue\n for qemu_cpu, host_cpu in zip(qemu_cpus, host_cpus):\n command = ('taskset -pc {host_cpu} {thread}'.\n format(host_cpu=host_cpu, thread=qemu_cpu))\n message = ('QEMU: Set affinity failed on {host}!'.\n format(host=self._node['host']))\n exec_cmd_no_error(self._node, command, sudo=True,\n message=message)\n break\n except (RuntimeError, ValueError):\n self.qemu_kill_all()\n raise\n else:\n self.qemu_kill_all()\n raise RuntimeError('Failed to set Qemu threads affinity!')", "def cpu_impl():\n return _C.CpuForceAlignmentCriterion" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
InfinispanContainerSpec specifies resource requirements per container
def container(self) -> Optional['outputs.InfinispanSpecContainer']: return pulumi.get(self, "container")
[ "def container(self) -> Optional['outputs.InfinispanSpecServiceContainer']:\n return pulumi.get(self, \"container\")", "def validate_required_for_container(data, c_req):\n c_req_set = set(c_req)\n result = True\n if (data['kind'] == \"Deployment\") or \\\n (data['kind'] == \"DaemonSet\") or \\\n (data['kind'] == \"StatefulSet\"):\n for i,c in enumerate(data['spec']['template']['spec']['containers']):\n d_set = set(c.keys())\n if not d_set >= c_req_set:\n missing_keys = list(c_req_set - d_set)\n print(\n err_msg(\n lvl=\"ERR\",\n sub=\"Missing required keys in containers\",\n msg=\", \".join(str(e) for e in missing_keys)\n ),\n file=sys.stderr\n )\n result = False\n elif data['kind'] == \"CronJob\":\n for i,c in enumerate(data['spec']['jobTemplate']['spec']['template']['spec']['containers']):\n d_set = set(c.keys())\n if not d_set >= c_req_set:\n missing_keys = list(c_req_set - d_set)\n print(\n err_msg(\n lvl=\"ERR\",\n sub=\"Missing required keys in containers\",\n msg=\", \".join(str(e) for e in missing_keys)\n ),\n file=sys.stderr\n )\n result = False\n return result", "def to_k8s_client_obj(self):\n limits_raw = {\n 'cpu': self.limit_cpu,\n 'memory': self.limit_memory,\n 'nvidia.com/gpu': self.limit_gpu,\n 'ephemeral-storage': self.limit_ephemeral_storage,\n }\n requests_raw = {\n 'cpu': self.request_cpu,\n 'memory': self.request_memory,\n 'ephemeral-storage': self.request_ephemeral_storage,\n }\n\n limits = {k: v for k, v in limits_raw.items() if v}\n requests = {k: v for k, v in requests_raw.items() if v}\n resource_req = k8s.V1ResourceRequirements(limits=limits, requests=requests)\n return resource_req", "def test_create_namespaced_csi_storage_capacity(self):\n pass", "def test_patch_namespaced_csi_storage_capacity(self):\n pass", "def test_read_namespaced_csi_storage_capacity(self):\n pass", "def get_required_resources(job_details: dict) -> list:\n # Load configs.\n type_to_component_details = job_details[\"components\"]\n job_id = job_details[\"id\"]\n\n # Get required resources.\n resources_list = []\n for component_type, component_details in type_to_component_details.items():\n component_id = component_details[\"id\"]\n component_num = component_details[\"num\"]\n required_cpu = component_details[\"resources\"][\"cpu\"]\n required_memory = int(component_details[\"resources\"][\"memory\"].replace(\"m\", \"\"))\n required_gpu = component_details[\"resources\"][\"gpu\"]\n\n for i in range(component_num):\n resources_list.append(\n ContainerResource(\n container_name=ContainerController.build_container_name(job_id, component_id, i),\n cpu=required_cpu,\n memory=required_memory,\n gpu=required_gpu,\n ),\n )\n return resources_list", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n capacity_reservation_group_id: Optional[pulumi.Input[str]] = None,\n custom_ca_trust_enabled: Optional[pulumi.Input[bool]] = None,\n enable_auto_scaling: Optional[pulumi.Input[bool]] = None,\n enable_host_encryption: Optional[pulumi.Input[bool]] = None,\n enable_node_public_ip: Optional[pulumi.Input[bool]] = None,\n eviction_policy: Optional[pulumi.Input[str]] = None,\n fips_enabled: Optional[pulumi.Input[bool]] = None,\n host_group_id: Optional[pulumi.Input[str]] = None,\n kubelet_config: Optional[pulumi.Input[pulumi.InputType['KubernetesClusterNodePoolKubeletConfigArgs']]] = None,\n kubelet_disk_type: Optional[pulumi.Input[str]] = None,\n kubernetes_cluster_id: Optional[pulumi.Input[str]] = None,\n linux_os_config: 
Optional[pulumi.Input[pulumi.InputType['KubernetesClusterNodePoolLinuxOsConfigArgs']]] = None,\n max_count: Optional[pulumi.Input[int]] = None,\n max_pods: Optional[pulumi.Input[int]] = None,\n message_of_the_day: Optional[pulumi.Input[str]] = None,\n min_count: Optional[pulumi.Input[int]] = None,\n mode: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n node_count: Optional[pulumi.Input[int]] = None,\n node_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n node_network_profile: Optional[pulumi.Input[pulumi.InputType['KubernetesClusterNodePoolNodeNetworkProfileArgs']]] = None,\n node_public_ip_prefix_id: Optional[pulumi.Input[str]] = None,\n node_taints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n orchestrator_version: Optional[pulumi.Input[str]] = None,\n os_disk_size_gb: Optional[pulumi.Input[int]] = None,\n os_disk_type: Optional[pulumi.Input[str]] = None,\n os_sku: Optional[pulumi.Input[str]] = None,\n os_type: Optional[pulumi.Input[str]] = None,\n pod_subnet_id: Optional[pulumi.Input[str]] = None,\n priority: Optional[pulumi.Input[str]] = None,\n proximity_placement_group_id: Optional[pulumi.Input[str]] = None,\n scale_down_mode: Optional[pulumi.Input[str]] = None,\n snapshot_id: Optional[pulumi.Input[str]] = None,\n spot_max_price: Optional[pulumi.Input[float]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n ultra_ssd_enabled: Optional[pulumi.Input[bool]] = None,\n upgrade_settings: Optional[pulumi.Input[pulumi.InputType['KubernetesClusterNodePoolUpgradeSettingsArgs']]] = None,\n vm_size: Optional[pulumi.Input[str]] = None,\n vnet_subnet_id: Optional[pulumi.Input[str]] = None,\n windows_profile: Optional[pulumi.Input[pulumi.InputType['KubernetesClusterNodePoolWindowsProfileArgs']]] = None,\n workload_runtime: Optional[pulumi.Input[str]] = None,\n zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'KubernetesClusterNodePool':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _KubernetesClusterNodePoolState.__new__(_KubernetesClusterNodePoolState)\n\n __props__.__dict__[\"capacity_reservation_group_id\"] = capacity_reservation_group_id\n __props__.__dict__[\"custom_ca_trust_enabled\"] = custom_ca_trust_enabled\n __props__.__dict__[\"enable_auto_scaling\"] = enable_auto_scaling\n __props__.__dict__[\"enable_host_encryption\"] = enable_host_encryption\n __props__.__dict__[\"enable_node_public_ip\"] = enable_node_public_ip\n __props__.__dict__[\"eviction_policy\"] = eviction_policy\n __props__.__dict__[\"fips_enabled\"] = fips_enabled\n __props__.__dict__[\"host_group_id\"] = host_group_id\n __props__.__dict__[\"kubelet_config\"] = kubelet_config\n __props__.__dict__[\"kubelet_disk_type\"] = kubelet_disk_type\n __props__.__dict__[\"kubernetes_cluster_id\"] = kubernetes_cluster_id\n __props__.__dict__[\"linux_os_config\"] = linux_os_config\n __props__.__dict__[\"max_count\"] = max_count\n __props__.__dict__[\"max_pods\"] = max_pods\n __props__.__dict__[\"message_of_the_day\"] = message_of_the_day\n __props__.__dict__[\"min_count\"] = min_count\n __props__.__dict__[\"mode\"] = mode\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"node_count\"] = node_count\n __props__.__dict__[\"node_labels\"] = node_labels\n __props__.__dict__[\"node_network_profile\"] = node_network_profile\n __props__.__dict__[\"node_public_ip_prefix_id\"] = node_public_ip_prefix_id\n __props__.__dict__[\"node_taints\"] = node_taints\n 
__props__.__dict__[\"orchestrator_version\"] = orchestrator_version\n __props__.__dict__[\"os_disk_size_gb\"] = os_disk_size_gb\n __props__.__dict__[\"os_disk_type\"] = os_disk_type\n __props__.__dict__[\"os_sku\"] = os_sku\n __props__.__dict__[\"os_type\"] = os_type\n __props__.__dict__[\"pod_subnet_id\"] = pod_subnet_id\n __props__.__dict__[\"priority\"] = priority\n __props__.__dict__[\"proximity_placement_group_id\"] = proximity_placement_group_id\n __props__.__dict__[\"scale_down_mode\"] = scale_down_mode\n __props__.__dict__[\"snapshot_id\"] = snapshot_id\n __props__.__dict__[\"spot_max_price\"] = spot_max_price\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"ultra_ssd_enabled\"] = ultra_ssd_enabled\n __props__.__dict__[\"upgrade_settings\"] = upgrade_settings\n __props__.__dict__[\"vm_size\"] = vm_size\n __props__.__dict__[\"vnet_subnet_id\"] = vnet_subnet_id\n __props__.__dict__[\"windows_profile\"] = windows_profile\n __props__.__dict__[\"workload_runtime\"] = workload_runtime\n __props__.__dict__[\"zones\"] = zones\n return KubernetesClusterNodePool(resource_name, opts=opts, __props__=__props__)", "def pool_specification(config):\n # type: (dict) -> dict\n return config['pool_specification']", "def _spec(self) -> k8s.PodSpec:\n return k8s.PodSpec(\n containers=self.containers,\n volumes=self._volumes(),\n affinity=self._affinity(),\n )", "def server_jobspec(language, docker_image, test_duration_secs):\n container_name = dockerjob.random_name('interop_server_%s' %\n language.safename)\n cmdline = bash_login_cmdline(language.server_cmd(['--port=%s' %\n _DEFAULT_SERVER_PORT]))\n environ = language.global_env()\n docker_cmdline = docker_run_cmdline(\n cmdline,\n image=docker_image,\n cwd=language.server_cwd,\n environ=environ,\n docker_args=['-p', str(_DEFAULT_SERVER_PORT), '--name', container_name])\n\n server_job = jobset.JobSpec(cmdline=docker_cmdline,\n environ=environ,\n shortname='interop_server_%s' % language,\n timeout_seconds=test_duration_secs * 3)\n server_job.container_name = container_name\n return server_job", "def testRequestResourcesRaceConditionWithResourceDemands(self):\n config = copy.deepcopy(MULTI_WORKER_CLUSTER)\n config[\"available_node_types\"].update(\n {\n \"empty_node\": {\n \"node_config\": {},\n \"resources\": {\"CPU\": 2, \"GPU\": 1},\n \"max_workers\": 1,\n },\n \"def_worker\": {\n \"node_config\": {},\n \"resources\": {\"CPU\": 2, \"GPU\": 1, \"WORKER\": 1},\n \"max_workers\": 3,\n },\n }\n )\n config[\"idle_timeout_minutes\"] = 0\n\n config_path = self.write_config(config)\n self.provider = MockProvider()\n self.provider.create_node(\n {},\n {\n TAG_RAY_NODE_KIND: \"head\",\n TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,\n TAG_RAY_USER_NODE_TYPE: \"empty_node\",\n },\n 1,\n )\n\n runner = MockProcessRunner()\n runner.respond_to_call(\"json .Config.Env\", [\"[]\" for i in range(2)])\n lm = LoadMetrics()\n autoscaler = MockAutoscaler(\n config_path,\n lm,\n MockGcsClient(),\n max_failures=0,\n process_runner=runner,\n update_interval_s=0,\n )\n lm.update(\n \"127.0.0.0\",\n mock_raylet_id(),\n {\"CPU\": 2, \"GPU\": 1},\n {\"CPU\": 2},\n {},\n waiting_bundles=[{\"CPU\": 2}],\n )\n autoscaler.load_metrics.set_resource_requests([{\"CPU\": 2, \"GPU\": 1}] * 2)\n autoscaler.update()\n # 1 head, 1 worker.\n self.waitForNodes(2)\n lm.update(\n \"127.0.0.0\",\n mock_raylet_id(),\n {\"CPU\": 2, \"GPU\": 1},\n {\"CPU\": 2},\n {},\n waiting_bundles=[{\"CPU\": 2}],\n )\n # make sure it stays consistent.\n for _ in range(10):\n autoscaler.update()\n 
self.waitForNodes(2)", "def test_containers(self):\n del self.jobdef['containers']\n p = self._write_job('name', self.jobdef)\n with open(p) as f:\n msg = 'Missing required attribute: \"containers\".'\n with self.assertRaisesRegex(ModelError, msg):\n JobDefinition.validate(yaml.load(f.read()))\n\n self.jobdef['containers'] = [{'missing_image_attr': 'blah'}]\n p = self._write_job('name', self.jobdef)\n with open(p) as f:\n msg = 'Container\\(.*\\) must include an \"image\" attribute'\n with self.assertRaisesRegex(ModelError, msg):\n JobDefinition.validate(yaml.load(f.read()))", "def request_cluster_resources(to_request: List[dict], timeout: int = 10):\n\n # NOTE: We could also use a GCS python client. However, current GCS rpc client\n # expects GcsStatus as part of the reply, which is a protocol internal to Ray.\n # So we use the rpc stub directly to avoid that dependency.\n stub = _autoscaler_state_service_stub()\n min_bundles = [\n autoscaler_pb2.ResourceRequest(resources_bundle=bundle) for bundle in to_request\n ]\n request = autoscaler_pb2.RequestClusterResourceConstraintRequest(\n cluster_resource_constraint=autoscaler_pb2.ClusterResourceConstraint(\n min_bundles=min_bundles\n )\n )\n\n stub.RequestClusterResourceConstraint(request, timeout=timeout)", "def test_list_namespaced_csi_storage_capacity(self):\n pass", "def _reserve_memory_regions(self, spec):\n spec.reserve_memory_region(\n region=_DATA_REGIONS.CONFIG,\n size=CONFIG_SIZE,\n label=\"config\")\n spec.reserve_memory_region(\n region=_DATA_REGIONS.CHIP_TO_KEY_SPACE,\n size=SIZE_DATA_IN_CHIP_TO_KEY_SPACE,\n label=\"mc_key_map\")\n spec.reserve_memory_region(\n region=_DATA_REGIONS.PROVENANCE_REGION,\n size=_PROVENANCE_DATA_SIZE, label=\"Provenance\")", "def customized_capacity_metric_specification(self) -> Optional[pulumi.Input['PolicyPredictiveScalingConfigurationMetricSpecificationCustomizedCapacityMetricSpecificationArgs']]:\n return pulumi.get(self, \"customized_capacity_metric_specification\")", "def __init__(__self__,\n resource_name: str,\n args: KubernetesClusterNodePoolArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def testRequestResourcesRaceConditionsLong(self):\n config = copy.deepcopy(MULTI_WORKER_CLUSTER)\n config[\"max_workers\"] = 4\n config[\"idle_timeout_minutes\"] = 0\n config[\"available_node_types\"] = {\n \"empty_node\": {\n \"node_config\": {},\n \"resources\": {\"CPU\": 2},\n \"max_workers\": 1,\n },\n \"def_worker\": {\n \"node_config\": {},\n \"resources\": {\"CPU\": 2, \"WORKER\": 1},\n \"max_workers\": 3,\n \"min_workers\": 1,\n },\n }\n config_path = self.write_config(config)\n self.provider = MockProvider()\n runner = MockProcessRunner()\n runner.respond_to_call(\"json .Config.Env\", [\"[]\" for i in range(3)])\n self.provider.create_node(\n {},\n {\n TAG_RAY_NODE_KIND: NODE_KIND_HEAD,\n TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,\n TAG_RAY_USER_NODE_TYPE: \"empty_node\",\n },\n 1,\n )\n lm = LoadMetrics()\n autoscaler = MockAutoscaler(\n config_path,\n lm,\n MockGcsClient(),\n max_failures=0,\n process_runner=runner,\n update_interval_s=0,\n )\n autoscaler.load_metrics.set_resource_requests([{\"CPU\": 0.2, \"WORKER\": 1.0}])\n autoscaler.update()\n # 1 min worker for both min_worker and request_resources()\n self.waitForNodes(1, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})\n non_terminated_nodes = autoscaler.provider.non_terminated_nodes({})\n assert len(non_terminated_nodes) == 2\n node_id = non_terminated_nodes[1]\n node_ip = 
autoscaler.provider.non_terminated_node_ips({})[1]\n\n # A hack to check if the node was terminated when it shouldn't.\n autoscaler.provider.mock_nodes[node_id].state = \"unterminatable\"\n lm.update(\n node_ip,\n mock_raylet_id(),\n config[\"available_node_types\"][\"def_worker\"][\"resources\"],\n config[\"available_node_types\"][\"def_worker\"][\"resources\"],\n {},\n waiting_bundles=[{\"CPU\": 0.2, \"WORKER\": 1.0}],\n )\n autoscaler.load_metrics.set_resource_requests([{\"CPU\": 0.2, \"WORKER\": 1.0}] * 2)\n autoscaler.update()\n # 2 requested_resource, 1 min worker, 1 free node -> 2 nodes total\n self.waitForNodes(2, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})\n autoscaler.load_metrics.set_resource_requests([{\"CPU\": 0.2, \"WORKER\": 1.0}])\n autoscaler.update()\n # Still 2 because the second one is not connected and hence\n # request_resources occupies the connected node.\n self.waitForNodes(2, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})\n autoscaler.load_metrics.set_resource_requests([{\"CPU\": 0.2, \"WORKER\": 1.0}] * 3)\n lm.update(\n node_ip,\n mock_raylet_id(),\n config[\"available_node_types\"][\"def_worker\"][\"resources\"],\n {},\n {},\n waiting_bundles=[{\"CPU\": 0.2, \"WORKER\": 1.0}] * 3,\n )\n autoscaler.update()\n self.waitForNodes(3, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})\n autoscaler.load_metrics.set_resource_requests([])\n\n lm.update(\n \"172.0.0.2\",\n mock_raylet_id(),\n config[\"available_node_types\"][\"def_worker\"][\"resources\"],\n config[\"available_node_types\"][\"def_worker\"][\"resources\"],\n {},\n )\n lm.update(\n \"172.0.0.3\",\n mock_raylet_id(),\n config[\"available_node_types\"][\"def_worker\"][\"resources\"],\n config[\"available_node_types\"][\"def_worker\"][\"resources\"],\n {},\n )\n lm.update(\n node_ip,\n mock_raylet_id(),\n config[\"available_node_types\"][\"def_worker\"][\"resources\"],\n {},\n {},\n )\n print(\"============ Should scale down from here =============\", node_id)\n autoscaler.update()\n self.waitForNodes(1, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})\n # If node {node_id} was terminated any time then it's state will be set\n # to terminated.\n assert autoscaler.provider.mock_nodes[node_id].state == \"unterminatable\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ExposeSpec describes how Infinispan will be exposed externally
def expose(self) -> Optional['outputs.InfinispanSpecExpose']: return pulumi.get(self, "expose")
[ "def expose(self) -> 'outputs.InfinispanSpecServiceSitesLocalExpose':\n return pulumi.get(self, \"expose\")", "async def _expose_internal(self, exposure: Exposure, **kwargs) -> Exposure:\n\n raise NotImplementedError", "def should_expose(self, state) -> bool:\n expose_by_default = self._config.get(CONF_EXPOSE_BY_DEFAULT)\n exposed_domains = self._config.get(CONF_EXPOSED_DOMAINS)\n\n if state.attributes.get(\"view\") is not None:\n # Ignore entities that are views\n return False\n\n if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:\n return False\n\n entity_registry = er.async_get(self.hass)\n registry_entry = entity_registry.async_get(state.entity_id)\n if registry_entry:\n auxiliary_entity = (\n registry_entry.entity_category is not None\n or registry_entry.hidden_by is not None\n )\n else:\n auxiliary_entity = False\n\n explicit_expose = self.entity_config.get(state.entity_id, {}).get(CONF_EXPOSE)\n\n domain_exposed_by_default = (\n expose_by_default and state.domain in exposed_domains\n )\n\n # Expose an entity by default if the entity's domain is exposed by default\n # and the entity is not a config or diagnostic entity\n entity_exposed_by_default = domain_exposed_by_default and not auxiliary_entity\n\n # Expose an entity if the entity's is exposed by default and\n # the configuration doesn't explicitly exclude it from being\n # exposed, or if the entity is explicitly exposed\n is_default_exposed = entity_exposed_by_default and explicit_expose is not False\n\n return is_default_exposed or explicit_expose", "def EXPOSE(port_specs):\n msg = ''\n if not isinstance(port_specs, (list, tuple)):\n msg = 'EXPOSE instruction requires list or tuple, not %s' % (\n type(port_specs)\n )\n elif not port_specs:\n msg = 'EXPOSE instruction must have at least 1 port specifier.'\n if msg:\n raise DockerphileError(msg)\n return EXPOSE_t(port_specs=port_specs)", "def service(self) -> Optional['outputs.InfinispanSpecService']:\n return pulumi.get(self, \"service\")", "def polyCacheMonitor(cacheValue=bool, nodeName=\"string\"):\n pass", "def _ip_address_spec(output, ipaddress, netmask, interface, scope, active ):\n output.beginAssembling(\"IPaddressSpec\")\n output.setVirtualNameValue(\"IPaddress\", ipaddress)\n output.setVirtualNameValue(\"IPnetmask\", netmask)\n output.setVirtualNameValue(\"InterfaceName\", interface)\n output.setVirtualNameValue(\"Active\", scope)\n output.setVirtualNameValue(\"Scope\", active)\n output.endAssembling(\"IPaddressSpec\")", "def security(self) -> Optional['outputs.InfinispanSpecSecurity']:\n return pulumi.get(self, \"security\")", "def test_layer_cache(self, init):\n init.return_value = None\n cache = LayerCacheAggregator()\n b = Builder() # Don't need parameters as init's been mocked out\n b.cfr_title, b.cfr_part, b.doc_number = 15, '111', '111-222'\n b.writer = Mock()\n write = b.writer.layer.return_value.write\n tree = Node(label=[\"1234\"], children=[\n Node(label=[\"1234\", \"1\"], children=[\n Node(\"See paragraph (b)\", label=[\"1234\", \"1\", \"a\"]),\n Node(\"This is b\", label=[\"1234\", \"1\", \"b\"])])])\n b.gen_and_write_layers(tree, [], cache, [])\n arg = write.call_args_list[3][0][0]\n self.assertEqual(['1234-1-a'], arg.keys())\n cache.replace_using(tree)\n\n write.reset_mock()\n tree.children[0].children[1].text = \"References paragraph (a)\"\n b.gen_and_write_layers(tree, [], cache, [])\n arg = write.call_args_list[3][0][0]\n self.assertEqual(['1234-1-a'], arg.keys())\n\n write.reset_mock()\n tree.children[0].children[0].text = \"Contains no 
references\"\n b.gen_and_write_layers(tree, [], cache, [])\n arg = write.call_args_list[3][0][0]\n self.assertEqual(['1234-1-a'], arg.keys())\n\n write.reset_mock()\n notice = {}\n cache.invalidate_by_notice(notice)\n b.gen_and_write_layers(tree, [], cache, [])\n arg = write.call_args_list[3][0][0]\n self.assertEqual(['1234-1-a'], arg.keys())\n\n write.reset_mock()\n notice['changes'] = {'1234-1-b': 'some change'}\n cache.invalidate_by_notice(notice)\n b.gen_and_write_layers(tree, [], cache, [])\n arg = write.call_args_list[3][0][0]\n self.assertEqual(['1234-1-a', '1234-1-b'], list(sorted(arg.keys())))\n\n write.reset_mock()\n notice['changes'] = {'1234-Subpart-A': 'some change'}\n cache.invalidate_by_notice(notice)\n b.gen_and_write_layers(tree, [], cache, [])\n arg = write.call_args_list[3][0][0]\n self.assertEqual(['1234-1-b'], list(sorted(arg.keys())))", "def export(*names):\n def decorator(symbol):\n \"\"\"Decorator to export a symbol to the API.\n\n Args:\n symbol: Symbol to decorate.\n\n Returns:\n The input symbol with the `_api_names` attribute set.\n\n Raises:\n ValueError: If the name is invalid or already used.\n \"\"\"\n for name in names:\n # API name must have format \"namespace.name\".\n if name.count('.') != 1:\n raise ValueError(f\"Invalid API name: {name}\")\n # API namespace must be one of the supported ones.\n namespace, _ = name.split('.')\n if namespace not in _SUBMODULE_NAMES:\n raise ValueError(f\"Invalid API namespace: {namespace}\")\n # API name must be unique.\n if name in _API_SYMBOLS:\n raise ValueError(\n f\"Name {name} already used for exported symbol {symbol}\")\n # Add symbol to the API symbols table.\n _API_SYMBOLS[name] = symbol\n # Set the _api_names attribute.\n setattr(symbol, _API_ATTR, names)\n return symbol\n\n return decorator", "def container(self) -> Optional['outputs.InfinispanSpecContainer']:\n return pulumi.get(self, \"container\")", "def test_topic_appear_update_expose(self):\n\n topicname = '/test/nonexistent1'\n # every added topic should be in the list of args\n self.assertTrue(topicname not in self.interface.topics_args)\n # the backend should not have been created\n self.assertTrue(topicname not in self.interface.topics.keys())\n # First update should not change state\n dt = self.interface.update()\n self.assertEqual(dt.added, []) # nothing added\n self.assertEqual(dt.removed, []) # nothing removed\n # every added topic should be in the list of args\n self.assertTrue(topicname not in self.interface.topics_args)\n # the backend should not have been created\n self.assertTrue(topicname not in self.interface.topics.keys())\n\n # create the publisher and then try exposing the topic again, simulating\n # it coming online before expose call.\n nonexistent_pub = rospy.Publisher(topicname, Empty, queue_size=1)\n with timeout(5) as t:\n while not t.timed_out and nonexistent_pub.resolved_name not in self.interface.topics_available:\n dt = self.interface.update()\n self.assertEqual(dt.added, []) # nothing added (not exposed yet)\n self.assertEqual(dt.removed, []) # nothing removed\n\n self.assertTrue(not t.timed_out)\n # TODO : do we need a test with subscriber ?\n\n # every added topic should be in the list of args\n self.assertTrue(topicname not in self.interface.topics_args)\n # the backend should not have been created\n self.assertTrue(topicname not in self.interface.topics.keys())\n\n self.interface.expose_topics([topicname])\n # every exposed topic should remain in the list of args ( in case regex match another topic )\n 
self.assertTrue(topicname in self.interface.topics_args)\n # make sure the topic backend has been created\n self.assertTrue(topicname in self.interface.topics.keys())\n\n nonexistent_pub.unregister() # https://github.com/ros/ros_comm/issues/111 ( topic is still registered on master... )", "def container(self) -> Optional['outputs.InfinispanSpecServiceContainer']:\n return pulumi.get(self, \"container\")", "def get_inports( self ):\n return self._inports", "def _define_module_argument_spec():\n return dict( name=dict(required=True, aliases=['stage_name']),\n rest_api_id=dict(required=True),\n description=dict(required=False),\n cache_cluster_enabled=dict(required=False, type='bool'),\n cache_cluster_size=dict(required=False, choices=['0.5','1.6','6.1','13.5','28.4','58.2','118','237']),\n method_settings=dict(\n required=False,\n default=[],\n type='list',\n method_name=dict(required=True),\n method_verb=dict(required=True, choices=['GET','PUT','POST','DELETE','HEAD','OPTIONS','PATCH']),\n caching_enabled=dict(required=False, default=False, type='bool')\n ),\n state=dict(required=False, default='present', choices=['absent', 'present'])\n )", "def test_api_challenge_list_visibility():\n app = create_ctfd()\n with app.app_context(), freeze_time(\"2017-10-5\"):\n set_config(\n \"start\", \"1507089600\"\n ) # Wednesday, October 4, 2017 12:00:00 AM GMT-04:00 DST\n set_config(\n \"end\", \"1507262400\"\n ) # Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST\n set_config(\"challenge_visibility\", \"public\")\n with app.test_client() as client:\n r = client.get(\"/api/v1/challenges\")\n assert r.status_code == 200\n set_config(\"challenge_visibility\", \"private\")\n r = client.get(\"/api/v1/challenges\")\n assert r.status_code == 302\n destroy_ctfd(app)", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_discover_worker_responder_spaces(self):\n pass", "def test_get_params_decorator(self, name):\n test_get_params_fn = self._test_get_params_fn()\n stage = self._get_mock_stage()\n encode_params, decode_params = self.evaluate(\n test_get_params_fn(stage, name))\n\n # The graph should contain a single node.\n graph = tf.compat.v1.get_default_graph()\n self.assertLen(graph.as_graph_def().node, 1)\n if name is not None:\n self._assert_all_graph_nodes_in_name_scope(graph, name)\n else:\n self._assert_all_graph_nodes_in_name_scope(\n graph, self._DEFAULT_NAME + '_get_params')\n # The functionality is not modified.\n self.assertEqual(1.0, encode_params['param'])\n self.assertEqual(1.0, decode_params['param'])", "def cache_enable(self):\n return self.param_info.cache_enable" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
InfinispanSecurity info for the user application connection
def security(self) -> Optional['outputs.InfinispanSpecSecurity']: return pulumi.get(self, "security")
[ "def security(self) -> 'outputs.InfinispanStatusSecurity':\n return pulumi.get(self, \"security\")", "def getSecurity(self):\n return self._security", "def get_security_config(app):\n items = app.config.items()\n prefix = 'SECURITY_'\n\n def strip_prefix(tup):\n return (tup[0].replace('SECURITY_', ''), tup[1])\n\n return dict([strip_prefix(i) for i in items if i[0].startswith(prefix)])", "def getAuthenticationMap(self, *args):\r\n return _osgDB.Registry_getAuthenticationMap(self, *args)", "def keys(self):\n c = self.get_cxn().cursor()\n c.execute('SELECT session_id FROM user_sessions')\n return [ id for (id,) in c.fetchall() ]", "def getAccounts(self):\n accounts = CombaUser().getLogins()\n db = redis.Redis()\n\n internaccount = db.get('internAccess')\n if not internaccount:\n user = ''.join(random.sample(string.lowercase,10))\n password = ''.join(random.sample(string.lowercase+string.uppercase+string.digits,22))\n db.set('internAccess', user + ':' + password)\n intern = [user, password]\n else:\n intern = internaccount.split(':')\n\n accounts[intern[0]] = intern[1]\n\n return accounts", "def security_info(self, cusip, issue_date):\n self._check_cusip(cusip)\n issue_date = self._check_date(issue_date, '%m/%d/%Y')\n url = self.base_url + self.securities_endpoint + '{}/{}?format=json'.format(cusip, issue_date)\n security_dict = self._process_request(url)\n return security_dict", "def extract_auth_info():\n\n assert 'X-Login' in request.headers, \\\n 'Your web server must pass along the X-Login header.'\n\n login = request.headers['X-Login']\n g.user = db.user.get(login)\n\n if g.user is None:\n msg = _('There is no user account for you, contact administrator.')\n raise InvalidUsage(msg, data={'login': login})\n\n db.connection() \\\n .execute(text('select set_user(:name)'), name=login)\n\n g.roles = set(request.headers.get('X-Roles', '').split(';'))\n g.roles.discard('')", "def user_info(self):\n return self.__user_info", "def _get_session_auth_info(_helper_cfg):\n _session_auth = {}\n _session_info = ['username', 'password']\n for _key in _session_info:\n if _key in _helper_cfg['connection']['session_auth']:\n _session_auth[_key] = _helper_cfg['connection']['session_auth'][_key]\n else:\n _session_auth[_key] = None\n return _session_auth", "def _init_security_group(self):", "def getSecurities(self):\n\n exchange = {0:'NASDAQ', 1:'NYSE', 2:'ASE', 6:'OTC'}\n\n # Request number of securities in database\n if not self.sock.send('\\3'):\n print \"send 3 error\"\n self.close()\n return False\n\n ninfo = unpack('I',self.RecvAll(size=4))[0]\n print \"%d possible security_id's\" % ninfo\n Info = {} # empty dictionary\n sid = 0\n\n # Request the list of securities\n if not self.sock.send('\\4'):\n print \"send 4 error\"\n self.close()\n return False\n\n sids = []; tickers = []; ciks = []; sics = []; xchngs = []; names = []\n\n while sid != 9999999:\n info = self.RecvAll(size=280)\n if len(info) != 280:\n print \"info recv error, only %d bytes\" % len(info)\n self.close()\n return False\n\n sid,cik,sic,xchg,name,tkr = unpack('2I1i1I256s8s',info)\n name = name.split(\"\\0\",1)[0] # remove garbage after null byte\n tkr = tkr.split(\"\\0\",1)[0]\n #Info[sid] = {'ticker':tkr, 'cik':cik, 'sic':sic, 'exchange':exchange[xchg], 'company':name} # add dictionary item\n\n sids.append(sid)\n tickers.append(tkr)\n ciks.append(cik)\n sics.append(sic)\n xchngs.append(exchange[xchg])\n names.append(name)\n\n #assert list(set(sid)) == sid # SID list should be unique\n info = {'ticker':tickers, 'cik':ciks, 
'sic':sics, 'exchange':xchngs, 'company':names}\n universe = pd.DataFrame(info, index=sids)\n\n print \"%d entries in security_id Info dictionary\" % len(universe)\n return universe", "def advapi32_GetSecurityInfo(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"handle\", \"ObjectType\", \"SecurityInfo\", \"ppsidOwner\", \"ppsidGroup\", \"ppDacl\", \"ppSacl\", \"ppSecurityDescriptor\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def get_user_info(self: CipherDatabase) -> dict:\n try:\n _database = Database(database=self.get_database_path(), mode=\"rw\")\n except DatabaseDoesNotExistException:\n raise UserCacheNotFoundException(\n \"Database doesn't have any cache stored\")\n\n row = _database.read(\"Email\", \"Password\", table=\"Users\", rows=\".\")\n\n return {\"user_email\": self.decrypt(row[0]), \"user_password\": self.decrypt(row[1])}", "def all_slice_authorities(self):\n # return self._uppercase_keys_in_list([e for e in self._delegate_tools.get_registry()[\"SERVICES\"] if (e['service_type']==self.SA_SERVICE_TYPE)])\n # Current deployments assume single slice authority", "def get_session_info(self):\n\n return self.get_session_key(), self.get_session_location()", "def security_definitions(self):\n return None", "def _get_ssids(self):\n return self.__ssids", "def security_credential(self):\n return self._security_credential" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
InfinispanServiceSpec specifies configuration for a specific service
def service(self) -> Optional['outputs.InfinispanSpecService']: return pulumi.get(self, "service")
[ "def _configure_services(self):\n if self.series == 'trusty':\n keystone_config = {'admin-password': 'openstack',\n 'admin-token': 'ubuntutesting',\n 'openstack-origin': 'cloud:trusty-mitaka'}\n designate_config = {'openstack-origin': 'cloud:trusty-mitaka',\n 'nameservers': 'ns1.mojotest.com.'}\n else:\n keystone_config = {'admin-password': 'openstack',\n 'admin-token': 'ubuntutesting'}\n designate_config = {'nameservers': 'ns1.mojotest.com.'}\n\n pxc_config = {\n 'dataset-size': '25%',\n 'max-connections': 1000,\n 'root-password': 'ChangeMe123',\n 'sst-password': 'ChangeMe123',\n }\n\n configs = {\n 'keystone': keystone_config,\n 'designate': designate_config,\n 'percona-cluster': pxc_config,\n }\n\n super(DesignateBindDeployment, self)._configure_services(configs)", "def MaybeConfigRunDistributed(self):\n if not FLAGS.cluster_spec:\n return\n job_specs = FLAGS.cluster_spec.split('@')\n cluster_spec_dict = {}\n for job_spec in job_specs:\n # ps_host=worker1:1231,worker2:1234\n job_machines = job_spec.split('=')\n if len(job_machines) != 2:\n raise ValueError('Invalid job specification: %s', job_spec)\n cluster_spec_dict[job_machines[0]] = job_machines[1].split(',')\n if FLAGS.job == 'trainer_client':\n FLAGS.tf_master = 'grpc://%s' % cluster_spec_dict['worker'][FLAGS.task]\n for job in cluster_spec_dict.keys():\n if job.startswith('decoder_'):\n assert len(job_specs) == 1, 'Decoder jobs must run on their own'\n assert ',' not in job_specs[0], 'Only single machine supported'\n FLAGS.decoder_job = '/job:%s' % job\n FLAGS.decoder_replicas = 1\n if job.startswith('evaler_'):\n assert len(job_specs) == 1, 'Evaler jobs must run on their own'\n assert ',' not in job_specs[0], 'Only single machine supported'\n FLAGS.evaler_job = '/job:%s' % job\n FLAGS.evaler_replicas = 1\n if FLAGS.mode == 'sync' and FLAGS.job in ('controller', 'trainer_client',\n 'worker'):\n FLAGS.worker_job = '/job:worker'\n FLAGS.worker_replicas = len(cluster_spec_dict['worker'])\n FLAGS.ps_job = '/job:worker'\n FLAGS.ps_replicas = FLAGS.worker_replicas\n if FLAGS.mode == 'async' and FLAGS.job in ('controller', 'trainer', 'ps'):\n FLAGS.worker_job = '/job:trainer'\n FLAGS.worker_replicas = len(cluster_spec_dict['trainer'])\n FLAGS.ps_job = '/job:ps'\n FLAGS.ps_replicas = len(cluster_spec_dict['ps'])", "def _find_service_service_config(self, service):\n return ServiceConfig.build_instance_from_service(service)", "def _configure_services(self):\n keystone_config = {\n 'admin-password': 'openstack',\n 'admin-token': 'ubuntutesting'\n }\n pxc_config = {\n 'dataset-size': '25%',\n 'max-connections': 1000,\n 'root-password': 'ChangeMe123',\n 'sst-password': 'ChangeMe123',\n }\n designate_config = {\n 'nameservers': ' '.join([self.TEST_NS1_RECORD,\n self.TEST_NS2_RECORD])\n }\n configs = {\n 'keystone': keystone_config,\n 'percona-cluster': pxc_config,\n 'designate': designate_config,\n }\n super(DesignateBasicDeployment, self)._configure_services(configs)", "def __init__(self, server, port, service_name='memcache'):\n super(MemcacheServiceStub, self).__init__(service_name)\n if not server:\n server = DEFAULT_ADDR\n if not port:\n port = DEFAULT_PORT\n\n self._cache = pylibmc.Client(['%s:%i' % (server, port)])", "def test_user_specified_service_default(self):\n # Ensure that the service name was configured\n from ddtrace import config\n\n assert config.service == \"mysvc\"\n\n client = self.make_client([b\"STORED\\r\\n\", b\"VALUE key 0 5\\r\\nvalue\\r\\nEND\\r\\n\"])\n client.set(b\"key\", b\"value\", noreply=False)\n\n pin = 
Pin.get_from(pymemcache)\n tracer = pin.tracer\n spans = tracer.pop()\n\n assert spans[0].service != \"mysvc\"", "def container(self) -> Optional['outputs.InfinispanSpecServiceContainer']:\n return pulumi.get(self, \"container\")", "def _docker_service_file(\n storage_driver: DockerStorageDriver,\n docker_version: DockerVersion,\n) -> str:\n storage_driver_name = {\n DockerStorageDriver.AUFS: 'aufs',\n DockerStorageDriver.OVERLAY: 'overlay',\n DockerStorageDriver.OVERLAY_2: 'overlay2',\n }[storage_driver]\n\n daemon = {\n DockerVersion.v1_11_2: '/usr/bin/docker daemon',\n DockerVersion.v1_13_1: '/usr/bin/docker daemon',\n DockerVersion.v17_12_1_ce: '/usr/bin/dockerd',\n DockerVersion.v18_06_3_ce: '/usr/bin/dockerd',\n }[docker_version]\n\n docker_cmd = (\n '{daemon} '\n '-D '\n '-s {storage_driver_name} '\n '--exec-opt=native.cgroupdriver=cgroupfs '\n '--cgroup-parent=${{CGROUP_PARENT}}'\n ).format(\n storage_driver_name=storage_driver_name,\n daemon=daemon,\n )\n\n docker_service_contents = {\n 'Unit': {\n 'Description': 'Docker Application Container Engine',\n 'Documentation': 'https://docs.docker.com',\n 'After': 'dbus.service',\n },\n 'Service': {\n 'EnvironmentFile': '/etc/docker/env',\n 'ExecStart': docker_cmd,\n 'LimitNOFILE': '1048576',\n 'LimitNPROC': '1048576',\n 'LimitCORE': 'infinity',\n 'Delegate': 'yes',\n 'TimeoutStartSec': '0',\n },\n 'Install': {\n 'WantedBy': 'default.target',\n },\n }\n config = configparser.ConfigParser()\n # Ignore erroneous error https://github.com/python/typeshed/issues/1857.\n config.optionxform = str # type: ignore\n config.read_dict(docker_service_contents)\n config_string = io.StringIO()\n config.write(config_string)\n config_string.seek(0)\n return config_string.read()", "def test_user_specified_service_v1(self):\n # Ensure that the service name was configured\n from ddtrace import config\n\n assert config.service == \"mysvc\"\n\n client = self.make_client([b\"STORED\\r\\n\", b\"VALUE key 0 5\\r\\nvalue\\r\\nEND\\r\\n\"])\n client.set(b\"key\", b\"value\", noreply=False)\n\n pin = Pin.get_from(pymemcache)\n tracer = pin.tracer\n spans = tracer.pop()\n\n assert spans[0].service == \"mysvc\"", "def configure_service_set(ipsec_obj, **kwargs):\n return ipsec_obj.set_ss(**kwargs)", "def configure_services(cluster):\n services = cluster.get_all_services()\n\n for service in services:\n service_type = service.type\n if service_type == 'HDFS':\n print \"Configuring HDFS for Kerberos.\"\n service.update_config(\n {'hadoop_security_authentication': 'kerberos',\n 'hadoop_security_authorization': 'true'}\n )\n\n role_cfgs = service.get_all_role_config_groups()\n\n for role_cfg in role_cfgs:\n if role_cfg.roleType == 'DATANODE':\n role_cfg.update_config(\n {'dfs_datanode_port': '1004',\n 'dfs_datanode_http_port': '1006',\n 'dfs_datanode_data_dir_perm': '700'}\n )\n elif service_type == 'HBASE':\n print \"Configuring HBase for Kerberos.\"\n service.update_config(\n {'hbase_security_authentication': 'kerberos',\n 'hbase_security_authorization': 'true'}\n )\n elif service_type == 'ZOOKEEPER':\n print \"Configuring ZooKeeper for Kerberos.\"\n service.update_config(\n {'enableSecurity': 'true'}\n )\n elif service_type == 'SOLR':\n print \"Configuring Solr for Kerberos.\"\n service.update_config(\n {'solr_security_authentication': 'kerberos'}\n )\n elif service_type == 'KS_INDEXER':\n # API version 10 came out with CM 5.4, which is necessary to make this configuration\n # change.\n if API_CURRENT_VERSION >= 10:\n print \"Configuring KeyStoreIndexer for 
Kerberos.\"\n service.update_config(\n {'hbase_indexer_security_authentication': 'kerberos'}\n )\n elif service_type == 'HUE':\n kt_renewer_role = service.get_roles_by_type('KT_RENEWER')\n hue_server_role = service.get_roles_by_type('HUE_SERVER')\n\n if hue_server_role and not kt_renewer_role:\n print \"Configuring Hue for Kerberos.\"\n service.create_role('KT_RENEWER-1', 'KT_RENEWER',\n hue_server_role[0].hostRef.hostId)", "def server_jobspec(language, docker_image, test_duration_secs):\n container_name = dockerjob.random_name('interop_server_%s' %\n language.safename)\n cmdline = bash_login_cmdline(language.server_cmd(['--port=%s' %\n _DEFAULT_SERVER_PORT]))\n environ = language.global_env()\n docker_cmdline = docker_run_cmdline(\n cmdline,\n image=docker_image,\n cwd=language.server_cwd,\n environ=environ,\n docker_args=['-p', str(_DEFAULT_SERVER_PORT), '--name', container_name])\n\n server_job = jobset.JobSpec(cmdline=docker_cmdline,\n environ=environ,\n shortname='interop_server_%s' % language,\n timeout_seconds=test_duration_secs * 3)\n server_job.container_name = container_name\n return server_job", "def test_user_specified_service_v0(self):\n # Ensure that the service name was configured\n from ddtrace import config\n\n assert config.service == \"mysvc\"\n\n client = self.make_client([b\"STORED\\r\\n\", b\"VALUE key 0 5\\r\\nvalue\\r\\nEND\\r\\n\"])\n client.set(b\"key\", b\"value\", noreply=False)\n\n pin = Pin.get_from(pymemcache)\n tracer = pin.tracer\n spans = tracer.pop()\n\n assert spans[0].service != \"mysvc\"", "def configure_batvis():\n batvis_unit = {}\n batvis_content = \"\"\"\n[Unit]\nDescription=batadv-vis\n\n[Service]\nExecStart=/usr/local/sbin/batadv-vis -i bat0 -s\nRestart=always\nRestartSec=10s\n\n[Install]\nWantedBy=multi-user.target\n\"\"\"\n batvis_unit['content'] = base64.b64encode(bytes(batvis_content, \"utf-8\"))\n batvis_unit['encoding'] = \"b64\"\n batvis_unit['path'] = r'/etc/systemd/system/batadv-vis.service'\n\n return batvis_unit", "def test_schematization_undefined_service_default(self):\n\n self.cache.get(u\"á_complex_operation\")\n spans = self.get_spans()\n\n for span in spans:\n assert span.service == \"flask-cache\", \"Expected service name to be 'flask-cache' but was '{}'\".format(\n span.service\n )", "def service(self, value: typing.Union[\"IngressServiceBackend\", dict]):\n if isinstance(value, dict):\n value = typing.cast(\n IngressServiceBackend,\n IngressServiceBackend().from_dict(value),\n )\n self._properties[\"service\"] = value", "def service():\r\n\r\n mock_soco = mock.MagicMock()\r\n mock_soco.ip_address = \"192.168.1.101\"\r\n return Service(mock_soco)", "def start_conf():\n from oslo_config import cfg\n\n cfg.CONF(\n args=[],\n default_config_files=['service/etc/oslo_conf.ini']\n )", "def test_tf_config_ds_specs(self, k8s_tf_config_services, k8s_client,\n k8s_tf_config):\n\n # Check deployment set image\n services = [k8s_tf_config_services]\n name = k8s_tf_config.name\n\n if k8s_tf_config_services in [\"db-nodemgr\"]:\n ds_name = name + \"-db\"\n else:\n ds_name = name\n ds = k8s_client.AppsV1Api.read_namespaced_daemon_set(\n ds_name, k8s_tf_config.namespace)\n\n for service in services:\n image = None\n for c in ds.spec.template.spec.containers:\n if c.name == service:\n image = c.image\n break\n\n config_srv_spec = k8s_tf_config.obj['spec'][k8s_tf_config_services]\n spec_image = None\n for c in config_srv_spec['containers']:\n if c['name'] == service:\n spec_image = c['image']\n break\n if image != spec_image or image is None:\n 
raise Exception(\"Deployment set {} has incorrect image: {}\\n\"\n \"Operator spec image: {}\"\n \"\".format(ds.metadata.name,\n image,\n spec_image))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
def __init__(__self__, *, pod_affinity_term: 'outputs.InfinispanSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm', weight: int): pulumi.set(__self__, "pod_affinity_term", pod_affinity_term) pulumi.set(__self__, "weight", weight)
[ "def _assign_node_weights(self):\n _CONFIG_SERVER_SCORE = 11\n _QUORUM_MANAGER_SCORE = 8\n _QUORUM_SCORE = 5\n _MANAGER_SCORE = 3\n _CLIENT_SCORE = 1\n\n for node in self.state['nodes'].keys():\n\n fullname = self.state['nodes'][node]['admin_node_name']\n\n if self.state['nodes'][node]['roles'] == 'quorum-manager':\n self.state['nodes'][node]['weight'] = _QUORUM_MANAGER_SCORE\n elif self.state['nodes'][node]['roles'] == 'quorum':\n self.state['nodes'][node]['weight'] = _QUORUM_SCORE\n elif self.state['nodes'][node]['roles'] == 'manager':\n self.state['nodes'][node]['weight'] = _MANAGER_SCORE\n else:\n self.state['nodes'][node]['weight'] = _CLIENT_SCORE\n\n\n # check to see if node is primary/secondary config server\n # - don't want them both in the same group\n if self.state['primary_server'] == fullname or \\\n self.state['secondary_server'] == fullname:\n self.state['nodes'][node]['weight'] = _CONFIG_SERVER_SCORE\n \n\n return", "def _build_weighted_replicas_by_tier(self):\n weight_of_one_part = self.weight_of_one_part()\n\n # assign each device some replicanths by weight (can't be > 1)\n weighted_replicas_for_dev = {}\n devices_with_room = []\n for dev in self._iter_devs():\n if not dev['weight']:\n continue\n weighted_replicas = (\n dev['weight'] * weight_of_one_part / self.parts)\n if weighted_replicas < 1:\n devices_with_room.append(dev['id'])\n else:\n weighted_replicas = 1\n weighted_replicas_for_dev[dev['id']] = weighted_replicas\n\n while True:\n remaining = self.replicas - sum(weighted_replicas_for_dev.values())\n if remaining < 1e-10:\n break\n devices_with_room = [d for d in devices_with_room if\n weighted_replicas_for_dev[d] < 1]\n rel_weight = remaining / sum(\n weighted_replicas_for_dev[d] for d in devices_with_room)\n for d in devices_with_room:\n weighted_replicas_for_dev[d] = min(\n 1, weighted_replicas_for_dev[d] * (rel_weight + 1))\n\n weighted_replicas_by_tier = defaultdict(float)\n for dev in self._iter_devs():\n if not dev['weight']:\n continue\n assigned_replicanths = weighted_replicas_for_dev[dev['id']]\n dev_tier = (dev['region'], dev['zone'], dev['ip'], dev['id'])\n for i in range(len(dev_tier) + 1):\n tier = dev_tier[:i]\n weighted_replicas_by_tier[tier] += assigned_replicanths\n\n # belts & suspenders/paranoia - at every level, the sum of\n # weighted_replicas should be very close to the total number of\n # replicas for the ring\n validate_replicas_by_tier(self.replicas, weighted_replicas_by_tier)\n\n return weighted_replicas_by_tier", "def calculate_capacity_for(m_name, m_pods, m_cpu, m_mem, node_map):\n # print(\n # f\"Checking capacity of metric: {m_name}\\n\"\n # f\" CPU: {m_cpu}\\n\"\n # f\" memory: {m_mem}\\n\"\n # f\" pods: {m_pods}\"\n # )\n\n metric_capacity = 0\n for node in node_map.values():\n # print(f\"Examining available capacity in node: {node['name']}\")\n pods = node[\"available\"][\"pods\"]\n cpu = node[\"available\"][\"cpu\"]\n mem = node[\"available\"][\"memory\"]\n\n if pods < 1:\n continue\n\n node_capacity = 0\n\n # print(f\"Comparing required CPU: {m_cpu} to node available CPU: {cpu}\")\n if m_cpu is not None and m_cpu > 0:\n if m_cpu >= cpu:\n continue\n\n m_count = floor(cpu / m_cpu)\n # print(\n # f\"Node has {m_count} capacity in terms of CPU (req: {m_cpu}, avail: {cpu})\"\n # )\n node_capacity = (\n m_count if node_capacity < 1 else min(m_count, node_capacity)\n )\n\n # print(f\"Comparing required Memory: {m_mem} to node available Memory: {mem}\")\n if m_mem is not None and m_mem > 0:\n if m_mem >= mem:\n continue\n\n m_count = 
floor(mem / m_mem)\n # print(\n # f\"Node has {m_count} capacity in terms of Memory (req: {m_mem}, avail: {mem})\"\n # )\n node_capacity = (\n m_count if node_capacity < 1 else min(m_count, node_capacity)\n )\n\n node_capacity = 1 if node_capacity < 1 else min(node_capacity, pods)\n # print(f\"Node: {node['name']} has CPU/memory capacity: {node_capacity}\")\n\n metric_capacity += node_capacity\n # print(\n # f\"After adding capacity {node_capacity} on node: {node['name']}, \" \\\n # f\"capacity of {m_name} is {metric_capacity}\\n\"\n # )\n\n # print(f\"Comparing required pods: {m_pods} to total available pods: {metric_capacity}\")\n if m_pods is not None and metric_capacity > m_pods:\n metric_capacity = floor(metric_capacity / m_pods)\n\n # print(\n # f\"After factoring out pod-count / cluster capacity {m_pods}, capacity of {m_name} is {metric_capacity}\\n\\n\"\n # )\n\n return metric_capacity", "def _score_paragraphs(self, tm, tfidf_model, corpus, weights, required_words):\n p_scores = dict()\n\n for p_ind, doc in corpus:\n tfidf = tfidf_model[doc]\n\n scores = list()\n for dict_ind, tfidf_score in tfidf:\n word = tm.dictionary[dict_ind]\n\n if required_words and word in required_words:\n scores.append(tfidf_score * self._required_field_score)\n elif self._weighted is not None and word in self._weighted:\n scores.append(tfidf_score * (1 + weights[word]) * self._weighted[word])\n else:\n if word in weights:\n scores.append(tfidf_score * (1 + weights[word]))\n else:\n scores.append(tfidf_score)\n\n p_scores[p_ind] = sum(scores)\n return p_scores", "def runAffinityPropogation(self):\n af = AffinityPropagation(preference=-2000000).fit(self.data_points)\n self.labels_ap = af.labels_\n pickle.dump(af.labels_,open('affinity_prop.bn','wb'))\n return 0", "def _build_target_replicas_by_tier(self):\n weighted_replicas = self._build_weighted_replicas_by_tier()\n wanted_replicas = self._build_wanted_replicas_by_tier()\n max_overload = self.get_required_overload(weighted=weighted_replicas,\n wanted=wanted_replicas)\n if max_overload <= 0.0:\n return wanted_replicas\n else:\n overload = min(self.overload, max_overload)\n self.logger.debug(\"Using effective overload of %f\", overload)\n target_replicas = defaultdict(float)\n for tier, weighted in weighted_replicas.items():\n m = (wanted_replicas[tier] - weighted) / max_overload\n target_replicas[tier] = m * overload + weighted\n\n # belts & suspenders/paranoia - at every level, the sum of\n # target_replicas should be very close to the total number\n # of replicas for the ring\n validate_replicas_by_tier(self.replicas, target_replicas)\n\n return target_replicas", "def compute_cw_score(p, q, edgedict, ndict, params = None):\n if (len(ndict[p]) > len(ndict[q])):\n temp = p\n p = q\n q = temp \n score = 0\n for elem in ndict[p]:\n if elem in ndict[q]:\n p_elem = edgedict[(p, elem)] if (p, elem) in edgedict else edgedict[(elem, p)]\n q_elem = edgedict[(q, elem)] if (q, elem) in edgedict else edgedict[(elem, q)]\n score += p_elem + q_elem\n return score", "def compute_matching(self, inputs, outputs):\n tree = outputs.tree\n \n # compute matching distributions\n if 'gt_match_dists' in outputs:\n gt_match_dists = outputs.gt_match_dists\n else:\n gt_match_dists = self.binding.get_w(inputs.pad_mask, inputs, outputs, log=True)\n \n tree.bf.match_dist = outputs.gt_match_dists = gt_match_dists\n\n # compute additional vals\n outputs.entropy = safe_entropy(outputs.gt_match_dists, dim=-1)\n # probability of the node existing\n tree.bf.p_n = outputs.p_n = 
torch.sum(outputs.gt_match_dists, dim=2).clamp(0, 1)", "def _initialize_weights(self):\n for n1, n2 in self.weights_graph.edges():\n data = self.weights_graph.edges[n1, n2]\n data[\"weight\"] = float(1)\n for _, data in self.weights_graph.nodes(data=True):\n data[\"usage\"] = 0\n\n for vs in self.terminals.values():\n self._increase_weights(vs)", "def get_node_weighted_connectivity(self,node):\n return float(sum(self.get_node_weights(node))) /\\\n ( float(sum(self.weights.values()))/2.0 )", "def get_most_common_word_weights(trainfile):\n all_counters = defaultdict(lambda: Counter())\n tag_counter = Counter()\n for (words, tags) in conll_seq_generator(trainfile):\n for word, tag in zip(words, tags):\n all_counters[word][tag] += 1\n tag_counter[tag] += 1\n \n temp = {}\n for word in all_counters.keys():\n c = all_counters[word].most_common(1)[0][0]\n #print(c)\n temp[(c, word)] = 1\n \n #my_weights[word] = temp\n \n t = tag_counter.most_common(1)[0][0]\n temp[(t, OFFSET)] = 0.5\n weights = defaultdict(float, temp)\n print(weights)\n\n \n return weights", "def _neighbor_communities(self, node):\n weights = {}\n for neighbor, edge_obj in self.node_dict[node].edges.items():\n # Exclude self loops from ki_in as it will be added later\n if neighbor != node:\n edge_weight = edge_obj.weight\n neighborcom = self.node_dict[neighbor].community\n weights[neighborcom] = weights.get(neighborcom, 0) + edge_weight\n\n return weights", "def _compute_weights(labels: Collection):\n class_support = np.unique(labels, return_counts=True)[1]\n class_frequencies = class_support / len(labels)\n # Class weights are the inverse of the class frequencies\n class_weights = 1 / class_frequencies\n # Normalize vector to sum up to 1.0 (in case the Loss function does not do it)\n class_weights /= class_weights.sum()\n return class_weights", "def solve_by_weight(items):\n # sort the items by wieght\n items = sort_by_weight(items)\n \n # run the greedy solver\n value = 0\n weight = 0\n taken = [0]*len(items)\n \n for item in items:\n if weight + item.weight <= capacity:\n taken[item.index] = 1\n value += item.value\n weight += item.weight\n # print(\"current weight: {w}; current value: {v}\".format(w=weight, v=value))\n \n # prepare the solution in the specified output format\n output_data = str(value) + ' ' + str(0) + '\\n'\n output_data += ' '.join(map(str, taken))\n dct_output_data ={\"obj\": str(value),\n \"opt\": str(0),\n \"decision\": ' '.join(map(str, taken))}\n return dct_output_data", "def _update_weighted_matrix(self) -> None:\n self.weighted_map = deepcopy(self.map)\n for connection in self.weighted_map:\n connections = self.weighted_map[connection]\n connections_count = sum(list(connections.values()))\n for key in self.weighted_map[connection]:\n self.weighted_map[connection][key] /= connections_count", "def calc_relative_weight(self):\n relative_weight = self.weight\n for agent in self.agents:\n if relative_weight > 0:\n relative_weight -= self.agents[agent]\n return relative_weight", "def calculate_weight(self):\n\n\t\tweight = 0\n\t\tfor item in self.items:\n\t\t\tif item == \"Health Potions\" or item == \"Magic Potions\":\n\t\t\t\tweight += self.items[item]\n\n\t\tself.weight = weight", "def _cost(node_and_neighborhood):\n v, neighborhood = node_and_neighborhood\n return G.nodes[v].get(weight, 1) / len(neighborhood - dom_set)", "def aggregate_weights(self, clients_params):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
matchExpressions is a list of label selector requirements. The requirements are ANDed.
def match_expressions(self) -> Optional[Sequence['outputs.InfinispanSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']]: return pulumi.get(self, "match_expressions")
[ "def match_expressions(self) -> Optional[List[\"LabelSelectorRequirement\"]]:\n return self.__match_expressions", "def _LabelMatched(obj, selector_map):\n if not obj:\n return False\n if not selector_map:\n return True\n labels = _GetPathValue(obj, ['metadata', 'labels'])\n if not labels:\n return False\n for key in selector_map:\n value = selector_map[key]\n if key not in labels or labels[key] != value:\n return False\n return True", "def match(self, queryDescriptors, trainDescriptors, mask=...) -> matches:\n ...", "def match_labels(self) -> Optional[Dict[str, str]]:\n return self.__match_labels", "def _match_any(self, regexs, text, options=None, search=False):\n match = None\n regex = None\n operator = 'search' if search else 'match'\n for regex in regexs:\n if options is not None:\n match = getattr(re, operator)(regex, text, options)\n else:\n match = getattr(re, operator)(regex, text)\n if match:\n break\n return match, regex", "def match(path: Path, selectors: Optional[List[\"BaseSelector\"]] = None) -> bool:\n if not selectors:\n return True\n\n for selector in selectors:\n if selector._match(path):\n return True\n return False", "def get_by_labels(labels):\n\n matches = []\n if labels:\n for label in labels:\n matches.append([x for x in db.Entry.find({'labels'})._get_labels() if label in x])\n return matches\n else:\n abort(404)", "def matches(a, (b_name, b_labels)):\n if not a in name_options.keys():\n return False\n for n in name_options[a]:\n if n in b_name.lower():\n return True\n for label in b_labels:\n if n in label.text.lower():\n return True\n else:\n pass#print repr(n), repr(label.text.lower())\n return False", "def get_matches(lf, candidate_set, match_values=[1, -1]):\n matches = []\n for c in candidate_set:\n label = lf(c)\n if label in match_values:\n matches.append(c)\n print((\"%s matches\") % len(matches))\n return matches", "def select_metadata_rules(patterns: Iterable[str]) -> List[MetadataRule]:\n include_rules = []\n exclude_rules = []\n\n for pattern in patterns:\n pattern = SelectPattern.parse(pattern)\n\n prop_pattern = pattern.property_pattern\n selected = not pattern.negated\n\n rules = include_rules if selected else exclude_rules\n\n if selected or not prop_pattern or prop_pattern == \"*\":\n rules.append(\n MetadataRule(\n tap_stream_id=pattern.stream_pattern,\n breadcrumb=[],\n key=\"selected\",\n value=selected,\n )\n )\n\n if prop_pattern:\n props = prop_pattern.split(\".\")\n\n rules.append(\n MetadataRule(\n tap_stream_id=pattern.stream_pattern,\n breadcrumb=property_breadcrumb(props),\n key=\"selected\",\n value=selected,\n )\n )\n\n return include_rules + exclude_rules", "def apply_selector(self, tags, selector):\n for selector_group in selector.split(','):\n found_matching_tag_group = True\n for tag in selector_group.split('+'):\n if tag not in tags.split(','):\n found_matching_tag_group = False\n if found_matching_tag_group:\n return True\n if len(selector.split(','))==0:\n return True\n else:\n return False", "def match_any_rule(self, match):\n pass", "def any_css_matches(self, selectors: tuple[str]) -> bool:\n ...", "def match_constraints(self, match_constraints):\n\n self._match_constraints = match_constraints", "def matches(self, match_value, **kwargs):\n\t\treturn match_value and all(group.invite_only for group in match_value)", "def _matches(self, entities=None, extensions=None, regex_search=False):\n if extensions is not None:\n if isinstance(extensions, six.string_types):\n extensions = [extensions]\n extensions = '(' + 
'|'.join(extensions) + ')$'\n if re.search(extensions, self.path) is None:\n return False\n if entities is not None:\n for name, val in entities.items():\n patt = '%s' % val\n if isinstance(val, (int, float)):\n # allow for leading zeros if a number was specified\n # regardless of regex_search\n patt = '0*' + patt\n if not regex_search:\n patt = '^%s$' % patt\n if name not in self.entities or \\\n re.search(patt, self.entities[name]) is None:\n return False\n return True", "def _get_matches_by_selector(sample_html, attributes_to_match, text_to_match):\n matches_by_selector = {}\n for attr, value in attributes_to_match.items():\n # form CSS selectors like [title=\"Make=Button\"] and match tags by them\n selector = f'{attr}=\"{value}\"'\n matches_by_selector[selector] = sample_html.select(f'[{selector}]')\n\n # 'text=\"something\"' is specific \"CSS-selector\" matched in a different way\n matches_by_selector[f'text=\"{text_to_match}\"'] = \\\n _find_tag_by_text(sample_html, text_to_match)\n return matches_by_selector", "def labels(resource, values):\n def test(value):\n try:\n return value in resource.labels\n except (AttributeError, TypeError):\n return False\n\n return any(map(test, values))", "def filter_labels(cls, labels: List[str], pred: Callable) -> List[str]:\n if not labels:\n return []\n return [label.name for label in Label.objects.filter(name__in=labels) if pred(label)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
InfinispanServiceContainerSpec resource requirements specific to the service
def container(self) -> Optional['outputs.InfinispanSpecServiceContainer']: return pulumi.get(self, "container")
[ "def service(self) -> Optional['outputs.InfinispanSpecService']:\n return pulumi.get(self, \"service\")", "def container(self) -> Optional['outputs.InfinispanSpecContainer']:\n return pulumi.get(self, \"container\")", "def _setup_container() -> svc_containers.Container:\n container = svc_containers.Container()\n\n app_config = svc_cfg.Config()\n container.config.from_pydantic(app_config)\n\n container.wire(modules=[svc_endpoints, svc_events])\n\n return container", "def validate_required_for_container(data, c_req):\n c_req_set = set(c_req)\n result = True\n if (data['kind'] == \"Deployment\") or \\\n (data['kind'] == \"DaemonSet\") or \\\n (data['kind'] == \"StatefulSet\"):\n for i,c in enumerate(data['spec']['template']['spec']['containers']):\n d_set = set(c.keys())\n if not d_set >= c_req_set:\n missing_keys = list(c_req_set - d_set)\n print(\n err_msg(\n lvl=\"ERR\",\n sub=\"Missing required keys in containers\",\n msg=\", \".join(str(e) for e in missing_keys)\n ),\n file=sys.stderr\n )\n result = False\n elif data['kind'] == \"CronJob\":\n for i,c in enumerate(data['spec']['jobTemplate']['spec']['template']['spec']['containers']):\n d_set = set(c.keys())\n if not d_set >= c_req_set:\n missing_keys = list(c_req_set - d_set)\n print(\n err_msg(\n lvl=\"ERR\",\n sub=\"Missing required keys in containers\",\n msg=\", \".join(str(e) for e in missing_keys)\n ),\n file=sys.stderr\n )\n result = False\n return result", "def server_jobspec(language, docker_image, test_duration_secs):\n container_name = dockerjob.random_name('interop_server_%s' %\n language.safename)\n cmdline = bash_login_cmdline(language.server_cmd(['--port=%s' %\n _DEFAULT_SERVER_PORT]))\n environ = language.global_env()\n docker_cmdline = docker_run_cmdline(\n cmdline,\n image=docker_image,\n cwd=language.server_cwd,\n environ=environ,\n docker_args=['-p', str(_DEFAULT_SERVER_PORT), '--name', container_name])\n\n server_job = jobset.JobSpec(cmdline=docker_cmdline,\n environ=environ,\n shortname='interop_server_%s' % language,\n timeout_seconds=test_duration_secs * 3)\n server_job.container_name = container_name\n return server_job", "def _spec(self) -> k8s.PodSpec:\n return k8s.PodSpec(\n containers=self.containers,\n volumes=self._volumes(),\n affinity=self._affinity(),\n )", "def test_create_namespaced_csi_storage_capacity(self):\n pass", "def test_vmware_service_resources_management_get(self):\n pass", "def expose(self) -> Optional['outputs.InfinispanSpecExpose']:\n return pulumi.get(self, \"expose\")", "def test_schematization_undefined_service_default(self):\n\n self.cache.get(u\"á_complex_operation\")\n spans = self.get_spans()\n\n for span in spans:\n assert span.service == \"flask-cache\", \"Expected service name to be 'flask-cache' but was '{}'\".format(\n span.service\n )", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n capacity_reservation_group_id: Optional[pulumi.Input[str]] = None,\n custom_ca_trust_enabled: Optional[pulumi.Input[bool]] = None,\n enable_auto_scaling: Optional[pulumi.Input[bool]] = None,\n enable_host_encryption: Optional[pulumi.Input[bool]] = None,\n enable_node_public_ip: Optional[pulumi.Input[bool]] = None,\n eviction_policy: Optional[pulumi.Input[str]] = None,\n fips_enabled: Optional[pulumi.Input[bool]] = None,\n host_group_id: Optional[pulumi.Input[str]] = None,\n kubelet_config: Optional[pulumi.Input[pulumi.InputType['KubernetesClusterNodePoolKubeletConfigArgs']]] = None,\n kubelet_disk_type: Optional[pulumi.Input[str]] = 
None,\n kubernetes_cluster_id: Optional[pulumi.Input[str]] = None,\n linux_os_config: Optional[pulumi.Input[pulumi.InputType['KubernetesClusterNodePoolLinuxOsConfigArgs']]] = None,\n max_count: Optional[pulumi.Input[int]] = None,\n max_pods: Optional[pulumi.Input[int]] = None,\n message_of_the_day: Optional[pulumi.Input[str]] = None,\n min_count: Optional[pulumi.Input[int]] = None,\n mode: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n node_count: Optional[pulumi.Input[int]] = None,\n node_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n node_network_profile: Optional[pulumi.Input[pulumi.InputType['KubernetesClusterNodePoolNodeNetworkProfileArgs']]] = None,\n node_public_ip_prefix_id: Optional[pulumi.Input[str]] = None,\n node_taints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n orchestrator_version: Optional[pulumi.Input[str]] = None,\n os_disk_size_gb: Optional[pulumi.Input[int]] = None,\n os_disk_type: Optional[pulumi.Input[str]] = None,\n os_sku: Optional[pulumi.Input[str]] = None,\n os_type: Optional[pulumi.Input[str]] = None,\n pod_subnet_id: Optional[pulumi.Input[str]] = None,\n priority: Optional[pulumi.Input[str]] = None,\n proximity_placement_group_id: Optional[pulumi.Input[str]] = None,\n scale_down_mode: Optional[pulumi.Input[str]] = None,\n snapshot_id: Optional[pulumi.Input[str]] = None,\n spot_max_price: Optional[pulumi.Input[float]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n ultra_ssd_enabled: Optional[pulumi.Input[bool]] = None,\n upgrade_settings: Optional[pulumi.Input[pulumi.InputType['KubernetesClusterNodePoolUpgradeSettingsArgs']]] = None,\n vm_size: Optional[pulumi.Input[str]] = None,\n vnet_subnet_id: Optional[pulumi.Input[str]] = None,\n windows_profile: Optional[pulumi.Input[pulumi.InputType['KubernetesClusterNodePoolWindowsProfileArgs']]] = None,\n workload_runtime: Optional[pulumi.Input[str]] = None,\n zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'KubernetesClusterNodePool':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _KubernetesClusterNodePoolState.__new__(_KubernetesClusterNodePoolState)\n\n __props__.__dict__[\"capacity_reservation_group_id\"] = capacity_reservation_group_id\n __props__.__dict__[\"custom_ca_trust_enabled\"] = custom_ca_trust_enabled\n __props__.__dict__[\"enable_auto_scaling\"] = enable_auto_scaling\n __props__.__dict__[\"enable_host_encryption\"] = enable_host_encryption\n __props__.__dict__[\"enable_node_public_ip\"] = enable_node_public_ip\n __props__.__dict__[\"eviction_policy\"] = eviction_policy\n __props__.__dict__[\"fips_enabled\"] = fips_enabled\n __props__.__dict__[\"host_group_id\"] = host_group_id\n __props__.__dict__[\"kubelet_config\"] = kubelet_config\n __props__.__dict__[\"kubelet_disk_type\"] = kubelet_disk_type\n __props__.__dict__[\"kubernetes_cluster_id\"] = kubernetes_cluster_id\n __props__.__dict__[\"linux_os_config\"] = linux_os_config\n __props__.__dict__[\"max_count\"] = max_count\n __props__.__dict__[\"max_pods\"] = max_pods\n __props__.__dict__[\"message_of_the_day\"] = message_of_the_day\n __props__.__dict__[\"min_count\"] = min_count\n __props__.__dict__[\"mode\"] = mode\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"node_count\"] = node_count\n __props__.__dict__[\"node_labels\"] = node_labels\n __props__.__dict__[\"node_network_profile\"] = node_network_profile\n __props__.__dict__[\"node_public_ip_prefix_id\"] = 
node_public_ip_prefix_id\n __props__.__dict__[\"node_taints\"] = node_taints\n __props__.__dict__[\"orchestrator_version\"] = orchestrator_version\n __props__.__dict__[\"os_disk_size_gb\"] = os_disk_size_gb\n __props__.__dict__[\"os_disk_type\"] = os_disk_type\n __props__.__dict__[\"os_sku\"] = os_sku\n __props__.__dict__[\"os_type\"] = os_type\n __props__.__dict__[\"pod_subnet_id\"] = pod_subnet_id\n __props__.__dict__[\"priority\"] = priority\n __props__.__dict__[\"proximity_placement_group_id\"] = proximity_placement_group_id\n __props__.__dict__[\"scale_down_mode\"] = scale_down_mode\n __props__.__dict__[\"snapshot_id\"] = snapshot_id\n __props__.__dict__[\"spot_max_price\"] = spot_max_price\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"ultra_ssd_enabled\"] = ultra_ssd_enabled\n __props__.__dict__[\"upgrade_settings\"] = upgrade_settings\n __props__.__dict__[\"vm_size\"] = vm_size\n __props__.__dict__[\"vnet_subnet_id\"] = vnet_subnet_id\n __props__.__dict__[\"windows_profile\"] = windows_profile\n __props__.__dict__[\"workload_runtime\"] = workload_runtime\n __props__.__dict__[\"zones\"] = zones\n return KubernetesClusterNodePool(resource_name, opts=opts, __props__=__props__)", "def test_patch_namespaced_csi_storage_capacity(self):\n pass", "def service_create(container, cconfig, sysdir=constants.SYSTEMD_DIR, log=None):\n log = log or common.configure_logging(__name__)\n # We prefix the SystemD service so we can identify them better:\n # e.g. systemctl list-unit-files | grep tripleo_\n # It'll help to not conflict when rpms are installed on the host and\n # have the same service name as their container name.\n # For example haproxy rpm and haproxy container would have the same\n # service name so the prefix will help to not having this conflict\n # when removing the rpms during a cleanup by the operator.\n service = 'tripleo_' + container\n\n wants = \" \".join(str(x) + '.service' for x in\n cconfig.get('depends_on', []))\n\n restart = cconfig.get('restart', 'always')\n stop_grace_period = cconfig.get('stop_grace_period', '10')\n\n # Please refer to systemd.exec documentation for those entries\n # https://www.freedesktop.org/software/systemd/man/systemd.exec.html\n sys_exec = cconfig.get('systemd_exec_flags', {})\n\n # SystemD doesn't have the equivalent of docker unless-stopped.\n # Let's force 'always' so containers aren't restarted when stopped by\n # systemd, but restarted when in failure. 
Also this code is only for\n # podman now, so nothing changed for Docker deployments.\n if restart == 'unless-stopped':\n restart = 'always'\n\n sysd_unit_f = sysdir + service + '.service'\n log.debug('Creating systemd unit file: %s' % sysd_unit_f)\n s_config = {\n 'name': container,\n 'wants': wants,\n 'restart': restart,\n 'stop_grace_period': stop_grace_period,\n 'sys_exec': '\\n'.join(['%s=%s' % (x, y) for x, y in sys_exec.items()]),\n }\n with open(sysd_unit_f, 'w') as unit_file:\n os.chmod(unit_file.name, 0o644)\n unit_file.write(\"\"\"[Unit]\nDescription=%(name)s container\nAfter=paunch-container-shutdown.service\nWants=%(wants)s\n[Service]\nRestart=%(restart)s\nExecStart=/usr/bin/podman start %(name)s\nExecStop=/usr/bin/podman stop -t %(stop_grace_period)s %(name)s\nKillMode=none\nType=forking\nPIDFile=/var/run/%(name)s.pid\n%(sys_exec)s\n[Install]\nWantedBy=multi-user.target\"\"\" % s_config)\n try:\n subprocess.check_call(['systemctl', 'daemon-reload'])\n subprocess.check_call(['systemctl', 'enable', '--now', service])\n except subprocess.CalledProcessError:\n log.exception(\"systemctl failed\")\n raise", "def test_read_namespaced_csi_storage_capacity(self):\n pass", "def test_vmware_service_resources_interfaces_get(self):\n pass", "def test_service_container_init(self):\n\n Container.reset_services()\n\n # Checking event service.\n self.assertEquals(\n Container.services['events_service'],\n Events.Service\n )\n self.assertEquals(\n Container.events_service().info(),\n Events.Service().info()\n )\n\n # Testing location service.\n self.assertEquals(\n Container.services['location_service'],\n Location.Service\n )", "def _docker_service_file(\n storage_driver: DockerStorageDriver,\n docker_version: DockerVersion,\n) -> str:\n storage_driver_name = {\n DockerStorageDriver.AUFS: 'aufs',\n DockerStorageDriver.OVERLAY: 'overlay',\n DockerStorageDriver.OVERLAY_2: 'overlay2',\n }[storage_driver]\n\n daemon = {\n DockerVersion.v1_11_2: '/usr/bin/docker daemon',\n DockerVersion.v1_13_1: '/usr/bin/docker daemon',\n DockerVersion.v17_12_1_ce: '/usr/bin/dockerd',\n DockerVersion.v18_06_3_ce: '/usr/bin/dockerd',\n }[docker_version]\n\n docker_cmd = (\n '{daemon} '\n '-D '\n '-s {storage_driver_name} '\n '--exec-opt=native.cgroupdriver=cgroupfs '\n '--cgroup-parent=${{CGROUP_PARENT}}'\n ).format(\n storage_driver_name=storage_driver_name,\n daemon=daemon,\n )\n\n docker_service_contents = {\n 'Unit': {\n 'Description': 'Docker Application Container Engine',\n 'Documentation': 'https://docs.docker.com',\n 'After': 'dbus.service',\n },\n 'Service': {\n 'EnvironmentFile': '/etc/docker/env',\n 'ExecStart': docker_cmd,\n 'LimitNOFILE': '1048576',\n 'LimitNPROC': '1048576',\n 'LimitCORE': 'infinity',\n 'Delegate': 'yes',\n 'TimeoutStartSec': '0',\n },\n 'Install': {\n 'WantedBy': 'default.target',\n },\n }\n config = configparser.ConfigParser()\n # Ignore erroneous error https://github.com/python/typeshed/issues/1857.\n config.optionxform = str # type: ignore\n config.read_dict(docker_service_contents)\n config_string = io.StringIO()\n config.write(config_string)\n config_string.seek(0)\n return config_string.read()", "def create_tile_service(self,\r\n title,\r\n min_scale,\r\n max_scale,\r\n cache_info=None,\r\n build_cache=False):\r\n\r\n if self.type.lower() == 'Feature Service'.lower():\r\n p = self.layers[0].container\r\n if cache_info is None:\r\n cache_info = {'spatialReference': {'latestWkid': 3857, 'wkid': 102100},\r\n 'rows': 256, 'preciseDpi': 96, 'cols': 256, 'dpi': 96,\r\n 'origin': 
{'y': 20037508.342787, 'x': -20037508.342787},\r\n 'lods': [{'level': 0, 'scale': 591657527.591555, 'resolution': 156543.033928},\r\n {'level': 1, 'scale': 295828763.795777, 'resolution': 78271.5169639999},\r\n {'level': 2, 'scale': 147914381.897889, 'resolution': 39135.7584820001},\r\n {'level': 3, 'scale': 73957190.948944, 'resolution': 19567.8792409999},\r\n {'level': 4, 'scale': 36978595.474472, 'resolution': 9783.93962049996},\r\n {'level': 5, 'scale': 18489297.737236, 'resolution': 4891.96981024998},\r\n {'level': 6, 'scale': 9244648.868618, 'resolution': 2445.98490512499},\r\n {'level': 7, 'scale': 4622324.434309, 'resolution': 1222.99245256249},\r\n {'level': 8, 'scale': 2311162.217155, 'resolution': 611.49622628138},\r\n {'level': 9, 'scale': 1155581.108577, 'resolution': 305.748113140558},\r\n {'level': 10, 'scale': 577790.554289, 'resolution': 152.874056570411},\r\n {'level': 11, 'scale': 288895.277144, 'resolution': 76.4370282850732},\r\n {'level': 12, 'scale': 144447.638572, 'resolution': 38.2185141425366},\r\n {'level': 13, 'scale': 72223.819286, 'resolution': 19.1092570712683},\r\n {'level': 14, 'scale': 36111.909643, 'resolution': 9.55462853563415},\r\n {'level': 15, 'scale': 18055.954822, 'resolution': 4.77731426794937},\r\n {'level': 16, 'scale': 9027.977411, 'resolution': 2.38865713397468},\r\n {'level': 17, 'scale': 4513.988705, 'resolution': 1.19432856685505},\r\n {'level': 18, 'scale': 2256.994353, 'resolution': 0.597164283559817},\r\n {'level': 19, 'scale': 1128.497176, 'resolution': 0.298582141647617},\r\n {'level': 20, 'scale': 564.248588, 'resolution': 0.14929107082380833},\r\n {'level': 21, 'scale': 282.124294, 'resolution': 0.07464553541190416},\r\n {'level': 22, 'scale': 141.062147, 'resolution': 0.03732276770595208}]\r\n }\r\n pp = {\"minScale\":min_scale,\"maxScale\":max_scale,\"name\":title,\r\n \"tilingSchema\":{\"tileCacheInfo\": cache_info,\r\n \"tileImageInfo\":{\"format\":\"PNG32\",\"compressionQuality\":0,\"antialiasing\":True},\r\n \"cacheStorageInfo\":{\"storageFormat\":\"esriMapCacheStorageModeExploded\",\r\n \"packetSize\":128}},\"cacheOnDemand\":True,\r\n \"cacheOnDemandMinScale\":144448,\r\n \"capabilities\":\"Map,ChangeTracking\"}\r\n params = {\r\n \"f\" : \"json\",\r\n \"outputType\" : \"tiles\",\r\n \"buildInitialCache\" : build_cache,\r\n \"itemid\" : self.itemid,\r\n \"filetype\" : \"featureService\",\r\n \"publishParameters\" : json.dumps(pp)\r\n }\r\n url = \"%s/content/users/%s/publish\" % (self._portal.resturl,\r\n self._user_id)\r\n res = self._gis._con.post(url, params)\r\n serviceitem_id = self._check_publish_status(res['services'], folder=None)\r\n if self._gis._portal.is_arcgisonline:\r\n from ..mapping._types import MapImageLayer\r\n ms_url = self._gis.content.get(serviceitem_id).url\r\n ms = MapImageLayer(url=ms_url, gis=self._gis)\r\n extent = \",\".join([str(ms.properties['fullExtent']['xmin']),\r\n str(ms.properties['fullExtent']['ymin']),\r\n str(ms.properties['fullExtent']['xmax']),\r\n str(ms.properties['fullExtent']['ymax'])])\r\n lods = []\r\n for lod in cache_info['lods']:\r\n if lod['scale'] <= min_scale and \\\r\n lod['scale'] >= max_scale:\r\n lods.append(str(lod['level']))\r\n ms.manager.update_tiles(levels=\",\".join(lods), extent=extent)\r\n return self._gis.content.get(serviceitem_id)\r\n else:\r\n raise ValueError(\"Input must of type FeatureService\")\r\n return", "def __init__(self, server, port, service_name='memcache'):\n super(MemcacheServiceStub, self).__init__(service_name)\n if not server:\n server = 
DEFAULT_ADDR\n if not port:\n port = DEFAULT_PORT\n\n self._cache = pylibmc.Client(['%s:%i' % (server, port)])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ExposeSpec describe how Infinispan will be exposed externally
def expose(self) -> 'outputs.InfinispanSpecServiceSitesLocalExpose': return pulumi.get(self, "expose")
[ "def expose(self) -> Optional['outputs.InfinispanSpecExpose']:\n return pulumi.get(self, \"expose\")", "async def _expose_internal(self, exposure: Exposure, **kwargs) -> Exposure:\n\n raise NotImplementedError", "def should_expose(self, state) -> bool:\n expose_by_default = self._config.get(CONF_EXPOSE_BY_DEFAULT)\n exposed_domains = self._config.get(CONF_EXPOSED_DOMAINS)\n\n if state.attributes.get(\"view\") is not None:\n # Ignore entities that are views\n return False\n\n if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:\n return False\n\n entity_registry = er.async_get(self.hass)\n registry_entry = entity_registry.async_get(state.entity_id)\n if registry_entry:\n auxiliary_entity = (\n registry_entry.entity_category is not None\n or registry_entry.hidden_by is not None\n )\n else:\n auxiliary_entity = False\n\n explicit_expose = self.entity_config.get(state.entity_id, {}).get(CONF_EXPOSE)\n\n domain_exposed_by_default = (\n expose_by_default and state.domain in exposed_domains\n )\n\n # Expose an entity by default if the entity's domain is exposed by default\n # and the entity is not a config or diagnostic entity\n entity_exposed_by_default = domain_exposed_by_default and not auxiliary_entity\n\n # Expose an entity if the entity's is exposed by default and\n # the configuration doesn't explicitly exclude it from being\n # exposed, or if the entity is explicitly exposed\n is_default_exposed = entity_exposed_by_default and explicit_expose is not False\n\n return is_default_exposed or explicit_expose", "def EXPOSE(port_specs):\n msg = ''\n if not isinstance(port_specs, (list, tuple)):\n msg = 'EXPOSE instruction requires list or tuple, not %s' % (\n type(port_specs)\n )\n elif not port_specs:\n msg = 'EXPOSE instruction must have at least 1 port specifier.'\n if msg:\n raise DockerphileError(msg)\n return EXPOSE_t(port_specs=port_specs)", "def service(self) -> Optional['outputs.InfinispanSpecService']:\n return pulumi.get(self, \"service\")", "def polyCacheMonitor(cacheValue=bool, nodeName=\"string\"):\n pass", "def _ip_address_spec(output, ipaddress, netmask, interface, scope, active ):\n output.beginAssembling(\"IPaddressSpec\")\n output.setVirtualNameValue(\"IPaddress\", ipaddress)\n output.setVirtualNameValue(\"IPnetmask\", netmask)\n output.setVirtualNameValue(\"InterfaceName\", interface)\n output.setVirtualNameValue(\"Active\", scope)\n output.setVirtualNameValue(\"Scope\", active)\n output.endAssembling(\"IPaddressSpec\")", "def security(self) -> Optional['outputs.InfinispanSpecSecurity']:\n return pulumi.get(self, \"security\")", "def test_layer_cache(self, init):\n init.return_value = None\n cache = LayerCacheAggregator()\n b = Builder() # Don't need parameters as init's been mocked out\n b.cfr_title, b.cfr_part, b.doc_number = 15, '111', '111-222'\n b.writer = Mock()\n write = b.writer.layer.return_value.write\n tree = Node(label=[\"1234\"], children=[\n Node(label=[\"1234\", \"1\"], children=[\n Node(\"See paragraph (b)\", label=[\"1234\", \"1\", \"a\"]),\n Node(\"This is b\", label=[\"1234\", \"1\", \"b\"])])])\n b.gen_and_write_layers(tree, [], cache, [])\n arg = write.call_args_list[3][0][0]\n self.assertEqual(['1234-1-a'], arg.keys())\n cache.replace_using(tree)\n\n write.reset_mock()\n tree.children[0].children[1].text = \"References paragraph (a)\"\n b.gen_and_write_layers(tree, [], cache, [])\n arg = write.call_args_list[3][0][0]\n self.assertEqual(['1234-1-a'], arg.keys())\n\n write.reset_mock()\n tree.children[0].children[0].text = \"Contains no references\"\n 
b.gen_and_write_layers(tree, [], cache, [])\n arg = write.call_args_list[3][0][0]\n self.assertEqual(['1234-1-a'], arg.keys())\n\n write.reset_mock()\n notice = {}\n cache.invalidate_by_notice(notice)\n b.gen_and_write_layers(tree, [], cache, [])\n arg = write.call_args_list[3][0][0]\n self.assertEqual(['1234-1-a'], arg.keys())\n\n write.reset_mock()\n notice['changes'] = {'1234-1-b': 'some change'}\n cache.invalidate_by_notice(notice)\n b.gen_and_write_layers(tree, [], cache, [])\n arg = write.call_args_list[3][0][0]\n self.assertEqual(['1234-1-a', '1234-1-b'], list(sorted(arg.keys())))\n\n write.reset_mock()\n notice['changes'] = {'1234-Subpart-A': 'some change'}\n cache.invalidate_by_notice(notice)\n b.gen_and_write_layers(tree, [], cache, [])\n arg = write.call_args_list[3][0][0]\n self.assertEqual(['1234-1-b'], list(sorted(arg.keys())))", "def export(*names):\n def decorator(symbol):\n \"\"\"Decorator to export a symbol to the API.\n\n Args:\n symbol: Symbol to decorate.\n\n Returns:\n The input symbol with the `_api_names` attribute set.\n\n Raises:\n ValueError: If the name is invalid or already used.\n \"\"\"\n for name in names:\n # API name must have format \"namespace.name\".\n if name.count('.') != 1:\n raise ValueError(f\"Invalid API name: {name}\")\n # API namespace must be one of the supported ones.\n namespace, _ = name.split('.')\n if namespace not in _SUBMODULE_NAMES:\n raise ValueError(f\"Invalid API namespace: {namespace}\")\n # API name must be unique.\n if name in _API_SYMBOLS:\n raise ValueError(\n f\"Name {name} already used for exported symbol {symbol}\")\n # Add symbol to the API symbols table.\n _API_SYMBOLS[name] = symbol\n # Set the _api_names attribute.\n setattr(symbol, _API_ATTR, names)\n return symbol\n\n return decorator", "def container(self) -> Optional['outputs.InfinispanSpecContainer']:\n return pulumi.get(self, \"container\")", "def test_topic_appear_update_expose(self):\n\n topicname = '/test/nonexistent1'\n # every added topic should be in the list of args\n self.assertTrue(topicname not in self.interface.topics_args)\n # the backend should not have been created\n self.assertTrue(topicname not in self.interface.topics.keys())\n # First update should not change state\n dt = self.interface.update()\n self.assertEqual(dt.added, []) # nothing added\n self.assertEqual(dt.removed, []) # nothing removed\n # every added topic should be in the list of args\n self.assertTrue(topicname not in self.interface.topics_args)\n # the backend should not have been created\n self.assertTrue(topicname not in self.interface.topics.keys())\n\n # create the publisher and then try exposing the topic again, simulating\n # it coming online before expose call.\n nonexistent_pub = rospy.Publisher(topicname, Empty, queue_size=1)\n with timeout(5) as t:\n while not t.timed_out and nonexistent_pub.resolved_name not in self.interface.topics_available:\n dt = self.interface.update()\n self.assertEqual(dt.added, []) # nothing added (not exposed yet)\n self.assertEqual(dt.removed, []) # nothing removed\n\n self.assertTrue(not t.timed_out)\n # TODO : do we need a test with subscriber ?\n\n # every added topic should be in the list of args\n self.assertTrue(topicname not in self.interface.topics_args)\n # the backend should not have been created\n self.assertTrue(topicname not in self.interface.topics.keys())\n\n self.interface.expose_topics([topicname])\n # every exposed topic should remain in the list of args ( in case regex match another topic )\n self.assertTrue(topicname in 
self.interface.topics_args)\n # make sure the topic backend has been created\n self.assertTrue(topicname in self.interface.topics.keys())\n\n nonexistent_pub.unregister() # https://github.com/ros/ros_comm/issues/111 ( topic is still registered on master... )", "def container(self) -> Optional['outputs.InfinispanSpecServiceContainer']:\n return pulumi.get(self, \"container\")", "def get_inports( self ):\n return self._inports", "def _define_module_argument_spec():\n return dict( name=dict(required=True, aliases=['stage_name']),\n rest_api_id=dict(required=True),\n description=dict(required=False),\n cache_cluster_enabled=dict(required=False, type='bool'),\n cache_cluster_size=dict(required=False, choices=['0.5','1.6','6.1','13.5','28.4','58.2','118','237']),\n method_settings=dict(\n required=False,\n default=[],\n type='list',\n method_name=dict(required=True),\n method_verb=dict(required=True, choices=['GET','PUT','POST','DELETE','HEAD','OPTIONS','PATCH']),\n caching_enabled=dict(required=False, default=False, type='bool')\n ),\n state=dict(required=False, default='present', choices=['absent', 'present'])\n )", "def test_api_challenge_list_visibility():\n app = create_ctfd()\n with app.app_context(), freeze_time(\"2017-10-5\"):\n set_config(\n \"start\", \"1507089600\"\n ) # Wednesday, October 4, 2017 12:00:00 AM GMT-04:00 DST\n set_config(\n \"end\", \"1507262400\"\n ) # Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST\n set_config(\"challenge_visibility\", \"public\")\n with app.test_client() as client:\n r = client.get(\"/api/v1/challenges\")\n assert r.status_code == 200\n set_config(\"challenge_visibility\", \"private\")\n r = client.get(\"/api/v1/challenges\")\n assert r.status_code == 302\n destroy_ctfd(app)", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_discover_worker_responder_spaces(self):\n pass", "def test_get_params_decorator(self, name):\n test_get_params_fn = self._test_get_params_fn()\n stage = self._get_mock_stage()\n encode_params, decode_params = self.evaluate(\n test_get_params_fn(stage, name))\n\n # The graph should contain a single node.\n graph = tf.compat.v1.get_default_graph()\n self.assertLen(graph.as_graph_def().node, 1)\n if name is not None:\n self._assert_all_graph_nodes_in_name_scope(graph, name)\n else:\n self._assert_all_graph_nodes_in_name_scope(\n graph, self._DEFAULT_NAME + '_get_params')\n # The functionality is not modified.\n self.assertEqual(1.0, encode_params['param'])\n self.assertEqual(1.0, decode_params['param'])", "def cache_enable(self):\n return self.param_info.cache_enable" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
InfinispanSecurity info for the user application connection
def security(self) -> 'outputs.InfinispanStatusSecurity': return pulumi.get(self, "security")
[ "def security(self) -> Optional['outputs.InfinispanSpecSecurity']:\n return pulumi.get(self, \"security\")", "def getSecurity(self):\n return self._security", "def get_security_config(app):\n items = app.config.items()\n prefix = 'SECURITY_'\n\n def strip_prefix(tup):\n return (tup[0].replace('SECURITY_', ''), tup[1])\n\n return dict([strip_prefix(i) for i in items if i[0].startswith(prefix)])", "def getAuthenticationMap(self, *args):\r\n return _osgDB.Registry_getAuthenticationMap(self, *args)", "def keys(self):\n c = self.get_cxn().cursor()\n c.execute('SELECT session_id FROM user_sessions')\n return [ id for (id,) in c.fetchall() ]", "def getAccounts(self):\n accounts = CombaUser().getLogins()\n db = redis.Redis()\n\n internaccount = db.get('internAccess')\n if not internaccount:\n user = ''.join(random.sample(string.lowercase,10))\n password = ''.join(random.sample(string.lowercase+string.uppercase+string.digits,22))\n db.set('internAccess', user + ':' + password)\n intern = [user, password]\n else:\n intern = internaccount.split(':')\n\n accounts[intern[0]] = intern[1]\n\n return accounts", "def security_info(self, cusip, issue_date):\n self._check_cusip(cusip)\n issue_date = self._check_date(issue_date, '%m/%d/%Y')\n url = self.base_url + self.securities_endpoint + '{}/{}?format=json'.format(cusip, issue_date)\n security_dict = self._process_request(url)\n return security_dict", "def extract_auth_info():\n\n assert 'X-Login' in request.headers, \\\n 'Your web server must pass along the X-Login header.'\n\n login = request.headers['X-Login']\n g.user = db.user.get(login)\n\n if g.user is None:\n msg = _('There is no user account for you, contact administrator.')\n raise InvalidUsage(msg, data={'login': login})\n\n db.connection() \\\n .execute(text('select set_user(:name)'), name=login)\n\n g.roles = set(request.headers.get('X-Roles', '').split(';'))\n g.roles.discard('')", "def user_info(self):\n return self.__user_info", "def _get_session_auth_info(_helper_cfg):\n _session_auth = {}\n _session_info = ['username', 'password']\n for _key in _session_info:\n if _key in _helper_cfg['connection']['session_auth']:\n _session_auth[_key] = _helper_cfg['connection']['session_auth'][_key]\n else:\n _session_auth[_key] = None\n return _session_auth", "def _init_security_group(self):", "def getSecurities(self):\n\n exchange = {0:'NASDAQ', 1:'NYSE', 2:'ASE', 6:'OTC'}\n\n # Request number of securities in database\n if not self.sock.send('\\3'):\n print \"send 3 error\"\n self.close()\n return False\n\n ninfo = unpack('I',self.RecvAll(size=4))[0]\n print \"%d possible security_id's\" % ninfo\n Info = {} # empty dictionary\n sid = 0\n\n # Request the list of securities\n if not self.sock.send('\\4'):\n print \"send 4 error\"\n self.close()\n return False\n\n sids = []; tickers = []; ciks = []; sics = []; xchngs = []; names = []\n\n while sid != 9999999:\n info = self.RecvAll(size=280)\n if len(info) != 280:\n print \"info recv error, only %d bytes\" % len(info)\n self.close()\n return False\n\n sid,cik,sic,xchg,name,tkr = unpack('2I1i1I256s8s',info)\n name = name.split(\"\\0\",1)[0] # remove garbage after null byte\n tkr = tkr.split(\"\\0\",1)[0]\n #Info[sid] = {'ticker':tkr, 'cik':cik, 'sic':sic, 'exchange':exchange[xchg], 'company':name} # add dictionary item\n\n sids.append(sid)\n tickers.append(tkr)\n ciks.append(cik)\n sics.append(sic)\n xchngs.append(exchange[xchg])\n names.append(name)\n\n #assert list(set(sid)) == sid # SID list should be unique\n info = {'ticker':tickers, 'cik':ciks, 
'sic':sics, 'exchange':xchngs, 'company':names}\n universe = pd.DataFrame(info, index=sids)\n\n print \"%d entries in security_id Info dictionary\" % len(universe)\n return universe", "def advapi32_GetSecurityInfo(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"handle\", \"ObjectType\", \"SecurityInfo\", \"ppsidOwner\", \"ppsidGroup\", \"ppDacl\", \"ppSacl\", \"ppSecurityDescriptor\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def get_user_info(self: CipherDatabase) -> dict:\n try:\n _database = Database(database=self.get_database_path(), mode=\"rw\")\n except DatabaseDoesNotExistException:\n raise UserCacheNotFoundException(\n \"Database doesn't have any cache stored\")\n\n row = _database.read(\"Email\", \"Password\", table=\"Users\", rows=\".\")\n\n return {\"user_email\": self.decrypt(row[0]), \"user_password\": self.decrypt(row[1])}", "def all_slice_authorities(self):\n # return self._uppercase_keys_in_list([e for e in self._delegate_tools.get_registry()[\"SERVICES\"] if (e['service_type']==self.SA_SERVICE_TYPE)])\n # Current deployments assume single slice authority", "def get_session_info(self):\n\n return self.get_session_key(), self.get_session_location()", "def security_definitions(self):\n return None", "def _get_ssids(self):\n return self.__ssids", "def security_credential(self):\n return self._security_credential" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads a Varian .fid file and converts it into an NMR pipe file.
def read_varian_as_nmrpipe(fid_file):
    dic, data = ng.varian.read(fid_file)
    udic = ng.varian.guess_udic(dic, data)
    C = ng.convert.converter()
    C.from_varian(dic, data, udic)
    dic, data = C.to_pipe()
    return dic, data
[ "def write_varian_as_pipe(fid_file, output_folder):\n # Get the basename of the fid_file.\n # base_name = os.path.basename(fid_file)\n base_name = os.sep.join(os.path.normpath(fid_file).split(os.sep)[5:])\n\n dic, data = ng.varian.read(fid_file)\n udic = ng.varian.guess_udic(dic, data)\n convert = ng.convert.converter()\n convert.from_varian(dic, data, udic)\n output_path = os.path.join(output_folder, f\"pipe-{base_name}\")\n ng.pipe.write(output_path, *convert.to_pipe(), overwrite=True)\n return output_path", "def read_var(file):\n c = read_byte(file)\n value = c & 0x7f\n if c & 0x80:\n c = read_byte(file)\n value = (value << 7) | (c & 0x7f)\n if c & 0x80:\n c = read_byte(file)\n value = (value << 7) | (c & 0x7f)\n if c & 0x80:\n c = read_byte(file)\n value = (value << 7) | (c & 0x7f)\n if c & 0x80:\n raise SMFError('Invalid variable-length number at file position %d' % file.tell())\n return value", "def read(*args):\n return _vnl_vectorPython.vnl_vectorF_read(*args)", "def vnl_vectorF_read(*args):\n return _vnl_vectorPython.vnl_vectorF_read(*args)", "def _read_nlrsfd(self, data: bytes, n: int) -> int:\n op2 = self.op2\n ntotal = 80 # 4*20\n nentries = (len(data) - n) // ntotal\n struc = Struct(op2._endian + b'3i 8s 3f 8s 2f i 4f i 2f')\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struc.unpack(edata)\n (sid, ga, gb, plane, bdia, blen, bclr, soln,\n visco, pvapco, nport,\n pres1, theta1, pres2, theat2, npnt,\n offset1, offset2) = out\n plane = plane.rstrip().decode('latin1')\n soln = soln.rstrip().decode('latin1')\n #NLRSFD SID GA GB PLANE BDIA BLEN BCLR SOLN\n # VISCO PVAPCO NPORT PRES1 THETA1 PRES2 THETA2 NPNT\n # OFFSET1 OFFSET2\n if op2.is_debug_file:\n op2.binary_debug.write(' NLRSFD=%s\\n' % str(out))\n op2.add_nlrsfd(sid, ga, gb, plane, bdia, blen, bclr, soln,\n visco, pvapco, nport,\n pres1, theta1, pres2, theat2, npnt,\n offset1, offset2)\n n += ntotal\n op2.increase_card_count('NLRSFD', nentries)\n return n", "def readData(self, n):\n f = open(self.file, \"rb\")\n fortran.skip(f)\n for i in range(n):\n fortran.skip(f) # Detector Header\n if self.detector[i].lowneu:\n fortran.skip(f) # Detector low energy neutron groups\n fortran.skip(f) # Detector data\n\n fortran.skip(f) # Detector Header\n if self.detector[n].lowneu:\n fortran.skip(f) # Detector low energy neutron groups\n data = fortran.read(f) # Detector data\n f.close()\n return data", "def read_fsa_db(db,fp,org_id) :\n\n cdsseq=\"\"\n tag=\"\"\n for line in fp :\n if line[0] == '>' :\n loaddb(cdsseq,org_id,tag,db)\n\n tag = line[1:].strip().split()[0]\n tag=tag.replace(\"ORFN:\",\"ORFP_\")\n cdsseq = \"\"\n else :\n cdsseq += line.strip()\n \n loaddb(cdsseq,org_id,tag,db)", "def read_pvar(pvar_path):\n return pd.read_csv(pvar_path, sep='\\t', comment='#',\n names=['chrom', 'pos', 'id', 'ref', 'alt', 'qual', 'filter', 'info'],\n dtype={'chrom':str, 'pos':np.int32, 'id':str, 'ref':str, 'alt':str,\n 'qual':str, 'filter':str, 'info':str})", "def parse_vcfs(args, db):\n for sid in db[\"samples\"]:\n for mode in [\"SNV\", \"INDEL\"]:\n parse_vcf(args, db, sid, mode)", "def handle_var_file(self, filename, source):\n var_description = ('PGP Harvard genome, Complete Genomics var file '\n 'format.')\n new_filename = 'PGP-Harvard-{}-var.tsv'.format(self.hu_id)\n\n if filename.endswith('.bz2'):\n new_filename += '.bz2'\n elif filename.endswith('.gz'):\n new_filename += '.gz'\n\n new_filepath = os.path.join(self.temp_directory, new_filename)\n\n shutil.move(os.path.join(self.temp_directory, filename), 
new_filepath)\n\n self.temp_files.append({\n 'temp_filename': new_filename,\n 'metadata': {\n 'description': var_description,\n 'tags': ['Complete Genomics', 'var', 'genome'],\n 'sourceURL': source,\n 'originalFilename': filename,\n },\n })\n\n vcf_filename = re.sub(r'\\.tsv', '.vcf', new_filename)\n\n if not (vcf_filename.endswith('.gz') or vcf_filename.endswith('.bz2')):\n vcf_filename += '.bz2'\n\n self.vcf_from_var(vcf_filename, var_filepath=new_filepath)", "def read_nms_file(nms_filename):\n d = {}\n if nms_filename == '':\n return d\n #read file\n try:\n f, file_enc = open_file(nms_filename, 'r')\n nms_lines = f.readlines()\n f.close()\n except IOError:\n print _('Error: nms file [%s] does not exist' % (nms_filename))\n return d\n nms_lines = [s.strip() for s in nms_lines]\n #create dictionary\n nms_recs = [l.split('|') for l in nms_lines]\n for r in nms_recs:\n if r != ['']:\n d[r[1]] = r[0]\n #done\n return d", "def readfile(f):\n n = repvar(f)\n print(\"Reading file `\"+n+\"'\")\n fh = open(n)\n c = fh.read()\n fh.close()\n return c", "def readStat(self, n):\n if self.statpos < 0:\n return None\n f = open(self.file, \"rb\")\n f.seek(self.statpos)\n for i in range(n):\n fortran.skip(f) # Detector Data\n data = fortran.read(f)\n f.close()\n return data", "def readData(self, n):\n f = open(self.file, \"rb\")\n fortran.skip(f)\n for i in range(n):\n fortran.skip(f) # Detector Header\n if self.detector[i].low_en_neutr_sc:\n fortran.skip(f) # Detector low energy neutron groups\n fortran.skip(f) # Detector data\n\n fortran.skip(f) # Detector Header\n if self.detector[n].low_en_neutr_sc:\n fortran.skip(f) # Detector low energy neutron groups\n data = fortran.read(f) # Detector data\n f.close()\n return data", "def read_gitm_one_file(file_to_read, vars_to_read=-1):\n\n print(\"Reading file : \"+file_to_read)\n\n data = {\"version\": 0, \\\n \"nLons\": 0, \\\n \"nLats\": 0, \\\n \"nAlts\": 0, \\\n \"nVars\": 0, \\\n \"time\": 0, \\\n \"vars\": []}\n\n f=open(file_to_read, 'rb')\n\n # This is all reading header stuff:\n\n endChar='>'\n rawRecLen=f.read(4)\n recLen=(unpack(endChar+'l',rawRecLen))[0]\n if (recLen>10000)or(recLen<0):\n # Ridiculous record length implies wrong endian.\n endChar='<'\n recLen=(unpack(endChar+'l',rawRecLen))[0]\n\n # Read version; read fortran footer+data.\n data[\"version\"] = unpack(endChar+'d',f.read(recLen))[0]\n\n (oldLen, recLen)=unpack(endChar+'2l',f.read(8))\n\n # Read grid size information.\n (data[\"nLons\"],data[\"nLats\"],data[\"nAlts\"]) = \\\n unpack(endChar+'lll',f.read(recLen))\n (oldLen, recLen)=unpack(endChar+'2l',f.read(8))\n\n # Read number of variables.\n data[\"nVars\"]=unpack(endChar+'l',f.read(recLen))[0]\n (oldLen, recLen)=unpack(endChar+'2l',f.read(8))\n\n if (vars_to_read[0] == -1):\n vars_to_read = np.arange[nVars]\n\n # Collect variable names.\n for i in range(data[\"nVars\"]):\n data[\"vars\"].append(unpack(endChar+'%is'%(recLen),f.read(recLen))[0])\n (oldLen, recLen)=unpack(endChar+'2l',f.read(8))\n\n # Extract time. 
\n (yy,mm,dd,hh,mn,ss,ms)=unpack(endChar+'lllllll',f.read(recLen))\n data[\"time\"] = datetime(yy,mm,dd,hh,mn,ss,ms*1000)\n #print(data[\"time\"])\n\n # Header is this length:\n # Version + start/stop byte\n # nLons, nLats, nAlts + start/stop byte\n # nVars + start/stop byte\n # Variable Names + start/stop byte\n # time + start/stop byte\n\n iHeaderLength = 8 + 4+4 + 3*4 + 4+4 + 4 + 4+4 + \\\n data[\"nVars\"]*40 + data[\"nVars\"]*(4+4) + 7*4 + 4+4\n\n nTotal = data[\"nLons\"]*data[\"nLats\"]*data[\"nAlts\"]\n iDataLength = nTotal*8 + 4+4\n\n for iVar in vars_to_read:\n f.seek(iHeaderLength+iVar*iDataLength)\n s=unpack(endChar+'l',f.read(4))[0]\n data[iVar] = np.array(unpack(endChar+'%id'%(nTotal),f.read(s)))\n data[iVar] = data[iVar].reshape( \n (data[\"nLons\"],data[\"nLats\"],data[\"nAlts\"]),order=\"F\")\n\n f.close()\n\n return data", "def read(self):\n if self.getiddname() == None:\n errortxt = (\n \"IDD file needed to read the idf file. \"\n \"Set it using IDF.setiddname(iddfile)\"\n )\n raise IDDNotSetError(errortxt)\n readout = idfreader1(\n self.idfname, self.iddname, self, commdct=self.idd_info, block=self.block\n )\n (self.idfobjects, block, self.model, idd_info, idd_index, idd_version) = readout\n self.__class__.setidd(idd_info, idd_index, block, idd_version)", "def readf26(fh, res=None):\n if isinstance(fh, basestring):\n fh = open(fh, 'rt')\n f = fh.readlines()\n fh.close()\n vp = VpfitModel()\n if len(f) == 0:\n #print filename, 'is empty'\n return vp #None [JXP FIX; 16 Sep 2014]\n f = [r for r in f if\n not r.lstrip().startswith('!') or 'Stats' in r]\n regionrows = [r for r in f if r.lstrip().startswith('%%')]\n ionrows = [r for r in f if '%%' not in r and\n 'Stats' not in r and r.lstrip()]\n keys = 'iterations nchisq npts dof prob ndropped info'.split()\n statrow = [row for row in f if 'Stats' in row]\n if statrow:\n if statrow[0].split()[-1] == 'BAD':\n status = 'BAD'\n else:\n status = 'OK'\n vals = statrow[0].split()[2:8] + [status]\n vp.stats = dict(zip(keys,vals))\n elif ionrows:\n # older style f26 file\n stat = ionrows[0]\n status = ('BAD' if stat.split()[-1] == 'BAD' else 'OK')\n vals = [stat[66:71], stat[71:85], stat[85:90], stat[90:95],\n stat[95:102], stat[102:107], status]\n vp.stats = dict(zip(keys,vals))\n\n vp.regions = parse_regions(regionrows,res=res)\n #print vp.regions,'\\n\\n\\n'\n #vp.filename = filename\n if len(ionrows) == 0:\n return vp\n\n ionrows = [r.lstrip() for r in ionrows]\n param = []\n molecule_names = set(('H2J0 H2J1 H2J2 H2J3 H2J4 H2J5 H2J6 '\n 'COJ0 COJ1 COJ2 COJ3 COJ4 COJ5 COJ6 '\n 'HDJ0 HDJ1 HDJ2').split())\n for r in ionrows:\n if 'nan' in r:\n i = r.index('nan')\n param.append([r[:i]] + r[i:].split())\n continue\n if r[:4] in molecule_names:\n i = 4\n else:\n i = 0\n while not r[i].isdigit() and r[i] != '-':\n i += 1\n param.append([r[:i]] + r[i:].split())\n \n param = [[p[0],p[1],p[3],p[5],p[2],p[4],p[6]] for p in param]\n vp.lines = parse_lines(param)\n\n return vp", "def nco_extract( var , infile , outfile ):\n command = \"ncks --overwrite --history\"+\\\n \" --variable \"+var+\\\n \" --output \"+outfile+\\\n \" \"+infile\n process_cmd(command)", "def read_raw_nirx(fname, saturated=\"annotate\", preload=False, verbose=None):\n return RawNIRX(fname, saturated, preload, verbose)", "def ReadNmrView(self, fileName):\n if _DoesFileExist(fileName) == 0:\n return\n print 'reading a NMRView .out file', fileName\n\n #important - clean atomlist and atomdicfa:\n self.atomlist = []\n self.atomdicfa = {}\n self.fileName = fileName\n\n #get the 
file without the comments:\n bigstring = DeleteComments.GetString(fileName)\n\n #split the string in lines:\n lines = string.split(bigstring, '\\n')\n\n for line in lines:\n linelist = string.split(line)\n #for wrong or empty lines:\n if len(linelist) < 3:\n continue\n ATOM = Atom()\n firstFieldList = string.split(linelist[0], '.')\n ATOM.residuenumber = firstFieldList[0]\n ATOM.aminoacid = None\n ATOM.segid = None\n ATOM.atomname = (PseudoAtom.Pseudo2Atom(firstFieldList[1]),)\n ATOM.shift = linelist[1]\n ATOM.shifterror = '0.0'\n self.AddAtom(ATOM)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads a Varian .fid file and writes it as an NMR pipe file in the output_folder directory.
def write_varian_as_pipe(fid_file, output_folder):
    # Get the basename of the fid_file.
    # base_name = os.path.basename(fid_file)
    base_name = os.sep.join(os.path.normpath(fid_file).split(os.sep)[5:])

    dic, data = ng.varian.read(fid_file)
    udic = ng.varian.guess_udic(dic, data)
    convert = ng.convert.converter()
    convert.from_varian(dic, data, udic)
    output_path = os.path.join(output_folder, f"pipe-{base_name}")
    ng.pipe.write(output_path, *convert.to_pipe(), overwrite=True)
    return output_path
[ "def nco_extract( var , infile , outfile ):\n command = \"ncks --overwrite --history\"+\\\n \" --variable \"+var+\\\n \" --output \"+outfile+\\\n \" \"+infile\n process_cmd(command)", "def create_forna_file(output_folder, origin, name, seq, structure):\n if origin == \"Real\":\n forna_file = '{}/{}_(Real).txt'.format(output_folder, name)\n else:\n forna_file = '{}/{}_({}_predicted).txt'.format(output_folder, name, origin)\n with open(forna_file, 'w') as output:\n if origin == \"Real\":\n output.write('>{}_Real'.format(name))\n else:\n output.write('>{}_{}_predicted'.format(name, origin))\n output.write('\\n')\n output.write(seq)\n output.write('\\n')\n output.write(structure)", "def write_VarScan_script(self, out, exe, ref, file1, file2):\n output_dir = os.path.join(out, self.out)\n script_file = os.path.join(output_dir, self.script_filename)\n output_snp = os.path.join(output_dir, 'output.snp')\n output_indel = os.path.join(output_dir, 'output.indel')\n logging.info(\"Create script file: {0}\".format(script_file))\n cmd_samtools = \"{0} mpileup -f {1} {2} {3}\".format(samtools_exe, ref, file1, file2)\n cmd_VarScan = \"{0} -Xmx25g -jar {1} somatic --output-snp {2} --output-indel {3} --mpileup 1\".format(\n java_exe, exe, output_snp, output_indel\n )\n for key in self.params.keys():\n if self.params[key] is None:\n raise ValueError(\"VarScan does not support flags without value: {0}\".format(key))\n cmd_VarScan += \" {0} {1}\".format(key, self.params[key])\n self.write_prolog_script(script_file)\n cmd = \"{0} | {1}\\n\".format(cmd_samtools, cmd_VarScan)\n with open(script_file, 'a') as stream:\n # stream.write(\"cd {0}\\n\".format(output_dir))\n stream.write(cmd)\n self.make_script_executable(script_file)\n return None", "def convert_dicom_to_nifti(infolder, outfolder):\n\tpath.rmtree(outfolder)\n\tpath.makedirs(outfolder)\n\tgen_scan_info(infolder, outfolder)\n\tret = subprocess.call([rootconfig.path.dcm2nii, '-z', 'y', '-o', outfolder, infolder],\n\t\tcwd=os.path.dirname(rootconfig.path.dcm2nii))\n\tprint(outfolder, ret)\n\treturn ret", "def write_dsift_to_file(self, out_fname=''):\n\n f = open(out_fname, 'w')\n for path in self.dsift_paths:\n print path\n locations, descriptors = sift.read_features_from_file(path, desc_dim=132)\n\n # check that it's safe to cast ot uint16\n check = descriptors[descriptors > 2 ** 16]\n if check != 0:\n print path\n print descriptors\n print descriptors[descriptors > 2 ** 16]\n raw_input(\"uint16 is not enough\")\n descriptors = descriptors.astype(np.uint16)\n\n savetxt_compact(f, descriptors) # pass file handle to function\n # print descriptors.shape\n # print type(descriptors)\n\n f.close()", "def extract_cif(infile, folder, nodes_export_subfolder=\"nodes\", aiida_export_subfolder=\"aiida\", silent=False):\n # pylint: disable=unused-argument,too-many-locals,invalid-name\n from six.moves import urllib\n import CifFile\n from aiida.common.exceptions import ValidationError\n from aiida.common.files import md5_file, sha1_file\n from aiida.tools.dbexporters.tcod import decode_textfield\n\n values = CifFile.ReadCif(infile)\n values = values[list(values.keys())[0]] # taking the first datablock in CIF\n\n for i in range(len(values['_tcod_file_id']) - 1):\n name = values['_tcod_file_name'][i]\n if not name.startswith(aiida_export_subfolder + os.sep):\n continue\n dest_path = os.path.relpath(name, aiida_export_subfolder)\n if name.endswith(os.sep):\n if not os.path.exists(folder.get_abs_path(dest_path)):\n folder.get_subfolder(folder.get_abs_path(dest_path), 
create=True)\n continue\n contents = values['_tcod_file_contents'][i]\n if contents in ['?', '.']:\n uri = values['_tcod_file_uri'][i]\n if uri is not None and uri != '?' and uri != '.':\n contents = urllib.request.urlopen(uri).read()\n encoding = values['_tcod_file_content_encoding'][i]\n if encoding == '.':\n encoding = None\n contents = decode_textfield(contents, encoding)\n if os.path.dirname(dest_path) != '':\n folder.get_subfolder(os.path.dirname(dest_path) + os.sep, create=True)\n with io.open(folder.get_abs_path(dest_path), 'w', encoding='utf8') as fhandle:\n fhandle.write(contents)\n fhandle.flush()\n md5 = values['_tcod_file_md5sum'][i]\n if md5 is not None:\n if md5_file(folder.get_abs_path(dest_path)) != md5:\n raise ValidationError(\"MD5 sum for extracted file '{}' is \"\n \"different from given in the CIF \"\n \"file\".format(dest_path))\n sha1 = values['_tcod_file_sha1sum'][i]\n if sha1 is not None:\n if sha1_file(folder.get_abs_path(dest_path)) != sha1:\n raise ValidationError(\"SHA1 sum for extracted file '{}' is \"\n \"different from given in the CIF \"\n \"file\".format(dest_path))", "def process_file(f_in, f_out, field_dictionary):\n\n print(f_out)", "def build_input_file(self, replica):\n\n\t\tbasename = self.inp_basename[:-5]\n\t\ttemplate = self.inp_basename\n\t\t\t\n\t\tnew_input_file = \"%s_%d_%d.namd\" % (basename, replica.id, replica.cycle)\n\t\toutputname = \"%s_%d_%d\" % (basename, replica.id, replica.cycle)\n\t\told_name = \"%s_%d_%d\" % (basename, replica.id, (replica.cycle-1))\n\t\treplica.new_coor = outputname + \".coor\"\n\t\treplica.new_vel = outputname + \".vel\"\n\t\treplica.new_history = outputname + \".history\"\n\t\treplica.new_ext_system = outputname + \".xsc\" \n\t\thistoryname = replica.new_history\n\n\t\tif (replica.cycle == 0):\n\t\t\tfirst_step = 0\n\t\telif (replica.cycle == 1):\n\t\t\tfirst_step = int(self.cycle_steps)\n\t\telse:\n\t\t\tfirst_step = (replica.cycle - 1) * int(self.cycle_steps)\n\n\t\tif (replica.cycle == 0):\n\t\t\told_name = \"%s_%d_%d\" % (basename, replica.id, (replica.cycle-1)) \n\t\t\tstructure = self.namd_structure\n\t\t\tcoordinates = self.namd_coordinates\n\t\t\tparameters = self.namd_parameters\n\t\telse:\n\t\t\told_name = \"../staging_area/%s_%d_%d\" % (basename, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t replica.id, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t(replica.cycle-1))\n\t\t\tstructure = self.namd_structure\n\t\t\tcoordinates = self.namd_coordinates\n\t\t\tparameters = self.namd_parameters\n\n\t\t# substituting tokens in main replica input file \n\t\ttry:\n\t\t\tr_file = open( (os.path.join((self.work_dir_local + \"/namd_inp/\"), \\\n\t\t\t\t\t\t\ttemplate)), \"r\")\n\t\texcept IOError:\n\t\t\tprint 'Warning: unable to access template file %s' % template\n\n\t\ttbuffer = r_file.read()\n\t\tr_file.close()\n\n\t\ttbuffer = tbuffer.replace(\"@swap@\",str(replica.swap))\n\t\ttbuffer = tbuffer.replace(\"@ot@\",str(replica.old_temperature))\n\t\ttbuffer = tbuffer.replace(\"@nt@\",str(replica.new_temperature))\n\t\ttbuffer = tbuffer.replace(\"@steps@\",str(self.cycle_steps))\n\t\ttbuffer = tbuffer.replace(\"@rid@\",str(replica.id))\n\t\ttbuffer = tbuffer.replace(\"@somename@\",str(outputname))\n\t\ttbuffer = tbuffer.replace(\"@oldname@\",str(old_name))\n\t\ttbuffer = tbuffer.replace(\"@cycle@\",str(replica.cycle))\n\t\ttbuffer = tbuffer.replace(\"@firststep@\",str(first_step))\n\t\ttbuffer = tbuffer.replace(\"@history@\",str(historyname))\n\t\ttbuffer = tbuffer.replace(\"@structure@\", str(structure))\n\t\ttbuffer = 
tbuffer.replace(\"@coordinates@\", str(coordinates))\n\t\ttbuffer = tbuffer.replace(\"@parameters@\", str(parameters))\n\t\t\n\t\t# write out\n\t\ttry:\n\t\t\tw_file = open( new_input_file, \"w\")\n\t\t\tw_file.write(tbuffer)\n\t\t\tw_file.close()\n\t\texcept IOError:\n\t\t\tprint 'Warning: unable to access file %s' % new_input_file", "def convert_zipped_dicom_to_nifti(zip_file_path: Path, reference_series_folder: Path,\n nifti_file_path: Path) -> None:\n extract_zipped_files_and_flatten(zip_file_path, reference_series_folder)\n load_dicom_series_and_save(reference_series_folder, nifti_file_path)", "def to_shapefile(output, input_nrml_file, validate):\n input_parser = shapefileparser.SourceModelParser()\n source_model = input_parser.read(input_nrml_file, validate)\n if not output:\n output = os.path.splitext(input_nrml_file)[0]\n print('Extracting %s_ files' % output)\n shapefileparser.ShapefileParser().write(output, source_model)", "def create_output_file_for_parametrization(input_file, is_pdb, molset_atom_types, classifier_name):\n input_filename = os.path.basename(input_file)\n if input_filename.endswith('.sdf') or input_filename.endswith('.pdb'):\n input_filename = input_filename[:-4]\n # returns parent directory of directory where io.py is saved\n parent_dir = Path(__file__).resolve().parents[1]\n output_dirname = 'attyc_outputs'\n if not os.path.isdir(os.path.join(parent_dir, output_dirname)):\n print(f'Creating directory {output_dirname}...')\n os.mkdir(os.path.join(parent_dir, output_dirname))\n\n if is_pdb:\n file_extension = 'PDB'\n else:\n file_extension = 'SDF'\n output_filename = f'{input_filename}{file_extension}_{classifier_name}.txt'\n print(f'Output filename: {output_filename},\\n'\n f'path: {os.path.join(parent_dir, output_dirname, output_filename)}')\n with open(os.path.join(parent_dir, output_dirname, output_filename), 'w') as file:\n file.writelines(','.join(mol_atom_types) + os.linesep for mol_atom_types in molset_atom_types)\n print('Finished successfully.')", "def read_output_file():\n global TP_SPAM, FP_SPAM, TN_SPAM, FN_SPAM\n global TP_HAM, FP_HAM, TN_HAM, FN_HAM\n global path\n output_file = open(path+\"nboutput.txt\",\"r\", encoding=\"latin-1\")\n i = 0\n for line in output_file:\n i+=1\n arr = line.split()\n path = arr[1]\n label = arr[0]\n \n #calculating for spam\n if \"spam\" in path:\n if label == \"spam\":\n TP_SPAM+= 1\n else:\n FN_SPAM+= 1\n else:\n if label == \"ham\":\n TN_SPAM+= 1\n else:\n FP_SPAM+= 1\n \n #calculating for ham\n if \"ham\" in path:\n if label == \"ham\":\n TP_HAM+= 1\n else:\n FN_HAM+= 1\n else:\n if label == \"spam\":\n TN_HAM+= 1\n else:\n FP_HAM+= 1\n calculate_f1(TP_SPAM, TN_SPAM, FP_SPAM, FN_SPAM, \"SPAM\")\n calculate_f1(TP_HAM, TN_HAM, FP_HAM, FN_HAM, \"HAM\")", "def write2file(self, direc, idmatch, lines):\n with open('{0}/{1}.fq'.format(direc,idmatch ), \"ab+\") as file:\n file.write(lines['1'])\n file.write(lines['2'])\n file.write(lines['3'])\n file.write(lines['4'])", "def run(self, pdb_fn, ofn_root=None):\n # convert .pdb file to .xyzr and .xyzrn files\n #fname_root = '.'.join(self.pdb_fn.split('/')[-1].split('.')[:-1])\n fname_root = os.path.splitext(os.path.split(pdb_fn)[-1])[0]\n## xyzr_fname = '%s/%s.xyzr' % (self.output_dir, fname_root)\n #xyzrn_fname = '%s/%s.xyzrn' % (self.output_dir, fname_root)\n xyzrn_fname = os.path.join(self.output_dir, \"%s.xyzrn\" % fname_root)\n\n if ofn_root is None:\n ofn_root = '%s_surface' % (fname_root,)\n\n old_cwd = os.getcwd()\n os.chdir(self.msms_wd)\n## cmd = '%s %s > %s' % 
(self.pdb2xyzr_bin, pdb_fn, xyzr_fname)\n# os.system(cmd)\n cmd = '%s %s > %s' % (self.pdb2xyzrn_bin, pdb_fn, xyzrn_fname)\n # os.system(cmd)\n print(cmd)\n if sys.platform.startswith('win') and 'PYMOL_GIT_MOD' in os.environ:\n pymol_env = os.environ.copy()\n callfunc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=pymol_env)\n child_stdout, child_stderr = callfunc.communicate()\n print(child_stdout)\n print(child_stderr)\n retval = callfunc.returncode\n print(\"pdb2xyzrn's mainCommand returned\", retval)\n else:\n status = subprocess.call(cmd, shell=True)\n # read in .xyzr and .xyzrn data\n# try:\n## xyzr_fh = open(xyzr_fname)\n## self.xyzr_fd = xyzr_fh.readlines()\n# xyzr_fh.close()\n# except IOError:\n# print 'ERROR: pdb2xyzr failed to convert pdb file to xyzr file!'\n# print ' pdb2xyzr = %s' % (self.pdb2xyzr_bin,)\n# print ' pdb file = %s' % (pdb_fn,)\n# sys.exit()\n\n try:\n xyzrn_fh = open(xyzrn_fname)\n self.xyzrn_fd = xyzrn_fh.readlines()\n xyzrn_fh.close()\n except IOError:\n print('ERROR: pdb2xyzrn failed to convert pdb file to xyzrn file!')\n print(' pdb2xyzrn = %s' % (self.pdb2xyzrn_bin,))\n print(' pdb file = %s' % (pdb_fn,))\n sys.exit()\n\n #output_root = '%s/%s' % (self.output_dir, ofn_root)\n output_root = os.path.join(self.output_dir, ofn_root)\n\n # run MSMS on .xyzrn file\n msms_bin_str = '\\\"%s\\\"' % (self.msms_bin,) # there may be whitespace in path\n cmd = '%s -if %s -probe_radius %f -density %f -hdensity %f -no_area -of %s' % \\\n (msms_bin_str, xyzrn_fname,\n self.param_pr, self.param_den, self.param_hden,\n output_root)\n\n if self.noh: # ignore hydrogen atoms\n cmd += ' -noh'\n\n if self.all_components: # force MSMS to search all surface components\n cmd += ' -all_components'\n\n if VERBOSE:\n print('command line for running msms:')\n print(cmd)\n\n # os.system(cmd)\n status = subprocess.call(cmd, shell=True)\n os.chdir(old_cwd)\n\n## self.output_xyzr_fn = xyzr_fname\n## self.output_xyzrn_fn = xyzrn_fname\n\n # clean up intermediate files\n# if os.path.isfile(xyzr_fname):\n# os.remove(xyzr_fname)\n #!if os.path.isfile(xyzrn_fname):\n #! 
os.remove(xyzrn_fname)\n\n self.output_vert_fn = '%s.vert' % (output_root,)\n self.output_face_fn = '%s.face' % (output_root,)\n if self.all_components:\n fn_idx = 1\n component_vert_fn = '%s_%d.vert' % (output_root, fn_idx)\n component_face_fn = '%s_%d.face' % (output_root, fn_idx)\n\n while os.path.isfile(component_vert_fn) and \\\n os.path.isfile(component_face_fn):\n self.output_cpn_vert_fn.append(component_vert_fn)\n self.output_cpn_face_fn.append(component_face_fn)\n\n fn_idx += 1\n component_vert_fn = '%s_%d.vert' % (output_root, fn_idx)\n component_face_fn = '%s_%d.face' % (output_root, fn_idx)\n\n return", "def get_SV_CNV_and_SV_CNV_annot_into_file(Is, nsamples, sampleID, tmpdir, SV_CNV_vcf, SV_CNV_var_annotation, fields_varCall, fields_varAnnot, replace):\n\n print_if_verbose(\"getting SV vcf and annot file for sample %i/%i\"%(Is+1, nsamples))\n\n # define files\n SV_CNV_file = \"%s/%s_SV_CNV_noHeader.tab\"%(tmpdir, sampleID)\n SV_CNV_annot_file = \"%s/%s_SV_CNV_annot_noHeader.tab\"%(tmpdir, sampleID)\n\n # generate the SV_CNV_file\n if file_is_empty(SV_CNV_file) or replace is True:\n\n # load df\n vcf_df = get_vcf_df_with_INFO_as_single_fields(get_df_and_header_from_vcf(SV_CNV_vcf)[0])\n \n # add the sample ID\n vcf_df[\"sampleID\"] = sampleID\n\n # write\n SV_CNV_file_tmp = \"%s.tmp\"%SV_CNV_file\n vcf_df[fields_varCall].to_csv(SV_CNV_file_tmp, sep=\"\\t\", header=False, index=False)\n os.rename(SV_CNV_file_tmp, SV_CNV_file)\n\n # generate the SV_CNV_annot_file\n if file_is_empty(SV_CNV_annot_file) or replace is True:\n\n # load df\n annotation_df = pd.read_csv(SV_CNV_var_annotation, sep=\"\\t\")\n\n # write\n SV_CNV_annot_file_tmp = \"%s.tmp\"%SV_CNV_annot_file\n annotation_df[fields_varAnnot].to_csv(SV_CNV_annot_file_tmp, sep=\"\\t\", header=False, index=False)\n os.rename(SV_CNV_annot_file_tmp, SV_CNV_annot_file)\n\n return (SV_CNV_file, SV_CNV_annot_file)", "def write_input(infile,tkin,nh2,cdmol=cdmol_default):\n infile.write(mole+'.dat\\n')\n infile.write('radex.out\\n')\n infile.write(str(flow*(1-bw))+' '+str(fupp/(1-bw))+'\\n')\n infile.write(str(tkin)+'\\n')\n infile.write('1\\n')\n infile.write('H2\\n')\n infile.write(str(nh2)+'\\n')\n infile.write(str(tbg)+'\\n')\n infile.write(str(cdmol)+'\\n')\n infile.write(str(dv)+'\\n')", "def ReadNmrView(self, fileName):\n if _DoesFileExist(fileName) == 0:\n return\n print 'reading a NMRView .out file', fileName\n\n #important - clean atomlist and atomdicfa:\n self.atomlist = []\n self.atomdicfa = {}\n self.fileName = fileName\n\n #get the file without the comments:\n bigstring = DeleteComments.GetString(fileName)\n\n #split the string in lines:\n lines = string.split(bigstring, '\\n')\n\n for line in lines:\n linelist = string.split(line)\n #for wrong or empty lines:\n if len(linelist) < 3:\n continue\n ATOM = Atom()\n firstFieldList = string.split(linelist[0], '.')\n ATOM.residuenumber = firstFieldList[0]\n ATOM.aminoacid = None\n ATOM.segid = None\n ATOM.atomname = (PseudoAtom.Pseudo2Atom(firstFieldList[1]),)\n ATOM.shift = linelist[1]\n ATOM.shifterror = '0.0'\n self.AddAtom(ATOM)", "def vcf2snapp(vcf_file, output_file):\r\n\r\n fh = open(vcf_file)\r\n\r\n chroms = []\r\n\r\n for line in fh:\r\n\r\n # Skip header\r\n if line.startswith(\"##\"):\r\n pass\r\n elif line.startswith(\"#CHROM\"):\r\n # Get taxa information\r\n taxa_list = line.strip().split()\r\n nexus_data = OrderedDict((x, []) for x in taxa_list[9:])\r\n elif line.strip() != \"\":\r\n fields = line.strip().split()\r\n\r\n ref_snp = fields[3]\r\n alt_snp = 
fields[4]\r\n\r\n # If SNP is not bialleic, ignore\r\n if len(alt_snp) > 1:\r\n continue\r\n\r\n # Record data for each Taxon\r\n for tx in nexus_data:\r\n # Get genotype\r\n gen = fields[taxa_list.index(tx)]\r\n gen = gen.split(\":\")[0]\r\n\r\n if gen == \"./.\":\r\n nexus_data[tx].append(\"-\")\r\n elif gen == \"0/0\":\r\n nexus_data[tx].append(\"0\")\r\n elif gen == \"1/1\":\r\n nexus_data[tx].append(\"2\")\r\n elif gen == \"1/0\" or gen == \"0/1\":\r\n nexus_data[tx].append(\"1\")\r\n\r\n\r\n # Write nexus files\r\n nexus_fh = open(output_file, \"w\")\r\n\r\n # Write header\r\n ntaxa = len(nexus_data)\r\n nloci = len(nexus_data[tx])\r\n nexus_fh.write(\"#NEXUS\\nBEGIN Data;\\n\\tDIMENSIONS NTAX={} NCHAR={};\\n\\t\"\r\n r'FORMAT DATATYPE=standard SYMBOLS=\"012\" INTERLEAVE=no missing=-;'\r\n \"\\n\"\r\n \"Matrix\\n\".format(ntaxa, nloci))\r\n\r\n # Write Data\r\n for tx in nexus_data:\r\n nexus_fh.write(\"{}\\t{}\\n\".format(tx, \"\".join(nexus_data[tx])))\r\n\r\n # Write file ending\r\n nexus_fh.write(\";\\nEND;\\n\")\r\n nexus_fh.close()", "def convert_dicom_to_nii(dicom_dir, output_dir, filename):\n # file processing\n output_dir = output_dir.replace(' ', '\\ ')\n dicom_dir = dicom_dir.replace(' ', '\\ ')\n dcm2niix_cmd = \"dcm2niix -d 0 -s y -f %s -o %s %s\" % (filename, output_dir, dicom_dir)\n args = shlex.split(dcm2niix_cmd)\n process = subprocess.Popen(args, env=config.path_environment)\n process.wait()\n print(\"dcm2niix_cmd terminated with return code: '%s'\" % process.returncode)\n # expected file path\n path_to_nii_file = os.path.join(output_dir, filename + '.nii')\n path_to_json_file = os.path.join(output_dir, filename + '.json')\n if os.path.exists(path_to_json_file):\n os.remove(path_to_json_file)\n if os.path.exists(path_to_nii_file):\n success=True\n else:\n success=False\n print(\"Dicom dir '%s' was not converted\"%dicom_dir)\n return success" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Integrates a spectrum based on the linewidth of its peaks.
def integrate_peak_by_linewidth(peaks, dic, data, uc): integrals = dict() # TODO: Create a unit conversion object. for peak in peaks: peak_center = peak[0] peak_id = peak[1] peak_width = peak[2] int_start = (peak_center - peak_width) / 500 int_end = (peak_center + peak_width) / 500 integrals[peak_id] = ng.analysis.integration.integrate( data, uc, [int_start, int_end]) return integrals
[ "def integrateSpectra(spectra, dlambda):\n \n \"\"\"\n spectra = list of Nx2 arrays describing filter or dye spectra, or laser wavelength profile\n dlambda = wavelength difference betweeen adjacent values in the spectra\n \"\"\"\n\n lowerLimit = min( [min(spectrum[:,0]) for spectrum in spectra] )\n upperLimit = max( [max(spectrum[:,0]) for spectrum in spectra] )\n\n trimmedSpectra = [padWithZeros(spectrum, lowerLimit, upperLimit) for spectrum in spectra]\n \n product = trimmedSpectra[0][:,1]\n for idx in np.arange(1,len(spectra)):\n product = np.multiply(product, trimmedSpectra[idx][:,1])\n \n \n product = np.ones((trimmedSpectra[0][:,1].shape))\n for spectrum in trimmedSpectra:\n product = np.multiply(product, spectrum[:,1])\n \n integral = np.sum(product) * dlambda\n\n \n return integral", "def add_emission_line(spec, central_wave, flux, width=20., dw=1.0, return_ew=False, verbose=False):\n w0 = central_wave - width * 3\n w1 = central_wave + width * 3\n wave = np.arange(w0, w1+dw, dw) # the wavelength grid for the line\n # calculate the amplitude\n # amp = flux / (np.sqrt(2.*np.pi) * width)\n line = flux * gauss.gauss(wave, central_wave, width, 1.0, xstep=dw)\n line[0] = 0.; line[-1] = 0.\n new_wave = np.union1d(spec.wave, wave) # already sorted\n spr = spec.resample(new_wave)\n continuum = (spr.sample(central_wave-100.) + spr.sample(central_wave+100.)) / 2.\n EW = flux / continuum\n if verbose:\n print \"Equiv. width at %.1f A is %.2f A.\" % (lam, EW)\n sp_line = S.ArraySpectrum(wave=wave, flux=line, fluxunits='flam')\n sp_new = spr + sp_line\n if return_ew:\n return EW\n else:\n return sp_new", "def integrate(self, wavelengths=None):\n if wavelengths is None:\n x = self.wave.value\n y = self.flux.value\n else:\n y = self.resample(wavelengths).value\n if isinstance(wavelengths, u.Quantity):\n x = wavelengths.value\n else:\n x = wavelengths\n\n result = utils.trapezoid_integration(x, y)\n\n return u.Quantity(result, unit=self.flux.unit)", "def eqWidthSynth(flux, linePoints): \r\n #//, fluxCont) {\r\n \r\n logE = math.log10(math.e) #// for debug output\r\n Wlambda = 0.0 #// Equivalent width in pm - picometers\r\n numPoints = len(linePoints)\r\n #//console.log(\"numPoints \" + numPoints);\r\n #var delta, logDelta, term, integ, integ2, logInteg, lastInteg, lastTerm, term2;\r\n\r\n #//Spectrum now continuum rectified before eqWidth called\r\n\r\n #//Trapezoid rule:\r\n #// First integrand:\r\n\r\n lastInteg = 1.0 - flux[0][0]\r\n \r\n lastTerm = lastInteg #//initialization\r\n\r\n for il in range(1, numPoints-1):\r\n\r\n delta = linePoints[il] - linePoints[il - 1]\r\n delta = delta * 1.0E+7 #// cm to nm - W_lambda in pm\r\n logDelta = math.log(delta)\r\n\r\n integ = 1.0 - flux[0][il]\r\n\r\n #//Extended trapezoid rule:\r\n integ2 = 0.5 * (lastInteg + integ)\r\n #//logInteg = math.log(integ2)\r\n #//term = Math.exp(logInteg + logDelta);\r\n term = integ2 * delta\r\n #//console.log(\"linePoints[il] \" + linePoints[il] + \" flux[0][il] \" + flux[0][il]\r\n #// + \" integ \" + integ + \" term \" + term);\r\n\r\n #//Wlambda = Wlambda + (term * delta);\r\n Wlambda = Wlambda + term\r\n\r\n lastInteg = integ\r\n\r\n #//System.out.println(\"EqWidth: Wlambda: \" + Wlambda);\r\n #}\r\n\r\n #// Convert area in nm to pm - picometers\r\n Wlambda = Wlambda * 1.0E3\r\n\r\n return Wlambda", "def integrate_along(self, key):\n if key not in self.dim:\n return self\n # return value\n rval = type(self)(self.name)\n rval.attributes = deepcopy(self.attributes)\n # calculate the integrated spectra\n axis, scale = 
self.axis(key), self.scale(key)\n rval.data = np.trapz(self.data, x=scale, axis=axis)\n rval.data /= np.sum(scale[1:] - scale[:-1])\n # populate the dimension information.\n rmdim = self.get_dim(key)\n for dim in set(self.dim.values()):\n # drop the dimension that was reduced\n if dim is rmdim:\n continue\n # get the axis, name and scle of the dimension\n axis, name, scale = dim.axis, dim.name, dim.scale\n if axis > rmdim.axis:\n # the dimensionality has been reduced by 1.\n axis -= 1\n rval.set_dim(axis=axis, name=name, scale=scale)\n # done\n return rval", "def plot_spectra(self, ax=None):\n import matplotlib.pyplot as plt\n if ax is None:\n fig, ax = plt.subplots()\n for spectrum in self.spectra:\n ax.step(self.velax, spectrum, where='mid', color='k')\n ax.set_xlabel('Velocity')\n ax.set_ylabel('Intensity')\n ax.set_xlim(self.velax[0], self.velax[-1])\n return ax", "def integrate(self):\n s = self.info.instrument.sampling_interval.decompose().value\n snf.integrate_signal(values=self.value, dt=s * self.resolution)\n self.is_floating = True", "def integrate_flux(infile, wav_start, wav_end,column):\n \n wav,flux_z = np.genfromtxt(infile,usecols=(0,column),unpack=True)\n flx = [flux_z[i] for i in range(len(wav)) if (wav[i] >= wav_start and wav[i] <= wav_end) ]\n integral= simps(flx)\n\n return integral", "def get_line_flux(line_wave, wave, flux, **kwargs):\n return np.interp(line_wave, wave, flux, **kwargs)", "def assign_wavelengths(self, input_spectra):\n\n # Save these, so we'll know what data type to use for the output.\n # The types used for accumulating sums and taking averages may not\n # be the same as these types.\n self.wavelength_dtype = input_spectra[0].wavelength.dtype\n self.net_dtype = input_spectra[0].net.dtype\n self.dq_dtype = input_spectra[0].dq.dtype\n\n nwl = 0\n for in_spec in input_spectra:\n nwl += in_spec.nelem\n\n # Create an array with all the input wavelengths (i.e. 
the union\n # of the input wavelengths).\n wl = np.zeros(nwl, dtype=np.float)\n i = 0\n for in_spec in input_spectra:\n nelem = in_spec.nelem\n # Concatenate current input wavelengths to wl array.\n wl[i:i + nelem] = in_spec.wavelength.copy()\n i += nelem\n wl.sort()\n\n # count_input will be the number of input spectra that cover the\n # corresponding wavelength in wl.\n count_input = np.zeros(nwl, dtype=np.int64)\n for in_spec in input_spectra:\n input_wl = in_spec.wavelength\n # wl0 and wl1 will be about a half pixel wider on either side\n # of the wavelength range for the current input spectrum.\n if input_wl[1] > input_wl[0]: # wavelengths are increasing\n wl0 = input_wl[0] - 0.5 * (input_wl[1] - input_wl[0])\n wl1 = input_wl[-1] + 0.5 * (input_wl[-1] - input_wl[-2])\n elif input_wl[1] < input_wl[0]: # wavelengths are decreasing\n wl0 = input_wl[-1] - 0.5 * (input_wl[-2] - input_wl[-1])\n wl1 = input_wl[0] + 0.5 * (input_wl[0] - input_wl[1])\n else:\n raise RuntimeError(\"Wavelength increment must not be zero.\")\n temp = np.where(wl >= wl0, 1, 0)\n temp = np.where(wl >= wl1, 0, temp)\n count_input += temp\n del temp\n # This shouldn't happen.\n if np.any(count_input <= 0.):\n raise RuntimeError(\"Problem with input wavelengths.\")\n\n self.wavelength = self.compute_output_wl(wl, count_input)\n\n self.wcs = create_spectral_wcs(input_spectra[0].right_ascension[0],\n input_spectra[0].declination[0],\n self.wavelength)", "def interpolLin(wave, spec, new_wave): \n inter = interpolate.interp1d(wave, spec, bounds_error = False)\n return inter(new_wave)", "def multiplySpectra(spectra, dl = 0.5):\n \"\"\" dl = optional parameter to control in-built interpolation\"\"\"\n interpSpectra = [interpolateSpectrum(sp, dl) for sp in spectra]\n \n lowerLimit = min( [min(spectrum[:,0]) for spectrum in interpSpectra] )\n upperLimit = max( [max(spectrum[:,0]) for spectrum in interpSpectra] )\n\n trimmedSpectra = [padWithZeros(spectrum, lowerLimit, upperLimit) for spectrum in interpSpectra]\n \n product = np.ones((trimmedSpectra[0][:,1].shape))\n\n for spectrum in trimmedSpectra:\n product = np.multiply(product, spectrum[:,1])\n\n \n out = np.stack([trimmedSpectra[0][:,0], product], axis=1)\n return out", "def plot_line(datafile,radius,wavelength=5048.126,ax=False,\n central_lambda=[4901.416,5048.126],flip=False,\n plot=True,window=20,velo=False,baseline=False,\n verbose=True,skywindow=20,**plotargs):\n\n if '.slay.' in datafile:\n datafile = '.'.join(datafile.split('.')[:-2] + ['ms.fits'])\n\n slayfile = '.'.join(datafile.split('.')[:-2] + ['slay.fits'])\n\n kpcradii, _, _ = openslay(slayfile,central_lambda=central_lambda,\n flip=flip,moments=False)\n pxradii = pyfits.open(slayfile)[3].data\n\n row = np.where(np.abs(kpcradii-radius) == np.min(np.abs(kpcradii-radius)))[0][0]\n if verbose:\n print \"using pixel value {} where radius is {} kpc\".format(pxradii[row],kpcradii[row])\n\n datahdus = pyfits.open(datafile)\n hdu = datahdus[0]\n CRVAL = hdu.header['CRVAL1']\n Cdelt = hdu.header['CDELT1']\n try:\n seperr = hdu.header['SEPERR']\n except KeyError:\n seperr = False\n\n '''get the width of the bin in kpc'''\n# print np.array([int(s) for s in hdu.header['APNUM{}'.format(row+1)].split()[2:]])\n rwidthpx = np.diff(np.array([int(s) for s in hdu.header['APNUM{}'.format(row+1)].split()[2:]]))[0]\n rwidth = rwidthpx*0.118*8. # 0.118 \"/px (from KN 11.29.12) times 8x binning\n rwidth *= 34.1e3/206265. 
# distance (34.1Mpc) / 206265\"\n if verbose:\n print 'rwidth = {} px ({} kpc)'.format(rwidthpx,rwidth)\n\n # We use '=f8' to force the endianess to be the same as the local\n # machine. This is so the precompiled bottleneck (bn) functions don't\n # complain\n if seperr:\n spectrum = np.array(hdu.data[row],dtype='=f8')\n errorfile = '{}_error.{}'.format(os.path.basename(datafile).split('.')[0],\n '.'.join(os.path.basename(datafile).split('.')[1:]))\n if os.path.dirname(datafile) != '':\n errorfile = '{}/{}'.format(os.path.dirname(datafile),errorfile)\n error = pyfits.open(errorfile)[0].data[row]\n else:\n spectrum = np.array(hdu.data[row*2],dtype='=f8')\n error = hdu.data[row*2 + 1]\n\n wave = np.arange(spectrum.size)*Cdelt + CRVAL\n idx = np.where((wave >= wavelength - window/2.) & (wave <= wavelength + window/2.))\n \n if baseline:\n fit = ADE.polyclip(wave,spectrum,baseline)\n spectrum -= fit(wave)\n \n if velo:\n wave = (wave - wavelength)/wavelength * 3e5\n\n pwave = wave[idx]\n pspec = spectrum[idx]\n perr = error[idx]\n\n if not ax and plot:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if velo:\n ax.set_xlabel('Velocity [km/s]')\n else:\n ax.set_xlabel('Wavelength [Angstroms]')\n ax.set_ylabel('ADU/s')\n ax.set_title(datetime.now().isoformat(' '))\n \n \n if plot:\n ax.errorbar(pwave,pspec,yerr=perr,**plotargs)\n fig = ax.figure\n fig.show()\n\n datahdus.close()\n return pwave, pspec, perr, rwidth", "def segmentByEnergy(self,thr,width,min_width=450):\n data = np.abs(self.data)\n E = np.zeros(len(data))\n E[width] = np.sum(data[:2*width+1])\n for i in range(width+1,len(data)-width):\n E[i] = E[i-1] - data[i-width-1] + data[i+width]\n E = E/(2*width)\n\n # TODO: Automatic energy gain (normalisation method)\n\n # This thing is noisy, so I'm going to median filter it. SoundID doesn't seem to?\n Em = np.zeros(len(data))\n for i in range(width,len(data)-width):\n Em[i] = np.median(E[i-width:i+width])\n for i in range(width):\n Em[i] = np.median(E[0:2*i])\n Em[-i] = np.median(E[-2 * i:])\n\n # TODO: Better way to do this?\n threshold = np.mean(Em) + thr*np.std(Em)\n\n # Pick out the regions above threshold and the argmax of each, assuming they are wide enough\n starts = []\n ends = []\n insegment = False\n for i in range(len(data)-1):\n if not insegment:\n if Em[i]<threshold and Em[i+1]>threshold:\n starts.append(i)\n insegment = True\n if insegment:\n if Em[i]>threshold and Em[i+1]<threshold:\n ends.append(i)\n insegment = False\n if insegment:\n ends.append(len(data))\n maxpoints = []\n Emm = np.zeros(len(data))\n for i in range(len(starts)):\n if ends[i] - starts[i] > min_width:\n maxpoints.append(np.argmax(Em[starts[i]:ends[i]]))\n Emm[starts[i]:ends[i]] = Em[starts[i]:ends[i]]\n\n # TODO: SoundID appears to now compute the 44 LPC coeffs for each [midpoint-width:midpoint+width]\n # TODO: And then compute the geometric distance to templates\n\n segs = []\n for i in range(len(starts)):\n segs.append([float(starts[i])/self.fs,float(ends[i])/self.fs])\n return segs", "def data__interp_spect(self):\n # Generate a wavelength scale\n wavl = np.arange(1000., 9000., 5.)\n # Form some data\n data = np.random.rand(len(wavl))\n # Put some random np.nan into the data\n for i in np.random.randint(0, len(data) - 1, 20):\n data[i] = np.nan\n # Generate a finer wavelength scale to interp. 
to\n # Make sure there are un-interpolable points\n new_wavl = np.arange(800., 9200., 1.)\n\n return wavl, data, new_wavl", "def plot_IR_spectrum(self,width=10*lightspeed/centimeter,scale=1.0,intensities=None,charges=None):\n if not intensities is None:\n assert len(intensities) == (len(self.freqs)-len(self.zeros))\n if intensities is None and charges is None:\n raise ValueError('This function requires the charges or the intensities to calculate the line shape')\n elif not intensities is None and not charges is None:\n raise ValueError('Please only provide either the intensities or the charges')\n else:\n xr = np.arange(0,5001,1)*lightspeed/centimeter\n alphas = np.zeros(len(xr))\n\n # Calculate intensities\n amps = self.modes\n freqs = self.freqs * scale\n for n, (wn, ampn) in enumerate(zip(np.delete(freqs,self.zeros),np.delete(amps,self.zeros,axis=0))): #self.zeros contain the indices of the zero frequencies\n if not charges is None:\n intensity = 0.0\n for k in range(3):\n for i, qi in enumerate(charges):\n idx = 3*i+k\n intensity += (qi*ampn[idx])**2\n else:\n intensity = intensities[n]\n alphas += intensity*self._lorentz(xr,wn,width)\n print('Mode %i: freq = %.3f 1/cm IR ampl. = %.3e a.u.' %(n, wn/(lightspeed/centimeter), intensity))\n\n\n pt.clf()\n pt.plot(xr/(lightspeed/centimeter),alphas)\n pt.xlabel('Frequency [1/cm]')\n pt.ylabel('Absorption [a.u.]')\n pt.show()", "def updatelines(self):\n if self.wavefunction is not None:\n self.rewavefunctionlines.set_ydata(real(self.wavefunction))\n self.imwavefunctionlines.set_ydata(imag(self.wavefunction))\n if self.grid is None:\n self.rewavefunctionlines.set_xdata(arange(len(self.wavefunction)))\n self.imwavefunctionlines.set_xdata(arange(len(self.wavefunction)))\n else:\n self.rewavefunctionlines.set_xdata(self.grid)\n self.imwavefunctionlines.set_xdata(self.grid)\n #self.force_redraw()", "def rescale(self):\n xr, yr = self.rewavefunctionlines.get_data()\n xi, yi = self.imwavefunctionlines.get_data()\n maxy=max(max(yi),max(yr))\n miny=min(min(yi),min(yr))\n maxx=max(max(xi),max(xr))\n minx=min(min(xi),min(xr))\n self.xlim(minx,maxx)\n self.ylim(miny,maxy)", "def radial_integration(r1, frame, radii, r0, ringsize):\n\n integration_area = np.where( radii<r1, frame, 0)\n integration_area = np.where( radii>(r1-ringsize), integration_area, 0)\n\n entries = np.where( radii<r1, 1, 0)\n #entries = np.where( radii>(r1-ringsize), entries1, 0)\n integral = np.sum(integration_area) / np.sum(entries)\n\n return integral", "def linewidth_from_data_units(linewidth, axis, reference='x'):\n fig = axis.get_figure()\n if reference == 'x':\n length = fig.bbox_inches.width * axis.get_position().width\n value_range = np.diff(axis.get_xlim())\n elif reference == 'y':\n length = fig.bbox_inches.height * axis.get_position().height\n value_range = np.diff(axis.get_ylim())\n # Convert length to points\n length *= 72\n # Scale linewidth to value range\n return linewidth * (length / value_range)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Two identical rings should produce identical .gz files on disk.
def test_deterministic_serialization(self): os.mkdir(os.path.join(self.testdir, '1')) os.mkdir(os.path.join(self.testdir, '2')) # These have to have the same filename (not full path, # obviously) since the filename gets encoded in the gzip data. ring_fname1 = os.path.join(self.testdir, '1', 'the.ring.gz') ring_fname2 = os.path.join(self.testdir, '2', 'the.ring.gz') rd = ring.RingData( [array.array('H', [0, 1, 0, 1]), array.array('H', [0, 1, 0, 1])], [{'id': 0, 'zone': 0}, {'id': 1, 'zone': 1}], 30) rd.save(ring_fname1) rd.save(ring_fname2) with open(ring_fname1, 'rb') as ring1: with open(ring_fname2, 'rb') as ring2: self.assertEqual(ring1.read(), ring2.read())
[ "def gzip_cmp(self, first, second):\n ungz_first = ''.join(os.path.splitext(first)[:-1]) + '.ugz'\n ungz_second = ''.join(os.path.splitext(second)[:-1]) + '.ugz'\n # Unzip the first file.\n with gzip.open(first, 'rb') as gfp, open(ungz_first, 'wb') as ufp:\n ufp.write(gfp.read())\n # Unzip the second file.\n with gzip.open(second, 'rb') as gfp, open(ungz_second, 'wb') as ufp:\n ufp.write(gfp.read())\n # Compare two unzipped files.\n result = filecmp.cmp(ungz_first, ungz_second)\n # Removing generated files.\n os.remove(ungz_first)\n os.remove(ungz_second)\n return result", "def createSwapFiles(self):", "def test_s3_gzip_roundtrip(tmp_path, s3_setup, path_info):\n out = tmp_path / \"sample.json\"\n sheet = load_vd_sheet(f\"s3://{path_info.s3_bucket}/{path_info.s3_key}.gz\")\n vd.save_json(Path(out), sheet)\n with open(path_info.local_file, \"r\") as f1, open(out, \"r\") as f2:\n assert json.load(f1) == json.load(f2)", "def _merge_beds(in_beds, final_db):\n if len(in_beds) == 1:\n out_file = in_beds[0]\n else:\n out_file = \"%s.bed\" % os.path.splitext(final_db)[0]\n cmd = \"cat %s | sort -k1,1 -k2,2n > %s\" % (\" \".join(in_beds), out_file)\n subprocess.check_call(cmd, shell=True)\n subprocess.check_call([\"bgzip\", \"-f\", out_file])\n bgzip_out = out_file + \".gz\"\n subprocess.check_call([\"tabix\", \"-p\", \"bed\", \"-f\", bgzip_out])\n return bgzip_out", "def bottle_merge(self, b1name, b2name, outname, allow_gaps=True, bigendian=None):\n\n b1 = bottle.Bottle(b1name)\n b1.read_header()\n b1.read_data()\n\n b2 = bottle.Bottle(b2name)\n b2.read_header()\n b2.read_data()\n \n if b1.file_metadata['interval'] != b2.file_metadata['interval']:\n raise PBO_FileContentsError(\"Can not merge bottle files with different sampling rates: %s=%g %s=%g\" % (b1name, b1.file_metadata['interval'], b2name, b2.file_metadata['interval']))\n\n if b1.file_metadata['data_type'] != b2.file_metadata['data_type']:\n raise PBO_FileContentsError(\"Can not merge bottle files with different data types: %s=%s %s=%s\" % (b1name, bottle.BTL_TYPE[b1.file_metadata['data_type']], b2name, bottle.BTL_TYPE[b2.file_metadata['data_type']]))\n\n if b1.file_metadata['id'] != b2.file_metadata['id']:\n raise PBO_FileContentsError(\"Can not merge bottle files with different IDs: %s=%d %s=%d\" % (b1name, b1.file_metadata['id'], b2name, b2.file_metadata['id']))\n\n if b1.file_metadata['start'] > b2.file_metadata['start']:\n raise PBO_FileContentsError(\"%s does not start at or before %s\" % (b1name, b2name))\n\n b1_end = b1.file_metadata['start'] + ((b1.file_metadata['num_pts'] - 1) * b1.file_metadata['interval'])\n outdata = []\n\n if b1_end <= b2.file_metadata['start']:\n # b1 completes, then b2 begins.\n for datum in b1.data:\n outdata.append(datum)\n if b1_end < b2.file_metadata['start']:\n if not allow_gaps:\n raise PBO_FileContentsError(\"There is a gap between end of %s and beginning of %s; option set to not allow gaps\" % (b1name, b2name))\n else:\n num_missing = int((b2.file_metadata['start'] - b1_end - b1.file_metadata['interval']) / b1.file_metadata['interval'])\n for i in range(num_missing):\n outdata.append(bottle.BTL_MISSING[b1.file_metadata['data_type']])\n else:\n num_missing = 0\n for datum in b2.data:\n outdata.append(datum)\n out_num_pts = b1.file_metadata['num_pts'] + num_missing + b2.file_metadata['num_pts']\n\n else:\n # b1 starts first, but they overlap - see if b2 fills in gaps\n for datum in b1.data:\n outdata.append(datum)\n idx = int((b2.file_metadata['start'] - b1.file_metadata['start']) / 
b1.file_metadata['interval'])\n for datum in b2.data:\n if datum == bottle.BTL_MISSING[b1.file_metadata['data_type']]:\n idx = idx + 1\n elif outdata[idx] == bottle.BTL_MISSING[b1.file_metadata['data_type']]:\n outdata[idx] = datum\n idx = idx + 1\n elif outdata[idx] == datum:\n idx = idx + 1\n else:\n raise PBO_FileContentsError(\"Trying to replace non-null data in %s with data from %s; outdata[%d]=%s datum=%s \" % (b1name, b2name, idx, (\"%\" + bottle.BTL_TYPE_TO_PRINT[b1.file_metadata['data_type']]) % outdata[idx], (\"%\" + bottle.BTL_TYPE_TO_PRINT[b1.file_metadata['data_type']]) % datum))\n out_num_pts = b1.file_metadata['num_pts']\n\n out_start = b1.file_metadata['start']\n out_interval = b1.file_metadata['interval']\n out_data_type = b1.file_metadata['data_type']\n out_id = b1.file_metadata['id']\n\n # we've gotten everything we need from the two source bottle\n # files. close them so that if we are overwriting one, it is safe\n del b1\n del b2\n\n if bigendian != None:\n bottle.write_bottle(outname, out_start, out_interval, out_num_pts, \\\n out_data_type, out_id, outdata, bigendian=bigendian)\n else:\n bottle.write_bottle(outname, out_start, out_interval, out_num_pts, \\\n out_data_type, out_id, outdata)", "def test_gz2dsrc():\n from bioconvert import bioconvert_data\n infile = bioconvert_data(\"test_SP1.fq.dsrc\")\n\n with TempFile(suffix=\".fq.gz\") as tempfile:\n converter = DSRC2GZ(infile, tempfile.name)\n converter()\n\n # uncompress the createdfile, and compare uncompressed file\n # to the expected md5. We do not directly compare dsrc or gz files as\n # it is not deterministic\n assert os.path.isfile(tempfile.name)\n\n cmd = \"gunzip -c {} | md5sum -\".format(tempfile.name)\n res = subprocess.check_output(cmd, shell=True)\n res = res.split()[0].decode()\n\n # Check that the output is correct with a checksum\n assert res == \"d41d8cd98f00b204e9800998ecf8427e\"", "def test_dir_to_dir_minus_d(self):\n # Create 2 dirs with 1 overlapping file, 1 extra file at root\n # level in each, and 1 extra file 1 level down in each, where one of the\n # objects starts with \".\" to test that we don't skip those objects. 
Make the\n # overlapping files named the same but with different content, to test\n # that we detect and properly copy in that case.\n tmpdir1 = self.CreateTempDir()\n tmpdir2 = self.CreateTempDir()\n subdir1 = os.path.join(tmpdir1, 'subdir1')\n subdir2 = os.path.join(tmpdir2, 'subdir2')\n os.mkdir(subdir1)\n os.mkdir(subdir2)\n self.CreateTempFile(tmpdir=tmpdir1, file_name='obj1', contents='obj1')\n self.CreateTempFile(tmpdir=tmpdir1, file_name='.obj2', contents='.obj2')\n self.CreateTempFile(\n tmpdir=subdir1, file_name='obj3', contents='subdir1/obj3')\n self.CreateTempFile(tmpdir=tmpdir2, file_name='.obj2', contents='.OBJ2')\n self.CreateTempFile(tmpdir=tmpdir2, file_name='obj4', contents='obj4')\n self.CreateTempFile(\n tmpdir=subdir2, file_name='obj5', contents='subdir2/obj5')\n\n self.RunGsUtil(['rsync', '-d', tmpdir1, tmpdir2])\n listing1 = _TailSet(tmpdir1, self._FlatListDir(tmpdir1))\n listing2 = _TailSet(tmpdir2, self._FlatListDir(tmpdir2))\n # dir1 should have un-altered content.\n self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir1/obj3']))\n # dir2 should have content like dir1 but without the subdir1 objects\n # synchronized.\n self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir2/obj5']))\n # Assert that the src/dest objects that had same length but different\n # checksums were not synchronized (dir to dir sync doesn't use checksums\n # unless you specify -c).\n with open(os.path.join(tmpdir1, '.obj2')) as f:\n self.assertEquals('.obj2', '\\n'.join(f.readlines()))\n with open(os.path.join(tmpdir2, '.obj2')) as f:\n self.assertEquals('.OBJ2', '\\n'.join(f.readlines()))\n\n # Use @Retry as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check1():\n # Check that re-running the same rsync command causes no more changes.\n self.assertEquals(NO_CHANGES, self.RunGsUtil(\n ['rsync', '-d', tmpdir1, tmpdir2], return_stderr=True))\n _Check1()\n\n # Now rerun the sync with the -c option.\n self.RunGsUtil(['rsync', '-d', '-c', tmpdir1, tmpdir2])\n listing1 = _TailSet(tmpdir1, self._FlatListDir(tmpdir1))\n listing2 = _TailSet(tmpdir2, self._FlatListDir(tmpdir2))\n # dir1 should have un-altered content.\n self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir1/obj3']))\n # dir2 should have content like dir but without the subdir objects\n # synchronized.\n self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir2/obj5']))\n # Assert that the src/dest objects that had same length but different\n # content were synchronized (dir to dir sync with -c uses checksums).\n with open(os.path.join(tmpdir1, '.obj2')) as f:\n self.assertEquals('.obj2', '\\n'.join(f.readlines()))\n with open(os.path.join(tmpdir1, '.obj2')) as f:\n self.assertEquals('.obj2', '\\n'.join(f.readlines()))\n\n # Use @Retry as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check2():\n # Check that re-running the same rsync command causes no more changes.\n self.assertEquals(NO_CHANGES, self.RunGsUtil(\n ['rsync', '-d', '-c', tmpdir1, tmpdir2], return_stderr=True))\n _Check2()\n\n # Now add and remove some objects in both dirs and test rsync -r.\n self.CreateTempFile(tmpdir=tmpdir1, file_name='obj6', contents='obj6')\n self.CreateTempFile(tmpdir=tmpdir2, file_name='obj7', contents='obj7')\n os.unlink(os.path.join(tmpdir1, 'obj1'))\n os.unlink(os.path.join(tmpdir2, '.obj2'))\n\n self.RunGsUtil(['rsync', '-d', '-r', tmpdir1, tmpdir2])\n listing1 = _TailSet(tmpdir1, 
self._FlatListDir(tmpdir1))\n listing2 = _TailSet(tmpdir2, self._FlatListDir(tmpdir2))\n # dir1 should have un-altered content.\n self.assertEquals(listing1, set(['/.obj2', '/obj6', '/subdir1/obj3']))\n # dir2 should have content like dir but without the subdir objects\n # synchronized.\n self.assertEquals(listing2, set(['/.obj2', '/obj6', '/subdir1/obj3']))\n\n # Use @Retry as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check3():\n # Check that re-running the same rsync command causes no more changes.\n self.assertEquals(NO_CHANGES, self.RunGsUtil(\n ['rsync', '-d', '-r', tmpdir1, tmpdir2], return_stderr=True))\n _Check3()", "def test_bucket_to_bucket(self):\n # Create 2 buckets with 1 overlapping object, 1 extra object at root level\n # in each, and 1 extra object 1 level down in each, where one of the objects\n # starts with \".\" to test that we don't skip those objects. Make the\n # overlapping objects named the same but with different content, to test\n # that we detect and properly copy in that case.\n bucket1_uri = self.CreateBucket()\n bucket2_uri = self.CreateBucket()\n self.CreateObject(bucket_uri=bucket1_uri, object_name='obj1',\n contents='obj1')\n self.CreateObject(bucket_uri=bucket1_uri, object_name='.obj2',\n contents='.obj2')\n self.CreateObject(bucket_uri=bucket1_uri, object_name='subdir/obj3',\n contents='subdir/obj3')\n self.CreateObject(bucket_uri=bucket2_uri, object_name='.obj2',\n contents='.OBJ2')\n self.CreateObject(bucket_uri=bucket2_uri, object_name='obj4',\n contents='obj4')\n self.CreateObject(bucket_uri=bucket2_uri, object_name='subdir/obj5',\n contents='subdir/obj5')\n\n # Use @Retry as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check1():\n \"\"\"Tests rsync works as expected.\"\"\"\n self.RunGsUtil(['rsync', suri(bucket1_uri), suri(bucket2_uri)])\n listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))\n listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))\n # First bucket should have un-altered content.\n self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))\n # Second bucket should have new objects added from source bucket (without\n # removing extraneeous object found in dest bucket), and without the\n # subdir objects synchronized.\n self.assertEquals(listing2,\n set(['/obj1', '/.obj2', '/obj4', '/subdir/obj5']))\n # Assert that the src/dest objects that had same length but different\n # content were correctly synchronized (bucket to bucket sync uses\n # checksums).\n self.assertEquals('.obj2', self.RunGsUtil(\n ['cat', suri(bucket1_uri, '.obj2')], return_stdout=True))\n self.assertEquals('.obj2', self.RunGsUtil(\n ['cat', suri(bucket2_uri, '.obj2')], return_stdout=True))\n _Check1()\n\n # Use @Retry as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check2():\n # Check that re-running the same rsync command causes no more changes.\n self.assertEquals(NO_CHANGES, self.RunGsUtil(\n ['rsync', suri(bucket1_uri), suri(bucket2_uri)], return_stderr=True))\n _Check2()\n\n # Now add and remove some objects in each bucket and test rsync -r.\n self.CreateObject(bucket_uri=bucket1_uri, object_name='obj6',\n contents='obj6')\n self.CreateObject(bucket_uri=bucket2_uri, object_name='obj7',\n contents='obj7')\n self.RunGsUtil(['rm', suri(bucket1_uri, 'obj1')])\n self.RunGsUtil(['rm', suri(bucket2_uri, '.obj2')])\n\n # Use @Retry 
as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check3():\n self.RunGsUtil(['rsync', '-r', suri(bucket1_uri), suri(bucket2_uri)])\n listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))\n listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))\n # First bucket should have un-altered content.\n self.assertEquals(listing1, set(['/.obj2', '/obj6', '/subdir/obj3']))\n # Second bucket should have objects tha were newly added to first bucket\n # (wihout removing extraneous dest bucket objects), and without the\n # subdir objects synchronized.\n self.assertEquals(listing2, set(['/obj1', '/.obj2', '/obj4', '/obj6',\n '/obj7', '/subdir/obj3',\n '/subdir/obj5']))\n _Check3()\n\n # Use @Retry as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check4():\n # Check that re-running the same rsync command causes no more changes.\n self.assertEquals(NO_CHANGES, self.RunGsUtil(\n ['rsync', '-r', suri(bucket1_uri), suri(bucket2_uri)],\n return_stderr=True))\n _Check4()", "def check_that_reads_are_compressed(r1, r2):\n\n for f in [r1, r2]:\n\n if file_is_empty(f): raise ValueError(\"The reads %s do not exist\"%f)\n if not f.endswith(\".gz\"): raise ValueError(\"the filename of %s does not end with .gz. PerSVade only works with .gz compressed files. You can compress your reads with 'gzip <reads>'\"%f)", "def test_merge_rings_permutations():\n for i in range(16):\n # test each segment in both directions\n f1 = i & 1 == 0\n f2 = i & 2 == 0\n f3 = i & 4 == 0\n f4 = i & 8 == 0\n for i1, i2, i3, i4 in permutations([0, 1, 2, 3]):\n ways = [\n W(1, {}, [1, 2, 3, 4] if f1 else [4, 3, 2, 1]),\n W(2, {}, [4, 5, 6, 7] if f2 else [7, 6, 5, 4]),\n W(3, {}, [7, 8, 9, 10] if f3 else [10, 9, 8, 7]),\n W(4, {}, [10, 11, 12, 1] if f4 else [1, 12, 11, 10]),\n ]\n ways = [ways[i1], ways[i2], ways[i3], ways[i4]]\n rings = [Ring(w) for w in ways]\n \n merged_rings = merge_rings(rings)\n eq_(len(merged_rings), 1)\n r = merged_rings[0]\n eq_(r.is_closed(), True, (ways, r.refs))\n eq_(set(r.ways), set(ways))\n\n # check order of refs\n prev_x = r.refs[0]\n for x in r.refs[1:]:\n if not abs(prev_x - x) == 1:\n assert (\n (prev_x == 1 and x == 12) or \n (prev_x == 12 and x == 1)\n ), 'not in order %r' % r.refs\n prev_x = x", "def make_stuff(prefix_a, prefix_b, output_path, output_prefix, dump_range):\n\n # We want to combine images from A on top of B\n mask_a = prefix_a + \"%04d.png\"\n mask_b = prefix_b + \"%04d.png\"\n\n# rtp = lcse.rtplot_reader(project_path)\n# dumps = rtp.dump_map.keys()\n# dumps.sort()\n dumps = range(dump_range[0], dump_range[1] + 1)\n\n print \"Processing dummps %s\" % dumps\n\n path = os.path.join(os.path.abspath(output_path), output_prefix)\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n for dump in dumps:\n\n filename_out = os.path.join(path, '%s-%04d.png' % (output_prefix, dump))\n\n if os.path.exists(filename_out):\n continue\n\n print filename_out\n\n # load fv image\n try:\n # Open\n image_a = Image.open(mask_a % dump).convert(\"RGBA\")\n image_b = Image.open(mask_b % dump).convert(\"RGBA\")\n\n # Crop\n image_a = iu.square_crop(image_a, image_a.size[1])\n image_b = iu.square_crop(image_b, image_b.size[1])\n\n # Make the second image transparent\n image_b = iu.color_to_alpha(image_b, threshold=30)\n image_a = iu.alpha_composite(image_b, image_a)\n\n# draw_time(image_a, font, dump=dump, time=rtp.dump_map[dump]['T'])\n 
image_a.save(filename_out)\n\n except IOError as e:\n print e\n continue", "def test_bucket_to_bucket_minus_d_empty_dest(self):\n bucket1_uri = self.CreateBucket()\n bucket2_uri = self.CreateBucket()\n self.CreateObject(bucket_uri=bucket1_uri, object_name='obj1',\n contents='obj1')\n self.CreateObject(bucket_uri=bucket1_uri, object_name='.obj2',\n contents='.obj2')\n\n # Use @Retry as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check1():\n self.RunGsUtil(['rsync', '-d', suri(bucket1_uri), suri(bucket2_uri)])\n listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))\n listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))\n self.assertEquals(listing1, set(['/obj1', '/.obj2']))\n self.assertEquals(listing2, set(['/obj1', '/.obj2']))\n _Check1()\n\n # Use @Retry as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check2():\n # Check that re-running the same rsync command causes no more changes.\n self.assertEquals(NO_CHANGES, self.RunGsUtil(\n ['rsync', '-d', suri(bucket1_uri), suri(bucket2_uri)],\n return_stderr=True))\n _Check2()", "def zip_gerber_excellon(self):\n zipname = f\"GERBER-{self.filename.split('.')[0]}.zip\"\n with ZipFile(os.path.join(self.gerberdir, zipname), \"w\") as zipfile:\n for folderName, subfolders, filenames in os.walk(self.gerberdir):\n for filename in filenames:\n if not filename.endswith((\"gbr\", \"drl\")):\n continue\n filePath = os.path.join(folderName, filename)\n zipfile.write(filePath, os.path.basename(filePath))\n self.logger.info(f\"Finished generating ZIP file\")", "def gerp_to_flat(path, outfilenameA, outfilenameB, name, bin_format=\"f\", **kargs):\n assert os.path.realpath(path), \"no filename specified\"\n assert os.path.realpath(outfilenameA), \"no save filename specified\"\n assert os.path.realpath(outfilenameB), \"no save filename specified\"\n\n n = 0\n m = 0\n total = 0\n\n #fa = flat_track(filename=outfilenameA, new=True, name=name, bin_format=bin_format)\n fb = flat_track(filename=outfilenameB, new=True, name=name, bin_format=bin_format)\n\n config.log.info(\"Started %s -> %s and %s\" % (path, outfilenameA, outfilenameB))\n s = time.time()\n step = 1\n\n for filename in glob(os.path.join(path, \"*.maf.rates\")):\n config.log.info(\"Doing %s\" % filename)\n with open(filename, \"rU\") as oh:\n chrom = filename.split(\".\")[0].replace(\"chr\", \"\")\n cleft = 0\n\n for line in oh:\n if line: # Just in case there are some empty lines\n d = line.split()\n #print d[0], cleft\n #fa.add_score(chromosome=chrom, left=cleft, right=cleft+step, score=float(d[0]))\n fb.add_score(chromosome=chrom, left=cleft, right=cleft+step, score=float(d[1]))\n cleft += step \n\n if n>1e6:\n m += 1\n print(\"%s,000,000 bp\" % m)\n n = 0\n n += step\n e = time.time()\n\n config.log.info(\"Finalise library...\")\n #fa.finalise()\n fb.finalise()\n config.log.info(\"Took: %s seconds\" % (e-s))\n return(True)", "def test_bucket_to_bucket_minus_d_empty_src(self):\n bucket1_uri = self.CreateBucket()\n bucket2_uri = self.CreateBucket()\n self.CreateObject(bucket_uri=bucket2_uri, object_name='obj1',\n contents='obj1')\n self.CreateObject(bucket_uri=bucket2_uri, object_name='.obj2',\n contents='.obj2')\n\n # Use @Retry as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check1():\n self.RunGsUtil(['rsync', '-d', suri(bucket1_uri), suri(bucket2_uri)])\n stderr = 
self.RunGsUtil(['ls', suri(bucket1_uri, '**')],\n expected_status=1, return_stderr=True)\n self.assertIn('One or more URLs matched no objects', stderr)\n stderr = self.RunGsUtil(['ls', suri(bucket2_uri, '**')],\n expected_status=1, return_stderr=True)\n self.assertIn('One or more URLs matched no objects', stderr)\n _Check1()\n\n # Use @Retry as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check2():\n # Check that re-running the same rsync command causes no more changes.\n self.assertEquals(NO_CHANGES, self.RunGsUtil(\n ['rsync', '-d', suri(bucket1_uri), suri(bucket2_uri)],\n return_stderr=True))\n _Check2()", "def gunzip_file(gz_path, new_path):\n if not os.path.exists(new_path):\n with gzip.open(gz_path, \"rb\") as gz_file:\n with open(new_path, \"wb\") as new_file:\n for line in gz_file:\n new_file.write(line)\n print(\"Unpacked %s to %s\" % (gz_path, new_path))\n else:\n print(\"Did not unzip %s because %s already exists.\" % (gz_path,\n new_path))", "def compare_files(file_path1, file_path2):\n file1 = open(file_path1, 'rb')\n file2 = open(file_path2, 'rb')\n while True:\n bytes1 = file1.read(bulksize)\n bytes2 = file2.read(bulksize)\n if (not bytes1) and (not bytes2):\n return True\n if bytes1 != bytes2:\n return False # Files that has been copied or replaced before and tehre is no need to synch", "def _test_compress_G_single(r):\n x = np.random.random(r)\n\n # Do a valid compress_G() calculation and check dimensions.\n G = np.random.random((r,r**3))\n s = r*(r+1)*(r+2)//6\n Gc = roi.utils.compress_G(G)\n assert Gc.shape == (r,s)\n\n # Check that Gc(x^3) == G(x⊗x⊗x).\n Gxxx = G @ np.kron(x,np.kron(x,x))\n assert np.allclose(Gxxx, Gc @ roi.utils.kron3c(x))\n\n # Check that expand_G() and compress_G() are \"inverses.\"\n assert np.allclose(Gc, roi.utils.compress_G(roi.utils.expand_G(Gc)))", "def test_dir_to_bucket_minus_d(self):\n # Create dir and bucket with 1 overlapping object, 1 extra object at root\n # level in each, and 1 extra object 1 level down in each, where one of the\n # objects starts with \".\" to test that we don't skip those objects. 
Make the\n # overlapping objects named the same but with different content, to test\n # that we detect and properly copy in that case.\n tmpdir = self.CreateTempDir()\n subdir = os.path.join(tmpdir, 'subdir')\n os.mkdir(subdir)\n bucket_uri = self.CreateBucket()\n self.CreateTempFile(tmpdir=tmpdir, file_name='obj1', contents='obj1')\n self.CreateTempFile(tmpdir=tmpdir, file_name='.obj2', contents='.obj2')\n self.CreateTempFile(tmpdir=subdir, file_name='obj3', contents='subdir/obj3')\n self.CreateObject(bucket_uri=bucket_uri, object_name='.obj2',\n contents='.OBJ2')\n self.CreateObject(bucket_uri=bucket_uri, object_name='obj4',\n contents='obj4')\n self.CreateObject(bucket_uri=bucket_uri, object_name='subdir/obj5',\n contents='subdir/obj5')\n\n # Need to make sure the bucket listing is caught-up, otherwise the\n # first rsync may not see .obj2 and overwrite it.\n self.AssertNObjectsInBucket(bucket_uri, 3)\n\n # Use @Retry as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check1():\n \"\"\"Tests rsync works as expected.\"\"\"\n self.RunGsUtil(['rsync', '-d', tmpdir, suri(bucket_uri)])\n listing1 = _TailSet(tmpdir, self._FlatListDir(tmpdir))\n listing2 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))\n # Dir should have un-altered content.\n self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))\n # Bucket should have content like dir but without the subdir objects\n # synchronized.\n self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir/obj5']))\n # Assert that the src/dest objects that had same length but different\n # content were not synchronized (dir to bucket sync doesn't use checksums\n # unless you specify -c).\n with open(os.path.join(tmpdir, '.obj2')) as f:\n self.assertEquals('.obj2', '\\n'.join(f.readlines()))\n self.assertEquals('.OBJ2', self.RunGsUtil(\n ['cat', suri(bucket_uri, '.obj2')], return_stdout=True))\n _Check1()\n\n # Use @Retry as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check2():\n # Check that re-running the same rsync command causes no more changes.\n self.assertEquals(NO_CHANGES, self.RunGsUtil(\n ['rsync', '-d', tmpdir, suri(bucket_uri)], return_stderr=True))\n _Check2()\n\n # Now rerun the sync with the -c option.\n # Use @Retry as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check3():\n \"\"\"Tests rsync -c works as expected.\"\"\"\n self.RunGsUtil(['rsync', '-d', '-c', tmpdir, suri(bucket_uri)])\n listing1 = _TailSet(tmpdir, self._FlatListDir(tmpdir))\n listing2 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))\n # Dir should have un-altered content.\n self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))\n # Bucket should have content like dir but without the subdir objects\n # synchronized.\n self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir/obj5']))\n # Assert that the src/dest objects that had same length but different\n # content were synchronized (dir to bucket sync with -c uses checksums).\n with open(os.path.join(tmpdir, '.obj2')) as f:\n self.assertEquals('.obj2', '\\n'.join(f.readlines()))\n self.assertEquals('.obj2', self.RunGsUtil(\n ['cat', suri(bucket_uri, '.obj2')], return_stdout=True))\n _Check3()\n\n # Use @Retry as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check4():\n # Check that re-running the same rsync command 
causes no more changes.\n self.assertEquals(NO_CHANGES, self.RunGsUtil(\n ['rsync', '-d', '-c', tmpdir, suri(bucket_uri)], return_stderr=True))\n _Check4()\n\n # Now add and remove some objects in dir and bucket and test rsync -r.\n self.CreateTempFile(tmpdir=tmpdir, file_name='obj6', contents='obj6')\n self.CreateObject(bucket_uri=bucket_uri, object_name='obj7',\n contents='obj7')\n os.unlink(os.path.join(tmpdir, 'obj1'))\n self.RunGsUtil(['rm', suri(bucket_uri, '.obj2')])\n\n # Use @Retry as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check5():\n self.RunGsUtil(['rsync', '-d', '-r', tmpdir, suri(bucket_uri)])\n listing1 = _TailSet(tmpdir, self._FlatListDir(tmpdir))\n listing2 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))\n # Dir should have un-altered content.\n self.assertEquals(listing1, set(['/.obj2', '/obj6', '/subdir/obj3']))\n # Bucket should have content like dir but without the subdir objects\n # synchronized.\n self.assertEquals(listing2, set(['/.obj2', '/obj6', '/subdir/obj3']))\n _Check5()\n\n # Use @Retry as hedge against bucket listing eventual consistency.\n @Retry(AssertionError, tries=3, timeout_secs=1)\n def _Check6():\n # Check that re-running the same rsync command causes no more changes.\n self.assertEquals(NO_CHANGES, self.RunGsUtil(\n ['rsync', '-d', '-r', tmpdir, suri(bucket_uri)], return_stderr=True))\n _Check6()", "def geotiff_writer(filename, trans, dst_crs, shape, n_bands, \n dtype=np.uint8, nodata=0):\n with rio.Env():\n with rio.open(\n filename,\n 'w',\n driver='GTiff',\n width=shape[1],\n height=shape[0],\n count=n_bands,\n dtype=dtype, \n nodata=nodata,\n transform=trans,\n crs=dst_crs) as f:\n yield f" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run command cmd in directory d.
def run(self, d, cmd, **kwargs): print("running %s in %s ..." % (cmd, d)) os.chdir(os.path.join(self.rootdir, d)) r = subprocess.call([sys.executable] + cmd, **kwargs) if r != 0: self.failed.append((d, cmd, r)) else: self.passed += 1 os.chdir(self.rootdir) return r
[ "def run(self, command, src_dir=False):\n dir = self.dep.src_dir if src_dir else self.dep.build_dir\n execute(f'cd {dir} && {command}', echo=True)", "def run(cmd):\n # TODO: subprocess.run can take in lists, so could do a check with `isinstance` to allow running a command as a list\n print(f\"Running shell command: {cmd}\")\n subprocess.run(cmd, shell=True, check=True)", "def spawn (self, cmd, search_path=1, level=1):\r\n from distutils.spawn import spawn\r\n spawn(cmd, search_path, dry_run= self.dry_run)", "def run_cmd(cmd, cwd=None, show_out=False):\n process = Popen(\n cmd,\n cwd=cwd,\n stdout=PIPE,\n stderr=sys.stdout if show_out else PIPE,\n universal_newlines=True,\n )\n stdout, stderr = process.communicate()\n return_code = process.returncode\n return (return_code, stdout, stderr)", "def execute_cmd(cmd):\n\tp = Popen(cmd , shell=True, stdout=PIPE, stderr=PIPE)\n\tout, err = p.communicate()\n\tprint \"Return code: \", p.returncode\n\tprint out.rstrip(), err.rstrip()", "def run(self):\n\n return fab.run('cd {}; {}'.format(self.directory, self.cmd()))", "def print_and_exec(cmd):\n print(cmd)\n os.system(cmd)", "def echo_and_run_cmd(cmd, tree_dir=\"\", wait_for_nfs=True, nfs_search_path=\"/data.nfs\"):\n out_list = []\n if wait_for_nfs:\n nfs_found = False\n wait_time = 10\n timeout = 200\n count_time = 0\n out_list.append(\"Looking for {} ...\".format(nfs_search_path))\n while (not nfs_found) and (count_time < timeout):\n if os.path.isdir(nfs_search_path):\n nfs_found=True\n out_list.append(\"{} found. Start computing...\".format(nfs_search_path))\n else:\n out_list.append(\"{} not found. Waiting for {} seconds...\".format(nfs_search_path, wait_time))\n time.sleep(wait_time)\n count_time+= wait_time\n if not nfs_found:\n print(\"[failed]\")\n print(\"\\n\".join(out_list))\n raise Exception(\"NFS {} could not be mounted. 
Abort.\".format(nfs_search_path))\n\n (ret, stdout, stderr) = runme(cmd)\n\n # format byte return\n try:\n stdout = stdout.decode(\"utf-8\")\n except:\n pass\n try:\n stderr = stderr.decode(\"utf-8\")\n except:\n pass\n\n if ret != 0:\n print(\"[failed]\")\n print_stars()\n print(\"\\n\".join(out_list))\n print_stars()\n print(\"[failed]:\\n%s\" % cmd)\n print(\"Execution failed with exit code: %d\" % ret)\n print_stars()\n print(\"Output message:\\n%s\" % stdout)\n print_stars()\n print(\"Error message:\\n%s\" % stderr)\n print_stars()\n if tree_dir:\n print(runme(\"tree --charset unicode %s\" % tree_dir))\n\n else:\n print(\"[ok]\")\n print_stars()\n print(\"[ok]:\\n%s \\n\" % cmd)\n print_stars()\n print(\"Output message:\\n%s\" % stdout)\n print_stars()\n print(\"Error message:\\n%s\" % stderr)\n print_stars()\n print(\"[COMMAND]:\\n'%s' \" % cmd)\n return ret", "def writecmd(dir,cmd):\n if (os.path.isdir(dir)):\n # Directory does not exist\n dir = dir.rstrip('/')\n filename = dir+\"/cmd\"\n try:\n wfile = open(filename, 'w')\n except:\n print('ERROR: Cannot write to file: ',filename)\n return False\n newcmd=cmd.strip()\n wfile.write(cmd+\"\\n\")\n wfile.close()\n return True\n else:\n return False", "def _run_cmd(self, cmds: List[str]):\n\n try:\n out, err = subprocess.Popen(\n cmds,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True,\n ).communicate()\n if err:\n raise TmuxFacadeException(err)\n return out\n except (OSError, FileNotFoundError) as e:\n raise TmuxFacadeException(str(e))", "def run(command):\n os.system(f\"{command} &> ./cmd-output\")\n content = readfile(\"./cmd-output\")\n return content", "def do_cd(self, line):\n args = self.line_to_args(line)\n if len(args) == 0:\n dirname = '~'\n else:\n if args[0] == '-':\n dirname = self.prev_dir\n else:\n dirname = args[0]\n dirname = resolve_path(dirname)\n mode = auto(get_mode, dirname)\n if mode_isdir(mode):\n global cur_dir\n self.prev_dir = cur_dir\n cur_dir = dirname\n else:\n self.print(\"Directory '%s' does not exist\" % dirname)", "def run_command( *args, **kwargs ):\n echo = kwargs.get( 'echo', True )\n cd = kwargs.get( 'chdir', None )\n raise_on_failure = kwargs.get( 'raise_on_failure', True )\n redirect = kwargs.get( 'redirect', None )\n append = kwargs.get( 'append', False )\n mach = kwargs.get( 'machine', None )\n sshexe = kwargs.get( 'sshexe', None )\n\n cmd,scmd = _assemble_command( *args )\n if mach:\n ss = 'ssh'\n if sshexe:\n ss = sshexe\n cmd,scmd = _assemble_command( ss, mach, scmd )\n\n dryrun = _is_dryrun( cmd )\n\n outfp = None\n fdout = None\n if not dryrun and redirect != None:\n if type(redirect) == type(2):\n fdout = redirect\n elif type(redirect) == type(''):\n fn = redirect\n if cd and not os.path.isabs( redirect ):\n fn = os.path.join( cd, redirect )\n if append: outfp = open( fn, \"a\" )\n else: outfp = open( fn, \"w\" )\n fdout = outfp.fileno()\n\n if echo:\n tm = time.time()\n L = []\n if cd: L.append( 'dir='+cd )\n else: L.append( 'dir='+os.getcwd() )\n if outfp != None:\n L.append( 'logfile='+redirect )\n startid = 'start='+str(tm)\n L.append( startid )\n L.append( 'cmd='+scmd )\n sys.stdout.write( '['+time.ctime(tm)+'] runcmd: '+repr(L)+'\\n' )\n sys.stdout.flush()\n\n # build the arguments for subprocess.Popen()\n argD = {}\n\n if type(cmd) == type(''):\n argD['shell'] = True\n\n argD['bufsize'] = -1 # use system buffer size (is this needed?)\n\n if fdout != None:\n argD['stdout'] = fdout\n argD['stderr'] = subprocess.STDOUT\n\n if cd:\n cwd = os.getcwd()\n 
os.chdir( cd )\n\n try:\n if dryrun:\n x = 0\n else:\n p = subprocess.Popen( cmd, **argD )\n x = p.wait()\n finally:\n if cd:\n os.chdir( cwd )\n\n if outfp != None:\n outfp.close()\n outfp = None\n fdout = None\n\n if echo:\n L = [ 'exit='+str(x), startid, 'cmd='+scmd ]\n sys.stdout.write( '['+time.ctime()+'] return: '+repr(L)+'\\n' )\n sys.stdout.flush()\n\n if raise_on_failure and x != 0:\n raise CommandException( '\\nCommand failed: '+scmd )\n\n return x", "def executeCmd(cmd, wait=False):\n\tpb = jl.ProcessBuilder([\"cmd.exe\", \"/C\", cmd])\n\tp = pb.start()\n\tif wait:\n\t\treturn p.waitFor()", "def cmd_run(cmd, shell=True, stdout=None, stdin=None, stderr=None):\n if type(cmd) == type([]):\n cmd = \" \".join([arg_esc(a) for a in cmd])\n return spc.Popen(cmd, shell=shell, stdout=stdout, stdin=stdin,\n stderr=stderr)", "def run_cmd(cmd):\n pp = Popen(str(cmd), shell=True, universal_newlines=True,\n stdout=PIPE, stderr=PIPE)\n out, err = pp.communicate()\n code = pp.returncode\n if out[-1:] == '\\n':\n out = out[:-1]\n if err[-1:] == '\\n':\n err = err[:-1]\n return code, out, err", "def exec_local_command(cmd):\n proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, error = proc.communicate()\n retcode = proc.poll()\n if retcode:\n LOG.error(\"{0} returned status {1}: {2}\".format(cmd, retcode, error))\n raise subprocess.CalledProcessError()\n else:\n return output", "def _run(self, cmd, args):\n # Depending on argument type assemble command to be run in list 'cmdl'\n cmdl = [cmd]\n if type(args) == str:\n cmdl += args.split(\" \")\n elif type(args) == list:\n cmdl += args\n else:\n errExit(4, \"Can't run Grid Engine command: unsupported argument type of \"\n + str(args) + \" = \" + str(type(args)))\n # Run the command\n try:\n p = subprocess.Popen(cmdl, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n so,se = p.communicate()\n except:\n errExit(4, \"got exception while invoking \" + \" \".join(cmdl))\n\n # Check for error codes\n if p.returncode:\n errExit(4, \"Running \" + \" \".join(cmdl) + \" resulted in error code: \"\n + str(p.returncode) + \"\\nError Output:\\n\" + se)\n\n # And for error output\n if se:\n print \"Warning: command \" + \" \".join(cmd) + \" has returned the \\\n following stderr output: \" + se\n\n # Return stdout\n return so", "def run(command):\n return os.popen(command).read()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the FAQs page is accessible
def test_page_access(self): response = self.client.get('/faqs/') self.assertEqual(response.status_code, 200)
[ "def test_faq_template(self):\n res = self.testapp.get('/faq', status=200)\n self.failUnless('Why is it that C3S wants me to sign?' in res.body)\n self.failUnless(\n 'Copyright 2013, OpenMusicContest.org e.V.' in res.body)", "def test_quest(self):\n response = self.client.get('/quest')\n self.assertContains(response, 'Quest', 3, 200)", "def test_the_home_url_is_accessible_by_name(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)", "def test_link_speakers(self):\n expected = 'href=\"{}#speakers\"'.format(r('home'))\n self.assertContains(self.resp, expected)", "def test_index_accessibility():\n with allure.step('Index page accessibility'):\n assert req_session.get(f'{url}').status_code == OK\n assert req_session.get(f'{url}/').status_code == OK\n assert req_session.get(f'{url}/index').status_code == OK\n with allure.step('[NEGATIVE]: unexisting link'):\n assert req_session.get(f'{url}/abracadabra').status_code == NOT_FOUND", "def test_can_load_page(self):\n self.browser.get('http://localhost:8000')\n self.assertIn('Teamwork | Teamwork', self.browser.title)", "def assert_can_access(self, user, discussion_id, thread_id, should_have_access):\n def call_single_thread():\n self.client.login(username=user.username, password='test')\n return self.client.get(\n reverse('single_thread', args=[str(self.course.id), discussion_id, thread_id])\n )\n\n if should_have_access:\n assert call_single_thread().status_code == 200\n else:\n assert call_single_thread().status_code == 404", "def test_about_page(self):\n response = self.client.get(reverse('rate_my_walk:about'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Our objective is to create a walking social media in Glasgow \")", "def test_access(self):\n # can't access if not logged in\n resp = self.client.get(self.url)\n redirected_url = reverse('account_login') + \"?next={}\".format(self.url)\n self.assertEqual(resp.status_code, 302)\n self.assertIn(redirected_url, resp.url)\n\n # can't access if not staff\n self.assertTrue(\n self.client.login(username=self.user.username, password='test')\n )\n resp = self.client.get(self.url)\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(resp.url, reverse('booking:permission_denied'))\n\n self.assertTrue(\n self.client.login(\n username=self.instructor_user.username, password='test'\n )\n )\n resp = self.client.get(self.url)\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(resp.url, reverse('booking:permission_denied'))\n\n self.assertTrue(\n self.client.login(\n username=self.staff_user.username, password='test'\n )\n )\n resp = self.client.get(self.url)\n self.assertEqual(resp.status_code, 200)", "def test_page(self):\n response = self.client.get(self.url)\n # Check response status code \n self.assertEqual(response.status_code, 200)", "def test_about_page_content(testapp):\n res = testapp.get(\"/about\").follow()\n print(res)\n assert \"About\" in res", "def test_post_landing_page(self):\n pass", "def test_edit_accessible(self):\n response, page = self._edit_page(Page.objects.get(pk=2))\n self.assertIsNotNone(page.url)\n self.assertTrue(any(\n 'View live' in message.message and page.url in message.message\n for message in response.context['messages']))", "def test_page_load(self):\n response = self.client.get('/fair/')\n self.assertEqual(response.status_code, 200)", "def test_disabled_accessibility_page(self):\n resp = self.client.get_html('/accessibility')\n self.assertEqual(resp.status_code, 404)", "def 
test_npf_org_admin_profile_page(self):\n response = self.client.get('/profile/')\n\n # check if users sees profile page\n self.assertEqual(response.status_code, 200)\n # self.assertTemplateUsed(response, 'profile.html')\n\n # check if user sees 2 urls to npf admin page\n self.assertContains(response, '<a href=\"/org-admin/\">', count=2)\n # check if the 2nd npf admin url has an icon\n # self.assertContains(response, '<i id=\"profile-info\" class=\"fa fa-lg fa-info-circle\">',)", "def test_create_accessible(self):\n response, page = self._create_page(Page.objects.get(pk=2))\n self.assertIsNotNone(page.url)\n self.assertTrue(any(\n 'View live' in message.message and page.url in message.message\n for message in response.context['messages']))", "def test_permission(self):\r\n self.assertTrue(self.user.has_perm('entries.can_clock_in'))\r\n response = self.client.get(self.url)\r\n self.assertEquals(response.status_code, 200)", "def test_page(self):\n response = self.client.get(self.url)\n #print response.content\n # Check response status code \n self.assertEqual(response.status_code, 200)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate the SearchSource (parameters etc.)
def __validate_source(self) -> None: source = self.search_source self.review_manager.logger.debug(f"Validate SearchSource {source.filename}") # if "query" not in source.search_parameters: # Note : for md-sources, there is no query parameter. # raise colrev_exceptions.InvalidQueryException( # f"Source missing query search_parameter ({source.filename})" # ) if "query" in source.search_parameters: pass # if "simple_query_string" in source.search_parameters["query"]: # if "query" in source.search_parameters["query"]["simple_query_string"]: # pass # else: # raise colrev_exceptions.InvalidQueryException( # "Source missing query/simple_query_string/query " # f"search_parameter ({source.filename})" # ) # elif "url" in source.search_parameters["query"]: # pass # # else: # raise colrev_exceptions.InvalidQueryException( # f"Source missing query/query search_parameter ({source.filename})" # ) self.review_manager.logger.debug(f"SearchSource {source.filename} validated")
[ "def validate(self, *args):\n pass", "def validate(self):\r\n\t\tfrom ..nrml import NRMLError\r\n\r\n\t\tsource_ids = []\r\n\t\tfor source in self.sources:\r\n\t\t\tif not source.source_id in source_ids:\r\n\t\t\t\tsource_ids.append(source.source_id)\r\n\t\t\telse:\r\n\t\t\t\traise NRMLError(\"Duplicate source id found: %s\" % source.source_id)", "def test_validate_source( self ):\n def non_iterator_dprov( source ):\n return self.provider_class( source )\n self.assertRaises( exceptions.InvalidDataProviderSource,\n non_iterator_dprov, 'one two three' )\n self.assertRaises( exceptions.InvalidDataProviderSource,\n non_iterator_dprov, 40 )", "def validate(self, alias, source):\n session = self._cache.switch(alias)\n try:\n logger.info(\"alias: %s, source: %s\" % (alias, source))\n session.validate(source)\n except NcclientException as e:\n logger.error(str(e))\n raise str(e)", "def validate(self, data):\n source = data.get(\"source\")\n if not source:\n raise exceptions.ValidationError(dict(source=\"Source is required.\"))\n\n project = data.get(\"project\")\n if source.project != project:\n raise exceptions.ValidationError(\n dict(source=\"Source must be in the same project.\")\n )\n\n source_types = (\"Github\", \"GoogleDocs\", \"GoogleDrive\", \"GoogleSheets\")\n if source.type_name not in source_types:\n raise exceptions.ValidationError(\n dict(\n source=f\"Source must be one of these types: {', '.join(source_types)}.\"\n )\n )\n\n # If the `reviewer` is a username or id then check that it is a\n # valid email address\n reviewer = data.get(\"reviewer\")\n if reviewer:\n try:\n try:\n data[\"reviewer\"] = User.objects.get(id=reviewer)\n except ValueError:\n data[\"reviewer\"] = User.objects.get(username=reviewer)\n except User.DoesNotExist:\n try:\n validate_email(reviewer)\n except ValidationError:\n raise exceptions.ValidationError(\n dict(\n reviewer=\"Reviewer is not a valid username, user id, or email address.\"\n )\n )\n else:\n data[\"reviewer\"] = None\n data[\"reviewer_email\"] = reviewer\n else:\n data[\"reviewer\"] = None\n\n return data", "def validate_settings(self):\n pass", "def validate_source(self, val):\n if val.strip() not in ['A', 'B', 'C', 'D']:\n self.set_rule_error()\n return False\n return True", "def test_valid_search_summary_exact():\n search = copy.deepcopy(SEARCH_SUMMARY)\n del search[2]\n del search[1]\n\n is_valid, errors = validate(search, 'searchSummary', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert is_valid", "def test_invalid_search_summary_matchtype():\n search = copy.deepcopy(SEARCH_SUMMARY)\n search[0]['matchType'] = 'XXXXX'\n\n is_valid, errors = validate(search, 'searchSummary', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n\n assert not is_valid", "def test_invalid_search_summary_missing_match():\n search = copy.deepcopy(SEARCH_SUMMARY)\n del search[0]['matchType']\n\n is_valid, errors = validate(search, 'searchSummary', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n\n assert not is_valid", "def test_valid_search_summary():\n is_valid, errors = validate(SEARCH_SUMMARY, 'searchSummary', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert is_valid", "def _search(self):", "def test_invalid_search_summary_regnum():\n search = copy.deepcopy(SEARCH_SUMMARY)\n search[0]['registrationNumber'] = 'XXXXXXXXXXXXX'\n\n is_valid, errors = validate(search, 'searchSummary', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n\n 
assert not is_valid", "def _check_sources(self):\n for source_name, source in self.sources.items():\n if \"data\" not in source or \"ref_column\" not in source:\n raise ValueError(\n \"Each source needs to have a `data` and a `ref_column` property\"\n )\n if not isinstance(source[\"data\"], pd.DataFrame):\n raise ValueError(\n \"The `data` property of each source must contain a DatFrame\"\n )\n if not isinstance(source[\"data\"].index, pd.DatetimeIndex):\n raise ValueError(\n \"The `data` DataFrame must have a pd.DatetimeIndex for each source\"\n )\n if source[\"data\"].index.duplicated().any():\n raise ValueError(\n \"The input dataframe must not have duplicate index values, \"\n \"convert the data into a normalized wide format\"\n )\n if (\n not isinstance(source[\"ref_column\"], str)\n or source[\"ref_column\"] not in source[\"data\"].columns\n ):\n raise ValueError(\n \"Each source must have a string specifying the reference column, and the reference\"\n \"column must be available in the source's DataFrame\"\n )\n if self.ref_source_name not in self.sources.keys():\n raise ValueError(\n \"The reference source name must be available in the source dict\"\n )", "def check_inputs(source_link, target_link):\n if not is_url_valid(DEFAULT_URL + source_link):\n raise ValueError('RESULT: The source link \"{}\" is invalid page'.format(unquote(source_link)))\n if not is_url_valid(DEFAULT_URL + target_link):\n raise ValueError('RESULT: The target link \"{}\" is invalid page'.format(unquote(target_link)))", "def _check_source_dir(self):\n if not os.path.isdir(self.source_dir):\n raise ValueError('source directory not found: ' + self.source_dir)", "def test_invalid_search_summary_vehicle():\n search = copy.deepcopy(SEARCH_SUMMARY)\n del search[2]['vehicleCollateral']['type']\n\n is_valid, errors = validate(search, 'searchSummary', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n\n assert not is_valid", "def validate_extended(self):", "def validate(self, data):\n url = data.get(\"url\")\n kind = data.get(\"kind\")\n google_id = data.get(\"google_id\")\n\n if url:\n address = GoogleDriveSource.parse_address(url, strict=True)\n del data[\"url\"]\n data[\"kind\"] = address.kind\n data[\"google_id\"] = address.google_id\n elif google_id:\n if not kind:\n raise exceptions.ValidationError(dict(kind=\"This field is required.\"))\n del data[\"url\"]\n else:\n message = \"Please provide either a URL or Google Drive id.\"\n raise exceptions.ValidationError(dict(url=message, google_id=message))\n return super().validate(data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add SearchSource as an endpoint (based on query provided to colrev search -a)
def add_endpoint(cls, operation: colrev.ops.search.Search, params: str) -> None: filename = operation.get_unique_filename( file_path_string=f"local_index_{params}".replace("%", "").replace("'", "") ) add_source = colrev.settings.SearchSource( endpoint="colrev.local_index", filename=filename, search_type=colrev.settings.SearchType.DB, search_parameters={"query": params}, comment="", ) operation.review_manager.settings.sources.append(add_source)
[ "def set_search(self, search_term, filters={}):\n\n self.search_term = search_term\n self.search_url = 'https://www.petfinder.com/search/'+self.search_term+'-for-adoption'\n if 'state' in filters:\n append = '/us/' + filters['state'] + '/?distance=100'\n self.search_url = self.search_url + append\n if 'page' in filters:\n # This largely doesn't work with selenium. If I could get it to get a url without appending a / on the end\n # it might work.\n append = '/&page=' + str(filters['page'])\n self.search_url = self.search_url + append\n # at this point you would add in the parameters from filters.", "def site_search_ac(r, **attr):\n\n response = current.response\n resource = r.resource\n settings = current.deployment_settings\n\n # Query comes in pre-filtered to accessible & deletion_status\n # Respect response.s3.filter\n resource.add_filter(response.s3.filter)\n\n _vars = current.request.get_vars\n\n # JQueryUI Autocomplete uses \"term\" instead of \"value\"\n # (old JQuery Autocomplete uses \"q\" instead of \"value\")\n value = _vars.term or _vars.value or _vars.q or None\n\n # We want to do case-insensitive searches\n # (default anyway on MySQL/SQLite, but not PostgreSQL)\n value = s3_str(value).lower().strip()\n\n if not value:\n r.error(400, \"Missing option! Require value\")\n\n # Construct query\n query = (FS(\"name\").lower().like(value + \"%\"))\n\n # Add template specific search criteria\n extra_fields = settings.get_org_site_autocomplete_fields()\n for field in extra_fields:\n if \"addr_street\" in field:\n # Need to be able to get through the street number\n query |= (FS(field).lower().like(\"%\" + value + \"%\"))\n else:\n query |= (FS(field).lower().like(value + \"%\"))\n\n resource.add_filter(query)\n\n MAX_SEARCH_RESULTS = settings.get_search_max_results()\n limit = int(_vars.limit or MAX_SEARCH_RESULTS)\n if (not limit or limit > MAX_SEARCH_RESULTS) and resource.count() > MAX_SEARCH_RESULTS:\n output = [\n {\"label\": str(current.T(\"There are more than %(max)s results, please input more characters.\") % \\\n {\"max\": MAX_SEARCH_RESULTS})}\n ]\n else:\n # default fields to return\n fields = [\"name\",\n \"site_id\",\n ]\n\n # Add template specific fields to return\n fields += extra_fields\n\n rows = resource.select(fields,\n start=0,\n limit=limit,\n orderby=\"name\",\n as_rows=True)\n output = []\n append = output.append\n for row in rows:\n # Populate record\n _row = row.get(\"org_site\", row)\n record = {\"id\": _row.site_id,\n \"name\": _row.name,\n }\n\n # Populate fields only if present\n org = row.get(\"org_organisation.name\", None)\n if org:\n record[\"org\"] = org\n L1 = row.get(\"gis_location.L1\", None)\n if L1:\n record[\"L1\"] = L1\n L2 = row.get(\"gis_location.L2\", None)\n if L2:\n record[\"L2\"] = L2\n L3 = row.get(\"gis_location.L3\", None)\n if L3:\n record[\"L3\"] = L3\n L4 = row.get(\"gis_location.L4\", None)\n if L4:\n record[\"L4\"] = L4\n addr_street = row.get(\"gis_location.addr_street\", None)\n if addr_street:\n record[\"addr\"] = addr_street\n\n # Populate match information (if applicable)\n s3_set_match_strings(record, value)\n append(record)\n\n response.headers[\"Content-Type\"] = \"application/json\"\n return json.dumps(output, separators=JSONSEPARATORS)", "def datasource():\n # Retrieve the datas ource from query param.\n source = request.args.get('source', type=str, default='jhu')\n\n # Attach source to request and return it.\n request.source = data_source(source)\n pass", "def do_search(self, *args, **kwargs):\n search_url = 
self.search_urls[0]\n response = self._request(\n search_url,\n info_message=f\"Sending search request: {search_url}\",\n exception_message=f\"Skipping error while searching for {self.provider} \"\n f\"{self.__class__.__name__} instance:\",\n )\n return [response.json()]", "def search(self, query, *args, **kwargs):\n # TODO: read .csv???\n raise RuntimeError('Search is not supported for this provider')", "def SearchFilter(self, query):\n return self.NewViewFilter(search_query=query)", "def get_urls(self):\n return (\n ('/search/', self.require_method(self.api_search, ['GET'])),\n ) + super(HookedResource, self).get_urls()", "def search(\n endpoint,\n in_filter={},\n exclude_filter={},\n fields=[],\n expand=[],\n typ='dataframe',\n method='GET',\n):\n\n try:\n assert typ.lower() in ['json', 'dataframe']\n except (AttributeError, AssertionError):\n raise ValueError(\n 'typ should be a string of either JSON or dataframe, '\n 'not {}'.format(typ)\n )\n filters = simple_and_filter(in_dict=in_filter, exclude_dict=exclude_filter)\n if isinstance(fields, str):\n fields = [fields]\n if isinstance(expand, str):\n expand = [expand]\n payload = {'size': 1}\n if filters:\n payload['filters'] = json.dumps(filters)\n if fields:\n payload['fields'] = ','.join(fields)\n if expand:\n payload['expand'] = ','.join(expand)\n url = '{}/{}'.format(GDC_API_BASE, endpoint)\n if method.upper() == 'POST':\n response = requests.post(url, data=payload)\n elif method.upper() == 'GET':\n response = requests.get(url, params=payload)\n else:\n raise ValueError(\n 'Invalid method: {}\\n method must be either \"GET\" '\n 'or \"POST\".'.format(method)\n )\n try:\n payload['size'] = response.json()['data']['pagination']['total']\n except KeyError:\n payload.pop('size')\n response = requests.get(url, params=payload)\n if typ.lower() == 'json':\n return response.json()\n else:\n warnings.warn(\n 'Fail to get a table of results. JSON returned. '\n 'Please check the result carefully.',\n stacklevel=2,\n )\n return response.json()\n if method.upper() == 'POST':\n response = requests.post(url, data=payload)\n else:\n response = requests.get(url, params=payload)\n if response.status_code == 200:\n results = response.json()['data']['hits']\n if typ.lower() == 'json':\n return results\n try:\n return pd.io.json.json_normalize(reduce_json_array(results))\n except Exception:\n warnings.warn(\n 'Fail to convert searching results into table. 
'\n 'JSON will be returned.',\n stacklevel=2,\n )\n return results\n else:\n warnings.warn(\n 'Searching failed with HTTP status code: '\n '{}'.format(response.status_code),\n stacklevel=2,\n )\n return None", "def host_search(self, query, from_ind=None, to_ind=None):\n raise NotImplementedError('override me')", "def search_url(self):\n return self.request.link(Search(self.request, None, None))", "def apply_query():\n s = Search(using=ES_OBJECT, index=SEARCHING_INDEX)\n if INDEXATION_MODE == \"autocomplete\":\n logging.info(\"Applying autocomplete search\")\n s.update_from_dict(\n autocomplete_query(QUERY, FIELDS_TO_SEARCH, popularity_field=POPULARITY_FIELD)\n )\n elif INDEXATION_MODE in [\"basic_english\", \"french\"]:\n logging.info(\"Applying multi match search with fuzziness if set in yaml\")\n s.update_from_dict(\n multi_match_query(QUERY, FIELDS_TO_SEARCH, fuzziness=FUZZINESS)\n )\n else:\n raise NotImplementedError(\"Mode d'indexation choisi pas setup\")\n return s", "def api_search(self):\n g.list_callback = 'api_search'\n\n if not getattr(self, 'check_%s' % request.method.lower())():\n return self.response_forbidden()\n\n # terms to search for\n search_term = request.args.get('query') or ''\n\n # the engine to use\n engine = request.args.get('engine') or ''\n\n # construct a raw query\n query = self.get_query()\n query = self.apply_ordering(query)\n\n if engine == 'default':\n # search in default fields\n\n # split keywords by blank chars\n kw_set = set(re.split(r'\\s+', search_term, re.U))\n kw_set.discard('')\n if kw_set and self._search.get('default', []):\n query = self.apply_search_query(\n query, list(kw_set), self._search['default'])\n else:\n # more complicated search methods\n # split query to 'field:(terms)'' or 'term' using the\n # following regular expression\n regex = re.compile(\n '((?:\\w+:\\([^)]*\\))|(?:\\w+:[^()\\s]+)|[^:\\s]+)', re.U)\n kw_split_list = regex.findall(search_term)\n search_kw = MultiDict()\n\n for kw in kw_split_list:\n try:\n sp = kw.index(':')\n key = kw[0:sp]\n val = kw[sp + 1:]\n if val.startswith('(') and val.endswith(')'):\n # expand\n for x in re.split(r'\\s+', val[1:-1], re.U):\n x and search_kw.add(key, x)\n else:\n # single term\n search_kw.add(key, val)\n\n except ValueError:\n # single word\n search_kw.add('default', kw)\n\n # apply search filter engine by engine\n for engine, kws in search_kw.iterlists():\n kw_set = set(kws)\n kw_set.discard('')\n if kw_set and self._search.get(engine, []):\n query = self.apply_search_query(\n query, list(kw_set), self._search[engine])\n\n # apply output limit \n if self.paginate_by or 'limit' in request.args:\n return self.paginated_object_list(query)\n\n return self.response(self.serialize_query(query))", "def _assemble(self):\n self._filters.extend(list(self._default_filters.values()))\n if self._start is not None:\n self.es_query['from'] = self._start\n self.es_query['size'] = self._size if self._size is not None else SIZE_LIMIT\n if self._exclude_source:\n self.es_query['_source'] = False\n elif self._source is not None:\n self.es_query['_source'] = self._source\n if self._aggregations:\n self.es_query['aggs'] = {\n agg.name: agg.assemble()\n for agg in self._aggregations\n }", "def log_source_search(request):\n\n try:\n return 200, search_log_source_by_keyword(request.body)\n except ValueError as e:\n return 404, {'error': repr(e)}\n except HTTPError as e:\n return 404, {'error': repr(e)}\n except Exception as unknown_exception:\n return 500, {'error': repr(unknown_exception)}", "def 
register_search(cls):\n model = cls.model\n label = f'{model._meta.app_label}.{model._meta.model_name}'\n registry['search'][label] = cls\n\n return cls", "def gen_search_index(record, link_content):\n pass", "def buildSearchUrl(self, searchTerms, limit):\n\t\treturn self.api_url + self.withAction('opensearch') + self.withFormat('xml') + self.withLimitedSearch(searchTerms, limit)", "def _search(self):", "def search(self, query, model=None):\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve masterdata from LocalIndex based on similarity with the record provided
def get_masterdata( self, prep_operation: colrev.ops.prep.Prep, record: colrev.record.Record, save_feed: bool = True, timeout: int = 10, ) -> colrev.record.Record: if any(self.origin_prefix in o for o in record.data["colrev_origin"]): # Already linked to a local-index record return record retrieved_record = self.__retrieve_record_from_local_index( record=record, retrieval_similarity=prep_operation.retrieval_similarity, ) # restriction: if we don't restrict to CURATED, # we may have to rethink the LocalIndexSearchFeed.set_ids() if "CURATED" not in retrieved_record.data.get( "colrev_masterdata_provenance", "" ): return record default_source = "LOCAL_INDEX" if "colrev_masterdata_provenance" in retrieved_record.data: if "CURATED" in retrieved_record.data["colrev_masterdata_provenance"]: default_source = retrieved_record.data["colrev_masterdata_provenance"][ "CURATED" ]["source"] self.__store_retrieved_record_in_feed( record=record, retrieved_record=retrieved_record, default_source=default_source, prep_operation=prep_operation, ) return record
[ "def bfs_trrust_database_search_target(list_of_input_genes, trrust_filepath=\"../trrust_rawdata.human.tsv\", column_names=[\"Transcription factor\", \"Target gene\", \"Relationship\", \"PubMED identifier\"], return_all=False):\n\n\n\tdf = pd.read_csv(trrust_filepath, delimiter='\\t', header=None)\n\tdf.columns = column_names\n\n\tmaster_visited = []\n\tmaster_relationships = []\n\n\n\tif not return_all:\n\t\tfor gene in list_of_input_genes:\n\t\t\tprint(gene)\n\n\t\t\tqueue = [gene]\n\t\t\tvisited = []\n\t\t\trelationships = []\n\n\t\t\twhile queue:\n\t\t\t\tcurrent_gene = queue.pop(0)\n\t\t\t\tfor target_gene in df.loc[df[\"Transcription factor\"]==current_gene.upper()][\"Target gene\"].values:\n\t\t\t\t\tif target_gene not in visited:\n\t\t\t\t\t\tvisited.append(target_gene)\n\t\t\t\t\t\tqueue.append(target_gene)\n\t\t\t\t\t\trelationships.append([current_gene,target_gene])\n\n\t\t\tmaster_visited.append(visited)\n\t\t\tmaster_relationships.append(relationships)\n\n\telif return_all:\n\t\tfor index, row in df.iterrows():\n\t\t\tmaster_relationships.append([row[\"Transcription factor\"], row[\"Target gene\"]])\n\t\t\tif index % 1000==0:\n\t\t\t\tprint(index)\n\t\tmaster_relationships = [master_relationships]\n\n\tflat_visited = []\n\tfor sub_list in master_visited:\n\t\tfor gene_name in sub_list:\n\t\t\tflat_visited.append(gene_name)\n\n\tcount_dict = {}\n\tfor gene_name in flat_visited:\n\t\tif (gene_name in count_dict):\n\t\t\tcount_dict[gene_name] += 1\n\t\telse:\n\t\t\tcount_dict[gene_name] = 1\n\n\treturn master_visited, master_relationships, count_dict", "def sub_to_master(self, index):\n for mapping in self.atom_mapping:\n if index == mapping[0]:\n return mapping[1]", "def master_to_sub(self, index):\n for mapping in self.atom_mapping:\n if index == mapping[1]:\n return mapping[0]", "def getSparseMatches(self) -> sMatches:\n ...", "def get_sim_index(measures, df, res_df, true_cluster):\n out = {}\n for link in ['single', 'complete', 'average']:\n diss = []\n for measure in measures:\n if 'rccf' in measure:\n # rccf1,2,3\n idx = measure[-1]\n clustering = HClust(res_df, true_cluster,\n f'cross_corr{idx}', residuals=True)\n diss.append(clustering.cluster_eval(eval_type='sim',\n linkage_type=link))\n else:\n # other dissimilarity measures\n clustering = HClust(df, true_cluster, measure)\n diss.append(clustering.cluster_eval(eval_type='sim',\n linkage_type=link)) \n\n out[link] = diss\n return pd.DataFrame(out, index=measures)", "def predict_record_fast(train: tuple, record: np.array, K: int=10) -> list:\n # In this simple model, look for the number of identical columns for each training record.\n # Skip the last column (type of modification);\n X_train,y_train=train[0],train[1]\n #identical_values_per_row = pd.Series(np.array(record[3:] == X_train[:,3:]).sum(axis=1))\n\n # Fuzzy name match\n dataframecolumn0 = pd.DataFrame([record[0]])\n dataframecolumn0.columns = ['Match0']\n \n compare0 = pd.DataFrame(X_train[:,0])\n compare0.columns = ['compare0']\n \n dataframecolumn0['Key0'] = 1\n compare0['Key0'] = 1\n combined_dataframe0 = dataframecolumn0.merge(compare0,on=\"Key0\",how=\"left\")\n \n identical_values_per_row = pd.Series(partial_match_vector_name(combined_dataframe0['Match0'],combined_dataframe0['compare0']))\n\n # Fuzzy address match\n dataframecolumn1 = pd.DataFrame([record[1]])\n dataframecolumn1.columns = ['Match1']\n \n compare1 = pd.DataFrame(X_train[:,1])\n compare1.columns = ['compare1']\n \n dataframecolumn1['Key1'] = 1\n compare1['Key1'] = 1\n combined_dataframe1 = 
dataframecolumn1.merge(compare1,on=\"Key1\",how=\"left\")\n \n identical_values_per_row += partial_match_vector_address(combined_dataframe1['Match1'],combined_dataframe1['compare1'])\n\n # Fuzzy email_first_part match\n dataframecolumn2 = pd.DataFrame([record[3]])\n dataframecolumn2.columns = ['Match2']\n \n compare2 = pd.DataFrame(X_train[:,3])\n compare2.columns = ['compare2']\n \n dataframecolumn2['Key2'] = 1\n compare2['Key2'] = 1\n combined_dataframe2 = dataframecolumn2.merge(compare2,on=\"Key2\",how=\"left\")\n \n identical_values_per_row += partial_match_vector_email_first(combined_dataframe2['Match2'],combined_dataframe2['compare2'])\n\n #Fuzzy email_domain match\n dataframecolumn3 = pd.DataFrame([record[2]])\n dataframecolumn3.columns = ['Match3']\n \n compare3 = pd.DataFrame(X_train[:,2])\n compare3.columns = ['compare3']\n \n dataframecolumn3['Key3'] = 1\n compare3['Key3'] = 1\n combined_dataframe3 = dataframecolumn3.merge(compare3,on=\"Key3\",how=\"left\")\n \n identical_values_per_row += partial_match_vector_number_no_prefix(combined_dataframe3['Match3'],combined_dataframe3['compare3'])\n # Obtain the K rows with the most matches;\n best_matches = identical_values_per_row.sort_values(ascending=False)[:K] \n # Retrieve the original record IDs from the training set;\n #print(best_matches)\n return list(zip(list(y_train.loc[best_matches.index.values]) ,list(best_matches)))", "def get_sample(local_data: Dict, index: int) -> Tuple[np.ndarray, int]:\n\n # Grab a sample and label at the index\n sample = local_data[\"samples\"][index, :]\n label = local_data[\"labels\"][index]\n\n return sample, label", "def search_sbt_index(tree, query, threshold):\n for leaf in tree.find(search_minhashes, query, threshold, unload_data=True):\n similarity = query.similarity(leaf.data)\n yield leaf.data, similarity", "def similar_to(self, user_handle):\n data = self.__load_data()\n\n logging.info(\"Looking up similarity to %s from frame %s\", user_handle, data.columns)\n\n return list_similar(\n data,\n user_handle,\n self.__pipeline,\n self.__model,\n drop = self.__drop_columns,\n )", "def structural_similarity_index(target, prediction, mask=None) -> torch.Tensor:\n ssim = StructuralSimilarityIndexMeasure(return_full_image=True)\n _, ssim_idx_full_image = ssim(preds=prediction, target=target)\n mask = torch.ones_like(ssim_idx_full_image) if mask is None else mask\n try:\n ssim_idx = ssim_idx_full_image[mask]\n except Exception as e:\n print(f\"Error: {e}\")\n if len(ssim_idx_full_image.shape) == 0:\n ssim_idx = torch.ones_like(mask) * ssim_idx_full_image\n return ssim_idx.mean()", "def bfs_trrust_database_search_tf(list_of_input_genes, trrust_filepath=\"../trrust_rawdata.human.tsv\", column_names=[\"Transcription factor\", \"Target gene\", \"Relationship\", \"PubMED identifier\"], return_all=False):\n\n\n\tdf = pd.read_csv(trrust_filepath, delimiter='\\t', header=None)\n\tdf.columns = column_names\n\n\tmaster_visited = []\n\tmaster_relationships = []\n\n\tif not return_all:\n\t\tfor gene in list_of_input_genes:\n\t\t\tprint(gene)\n\n\t\t\tqueue = [gene]\n\t\t\tvisited = []\n\t\t\trelationships = []\n\n\t\t\twhile queue:\n\t\t\t\tcurrent_gene = queue.pop(0)\n\t\t\t\tfor tf_gene in df.loc[df[\"Target gene\"]==current_gene.upper()][\"Transcription factor\"].values:\n\t\t\t\t\tif tf_gene not in 
visited:\n\t\t\t\t\t\tvisited.append(tf_gene)\n\t\t\t\t\t\tqueue.append(tf_gene)\n\t\t\t\t\t\trelationships.append([current_gene,tf_gene])\n\n\t\t\tmaster_visited.append(visited)\n\t\t\tmaster_relationships.append(relationships)\n\n\telse:\n\t\tfor index, row in df.iterrows():\n\t\t\tmaster_relationships.append([row[\"Target gene\"], row[\"Transcription factor\"]])\n\n\tflat_visited = []\n\tfor sub_list in master_visited:\n\t\tfor gene_name in sub_list:\n\t\t\tflat_visited.append(gene_name)\n\n\tcount_dict = {}\n\tfor gene_name in flat_visited:\n\t\tif (gene_name in count_dict):\n\t\t\tcount_dict[gene_name] += 1\n\t\telse:\n\t\t\tcount_dict[gene_name] = 1\n\n\treturn master_visited, master_relationships, count_dict", "def similarity(dataframe):\r\n main = dataframe\r\n \r\n dataframe = feature_selection(dataframe)\r\n train_size = round((len(dataframe)*0.9))\r\n train = dataframe[:train_size]\r\n test = dataframe[train_size:]\r\n \r\n test_value = test.iloc[np.random.randint(0,10),:]\r\n \r\n #compute cosine similarity\r\n neighbors = {}\r\n for i, r in train.iterrows():\r\n similarity = np.dot(test_value,r)/(np.linalg.norm(test_value)*np.linalg.norm(r))\r\n neighbors[i] = similarity\r\n \r\n #get similary movies in descending order\r\n neighbors = {k: v for k, v in sorted(neighbors.items(), key=lambda item: item[1], reverse=True)}\r\n \r\n test_final = pd.concat([test, main], axis=1, sort=False)\r\n train_final = pd.concat([train, main], axis=1, sort=False)\r\n \r\n test_movie = test_final.loc[test_value.name,['Title', 'Rated', 'Genre', 'imdbRating']]\r\n similar_movies = train_final.loc[list(neighbors.keys())[:5],['Title','Rated', 'Genre', 'Released', 'imdbRating']]\r\n \r\n return test_movie, similar_movies", "def get_for_indexer(self, value):", "def _get_master_idx_file(self, update_cache=False, **kwargs):\n if self._master_idx_file is None or update_cache:\n if self.idx_filename in self.get_listings_directory().text:\n master_idx_url = \"{path}/{filename}\".format(\n path=self.path, filename=self.idx_filename)\n self._master_idx_file = self.client.get_response(\n master_idx_url, self.params, **kwargs).text\n else:\n raise EDGARQueryError(\"\"\"File {filename} not found.\n There may be no filings for the given day/quarter.\"\"\".format(\n filename=self.idx_filename))\n return self._master_idx_file", "def load_similarity_index(self) -> None:\n self._simindex = similarities.Similarity.load(self.simindex_filename)", "def predict_universe_nearest_neighbor(query, train_labels, lda, index):\n #convert the query to LDA space:\n query_lda = lda[query]\n \n sims = index[query_lda]\n #TODO: this ain't right\n# cosine_distances = [spatial.distance.cosine(query, train_doc_topics)\\\n# for train_doc_topics in sims]\n return train_labels[np.argmax(sims)]", "def get_attributes_values_in_file_by_index(master_index,file_root_and_name):\n print 'begin',datetime.datetime.now()\n command=\"sed -n '/%s/,/^[[:space:]]*$/p' %s \"%(master_index,file_root_and_name)\n out = connections.execute_mml_without_check(command)\n print 'sed command success',datetime.datetime.now()\n values_in_file = []\n line_list = out.split(\"\\r\\n\")\n print 'split with enter',datetime.datetime.now()\n for line in line_list:\n if line != '':\n if line.count('[') == 0:\n values_in_file.append(line.strip())\n print 'get every line',datetime.datetime.now()\n print 'end',datetime.datetime.now()\n print values_in_file\n return values_in_file", "def get_data_subset(self, key_label, match_keys):\n row_inds = []\n\n for key in 
match_keys:\n ind = self.get_ind(key, key_label)\n row_inds.append(ind)\n\n data = self.data[row_inds,:]\n new_db = txt_database(data, self.labels)\n return new_db", "def query(self, query_hash: str) -> t.List[IndexMatch[IndexT]]:\n features = prepare_vpdq_feature(query_hash, self.quality_threshold)\n if not features:\n return []\n results = self.index.search_with_distance_in_result(\n features, VPDQ_DISTANCE_THRESHOLD\n )\n query_matched: t.Dict[int, t.Set[str]] = {}\n index_matched: t.Dict[int, t.Set[int]] = {}\n matches: t.List[IndexMatch[IndexT]] = []\n for hash in results:\n for match in results[hash]:\n # query_str => (matched_idx, distance)\n vpdq_match, entry_list = self._index_idx_to_vpdqHex_and_entry[match[0]]\n for entry_id in entry_list:\n if entry_id not in query_matched:\n query_matched[entry_id] = set()\n query_matched[entry_id].add(hash)\n\n if entry_id not in index_matched:\n index_matched[entry_id] = set()\n index_matched[entry_id].add(vpdq_match)\n for entry_id in query_matched.keys():\n query_matched_percent = len(query_matched[entry_id]) * 100 / len(features)\n index_matched_percent = (\n len(index_matched[entry_id])\n * 100\n / len(self._entry_idx_to_features_and_entries[entry_id][0])\n )\n if (\n query_matched_percent >= self.query_match_threshold_pct\n and index_matched_percent >= self.index_match_threshold_pct\n ):\n matches.append(\n IndexMatch(\n VPDQSimilarityInfo(\n query_matched_percent, index_matched_percent\n ),\n self._entry_idx_to_features_and_entries[entry_id][1],\n )\n )\n return matches" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Modified velstor.api call for metadata copies
def ns_copy(session, vtrqid, src, dest, overwrite): result = ns.copy_vector(session, vtrqid, [{'src': src, 'dest': dest}], overwrite) # print('ns_copy:', result) # # This command always does a single operation, but it is conveyed through # a vector mechanism. We need to unpack it here. 'result' is # a 'requests' response object. # # # Emulate a requests.response object # try: # # If the request didn't complete the operation, the standard # response handling is sufficient. # if result['status_code'] != 200: return result # # Unpack the json-formatted body # payload = json.loads(result['body']) if payload['error_sym'] != 'OK': return result # # Everything went well. If the array is empty, the metadata copy # succeeded and 'error_sym' is 'OK'. Otherwise, the 'error_sym' # value is the value inside the array. # errsym = 'OK' if len(payload['result']): errsym = payload['result'][0]['error_sym'] message = os.strerror(reverse_dict_lookup(errno.errorcode, errsym)) return synthetic_response( rpc_status_to_http_status(errsym), errsym, message) except: message = 'Internal client error on meta-data copy: ' # message += str(sys.exc_info()[0]) message += str(sys.exc_info()) return synthetic_response(500, 'EREMOTEIO', message)
[ "def smartCopy(*args, **kwargs):\n \n pass", "def test_metadata(self):\n\n # Grab the first available part\n part = Part.list(self.api, limit=1)[0]\n\n part.setMetadata(\n {\n \"foo\": \"bar\",\n },\n overwrite=True,\n )\n\n metadata = part.getMetadata()\n\n # Check that the metadata has been overwritten\n self.assertEqual(len(metadata.keys()), 1)\n\n self.assertEqual(metadata['foo'], 'bar')\n\n # Now 'patch' in some metadata\n part.setMetadata(\n {\n 'hello': 'world',\n },\n )\n\n part.setMetadata(\n {\n 'foo': 'rab',\n }\n )\n\n metadata = part.getMetadata()\n\n self.assertEqual(len(metadata.keys()), 2)\n self.assertEqual(metadata['foo'], 'rab')\n self.assertEqual(metadata['hello'], 'world')", "def copy_object_metadata(source_metadata_dict, destination_metadata_dict):\n if not destination_metadata_dict:\n destination_metadata_dict = {}\n if not source_metadata_dict:\n return destination_metadata_dict\n for field in _COMMON_S3_METADATA_FIELDS:\n if field in source_metadata_dict:\n destination_metadata_dict[field] = source_metadata_dict[field]\n return destination_metadata_dict", "def mbtiles_metadata(self):\n self.metadata = dict(self.mbtiles.execute('select name, value from metadata;').fetchall())\n (metadata, mime_type) = self.jsonp(self.metadata)\n self.send_file(self.tileset + '.json', metadata, mime_type)\n self.send_file(self.tileset + '/metadata.json', metadata, mime_type)\n self.out('- Uploading metadata.\\n')", "def copyViewFrom(*args, **kwargs):\n \n pass", "def copy_meta_from(self, ido, deep):\n # Note that `pyvista.MultiBlock` datasets currently don't have any meta.\n # This method is here for consistency with the rest of the API and\n # in case we add meta data to this pbject down the road.\n pass", "def test_azure_service_api_snapshots_get(self):\n pass", "def svn_fs_copy(*args) -> \"svn_error_t *\":\n return _fs.svn_fs_copy(*args)", "def create_snapshot(self, snapshot, share_server):", "def svn_fs_info_dup(*args) -> \"void *\":\n return _fs.svn_fs_info_dup(*args)", "def _get_resp_post(self, body, version=\"3.8\"):\n req = webob.Request.blank('/v3/%s/manageable_snapshots' %\n fake.PROJECT_ID)\n req.method = 'POST'\n req.headers['Content-Type'] = 'application/json'\n req.headers['OpenStack-API-Version'] = 'volume ' + version\n req.environ['cinder.context'] = self._admin_ctxt\n req.body = jsonutils.dump_as_bytes(body)\n res = req.get_response(app())\n return res", "def GetMetadata(client_id, client_full_info):\n\n metadata = base.ExportedMetadata()\n\n last_snapshot = None\n if client_full_info.HasField(\"last_snapshot\"):\n last_snapshot = client_full_info.last_snapshot\n\n metadata.client_urn = client_id\n metadata.client_age = client_full_info.metadata.first_seen\n\n if last_snapshot is not None:\n kb = client_full_info.last_snapshot.knowledge_base\n\n metadata.hostname = kb.fqdn\n metadata.os = kb.os\n metadata.uname = last_snapshot.Uname()\n metadata.os_release = last_snapshot.os_release\n metadata.os_version = last_snapshot.os_version\n metadata.usernames = \",\".join(user.username for user in kb.users)\n\n addresses = last_snapshot.GetMacAddresses()\n if addresses:\n metadata.mac_address = \"\\n\".join(last_snapshot.GetMacAddresses())\n metadata.hardware_info = last_snapshot.hardware_info\n metadata.kernel_version = last_snapshot.kernel\n\n ci = last_snapshot.cloud_instance\n if ci is not None:\n if ci.cloud_type == ci.InstanceType.AMAZON:\n metadata.cloud_instance_type = metadata.CloudInstanceType.AMAZON\n metadata.cloud_instance_id = ci.amazon.instance_id\n elif 
ci.cloud_type == ci.InstanceType.GOOGLE:\n metadata.cloud_instance_type = metadata.CloudInstanceType.GOOGLE\n metadata.cloud_instance_id = ci.google.unique_id\n\n system_labels = set()\n user_labels = set()\n for l in client_full_info.labels:\n if l.owner == \"GRR\":\n system_labels.add(l.name)\n else:\n user_labels.add(l.name)\n\n metadata.labels = \",\".join(sorted(system_labels | user_labels))\n metadata.system_labels = \",\".join(sorted(system_labels))\n metadata.user_labels = \",\".join(sorted(user_labels))\n\n return metadata", "def copyInPlace(*args, **kwargs):\n \n pass", "def test_azure_service_api_snapshots_post(self):\n pass", "def _FetchCommonMetadata(self, callback):\n paths = [ \"meta-data/hostname\", \"meta-data/instance-id\", \"user-data/passphrase\" ]\n self.FetchMetadata(paths, callback)", "def test__ChannelMetadataBase__copy():\n channel_metadata = ChannelMetadataBase()\n \n copy = channel_metadata.copy()\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n \n vampytest.assert_eq(copy, channel_metadata)", "def metadata(self) -> dict[str, Any]:", "def testCopy(self):\n impl = self.impl\n ws_name = self.ws_name\n conf = self.conf\n ws_meta = self.ws_meta\n\n test_object5 = {\n \"id\": \"test_object_id5\",\n \"type\": \"Genome\",\n \"data\": {\"name\":\"testgenome5\", \"string\":\"ACACGATTACA\"},\n \"workspace\": ws_name,\n \"command\": \"something\",\n \"metadata\": {\"origin\":\"shreyas\"},\n \"auth\": self.__class__.token\n }\n obj_meta5 = impl.save_object(test_object5)\n\n\n ws_name2 = \"testWS_%s\" % datetime.utcnow().strftime('%s')\n conf2 = {\"workspace\": ws_name2,\"default_permission\": \"a\", \"auth\": self.__class__.token }\n ws_meta2 = self.impl.create_workspace(conf2)\n\n impl.copy_object({\n \"new_id\": \"new_object_id5\",\n \"new_workspace\": ws_name2,\n \"source_id\": \"test_object_id5\",\n \"source_workspace\": ws_name,\n \"type\": \"Genome\",\n \"auth\": self.__class__.token\n })\n\n has_object = impl.has_object({\n \"id\": \"new_object_id5\",\n \"workspace\": ws_name2,\n \"type\": \"Genome\",\n \"auth\": self.__class__.token\n })\n self.assertTrue(has_object)", "def test__ChannelMetadataBase__copy_with__0():\n channel_metadata = ChannelMetadataBase()\n \n copy = channel_metadata.copy_with()\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n \n vampytest.assert_eq(copy, channel_metadata)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write a segment and AnalogSignal in a text file. Arguments
def write_segment(self, segment, delimiter = '\t', skiprows =0, writetimecolumn = True, ): if skiprows: raise NotImplementedError('skiprows values other than 0 are not ' + 'supported') l = [ ] if writetimecolumn is not None: l.append(segment.analogsignals[0].times[:, np.newaxis]) for anaSig in segment.analogsignals: l.append(anaSig.magnitude[:, np.newaxis]) sigs = np.concatenate(l, axis=1) #print sigs.shape np.savetxt(self.filename , sigs , delimiter = delimiter)
[ "def write_segment(self, segment,\n delimiter = '\\t',\n \n skiprows =0,\n \n timecolumn = None,\n \n ):\n \n \n sigs = None\n for analogSig in segment.get_analogsignals():\n if sigs is None :\n sigs = analogSig.signal[:,newaxis]\n else :\n sigs = concatenate ((sigs, analogSig.signal[:,newaxis]) , axis = 1 )\n \n if timecolumn is not None:\n t = segment.get_analogsignals()[0].t()\n print sigs.shape , t.shape\n sigs = concatenate ((sigs, t[:,newaxis]*nan) , axis = 1 )\n sigs[:,timecolumn+1:] = sigs[:,timecolumn:-1].copy()\n sigs[:,timecolumn] = t\n \n savetxt(self.filename , sigs , delimiter = delimiter)", "def write_segments(segments, args):\n for segment in segments:\n if len(segment.text) == 0:\n continue\n segment.write_utt2spk(args.utt2spk_file)\n segment.write_segment(args.segments_file)\n segment.write_text(args.text_file)", "def log_segment_information(self, segment_tag, text_lines, real_segment_tag):\n\n final_text_lines = []\n file_name = self.ocromore_data['file_info'].name\n\n # add dividers to the lines\n final_text_lines.append(real_segment_tag + \": \" + file_name + \"------------------------------------------------\")\n final_text_lines.extend(text_lines)\n final_text_lines.append(\"\")\n final_text_lines.append(\"\")\n\n segment_tag = segment_tag.replace(\"/\", \"_\") # fix to prevent folder hop in filename\n\n # print to file finally (append style)\n dh.write_array_to_root_simple(\"segmentation_segments\", segment_tag,\n final_text_lines, self.analysis_root, append_mode=True)", "def writeWaveformTextFile(t, V, path):\n with open(path, 'w') as f:\n for a, b in zip(t, V):\n f.write('{t:g},{V:g}'.format(a, b))\n # for\n # with", "def writeSegmentDetailsKml(outPath,singleSimulation,nodes):", "def write_line_segmentation(file,seg_):\n seg = iulib.intarray()\n seg.copy(seg_)\n ocropus.make_line_segmentation_white(seg)\n iulib.write_image_packed(file,seg)", "def write_timit_seg_file(fname,segdf):\n nseg = len(segdf)\n fp = open(fname,\"w\")\n for i in range(nseg):\n fp.write('{:6d} {:6d} {:10s} \\n'.format(segdf['t0'][i],segdf['t1'][i],segdf['seg'][i]) )\n fp.close()", "def _write_isa_segment(self, seg_data):\n icvn = seg_data.get_value('ISA12')\n if icvn == '00501':\n seg_data.set('ISA11', self.repetition_term)\n seg_data.set('ISA16', self.subele_term)\n out = seg_data.format(\n self.seg_term, self.ele_term, self.subele_term) + self.eol\n self.fd_out.write(out.decode('ascii'))", "def write_radec_file( fname, namestr, headstr, points):\n #depreciated\n return write_event_txt( fname, namestr, headstr, points)\n \n # logger = logging.getLogger() \n # try:\n # with open( fname, 'wt') as f:\n # f.write( namestr)\n # f.write( headstr)\n # for p in points:\n # f.write( p[0] + ', ' +\n # str(p[1]) + ', ' +\n # str(p[2]) + ', ' +\n # '{0:.6E}'.format(p[3][0]) + ', ' +\n # '{0:.6E}'.format(p[3][1]) + ', ' +\n # '{0:.6E}'.format(p[4][0]) + ', ' +\n # '{0:.6E}'.format(p[4][1]) + ', ' +\n # '{0:.3E}'.format(p[5]) + ', ' +\n # '{0:.3E}'.format(p[6][0]) + ', ' +\n # '{0:.3E}'.format(p[6][1]) + '\\n' )\n # except (IOError,OSError):\n # logger.warning('Failed to save event file, '+ fname)\n # return False\n # else:\n # logger.debug('Saved event file, '+ fname)\n # return True", "def save_signal(signal,sample_frequency,filename):\n wavfile.write(filename,sample_frequency,signal)", "def _write_segment(self, seg_data):\n out = seg_data.format(self.seg_term, self.ele_term, self.subele_term) + self.eol\n self.fd_out.write(out.decode('ascii'))", "def scad_file_write(self, scad_file: IO[Any]) -> 
None:\n # Grab some values from *scad* (i.e. *self*):\n scad: Scad = self\n name: str = scad.name\n\n # Store the contents of *scad* as a bunch of *scad_lines*:\n scad_lines: List[str] = list()\n scad_lines.append(f\"// '{name}' File\")\n scad.scad_lines_append(scad_lines, \"\")\n\n # Convert *scad_lines* into *scad_text*:\n scad_lines.append(\"\")\n scad_text: str = '\\n'.join(scad_lines)\n\n # Output *scad_text* to *scad_file*:\n assert scad_file.writable(), f\"Unable to write out .scad for '{name}'\"\n scad_file.write(scad_text)", "def PlotSegmentations(file: str,segmentations, show_figure: bool = True) -> None:\n\n if not os.path.exists(\"output_\"+str(file)+\"/segmentation\"):\n os.makedirs(\"output_\"+str(file)+\"/segmentation\")\n\n\n if isinstance(segmentations, np_.ndarray):\n pl_.matshow(segmentations)\n\n else:\n def __UpdateFigure__(Frame, figure_, plot_, segmentations_):\n idx = int(round(Frame)) # index\n plot_.set_data(segmentations_[idx]) # set the x and y data\n figure_.canvas.draw_idle() # redraw canvas while idle\n\n figure = pl_.figure()\n # add axes to the figure\n plot_axes = figure.add_axes([0.1, 0.2, 0.8, 0.65])\n\n\n\n # plot the values of \"segmentations[time_point=0]\" as color-coded image.\n for i,seg in enumerate(segmentations):\n\n plot_axes.matshow(segmentations[i].T)\n\n # save plot\n pl_.savefig(\"output_\"+str(file)+\"/segmentation/frame_\"+str(i))\n\n\n\n if show_figure:\n pl_.show()", "def write_event_txt( fname, head1str, head2str, points):\n logger = logging.getLogger()\n try:\n with open( fname, 'wt') as f:\n f.write( head1str)\n f.write( head2str)\n for p in points:\n p_string = ','.join( [str(p[0]), str(p[1]), \n str(p[2]), str(p[3][0]), str(p[3][1]), str(p[4][0]), str(p[4][1]),\n str(p[5]), str(p[6][0]), str(p[6][1])] )\n if len(p) > 7: # julian day column exists\n p_string = p_string + ', ' + str(p[7])\n f.write( p_string + '\\n' )\n except (IOError,OSError):\n logger.warning('Failed to save event file, '+ fname)\n return False\n else:\n logger.debug('Saved event file, '+ fname)\n return True", "def write_input(infile,tkin,nh2,cdmol=cdmol_default):\n infile.write(mole+'.dat\\n')\n infile.write('radex.out\\n')\n infile.write(str(flow*(1-bw))+' '+str(fupp/(1-bw))+'\\n')\n infile.write(str(tkin)+'\\n')\n infile.write('1\\n')\n infile.write('H2\\n')\n infile.write(str(nh2)+'\\n')\n infile.write(str(tbg)+'\\n')\n infile.write(str(cdmol)+'\\n')\n infile.write(str(dv)+'\\n')", "def writeFileButtonCallback(self):\n\t\toutputFilename = self.filenameBox.value + \".wav\"\n\t\toutputPath = os.path.join(self.path,\"audio\",outputFilename)\n\n\t\tnumChannels = len(self.activeChannels)\n\n\t\tlogger.logData(source = \"Signal handler\",priority=\"INFO\",msgType=\"Write\",msgData=(outputFilename,numChannels,self.sampleRate,self.signalDuration))\n\n\t\twavfile.write(outputPath, self.sampleRate, self.activeSignal)", "def writeToFile(self, outf):\n\t\t#We want to end up with lines of no more than 8 words, where each word\n\t\t#is in the form 0x1234, separated by commas. 
Each line is separated by\n\t\t#a new line and a tab, and started by a dat code.\n\t\tinf = open(self.real_path, 'rb')\n\t\toutf.write(self.labels.start + \":\\n\\tdat \")\n\t\tword_count = 0 #How many words are on the current line\n\t\tword = inf.read(2) #Read 16 bits at a time\n\t\twhile word:\n\t\t\tword = byte_to_hex(word) #Convert each word to hex\n\t\t\tl = len(word) \n\t\t\tif l < 4: #Is each word 4 characters long?\n\t\t\t\tword += \"0\" * (4-l) #If not, pad it out with 0s\n\t\t\toutf.write(\"0x\"+word)\n\t\t\tword_count += 1 #There's one more word on the line\n\t\t\t\n\t\t\tword = inf.read(2) #Read 16 more bits\n\t\t\tif word: #If we read anything from the file\n\t\t\t\tif word_count >= 8: #If it's the end of the line, write a new line\n\t\t\t\t\toutf.write(\"\\n\\tdat \")\n\t\t\t\t\tword_count = 0\n\t\t\t\telse: #Else it's the middle of a line\n\t\t\t\t\toutf.write(\", \")\n\t\tinf.close()\n\t\toutf.write(\"\\n\"+self.labels.end + \":\\n\\n\")", "def write_hyd(self,fn=None):\n # currently the segment names here are out of sync with \n # the names used by write_parameters.\n # this is relevant for salinity-file, vert-diffusion-file\n # maybe surfaces-file, depths-file.\n # for example, surfaces file is written as tbd-SURF.seg\n # but below we call it com-tbd.srf\n # maybe easiest to just change the code below since it's\n # already arbitrary\n fn=fn or os.path.join( self.scenario.base_path,\n self.fn_base+\".hyd\")\n if os.path.exists(fn):\n if self.overwrite:\n os.unlink(fn)\n else:\n self.log.warning(\"hyd file %s already exists. Not overwriting!\"%fn)\n return\n \n name=self.scenario.name\n\n dfmt=\"%Y%m%d%H%M%S\"\n\n scu=self.scenario.scu\n\n # If symlinking, we want to report the full time period.\n if self.enable_write_symlink:\n time_start,time_stop,timedelta=self.timeline_data()\n else:\n time_start,time_stop,timedelta = self.timeline_scen()\n \n timestep = timedelta_to_waq_timestep(timedelta)\n\n self.infer_2d_elements()\n n_layers=1+self.seg_k.max()\n\n # New code - maybe not right at all.\n # This code is also duplicated across several of the classes in this file.\n # crying out for refactoring.\n if 'temp' in self.parameters():\n temp_file=\"'%s-temp.seg'\"%name\n else:\n temp_file='none'\n \n if 'tau' in self.parameters():\n tau_file=\"'%s-tau.seg'\"%name\n else:\n tau_file='none'\n \n lines=[\n \"file-created-by SFEI, waq_scenario.py\",\n \"file-creation-date %s\"%( datetime.datetime.utcnow().strftime('%H:%M:%S, %d-%m-%Y') ),\n \"task full-coupling\",\n \"geometry unstructured\",\n \"horizontal-aggregation no\",\n \"reference-time '%s'\"%( self.time0.strftime(dfmt) ),\n \"hydrodynamic-start-time '%s'\"%( time_start.strftime(dfmt) ),\n \"hydrodynamic-stop-time '%s'\"%( time_stop.strftime(dfmt) ),\n \"hydrodynamic-timestep '%s'\"%timestep, \n \"conversion-ref-time '%s'\"%( self.time0.strftime(dfmt) ),\n \"conversion-start-time '%s'\"%( time_start.strftime(dfmt) ),\n \"conversion-stop-time '%s'\"%( time_stop.strftime(dfmt) ),\n \"conversion-timestep '%s'\"%timestep, \n \"grid-cells-first-direction %d\"%self.n_2d_elements,\n \"grid-cells-second-direction 0\",\n \"number-hydrodynamic-layers %s\"%( n_layers ),\n \"number-horizontal-exchanges %d\"%( self.n_exch_x ),\n \"number-vertical-exchanges %d\"%( self.n_exch_z ),\n # little white lie. 
this is the number in the top layer.\n # and no support for water-quality being different than hydrodynamic\n \"number-water-quality-segments-per-layer %d\"%( self.n_2d_elements),\n \"number-water-quality-layers %s\"%( n_layers ),\n \"hydrodynamic-file '%s'\"%self.fn_base,\n \"aggregation-file none\",\n # filename handling not as elegant as it could be..\n # e.g. self.vol_filename should probably be self.vol_filepath, then\n # here we could reference the filename relative to the hyd file\n \"grid-indices-file '%s.bnd'\"%self.fn_base,# lies, damn lies\n \"boundaries-file '%s.bnd'\"%self.fn_base, # this one might be true.\n \"grid-coordinates-file '%s'\"%self.flowgeom_filename,\n \"attributes-file '%s.atr'\"%self.fn_base,\n \"volumes-file '%s.vol'\"%self.fn_base,\n \"areas-file '%s.are'\"%self.fn_base,\n \"flows-file '%s.flo'\"%self.fn_base,\n \"pointers-file '%s.poi'\"%self.fn_base,\n \"lengths-file '%s.len'\"%self.fn_base,\n \"salinity-file '%s-salinity.seg'\"%name,\n \"temperature-file %s\"%temp_file,\n \"vert-diffusion-file '%s-vertdisper.seg'\"%name,\n # not a segment function!\n \"surfaces-file '%s'\"%self.surf_filename,\n \"shear-stresses-file %s\"%tau_file,\n \"hydrodynamic-layers\",\n \"\\n\".join( [\"%.5f\"%(1./n_layers)] * n_layers ),\n \"end-hydrodynamic-layers\",\n \"water-quality-layers \",\n \"\\n\".join( [\"1.000\"] * n_layers ),\n \"end-water-quality-layers\"]\n txt=\"\\n\".join(lines)\n with open(fn,'wt') as fp:\n fp.write(txt)", "def save(self, path):\n\n if len(self.voltage_data.shape) > 1:\n # two channel interleaved\n data_unleaved = np.array([self.voltage_data[0::2], self.voltage_data[1::2]]).transpose()\n # datetime stamp experiment here\n self.set_timestamp()\n np.savetxt(path,\n data_unleaved, fmt='%.11f', delimiter=',',\n header=self.get_header()) \n else:\n # datetime stamp experiment here\n self.set_timestamp()\n np.savetxt(path,\n self.voltage_data, fmt='%.11f', delimiter=',',\n header=self.get_header())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new MaptilesDataset object with a subset of df_fns and optionally overwritten transform and target_transform.
def make_subset(self, inds: Iterable[int], transform=None, target_transform=None ): df_fns = self.df_fns.iloc[inds].reset_index(drop=True) return MaptilesDataset( data_root=self.data_root, cities=self.cities, styles=self.styles, zooms=self.zooms, n_channels=self.n_channels, transform=transform if transform is not None else self.transform, target_transform=target_transform if target_transform is not None else self.target_transform, df_fns=df_fns )
[ "def __init__(\n self,\n img_files,\n img_transform: Optional[Callable] = None,\n seg_files=None,\n seg_transform: Optional[Callable] = None,\n labels=None,\n label_transform: Optional[Callable] = None,\n ):\n items = [(img_files, img_transform), (seg_files, seg_transform), (labels, label_transform)]\n self.set_random_state(seed=get_seed())\n super().__init__([Dataset(x[0], x[1]) for x in items if x[0] is not None])", "def test_ColumnSelector(self):\n df = self.df\n tmr = ColumnSelector(columns=self.df.columns)\n for input in [df]:\n with self.subTest(input=input):\n out = tmr.transform(input)", "def prepare_dataset(\n mc: Mashcima,\n annotations: List[str],\n min_staff_with: int,\n single_staff=False\n):\n def _image_generator(annotation_index: int, _: List[str]) -> np.ndarray:\n return _complex_image_generator(\n mc, annotation_index, annotations, single_staff, min_staff_with\n )\n dataset = AnnotationsDataset(annotations, _image_generator)\n dataset = ParallelFeedingDataset(dataset) # make batch preparation parallel\n return dataset", "def transform(self, func, func_description=None):\n\n if not callable(func):\n raise TypeError('Given function {} is not a callable'.format(func))\n\n xfm_ds = self.__class__()\n for samplet, data in self._data.items():\n try:\n xfm_data = func(data)\n except:\n print('Unable to transform features for {}. '\n 'Quitting.'.format(samplet))\n raise\n\n xfm_ds.add_samplet(samplet, xfm_data,\n target=self._targets[samplet])\n\n xfm_ds.description = \"{}\\n{}\".format(func_description, self._description)\n\n return xfm_ds", "def train_feature_selector(\n features_train: pd.DataFrame,\n labels_train: pd.DataFrame,\n parameters: Dict,\n) -> Pipeline:\n col_dict = _get_column_dtype(features_train)\n\n if labels_train.shape[0] == features_train.shape[0]:\n labels_train.index = features_train.index\n\n # Create transformers for each dtype\n transformers = [\n (\"num_n_trans\", StandardScaler(), col_dict[\"num_normal\"]),\n (\n \"num_s_trans\",\n QuantileTransformer(random_state=parameters[\"random_state\"]),\n col_dict[\"num_skewed\"],\n ),\n (\"ordi_trans\", \"passthrough\", col_dict[\"ordinal\"]),\n (\"bool_pass\", \"passthrough\", col_dict[\"boolean\"]),\n (\n \"cat_trans\",\n JamesSteinEncoder(random_state=parameters[\"random_state\"], return_df=False),\n col_dict[\"category\"],\n ),\n ]\n transformers = _remove_unused_transformers(transformers)\n\n # Combine the transformers as preprocessor\n preprocessor = ColumnTransformer(transformers=transformers)\n\n num_cols = col_dict[\"num_normal\"] + col_dict[\"num_skewed\"]\n nomi_cols = col_dict[\"ordinal\"] + col_dict[\"boolean\"] + col_dict[\"category\"]\n\n selector_ct = ColumnTransformer(\n transformers=[\n (\n \"num_selector\",\n SelectPercentile(f_classif, percentile=parameters[\"numeric_pct\"]),\n [x for x in range(0, len(num_cols))],\n ),\n (\n \"nomi_selector\",\n SelectPercentile(chi2, percentile=parameters[\"nominal_pct\"]),\n [x for x in range(len(num_cols), len(num_cols) + len(nomi_cols))],\n ),\n ]\n )\n\n # Extract target\n target_train = labels_train[\"DEFAULT_PAY\"]\n\n # Create feature selector pipeline and train it\n selector = Pipeline(\n steps=[(\"preprocessor\", preprocessor), (\"selector\", selector_ct)]\n )\n selector.fit(features_train, target_train)\n\n return selector", "def test_MajorsSelector(self):\n df = self.df\n tmr = MajorsSelector()\n for input in [df]:\n with self.subTest(input=input):\n out = tmr.transform(input)", "def _load_and_merge(self, f_init, f_y, f_transform):\n df_init 
= pd.DataFrame()\n df_y = pd.DataFrame()\n df_transform = pd.DataFrame()\n\n if f_init:\n df_init = self._X_extra_base\n features = set(df_init.columns)\n features_to_drop = features - set(f_init)\n df_init = df_init.drop(features_to_drop, axis=1)\n\n if f_y:\n if self._y is None:\n raise ValueError('Asked to load feature from y but y is None.')\n\n df_y = self._y[f_y]\n\n if f_transform:\n if self._X_extra is None:\n raise ValueError('Asked to load feature from transformed df '\n 'which id None.')\n\n df_transform = self._X_extra[f_transform]\n\n return pd.concat((df_init, df_y, df_transform), axis=1)", "def _generate_features(self, df: pd.DataFrame, new_feat_cols: list) -> pd.DataFrame:\n check_is_fitted(self, [\"feature_formulas_\"])\n if not new_feat_cols:\n return df\n if new_feat_cols[0] not in self.feature_formulas_:\n raise RuntimeError(\"[AutoFeat] First call fit or fit_transform to generate the features!\")\n if self.verbose:\n logging.info(f\"[AutoFeat] Computing {len(new_feat_cols)} new features.\")\n # generate all good feature; unscaled this time\n feat_array = np.zeros((len(df), len(new_feat_cols)))\n for i, expr in enumerate(new_feat_cols):\n if self.verbose:\n print(f\"[AutoFeat] {i:5}/{len(new_feat_cols):5} new features\", end=\"\\r\")\n if expr not in self.feature_functions_:\n # generate a substitution expression based on all the original symbols of the original features\n # for the given generated feature in good cols\n # since sympy can handle only up to 32 original features in ufunctify, we need to check which features\n # to consider here, therefore perform some crude check to limit the number of features used\n cols = [c for i, c in enumerate(self.feateng_cols_) if colnames2symbols(c, i) in expr]\n if not cols:\n # this can happen if no features were selected and the expr is \"E\" (i.e. the constant e)\n f = None\n f_jit = None\n else:\n try:\n f = lambdify([self.feature_formulas_[c] for c in cols], self.feature_formulas_[expr])\n f_jit = nb.njit(f)\n except Exception:\n logging.exception(f\"[AutoFeat] Error while processing expression: {expr}\")\n raise\n self.feature_functions_[expr] = (cols, f, f_jit)\n else:\n cols, f, f_jit = self.feature_functions_[expr]\n if f is not None and f_jit is not None:\n # only generate features for completely not-nan rows\n not_na_idx = df[cols].notna().all(axis=1)\n try:\n try:\n feat = f_jit(*(df[c].to_numpy(dtype=float)[not_na_idx] for c in cols))\n except nb.TypingError:\n # lambified abs fn with non trivial inputs doesn't jit compile with numba, yet\n # fallback on the non jitted version of the function\n feat = f(*(df[c].to_numpy(dtype=float)[not_na_idx] for c in cols))\n # henceforth, always use the non jitted version of the function\n self.feature_functions_[expr] = (cols, f, f)\n feat_array[not_na_idx, i] = feat\n feat_array[~not_na_idx, i] = np.nan\n except RuntimeWarning:\n logging.warning(\n f\"[AutoFeat] Problem while evaluating expression: {expr} with columns {cols}\",\n \" - is the data in a different range then when calling .fit()? 
Are maybe some values 0 that shouldn't be?\",\n )\n raise\n if self.verbose:\n logging.info(f\"[AutoFeat] {len(new_feat_cols):5}/{len(new_feat_cols):5} new features ...done.\")\n return df.join(pd.DataFrame(feat_array, columns=new_feat_cols, index=df.index))", "def preprocess_dataset(dataset, max_num_instances, num_parallel_calls):\n\n pad_dataset_fn = _pad_dataset(max_num_instances)\n dataset = dataset.map(pad_dataset_fn, num_parallel_calls=num_parallel_calls)\n\n return dataset", "def _transform(self, dataset):\n\n for t in self.transforms:\n method = getattr(dataset, t.name)\n dataset = method(*t.args, **t.kwargs)\n\n return dataset", "def _featurize_df(self, df, which_features, fs, pca_sdb, lda_sdb, pca_cnn):\r\n feats_dict = {}\r\n if 'Sdb' in which_features:\r\n # Sdb df unchanged\r\n feats_dict['Sdb'] = df\r\n\r\n if 'bands' in which_features:\r\n # tb df from bands\r\n bands = pd.DataFrame(data = Sdb_to_bands(df[np.array(fs, dtype='str')].values, fs))\r\n dfbands = pd.concat([df['times'], bands, df['egq'], df['l'], df['caseid']], axis=1)\r\n feats_dict['bands'] = dfbands\r\n\r\n if 'PCA' in which_features:\r\n # same for pca\r\n PCA = pd.DataFrame(data = pca_sdb.transform(df[np.array(fs, dtype='str')].values))\r\n dfPCA = pd.concat([df['times'], PCA, df['egq'], df['l'], df['caseid']], axis=1)\r\n feats_dict['PCA'] = dfPCA\r\n\r\n if 'LDA' in which_features:\r\n # same for LDA\r\n LDA = pd.DataFrame(data = lda_sdb.transform(df[np.array(fs, dtype='str')].values))\r\n dfLDA = pd.concat([df['times'], LDA, df['egq'], df['l'], df['caseid']], axis=1)\r\n feats_dict['LDA'] = dfLDA\r\n\r\n if 'CNN' in which_features:\r\n # helper fcn for CNN\r\n dfCNN = apply_pca_cnn(pca_cnn,np.unique(df.caseid))\r\n feats_dict['CNN'] = dfCNN\r\n \r\n return feats_dict", "def build_datasets_using_scalers(\n train_features,\n test_feature,\n df,\n test_size,\n seed,\n min_feature=-1,\n max_feature=1):\n\n status = ae_consts.NOT_RUN\n scaled_train_df = None\n scaled_test_df = None\n scaler_train = None\n scaler_test = None\n x_train = None\n y_train = None\n x_test = None\n y_test = None\n\n res = {\n 'status': status,\n 'scaled_train_df': scaled_train_df,\n 'scaled_test_df': scaled_test_df,\n 'scaler_train': scaler_train,\n 'scaler_test': scaler_test,\n 'x_train': x_train,\n 'y_train': y_train,\n 'x_test': x_test,\n 'y_test': y_test,\n }\n\n try:\n log.info(\n f'building scalers '\n f'df.rows={len(df.index)} '\n f'columns={len(list(df.columns.values))} '\n f'train_features={len(train_features)} '\n f'test_feature={test_feature}')\n\n if test_feature not in df:\n log.error(\n f'did not find test_feature={test_feature} in '\n f'df columns={df.columns.values}')\n status = ae_consts.FAILED\n res['status'] = status\n return res\n for single_train_feature in train_features:\n if single_train_feature not in df:\n log.error(\n f'did not find '\n f'train_feature={single_train_feature} in '\n f'df columns={df.columns.values}')\n status = ae_consts.FAILED\n res['status'] = status\n return res\n\n train_df = df[train_features]\n test_df = df[[test_feature]]\n\n log.info(\n 'building scaled train df')\n scaled_train_res = scaler_utils.build_scaler_dataset_from_df(\n df=train_df,\n min_feature=min_feature,\n max_feature=max_feature)\n\n log.info(\n 'building scaled test df')\n scaled_test_res = scaler_utils.build_scaler_dataset_from_df(\n df=test_df,\n min_feature=min_feature,\n max_feature=max_feature)\n\n log.info(\n f'scaled df transform '\n f'train_status={scaled_train_res[\"status\"] == ae_consts.SUCCESS} '\n 
f'test_status={scaled_test_res[\"status\"] == ae_consts.SUCCESS}')\n\n if scaled_train_res['status'] == ae_consts.SUCCESS \\\n and scaled_test_res['status'] == ae_consts.SUCCESS:\n log.info(\n f'scaled train_rows={len(scaled_train_res[\"df\"])} '\n f'test_rows={len(scaled_test_res[\"df\"])}')\n\n scaler_train = scaled_train_res['scaler']\n scaler_test = scaled_test_res['scaler']\n scaled_train_df = scaled_train_res['df']\n scaled_test_df = scaled_test_res['df']\n (x_train,\n x_test,\n y_train,\n y_test) = tt_split.train_test_split(\n scaled_train_df,\n scaled_test_df,\n test_size=test_size,\n random_state=seed)\n else:\n log.error(\n f'failed df transform '\n f'train_status={scaled_train_res[\"status\"]} '\n f'test_status={scaled_test_res[\"status\"]}')\n status = ae_consts.FAILED\n res['status'] = status\n return res\n # if built both train and test successfully\n\n log.info(\n f'train_rows={len(train_df.index)} '\n f'test_rows={len(test_df.index)} '\n f'x_train={len(x_train)} '\n f'x_test={len(x_test)} '\n f'y_train={len(y_train)} '\n f'y_test={len(y_test)}')\n\n res['scaled_train_df'] = scaled_train_df\n res['scaled_test_df'] = scaled_test_df\n res['scaler_train'] = scaler_train\n res['scaler_test'] = scaler_test\n res['x_train'] = x_train\n res['y_train'] = y_train\n res['x_test'] = x_test\n res['y_test'] = y_test\n\n status = ae_consts.SUCCESS\n\n except Exception as e:\n log.error(\n f'failed with ex={e} '\n f'building scalers '\n f'df.rows={len(df.index)} '\n f'columns={list(df.columns.values)} '\n f'train_features={train_features} '\n f'test_feature={test_feature}')\n status = ae_consts.ERR\n # try/ex\n\n res['status'] = status\n return res", "def make_classification_dataset(\n dataset: SupportedDataset,\n *,\n transform: Optional[XTransform] = None,\n target_transform: Optional[YTransform] = None,\n transform_groups: Optional[Mapping[str, TransformGroupDef]] = None,\n initial_transform_group: Optional[str] = None,\n task_labels: Optional[Union[int, Sequence[int]]] = None,\n targets: Optional[Sequence[TTargetType]] = None,\n collate_fn: Optional[Callable[[List], Any]] = None\n) -> Union[ClassificationDataset, SupervisedClassificationDataset]:\n\n is_supervised = isinstance(dataset, SupervisedClassificationDataset)\n\n transform_gs = _init_transform_groups(\n transform_groups,\n transform,\n target_transform,\n initial_transform_group,\n dataset,\n )\n targets_data: Optional[DataAttribute[TTargetType]] = _init_targets(dataset, targets)\n task_labels_data: Optional[DataAttribute[int]] = _init_task_labels(\n dataset, task_labels\n )\n\n das: List[DataAttribute] = []\n if targets_data is not None:\n das.append(targets_data)\n if task_labels_data is not None:\n das.append(task_labels_data)\n\n # Check if supervision data has been added\n is_supervised = is_supervised or (\n targets_data is not None and task_labels_data is not None\n )\n\n data: Union[ClassificationDataset, SupervisedClassificationDataset]\n if is_supervised:\n data = SupervisedClassificationDataset(\n [dataset],\n data_attributes=das if len(das) > 0 else None,\n transform_groups=transform_gs,\n collate_fn=collate_fn,\n )\n else:\n data = ClassificationDataset(\n [dataset],\n data_attributes=das if len(das) > 0 else None,\n transform_groups=transform_gs,\n collate_fn=collate_fn,\n )\n\n if initial_transform_group is not None:\n return data.with_transforms(initial_transform_group)\n else:\n return data", "def map(self):\n\t\tself.parse_input_datasets() # Convert input datasets to one 
DataFrame\n\t\tself.parse_merged_dataset() # Perform any specified actions on the merged DataFrame\n\t\tself.parse_output_datasets() # Split the merged DataFrame and output", "def fetch_and_convert_dataset(source_files, target_filename):\n def decorate_fetcher(convert_function):\n def fetch(**kwargs):\n target_path = config.get_data_path(target_filename)\n\n # If the target file does not exist, we need to acquire the\n # source files and convert them\n if not os.path.exists(target_path):\n # Acquire the source files\n source_paths = []\n for src in source_files:\n if not isinstance(src, AbstractSourceFile):\n raise TypeError('source_files should contain'\n '`SourceFile` instances, '\n 'not {}'.format(type(src)))\n p = src.acquire(**kwargs)\n if p is not None:\n source_paths.append(p)\n else:\n print('Failed to acquire {}'.format(src))\n return None\n\n # Got the source files\n # Convert\n converted_path = convert_function(source_paths, target_path)\n\n # If successful, delete the source files\n if converted_path is not None:\n for p in source_paths:\n if os.path.exists(p):\n os.remove(p)\n\n return converted_path\n else:\n # Target file already exists\n return target_path\n\n fetch.__name__ = convert_function.__name__\n\n return fetch\n\n return decorate_fetcher", "def data_processing(labels_df, x_train, y_train, label_map):\n subset = str()\n\n if labels_df.shape[0] == 32384 or labels_df.shape[0] == 3120 or labels_df.shape[0] == 16 or labels_df.shape[0] == 64:\n batch_size = 8 ### Modified for smaller images\n subset = \"train\"\n elif labels_df.shape[0] == 8080 or labels_df.shape[0] == 1920 or labels_df.shape[0] == 8:\n batch_size = 4\n subset = \"valid\"\n elif labels_df.shape[0] == 40669:\n batch_size = 4\n subset = \"test\" \n elif labels_df.shape[0] == 20522:\n batch_size = 2\n subset = \"test-add\" \n else:\n raise ValueError('The dataset format is different than expected')\n\n label_map = label_map\n# images_size = (256, 256)\n images_size = (64, 64)\n\n # Iterate through batches of rows of the dataset\n for i in range(labels_df.shape[0]//batch_size):\n \n temp_labels_df = labels_df.iloc[i*batch_size:((i+1)*batch_size) , :]\n \n # Iterate through the samples batch and create x and y for training\n for f, tags in tqdm(temp_labels_df.values, miniters=100):\n # load a .tif file\n img = io.imread('data/{}-jpg/{}.jpg'.format(subset,f)) ######## Modified for train jpg folder\n img = transform.resize(img, images_size)\n\n### Removed for use of JPEG files:\n# # Add NDVI layer // Removed for usage of JPG files\n# np.seterr(all='warn') # divide by zero, NaN values\n# img_ndvi = np.expand_dims((img[:, :, 3] - img[:, :, 2]) / (img[:, :, 3] + img[:, :, 2]), axis=2) # (NIR - RED) / (NIR + RED)\n# img = np.concatenate((img, img_ndvi), axis=2)\n \n # Create the target array for an image\n targets = np.zeros(17)\n for t in tags.split(' '):\n targets[label_map[t]] = 1 \n\n x_train.append(img)\n y_train.append(targets)\n\n # Format values\n y_train = np.array(y_train, np.uint8)\n x_train = np.array(x_train, np.float16) / 255.\n\n### Removed for use of JPEG files: \n# x_train = np.array(x_train, np.float16) / 65536.\n#### x_train -= 0.5\n#### x_train *= 2 \n\n\n # Save subsets in npz files\n np.save('data/{}-npy/npdatasetX{}'.format(subset, i), x_train)\n x_train = []\n np.save('data/{}-npy/npdatasetY{}'.format(subset, i), y_train)\n y_train = []\n #print \"{} data saved\".format(subset)", "def load_dataframes(\n self,\n store: StoreInput,\n tables: _MULTI_TABLE_DICT_LIST = None,\n columns: 
_MULTI_TABLE_DICT_LIST = None,\n predicate_pushdown_to_io: bool = True,\n categoricals: _MULTI_TABLE_DICT_LIST = None,\n dates_as_object: bool = False,\n predicates: PredicatesType = None,\n ) -> \"MetaPartition\":\n if columns is None:\n columns = {}\n elif set(columns).difference(self.tables):\n raise (\n ValueError(\n \"You are trying to read columns from invalid table(s): {}\".format(\n set(columns).difference(self.tables)\n )\n )\n )\n\n if categoricals is None:\n categoricals = {}\n\n LOGGER.debug(\"Loading internal dataframes of %s\", self.label)\n if len(self.files) == 0:\n # This used to raise, but the specs do not require this, so simply do a no op\n LOGGER.debug(\"Partition %s is empty and has not tables/files\", self.label)\n return self\n new_data = copy(self.data)\n predicates = _combine_predicates(predicates, self.logical_conjunction)\n predicates = _predicates_to_named(predicates)\n\n for table, key in self.files.items():\n table_columns = columns.get(table, None)\n categories = categoricals.get(table, None)\n dataset_uuid, _, indices, file_name = decode_key(key)\n if tables and table not in tables:\n continue\n\n # In case the columns only refer to the partition indices, we need to load at least a single column to\n # determine the length of the required dataframe.\n if table_columns is None:\n table_columns_to_io = None\n else:\n table_columns_to_io = table_columns\n\n filtered_predicates = predicates\n\n self._load_table_meta(dataset_uuid=dataset_uuid, table=table, store=store)\n\n # Filter predicates that would apply to this partition and remove the partition columns\n if predicates:\n # Check if there are predicates that match to the partition columns.\n # For these we need to check if the partition columns already falsify\n # the conditition.\n #\n # We separate these predicates into their index and their Parquet part.\n (\n split_predicates,\n has_index_condition,\n ) = self._split_predicates_in_index_and_content(predicates)\n\n filtered_predicates = []\n if has_index_condition:\n filtered_predicates = self._apply_partition_key_predicates(\n table, indices, split_predicates\n )\n else:\n filtered_predicates = [\n pred.content_part for pred in split_predicates\n ]\n\n # Remove partition_keys from table_columns_to_io\n if self.partition_keys and table_columns_to_io is not None:\n keys_to_remove = set(self.partition_keys) & set(table_columns_to_io)\n # This is done to not change the ordering of the list\n table_columns_to_io = [\n c for c in table_columns_to_io if c not in keys_to_remove\n ]\n\n start = time.time()\n df = DataFrameSerializer.restore_dataframe(\n key=key,\n store=store,\n columns=table_columns_to_io,\n categories=categories,\n predicate_pushdown_to_io=predicate_pushdown_to_io,\n predicates=filtered_predicates,\n date_as_object=dates_as_object,\n )\n LOGGER.debug(\"Loaded dataframe %s in %s seconds.\", key, time.time() - start)\n # Metadata version >=4 parse the index columns and add them back to the dataframe\n\n df = self._reconstruct_index_columns(\n df=df,\n key_indices=indices,\n table=table,\n columns=table_columns,\n categories=categories,\n date_as_object=dates_as_object,\n )\n\n df.columns = df.columns.map(ensure_string_type)\n if table_columns is not None:\n # TODO: When the write-path ensures that all partitions have the same column set, this check can be\n # moved before `DataFrameSerializer.restore_dataframe`. 
At the position of the current check we\n # may want to double check the columns of the loaded DF and raise an exception indicating an\n # inconsistent dataset state instead.\n missing_cols = set(table_columns).difference(df.columns)\n if missing_cols:\n raise ValueError(\n \"Columns cannot be found in stored dataframe: {}\".format(\n \", \".join(sorted(missing_cols))\n )\n )\n\n if list(df.columns) != table_columns:\n df = df.reindex(columns=table_columns, copy=False)\n new_data[table] = df\n return self.copy(data=new_data)", "def create_dataset_from_feature_sets(records, preprocessed_records, presence_feature_set, count_feature_set):\r\n dataset = records.map(lambda record: (record[constants.KEY], record[constants.VALUE][constants.TEXT]))\\\r\n .join(preprocessed_records)\\\r\n .join(presence_feature_set)\\\r\n .join(count_feature_set)\\\r\n .map(lambda row: (row[0], row[1][0][0][0], row[1][0][0][1], row[1][0][1], row[1][1]))\r\n return dataset", "def map(self, *args, **kwargs):\n from .datasets_core import DataSetMap\n for a in args:\n if isinstance(a, DSColumn):\n if a.dataset is not self:\n raise ValueError('%s is not a column of %s.' % (a.name, self.dataset_name))\n a = a.name\n if not isinstance(a, str):\n raise ValueError('Arguments of .map() should be column names, not %s.' % type(a))\n kwargs[a] = a\n return DataSetMap(self, kwargs, keep_all_columns=False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collect and count the maptiles from `cities`, for each style in `styles` and at each zoom level in `zooms`
def collect_fns(data_root: Path,
                cities: Iterable[str] = None,
                styles: Iterable[str] = None,
                zooms: Iterable[str] = None,
                verbose: bool = False,
                ) -> pd.DataFrame:
    # Collect as a record/row = Tuple[str, str, str, int] for a dataframe
    rows = []
    for city_dir in data_root.iterdir():
        if city_dir.is_dir():
            city = city_dir.stem
            if verbose:
                print(f"\n{city}")
            if city not in cities:
                if verbose:
                    print(f"Skipping... {city}")
                continue
            for style_dir in city_dir.iterdir():
                if style_dir.is_dir():
                    style = style_dir.stem
                    if verbose:
                        print(f"\n\t{style}")
                    if style not in styles:
                        if verbose:
                            print(f"Skipping... {style}")
                        continue
                    for zoom_dir in style_dir.iterdir():
                        if zoom_dir.is_dir():
                            z = zoom_dir.stem
                            if verbose:
                                print(f"\n\t\t{z}")
                            if z not in zooms:
                                if verbose:
                                    print(f"Skipping... {z}")
                                continue
                            for fpath in zoom_dir.iterdir():
                                if fpath.is_file():
                                    rows.append([city, style, z, fpath])
    # Construct a dataframe
    df_counts = pd.DataFrame(rows, columns=['city', 'style', 'zoom', 'fpath'])
    return df_counts
[ "def add_cities_to_map(map, cities, city_number):\n counter = 0\n for city in cities:\n try:\n coordinates = find_coordinates(city[0])\n counter += 1\n map.add_child(folium.CircleMarker(location=coordinates,\n radius=10,\n popup=city[0] + \" \" +\n str(round(float(city[1])/1000000, 1))\n + \" mln\",\n fill_color='red',\n color='red',\n fill_opacity=0.5))\n except:\n pass\n if counter == city_number:\n break", "def count_tiles(\n geometry, pyramid, minzoom, maxzoom, init_zoom=0, rasterize_threshold=0\n):\n if not 0 <= init_zoom <= minzoom <= maxzoom: # pragma: no cover\n raise ValueError(\"invalid zoom levels given\")\n # tile buffers are not being taken into account\n unbuffered_pyramid = BufferedTilePyramid(\n pyramid.grid, tile_size=pyramid.tile_size, metatiling=pyramid.metatiling\n )\n height = pyramid.matrix_height(init_zoom)\n width = pyramid.matrix_width(init_zoom)\n # rasterize to array and count cells if too many tiles are expected\n if width > rasterize_threshold or height > rasterize_threshold:\n logger.debug(\"rasterize tiles to count geometry overlap\")\n return _count_cells(unbuffered_pyramid, geometry, minzoom, maxzoom)\n\n logger.debug(\"count tiles using tile logic\")\n return _count_tiles(\n [\n unbuffered_pyramid.tile(*tile_id)\n for tile_id in product(\n [init_zoom],\n range(pyramid.matrix_height(init_zoom)),\n range(pyramid.matrix_width(init_zoom)),\n )\n ],\n geometry,\n minzoom,\n maxzoom,\n )", "def get_cities(stations):\n page = open('wikipage.html')\n soup = BeautifulSoup(page, 'html.parser')\n\n cities_table = soup.find_all('table')[3]\n\n city_rows = cities_table.find_all('tr')[1:]\n\n index = 1\n\n cities = []\n\n for row in city_rows:\n data = row.find_all('td')\n city = {}\n\n latlong = data[8].find('span', class_='geo').text\n latlong = latlong.split('; ')\n\n city['latitude'] = float(latlong[0])\n city['longitude'] = float(latlong[1])\n # city['station'] = \\\n # get_nearest_station((city['latitude'], city['longitude']), stations)\n\n city['wiki'] = data[1].a.get('href')\n city['name'] = data[1].a.text\n print(city['name'])\n city['state'] = data[2].a.text\n if city['state'] == \"Hawai'i\":\n city['state'] = 'Hawaii'\n city['region'] = us_regions.STATES_TO_REGIONS[city['state']]\n city['population'] = int(data[3].text.replace(',', ''))\n city['id'] = str(index).rjust(3, '0')\n try:\n growth_rate = data[5].font.text\n positive = '+' in growth_rate\n for ch in growth_rate:\n if ch in ['+', u'\\u2212', '%']:\n growth_rate = growth_rate.replace(ch, '')\n growth_rate = float(growth_rate)\n if not positive:\n growth_rate *= -1\n except AttributeError:\n growth_rate = 0.0\n city['growth_rate'] = growth_rate\n nearby_stations = get_nearest_station((city['latitude'], city['longitude']), stations)\n for station in nearby_stations:\n try:\n # print(station)\n city['climate'] = get_station_weather(copy.deepcopy(station))\n # city['station'] = copy.deepcopy(station)\n print(station['ghcn_id'], station['distance'])\n break\n except ValueError as ve:\n pass\n # print(ve.args)\n cities.append(city)\n index += 1\n return cities", "def getDiceCountsForCities(self, locations, keyword):\n\t\t# Only return one result because we're interested only in total count, this speeds things up slightly\n\t\tbase_str = \"http://service.dice.com/api/rest/jobsearch/v1/simple.json?text={}&city={}&pgcnt=1&age=7\"\n\t\titems = []\n\t\tfor location in locations: \n\t\t\tplace = \"{}, {}\".format(location[0], location[1])\n\t\t\tquotedKeyword = urllib.parse.quote(self.quoteSpaced(keyword)) 
\n\t\t\tquotedPlace = urllib.parse.quote(str(place)) \n\t\t\tformatted = base_str.format(quotedKeyword, quotedPlace) \t\n\t\t\t# print(formatted)\t\t\n\t\t\tr = requests.get(formatted)\n\t\t\tcount = r.json()['count'] \n\t\t\titems.append(int(count))\n\t\treturn items", "def build_county_geometries(site):\n county_list = []\n map_coverage = ''\n site_counties = get_consumer_count_per_county(site)\n for county in site_counties:\n county_list.append(county[0])\n poly = transform_market_geom(GEOSGeometry(county[1]))\n if poly:\n map_coverage = map_coverage + str(poly) + ';' \\\n + str(county[0]) + ';' + str('directory/') + '|'\n return county_list, map_coverage", "def add_city_info(cities, download=False):\n\n image_name_pattern = re.compile(r'\\d+px-(.*)')\n city_county_exceptions = {'New York': 'New York', 'Chicago': 'Cook', 'Denver': 'Denver',\n 'Washington': 'District of Columbia', 'San Francisco': 'San Francisco',\n 'Colorado Springs': 'El Paso', 'New Orleans': 'Orleans', 'Anchorage':\n 'Anchorage', 'Baton Rouge': 'East Baton Rouge', 'Shreveport': 'Caddo',\n 'Augusta': 'Richmond', 'Fort Collins': 'Larimer', 'Lakewood': 'Jefferson',\n 'Columbia': 'Richland', 'Thornton': 'Adams', 'New Haven': 'New Haven',\n 'Lafayette': 'Lafayette', 'Aurora': '', 'Honolulu': 'Honolulu',\n 'Collin': 'Denton', 'Athens': 'Clarke', 'Abiline': 'Taylor', 'Arvada':\n 'Jefferson', 'Westminster': 'Adams', 'Pueblo': 'Pueblo', 'Greeley': 'Weld',\n 'Boulder': 'Boulder', 'Centennial': 'Arapahoe', 'Richardson': 'Dallas',\n 'Plano': 'Collin'\n }\n # Getting the rent data\n area_rent = []\n with open('FMRArea_FY2016F_50_RevFinal.csv') as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n for row in data:\n area_rent.append([row[2].lower(), row[5].lower()])\n county_rent = []\n with open('FY2016F_50_RevFinal.csv') as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n for row in data:\n county_rent.append([row[12], row[16], row[2]])\n\n for city in cities:\n remove_noncentral_months(city)\n id = city['id']\n city_file = open('city_pages/' + id + '.html')\n city_wiki = BeautifulSoup(city_file, 'html.parser')\n intro = city_wiki.find(id='mw-content-text').find('p', recursive=False).text\n # Removing citations\n intro = re.sub(r'\\[\\w+\\]', '', intro)\n intro = re.sub(r' \\(.+?\\)', '', intro)\n intro = re.sub(r'/*/', '', intro)\n thumb_url = city_wiki.find(class_='infobox').find('img')['src'][2:]\n image_name_match = re.search(image_name_pattern, thumb_url)\n if image_name_match is None:\n image_name = thumb_url.split('/')[-1]\n else:\n image_name = image_name_match.group(1)\n if download:\n urllib.urlretrieve('http://' + thumb_url, 'city_images/' + id + '.jpg')\n time.sleep(0.5)\n if city['name'] in city_county_exceptions.keys():\n city['county'] = city_county_exceptions[city['name']]\n # There are two cities called Aurora\n if city['name'] == 'Aurora':\n if city['state'] == 'Colorado':\n city['county'] = 'Arapahoe'\n else:\n city['county'] = 'Kane'\n else:\n county_th = city_wiki.find(class_='infobox').find(\n 'th', text=re.compile(r'County'))\n if county_th:\n city['county'] = county_th.find_next('td').text.strip()\n else:\n # If there is no county field because there are multiple counties\n counties_th = city_wiki.find(class_='infobox').find(\n 'th', text=re.compile(r'Counties'))\n county_list = counties_th.find_next('td').text.strip().split(', ')\n # Uses the first county in the list\n city['county'] = county_list[0]\n rent_area_pattern = re.compile(city['name'] + r'.*' + 
us_regions.US_STATE_ABBREV[city['state']])\n # print(city['county'])\n city['photo_details_url'] = 'https://commons.wikimedia.org/wiki/File:' + image_name\n add_median_rent(city, area_rent, county_rent)\n # print(city['photo_details_url'])\n city['photo_url'] = thumb_url\n city['wiki_intro'] = intro", "def queryRouteInfo(self, list_of_cities):\n total_distance = 0\n total_cost = 0\n total_time = 0\n cost_per_km = 0.35\n i = 0\n while i < len(list_of_cities) - 1:\n if type(list_of_cities[0]) == str:\n src = self.graph.getCityByNameOrCode(list_of_cities[0].strip())\n else:\n src = list_of_cities[0]\n if type(list_of_cities[1]) == str:\n dest = self.graph.getCityByNameOrCode(list_of_cities[1].strip())\n else:\n dest = list_of_cities[1]\n if src == False or dest == False: # invalid src or dest\n return False\n if not (dest in src.destinations): # not connected\n return False\n distance = src.destinations[dest]\n total_distance += distance\n\n if cost_per_km == 0: # keep it free\n pass\n else: # decrease the cost for another leg\n total_cost += cost_per_km * distance\n cost_per_km -= 0.05\n\n # add layover time\n if i != 0:\n total_time += self.calculateLayoverTime(len(src.destinations))\n # add flying time\n total_time += self.calculateFlyingTime(distance)\n i += 1\n return {\"total_distance\": total_distance,\n \"total_cost\": total_cost,\n \"total_time\": total_time}", "def create_worldmap(citydata):\n\n tempdf = citydata[['City', 'Country', 'Population', 'City Location']].dropna(subset=['City Location'])\n\n tempdf['lon'] = tempdf['City Location'].apply(lambda point: getlonlat(point)[0])\n tempdf['lat'] = tempdf['City Location'].apply(lambda point: getlonlat(point)[1])\n\n # population 2 has nas filled with mean to prevent errors when plotting size according to population\n tempdf['Population2'] = tempdf.Population.fillna(tempdf.Population.mean())\n\n # adding population limit as brazils gionia population overshadows other cities\n maxpop = np.max(tempdf.Population)\n tempdf['PopLim'] = tempdf.Population2.apply(lambda x: min(x, maxpop / 100))\n\n tempdf['Population'] = tempdf.Population.fillna('Not Reported')\n tempdf['markertext'] = tempdf['City'] + ', ' + tempdf['Country'] + '<br>Population:' + tempdf['Population'].astype(\n 'str')\n\n fig = go.Figure()\n scale = 500000\n fig.add_trace(go.Scattergeo(\n lon=tempdf.lon,\n lat=tempdf.lat,\n text=tempdf.markertext,\n marker=\n dict(\n size=tempdf.PopLim / scale,\n color='#7079fb'\n )\n )\n )\n\n fig.update_layout(title=\"World Map of Reporting Cities\",\n titlefont={'size':18},\n margin=dict(l=1, r=1, t=40, b=20),\n font_size=10,)\n vizJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\n legJSON = create_worldplotlegend(tempdf, scale)\n\n return vizJSON, legJSON", "def calculateTiles(self, pCity):\r\n\t\tfor i in range(gc.getNUM_CITY_PLOTS()):\r\n\t\t\tpPlot = pCity.getCityIndexPlot(i)\r\n\t\t\tif pPlot and not pPlot.isNone() and pPlot.hasYield():\r\n\t\t\t\tif pCity.isWorkingPlot(pPlot):\r\n\t\t\t\t\tself._addTile(WORKED_TILES, pPlot)\r\n\t\t\t\telif pCity.canWork(pPlot):\r\n\t\t\t\t\tself._addTile(CITY_TILES, pPlot)\r\n\t\t\t\telif pPlot.getOwner() == pCity.getOwner():\r\n\t\t\t\t\tself._addTile(OWNED_TILES, pPlot)\r\n\t\t\t\telse:\r\n\t\t\t\t\tself._addTile(ALL_TILES, pPlot)", "def number_cities(self):\n return self.model.number_cities", "def worldmap(map_data):\n df = pd.DataFrame(map_data)\n if not df.empty:\n df = df.groupby([\"Country\", \"ISO Code\"]).count().reset_index()\n return df\n else:\n return 
pd.DataFrame(columns=[\"No. of Positions\",\n \"Country\",\n \"ISO Code\"])", "def get_cities(zipcodes):\n zip_cities = dict()\n for idx, zipcode in enumerate(zipcodes):\n url = 'http://maps.googleapis.com/maps/api/geocode/json?address='+zipcode+'&sensor=true'\n response = urllib.urlopen(url)\n data = json.loads(response.read())\n city = data['results'][0]['address_components'][1]['long_name']\n state = data['results'][0]['address_components'][3]['long_name']\n zip_cities.update({idx: [zipcode, city, state]})\n return zip_cities", "def state_sizes(years = [1878]): \n df = load_location_data_and_clean()\n yr1 = df[df.Year == years[0]] # probably better to just use an array of 0s and do no assignment or popping prior\n years.pop(0)\n result = pd.DataFrame(data=yr1['Prisoners'].values, index = list(df['Location'].unique()))\n # result = {}\n # states = list(df['State'].unique())\n\n for year in years:\n yr_df = df[df['Year'] == year]\n temp = pd.DataFrame(data=yr_df['Prisoners'].values, index = list(df['Location'].unique()))\n result = result + temp\n return result", "def test_city_placement_on_map(self):\n\n event_name = 'BNPB-SCENARIO'\n\n expected_result = {10: ['Loa',\n 'Samarinda',\n 'Balikpapan',\n 'Bontang',\n 'Palu',\n 'Majene',\n 'Rantepao',\n 'Poso',\n 'Baolan',\n 'Polewali',\n 'Pare',\n 'Kota',\n 'Palopo'],\n 100: ['Loa',\n 'Palu',\n 'Majene',\n 'Rantepao',\n 'Poso',\n 'Baolan',\n 'Kota'],\n 200: ['Loa',\n 'Palu',\n 'Majene',\n 'Kota'],\n 500: ['Loa']}\n\n # Run test for a range of distance limits\n for d in [10, 100, 200, 500]:\n\n # Check that reference data exists\n msg = 'There is no reference data for distance_limit %i' % d\n assert d in expected_result, msg\n\n # Run\n event_info, A = calculate_event_info(shakedata_dir, event_name)\n pop_expo, R = calculate_pop_expo(event_info, A, library_dir)\n C = city_info(R, A, library_dir, event_info)\n cities_on_map(C, distance_limit=d)\n\n # Verify result against reference data\n fid = open('city.txt')\n for i, line in enumerate(fid.readlines()):\n fields = line.strip().split()\n city = fields[-1]\n\n try:\n ref_city = expected_result[d][i]\n except IndexError, e:\n msg = ('%s: Insufficient reference data for '\n 'distance_limit %i and city %s. 
'\n 'Invalid index was %i'\n % (e, d, city, i))\n raise Exception(msg)\n\n # Check that city names match\n msg = ('Cities do not match: Got %s but expected %s'\n % (city, ref_city))\n assert city == ref_city, msg\n\n\n # Clean up\n cmd = '/bin/rm -rf city.txt'\n os.system(cmd)", "def run_for_cities(cities: List[str], version: str):\n raw_df = load_data(cities=cities)\n features_local, features_global = normalize_data(raw_df, column_groups=None, agg_column='city', drop_columns=['directions_whole_day'])\n \n features_local = features_local.drop(columns=['city'])\n features_global = features_global.drop(columns=['city'])\n\n models = {\n # 'local': load_model(features_local, 'local', version),\n 'global': load_model(features_global, 'global', version)\n }\n\n p = os.path.join(REPORTS_DIRECTORY, version)\n\n # run_with_normalization(models['local'], features_local, raw_df, 'local', version, p, cities)\n run_with_normalization(models['global'], features_global, raw_df, 'global', version, p, cities)", "def build_city_geometries(site):\n city_data = ''\n market_cities = USCity.objects.filter(\n us_county__sites__id=site.id\n )\n for city in market_cities:\n if city.geom:\n poly = transform_market_geom(city.geom, simp_start=15)\n if poly:\n city_data += str(poly) + ';city_' + str(city.name) + ';;' + \\\n str(city.id) + '|'\n else:\n city_data += str(Point(\n city.coordinate.longitude, city.coordinate.latitude, \n srid=4326).transform(900913, clone=True)) \\\n + ';city_' + str(city.name) + ';;' + str(city.id) + '|'\n return city_data", "def sum_regions(regions_list):\n cnt_tweets = defaultdict(int)\n cnt_score = defaultdict(int)\n for regions in regions_list:\n for region in regions:\n cnt_tweets[region.region_id] += region.count\n cnt_score[region.region_id] += region.score\n cells = ['A1', 'A2', 'A3', 'A4', 'B1', 'B2', 'B3', 'B4', 'C1', 'C2', 'C3', 'C4', 'C5', 'D3', 'D4', 'D5']\n print(f'{\"Cell\":<6}' f'{\"#Total Tweets\":^18}' f'{\"#Overal Sentiment Score\":^25}')\n for i in cells:\n print(f'{i:<8}' f'{cnt_tweets[i]:^15}' f'{cnt_score[i]:^25}')", "def generate_cities_averages(temp, multi_cities, years):\n average_annual_temperatures = []\n for year in years:\n annual_temperatures = []\n for city in multi_cities:\n annual_temperatures.append(temp.get_yearly_temp(city, year))\n average_annual_temperatures.append(np.mean(annual_temperatures))\n return np.array(average_annual_temperatures)", "def generateCities(self):\n citiesDict = {}\n for k in CITIES_TEMPLATE:\n citiesDict[k] = City(k, CITIES_TEMPLATE[k][\"connections\"], CITIES_TEMPLATE[k][\"colour\"])\n return citiesDict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
In-place projection to the manifold. Returns the same tensor instance.
def proj_(self) -> torch.Tensor:
    return copy_or_set_(self, self.manifold.projx(self))
[ "def projective_transform(self):\r\n return transform.ProjectiveTransform(self.persp_matrix)", "def unproject_to_image_plane(self, Xi):\n Xi = np.array(Xi)\n u_hat = self.unproject(Xi)\n v = self.camera_location\n mag_v = np.linalg.norm(v)\n cos_th = np.dot(u_hat,v) / mag_v\n u = (mag_v / cos_th) * u_hat\n return v + u", "def project_vector(self,x) :\n\n n_dofs = 4*self.param.n_cells\n projection = np.zeros(4*self.param.n_mom*self.param.n_cells)\n coarse_n_mom = x.shape[0]/(4*self.param.n_cells)\n if self.param.galerkin==True :\n skip = (-1+np.sqrt(1+2*self.param.n_mom))/2-1\n tmp_end = coarse_n_mom-(skip-1)\n residual = coarse_n_mom-tmp_end\n projection[0:n_dofs*tmp_end] = x[0:n_dofs*tmp_end]\n projection[n_dofs*(tmp_end+skip):n_dofs*(tmp_end+skip+residual)] =\\\n x[n_dofs*tmp_end:n_dofs*(tmp_end+residual)]\n else :\n projection[0:4*self.n_mom*self.param.n_cells] = x[0:4*coarse_n_mom*\\\n self.param.n_cells]\n \n return projection", "def flat(self):\n return Vec3(self.x, self.y, 0)", "def forward(self):\n return Vector3.from_np(Vector3(0, 0, 1).to_np4(0) @ self.get_matrix())", "def inverse(self):\n ret = copy.deepcopy(self)\n for l in xrange(0, self.lmax + 1):\n ret.clmat[l, :, :] = np.linalg.pinv(self.clmat[l])\n return ret", "def identity():\n # type: () -> Projection\n return _IdentityProjection()", "def postProjectionMatrix(*args, **kwargs):\n \n pass", "def unproject(win, modelView, modelProj, viewport):\n # Compute the inverse transform\n m = np.linalg.inv(modelProj @ modelView) # 4 x 4\n winx = win[:, 0]\n winy = win[:, 1]\n winz = win[:, 2]\n # [B, 4]\n input_ = np.zeros((win.shape[0], 4), dtype=win.dtype)\n input_[:, 0] = (winx - viewport[0]) / viewport[2] * 2.0 - 1.0\n input_[:, 1] = (winy - viewport[1]) / viewport[3] * 2.0 - 1.0\n input_[:, 2] = winz * 2.0 - 1.0\n input_[:, 3] = 1.0\n out = (m @ input_.T).T\n # Check if out[3] == 0 ?\n out[:, 3] = 1 / out[:, 3]\n out[:, 0] = out[:, 0] * out[:, 3]\n out[:, 1] = out[:, 1] * out[:, 3]\n out[:, 2] = out[:, 2] * out[:, 3]\n return out[:, :3]", "def copy(self) -> \"SbProjector *\":\n return _coin.SbSpherePlaneProjector_copy(self)", "def project(K, X):\r\n if X.shape[0] == 3:\r\n uv = K @ X\r\n elif X.shape[0] == 4:\r\n uv = K @ X[:3,:]\r\n\r\n uv /= uv[-1,:]\r\n return uv[0,:], uv[1,:]", "def superimpose_apply(atoms, transformation):\n trans1, rot, trans2 = transformation\n s_coord = coord(atoms).copy()\n s_coord += trans1\n s_coord = np.dot(rot, s_coord.T).T\n s_coord += trans2\n\n if isinstance(atoms, np.ndarray):\n return s_coord\n else:\n transformed = atoms.copy()\n transformed.coord = s_coord\n return transformed", "def projected_positions(X,Q):\n return X @ (Q @ Q.T)", "def project(self, X): \n return (X).dot(self.eigenfaces.T)", "def project(self, point: 'SbVec2f') -> \"SbVec3f\":\n return _coin.SbSpherePlaneProjector_project(self, point)", "def project(self, point: 'SbVec2f') -> \"SbVec3f\":\n return _coin.SbPlaneProjector_project(self, point)", "def computeProjection(self):\n if (not self.MComputed):\n self.M = np.zeros((self.nZernike,self.nZernike,self.nHeight,self.nStars))\n for i in tqdm(range(self.nHeight), desc='Height'): \n for j in tqdm(range(self.nStars), desc='Stars'): \n if (self.numericalProjection):\n self.M[:,:,i,j] = projection.zernikeProjectionMatrixNumerical(self.nZernike, self.beta[i,j], self.t[i,j], self.angle[i,j], verbose=True, radius=128, includePiston=self.addPiston)\n else:\n self.M[:,:,i,j] = projection.zernikeProjectionMatrix(self.nZernike, self.beta[i,j], self.t[i,j], self.angle[i,j], 
verbose=True, includePiston=self.addPiston)\n np.savez('matrices/transformationMatrices_{0}.npz'.format(uuid.uuid4()), self.M, self.heights, self.nStars, self.nZernike, self.fov, self.DTel)\n self.stackProjection()", "def inverse(self):\n return Transform(self.m_inv, self.m)", "def axisRotationProjectionInPlace(*args):\n return _almathswig.axisRotationProjectionInPlace(*args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check whether the current slide is IHC stained
def check_ihc_slide(slide):
    sample = slide.read_region((0, 0), slide.level_count - 1,
                               (slide.level_dimensions[slide.level_count - 1][0],
                                slide.level_dimensions[slide.level_count - 1][1]))
    sample = sample.convert('RGB')
    sample_hsv = color.rgb2hsv(np.asarray(sample))
    # brownish stain
    roi_ihc = (sample_hsv[:, :, 0] >= 0.056) & (sample_hsv[:, :, 0] <= 0.34) & (sample_hsv[:, :, 2] > 0.2) & (
        sample_hsv[:, :, 1] > 0.04)
    skmp.remove_small_holes(roi_ihc, area_threshold=500, connectivity=20, in_place=True)
    skmp.remove_small_objects(roi_ihc, min_size=500, connectivity=20, in_place=True)
    is_ihc = float(np.sum(roi_ihc)) / float((roi_ihc.shape[0] * roi_ihc.shape[1])) > 0.01
    return is_ihc
[ "def is_contageous(self):\n return (self.health == Health.contageous or self.health == Health.sick)", "def HasSIC(self):\n return self.__has('SIC')", "def isContinuous(self): # real signature unknown; restored from __doc__\n pass", "def HasHIS(self):\n return self.__has('HIS')", "def is_hiseq_x(self) -> bool:\n LOG.debug(\"Check if flow cell is Hiseq X\")\n return self.hiseq_x_flow_cell.exists()", "def isTransitionTrack(self):\r\n\t\treturn None", "def is_inspired(sim_info: SimInfo) -> bool:\n return CommonMoodUtils.has_mood(sim_info, CommonMoodId.INSPIRED)", "def is_stationary(self):\n ...", "def is_in_observingstate(self):\n if not self.checkobservingallowed():\n return False\n swlevel = self._lcu_interface.get_swlevel()\n if swlevel == 3:\n return True\n if swlevel == 2:\n # If in tile-off mode, count it as observing state\n if self.septonconf:\n return True\n else:\n return False\n else:\n return False", "def issiso(self):\n return self.ninputs == 1 and self.noutputs == 1", "def is_start(self) -> bool:\n return self.num_river == 1 and self.num_coast == 0", "def is_in_weierstrass_disc(self,P):\n if (P[1].valuation() == 0 and P != self(0,1,0)):\n return False\n else:\n return True", "def cirrus_test(self):\n th_cirrus = 0.0113\n\n return self.cirrus > th_cirrus", "def __nonzero__(self) -> \"bool\":\n return _itkImagePython.vectoritkImageSS3___nonzero__(self)", "def is_continuous(self):\n return True", "def isCycleBound(self) -> bool:\n ...", "def __nonzero__(self) -> \"bool\":\n return _itkImagePython.vectoritkImageSS2___nonzero__(self)", "def is_at_cog(self):\n return np.all(self._point == self._cog)", "def isAnalogue(self):\n return self.rawData[1] in range(1, 9)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
generate binary mask for a given tile
def generate_binary_mask(tile):
    tile_hsv = color.rgb2hsv(np.asarray(tile))
    roi1 = (tile_hsv[:, :, 0] >= 0.33) & (tile_hsv[:, :, 0] <= 0.67)
    roi1 = ~roi1
    skmp.remove_small_holes(roi1, area_threshold=500, connectivity=20, in_place=True)
    skmp.remove_small_objects(roi1, min_size=500, connectivity=20, in_place=True)
    tile_gray = color.rgb2gray(np.asarray(tile))
    masked_sample = np.multiply(tile_gray, roi1)
    roi2 = (masked_sample <= 0.8) & (masked_sample >= 0.2)
    skmp.remove_small_holes(roi2, area_threshold=500, connectivity=20, in_place=True)
    skmp.remove_small_objects(roi2, min_size=500, connectivity=20, in_place=True)
    return tile_hsv, roi2
[ "def compute_mask(self, t, default_mask):\n pass", "def _to_binary_mask(self, array):\n # check where the transparency is not zero\n return (array[..., -1] > 0).astype(self.raster_dtype) * self.raster_value", "def action_mask(self, state: int):\n mask = np.zeros(6, dtype=np.int8)\n taxi_row, taxi_col, pass_loc, dest_idx = self.decode(state)\n if taxi_row < 4:\n mask[0] = 1\n if taxi_row > 0:\n mask[1] = 1\n if taxi_col < 4 and self.desc[taxi_row + 1, 2 * taxi_col + 2] == b\":\":\n mask[2] = 1\n if taxi_col > 0 and self.desc[taxi_row + 1, 2 * taxi_col] == b\":\":\n mask[3] = 1\n if pass_loc < 4 and (taxi_row, taxi_col) == self.locs[pass_loc]:\n mask[4] = 1\n if pass_loc == 4 and (\n (taxi_row, taxi_col) == self.locs[dest_idx]\n or (taxi_row, taxi_col) in self.locs\n ):\n mask[5] = 1\n return mask", "def as_mask(n, coding='big'):\n m = map(int, bin(n)[2:])\n return tuple(m if coding == 'big' else reversed(list(m)))", "def generate_mask(self, x):\n mask = np.ones_like(x.detach().cpu().numpy(), dtype=bool)\n bs, size = x.shape\n for i, lam in enumerate(self.lam.squeeze()):\n prop = int(lam * size)\n mask[i,:prop] = False\n if self.continuous:\n mask[i] = np.roll(mask[i], np.random.choice(size))\n else:\n np.random.shuffle(mask[i,:])\n return torch.tensor(mask)", "def int_to_bitmap(n):\n\n ret = ''\n for y in xrange(7, -1, -1):\n for x in xrange(0, 8):\n idx = y * 8 + x\n\n if n & (1 << idx):\n ret += '*'\n else:\n ret += '.'\n ret += '\\n'\n\n return ret", "def make_mask(self, num_ones):\n res = 0\n for i in range(num_ones):\n res |= (1 << i)\n return res", "def _mask_binary(self):\n mask_binary = ''\n for i in range(self.ip_length):\n if i < self.mask_length:\n mask_binary += '1'\n else:\n mask_binary += '0'\n return mask_binary", "def mask_byte(byte, mask=\"11111111\"):\n i = int(mask, 2)\n return byte & i", "def get_binmask(mask):\n binmask = np.zeros(len(mask))\n condition = np.where(mask != 0.)[0]\n binmask[condition] = 1.\n return binmask", "def _bit_set(self,mask, n):\n bs = bin(mask)[2:].rjust(32,'0')\n bs = bs[::-1]\n if bs[n]=='1':\n return True\n else:\n return False", "def get_binary_mask(op_weights):\n return binary_mask(op_weights[\"mask\"])", "def get_bits_mask(self):\n return (1 << self.width) - 1", "def _relabel_tile(\n inp_tile: numpy.ndarray,\n mapping: dict[int, int],\n) -> numpy.ndarray:\n out_tile = numpy.copy(inp_tile)\n for k, v in mapping.items():\n mask = inp_tile == k\n out_tile[mask] = v\n return out_tile", "def generate_square_subsequent_mask(sz, device):\n mask = (torch.triu(torch.ones(sz, sz, device=device)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float(\n '-inf')).masked_fill(mask == 1, float(0.0))\n return mask", "def _mask(value: int, high_bit: int, low_bit: int) -> int:\n high_mask = (1 << (high_bit + 1)) - 1\n low_mask = (1 << low_bit) - 1\n mask = (high_mask ^ low_mask)\n # print(high_bit, low_bit, bin(mask), bin(value))\n return value & mask", "def compute_static_mask(box_num: Tensor):\n max_len = documents.MAX_BOXES_NUM\n mask = torch.arange(0, max_len, device=box_num.device).expand((box_num.shape[0], max_len))\n box_num = box_num.expand_as(mask)\n mask = mask < box_num\n row_mask = mask.unsqueeze(1)\n column_mask = mask.unsqueeze(2)\n mask = row_mask & column_mask\n mask = ~mask * -1\n return mask.unsqueeze(-1)", "def IBM(self):\n\t\tprint('Ideal Binary Mask')\n\t\ttheta = 0.5\n\t\tmask = np.divide(self._sTarget ** self._alpha, (self._eps + self._nResidual ** self._alpha))\n\t\tbg = np.where(mask >= theta)\n\t\tsm = 
np.where(mask < theta)\n\t\tmask[bg[0], bg[1]] = 1.\n\t\tmask[sm[0], sm[1]] = 0.\n\t\tself._mask = mask", "def one_cold_mask(idx, tensor):\n result = np.ones([tensor.get_shape()[-1].value], dtype='bool')\n result[idx] = False\n return tf.tile(tf.expand_dims(result, axis=0), [tf.shape(tensor)[0], 1])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get config for frontend web map (e.g. access tokens)
def get_map_config():
    settings = get_settings()
    access_token = settings.w_mapbox_token if settings.w_mapbox_token else MAPBOX_ACCESS_TOKEN
    style = settings.w_mapbox_style if settings.w_mapbox_style else MAPBOX_STYLE
    return schema.MapConfig(mapbox_token=access_token, mapbox_style=style)
[ "def test_api_get_frontend_configuration_default(self):\n\n response = self.client.get(\"/api/config.json\")\n\n self.assertEqual(\n response.json(),\n {\n \"API_URL\": \"http://localhost:8070/api\",\n \"JITSI_DOMAIN\": \"meeting.education\",\n \"KEYCLOAK_URL\": \"http://localhost:8080\",\n \"KEYCLOAK_REALM\": \"magnify\",\n \"KEYCLOAK_CLIENT_ID\": \"magnify-front\",\n \"KEYCLOAK_EXPIRATION_SECONDS\": 1800,\n \"LANGUAGE_CODE\": \"en\",\n \"SHOW_REGISTER_LINK\": True,\n },\n )", "def get_config(self) -> NodeManagerConfig:", "def get_access_configs():\n\n parser = configparser.ConfigParser()\n parse_result = parser.read('access.ini')\n if not parse_result:\n return(None)\n\n # else\n return((parser['GOODREADS_ACCESS']['KEY'], parser['GOODREADS_ACCESS']['SECRET']))", "def get_public_config(_):\n public_config = {}\n if rbac_permissions.is_admin():\n if hasattr(settings, 'BOOTSTRAP_ADMIN_USERS'):\n public_config['BOOTSTRAP_ADMIN_USERS'] = settings.BOOTSTRAP_ADMIN_USERS\n return public_config", "def getConfig():\n return Cuebot.Config", "def get_academic_backend_config_dict():\n return get_academic_backend_config().to_dict()", "def get_config(self):\n response = requests.get(f\"{self.CONFIG_SERVER_URL}/{self.APP_NAME}/{self.ENV}/{self.LABEL}\")\n if response.status_code != 200:\n raise ConfigError(f\"Unable to access config server. {response.text}\")\n app_config = response.json()\n\n self.v = self.patch_config(app_config)", "def __get_configs():\n configs = {}\n for entry_point in pkg_resources.iter_entry_points(\"matlab_desktop_proxy_configs\"):\n configs[entry_point.name] = entry_point.load()\n\n return configs", "def get_cherrypy_config():\n cp_conf = {\n '/login' : {\n 'tools.auth_basic.on': True,\n 'tools.auth_basic.realm': 'pofis',\n 'tools.auth_basic.checkpassword': Authenticator().cp_authenticate,\n 'tools.auth_basic.accept_charset': 'UTF-8'},\n '/tutorials' : {\n 'tools.auth_basic.on': True,\n 'tools.auth_basic.realm': 'pofis',\n 'tools.auth_basic.checkpassword': Authenticator().cp_authenticate,\n 'tools.auth_basic.accept_charset': 'UTF-8'\n },\n '/refdocs' : {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': find_refdoc_dir(),\n 'tools.staticdir.index': 'index.html'\n },\n '/styles': {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': get_styles_dir()\n },\n '/ace': {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': get_ace_dir()\n }\n }\n return cp_conf", "def getconfigmgr(self):\r\n\t\treturn self.cfg", "def get_config(self):\n return self.ag_config", "def get(self, request):\n return self.json(self.config)", "def get_config():\n with open(\"config.json\", \"r\") as f:\n data = f.read()\n return json.loads(data)", "def getConfig():\n if sys.platform == \"Windows\":\n cred_file = \"C:\\\\Windows\\\\carbonblack\\\\credentials.cbc\"\n else:\n home = expanduser(\"~\")\n cred_file = f\"{home}/.carbonblack/credentials.cbc\"\n\n with open(cred_file) as file:\n datafile = file.readlines()\n for line in datafile:\n if \"url\" in line:\n address = line.split(\"=\")[1]\n elif \"token\" in line:\n auth_token = line.split(\"=\")[1]\n elif \"org\" in line:\n org = line.split(\"=\")[1]\n auth_token = str(auth_token).strip(\"\\n\")\n headers = {\n \"X-Auth-Token\": auth_token,\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n return (address, headers, org)", "def get_bucket_app_config(self):\n client = ConfigClient(self.gcp_env)\n return client.get_server_config()", "def site_config(self) -> pulumi.Output['outputs.LinuxWebAppSiteConfig']:\n 
return pulumi.get(self, \"site_config\")", "def site_config(self) -> pulumi.Input['LinuxWebAppSiteConfigArgs']:\n return pulumi.get(self, \"site_config\")", "def u_boot_config(request):\n\n return console.config", "def fio_configmap_dict():\n return fio_artefacts.get_configmap_dict()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sanitize user input by escaping or removing potentially harmful input using a whitelist-based approach with bleach, as recommended by OWASP.
def sanitize_input(self, untrusted_text, strip_characters=False):
    try:
        # Test if the untrusted text is percent encoded
        # before running bleech.
        if unquote(untrusted_text) != untrusted_text:
            use_percent_encoding = True
        else:
            use_percent_encoding = False
        return self._sanitize_input(untrusted_text,
                                    strip_characters=strip_characters,
                                    percent_encoded=use_percent_encoding)
    except UnicodeDecodeError:
        current_app.logger.warn('A malicious user tried to crash the application '
                                'by sending non-unicode input in a GET request')
        abort(400)
[ "def sanitize(self, s):\n s = s.lower().replace(\" \", \"\").replace(\"-\", \"\").replace(\",\", \"\").replace(\":\", \"\").replace(\"&\",\"and\").replace(\"(\",\"\").replace(\")\",\"\").strip()\n # Additional sanitization rules\n s = s.replace(\"sulphate\",\"sulfate\")\n return s", "def Sanitize(Content): # for your protection\n \n ### strip any illegal HTML\n Content = re.sub(r\"(?is)<.+?>\", HTMLChecker, Content)\n\n ### validate any links\n Content = re.sub(r'(?is)(<A .*?HREF=\")(.+?)(\".*?>)', LinkChecker, Content)\n \n ### then escape any funky characters\n ### TODO: is this really neccesary for the database?\n \n # Content = re.escape(Content)\n\n return Content", "def _sanitize(self, message):\n message = re.sub(r\"[^a-zA-Z0-9]\", \"\", message)\n\n self._update_text(message)", "def _safe_clean(self, untrusted_text, strip_characters=False):\n try:\n return clean(untrusted_text, strip=strip_characters)\n except KeyError:\n current_app.logger.warn('A malicious user tried to crash the application by '\n 'sending illegal UTF-8 in an URI or other untrusted '\n 'user input.')\n abort(400)", "def _safe_clean(untrusted_text, strip_characters=False):\n try:\n return clean(untrusted_text, strip=strip_characters)\n except KeyError:\n logger.warn('A malicious user tried to crash the application by '\n 'sending illegal UTF-8 in an URI or other untrusted '\n 'user input.')\n raise HTTPBadRequest(\"Non-unicode input, please try again.\")", "def sanitary(dirty_input):\n illegal_chars = [\"?\", \"&\", \"\\\\\", \"'\", \"|\", \";\"]\n for char in illegal_chars:\n if (dirty_input.find(char) >= 0):\n return False\n return True", "def SafeForHTML(user_input):\n return cgi.escape(user_input, quote=True)", "def _sanitize(self, target):\n return target.lower().strip()", "def str_sanitize(s):\n return re.sub('\\^[0-9]', '', re.sub(r'<[^>]*>', '', s)).replace('ß', 'ss').replace('ü', 'ue').\\\n replace('ö', 'oe').replace('ä', 'ae').replace('à', 'a').replace('è', 'e').replace('é', 'e').\\\n replace('ì', 'i').replace('ò', 'o').replace('ù', 'u').replace('ç', 'c').replace('€', 'euro').\\\n replace('$', 'dollar').replace('£', 'pound').replace('%', 'pc').replace('\"', \"''\").strip()", "def sanitize(w):\n\n # print w\n\n map = {'æ': 'ae',\n 'ø': 'o',\n '¨': 'o',\n 'ß': 'ss',\n 'Ø': 'o',\n '\\xef\\xac\\x80': 'ff',\n '\\xef\\xac\\x81': 'fi',\n '\\xef\\xac\\x82': 'fl'}\n\n # This replaces funny chars in map\n for char, replace_char in map.items():\n w = re.sub(char, replace_char, w)\n\n # w = unicode(w, encoding='latin-1')\n # w = str(w, encoding=\"utf-8\")\n\n # This gets rite of accents\n w = ''.join((c for c in unicodedata.normalize('NFD', w) if unicodedata.category(c) != 'Mn'))\n\n return w", "def _sanitize_common(request, function_attribute, *args):\n try:\n if hasattr(request, 'content_type'):\n return sanitize_input(getattr(request, function_attribute).get(*args),\n content_type=request.content_type)\n else:\n # The DummyRequest class used for testing has no attribute content_type\n return sanitize_input(getattr(request, function_attribute).get(*args))\n except UnicodeDecodeError:\n logger.warn('A malicious user tried to crash the application '\n 'by sending non-unicode input in a {!r} request'\n .format(function_attribute))\n raise HTTPBadRequest(\"Non-unicode input, please try again.\")", "def sanitize(tag):\r\n tag = tag.replace(\"'\",\"''\")\r\n return tag", "def sanitize_input(text, strip=True, lower=True):\n tmp = text[0:]\n\n if strip:\n tmp = re.sub(r'\\s+', '', tmp)\n 
#tmp.translate(tmp.maketrans({None))\n \n if lower:\n tmp = tmp.lower()\n\n return tmp", "def sanitize_domain(domain):\n whitelist_pattern = re.compile(r\"[^\\.\\-_a-zA-Z0-9]\")\n return whitelist_pattern.sub(\"\", domain)", "def clean_text(text):\n text = re.sub(r\"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+\", \"URL\", text) # Replace urls with special token\n text = text.replace(\"\\'s\", \"\")\n text = text.replace(\"\\'\", \"\")\n text = text.replace(\"n\\'t\", \" n\\'t\")\n text = text.replace(\"@\", \"\")\n text = text.replace(\"#\", \"\")\n text = text.replace(\"_\", \" \")\n text = text.replace(\"-\", \" \")\n text = text.replace(\"&amp;\", \"\")\n text = text.replace(\"&gt;\", \"\")\n text = text.replace(\"\\\"\", \"\")\n text = text.replace(\".\", \"\")\n text = text.replace(\",\", \"\")\n text = text.replace(\"(\", \"\")\n text = text.replace(\")\", \"\")\n text = ' '.join(text.split())\n return text.strip()", "def filter_input(string):\n return ((unicode(string)).lower()).strip()", "def bleach_html(html): \n \n clean = bleach.clean(html).strip()\n \n # if the source doesn't include <p> tags, enforce them.\n if not re.search(r'^<p>', clean):\n clean = \"<p>%s</p>\"%clean\n \n # now the template can treat this string as HTML safely\n return mark_safe(clean)", "def _sanitize(self, definition):\n # # removes empty lines\n # self.definition = re.sub(r'\\s*\\n\\s*', r'\\n', self.definition)\n # # removes spaces around = signs\n # self.definition = re.sub(r'\\s*=\\s*', '=', self.definition)\n # removes spaces after commas, colons, dashes etc.\n definition = definition.strip().lower()\n definition = re.sub(r'\\s*(?P<sep>[,;-_=\\n])\\s*', r'\\g<sep>', definition)\n return definition", "def sanitise(param: str):\n if not param:\n return ''\n return urllib.parse.quote(param)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrapper for the clean function of bleach to be able to catch when illegal UTF-8 is processed.
def _safe_clean(self, untrusted_text, strip_characters=False):
    try:
        return clean(untrusted_text, strip=strip_characters)
    except KeyError:
        current_app.logger.warn('A malicious user tried to crash the application by '
                                'sending illegal UTF-8 in an URI or other untrusted '
                                'user input.')
        abort(400)
[ "def _safe_clean(untrusted_text, strip_characters=False):\n try:\n return clean(untrusted_text, strip=strip_characters)\n except KeyError:\n logger.warn('A malicious user tried to crash the application by '\n 'sending illegal UTF-8 in an URI or other untrusted '\n 'user input.')\n raise HTTPBadRequest(\"Non-unicode input, please try again.\")", "def filter_invalid_unicode(text):\n return (\"\", True) if isinstance(text, bytes) else (text, False)", "def validUTF8(data):\n # Use maks, to clean byte of anything beyond 8 least significant bits.\n cleanByte = [rawByte & 0b11111111 for rawByte in data]\n\n # Cast to byte type.\n byte = bytes(cleanByte)\n\n # Attempt to decode byte data.\n try:\n byte.decode()\n except UnicodeDecodeError:\n # If decoding fails, return False.\n return False\n\n return True", "def test_bad_bytestring(self):\n bytestring = b'\\x10\\x00'\n convert_tag(bytestring, True)", "def remove_control_characters(html):\n # type: (t.Text) -> t.Text\n # See: https://github.com/html5lib/html5lib-python/issues/96\n #\n # The XML 1.0 spec defines the valid character range as:\n # Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]\n #\n # We can instead match the invalid characters by inverting that range into:\n # InvalidChar ::= #xb | #xc | #xFFFE | #xFFFF | [#x0-#x8] | [#xe-#x1F] | [#xD800-#xDFFF]\n #\n # Sources:\n # https://www.w3.org/TR/REC-xml/#charsets,\n # https://lsimons.wordpress.com/2011/03/17/stripping-illegal-characters-out-of-xml-in-python/\n def strip_illegal_xml_characters(s, default, base=10):\n # Compare the \"invalid XML character range\" numerically\n n = int(s, base)\n if n in (0xb, 0xc, 0xFFFE, 0xFFFF) or 0x0 <= n <= 0x8 or 0xe <= n <= 0x1F or 0xD800 <= n <= 0xDFFF:\n return \"\"\n return default\n\n # We encode all non-ascii characters to XML char-refs, so for example \"💖\" becomes: \"&#x1F496;\"\n # Otherwise we'd remove emojis by mistake on narrow-unicode builds of Python\n html = html.decode('utf8').encode(\"ascii\", \"xmlcharrefreplace\").decode(\"utf-8\")\n html = re.sub(r\"&#(\\d+);?\", lambda c: strip_illegal_xml_characters(c.group(1), c.group(0)), html)\n html = re.sub(r\"&#[xX]([0-9a-fA-F]+);?\", lambda c: strip_illegal_xml_characters(c.group(1), c.group(0), base=16), html)\n html = ILLEGAL_XML_CHARS_RE.sub(\"\", html)\n return html", "def checker(self, ansi, raw, clean):\r\n self.assertEqual(unicode(ansi.clean()), clean)\r\n self.assertEqual(unicode(ansi.raw()), raw)", "def clean_html_encodings(text: str) -> str:\n return str(BeautifulSoup(text, 'html.parser'))", "def remove_diacritics(buffer):\n unicodedata.normalize('NFKD', unicode(buffer)).encode('ASCII', 'ignore')", "def _ignore_somecode(text):\n text = re.sub('\\r', '', text)\n text = re.sub('\\f', '', text)\n text = re.sub('\\0', '', text)\n return text", "def normalize_utf8(string):\n if isinstance(string, unicode):\n return normalize('NFC', string)\n else:\n return normalize('NFC', string.decode('utf-8'))", "def remove_zh(x):\n\n def func(_s):\n return re.sub(r'[\\u4e00-\\u9fff]+', '', _s)\n return _parse(func, x)", "def filter_invalid_unicode_from_table(table):\n # to do: add table id support\n if not hasattr(table, \"table_id\"):\n table.table_id = 0\n\n for row_index, row in table.iterrows():\n for col_index, cell in enumerate(row):\n cell, is_invalid = filter_invalid_unicode(cell)\n if is_invalid:\n logging.warning(\n f\"Scrub an invalid table body @ table_id: {table.table_id}, row_index: {row_index}, \"\n f\"col_index: {col_index}\",\n )\n for col_index, 
column in enumerate(table.columns):\n column, is_invalid = filter_invalid_unicode(column)\n if is_invalid:\n logging.warning(f\"Scrub an invalid table header @ table_id: {table.table_id}, col_index: {col_index}\")", "def clean_html(text):\r\n text = re.sub(r'<.*?>', '', str(text))\r\n text = re.sub(r'[\\x80-\\xff]', '', text)\r\n text = unescape(text)\r\n return text", "def validUTF8(data):\n if data is None or len(data) == 0:\n return True\n numOfFiller = 0\n for byte in data:\n if numOfFiller > 0:\n tmp = verify_byte(byte, 5)\n numOfFiller = numOfFiller - 1\n if not tmp:\n return False\n else:\n if verify_byte(byte, 1):\n numOfFiller = 0\n elif verify_byte(byte, 2):\n numOfFiller = 1\n elif verify_byte(byte, 3):\n numOfFiller = 2\n elif verify_byte(byte, 4):\n numOfFiller = 3\n else:\n return False\n if numOfFiller > 0:\n return False\n return True", "def test_utf8_file_correctness(self):\n self.assertTrue(valet.view(self.test_utf8_file).find(u'ταὐτὰ παρίσταταί') >= 0)", "def _clean_message(message):\n return message.split('\\x00')[0]", "def clean_csdata(self) -> None:", "def decode_utf8(self, text):\n try:\n return text.decode('utf-8', 'strict') if self.utf8 else text.decode(self.fallback, errors='replace')\n except UnicodeDecodeError:\n return text.decode(self.fallback, 'replace')", "def remove_emoji(text):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the contents as regular dict. If `flat` is `True` the returned dict will only have the first item present, if `flat` is `False` all values will be returned as lists.
def to_dict(self, flat=True): if flat: d = {} for k, v in iteritems(self): v = self.sanitize_input(v) d[k] = v return d return dict(self.lists())
[ "def from_shodan_flattened(raw: Union[list, dict]) -> dict:\n return flatten(from_shodan(raw))", "def flatten(data: Dict) -> Dict[str, Any]:\n return recursive_flatten(\"\", data)", "def doc_from_flat(doc, flat):\n def doc_from_flat_inner(doc, pos):\n if isinstance(doc, (list, tuple)):\n rval = []\n for d_i in doc:\n d_i_clone, pos = doc_from_flat_inner(d_i, pos)\n rval.append(d_i_clone)\n rval = type(doc)(rval)\n\n elif isinstance(doc, dict):\n rval = type(doc)()\n if isinstance(doc, OrderedDict):\n sortedkeys = doc.iterkeys()\n else:\n sortedkeys = sorted(doc.iterkeys())\n for k in sortedkeys:\n v_clone, pos = doc_from_flat_inner(doc[k], pos)\n rval[k] = v_clone\n\n else:\n rval = flat[pos]\n pos += 1\n return rval, pos\n return doc_from_flat_inner(doc, 0)[0]", "def doc_flat_to_nested(key_list, val):\n res = {}\n if len(key_list) > 1:\n res[key_list[0]] = doc_flat_to_nested(key_list[1:], val)\n elif len(key_list) == 1:\n res[key_list[0]] = val\n else:\n raise Exception('invalid key_list @%s' % doc_flat_to_nested.__name__)\n return res", "def flatten(obj, entries):\n\n result = {}\n\n for entry in entries:\n parts = entry.split('__')\n value_parts = obj[parts[0]]\n while len(parts) > 1:\n parts = parts[1:]\n value_parts = value_parts[parts[0]]\n result[entry] = value_parts.replace('\\r\\n',' ') if type(value_parts) is str else value_parts\n\n return result", "def init_dict_flat_to_init_dict(init_dict_flat):\n\n init_dict = dict()\n\n init_dict[\"GENERAL\"] = dict()\n init_dict[\"GENERAL\"][\"num_periods\"] = init_dict_flat[\"num_periods\"]\n\n init_dict[\"CONSTANTS\"] = dict()\n init_dict[\"CONSTANTS\"][\"delta\"] = init_dict_flat[\"delta\"]\n init_dict[\"CONSTANTS\"][\"mu\"] = init_dict_flat[\"mu\"]\n init_dict[\"CONSTANTS\"][\"benefits\"] = init_dict_flat[\"benefits\"]\n\n init_dict[\"INITIAL_CONDITIONS\"] = dict()\n init_dict[\"INITIAL_CONDITIONS\"][\"educ_max\"] = init_dict_flat[\"educ_max\"]\n init_dict[\"INITIAL_CONDITIONS\"][\"educ_min\"] = init_dict_flat[\"educ_min\"]\n\n init_dict[\"SIMULATION\"] = dict()\n init_dict[\"SIMULATION\"][\"seed_sim\"] = init_dict_flat[\"seed_sim\"]\n init_dict[\"SIMULATION\"][\"num_agents_sim\"] = init_dict_flat[\"num_agents_sim\"]\n\n init_dict[\"SOLUTION\"] = dict()\n init_dict[\"SOLUTION\"][\"seed_emax\"] = init_dict_flat[\"seed_emax\"]\n init_dict[\"SOLUTION\"][\"num_draws_emax\"] = init_dict_flat[\"num_draws_emax\"]\n\n return init_dict", "def as_flat_dict(self):\n flat_params = {}\n\n def recurse(parameters, path):\n for key, value in parameters.items():\n newpath = path + [key]\n if isinstance(value, dict):\n recurse(value, newpath)\n else:\n flat_params['.'.join(newpath)] = value\n\n recurse(self.params, [])\n\n flat_params = recursively_expandvars(flat_params, ext_vars=self.ext_vars)\n return flat_params", "def flatten(self, cls=None):\n if cls is None:\n from clu.config.keymap import FrozenFlat\n cls = FrozenFlat\n return cls({ pack_ns(key, *namespaces) : value for *namespaces, key, value in self.walk() })", "def structurize(self, depth):\n # TODO: it should be more readable and simpler implementing\n # this function recursively\n rgrow_dict = self.value\n tmp_keypathlist = copy.deepcopy(self.keypathlist)\n while tmp_keypathlist[depth:]:\n try:\n tmp_dataroot = tmp_keypathlist[-2]\n except IndexError:\n tmp_dataroot = 'temporarydataroot'\n if len(tmp_keypathlist) == 1:\n index = 0\n else:\n index = -1\n try:\n key = int(tmp_keypathlist[index])\n except ValueError:\n key = tmp_keypathlist[index]\n if type(key) is int:\n 
locals()[tmp_dataroot] = []\n locals()[tmp_dataroot].append(rgrow_dict)\n elif type(key) is str:\n locals()[tmp_dataroot] = {}\n locals()[tmp_dataroot][key] = rgrow_dict\n rgrow_dict = locals()[tmp_dataroot]\n tmp_keypathlist.pop()\n\n return rgrow_dict", "def pack_dictionary_flat(dict_):\n i64 = np.dtype('<i8')\n\n header_dict = {}\n current_data_offset = 0\n\n element_data = []\n\n for k, v in dict_.items():\n if isinstance(v, np.ndarray) and v.dtype != np.byte:\n # General numpy arrays\n with io.BytesIO() as temp:\n np.save(temp, v, allow_pickle=False)\n v = np.frombuffer(temp.getvalue(), dtype=np.byte)\n data_type = 1\n elif isinstance(v, np.ndarray):\n # Assume that we have a packed flat array\n data_type = 2\n else:\n # We have a general object\n v = np.frombuffer(pickle.dumps(v, protocol=4), dtype=np.byte)\n data_type = 0\n\n header_dict[k] = (current_data_offset, len(v), data_type)\n current_data_offset += len(v)\n element_data.append(v)\n\n header_dict_data = pickle.dumps(header_dict, protocol=4)\n total_size = 3 * i64.itemsize + len(header_dict_data) + current_data_offset\n\n result = np.empty(total_size, dtype=np.byte)\n\n offset = 0\n offset = _write_slice(result, offset, _DICTIONARY_MAGIC_BYTES)\n offset = _write_slice(result, offset, np.array([1], dtype=i64)) # version\n offset = _write_slice(result, offset, np.array([len(header_dict_data)], dtype=i64))\n offset = _write_slice(result, offset, np.frombuffer(header_dict_data, dtype=np.byte))\n\n for element in element_data:\n offset = _write_slice(result, offset, element)\n\n return result", "def _recursive_dict(element):\n return element.tag, dict(map(_recursive_dict, element)) or element.text", "def flat_graph(self) -> Dict[str, dict]:\n return GraphFlattener().flatten(node=self)", "def as_dict(self) -> dict:\n return self._squash()", "def to_config_dict(self, flatten: bool = None) -> Dict:\n key = self.get_config_key()\n config_items = {key: self._convert_config_params()}\n for ss_key, ss in self.subconfigs.items():\n ss_dict = ss.to_config_dict()\n\n if self.flatten_sub_configs:\n config_items.update(**ss_dict)\n else:\n config_items[key].update(**ss_dict)\n\n return config_items", "def unflatten(flat_dict):\n unflat_dict = {}\n\n for compound_key, value in flat_dict.items():\n curr_dict = unflat_dict\n parts = compound_key.split(\".\")\n for key in parts[:-1]:\n curr_value = curr_dict.get(key)\n if key not in curr_dict:\n curr_dict[key] = {}\n curr_dict = curr_dict[key]\n elif isinstance(curr_value, dict):\n curr_dict = curr_value\n else:\n raise ConfigurationError(\"flattened dictionary is invalid\")\n if not isinstance(curr_dict, dict) or parts[-1] in curr_dict:\n raise ConfigurationError(\"flattened dictionary is invalid\")\n else:\n curr_dict[parts[-1]] = value\n\n return unflat_dict", "def get_data(self, flatten=False):\n self.to_graph_objs()\n l = list()\n for _plotlydict in self:\n l += [_plotlydict.get_data(flatten=flatten)]\n del_indicies = [index for index, item in enumerate(self)\n if len(item) == 0]\n del_ct = 0\n for index in del_indicies:\n del self[index - del_ct]\n del_ct += 1\n\n if flatten:\n d = {}\n for i, e in enumerate(l):\n for k, v in e.items():\n key = \"{0}.{1}\".format(i, k)\n d[key] = v\n return d\n else:\n return l", "def returnItems(dict):\n return dict.items()", "def formatted_flat_dict(model):\n return pretty_print_format(to_dict(model))", "def _flatten_query_args(args):\n def _make_flat(item):\n if not item:\n return None\n if not isinstance(item, list):\n return item\n # item -> list\n if 
len(item) == 1:\n return item[0] if item[0] else None # Empty string -> None\n return [x if x else None for x in item]\n\n Validator.is_instance(dict, args=args)\n res = dict()\n for key, val in args.items():\n res[key] = _make_flat(val)\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sanitized, type conversion get. The value identified by `key` is sanitized, and if `type` is provided, the value is cast to it.
def get(self, key, default=None, type=None): try: val = self.sanitize_input(self[key]) if type is not None: val = type(val) except (KeyError, ValueError): val = default return val
[ "def get_generic(self, _key: str, _type):\n set_func = {\n \"bool\" : self.get_bool,\n \"float\" : self.get_float,\n \"int\" : self.get_int,\n \"point\" : self.get_point,\n \"points\": self.get_points,\n \"str\" : self.get_str\n }\n\n # noinspection PyArgumentList\n return set_func.get(_type)(_key)", "def type(self, key):\n val = self.get(key)\n if val is not None:\n return self._get_key_type(val)\n return None", "def trans_type(_value, _type):\n if _type == 'int':\n return int(_value)\n if _type == 'string':\n return str(_value)\n return _value", "def _type_check(self, key):\n if self._type == \"I\" and isinstance(key,str):\n raise TypeError(\"STDict keys is set as type int()\")\n\n elif self._type == \"S\" and isinstance(key,int):\n raise TypeError(\"STDict keys is set as type str()\")\n else:\n return", "def get_context_value(ctx, key, type_):\n rval = None\n\n # return None for invalid key\n if key is None:\n return rval\n\n # get default language\n if type_ == '@language' and type_ in ctx:\n rval = ctx[type_]\n\n # get specific entry information\n if key in ctx['mappings']:\n entry = ctx['mappings'][key]\n if entry is None:\n return None\n\n # return whole entry\n if type_ is None:\n rval = entry\n # return entry value for type\n elif type_ in entry:\n rval = entry[type_]\n\n return rval", "def decode_value(type, value):\n\n if value is None:\n return type()\n else:\n return type(value)", "def _get_cast_type(field_type: type, value: Any) -> Optional[Callable]:\n if type(value) is dict:\n return _get_cast_type_for_dict(field_type)\n if type(value) is str:\n return _get_cast_type_for_str(field_type)\n return None", "def _convert_type(doc, key_or_keys, converter):\n if isinstance(key_or_keys, str):\n doc[key_or_keys] = converter(doc[key_or_keys])\n else:\n for key in key_or_keys:\n doc[key] = converter(doc[key])", "def _is_type(self, key, value, etype, none=False):\n if not isinstance(value, etype):\n if none and value is None:\n return value\n self._error(\"{} must be {}, not {}\", self._keyname(key), etype, type(value))\n return value", "def _cast_type(self, value):\n try:\n # Try to cast to integer, or JSON\n value = json.loads(value)\n return value\n except ValueError:\n return value", "def convert_to_type(type: str, val: str) -> Union[str, int, float, bytes, bool, Any]:\n if type is None or type in (\"str\", \"string\"):\n return val\n elif type in (\"int\", \"integer\"):\n return int(val)\n elif type in (\"float\", \"number\"):\n return float(val)\n elif type == \"bytes\":\n return val.encode(\"utf-8\")\n elif type == \"bool\":\n if isinstance(val, bool):\n return val\n return False if val.lower() in (\"false\", \"0\", \"no\") else True\n elif type == \"json\":\n if val in (\"\", None):\n return val\n if isinstance(val, str):\n return json.loads(val)\n return val\n else:\n raise ValueError(\n \"variable type can only be: bool, str, int, float, bytes or json\"\n )", "def _validate_key(key: Union[str, ObservableType]) -> ObservableType:\n key_error_message = (\n \"The key must either be an `ObservableType` object or a \"\n \"string representation of an `ObservableType` object.\"\n )\n\n if isinstance(key, str):\n try:\n key = ObservableType(key)\n except ValueError:\n raise KeyError(key_error_message)\n\n elif not isinstance(key, ObservableType):\n raise KeyError(key_error_message)\n\n return key", "def __getitem__(self, key):\n #retrieve the value\n curValue = self._d[key.lower().strip()]\n \n #check if the value is a bool\n if curValue.strip().lower() in ['yes','true']:\n return 
True\n if curValue.strip().lower() in ['no','false']:\n return False\n \n #check if value is a int\n if curValue.strip().isdigit():\n return int(curValue)\n \n #try to convert it to a float\n try:\n curValue = float(curValue)\n return curValue\n except ValueError:\n pass\n \n #return it as a string\n return curValue", "def get_prop_type(value, key=None):\n \"\"\"\n if isinstance(key, unicode):\n # Encode the key as ASCII\n key = key.encode('ascii', errors='replace')\n \"\"\"\n\n \"\"\"\n elif isinstance(value, unicode):\n tname = 'string'\n value = value.encode('ascii', errors='replace')\n \"\"\"\n\n # Deal with the value\n if isinstance(value, bool):\n tname = 'bool'\n\n elif isinstance(value, int):\n tname = 'float'\n value = float(value)\n\n elif isinstance(value, float):\n tname = 'float'\n\n elif isinstance(value, dict):\n tname = 'object'\n\n else:\n tname = 'string'\n value = str(value)\n\n return tname, value, key", "def set_generic(self, _key: str, _type, _value):\n set_func = {\n \"bool\" : self.set_bool,\n \"float\" : self.set_float,\n \"int\" : self.set_int,\n \"point\" : self.set_point,\n \"points\": self.set_points,\n \"str\" : self.set_str\n }\n\n # noinspection PyArgumentList\n set_func.get(_type)(_key, _value)", "def get_record_from_key(type_: type) -> type:\n if not typing_inspect.is_generic_type(type_):\n raise Exception(f'Cannot get associated key from not generic type {type_.__name__}')\n\n from datacentric.types.record import TypedKey, TypedRecord, RootRecord\n from typing import ForwardRef\n\n generic_base = typing_inspect.get_generic_bases(type_)[0]\n\n generic_origin = typing_inspect.get_origin(generic_base)\n if generic_origin is not TypedKey:\n raise Exception(f'Wrong generic origin: {generic_origin.__name__}. Expected TypedKey')\n\n generic_arg = typing_inspect.get_args(generic_base)[0] # Arg\n\n # Generic parameter is forward ref\n if type(generic_arg) is ForwardRef:\n return ClassInfo.get_type(generic_arg.__forward_arg__)\n # Generic parameter is type\n elif issubclass(generic_arg, TypedRecord) or issubclass(generic_arg, RootRecord):\n return generic_arg\n else:\n raise Exception(f'Cannot deduce key from type {type_.__name__}')", "def cast(fieldtype, value=None):\n\tif fieldtype in (\"Currency\", \"Float\", \"Percent\"):\n\t\tvalue = flt(value)\n\n\telif fieldtype in (\"Int\", \"Check\"):\n\t\tvalue = cint(sbool(value))\n\n\telif fieldtype in (\n\t\t\"Data\",\n\t\t\"Text\",\n\t\t\"Small Text\",\n\t\t\"Long Text\",\n\t\t\"Text Editor\",\n\t\t\"Select\",\n\t\t\"Link\",\n\t\t\"Dynamic Link\",\n\t):\n\t\tvalue = cstr(value)\n\n\telif fieldtype == \"Date\":\n\t\tif value:\n\t\t\tvalue = getdate(value)\n\t\telse:\n\t\t\tvalue = datetime.datetime(1, 1, 1).date()\n\n\telif fieldtype == \"Datetime\":\n\t\tif value:\n\t\t\tvalue = get_datetime(value)\n\t\telse:\n\t\t\tvalue = datetime.datetime(1, 1, 1)\n\n\telif fieldtype == \"Time\":\n\t\tvalue = get_timedelta(value)\n\n\treturn value", "def getobj(self,obj,key):\n san = SanContainer.getInstance().get_san()\n objcls = obj.__class__.__name__\n if hasattr(obj,'get_'+key):\n return getattr(obj,'get_'+key)(san,key)\n ptype = getattr(obj,key).__class__.__name__\n if ptype=='NoneType' or ptype=='instancemethod':\n return (0,None)\n val=obj.__dict__[key]\n if ptype in ['int','str','bool','IP','EnumValue']:\n return getattr(self,'get_str')(obj,key,val)\n if ptype=='list': return getattr(self,'get_strlist')(obj,key,val)\n if ptype=='dict': return getattr(self,'get_strdict')(obj,key,val)\n er='unknown get type %s in set %s.%s' % 
(ptype,obj.__class__.__name__,key)\n logger.eventlog.warning(er)\n return (1,er)", "def _convert_key(self, key):\n try:\n return self.__key_cache[key]\n except KeyError:\n if isinstance(key, sql.ColumnElement):\n try:\n rec = self.props[key._label.lower()]\n except KeyError:\n try:\n rec = self.props[key.key.lower()]\n except KeyError:\n rec = self.props[key.name.lower()]\n elif isinstance(key, str):\n rec = self.props[key.lower()]\n else:\n rec = self.props[key]\n self.__key_cache[key] = rec\n return rec" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The headers from the WSGI environ as immutable and sanitized
def headers(self): return SanitizedEnvironHeaders(self.environ)
[ "def headers(self) -> dict:\n return self._flask_request.headers", "def get_wsgi_headers(self, environ):\n headers = Headers(self.headers)\n location = None\n content_location = None\n content_length = None\n status = self.status_code\n\n # iterate over the headers to find all values in one go. Because\n # get_wsgi_headers is used each response that gives us a tiny\n # speedup.\n for key, value in headers:\n ikey = key.lower()\n if ikey == u'location':\n location = value\n elif ikey == u'content-location':\n content_location = value\n elif ikey == u'content-length':\n content_length = value\n\n # make sure the location header is an absolute URL\n if location is not None:\n old_location = location\n if isinstance(location, text_type):\n location = iri_to_uri(location)\n if self.autocorrect_location_header:\n current_url = get_current_url(environ, root_only=True)\n if isinstance(current_url, text_type):\n current_url = iri_to_uri(current_url)\n location = url_join(current_url, location)\n if location != old_location:\n headers[u'Location'] = location\n\n # make sure the content location is a URL\n if content_location is not None and \\\n isinstance(content_location, text_type):\n headers[u'Content-Location'] = iri_to_uri(content_location)\n\n # remove entity headers and set content length to zero if needed.\n # Also update content_length accordingly so that the automatic\n # content length detection does not trigger in the following\n # code.\n if 100 <= status < 200 or status == 204:\n headers['Content-Length'] = content_length = u'0'\n elif status == 304:\n remove_entity_headers(headers)\n\n # if we can determine the content length automatically, we\n # should try to do that. But only if this does not involve\n # flattening the iterator or encoding of unicode strings in\n # the response. 
We however should not do that if we have a 304\n # response.\n if self.automatically_set_content_length and \\\n self.is_sequence and content_length is None and status != 304:\n try:\n content_length = sum(len(to_bytes(x, 'ascii')) for x in self.response)\n except UnicodeError:\n # aha, something non-bytestringy in there, too bad, we\n # can't safely figure out the length of the response.\n pass\n else:\n # this \"casting\" actually works\n headers['Content-Length'] = text_type(content_length)\n\n return headers", "def _extract_env(self, request_headers):\n\n stream = cStringIO.StringIO(request_headers)\n # this isn't a reliable method of doing this,\n # but since we only plan on supporting one client...\n [command, full_path, version] = stream.readline() \\\n .split(\"\\n\", 1)[0].split()\n path_components = full_path.split('?', 1)\n path = path_components[0]\n if len(path_components) == 2:\n query = path_components[1]\n else:\n query = ''\n\n headers = mimetools.Message(stream)\n\n forwarded_host = headers.get('x-forwarded-host', '')\n if forwarded_host != '':\n host_parts = forwarded_host.split(':')\n else:\n host_parts = headers.get('host', '').split(':')\n\n # TODO this doesn't take HTTPS into account.\n # How could we tell if this request came to us via HTTPS\n # at this point?\n if len(host_parts) == 2:\n [host, port] = host_parts\n else:\n host = host_parts[0]\n port = 80\n\n env = {}\n env['REQUEST_METHOD'] = command\n env['SERVER_NAME'] = host\n env['SERVER_PORT'] = port\n env['REMOTE_HOST'] = None\n env['CONTENT_LENGTH'] = headers.get('Content-Length', 0)\n env['SCRIPT_NAME'] = ''\n env['PATH_INFO'] = path\n env['QUERY_STRING'] = query\n\n if headers.typeheader is None:\n env['CONTENT_TYPE'] = headers.type\n else:\n env['CONTENT_TYPE'] = headers.typeheader\n length = headers.getheader('content-length')\n if length:\n env['CONTENT_LENGTH'] = length\n\n env['HTTP_COOKIE'] = headers.getheader('cookie', '')\n\n return env", "def get_http_headers(request_meta):\n headers = {k[5:].replace('_', '-').title(): v\n for k, v in request_meta.items()\n if k.startswith('HTTP_')}\n if request_meta.get('CONTENT_TYPE'):\n headers['Content-Type'] = request_meta['CONTENT_TYPE']\n if request_meta.get('CONTENT_LENGTH'):\n headers['Content-Length'] = request_meta['CONTENT_LENGTH']\n # Drop headers added by OpenHIM\n headers.pop('X-Forwarded-For', None)\n headers.pop('X-Forwarded-Host', None)\n headers.pop('X-Openhim-Transactionid', None)\n return headers", "def environ(request):\r\n hostport = request.host.split(\":\")\r\n if len(hostport) == 2:\r\n host = hostport[0]\r\n port = int(hostport[1])\r\n else:\r\n host = request.host\r\n port = 443 if request.protocol == \"https\" else 80\r\n environ = {\r\n \"REQUEST_METHOD\": request.method,\r\n \"SCRIPT_NAME\": \"\",\r\n \"PATH_INFO\": to_wsgi_str(escape.url_unescape(\r\n request.path, encoding=None, plus=False)),\r\n \"QUERY_STRING\": request.query,\r\n \"REMOTE_ADDR\": request.remote_ip,\r\n \"SERVER_NAME\": host,\r\n \"SERVER_PORT\": str(port),\r\n \"SERVER_PROTOCOL\": request.version,\r\n \"wsgi.version\": (1, 0),\r\n \"wsgi.url_scheme\": request.protocol,\r\n \"wsgi.input\": BytesIO(escape.utf8(request.body)),\r\n \"wsgi.errors\": sys.stderr,\r\n \"wsgi.multithread\": False,\r\n \"wsgi.multiprocess\": True,\r\n \"wsgi.run_once\": False,\r\n }\r\n if \"Content-Type\" in request.headers:\r\n environ[\"CONTENT_TYPE\"] = request.headers.pop(\"Content-Type\")\r\n if \"Content-Length\" in request.headers:\r\n environ[\"CONTENT_LENGTH\"] = 
request.headers.pop(\"Content-Length\")\r\n for key, value in request.headers.items():\r\n environ[\"HTTP_\" + key.replace(\"-\", \"_\").upper()] = value\r\n return environ", "def headers_to_sign(self):\n headersToSign = {'host': self.obsRequest.headers['Host']}\n for name, value in self.obsRequest.headers.items():\n if not name or not value:\n continue\n lname = name.lower().strip()\n if lname.startswith('x-amz') or lname == 'content-type':\n if isinstance(value, bytes):\n value = value.decode('utf-8')\n headersToSign[lname] = value\n return headersToSign", "def _filter_headers(self, headers):\n # we just care about the headers, not their value\n return headers.keys()", "def auth_headers():\n headers = {\n 'X-Auth-User': config.craton_creds['username'],\n 'X-Auth-Token': config.craton_creds['password'],\n 'X-Auth-Project': config.craton_creds['project']\n }\n return headers", "def _get_headers():\n return {\"content-type\": \"application/json\", \"user-agent\": \"Mandrill-Python/1.0.57\"}", "def get_headers():\n return {\n 'Content-Type': 'application/json',\n 'Authorization': get_basic_auth()\n }", "def _prepare_headers(headers):\r\n if headers is None:\r\n headers = {}\r\n\r\n if \"User-Agent\" not in headers:\r\n headers[\"User-Agent\"] = \"TrassirScript\"\r\n return headers", "def _prepare_headers(self, request):\n if not request.use_session:\n return request.headers\n\n if self.session is None:\n self.session = self._execute_with_lock(self._get_session)\n\n return {\n **self.session.session_headers,\n **request.headers\n }", "def _make_header_wsgi_env_key(http_header: str) -> str:\n return \"HTTP_\" + http_header.replace(\"-\", \"_\").upper()", "def _extract_headers(self, req):\n last_modified = req.headers['last-modified']\n # cached = req.headers['X-Apublish-Id']\n self.last_modified = last_modified", "def get_environ(self, request: requests.PreparedRequest) -> typing.Dict[str, typing.Any]:\n body = request.body\n if isinstance(body, str):\n body_bytes = body.encode(\"utf-8\") # type: bytes\n else:\n body_bytes = body\n\n url_components = urlparse(request.url)\n environ = {\n 'REQUEST_METHOD': request.method,\n 'wsgi.url_scheme': url_components.scheme,\n 'SCRIPT_NAME': '',\n 'PATH_INFO': unquote(url_components.path),\n 'wsgi.input': io.BytesIO(body_bytes),\n } # type: typing.Dict[str, typing.Any]\n\n if url_components.query:\n environ['QUERY_STRING'] = url_components.query\n\n if url_components.port:\n environ['SERVER_NAME'] = url_components.hostname\n environ['SERVER_PORT'] = str(url_components.port)\n else:\n environ['HTTP_HOST'] = url_components.hostname\n\n for key, value in request.headers.items():\n key = key.upper().replace('-', '_')\n if key not in ('CONTENT_LENGTH', 'CONTENT_TYPE'):\n key = 'HTTP_' + key\n environ[key] = value\n\n return environ", "def _normalize_csp_header(header):\n return {p.strip() for p in (header or \"\").split(\";\")}", "def get_access_control_allow_headers(self):\n _cors_headers = \"\"\n try:\n _cors_headers = self._get_config_value(\n \"Service Info\", \"Access-Control-Allow-Headers\"\n )\n except Exception:\n pass\n return _cors_headers", "def proxy_request_headers(self):\n headers = self.request.headers.copy()\n # Merge any manually configured request headers\n headers.update(self.get_request_headers_override())\n return headers", "def headers_to_sign(self):\n headers_to_sign = {'Host': self.host}\n for name, value in self.headers.items():\n l_name = name.lower()\n # 计算签名的时候, 不能包含 x-api-signature\n if l_name.startswith('x-api-') and l_name != 
'x-api-signature':\n headers_to_sign[name] = value\n return headers_to_sign" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the average kernel
def _compute_average(kernel: np.array, reps_i: List[np.array], reps_j: List[np.array]): # Count the number of atoms in the rows and columns # Works by accessing where the atomic number is stored in the FCHL representation natoms_i = np.array([np.greater(x[:][0][1], 0).sum() for x in reps_i]) natoms_j = np.array([np.greater(x[:][0][1], 0).sum() for x in reps_j]) total_atoms = natoms_i[:, None] * natoms_j[None, :] # Compute the average kernel /= total_atoms
[ "def _mean(self):\n mat = self._factorize(self.matrix, self.xdef)\n mat = self._rdc_x(mat, self.xdef)\n ysects = self._by_ysect(mat, self.ydef)\n return np.expand_dims([np.nansum(ymat[:, 0] /\n np.nansum(ymat[:, -1]))\n for ymat in ysects], 1).T", "def kernel_square(nPix):\n print(\"Averaging kernel of \" + str(nPix) + \" by \" + str(nPix))\n kernel = np.empty([nPix, nPix])\n kernel.fill(1)\n kernel /= kernel.sum() # kernel should sum to 1! :)\n return kernel", "def normalize_kernel(kernel):\n sum_ = np.sum(kernel)\n if abs(sum_) > 1e-10:\n return kernel / sum_", "def compute_mean_image(index_cluster,clusters,data):\n\n l = int(np.sqrt(len(data[0])))\n M = np.zeros((l,l))\n c=0\n\n for index in clusters:\n if(index==index_cluster):\n c+=1\n\n for i,index in enumerate(clusters):\n if(index==index_cluster):\n M += compute_image(data[i])/c\n \n return(M)", "def get_arithmetic_mean_filter(self, kernel):\n kernel= np.array([[-1,-1,-1],[-1, 9,-1],[-1,-1,-1]])\n sharpened_img = cv2.filter2D(sp_05, -1, kernel_sharpening) \n return sharpened_img", "def calculate_mean(self):\n\t\t\t\t\t\n avg = 1.0 * sum(self.data) / len(self.data)\n\t\t\n self.mean = avg\n \n return self.mean", "def cluster_mean(cluster):\r\n # print(cluster.shape)\r\n return(1/cluster.shape[1])*np.sum(cluster, axis=1)", "def average_over_modes(input, dk):\n M = len(input)\n if dk == 1:\n k_new = np.arange(M)\n return input, k_new\n\n M_new = np.floor(M / dk).astype(int)\n out = np.zeros(M_new) * np.nan\n for i in range(M_new):\n out[i] = np.mean(input[dk * i: dk * (i + 1)])\n k_new = dk / 2 + dk * np.arange(M_new)\n\n return out, k_new", "def mean_per_block(array, axis=None, controller=None):\n if axis is None or axis == 0:\n return sum_per_block(array, axis, controller) / count_per_block(array, axis, controller)\n else:\n return sum(array, axis, controller)", "def avgDisplacement(arr):\n sum_squares = np.sum(arr**2, axis=1)\n return np.mean(np.sqrt(sum_squares))", "def update_mean(X):\n\n return X.sum(axis=0) / X.shape[0]", "def func(arr):\n return arr.mean()", "def compute_averages(self):\n self.energy_average = self.cumulative_energy / self.N\n self.energy_squared_average = self.cumulative_squared_energy / self.N\n self.wave_function_derivative_average = self.cumulative_wave_function_derivative / self.N\n self.wave_function_energy_average = self.cumulative_wave_function_energy / self.N", "def _mean(img):\n res = img - np.mean(img)\n res[res < 0] = 0\n return res", "def average_model(self, key, model):\n # print(\"\\u001b[31;1m|py|\\u001b[0m\\u001b[37m\", \"ModelInterface::\", inspect.currentframe().f_code.co_name)\n\n for param, other_param in zip(\n self.models[key].parameters(), model.parameters()):\n param.data += other_param.data.cuda(param.data.get_device())\n param.data /= 2", "def get_avg(self):\n\t\treturn self.sum / max(len(self.window), 1)", "def block_mean(ar, fact):\n\t\n\tassert isinstance(fact, int), type(fact)\n\tsx, sy = ar.shape\n\tX, Y = np.ogrid[0:sx, 0:sy]\n\tregions = sy/fact * (X/fact) + Y/fact\n\tres = ndimage.mean(ar, labels=regions, index=np.arange(regions.max() + 1))\n\tres.shape = (sx/fact, sy/fact)\n\treturn res", "def global_mean(self, data):\r\n local_sum = np.sum(data)\r\n local_size = data.size\r\n global_sum = self.reduce_scalar(local_sum, MPI.SUM)\r\n global_size = self.reduce_scalar(local_size, MPI.SUM)\r\n return global_sum / global_size", "def _compute_kernel(self,X,weights):\n \n #kernel\n if self.kernel == \"lin\" or self.algorithm == self.impl_algo['partitional'][0]:\n # linear kernel 
will be calculated\n K = np.dot(X,X.T)\n elif self.kernel == \"gauss\":\n # gaussian kernel will be calculated\n pairwise_sq_dists = squareform(pdist(X, 'sqeuclidean'))\n K = np.exp(-pairwise_sq_dists /(2*self.sigm_gauss**2)) \n else:\n raise ValueError(\"Invalid Kernel name.\") \n \n #weights\n if weights is None:\n weights = np.ones((1,X.shape[0]))\n else:\n try:\n if weights.shape[1] != X.shape[0]:\n raise ValueError \n except ValueError: \n print(\"Invalid array size for the weights.\")\n raise \n \n # return the result \n return (K,weights)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create default output of prediction. Equal to the observables of the AMICI model. We need to check that the call to AMICI was successful (status == 0) before writing the output.
def _default_output(amici_outputs): amici_nt = [ len(edata.getTimepoints()) for edata in self.amici_objective.edatas ] amici_ny = len(self.output_ids) amici_np = len(self.amici_objective.x_names) outputs = [] outputs_sensi = [] outputs_weights = [] outputs_sigmay = [] timepoints = [ amici_output[AMICI_T] if amici_output[AMICI_STATUS] == 0 else np.full((amici_nt[i_condition],), np.nan) for i_condition, amici_output in enumerate(amici_outputs) ] # add outputs and sensitivities if requested if 0 in sensi_orders: outputs = [ amici_output[AMICI_Y] if amici_output[AMICI_STATUS] == 0 else np.full((amici_nt[i_condition], amici_ny), np.nan) for i_condition, amici_output in enumerate(amici_outputs) ] if 1 in sensi_orders: outputs_sensi = [ amici_output[AMICI_SY] if amici_output[AMICI_STATUS] == 0 else np.full( (amici_nt[i_condition], amici_np, amici_ny), np.nan ) for i_condition, amici_output in enumerate(amici_outputs) ] # add likelihood as weights if requested if include_llh_weights: outputs_weights = [ amici_output[AMICI_LLH] if amici_output[AMICI_STATUS] == 0 else np.nan for i_condition, amici_output in enumerate(amici_outputs) ] # add standard deviations if requested if include_sigmay: outputs_sigmay = [ amici_output[AMICI_SIGMAY] if amici_output[AMICI_STATUS] == 0 else np.full((1, amici_ny), np.nan) for i_condition, amici_output in enumerate(amici_outputs) ] return ( timepoints, outputs, outputs_sensi, outputs_weights, outputs_sigmay, )
[ "def build_output(self, model): # pylint: disable=no-self-use\n if model.mode != utils.INFER:\n model.score = tf.nn.softmax(model.logits, name=\"score\")\n model.preds = tf.argmax(model.logits, axis=-1)\n model.output_dict = {\"score\": model.score, \"preds\": model.preds}\n else:\n model.preds = model.logits\n model.output_dict = {\"preds\": model.preds}\n if hasattr(model, \"input_y\"):\n model.y_ground_truth = model.input_y", "def create_output_file(configs, test_data, predictions):\n result = Result(configs['data']['output'], test_data.get_engine())\n predictions = np.array(predictions)\n\n result.add_prediction('Survived', predictions)\n result.build_file()", "def init_output(self):\n if not self.output_initialised:\n self.scores = self.generate_scores()\n self.ranking = self.generate_ranking(self.scores)\n self.ranked_names = self.generate_ranked_names(self.ranking)\n self.output_initialised = True", "def create_prediction_file(output_file, front_data_id, behind_data_id,\n true_labels, predict_labels, predict_scores):\n if not output_file.endswith('.json'):\n raise IOError(\"[Error] The prediction file is not a json file.\"\n \"Please make sure the prediction data is a json file.\")\n with open(output_file, 'w') as fout:\n data_size = len(predict_labels)\n for i in range(data_size):\n data_record = OrderedDict([\n ('front_testid', front_data_id[i]),\n ('behind_testid', behind_data_id[i]),\n ('labels', int(true_labels[i])),\n ('predict_labels', int(predict_labels[i])),\n ('predict_scores', round(float(predict_scores[i]), 4))\n ])\n fout.write(json.dumps(data_record, ensure_ascii=True) + '\\n')", "def mock_predict(model_id):\t\n\n\tmodel_path = \"{}/m{}.pkl\".format(model_db_path, model_id)\n\n\n\t##################\n\t# demo predict input\n\tfor i in range(10):\n\t\tsample_idx = randint(0,len(test_dataset)-1)\n\t\tsample_test = test_dataset[sample_idx]\n\n\t\tif i == 0:\n\t\t\tsample_test_data = mx.nd.expand_dims(sample_test[0], axis = 0)\t# ndarray [[data1] [data2] ...]\n\t\t\tsample_test_label = mx.nd.array([sample_test[1]])\t\t\t# ndarray [label1 label2 ... ]\n\t\telse:\n\t\t\tsample_test_data = mx.nd.concat(sample_test_data, mx.nd.expand_dims(sample_test[0], axis = 0))\t# ndarray [[data1] [data2] ...]\n\t\t\tsample_test_label = mx.nd.concat(sample_test_label, mx.nd.array([sample_test[1]]), dim = 0)\t\t\t\t# ndarray [label1 label2 ... ]\n\t##################\n\n\ttry: \n\t\toutput = model.predict(sample_test_data, model_path)\n\n\n\t\t# Cast each output to int\n\t\tresults = []\n\t\tresult_labels = []\n\t\tfor i in range(output.size):\n\t\t\tresults.append(str(mx.nd.cast(output[i], dtype='int32').asscalar()))\n\t\t\tresult_labels.append(str(mx.nd.cast(sample_test_label[i], dtype='int32').asscalar()))\n\t\t\n\t\tresponse = {\"results\": results, \"labels\": result_labels}\n\n\t\treturn make_response(jsonify(response), 200)\n\n\texcept FileNotFoundError:\n\t\tresponse = {\"error\": \"Model not found. Make sure you have trained the model\"}\n\t\treturn make_response(jsonify(response), 404)", "def postprocess(cls, output) -> \"OutputModel\":\n pass", "def getPrediction(nnOutput):\n\treturn [nnOutput, 1.0]", "def make_prediction(chip_id, model):\n logger.info(\"Starting inference.\")\n try:\n vv_path = INPUT_IMAGES_DIRECTORY / f\"{chip_id}_vv.tif\"\n vh_path = INPUT_IMAGES_DIRECTORY / f\"{chip_id}_vh.tif\"\n output_prediction = model.predict(vv_path, vh_path)\n except Exception as e:\n logger.error(f\"No bands found for {chip_id}. 
{e}\")\n raise\n return output_prediction", "def start(self):\n self.managerlogger.logger.info(\"start ml predict...\")\n if runstatus.RunStatus.SUCC == self._predict_handle():\n self.managerlogger.logger.info(\"finished ml predict!\")\n else:\n self.managerlogger.logger.error(\"ml predict failed!\")", "def preproc_output(self, input: I, output: O) -> PO:\n raise Exception(\"Not implemented\")", "def postprocess_model_outputs(self, predictions, expected):\n expected[\"y\"] = expected[\"y\"].numpy()\n expected[\"display_ids\"] = expected[\"display_ids\"].numpy()\n\n return predictions.numpy(), expected", "def set_output(self, status):\n if status: #if True\n return self.command('OUT1')\n else:\n return self.command('OUT0')", "def main():\n\n parser = argparse.ArgumentParser(\n description=\"Predict experimental phasing success.\"\n )\n parser.add_argument(\n \"csv_file\", help=\"Path to a .csv formatted file containing the required metrics\"\n )\n parser.add_argument(\n \"--cutoff\",\n type=probability_type,\n default=0.80,\n help=\"Probability cutoff for determining the adjusted class\",\n )\n parser.add_argument(\n \"-o\",\n \"--outfile\",\n nargs=\"?\",\n type=argparse.FileType(\"w\"),\n help=\"output CSV format file\",\n )\n\n model = metrix_predict.model\n\n args = parser.parse_args()\n try:\n data = pd.read_csv(args.csv_file)\n except Exception:\n sys.exit(f\"Unable to read CSV data from {args.csv_file}\")\n\n try:\n data_initial = data[\n [\"lowreslimit\", \"anomalousslope\", \"anomalousCC\", \"diffI\", \"diffF\", \"f\"]\n ]\n except KeyError as e:\n sys.exit(f\"Required data not found: {e}\")\n\n data_initial = data_initial.fillna(0)\n unknown = data_initial.to_numpy()\n\n data[\"Class\"], data[\"P(fail)\"], data[\"P(success)\"] = metrix_predict.predict(unknown)\n data[\"Adj. class\"] = (data[\"P(success)\"] >= args.cutoff).astype(int)\n\n if args.outfile:\n print(f\"Writing to {args.outfile.name}\")\n data.to_csv(args.outfile, index=False, float_format=\"%g\")\n else:\n print(data)\n print(f\"\\nAdj. 
class is determined by the cutoff p(success) >= {args.cutoff}\")", "def predict():\n if model:\n\n try:\n incoming_data = request.get_json()\n client_ip = request.environ['REMOTE_ADDR']\n # Keep only the variables contribution to model prediction\n repeat_contact = {key: [value] for key, value in incoming_data.items() if key.lower() not in config.NOT_TO_READ}\n \n with counter.get_lock():\n counter.value += 1\n out = counter.value\n predictions = predict_repeat_contact(repeat_contact, model, features_transform_pipe)\n app.logger.info(f\"The prediction has been served for request id {counter} with client ip {client_ip}\")\n \n # we can store the incoming_data and final predictions in the database \n\n return jsonify(predictions)\n except:\n return jsonify({'trace': traceback.format_exc()})\n else:\n return (\"No model loaded\")", "def do_predictions(self):\n\n self.train_preds = self.tfmodel.predict(self.Data.X_train)\n self.test_preds = self.tfmodel.predict(self.Data.X_test)\n\n self.Helpers.logger.info(\n \"Training predictions: \" + str(self.train_preds))\n self.Helpers.logger.info(\n \"Testing predictions: \" + str(self.test_preds))\n print(\"\")", "def write_output_predictions(folder, predictions, p):\n\n out_file = os.path.join(folder, (p + \".bindPredict_out\"))\n with open(out_file, 'w') as out:\n # format: res, proba_pos\n for pred in predictions:\n label = \"nb\"\n if pred[1] >= 0.6:\n label = \"b\"\n out.write(str(pred[0]) + \"\\t\" + str(pred[1]) + \"\\t\" + label + \"\\n\")", "def _setup_prediction_op(self):", "def predict(self, test_file, output_file, verbose=True):\n f = open(test_file, 'r')\n f.close()\n f = open(output_file, 'w')\n f.close()", "def on_inference_end(self, y_pred: Tensor, y_true: Tensor):\n if self._output_sample is None:\n self._output_sample = torch.zeros(size=y_pred.shape, dtype=y_pred.dtype)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encapsulate the call to amici. This allows to use variable scoping as a mean to clean up the memory after calling amici, which is beneficial if large models with large datasets are used.
def _wrap_call_to_amici( self, amici_outputs, x, sensi_orders, mode, parameter_mapping, edatas ): chunk = self.amici_objective( x=x, sensi_orders=sensi_orders, mode=mode, parameter_mapping=parameter_mapping, edatas=edatas, return_dict=True, ) for rdata in chunk[RDATAS]: amici_outputs.append( { output_field: deepcopy(rdata[output_field]) for output_field in self.amici_output_fields } ) del chunk
[ "def acontexmanager(func):\n func.__returns_acontextmanager__ = True\n return func", "def post_compute(self):\n pass", "def _apply_fit(self,raw_chop=None,chop=None,idx=None):\n ica_obj = None\n self._ics_found_svm = None\n\n fname_ica,fname = self._get_chop_name(raw_chop,chop=None)\n \n msg=[\"start ICA FIT chop: {} / {}\".format(idx + 1,self.Chopper.n_chops),\n \" --> chop id : {}\".format(chop),\n \" -> ica fname : {}\".format(fname_ica),\n \" -> ica chop path: {}\".format(self.path_ica_chops),\n \" -> raw filename : {}\".format(fname)\n ]\n logger.info(\"\\n\".join(msg))\n \n #--- ck for ovewrite & ICA exist\n load_from_disk = False\n if not self.cfg.fit.overwrite:\n load_from_disk = jb.isFile(fname_ica,path=self.path_ica_chops)\n \n if load_from_disk:\n # self._ica_obj,fname_ica = jb.get_raw_obj(fname_ica,path=self.path_ica_chops)\n ica_obj,fname_ica = jb.get_raw_obj(fname_ica,path=self.path_ica_chops)\n \n logger.info(\"DONE LOADING ICA chop form disk: {}\\n -> ica filename: {}\".\n format(chop,fname_ica))\n else:\n if self.useArtifactRejection:\n with jumeg_logger.StreamLoggerSTD(label=\"ica fit\"):\n \n ica_obj = fit_ica(raw=raw_chop,picks=self.picks,reject=self.CFG.GetDataDict(key=\"reject\"),\n ecg_ch=self.cfg.ecg.ch_name,ecg_thresh=self.cfg.ecg.thresh,\n flow_ecg=self.cfg.ecg.flow,fhigh_ecg=self.cfg.ecg.fhigh,\n #---\n eog_hor = self.cfg.eog.hor_ch,\n eog_ver = self.cfg.eog.ver_ch,\n flow_eog=self.cfg.eog.flow,fhigh_eog=self.cfg.eog.fhigh,\n eog_thresh=self.cfg.eog.thresh,\n #---\n use_jumeg=self.cfg.ecg.use_jumeg,\n random_state=self.cfg.random_state)\n \n ica_obj.exclude = list( set( ica_obj.exclude ) )\n \n if self.useSVM:\n if not ica_obj:\n logger.info('SVM start ICA FIT: init ICA object')\n #--- !!! ToDo put parameter in CFG file\n ica_obj = ICA(method='fastica',n_components=40,random_state=42,max_pca_components=None,\n max_iter=5000,verbose=False)\n ica_obj.fit(raw_chop,picks=self.picks,decim=None,reject=self.CFG.GetDataDict(key=\"reject\"),\n verbose=True)\n else:\n logger.info('SVM ICA Obj start')\n #--- !!! 
do_copy = True => resample\n ica_obj,_ = self.SVM.run(raw=self.raw,ICA=ica_obj,picks=self.picks,do_crop=False,do_copy=True)\n logger.info('DONE SVM ICA FIT: apply ICA.fit')\n\n #-- save ica object\n if self.cfg.fit.save and not load_from_disk:\n logger.info(\"saving ICA chop : {} / {}\\n\".format(idx + 1,self.Chopper.n_chops) +\n \" -> ica filename : {}\".format(fname_ica))\n ica_obj.save(os.path.join(self.path_ica_chops,fname_ica))\n \n logger.info(\"done ICA FIT for chop: {}\\n\".format(chop)+\n \" -> raw chop filename : {}\\n\".format(fname_ica)+\n \"-\"*30+\"\\n\"+\n \" -> ICs found JuMEG/MNE : {}\\n\".format(self.SVM.ICsMNE)+\n \" -> ICs found SVM : {}\\n\".format(self.SVM.ICsSVM) +\n \" -> ICs excluded : {}\\n\".format(ica_obj.exclude)+\n \"-\"*30+\"\\n\"+\n \" -> save ica fit : {}\".format(self.cfg.fit.save)\n )\n return ica_obj,fname_ica", "def __attrs_post_init__(self):\n self._env = {} # environment which would be in-effect only for this session\n self._env_permanent = {} # environment variables which would be in-effect in future sessions if resource is persistent", "def post_cache_init(cls):\n for cached_function in cls._cached_functions:\n MemoryCache._accesses[cached_function] = list()", "def _wrap_measure(individual_measure_process):\n def wrapped_measure(state_collection):\n for state in state_collection.states:\n analysis_collection = measure.analysis.AnalysisCollection(state,None,[])\n individual_measure_process(analysis_collection,state_collection)\n return state_collection\n return wrapped_measure", "def share_memory(self):\n if not self.is_feat:\n self.emb.share_memory_()\n self.state_sum.share_memory_()", "def aplica(self, estado, accion):\n pass", "def _analysis_reset(self):\n\n self._normal_forces = 0\n self._shear_forces = 0\n self._bending_moments = 0\n self._deflection_equation = 0\n\n self._reactions = {}\n self._plotting_vectors = {}", "def pre_compute(self):\n pass", "def pmc_abc(model, data, inter_save_direc, inter_filename, epsilon_0=1, min_samples=10,\n steps=10, resume=None, parallel=False, n_procs='all', \n sample_only=False, minError=0.0001, minAccRate = 0.0001):\n\n output_record = np.empty(steps, dtype=[('theta accepted', object),\n #('theta rejected', object),\n ('D accepted', object),\n ('n accepted', float),\n ('n total', float),\n ('epsilon', float),\n ('weights', object),\n ('tau_squared', object),\n ('eff sample size', object),\n ])\n\n\n if resume is not None: \n steps = range(resume.size, resume.size + steps)\n output_record = stack_arrays((resume, output_record), asrecarray=True,\n usemask=False)\n epsilon = stats.scoreatpercentile(resume[-1]['D accepted'],\n per=75)\n theta = resume['theta accepted'][-1]\n weights = resume['weights'][-1]\n tau_squared = resume['tau_squared'][-1]\n\n else:\n steps = range(steps)\n epsilon = epsilon_0\n\n for step in steps: \n \n print('Starting step {}'.format(step))\n print('epsilon = {}'.format(epsilon)) \n if step == 0:\n #Fist ABC calculation\n\n if parallel:\n if n_procs == 'all':\n n_procs = mp.cpu_count()\n\n chunk = np.ceil(min_samples/float(n_procs))\n print(\"Running {} particles on {} processors\".format(chunk,\n n_procs))\n\n output = mp.Queue()\n processes = [ABCProcess(target=parallel_basic_abc,\n args=(model, data, output),\n kwargs={'epsilon': epsilon,\n 'min_samples': chunk,\n 'pmc_mode': False})\n for i in range(n_procs)]\n\n for p in processes:\n p.start()\n \n results = [output.get() for p in processes]\n \n for p in processes:\n p.join()\n \n output_record[step] = 
combine_parallel_output(results)\n\n else:\n\n\n output_record[step] = basic_abc(model, data, epsilon=epsilon,\n min_samples=min_samples,\n parallel=False, pmc_mode=False)\n\n theta = output_record[step]['theta accepted']\n tau_squared = 2 * np.cov(theta)\n\n weights = np.ones(theta.shape[1]) * 1.0/theta.shape[1]\n\n epsilon = stats.scoreatpercentile(output_record[step]['D accepted'],\n per=75)\n\n output_record[step]['weights'] = weights\n output_record[step]['tau_squared'] = tau_squared\n\n\n else:\n theta_prev = theta\n weights_prev = weights\n\n if parallel:\n if n_procs == 'all':\n n_procs = mp.cpu_count()\n\n chunk = np.ceil(min_samples/float(n_procs))\n print(\"Running {} particles on {} processors\".format(chunk,\n n_procs))\n\n output = mp.Queue()\n processes = [ABCProcess(target=parallel_basic_abc,\n args=(model, data, output),\n kwargs={'epsilon': epsilon,\n 'min_samples': chunk,\n 'pmc_mode': True,\n 'weights': weights,\n 'theta_prev': theta_prev,\n 'tau_squared': tau_squared})\n for i in range(n_procs)]\n\n for p in processes:\n p.start()\n \n results = [output.get() for p in processes]\n \n for p in processes: \n p.join()\n\n output_record[step] = combine_parallel_output(results)\n\n else:\n\n output_record[step] = basic_abc(model, data, epsilon=epsilon,\n min_samples =min_samples,\n parallel=False,\n n_procs=n_procs, pmc_mode=True,\n weights=weights,\n theta_prev=theta_prev,\n tau_squared=tau_squared)\n\n \n # reordering timescales for faster convergence of the two-oscillation model \n theta_load = output_record[step]['theta accepted']\n f1 = theta_load[1]\n f2 = theta_load[2]\n c1 = theta_load[3]\n c2 = theta_load[4]\n\n swap_id = np.where(f1 > f2)[0]\n# print(swap_id)\n if swap_id.size:\n f1_temp = f1 + 0\n f2_temp = f2 + 0\n c1_temp = c1 + 0\n c2_temp = c2 + 0\n f1_temp[swap_id] = f2[swap_id]\n f2_temp[swap_id] = f1[swap_id]\n c1_temp[swap_id] = c2[swap_id]\n c2_temp[swap_id] = c1[swap_id]\n \n theta_load[1] = f1_temp\n theta_load[2] = f2_temp\n theta_load[3] = c1_temp\n theta_load[4] = c2_temp\n theta = theta_load\n output_record[step]['theta accepted'] = theta\n else:\n theta = theta_load\n \n \n \n epsilon = stats.scoreatpercentile(output_record[step]['D accepted'],\n per=75)\n\n \n effective_sample = effective_sample_size(weights_prev)\n\n if sample_only:\n weights = []\n tau_squared = []\n else:\n weights = calc_weights(theta_prev, theta, tau_squared, \n weights_prev, prior=model.prior)\n \n tau_squared = 2 * weighted_covar(theta, weights)\n\n output_record[step]['tau_squared'] = tau_squared\n\n output_record[step]['eff sample size'] = effective_sample\n\n output_record[step]['weights'] = weights\n \n nAccept = output_record[step]['n accepted'] \n nTot = output_record[step]['n total'] \n acceptRate = nAccept/nTot \n print('acceptence Rate = {}'.format(acceptRate)) \n np.save(inter_save_direc + inter_filename, output_record) \n\n print('--------------------') \n \n if acceptRate < minAccRate: \n print('epsilon = {}'.format(epsilon))\n print('acceptence Rate = {}'.format(acceptRate)) \n return output_record\n \n# if epsilon < minError: \n# print('epsilon = {}'.format(epsilon)) \n# return output_record\n \n return output_record", "def __init__(self,\n action_spec,\n observation_spec=None,\n hidden_size=256,\n reward_adapt_speed=8.0,\n encoding_net: EncodingNetwork = None,\n forward_net: EncodingNetwork = None,\n inverse_net: EncodingNetwork = None,\n activation=torch.relu_,\n optimizer=None,\n name=\"ICMAlgorithm\"):\n if encoding_net is not None:\n feature_spec = 
encoding_net.output_spec\n else:\n feature_spec = observation_spec\n\n super(ICMAlgorithm, self).__init__(\n train_state_spec=feature_spec,\n predict_state_spec=(),\n optimizer=optimizer,\n name=name)\n\n flat_action_spec = alf.nest.flatten(action_spec)\n assert len(\n flat_action_spec) == 1, \"ICM doesn't suport nested action_spec\"\n\n flat_feature_spec = alf.nest.flatten(feature_spec)\n assert len(\n flat_feature_spec) == 1, \"ICM doesn't support nested feature_spec\"\n\n action_spec = flat_action_spec[0]\n\n if action_spec.is_discrete:\n self._num_actions = int(action_spec.maximum - action_spec.minimum +\n 1)\n else:\n self._num_actions = action_spec.shape[-1]\n\n self._action_spec = action_spec\n self._observation_normalizer = None\n if observation_spec is not None:\n self._observation_normalizer = AdaptiveNormalizer(\n tensor_spec=observation_spec)\n\n feature_dim = flat_feature_spec[0].shape[-1]\n\n self._encoding_net = encoding_net\n\n if isinstance(hidden_size, int):\n hidden_size = (hidden_size, )\n\n if forward_net is None:\n encoded_action_spec = TensorSpec((self._num_actions, ),\n dtype=torch.float32)\n forward_net = EncodingNetwork(\n name=\"forward_net\",\n input_tensor_spec=[feature_spec, encoded_action_spec],\n preprocessing_combiner=NestConcat(),\n fc_layer_params=hidden_size,\n activation=activation,\n last_layer_size=feature_dim,\n last_activation=math_ops.identity)\n\n self._forward_net = forward_net\n\n if inverse_net is None:\n inverse_net = EncodingNetwork(\n name=\"inverse_net\",\n input_tensor_spec=[feature_spec, feature_spec],\n preprocessing_combiner=NestConcat(),\n fc_layer_params=hidden_size,\n activation=activation,\n last_layer_size=self._num_actions,\n last_activation=math_ops.identity,\n last_kernel_initializer=torch.nn.init.zeros_)\n\n self._inverse_net = inverse_net\n\n self._reward_normalizer = ScalarAdaptiveNormalizer(\n speed=reward_adapt_speed)", "def reset_metrics():\n global METRICS\n METRICS = {\n 'cw': [],\n 'ac': [],\n 'rk': []\n }", "def precomputedForM(self, m):\n print(\"TODO\")", "def __init__(self):\n super(_MemoryAccessor, self).__init__(\"memory\")\n self._metric_to_points = collections.defaultdict(sortedcontainers.SortedDict)\n self._name_to_metric = {}\n self._directory_names = sortedcontainers.SortedSet()\n self.__downsampler = _downsampling.Downsampler()\n self.__delayed_writer = _delayed_writer.DelayedWriter(self)", "def my_cool_fun():\n # TODO: Add some AI, ML, cloud stuff here\n # Adding the cool AI function\n print(\"Running super conplex AI, ML, cloud stuff\")", "def __free_environment__(cls, environment):", "def memory():\n return myself()[MEMORY]", "def free_variables(self):\n # Task 7.6" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
$WIND_BASE environment variable. If $WIND_BASE isn't set, return a default of /usr/powerpc-wrs-vxworks/wind_base
def get_wind_base(): wind_base = os.getenv('WIND_BASE') if wind_base: return path.abspath(wind_base) return path.abspath(path.join('/usr', 'powerpc-wrs-vxworks', 'wind_base'))
[ "def find_base_path():\n if platform.system() == 'windows':\n base_path = os.path.join('K:', 'ptestbend')\n else:\n base_path = os.path.join('/mnt','K', 'ptestbend')\n return base_path", "def get_base_dir():\n global BASE_DIR\n return BASE_DIR", "def default_base_dir():\n cwd = Path('.').resolve()\n\n pwd = os.environ.get('PWD')\n if pwd is None:\n return cwd\n\n pwd = Path(pwd)\n if not pwd.is_absolute():\n return cwd\n\n if cwd != pwd.resolve():\n return cwd\n\n return pwd", "def calcHome(self):\n\t\trVal = ''\n\t\tmyenv = self.getWasEnv()\n\t\t#print \"myenv=\" + str( myenv )\n\t\tif self.getWasEnv().lower() == 'prod':\n\t\t\trVal = '/nfs/dist/dmp/WDT'\n\t\telse:\n\t\t\trVal = '/nfs/dist/dmp/WDT/' + str( self.getWasEnv().upper() )\n\t\treturn rVal", "def get_base_dir():\n return os.path.join(os.environ.get(ENV_BASEDIR, '.'), '.rob')", "def def_report_path():\n if os.name == 'nt':\n return(getwindoc())\n else:\n return(os.getenv(\"HOME\"))", "def get_base_directory() -> str:\n return SO5CGConfig.base_directory \\\n if SO5CGConfig.base_directory is not None \\\n else expanduser(SO5CGConfig.default_base_directory)", "def get_default_installation_dir():\n if sys.platform == \"win32\":\n install_path = os.path.expandvars(r'%PROGRAMW6432%\\dynatrace\\oneagent')\n conf_path = os.path.expandvars(r'%programdata%\\dynatrace\\oneagent\\agent\\config\\ruxitagentproc.conf')\n else:\n install_path = '/opt/dynatrace/oneagent'\n conf_path = '/var/lib/dynatrace/oneagent/agent/config/ruxitagentproc.conf'\n try:\n with open (conf_path, 'r') as conf_file:\n prefix = 'libraryPath64 '\n for line in conf_file:\n if line.startswith(prefix):\n lib_path = Path(line[len(prefix)+1:-1])\n install_path = lib_path.parent.parent.parent.parent\n break\n except OSError as e:\n pass\n logging.debug(\"Setting installation root dir to %s\", install_path)\n return install_path", "def _GetSystemMountPoint(self):\n if self.GetApiVersion() >= 28:\n return '/'\n else:\n return '/system'", "def _get_base0(self, base0: bool) -> bool:\n if base0 is None:\n base0 = self.base0\n self.base0 = get_option(\"index.base.0\", base0)\n return self.base0", "def _get_artifactory_base():\n return os.environ.get('PYBEL_ARTIFACTORY_BASE', _default_artifactory_base).rstrip('/')", "def _get_default_path(self):\n #return os.path.join(cfg.DATA_DIR, 'SNUBH_BUS')\n return cfg.DATA_DIR", "def getDefaultMonChannelAddress():\n\n try:\n pyST_globals.DefaultMonChannelAddress\n except NameError:\n print 'Error, run setDefaultMonChannelAddress first'\n return pyST_globals.DefaultMonChannelAddress", "def get_venv_basedir():\n exec_prefix = get_config_vars()['exec_prefix']\n has_real_prefix = hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix)\n if has_real_prefix is False or (hasattr(sys, 'real_prefix') and exec_prefix.startswith(sys.real_prefix)):\n raise EnvironmentError('You must be in a virtual environment')\n\n return os.path.abspath(get_config_vars()['exec_prefix'] + '/../')", "def base_directory():\n return os.path.dirname(os.path.realpath(__file__)) + os.path.sep", "def _base(self, path):\n\n path = path.lstrip(\"/\")\n if self.base:\n return normpath(os.path.join(self.base, path)).lstrip(\"/\")\n return normpath(path)", "def platform_root(self):\n return os.getcwd()", "def get_workdir(self, default=None):\n return getnattr(self._raw, [\"settings\", \"workdir\"], default)", "def find_kernel_base():\n return idaapi.get_fileregion_ea(0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Both methods 'get_method_of_class_java' and 'get_method_of_class_java2' work correctly.
def get_method_of_class_java2(cls, db, class_name=None, class_entity=None): if class_entity is None: class_entity = cls.get_class_entity_by_name(db=db, class_name=class_name) method_list = class_entity.ents('Define', 'Java Method ~Unknown ~Unresolved ~Jar ~Library') # print('len method list', len(method_list)) # print(method_list) return method_list
[ "def Node2Method(self, node): \n ##TODO(GuoChenkai) Nodef to Encodedmethod\n ## convert through the method_name\n #res = [] \n #methods = self.d.get_method(gvm_node.method_name)\n #for i in methods:\n #if i.get_name() == gvm_node.method_name:\n #res.append(i)\n #return res\n \n #print start_method.XREFfrom.items\n \n ## convert through the id (id does not match) \n #method = self.d.get_method_by_idx(gvm_node.id)\n #return method \n \n ## convert through the map_nodemethod {} within this class\n return self.d.get_method_descriptor(node.class_name,node.method_name,node.descriptor)\n #if not gvm_node.id in self.map_nodemethod:\n #return None \n #elif self.map_nodemethod[gvm_node.id] != None:\n #method = self.map_nodemethod[gvm_node.id]\n #return method\n #else: return None", "def get_class_method(self, handler, request):\n handler_method = getattr(handler(),request.method.lower(), None)\n \n return handler_method", "def get_methods_by_class(self, clazz):\n range_start = 0\n range_end = self.num_methods\n split_idx = 0\n test_method = None\n\n clazz = ghidra_utils.SymbolDescriptor(clazz).to_java()\n\n while(range_end >= range_start):\n split_idx = range_start + ((range_end-range_start)/2)\n test_method = self.get_method(split_idx)\n\n if test_method.clazz == clazz:\n # found the class!\n break\n\n elif test_method.clazz < clazz:\n # this is too early\n range_start = split_idx+1\n else:\n # this is too late\n if range_end == split_idx:\n break\n range_end = split_idx\n\n if test_method.clazz == clazz:\n # we found our class. now walk backwards and forwards from here to find all of them\n clazz_start = split_idx\n while clazz_start > 0:\n test_idx = clazz_start - 1\n test_method = self.get_method(test_idx)\n if test_method.clazz == clazz:\n clazz_start = test_idx\n else:\n # found first non-clazz method before - we're done\n break\n\n clazz_end = split_idx\n while clazz_end < (self.num_methods-1):\n test_idx = clazz_end + 1\n test_method = self.get_method(test_idx)\n if test_method.clazz == clazz:\n clazz_end = test_idx\n else:\n # found first non-clazz method after - we're done\n break\n\n # now give back the methods in order\n for method_idx in xrange(clazz_start, clazz_end+1):\n yield self.get_method(method_idx)", "def method_of(meth):\n cls, name = meth.__qualname__.split('.')\n return dict(getmembers(getmodule(meth)))[cls]", "def _get_methods(class_input):\r\n return [f for f in dir(class_input) \r\n if callable(getattr(class_input, f)) and not f.startswith('__')]", "def get_class_methods(_class):\n return [x for x, y in list(_class.__dict__.items()) if type(y) == FunctionType]", "def _get_methods(cls, bases, parameters):\n if parameters.checkformethods == 'class':\n methods = [name for name, value in cls.__dict__.items()\n if callable(value)]\n # add the name of the entry in cls.__dict__ if it is\n # callable\n\n elif parameters.checkformethods == 'bases':\n methods = [*[name for name, value in cls.__dict__.items()\n if callable(value)],\n *[name for base in bases\n for name, value in base.__dict__.items()\n if getattr(base, 'abstract', False)\n if callable(value)]]\n # do the same as above, but this time unpack into a list\n # along with the unpacked 2nd list. 
The second list\n # contains the entries in base.__dict__ if it is callable\n # and assuming the base has an abstract attribute of True\n # (repeats for all bases passed in in the bases parameter)\n\n else:\n methods = [*[name for name, value in cls.__dict__.items()\n if callable(value)],\n *[name for base in cls.mro()[1:-1]\n for name, value in base.__dict__.items()\n if getattr(base, 'abstract', False)\n if callable(value)]]\n # same as above, but use the passed class' mro() to specify\n # base classes instead of the bases parameter\n \n return methods", "def get_class_methods(class_ast):\n output = list()\n \n #only checks definitions immediately in body to avoid nested class methods\n for node in class_ast.body:\n if isinstance(node,ast.FunctionDef):\n output.append(node.name)\n \n return output", "def determine_method(self, kwargs):\n valid_methods = self.api_map.get('method', ['GET'])\n passed_method = kwargs.get('method', '').upper()\n\n # Use the method passed\n if passed_method:\n if passed_method in valid_methods:\n return passed_method\n else:\n error = 'Valid methods are {}, we received \"{}\".'.format(valid_methods, passed_method)\n raise SCMError(error)\n\n # Let's fallback to something gracefully.\n if isinstance(valid_methods, list):\n methods_order = ['GET', 'POST', 'PUT', 'DELETE']\n for method in methods_order:\n if method in valid_methods:\n return method", "def GetResponseClass(self, method_descriptor):\n raise NotImplementedError", "def resolve_method(self, name):\n\n\t\tif \".\" in name:\n\t\t\tifname, name = name.rsplit(\".\", 1)\n\t\telse:\n\t\t\tifname = None\n\n\t\tfor iface in self.interfaces:\n\t\t\tif iface.name == ifname or ifname is None:\n\t\t\t\tfor method in iface.methods:\n\t\t\t\t\tif method.name == name:\n\t\t\t\t\t\treturn iface, method\n\t\telse:\n\t\t\treturn None, None", "def get_original_method(self, obj, met_name):\n basemethod = method = getattr(obj, met_name)\n try:\n basemethod = self.__woven_dict[obj][met_name]['original']\n except KeyError:\n # if the method wasn't found AND if 'obj' is an isntance,\n # try to look at the obj.__class__ entry (convenience behaviour)\n if type(obj) == types.InstanceType:\n klass = obj.__class__\n try:\n basemethod = self.__woven_dict[klass][met_name]['original']\n except KeyError:\n return basemethod, method\n return basemethod, method", "def GetRequestClass(self, method_descriptor):\n raise NotImplementedError", "def MethodHandle(self) -> _n_2_t_7:", "def find_defining_class(obj, method_name):\n\tfor ty in type(obj).mro():\n\t\tif method_name in ty.__dict__:\n\t\t\treturn ty", "def extract_smali_method(method_name, smali_file):\n with open(smali_file, \"r\") as fd:\n smali_code = fd.read()\n\n smali_method = \"\"\n for line in smali_code.split(\"\\n\"):\n # if method has been found and end is in line\n if smali_method and \".end method\" in line:\n return smali_method\n\n # if method has been found then start saving\n elif \".method\" in line and method_name in line and not smali_method:\n smali_method = line\n\n elif smali_method:\n smali_method += \"{}\\n\".format(line)\n\n return smali_method", "def execute_static_method_dynamically(module_name, class_name, method_name, *args):\n module_ins, class_ins, result = None, None, None\n try:\n module_ins = get_module_instance(module_name) # importing modules and getting module instance\n class_ins = getattr(module_ins, class_name) # getting class instance\n method_ins = getattr(class_ins, method_name) # getting method instance\n result = method_ins(*args) # 
executing method\n except ImportError:\n print('Module {module_name} not found'.format(module_name=module_name))\n raise # TBD\n except AttributeError:\n if class_ins is None:\n print('Class {class_name}({module_name}) not found'.format(class_name=class_name, module_name=module_name))\n raise\n else:\n print('Method {method_name} of class {class_name}({module_name}) not found'.format\n (method_name=method_name, class_name=class_name, module_name=module_name))\n raise # TBD\n return result", "def get_run_class_method(attr_path_and_name: str) -> CallableT:\n\n def run_class_method(\n __self: Any,\n *args: Tuple[Any, ...],\n **kwargs: Any,\n ) -> object:\n # we want to get the return type which matches the attr_path_and_name\n # so we ask lib_ast for the return type name that matches out\n # attr_path_and_name and then use that to get the actual pointer klass\n # then set the result to that pointer klass\n return_type_name = __self.client.lib_ast.query(\n attr_path_and_name\n ).return_type_name\n resolved_pointer_type = __self.client.lib_ast.query(return_type_name)\n result = resolved_pointer_type.pointer_type(client=__self.client)\n\n # QUESTION can the id_at_location be None?\n result_id_at_location = getattr(result, \"id_at_location\", None)\n if result_id_at_location is not None:\n # first downcast anything primitive which is not already PyPrimitive\n (\n downcast_args,\n downcast_kwargs,\n ) = lib.python.util.downcast_args_and_kwargs(args=args, kwargs=kwargs)\n\n # then we convert anything which isnt a pointer into a pointer\n pointer_args, pointer_kwargs = pointerize_args_and_kwargs(\n args=downcast_args, kwargs=downcast_kwargs, client=__self.client\n )\n\n cmd = RunClassMethodAction(\n path=attr_path_and_name,\n _self=__self,\n args=pointer_args,\n kwargs=pointer_kwargs,\n id_at_location=result_id_at_location,\n address=__self.client.address,\n )\n __self.client.send_immediate_msg_without_reply(msg=cmd)\n\n return result\n\n return run_class_method", "def _get_method(payload):\n if payload and \"RequestMethod\" in payload and payload[\"RequestMethod\"]:\n return payload[\"RequestMethod\"]\n\n raise InvalidRequestException(\"Payload is missing RequestMethod\")", "def _add_method_obj(self, method):\n if isinstance(method, CppMethod):\n name = method.mangled_name\n elif isinstance(method, function.Function):\n name = method.custom_name\n assert isinstance(method.parameters[0], CppClassParameterBase)\n assert method.parameters[0].cpp_class is self, \\\n \"expected first parameter to be of class %s, but it is of class %s\" % \\\n (self.full_name, method.parameters[0].cpp_class.full_name)\n method.parameters[0].take_value_from_python_self = True\n method.module = self.module\n method.is_virtual = False\n method.is_pure_virtual = False\n method.self_parameter_pystruct = self.pystruct\n method.visibility = 'public'\n method.force_parse = method.PARSE_TUPLE_AND_KEYWORDS\n else:\n raise TypeError\n \n method.class_ = self\n\n if method.visibility == 'protected' and not method.is_virtual:\n helper_class = self.get_helper_class()\n if helper_class is not None:\n parent_caller = CppVirtualMethodParentCaller(method)\n parent_caller.helper_class = helper_class\n parent_caller.main_wrapper = method\n helper_class.add_virtual_parent_caller(parent_caller)\n elif method.visibility == 'public':\n if name == '__call__': # needs special handling\n method.force_parse = method.PARSE_TUPLE_AND_KEYWORDS\n\n try:\n overload = self.methods[name]\n except KeyError:\n overload = CppOverloadedMethod(name)\n 
overload.pystruct = self.pystruct\n self.methods[name] = overload\n\n ## add it....\n try:\n utils.call_with_error_handling(overload.add, (method,), {}, method)\n except utils.SkipWrapper:\n return\n\n\n # Grr! I hate C++. Overloading + inheritance = disaster!\n # So I ended up coding something which C++ does not in\n # fact support, but I feel bad to just throw away my good\n # code due to a C++ fault, so I am leaving here the code\n # disabled. Maybe some future C++ version will come along\n # and fix this problem, who knows :P\n if 0:\n # due to a limitation of the pybindgen overloading\n # strategy, we need to re-wrap for this class all\n # methods with the same name and different signature\n # from parent classes.\n overload._compute_all_wrappers()\n if isinstance(method, CppMethod):\n mro = self.get_mro()\n mro.next() # skip 'self'\n for cls in mro:\n try:\n parent_overload = cls.methods[name]\n except KeyError:\n continue\n parent_overload._compute_all_wrappers()\n for parent_method in parent_overload.all_wrappers:\n already_exists = False\n for existing_method in overload.all_wrappers:\n if existing_method.matches_signature(parent_method):\n already_exists = True\n break\n if not already_exists:\n new_method = parent_method.clone()\n new_method.class_ = self\n overload.add(new_method)\n \n else:\n self.nonpublic_methods.append(method)\n if method.is_virtual:\n self._have_pure_virtual_methods = None\n helper_class = self.get_helper_class()\n if helper_class is not None:\n helper_class.add_virtual_method(method)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
upload each file to logmuncher
def upload(self, filenames):
    print("I am going to upload the following files", filenames)
    for f in filenames:
        print("uploading", f)
        self.filenames = args.filenames
        payload = {
            'email': self.email,
            'title': os.path.basename(f)
        }
        files = {'file': open(f, 'rb')}
        r = requests.post("http://logs.uaventure.com/upload", data=payload, files=files)
        if r.status_code == requests.codes.ok:
            print("uploaded", f)
        else:
            print("error while uploading", f, "status code:", r.status_code)
            print("Dumping response:\n", r.raw)
            if self.verbose:
                print(r.text)
        time.sleep(1)
[ "def test_upload_run_logs(self):\n pass", "def log_record_upload(records: List[dict], endpoint: str) -> None:\n for record in records:\n log = \"Record: %s added to collection: %s on trial: %s on assay: %s\" % (\n record[\"file_name\"] if \"file_name\" in record else \" \",\n endpoint,\n record[\"trial\"],\n record[\"assay\"],\n )\n logging.info({\"message\": log, \"category\": \"FAIR-CELERY-RECORD\"})", "def upload_new_logfiles():\n if not CREDENTIALS_PROVIDED:\n log.warning(\"Credentials file not found! Can't upload results\")\n return\n\n # Make sure we're in the right directory\n if (os.getcwd() != os.path.dirname(os.path.realpath(__file__))):\n log.error(\"Please cd into the script directory before running it!\")\n sys.exit(1)\n\n # Setup FTP\n log.info(\"Connecting to FTP site\")\n try:\n ftp = FTP(timeout=5)\n ftp.connect(REMOTE_IP)\n log.info(\"FTP Connected\")\n ftp.login(USER, PASSWD)\n ftp.cwd('logs')\n\n sleep_logs = glob.glob('./logs/*.slp.csv')\n log.info(\"Found local logfiles: %s\" % sleep_logs)\n for sleep_log in sleep_logs:\n sleep_log_filename = os.path.basename(sleep_log)\n if os.stat(sleep_log).st_size < MIN_SIZE_FOR_UPLOAD:\n log.info(\"Skipping %s: sleeplog is < %s bytes \" % (sleep_log_filename, MIN_SIZE_FOR_UPLOAD))\n continue\n\n # Check if file is already on the server\n files_on_server = []\n ftp.retrlines('LIST %s' % sleep_log_filename, files_on_server.append)\n if files_on_server:\n log.info(\"Skipping %s: sleeplog is already on server\" % sleep_log_filename, MIN_SIZE_FOR_UPLOAD)\n continue\n\n # If not, upload it\n log.info(\"Uploading %s\" % sleep_log_filename)\n opened_sleep_log = open(sleep_log)\n transfer_cmd = 'STOR %s' % sleep_log_filename\n upload_result = ftp.storbinary(transfer_cmd, opened_sleep_log)\n if upload_result == '226 Transfer complete.':\n # Successful upload. 
remove the logfile\n log.info(\"Upload successful\")\n os.remove(sleep_log)\n else:\n log.warning(\"Upload unsuccessful\")\n\n ftp.close()\n log.info(\"FTP closed\")\n except socket_error:\n log.warning(\"FTP Connection refused\")\n except permission_error:\n log.warning(\"FTP invalid credentials\")\n except Exception as e:\n log.error(\"Unknown ftp error encountered: %s\" % e)", "def on_added_handler(file_list):\n print_file_list(file_list, \"Added\")\n for f in file_list:\n logger.info('Sending file \"{0}\"...'.format(f));\n send_file(f)", "def upload_log(t):\n global drive, http, log_id, log_recent_id, log_text, success, failure\n print(\"Uploading recent log...\")\n try:\n ## build most recent log entry\n total_files = str(success + failure)\n log_text = 'Successful downloads : ' + str(success) + '/' + total_files + '\\n' + 'Failed downloads: ' + str(failure) + '/' + total_files + '\\n\\n' + log_text\n log_text = str(t) + '\\n\\n' + 'Nightly update: ' + str(t.date()) + '\\n\\n' + log_text\n \n ## upload log_recent.txt\n drive_file = drive.CreateFile({'id': log_recent_id})\n drive_file.SetContentString(log_text)\n drive_file.Upload(param={\"http\": http})\n\n ## report success\n print(color('Recent log upload successful!', Colors.green))\n except:\n print(background('Recent log upload failed!', Colors.red))\n print(\"Appending recent log to full log...\")\n try:\n ## read in full log\n drive_file = drive.CreateFile({'id': log_id})\n tmpdir = tempfile.TemporaryDirectory()\n log_file = os.path.join(tmpdir.name, 'log.txt')\n drive_file.GetContentFile(log_file)\n with open(log_file, 'r') as full_log:\n full_log = full_log.read()\n\n ## append recent log to full log\n log_text = full_log + '\\n\\n' + log_text\n\n ## upload log.txt\n drive_file = drive.CreateFile({'id': log_id})\n drive_file.SetContentString(log_text)\n drive_file.Upload(param={\"http\": http}) \n\n ## report success\n print(color('Full log upload successful!', Colors.green)) \n except:\n print(background('Full log upload failed!', Colors.red))", "def exportLogFiles_(self, sender):\n move_dir = \"/Users/charles/Desktop/\"\n logs_list = [n for n in os.listdir(\"logs\") if n.endswith(\".log\")]\n for n in logs_list:\n shutil.move(\"logs/\" + n, move_dir)\n shutil.move()\n metatoneClassifier.start_log()", "def upload_large_files(self):\n for local_file, parent in self.large_files:\n self.settings.watcher.transferring_item(local_file, increment_amt=0, override_msg_verb='checking')\n hash_data = local_file.calculate_local_hash()\n if local_file.hash_matches_remote(hash_data):\n self.file_already_uploaded(local_file)\n else:\n self.settings.watcher.transferring_item(local_file, increment_amt=0)\n self.process_large_file(local_file, parent, hash_data)", "def collect_log_files(self, jobs, integration_step):\n log_file = pjoin(self.me_dir, 'Events', self.run_name, \n 'alllogs_%d.html' % integration_step)\n outfile = open(log_file, 'w')\n\n content = ''\n content += '<HTML><BODY>\\n<font face=\"courier\" size=2>'\n for job in jobs:\n # put an anchor\n log=pjoin(job['dirname'],'log_MINT%s.txt' % integration_step)\n content += '<a name=%s></a>\\n' % (os.path.dirname(log).replace(\n pjoin(self.me_dir,'SubProcesses'),''))\n # and put some nice header\n content += '<font color=\"red\">\\n'\n content += '<br>LOG file for integration channel %s, %s <br>' % \\\n (os.path.dirname(log).replace(pjoin(self.me_dir,\n 'SubProcesses'), ''), \n integration_step)\n content += '</font>\\n'\n #then just flush the content of the small log inside the big 
log\n #the PRE tag prints everything verbatim\n with open(log) as l:\n content += '<PRE>\\n' + l.read() + '\\n</PRE>'\n content +='<br>\\n'\n outfile.write(content)\n content=''\n\n outfile.write('</font>\\n</BODY></HTML>\\n')\n outfile.close()", "def cloud_sync(self):\n for record in self.upload_candidates():\n uploads = self.plowshare.upload(\n self.storage.path(record.name),\n self.RedundancyLevel)\n\n # Probably not a good idea to have the serialization code in here.\n info = json.dumps(payload.to_dict(payload.build(\n record.name,\n record.hash,\n record.size,\n uploads)))\n\n self.file_database.set_payload(record.hash, info)\n self.meter.measure_outgoing(record.size * self.RedundancyLevel)", "def test_all_uploads(self):\n pass", "def finishUploads(self):\n for f in self._files:\n self._files[f].finishUploads()", "def _UploadFiles(upload_dir, files):\n if files:\n google_storage_upload_dir = os.path.join(_RENDER_TEST_BUCKET, upload_dir)\n cmd = [os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gsutil.py'),\n '-m', 'cp']\n cmd.extend(files)\n cmd.append(google_storage_upload_dir)\n cmd_helper.RunCmd(cmd)", "def ingest(self, args):\n self.logger.info(\"ingesting...\")\n for dirpath, _, filenames in os.walk(args.dir):\n for f in filenames:\n song_path = os.path.abspath(os.path.join(dirpath, f))\n song = fzsong.SongEntry(song_path, title=f, album=args.dir)\n\n self.databaser.write(song)", "def _check_and_upload_rotated_logs(self):\n dir_content = os.listdir(self._watch_dir)\n cur_files = []\n for f in dir_content:\n full_path = os.path.join(self._watch_dir, f)\n if \"-json.log.\" in f and os.path.isfile(full_path) and self._target_container_id in f:\n cur_files.append(full_path)\n # .log.x files will be arranged from earliest to most recent to ensure timestamp\n cur_files.sort(reverse=True)\n\n logger.debug(\"Current log directory: %s, content: %s\",\n self._watch_dir, cur_files)\n for f in cur_files:\n self._persist_log_artifact(f)", "def save_logs(self, log_count):\n with open(self.FILE_PATH) as log_file:\n with tqdm(total=log_count, desc='save to database', ) as pbar:\n record_list = []\n record_count = 1\n for line in log_file:\n\n record = self.parse_line(line)\n if record is None:\n continue\n record_list.append(LogItem(\n ip=record['ip'],\n datetime=self.parse_date(record['date']),\n method=record['method'],\n uri=record['uri'],\n status_code=record['status'],\n body_size=record['body_size'],\n user_agent=record['agent']\n ))\n\n if record_count == self.MASS_SAVE_COUNT:\n LogItem.objects.bulk_create(record_list)\n record_list = []\n record_count = 0\n pbar.update(1)\n record_count += 1", "def _copy_job_logs(self):\n basedir = os.path.join(self.config['AUTOCMS_BASEDIR'], self.testname)\n webdir = os.path.join(self.config['AUTOCMS_WEBDIR'], self.testname)\n for log in self.logs_to_copy:\n if log is None:\n continue\n src_file = os.path.join(basedir, log)\n dst_file = os.path.join(webdir, log)\n if not os.path.isfile(src_file):\n continue\n # dont copy logs already at the destination\n # not only does it waste time, they will not be\n # removed until much later as their mtime is now\n if os.path.isfile(dst_file):\n continue\n shutil.copy(src_file, dst_file)", "def do_upload():\n with open(filename) as upload_file:\n line_cnt = 0\n for line in upload_file:\n line_cnt += 1\n line = line.rstrip()\n\n if line[0] == ':':\n dev.write(line + \"\\r\")\n response = dev.read_line(timeout=5.0, purge_buffer=True)\n if debug:\n stage_callback(Firmware.STAGE_DEBUG, data=\"line={0} - line={1} 
response={2}\".format(line_cnt, line, response));\n\n if '!ce' in response:\n raise UploadChecksumError(\"Checksum error on line \" + str(line_cnt) + \" of \" + filename);\n\n elif '!no' in response:\n raise UploadError(\"Incorrect data sent to bootloader.\")\n\n elif '!ok' in response:\n break\n\n else:\n if progress_callback is not None:\n progress_callback(Firmware.STAGE_UPLOADING)\n\n time.sleep(0.0)", "def testUploadUsesBatchSize(self):\n client = DatasetImporter(1)\n client.upload(u'user',\n [{'about': u'hello world', 'values': {u'user/bar': 13}},\n {'about': u'wubble', 'values': {u'user/quux': 42}}])\n self.assertTrue(self.log.getvalue().startswith(\n 'Importing 2 new objects.\\nImported 1/2 new objects.\\n'\n 'Imported 2/2 new objects.\\nImported 2 objects in '))", "def update_files():\r\n set_to_file(Crawler.queue, Crawler.queueFile)\r\n set_to_file(Crawler.crawled, Crawler.crawledFile)\r\n external_to_file(Crawler.external, Crawler.externalFile)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Randomly generates a list of artists which the target_user has never heard. It compares the target_user's artists against those of a randomly selected user.
def recommend_random_artists_RB(UAM, u_idx, train_aidx):
    all_idx = range(0, UAM.shape[0])
    random_u_idx = random.sample(np.setdiff1d(all_idx, [u_idx]), 1)[0]

    # cannot generate the own user
    if random_u_idx == u_idx:
        recommend_random_artists_RB(UAM, u_idx)

    u_aidx = np.nonzero(UAM[u_idx,:])[0]
    random_u_aidx = np.nonzero(UAM[random_u_idx,:])[0]

    # this will return new artists the target_user never heard about
    result = np.setdiff1d(random_u_aidx, u_aidx)

    if len(result) > MAX_ARTIST:
        result = result[:MAX_ARTIST]

    return result
[ "def recommend_CF_our(UAM, user_id, artists):\n\n\n users = helper.read_csv(USERS_FILE)\n artists_array = []\n neighbor_array = get_user_neighbors(UAM, user_id)['neighbor_array']\n sim_users = get_user_neighbors(UAM, user_id)['sim_users']\n artist_idx_u = artists # indices of artists user u listened to\n total_artist_rating = {}\n\n for neighbor_index, neighbor in enumerate(neighbor_array, start = 1):\n a_neighbor = neighbor_array[-(neighbor_index)]\n\n if VERBOSE and VERBOSE_DEPTH == 2:\n print ' The ' + helper.number_to_text(neighbor_index) + ' closest user to ' + ' is ' + str(a_neighbor)\n\n artist_idx_n = np.nonzero(UAM[a_neighbor,:]) # indices of artists user u's neighbor listened to\n artists_array += artist_idx_n[0].tolist()\n\n artists_unique = np.unique(artists_array)\n # artists_unique = np.setdiff1d(artist_idx_u, artists_unique)\n\n for artist in artists_unique:\n artist_count_of_neighbors = 0\n\n for neighbor_index, neighbor in enumerate(neighbor_array, start = 1):\n playcount_of_user = UAM[neighbor, artist]\n rating = playcount_of_user * sim_users[neighbor]\n\n if artist in total_artist_rating:\n total_artist_rating[artist] += rating\n else:\n total_artist_rating[artist] = rating\n\n # Return list of 10 recommended artist indices\n return sorted(total_artist_rating, key=total_artist_rating.__getitem__, reverse=True)[:10]", "def random_duel(self, players):\n if self.marksmanships[players[0]] == self.marksmanships[players[1]]:\n return players\n\n self.duel_count += 1\n while len(players) == 2 :\n random.shuffle(players)\n starting_player = players[0]\n if flip(self.marksmanships[starting_player]):\n other_player = players[1]\n players.remove(other_player)\n self.G.remove_node(other_player)\n\n return players", "def generate_suggested_matches(self):\n if self.gender == 'M': # if user is a guy\n lower_diff = -4\n upper_diff = 3\n vips = AppConfig.get('vip_gids')# show guys the vip girls\n else: # its a girl\n lower_diff = -2\n upper_diff = 6\n vips = AppConfig.get('vip_mids') # show girls the vip guys\n lower_bound = self.age + lower_diff\n upper_bound = self.age + upper_diff\n in_age_range = list(User.objects.no_dereference().filter(gender=self.attracted_to, age__gte=lower_bound, age__lte=upper_bound)[:35])\n random.shuffle(in_age_range)\n suggested_matches = in_age_range[:3] # select up to three randomly\n for suggested_match in suggested_matches:\n self.suggested_matches.append(suggested_match)\n if vips:\n vip_id = random.choice(vips)\n vip_user = User.objects.no_dereference().get(id=vip_id)\n if vip_user and vip_user not in suggested_matches:\n self.suggested_matches.append(vip_user)\n self.save()\n return self", "def test_user_list_starred(self):\n pass", "def exp_players():\n for experience in players_mod:\n for k,v in experience.items():\n if 'experience' in k:\n if v == True:\n exper_players.append(experience)\n elif v == False:\n non_exper_players.append(experience)\n \"\"\"Randomly shuffles the list of Experience and Inexperience players\"\"\"\n random.shuffle(exper_players)\n random.shuffle(non_exper_players)", "def get_user_seeds(self, user):\n matching_list = []\n matching_list = Seed.objects.filter(owner=user).order_by(\n '-creation_date'\n )\n return matching_list", "def test_user_list_with_duplicates(self):\n self._create_release(user='userOne')\n self._create_release(user='userOne')\n self._create_release(user='userTwo')\n\n result = orlo.queries.user_list().all()\n self.assertEqual(len(result), 2)\n users = [r[0] for r in result]\n self.assertIn('userOne', 
users)\n self.assertIn('userTwo', users)", "def data_scientists_who_like(target_interest):\n\treturn[user_id \n\t\t\tfor user_id, interest in interests \n\t\t\tif interest == target_interest\n\t]", "def get_user_examples(ratings_df: pd.DataFrame,\n user_id: int,\n max_examples_per_user: Optional[int] = None) -> List[Any]:\n # Get subset of ratings_df belonging to a particular user.\n user_subset = ratings_df[ratings_df.UserID == user_id]\n user_examples = [(user_subset.UserID.iloc[i], user_subset.MovieID.iloc[i],\n user_subset.Rating.iloc[i])\n for i in range(user_subset.shape[0])]\n np.random.seed(NP_RANDOM_SEED)\n np.random.shuffle(user_examples)\n\n # Optionally filter number of examples per user, taking the first\n # max_examples_per_user examples.\n if max_examples_per_user is not None:\n user_examples = user_examples[:max_examples_per_user]\n\n return user_examples", "def scrape_artists():\r\n\tfor i in range(1, 14):\r\n\t\tif i > 1:\r\n\t\t\tresponse = requests.get(base_url + f'/list/{i}')\r\n\t\telse:\r\n\t\t\tresponse = requests.get(base_url)\r\n\t\thtml = response.text\r\n\t\thtml = html.split('class=\"item-name\">\\r\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t')\r\n\t\tfor div in html[1:]:\r\n\t\t\tcorr_div = div.split('\\r\\n\\t')\r\n\t\t\tname = corr_div[0]\r\n\t\t\tif name.lower() not in artists:\r\n\t\t\t\tartists.append(name.lower())", "def get_followed_artists_of_user(self):\n spotify = self.__s\n result = []\n\n process = True\n lastArtistId = None\n\n # Do while there are more followed artists\n while process:\n\n # Get next n artists\n artistResult = spotify.current_user_followed_artists(50,\n lastArtistId)\n artists = artistResult[\"artists\"][\"items\"]\n\n if len(artists) == 0:\n process = False\n else:\n # Iterate over next 20 artists\n for i, artist in enumerate(artists):\n # Remember last artist\n lastArtistId = artist[\"id\"]\n\n # Create SpotifyArtist\n spotifyArtist = SpotifyArtist()\n spotifyArtist.id = artist[\"id\"]\n spotifyArtist.name = artist[\"name\"]\n\n # Append to result\n result.append(spotifyArtist)\n\n # LOGGING\n log(\"%s artist found\" % (len(result)))\n\n return result", "def like_media_from_to_follow(self):\n user_id = self.to_follow.random()\n while self.get_user_info(user_id)[\"is_private\"]:\n user_id = self.to_follow.random()\n n = random.randint(2, 4)\n username = self.get_user_info(user_id)[\"username\"]\n print(f\"Liking {n} medias from `{username}`.\")\n medias = self.bot.get_user_medias(user_id)\n self.bot.like_medias(random.sample(medias, n))\n self.to_follow.remove(user_id)", "def test_mentions(self):\n self.handler = HighfiveHandlerMock(\n Payload({}), repo_config=self.fakes['config']['mentions']\n ).handler\n (chosen_reviewers, mentions) = self.choose_reviewers(\n self.fakes['diff']['mentions'], \"nikomatsakis\",\n )\n assert set([\"pnkfelix\"]) == chosen_reviewers\n # @ehuss should not be listed here\n assert set([\"@pnkfelix\", \"@GuillaumeGomez\"]) == mentions", "def test_multiple_liking(self):\n user1 = api.user.create(\n email='john@plone.org',\n username='user1'\n )\n user2 = api.user.create(\n email='jane@plone.org',\n username='user2'\n )\n all_userids = [\n user1.getId(),\n user2.getId(),\n 'admin',\n ]\n doc1 = api.content.create(\n container=self.portal,\n type='Document',\n id='doc1',\n title='Doc 1'\n )\n doc2 = api.content.create(\n container=self.portal,\n type='Document',\n id='doc2',\n title='Doc 2'\n )\n\n util = getUtility(ILikesTool)\n # 1. All users like doc1\n # 2. user1 and user2 like doc2\n # 3. 
Check counts and who has liked\n util.like(\n user_id=all_userids,\n item_id=doc1.UID(),\n )\n self.assertEqual(len(util._user_uuids_mapping), 3)\n self.assertEqual(len(util._uuid_users_mapping), 1)\n self.assertEqual(\n [doc1.UID()],\n sorted(list(util._uuid_users_mapping))\n )\n\n user1_id = user1.getId()\n user2_id = user2.getId()\n util.like(\n user_id=[user1_id, user2_id],\n item_id=doc2.UID(),\n )\n results = util.get_users_for_item(doc2.UID())\n self.assertEqual(len(results), 2)\n self.assertIn(user1_id, results)\n self.assertIn(user2_id, results)", "def test_user_current_list_starred(self):\n pass", "def assign_roles(self):\n \n players_list = list(self.players.keys()) # grab all players\n roles_list = locations[self.location] # list of roles from our locations dictionary\n \n self.spy = np.random.choice(players_list) #randomly choose spy\n \n if len(players_list)-1 > len(roles_list): #If there are more non-spies than there are non-spy roles, extend the roles list\n playerlen = len(players_list)-1\n rolelen = len(roles_list)\n multiplier = playerlen//rolelen # How many copies of the entire role list can we fill for sure?\n remainder = playerlen%rolelen # How many individual additional roles will we have to add to assign all players a role if we have non-whole multiplier?\n self.roles = roles_list*multiplier\n self.roles.extend(list(np.random.choice(roles_list, remainder, replace=False))) #Create list of roles that minimizes duplicates and is equal to number of players\n\n self.roles = np.random.choice(self.roles, len(self.roles), replace=False) # Shuffle the roles order\n else: # If there are less players than roles, just use the non-extended roles list\n self.roles = roles_list\n n=0\n for player in players_list: # Assign roles from shuffled roles list or 'Spy' to user who is spy\n if player != self.spy:\n self.players_roles[player] = self.roles[n]\n n+=1\n else:\n self.players_roles[player] = 'Spy'", "def artists(self, artists):\n\n tlist = [self._get_id(\"artist\", a) for a in artists]\n return self._get(\"artists/?ids=\" + \",\".join(tlist))", "def test_unpopular(self):\n new_user = SocialMediaUser('Bryce', 'US')\n self.assertFalse(new_user.is_popular())\n for _ in range(randint(1, 100)):\n new_user.receive_upvote()\n self.assertFalse(new_user.is_popular())", "def get_queryset(self):\n\n user = self.request.user\n\n # get all movies that user has not marked as seen nor added to their watchlist\n # remember that 'user_notes' is the related name defined on the Movie model for the m2m connection to User\n possible_movies = Movie.objects.exclude(user_notes=user, usermovielink__seen=True).exclude(user_notes=user,\n usermovielink__watch_list=True)\n\n # pick three movies from possible randomly (sample will not choose duplicates)\n three_chosen = sample(list(possible_movies), 3)\n\n return three_chosen" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that implements a CF recommender. It takes as input the UAM, metadata (artists and users), the index of the seed user (to make predictions for) and the indices of the seed user's training artists. It returns a list of recommended artist indices
def recommend_CF_our(UAM, user_id, artists):

    users = helper.read_csv(USERS_FILE)
    artists_array = []
    neighbor_array = get_user_neighbors(UAM, user_id)['neighbor_array']
    sim_users = get_user_neighbors(UAM, user_id)['sim_users']
    artist_idx_u = artists  # indices of artists user u listened to
    total_artist_rating = {}

    for neighbor_index, neighbor in enumerate(neighbor_array, start = 1):
        a_neighbor = neighbor_array[-(neighbor_index)]

        if VERBOSE and VERBOSE_DEPTH == 2:
            print ' The ' + helper.number_to_text(neighbor_index) + ' closest user to ' + ' is ' + str(a_neighbor)

        artist_idx_n = np.nonzero(UAM[a_neighbor,:])  # indices of artists user u's neighbor listened to
        artists_array += artist_idx_n[0].tolist()

    artists_unique = np.unique(artists_array)
    # artists_unique = np.setdiff1d(artist_idx_u, artists_unique)

    for artist in artists_unique:
        artist_count_of_neighbors = 0

        for neighbor_index, neighbor in enumerate(neighbor_array, start = 1):
            playcount_of_user = UAM[neighbor, artist]
            rating = playcount_of_user * sim_users[neighbor]

            if artist in total_artist_rating:
                total_artist_rating[artist] += rating
            else:
                total_artist_rating[artist] = rating

    # Return list of 10 recommended artist indices
    return sorted(total_artist_rating, key=total_artist_rating.__getitem__, reverse=True)[:10]
[ "def recommend_random_artists_RB(UAM, u_idx, train_aidx):\n all_idx = range(0, UAM.shape[0])\n random_u_idx = random.sample(np.setdiff1d(all_idx, [u_idx]), 1)[0]\n\n # cannot generate the own user\n if random_u_idx == u_idx:\n recommend_random_artists_RB(UAM, u_idx)\n\n u_aidx = np.nonzero(UAM[u_idx,:])[0]\n random_u_aidx = np.nonzero(UAM[random_u_idx,:])[0]\n\n # this will return new artists the target_user never heard about\n result = np.setdiff1d(random_u_aidx, u_aidx)\n\n if len(result) > MAX_ARTIST:\n result = result[:MAX_ARTIST]\n\n return result", "def get_recommendations(users_to_recommend):\n\tdata = get_all_data()\n\tprint \"loaded data\"\n\tprecomputed_predictions = loadReccFile('ratings')\n\tprint \"loaded precomputed predictions\"\n\tcf = collaborative_filtering(data, precomputed_predictions)\n\tprint \"initialized collaborative filter model\"\n\tfor user_id in users_to_recommend:\n\t\trecommendation = cf.recommendation(user_id)\n\t\tprint \"Recommendations for user : \" + str(user_id)\n\t\tprint [recc[0] for recc in recommendation]", "def calculate_recommendations(output_filename, model_name=\"als\"):\n # train the model based off input params\n artists, users, plays = get_lastfm()\n\n # create a model from the input data\n model = get_model(model_name)\n\n # if we're training an ALS based model, weight input for last.fm\n # by bm25\n if model_name.endswith(\"als\"):\n # lets weight these models by bm25weight.\n logging.debug(\"weighting matrix by bm25_weight\")\n plays = bm25_weight(plays, K1=100, B=0.8)\n\n # also disable building approximate recommend index\n model.approximate_similar_items = False\n\n # this is actually disturbingly expensive:\n plays = plays.tocsr()\n user_plays = plays.T.tocsr()\n\n logging.debug(\"training model %s\", model_name)\n start = time.time()\n model.fit(user_plays)\n logging.debug(\"trained model '%s' in %0.2fs\", model_name, time.time() - start)\n\n # generate recommendations for each user and write out to a file\n start = time.time()\n with tqdm.tqdm(total=len(users)) as progress:\n with codecs.open(output_filename, \"w\", \"utf8\") as o:\n batch_size = 1000\n to_generate = np.arange(len(users))\n for startidx in range(0, len(to_generate), batch_size):\n batch = to_generate[startidx : startidx + batch_size]\n ids, scores = model.recommend(\n batch, user_plays[batch], filter_already_liked_items=True\n )\n for i, userid in enumerate(batch):\n username = users[userid]\n for other, score in zip(ids[i], scores[i]):\n o.write(f\"{username}\\t{artists[other]}\\t{score}\\n\")\n progress.update(len(batch))\n logging.debug(\"generated recommendations in %0.2fs\", time.time() - start)", "def recommend_per_user(\n self, user, n_recommendations, filter_out_interacted_items=True\n ):\n\n u_recommended_items = []\n if self.user_sequences.get(user) is not None:\n u_items = self.user_sequences.get(user)\n u_recommended_items = list(\n list(\n zip(\n *self.model.wv.most_similar(\n u_items,\n topn=n_recommendations\n + len(u_items) * filter_out_interacted_items,\n )\n )\n )[0]\n )\n if filter_out_interacted_items:\n u_recommended_items = [\n i for i in u_recommended_items if i not in u_items\n ][:n_recommendations]\n return (\n [user]\n + u_recommended_items\n + [None] * (n_recommendations - len(u_recommended_items))\n )", "def recommend_for(self, uid, N):\n if uid not in self.users.keys():\n return None\n\n candidate_list = list()\n algorithms = list()\n weights = list()\n mixed_candidate_list = list()\n\n # Multiple algorithms\n 
algorithms.append(self.recommend_preference_content)\n algorithms.append(self.recommend_title_rel)\n\n for algo in algorithms:\n candidate_list.append(algo(uid))\n\n # Assign weights\n count = len(algorithms)\n for i in range(count):\n weights.append(1.0 / (count * 1.0))\n\n # Mix results\n for i in range(count):\n n = int(N * weights[i])\n mixed_candidate_list += candidate_list[i][:n]\n\n # Merge duplicated items and filter out improper items\n tmp_dic = dict()\n for iid, val in mixed_candidate_list:\n if iid not in tmp_dic.keys():\n tmp_dic[iid] = 0\n tmp_dic[iid] += val\n mixed_candidate_list.clear()\n for iid, val in tmp_dic.items():\n if iid not in self.users[uid].dislike_set and iid not in self.users[uid].click_record:\n mixed_candidate_list.append((iid, val))\n\n if len(mixed_candidate_list) == 0:\n return []\n\n # Sort items\n if len(self.X_train) > 0 and len(self.Y_train) > 0:\n featured_candidate_df = self.extract_features(uid, mixed_candidate_list)\n final_rec_list = self.ml_sort(uid, featured_candidate_df)\n else:\n final_rec_list = sorted(mixed_candidate_list, key=lambda x: x[1], reverse=True)\n self.extract_features(uid, final_rec_list)\n\n return final_rec_list", "def recommend(self, users_df, items_df, n_recommendations=1):\n\n # Clean previous recommendations (iloc could be used alternatively)\n self.recommender_df = self.recommender_df[:0]\n\n # Handle users not in the training data\n\n # Map item ids\n\n items_df = items_df.copy()\n items_df.replace({'item_id': self.item_id_mapping}, inplace=True)\n\n # Generate recommendations\n\n for idx, user in users_df.iterrows():\n recommendations = []\n\n user_id = user['user_id']\n\n if user_id in self.user_id_mapping:\n mapped_user_id = self.user_id_mapping[user_id]\n\n x_list = self.interactions_df.loc[self.interactions_df['user_id'] == mapped_user_id]['item_id'].tolist()\n final_scores = np.sum(self.scores[x_list], axis=0)\n\n # Choose n recommendations based on highest scores\n if not self.should_recommend_already_bought:\n final_scores[x_list] = -1e100\n\n chosen_ids = np.argsort(-final_scores)[:n_recommendations]\n\n for item_id in chosen_ids:\n recommendations.append(\n {\n 'user_id': self.user_id_reverse_mapping[mapped_user_id],\n 'item_id': self.item_id_reverse_mapping[item_id],\n 'score': final_scores[item_id]\n }\n )\n else: # For new users recommend most popular items\n for i in range(n_recommendations):\n recommendations.append(\n {\n 'user_id': user['user_id'],\n 'item_id': self.item_id_reverse_mapping[self.most_popular_items[i]],\n 'score': 1.0\n }\n )\n\n user_recommendations = pd.DataFrame(recommendations)\n\n self.recommender_df = pd.concat([self.recommender_df, user_recommendations])\n\n return self.recommender_df", "def get_user_recommend(self, user_id, overall_recommend, song_df):\n user_score = pd.DataFrame(overall_recommend[user_id]['recommend']).rename(columns={0: 'song_id', 1: 'score'})\n user_recommend = pd.merge(user_score,\n song_df[['song_id', 'title', 'release', 'artist_name', 'song']].drop_duplicates(),\n on='song_id', how='left')\n return (user_recommend)", "def recommend_items(self,dataset,u,max_items=10,return_scores=True,item_features=None):\r\n r = self.predict_ratings(u,item_features=item_features)\r\n return self._get_recommendations_from_predictions(r,dataset,u,u+1,max_items,return_scores)[0]", "def get_recommendations(self, row_or_user, n, rating_limit=8, filters={}):\n u = AnnoyIndex(len(self.games_list))\n try:\n u.load(self.file_names['annoy_index'])\n except FileNotFoundError:\n 
self.build_annoy_index()\n u.load(self.file_names['annoy_index'])\n\n if isinstance(row_or_user, str):\n index = self.user_indexes[row_or_user]\n vec = self.data[index]\n else:\n vec = row_or_user\n\n r_indexes = []\n r_users = []\n i = 0\n generated_n = 10\n while len(r_indexes) < n:\n # If the number of generated neighbors is higher than\n # the number of users we have, we cannot generate as \n # many recommendations as desired\n if (generated_n) > len(self.users_list):\n break\n # Rapidly increase the number of generated neighbors\n generated_n = generated_n * generated_n\n if isinstance(row_or_user, str):\n u_neighbors = u.get_nns_by_item(index, generated_n)\n else:\n u_neighbors = u.get_nns_by_vector(row_or_user, generated_n)\n\n while i < len(u_neighbors):\n neighbor = self.data[u_neighbors[i]]\n\n for k in range(len(neighbor)):\n if (neighbor[k] > rating_limit and \n vec[k] == 0 and \n k not in r_indexes and \n self.__filter_results(k, filters)):\n # Found a new game recommendation\n r_indexes.append(k)\n r_users.append(neighbor)\n if len(r_indexes) >= n:\n break\n i = i + 1\n if len(r_indexes) >= n:\n break\n \n # Aggregate the similar users, and average the ratings they have given to the \n # Recommended games, there might be some extra games in this aggregated results,\n # so we have to filter them again, and pick only the top n from those.\n a_neighbor = self.__average_similar_user_ratings(r_users)\n new_games = []\n for i in range(len(a_neighbor)):\n if vec[i] == 0 and a_neighbor[i] != 0 and self.__filter_results(i, filters):\n new_games.append((i, a_neighbor[i]))\n new_games.sort(key=lambda x: x[1], reverse=True)\n new_games = new_games[:n]\n \n for index in new_games:\n print(\"Game: {0:40} Simlar users rating: {1:4.3}\".format(self.games_list[index[0]], index[1]))\n return new_games", "def recommendations():\n song_title = request.values['song_title']\n suggestions = recommended_songs(str(song_title),\n features_df,\n knn_spotify,\n data_path)\n return render_template('recommendations.html',\n song_title=song_title,\n suggestions=suggestions)", "def recommend(self, user_ratings, ratings_matrix, k=10, creative=False):\n #######################################################################################\n # TODO: Implement a recommendation function that takes a vector user_ratings #\n # and matrix ratings_matrix and outputs a list of movies recommended by the chatbot. #\n # Do not use the self.ratings matrix directly in this function. #\n # #\n # For starter mode, you should use item-item collaborative filtering #\n # with cosine similarity, no mean-centering, and no normalization of scores. 
#\n #######################################################################################\n # Populate this list with k movie indices to recommend to the user.\n recommendations = []\n new_ratings = []\n zero_ratings = np.flatnonzero(user_ratings == 0)\n non_zero_ratings = np.flatnonzero(user_ratings)\n for i in zero_ratings:\n weighted_sum = 0\n for j in non_zero_ratings:\n cos_sim = self.similarity(ratings_matrix[i], ratings_matrix[j])\n weighted_sum += user_ratings[j] * cos_sim\n new_ratings.append((weighted_sum, i))\n recommendations = [r[1] for r in sorted(new_ratings, reverse = True)][:k]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return recommendations", "def recommend_anime_user(self, anime: list[tuple[str, int]]) -> list[tuple[str, float]]:\r\n\r\n self.add_vertex('me', '')\r\n for show in anime:\r\n self.add_edge('me', show[0], show[1])\r\n # adds user to UserGraph\r\n\r\n user_totals = []\r\n for user in self.get_all_vertices('user'):\r\n result = self.get_similarity_score('me', user, 'broad')\r\n user_totals.append((user, result))\r\n # finds similarity of this user to all other users in UserGraph\r\n\r\n user_totals.sort(key=lambda x: x[1], reverse=True)\r\n top_users = user_totals[:15]\r\n # Takes 15 users most similar to this user\r\n\r\n animes = [self.vertices[x[0]] for x in anime] # list of only the anime names in anime\r\n anime_options = dict()\r\n\r\n for user in top_users:\r\n for show in self.vertices[user[0]].neighbours:\r\n if show not in animes:\r\n addon = user[1] * (self.vertices[user[0]].neighbours[show] - 5)\r\n if show not in anime_options:\r\n anime_options[show] = [addon, 1.0]\r\n else:\r\n anime_options[show][0] += addon\r\n anime_options[show][1] += 1\r\n # Looking at all shows the top 15 users are neighbours of, finds those\r\n # with the highest overall (weight * similarity of this user)\r\n\r\n anime_scores = [(x.item, anime_options[x][0] / anime_options[x][1]) for x in anime_options]\r\n # finds shows with highest average scoring by the users, excluding shows of which only\r\n # one top user reviewed. 
This is to allow this algorithm to recommend more\r\n # niche shows, by finding users with very similar tastes, and recommending shows they liked\r\n # even if they are less well known\r\n\r\n return anime_scores", "def recommend(\n self: \"BaseGamesRecommender\",\n users: Iterable[UserKeyType],\n **kwargs,\n ) -> DataFrame:", "def _recommend_user(self, ratings, user, N):\n scores = self.predict_user(user)\n\n # compute the top N items, removing the items that the user already liked\n # from the result and ensure that we don't get out of bounds error when\n # we ask for more recommendations than that are available\n liked = set(ratings[user].indices)\n count = N + len(liked)\n if count < scores.shape[0]:\n\n # when trying to obtain the top-N indices from the score,\n # using argpartition to retrieve the top-N indices in\n # unsorted order and then sort them will be faster than doing\n # straight up argort on the entire score\n # http://stackoverflow.com/questions/42184499/cannot-understand-numpy-argpartition-output\n ids = np.argpartition(scores, -count)[-count:]\n best_ids = np.argsort(scores[ids])[::-1]\n best = ids[best_ids]\n else:\n best = np.argsort(scores)[::-1]\n\n top_n = list(islice((rec for rec in best if rec not in liked), N))\n return top_n", "def get_recommendation(seeds):\n artists_seeds = ','.join(seeds['artists'])\n tracks_seeds = ','.join(seeds['tracks'])\n\n MARKET = 'US'\n LIMIT = '20'\n\n URL_BASE = 'https://api.spotify.com/v1/recommendations?limit={}&market={}&seed_artists={}&seed_tracks={}'\n\n uri = URL_BASE.format(LIMIT, MARKET, artists_seeds, tracks_seeds)\n\n recommendation = request_spotify(uri)\n\n return recommendation.json()['tracks']", "def recommend_items(self, seed_item_name: str, similarity_metric: str, cutoff: int) -> DataFrame:\n\n \n #check if nubmer of ratings of the seed item is less than specified cutoff;\n #if so, compute list by content-based recommendation; else, collaborative filtering\n #raise value error if an appropriate similarity metric is not provided\n \n if self.ratings[\"Number_of_ratings\"][seed_item_name] < cutoff:\n \n if similarity_metric == \"cos\":\n return self.cosine(seed_item_name, self.latent_content_features)\n\n elif similarity_metric == \"corr\":\n return self.corr(seed_item_name, self.latent_content_features)\n\n else: raise ValueError(\"The similarity metric must be 'corr', for correlation, or 'cos', for cosine similarity.\")\n\n else:\n\n if similarity_metric == \"cos\":\n return self.cosine(seed_item_name, self.item_matrix_training)\n\n elif similarity_metric == \"corr\":\n return self.corr(seed_item_name, self.item_matrix_training)\n\n else: raise ValueError(\"The similarity metric must be 'corr', for correlation, or 'cos', for cosine similarity.\")", "def return_x_recommendations_based_on_input_seed(sp, seed_artists=None, seed_genres=None, seed_tracks=None, limit=20, country=None):\n if seed_artists == None and seed_tracks == None and seed_genres == None:\n raise ValueError(\"Must include input data about at least one of the following: seed_artists, seed_genres and seed_tracks.\")\n\n legal_recommendations = sp.recommendation_genre_seeds()['genres']\n for genre_seed in seed_genres:\n if genre_seed not in legal_recommendations:\n raise ValueError(\"Illegal genre seed\")\n\n try:\n recommendations = sp.recommendations(seed_artists=seed_artists, seed_genres=seed_genres, seed_tracks=seed_tracks, limit=limit, country=country)\n except Exception:\n print(\"Found no results based on your search / other error\")\n return 
-1\n\n track_information_list_json = recommendations['tracks']\n\n recommendation_list_to_return = []\n\n for track_info in track_information_list_json:\n artists = track_info['album']['artists']\n artist_list = []\n for artist in artists:\n temp_artist_id = artist['id']\n temp_artist_name = artist['name']\n artist_list.append([temp_artist_name, temp_artist_id])\n album_name = track_info['name']\n album_id = track_info['id']\n track_id = track_info['id']\n track_name = track_info['name']\n track_popularity = track_info['popularity']\n duration_ms = track_info['duration_ms']\n\n recommendation_list_to_return.append([\n track_name,\n track_id,\n track_popularity,\n duration_ms,\n album_name,\n album_id,\n artist_list,\n ])\n\n return recommendation_list_to_return", "def recommend(self, num_rec_items):\n predicted = np.inner(self.user_mtx, self.item_mtx) * -1\n predicted[self.data[:, 0], self.data[:, 1]] *= 0\n self.result = pd.DataFrame(predicted.argsort()[:, :num_rec_items],\n columns=['top%s' % i\n for i in range(1, num_rec_items + 1)],\n index=np.arange(len(self.user_mtx)))\n self.result.to_csv('result/recommend_np.csv')", "def get_recommendations(sorted_matches):\n # put whole method in loop from 0 to len(sorted_matches)\n # continue until we have found some recommendations\n # (instead of just looking at top match)\n if len(sorted_matches) > 0:\n top_match = sorted_matches[0]\n top_match_songs = top_match[1]\n top_match_song_set = set(top_match_songs)\n # get the most common genre for top match user's songs\n genre_lists = [song.genres for song in top_match_songs]\n genres = list(itertools.chain(*genre_lists))\n genre_counts = Counter(genres)\n most_common_genre = genre_counts.most_common(1)[0][0]\n # just get the user field of a matching song instead of making db call\n top_match_user = top_match_songs[0].user\n # get all the Songs from Artists which have the most common genre\n # that also belong to the top match user\n most_common_genre_songs = Song.query.filter(Song.artist.has(\n Artist.genres.any(Genre.name == most_common_genre))).filter(\n Song.user == top_match_user).all()\n recommendations = []\n # if any songs in most_common_genre_songs are not in top matching\n # songs, add them to the recommended songs\n most_common_genre_song_set = set(most_common_genre_songs)\n recommend_set = most_common_genre_song_set - top_match_song_set\n recommendation_list = list(recommend_set)\n recommendations += recommendation_list\n if len(recommendations > 0):\n # sort by popularity, then return\n recommendations.sort(key=lambda x: x.popularity, reverse=True)\n return recommendations\n return []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dropout for sparse tensors.
def sparse_dropout(x, keep_prob, noise_shape):
    # The dropout layer for sparse matrix
    random_tensor = keep_prob
    random_tensor += tf.random_uniform([noise_shape], dtype=tf.float64)
    dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
    pre_out = tf.sparse_retain(x, dropout_mask)
    return pre_out * (1. / keep_prob)
[ "def dropout_sparse(x, keep_prob, num_nonzero_elems):\n\tnoise_shape = [num_nonzero_elems]\n\trandom_tensor = keep_prob\n\trandom_tensor += torch.rand(noise_shape)\n\tdropout_mask = torch.floor(random_tensor).bool()\n\t\"\"\" \"\"\"\n\tpre_out = x[dropout_mask]\n\t\"\"\" \"\"\"\n\treturn pre_out * (1./keep_prob)", "def test_cp_dropout():\n shape = (10, 11, 12)\n rank = 8\n tensor = FactorizedTensor.new(shape, rank=rank, factorization='CP')\n tensor = tensor_dropout(tensor, 1)\n weights = tensor().weights\n assert (len(weights) == (1))\n\n remove_tensor_dropout(tensor)\n assert (not tensor._forward_hooks)\n\n tensor = tensor_dropout(tensor, 0)\n weights = tensor().weights\n assert (len(weights) == rank)", "def test_tucker_dropout():\n shape = (10, 11, 12)\n rank = (7, 8, 9)\n tensor = FactorizedTensor.new(shape, rank=rank, factorization='Tucker')\n tensor = tensor_dropout(tensor, 1)\n core = tensor().core\n assert (tl.shape(core) == (1, 1, 1))\n\n remove_tensor_dropout(tensor)\n assert (not tensor._forward_hooks)\n\n tensor = tensor_dropout(tensor, 0)\n core = tensor().core\n assert (tl.shape(core) == rank)", "def test_tt_dropout():\n shape = (10, 11, 12)\n # Use the same rank for all factors\n rank = 4\n tensor = FactorizedTensor.new(shape, rank=rank, factorization='TT')\n tensor = tensor_dropout(tensor, 1)\n factors = tensor().factors\n for f in factors:\n assert (f.shape[0] == f.shape[-1] == 1)\n\n remove_tensor_dropout(tensor)\n assert (not tensor._forward_hooks)\n\n tensor = tensor_dropout(tensor, 0)\n factors = tensor().factors\n for i, f in enumerate(factors):\n if i:\n assert (f.shape[0] == rank)\n else: # boundary conditions: first and last rank are equal to 1\n assert (f.shape[-1] == rank)", "def apply_dropout(self, tensor):\n if not self._use_dropout:\n raise ValueError(\"The model has not been configured to use dropout.\")\n return tf.layers.dropout(tensor, rate=self.dropout_placeholder)", "def Dropout(p_drop, inputs):\n srng = RandomStreams(seed=234)\n scaled_inputs = inputs / swft.floatX(1-p_drop)\n return scaled_inputs * srng.binomial(\n inputs.shape, \n p=swft.floatX(1-p_drop),\n dtype=theano.config.floatX\n )", "def get_dropout(x, rate=0.0, init=True):\n if init or rate == 0:\n return x\n return tf.layers.dropout(x, rate=rate, training=True) # TODO", "def drop_bias(matrix):\n return matrix[:,1:]", "def drop_nonnoise_tokens(tokens, noise_mask, vocabulary, seeds):\n del vocabulary\n del seeds\n return tf.boolean_mask(tokens, noise_mask)", "def remove_store_open_no_sales(X):\n if config.TARGET in X.columns:\n X = X.loc[~((X['Open'] == 1) & (config.TRAIN[config.TARGET] == 0))]\n print(f'* {config.TARGET} is in dataset columns. Some samples were removed')\n return X\n else:\n print(f'* {config.TARGET} is NOT in dataset columns. 
None sample was removed')\n return X", "def compact(self):\n zero_idx = np.where(self.data == 0)\n self.data = np.delete(self.data, zero_idx)\n self.indices = np.delete(self.indices, zero_idx)", "def _concrete_dropout(self, x: Tensor) -> Tensor:\n\n eps = 1e-7\n tmp = 0.1\n\n self.p = torch.sigmoid(self.p_logit)\n u_noise = torch.rand_like(x)\n\n drop_prob = (torch.log(self.p + eps) -\n torch.log(1 - self.p + eps) +\n torch.log(u_noise + eps) -\n torch.log(1 - u_noise + eps))\n\n drop_prob = torch.sigmoid(drop_prob / tmp)\n\n random_tensor = 1 - drop_prob\n retain_prob = 1 - self.p\n\n x = torch.mul(x, random_tensor) / retain_prob\n\n return x", "def trim_dataset(mat, batch_size):\r\n no_of_rows_drop = mat.shape[0]%batch_size\r\n #print (no_of_rows_drop)\r\n if(no_of_rows_drop > 0):\r\n return mat[:-no_of_rows_drop]\r\n else:\r\n return mat", "def _remove_empty_timesteps(sp_tensor):\n\n batch_size = tf.to_int32(sp_tensor.dense_shape[0])\n indices, max_len = _example_index_to_sparse_index(\n tf.to_int32(sp_tensor.indices[:, 0]), batch_size)\n indices = tf.concat([indices, tf.zeros_like(indices[:, 0:1])], axis=1)\n return tf.SparseTensor(\n indices=indices,\n values=sp_tensor.values,\n dense_shape=[batch_size, max_len, 1])", "def drop_noise_tokens(tokens, noise_mask, vocabulary, seeds):\n del vocabulary\n del seeds\n return tf.boolean_mask(tokens, tf.logical_not(noise_mask))", "def dropout_mask(x, sz, p: float):\n return x.new(*sz).bernoulli_(1 - p).div_(1 - p)", "def remove_tensor(self, position):\n target_indices = self.tensors[position].non_der_indices\n new_tensors = []\n for tensor in rest(self.tensors, position):\n new_indices = remove_indices(tensor.indices, target_indices)\n new_tensors.append(tensor.change_indices(new_indices))\n return Operator(new_tensors)", "def trim_dataset(mat, batch_size):\n no_of_rows_drop = mat.shape[0]%batch_size\n if(no_of_rows_drop > 0):\n return mat[no_of_rows_drop:]\n else:\n return mat", "def trim_dataset(mat, batch_size):\n\t no_of_rows_drop = mat.shape[0]%batch_size\n\t if(no_of_rows_drop > 0):\n\t return mat[:-no_of_rows_drop]\n\t else:\n\t return mat" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
_track_ Track the jobs while they are in condor. This returns a three-way tuple: first, the jobs still running; second, the jobs whose status needs to be changed; third, the jobs that need to be marked completed.
def track(self, jobs, info = None): # Create an object to store final info trackList = [] changeList = [] completeList = [] runningList = [] noInfoFlag = False # Get the job jobInfo = self.getClassAds() if jobInfo == None: return runningList, changeList, completeList if len(jobInfo.keys()) == 0: noInfoFlag = True for job in jobs: # Now go over the jobs from WMBS and see what we have if not job['jobid'] in jobInfo.keys(): # Two options here, either put in removed, or not # Only cycle through Removed if condor_q is sending # us no information if noInfoFlag: if not job['status'] == 'Removed': # If the job is not in removed, move it to removed job['status'] = 'Removed' job['status_time'] = int(time.time()) changeList.append(job) elif time.time() - float(job['status_time']) > self.removeTime: # If the job is in removed, and it's been missing for more # then self.removeTime, remove it. completeList.append(job) else: completeList.append(job) else: jobAd = jobInfo.get(job['jobid']) jobStatus = int(jobAd.get('JobStatus', 0)) statName = 'Unknown' if jobStatus == 1: # Job is Idle, waiting for something to happen statName = 'Idle' elif jobStatus == 5: # Job is Held; experienced an error statName = 'Held' elif jobStatus == 2 or jobStatus == 6: # Job is Running, doing what it was supposed to # NOTE: Status 6 is transferring output # I'm going to list this as running for now because it fits. statName = 'Running' elif jobStatus == 3: # Job is in X-state: List as error statName = 'Error' elif jobStatus == 4: # Job is completed statName = 'Complete' else: # What state are we in? logging.info("Job in unknown state %i" % jobStatus) # Get the global state job['globalState'] = CondorPlugin.stateMap()[statName] if statName != job['status']: # Then the status has changed job['status'] = statName job['status_time'] = jobAd.get('stateTime', 0) changeList.append(job) runningList.append(job) return runningList, changeList, completeList
[ "def updateJobs(currentJobs, jobTrack):\n for i in range(len(currJobs)):\n try:\n jobTrack[i] = currJobs[i].is_alive()\n except:\n jobTrack[i] = 0\n return jobTrack", "def number_of_pending_jobs():\n cmd = [\"squeue\", \"-u\", \"lstanalyzer\", \"-h\", \"-t\", \"pending\", \"-r\"]\n output = sp.check_output(cmd)\n return output.count(b\"\\n\")", "def getJobs(self):\n if (self.firstJobCheck):\n o = self.getString(\"JOBS\", \"\", \"CPU\")\n if ((o == \"CPU\") or (o == \"\")):\n o =str(multiprocessing.cpu_count())\n self.addHistory(\"JOBS\", \"Number of make jobs for build?\", o)\n self.jobs = o\n else:\n o = self.jobs\n return o", "def status(self):\n numrunning = 0\n numqueued = 0\n\n queue, comm_err = self._showq()\n\n if comm_err:\n return (9999, 9999)\n #elif error:\n # raise queue_managers.QueueManagerFatalError(error) \n\n numrunning = len(queue['running'])\n numqueued = len(queue['pending']) + len(queue['suspended'])\n\n #lines = jobs.split('\\n')\n #for line in lines:\n # if line.startswith(self.job_basename):\n # if 'Running' in line.split()[2]:\n # numrunning += 1\n # elif 'Idle' in line.split()[2]:\n # numqueued += 1\n\n return (numrunning, numqueued)", "def test_finished(jobs):\n nonlocal count\n nonlocal count_dict\n for job in jobs[:]:\n if if_finish_func(job):\n finished_jobs.append(job)\n num = str(len(finished_jobs)) + '/' + str(job_num)\n rec = str(job)\n rec += '\\n'\n rec += num + ' calculation finished.\\n'\n rec += '---'*25\n print(rec)\n record(job.root_path, rec)\n jobs.remove(job)\n count -= 1\n count_dict[job.parameter['nodes']] -= 1", "def update_running_jobs(self, condor_jobs):\n self.logger.info('Will update job info in the local storage')\n all_items = self.storage.get_all()\n for validation_name, storage_item in all_items.iteritems():\n self.logger.info('Updating %s information in local storage', validation_name)\n running = storage_item['running']\n for threads, threads_dict in running.iteritems():\n if threads_dict.get('condor_status') == 'DONE':\n continue\n\n condor_id = str(threads_dict['condor_id'])\n current_status = threads_dict.get('condor_status', '<unknown>')\n new_status = condor_jobs.get(condor_id, 'DONE')\n if current_status != new_status:\n threads_dict['condor_status'] = new_status\n self.logger.info('%s %s threads job changed to %s',\n validation_name,\n threads,\n new_status)\n self.storage.save(validation_name, storage_item)\n\n self.logger.info('Updated local storage:')\n all_items = self.storage.get_all()\n for validation_name, storage_item in all_items.iteritems():\n stage = storage_item['stage']\n self.logger.info(' %s is at stage %s:', validation_name, stage)\n running = storage_item['running']\n for threads in list(sorted(running.keys())):\n threads_dict = running[threads]\n self.logger.info(' Threads: %s, attempt: %s, status: %s, HTCondor ID: %s',\n threads,\n threads_dict.get('attempt_number'),\n threads_dict.get('condor_status'),\n threads_dict.get('condor_id'))", "def __updateProcessCounter(self):\n\n newcounter = 0\n for job in self.__procs:\n if job.is_alive():\n newcounter+=1\n self.__numRunningProcs = newcounter\n return newcounter", "def run(self):\n\n\t\ti = 0\n\t\twith open(\"job_log\", \"a+\") as job_log, open(\"task_log\", \"a+\") as task_log:\n\t\t\tlabels = [\"GWAS_rsid\", \"outside_rsid\", \"task_name\", \"job_name\", \"status\"]\n\t\t\tjob_log.write(\"\\t\".join(labels))\n\t\t\tjob_log.write(\"\\n\")\n\n\t\t\ttask_log.write(\"\\t\".join(labels))\n\t\t\ttask_log.write(\"\\n\")\n\n\t\t\twhile 
self.incomplete(self.tasks):\n\t\t\t\tdone_tasks = []\n\t\t\t\tprint(f\"Checked {i} times\")\n\t\t\t\ti +=1\n\n\t\t\t\tfor rsid_pair in self.tasks:\n\t\t\t\t\ttask = self.tasks.get(rsid_pair, None)\n\t\t\t\t\tlogging.info(\"rsid_pair %s,%s\" % rsid_pair)\n\n\t\t\t\t\t# First run initialization of jobs.\n\t\t\t\t\tif len(task.jobs) == 0:\n\t\t\t\t\t\tlogging.info(\"\\tstarting first job\")\n\t\t\t\t\t\tnew_jobs = create_new_jobs(task, \"new\")\n\t\t\t\t\t\tfor job in new_jobs:\n\t\t\t\t\t\t\tself.runner.run(job)\n\t\t\t\t\t\ttask.status = \"running\"\n\n\t\t\t\t\t# Re-check all the jobs for the task.\n\n\t\t\t\t\ttask.all_done = self.check_task_jobs(job_log=job_log, task= task)\n\n\t\t\t\t\t# Split child jobs\n\t\t\t\t\tif task.all_done:\n\n\t\t\t\t\t\tline = [f\"{task.rsid_pair.GWAS_rsid}\",f\"{task.rsid_pair.outside_rsid}\",f\"{task.name}\", \"NA\"]\n\t\t\t\t\t\ttask.need_split_cleaned_up = self.needs_split(task)\n\t\t\t\t\t\tif task.need_split_cleaned_up:\n\t\t\t\t\t\t\tcurrent_index = self.iteration_steps.index(task.target_iterations)\n\t\t\t\t\t\t\tif current_index+1 > len(self.iteration_steps) - 1:\n\t\t\t\t\t\t\t\tlogging.info(\"MAX ITERATION REACHED, STILL NEED MORE PERM FOR RSID PAIR {} AT {} ITERATIONS\".format(task.rsid_pair,task.target_iter_str))\n\t\t\t\t\t\t\t\t# remove task and move on to next task\n\t\t\t\t\t\t\t\tline.append(\"reached_max_iter_more_perm\")\n\t\t\t\t\t\t\t\ttask_log.write(\"\\t\".join(line))\n\t\t\t\t\t\t\t\ttask_log.write(\"\\n\")\n\t\t\t\t\t\t\t\tdone_tasks.append(task)\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t# try to move to the next iteration step\n\t\t\t\t\t\t\t\ttask.target_iterations = self.iteration_steps[current_index + 1]\n\t\t\t\t\t\t\t\tlogging.info(\n\t\t\t\t\t\t\t\t\tf\"MOVING TO NEXT STEP OF {task.target_iter_str} ITERATIONS, STILL NEED MORE PERM FOR RSID PAIR {task.rsid_pair} AT {num2words(self.iteration_steps[current_index])} ITERATIONS\")\n\n\t\t\t\t\t\t\t\t#update highest iteration:\n\t\t\t\t\t\t\t\tif task.target_iterations > self.highest_iteration:\n\t\t\t\t\t\t\t\t\tself.highest_iteration = task.target_iterations\n\n\t\t\t\t\t\t\t\t#create new jobs and run them\n\t\t\t\t\t\t\t\tnext_iter_step_jobs = create_new_jobs(task, \"new\")\n\t\t\t\t\t\t\t\tfor job in next_iter_step_jobs:\n\t\t\t\t\t\t\t\t\tself.runner.run(job)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlogging.info(\"DONE WITH RSID PAIR {} AT {} ITERATIONS\".format(task.rsid_pair, task.target_iter_str))\n\t\t\t\t\t\t\ttask.status = \"complete\"\n\t\t\t\t\t\t\tline.append(f\"complete_{task.target_iter_str}\")\n\t\t\t\t\t\t\ttask_log.write(\"\\t\".join(line))\n\t\t\t\t\t\t\ttask_log.write(\"\\n\")\n\t\t\t\t\t\t\t#self.stop_monitoring(task)\n\t\t\t\t\t\t\tdone_tasks.append(task)\n\n\t\t\t\t\tprint(\"-\")\n\t\t\t\tprint(\"---\")\n\t\t\t\t# print(self.tasks)\n\t\t\t\tprint(\"===\")\n\t\t\t\tlogging.info(f\"Currently in this directory: {os.getcwd()}\")\n\n\t\t\t\t#removing all the done tasks at once:\n\t\t\t\tfor finished_task in done_tasks:\n\t\t\t\t\tcheckpoint(self.stop_monitoring(finished_task))\n\t\t\t\t#self.save_tasks()\n\t\t\t\ttime.sleep(60)\n\n\t\tself.final_combine()\n\t\tprint(\"all done ---------------\")\n\t\tself.overall_end_time = time.time()\n\t\tprint(f\"Finished {len(self.single_pairings)} SNP pairs from {self.iteration_steps[0]} to {self.highest_iteration} in {self.overall_end_time - self.overall_start_time}\")", "def get_current_jobs(grep='BM5'):\n concurrent_cmd = \"qstat -a | awk '{print $4}' | grep \" + grep + \" | wc -l\"\n p = 
subprocess.Popen(\n concurrent_cmd,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n output = p.communicate()[0]\n njobs = int(output.decode('utf-8'))\n print(f'Found {njobs} in the queue.')\n return njobs", "def check_the_need_to_split(self,jobs_to_run,jobs_to_collect):\n nevt_job=self.run_card['nevt_job']\n if nevt_job > 0:\n jobs_to_collect_new=copy.copy(jobs_to_collect)\n for job in jobs_to_run:\n nevents=job['nevents']\n if nevents == 0:\n jobs_to_collect_new.remove(job)\n elif nevents > nevt_job:\n jobs_to_collect_new.remove(job)\n if nevents % nevt_job != 0 :\n nsplit=int(nevents/nevt_job)+1\n else:\n nsplit=int(nevents/nevt_job)\n for i in range(1,nsplit+1):\n job_new=copy.copy(job)\n left_over=nevents % nsplit\n if i <= left_over:\n job_new['nevents']=int(nevents/nsplit)+1\n job_new['wgt_frac']=float(job_new['nevents'])/float(nevents)\n else:\n job_new['nevents']=int(nevents/nsplit)\n job_new['wgt_frac']=float(job_new['nevents'])/float(nevents)\n job_new['split']=i\n job_new['dirname']=job['dirname']+'_%i' % job_new['split']\n jobs_to_collect_new.append(job_new)\n jobs_to_run_new=copy.copy(jobs_to_collect_new)\n else:\n jobs_to_run_new=copy.copy(jobs_to_collect)\n for job in jobs_to_collect:\n if job['nevents'] == 0:\n jobs_to_run_new.remove(job)\n jobs_to_collect_new=copy.copy(jobs_to_run_new)\n\n return jobs_to_run_new,jobs_to_collect_new", "def task_7_song_counter():\n return Song.objects.all().count()", "def _completed_update_jobs(futures_to_condition, per_condition_state, logger):\n for f in futures.as_completed(futures_to_condition):\n duration = f.result()\n condition = futures_to_condition[f]\n state = per_condition_state[condition.label]\n state['num_completed_update_jobs'] += 1\n\n logger.debug('Processed {0:d} of {1:d} jobs to update classification_state table for condition \\'{2}\\''\n ' (duration {3:.3f}s)'\n .format(state['num_completed_update_jobs'],\n state['num_total_update_jobs'],\n condition.label,\n duration / 1000))\n\n if state['num_completed_update_jobs'] == state['num_total_update_jobs']:\n logger.info('Finished updating classification_state table for condition \\'{0}\\''\n .format(condition.label))\n yield condition, state", "def _completed_update_jobs(futures_to_condition, per_condition_state, logger):\n for f in futures.as_completed(futures_to_condition):\n duration = f.result()\n condition = futures_to_condition[f]\n state = per_condition_state[condition.label]\n state['num_completed_update_jobs'] += 1\n\n logger.debug(\"Processed {0:d} of {1:d} jobs to update classification_state table for condition \\'{2}\\'\"\n ' (duration {3:.3f}s)'\n .format(state['num_completed_update_jobs'],\n state['num_total_update_jobs'],\n condition.label,\n duration / 1000))\n\n if state['num_completed_update_jobs'] == state['num_total_update_jobs']:\n logger.info(\"Finished updating classification_state table for condition \\'{0}\\'\"\n .format(condition.label))\n yield condition, state", "def getCondorRunningJobs(user):\n\n\n command = ['condor_q', user]\n pipe = Popen(command, stdout = PIPE, stderr = PIPE, shell = False)\n stdout, error = pipe.communicate()\n\n output = stdout.split('\\n')[-2]\n\n nJobs = int(output.split(';')[0].split()[0])\n\n return nJobs", "def work(self):\n \n if len(self.doing) < self.capacity and self.todo:\n for _ in range(min([self.todo, self.capacity])):\n if not self.pull or not self.target or not self.target.max_todo or (self.done + self.target.todo + self.batch_size) <= self.target.max_todo:\n self.todo -= 1\n 
self.doing.append(0)\n\n work_done = 0\n for idx in reversed(range(len(self.doing))):\n self.doing[idx] += 1\n work_done += 1\n if self.doing[idx] >= self.task_duration:\n self.doing.pop(idx)\n self.done += 1\n return work_done", "def _check_jobs(self):\n testmode = self.am_getOption(\"TestMode\", False)\n simudb = SimuInterface(create_connection(testmode=testmode))\n try:\n simusdict = simudb.get_runs_with_status_in_group_with_status(status=[\"new\"],\n gstat=[\"new\", \"submitting\"])\n except:\n return S_ERROR(\"Couldn't get the simu dict\")\n simudb.close_session()\n return S_OK(len(simusdict.keys()))", "def _completed_calc_jobs(futures_to_condition, per_condition_state, logger):\n for f in futures.as_completed(futures_to_condition):\n num_matched_imeis, duration = f.result()\n condition = futures_to_condition[f]\n state = per_condition_state[condition.label]\n state['num_completed_calc_jobs'] += 1\n state['num_matched_imeis'] += num_matched_imeis\n logger.debug('Processed {0:d} of {1:d} jobs to calculate matching IMEIs for condition \\'{2}\\' '\n '(duration {3:.3f}s)'\n .format(state['num_completed_calc_jobs'],\n state['num_total_calc_jobs'],\n condition.label,\n duration / 1000))\n\n if state['num_completed_calc_jobs'] == state['num_total_calc_jobs']:\n logger.info('Finished calculating {0:d} matching IMEIs for condition \\'{1}\\''\n .format(state['num_matched_imeis'], condition.label))\n yield condition, state", "def test_freq_job(self):\n self.job.disp = False\n self.job.ri = False\n self.job.nproc = 1\n self.job.jobtype = 'numforce'\n result = [\n '$maxcor 2048',\n '$parallel_parameters maxtask=10000',\n '$paroptions ga_memperproc 900000000000000 900000000000',\n '$ri',\n '$marij',\n '$ricore 0',\n '$ricore_slave 1'\n ]\n self.assertEqual(auto_control_mod(list(), self.job), result)", "def _completed_calc_jobs(futures_to_condition, per_condition_state, logger):\n for f in futures.as_completed(futures_to_condition):\n num_matched_imeis, duration = f.result()\n condition = futures_to_condition[f]\n state = per_condition_state[condition.label]\n state['num_completed_calc_jobs'] += 1\n state['num_matched_imeis'] += num_matched_imeis\n logger.debug(\"Processed {0:d} of {1:d} jobs to calculate matching IMEIs for condition \\'{2}\\' \"\n '(duration {3:.3f}s)'\n .format(state['num_completed_calc_jobs'],\n state['num_total_calc_jobs'],\n condition.label,\n duration / 1000))\n\n if state['num_completed_calc_jobs'] == state['num_total_calc_jobs']:\n logger.info(\"Finished calculating {0:d} matching IMEIs for condition \\'{1}\\'\"\n .format(state['num_matched_imeis'], condition.label))\n yield condition, state" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Kill a list of jobs based on their WMBS job IDs
def kill(self, jobs, info = None): for job in jobs: jobID = job['jobid'] # This is a very long and painful command to run command = 'condor_rm -constraint \"WMAgent_JobID =?= %i\"' % (jobID) proc = subprocess.Popen(command, stderr = subprocess.PIPE, stdout = subprocess.PIPE, shell = True) out, err = proc.communicate() return
[ "def kill_all(self):\n for job_id in self.job_ids:\n cmd = \"kill %s\" % (job_id)\n os.system(cmd)", "def aws_kill_jobs_command(\n self, args: Namespace, extra_args: List[str], argv: List[str]\n ) -> None:\n scheduler = self.get_scheduler(args)\n\n # Get AWS executors.\n executors = [\n executor\n for executor in scheduler.executors.values()\n if isinstance(executor, AWSBatchExecutor)\n ]\n statuses = args.status.split(\",\") if args.status else None\n\n for executor in executors:\n job_ids = [job[\"jobId\"] for job in executor.get_jobs(statuses=statuses)]\n for job_id in job_ids:\n self.display(\"Killing job {}...\".format(job_id))\n executor.kill_jobs([job_id])", "def kill_jobs(self):\n\n for p in self.parser_jobs:\n p.kill()", "def cleanup_jobs(self):\n SERVICE_LOGGER.info('Stopping %d jobs', len(self.jobs))\n for job_name, job in self.jobs.items():\n if job.get_status():\n SERVICE_LOGGER.info('Killing %s', job_name)\n job.kill()", "def remove_jobs():\r\n\r\n list_jobs = []\r\n for job in Gaia.list_async_jobs():\r\n list_jobs.append(job.get_jobid())\r\n \r\n Gaia.remove_jobs(list_jobs)", "def stop_jobs(shot_id):\n jobs = Job.query.\\\n filter_by(shot_id = shot_id).\\\n filter_by(status = 'running').\\\n all()\n\n for job in jobs:\n print(stop_job(job.id))", "def kill_all(self):\n while not self.job_ids.empty():\n job_id = self.job_ids.get()\n self.kill_job(job_id)", "def delete_jobs(self, jobs):\n for job in jobs:\n logger.info(f\"Deleting job {job.name}\")\n try:\n self.server.delete_job(job.name)\n except jenkins.NotFoundException as exception_received:\n logger.info(repr(exception_received))", "def killLocalJobs(self, jobIDs: List[int]) -> None:\n self.localBatch.killBatchJobs(jobIDs)", "def cancelJobs(self):\n print \"Terminating all Jobs due to reaching timeout\"\n for proc in self.__procs:\n if not proc.is_alive():\n\n proc.terminate()\n print \"All jobs have been terminated\"", "def terminate_workers():\r\n print(\"Terminating workers\")\r\n for p in jobs:\r\n p.terminate()\r\n jobs.clear()", "def delete(context, jobs_names, base_dir, confirm):\n session = jenkins_api.auth(base_dir)\n if confirm and jobs_names:\n question = click.style(click.wrap_text(\n 'Are you sure you want to delete the following jobs on the '\n 'Jenkins server?'\n ), fg='red', bold=True)\n jobs_list = '\\n'.join(' %s' % n for n in jobs_names)\n click.confirm('%s\\n\\n%s\\n\\n' % (question, jobs_list), abort=True)\n\n exit_code = 0\n for name in jobs_names:\n try:\n jenkins_api.delete_job(session, name)\n except requests.HTTPError as exc:\n if exc.response.status_code == 404:\n click.secho('%s was not found' % name, fg='red')\n exit_code = 5\n\n context.exit(exit_code)", "def stop_workers(self, delay=0):\n if delay > 0: time.sleep(delay)\n for w in self.all_workers:\n p = w[\"process\"]\n wid = w[\"wid\"]\n logger.debug(\"killing worker id(%s)\" % (wid))\n ept_utils.terminate_process(p)", "def cancelJobs(batchids):\n keywords = queueinit.readConfig(queue_dir=queue_dir)\n\n cmd = form_command(keywords, batchids)\n logger.debug(\"Executing cmd: %s\" % cmd)\n qdel = sp.Popen(cmd, shell=True, stdout = sp.PIPE, stderr = sp.STDOUT)\n message = process_output(qdel.stdout)\n print message\n exit_code = qdel.wait()\n\n return exit_code", "def cleanJobs():\n if isfile(\"cleanJobs\"):\n os.remove(JOBQUEUEDB)\n os.remove(\"cleanJobs\")", "def kill_processes(self, proc_ids):\n #The below is repeatedly tried since KILL is unreliable and we\n #need to loop until all the processes are eventually terminated.\n #An 
intersection with the set of processes currently running in\n #the system, gives the processes on which the KILL has still not\n #worked. Hence we need to keep calling KILL repeatedly on these.\n #for e.g.\n #P = {1, 2, 3, 4, 5, 6}\n #p = {1, 2, 3}\n #p &= P => {1,2,3}\n #After the first KILL\n #P = {3, 4, 5, 6}\n #p = {1, 2, 3}\n #p &= P => {3}\n #Now we run KILL on {3}\n #Thus each time we work with the set of proecesses in the original\n #list that are still running.\n proc_ids_set = set(proc_ids)\n proc_ids_set.intersection_update(set(self.processes(True, True)))\n while proc_ids_set:\n for id in proc_ids_set:\n self.exec_stmt(\"KILL %s\", {\"params\": (id,)})\n #sleep to ensure that the kill command reflects its results\n time.sleep(math.log10(len(proc_ids_set)))\n proc_ids_set.intersection_update(\n set(self.processes(True, True))\n )", "def delete_jobs(self, job_ids: list):\n sql = (\n f\"DELETE FROM {self._T_JOB} \"\n \"WHERE str_job_id IN (%s);\" % \", \".join([\"%s\"] * len(job_ids))\n )\n\n try:\n self._cur.execute(sql, job_ids)\n except psycopg2.IntegrityError:\n raise exc.ChildExists(\"child layer(s) exist: unable to delete\")", "def killall(self, name, sig):\r\n signum = parse_signal_value(sig)\r\n sessionid, name = self._parse_name(name)\r\n pname = \"%s.%s\" % (sessionid, name)\r\n with self._lock:\r\n state = self._get_state(sessionid, name)\r\n self._publish(\"job.%s.kill\" % pname, name=pname, signum=signum)\r\n\r\n processes = list(state.running)\r\n processes.extend(list(state.running_out))\r\n for p in processes:\r\n # notify we stop this job\r\n self._publish(\"proc.%s.kill\" % p.pid, pid=p.pid, name=p.name)\r\n # effectively send the signal\r\n p.kill(signum)\r\n\r\n self._manage_processes(state)", "def filter_jobs_to_cancel(current_job_name, current_job_id, list_of_job_info):\n running_jobs = []\n for job_info in list_of_job_info:\n job_num = job_info.job_num\n job_step_name = job_info.job_step_name\n\n if job_step_name != current_job_name:\n running_jobs.append(job_num)\n elif job_num < current_job_id:\n running_jobs.append(job_num)\n\n return running_jobs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
_makeSubmit_ For a given job/cache/spec make a JDL fragment to submit the job
def makeSubmit(self, jobList): if len(jobList) < 1: #I don't know how we got here, but we did logging.error("No jobs passed to plugin") return None jdl = self.initSubmit() # For each script we have to do queue a separate directory, etc. for job in jobList: if job == {}: # Then I don't know how we got here either logging.error("Was passed a nonexistant job. Ignoring") continue jdl.append("initialdir = %s\n" % job['cache_dir']) jdl.append("transfer_input_files = %s, %s/%s, %s\n" \ % (job['sandbox'], job['packageDir'], 'JobPackage.pkl', self.unpacker)) argString = "arguments = %s %i\n" \ % (os.path.basename(job['sandbox']), job['id']) jdl.append(argString) jobCE = job['location'] if not jobCE: # Then we ended up with a site that doesn't exist? logging.error("Job for non-existant site %s" \ % (job['location'])) continue jdl.append('+DESIRED_Sites = \"%s\"\n' %(jobCE)) # Check for multicore if job.get('taskType', None) in self.multiTasks: jdl.append('+RequiresWholeMachine?' 'TRUE') # Transfer the output files jdl.append("transfer_output_files = Report.%i.pkl\n" % (job["retry_count"])) # Add priority if necessary if job.get('priority', None) != None: try: prio = int(job['priority']) jdl.append("priority = %i\n" % prio) except ValueError: logging.error("Priority for job %i not castable to an int\n" % job['id']) logging.error("Not setting priority") logging.debug("Priority: %s" % job['priority']) except Exception, ex: logging.error("Got unhandled exception while setting priority for job %i\n" % job['id']) logging.error(str(ex)) logging.error("Not setting priority") jdl.append("+WMAgent_JobID = %s\n" % job['jobid']) jdl.append("Queue 1\n") return jdl
[ "def generate_submit_job(self, submitoptions={}):\n\n # dictionary to contain specific submit options\n submit = {}\n\n submit.update(copy.deepcopy(self.submit_options))\n submit.update(copy.deepcopy(submitoptions))\n\n # add arguments\n submit[\"arguments\"] = \"$(ARGS)\"\n\n # add requirements\n if isinstance(self.requirements, list):\n if len(self.requirements) > 0:\n submit[\"requirements\"] = \" && \".join(self.requirements)\n else:\n submit[\"requirements\"] = self.requirements\n\n return Submit(submit)", "def invokeSubmitter(self, jobCache, jobToSubmit, jobSpecId,\n jobSpecInstance, specToCacheMap = {}):\n # //\n # // Retrieve the submitter plugin and invoke it\n #//\n submitter = retrieveSubmitter(self.args['SubmitterName'])\n try:\n submitter(\n jobCache,\n jobToSubmit, jobSpecId,\n JobSpecInstance = jobSpecInstance,\n CacheMap = specToCacheMap\n )\n except JSException, ex:\n if ex.data.has_key(\"FailureList\"):\n for failedId in ex.data['FailureList']:\n msg = \"Submission Failed for job %s\\n\" % failedId\n msg += str(ex)\n logging.error(msg)\n self.ms.publish(\"SubmissionFailed\", failedId)\n self.ms.commit()\n return False\n elif ex.data.has_key(\"mainJobSpecName\"):\n failedId = ex.data['mainJobSpecName']\n msg = \"Bulk Submission Failed for job %s\\n\" % failedId\n msg += str(ex)\n logging.error(msg)\n self.ms.publish(\"SubmissionFailed\", failedId)\n self.ms.commit()\n return False\n else:\n msg = \"Submission Failed for job %s\\n\" % jobSpecId\n msg += str(ex)\n logging.error(msg)\n self.ms.publish(\"SubmissionFailed\", jobSpecId)\n self.ms.commit()\n return False\n except ProdAgentException, ex:\n msg = \"Submission Failed for job %s\\n\" % jobSpecId\n msg += str(ex)\n logging.error(msg)\n self.ms.publish(\"SubmissionFailed\", jobSpecId)\n self.ms.commit()\n return False\n except StandardError, ex:\n msg = \"Submission Failed for job %s\\n\" % jobSpecId\n msg += \"Unexpected error, details: %s\" % str(ex)\n import traceback, sys\n for x in traceback.format_tb(sys.exc_info()[2]):\n msg += str(x)\n logging.error(msg)\n self.ms.publish(\"SubmissionFailed\", jobSpecId)\n self.ms.commit()\n return False\n self.ms.publish(\"JobSubmitted\", jobSpecId)\n self.ms.commit()\n return True", "def create_submission(self, workspace, bucket, submission_id, workflow_options=None, use_cache=True, memory=3, private=False, region=None, _cache_size=None):\n # FIXME: Identify better authorization scheme for firecloud\n session = generate_default_session()\n get_token_info(session)\n warnings.warn(\"[BETA] Gateway Create Submission\")\n if _cache_size is None and use_cache:\n blob = getblob('gs://{}/lapdog-call-cache.sql'.format(bucket))\n if blob.exists():\n blob.reload()\n _cache_size = blob.size\n response = get_user_session().post(\n self.get_endpoint('submit'),\n headers={\n 'Content-Type': 'application/json',\n 'X-Fc-Auth': session.credentials.token,\n },\n json={\n 'bucket': bucket,\n 'submission_id': submission_id,\n 'namespace': self.namespace,\n 'workspace': workspace,\n 'workflow_options': workflow_options if workflow_options is not None else {},\n 'memory': memory*1024,\n 'no_ip': private,\n 'compute_region': region,\n 'callcache': use_cache,\n 'cache_size': _cache_size / 1073741824 # 1gib\n }\n )\n if response.status_code == 200:\n operation = response.text\n submission_data_path = 'gs://{bucket}/lapdog-executions/{submission_id}/submission.json'.format(\n bucket=bucket,\n submission_id=submission_id\n )\n blob = getblob(submission_data_path)\n\n blob.upload_from_string(\n 
json.dumps(\n {\n **json.loads(blob.download_as_string().decode()),\n **{'operation': operation}\n }\n ).encode()\n )\n cache_write(\n \"{}/{}/{}\".format(\n self.namespace,\n workspace,\n submission_id\n ),\n 'submission-pointer',\n bucket,\n submission_id\n )\n return True, operation\n return False, response", "def __make_submit_file(self):\n\n filepath = os.path.join(self.tmpdir, \"submitfile.submit\")\n submit_file = open(filepath, \"w\")\n\n submit_file.write(\"universe = vanilla\\n\")\n submit_file.write(\"log = pneuron.log\\n\")\n submit_file.write(\"Error = err.$(Process)\\n\")\n submit_file.write(\"Output = out.$(Process)\\n\")\n submit_file.write('requirements = GLIBC == \"2.11\"\\n')\n tarfile_name = optimizer_params.tarred_nrnproj\n submit_file.write(\n \"transfer_input_files = portable-neuron.tar.gz,\" + tarfile_name + \"\\n\"\n )\n submit_file.write(\"should_transfer_files = yes\\n\")\n submit_file.write(\"when_to_transfer_output = on_exit_or_evict\\n\")\n # this is where you have to do the clever stuff:\n\n for shellno in range(self.num_jobs):\n submit_file.write(\"executable = run\" + str(shellno) + \".sh\\n\")\n submit_file.write(\"queue\\n\")\n\n # finally close the submit file\n submit_file.close()", "def submit_job(slug, class_name, job_config, job_name=None, message_slug=None, queue_name=None, app_id=None):\n if settings.SUBMIT_JOB_THROUGH_YARN:\n return submit_job_through_yarn(slug, class_name, job_config, job_name, message_slug, queue_name=queue_name,\n app_id=app_id)\n else:\n return submit_job_through_job_server(slug, class_name, job_config, job_name, message_slug)", "def submit(self):\n \n print 'Submitting the job'\n runner = Runner(self)\n runner.start()", "def build_postsubmit(args):\n build_commit(args, None)", "def submit(self): \n slog.debug('Submitting job...')\n self.submit_time = datetime.datetime.now()\n # note: client will send the job_id back to server to associate a replica with a job\n qsub_path = self.manager.config['system']['qsub']\n ssh_path = self.manager.config['system']['ssh']\n submit_host = self.manager.config['manager']['submit_host']\n \n # Make sure the temp dir exists.\n # We make a tempdir in the project dir because we need to ssh to a head node to submit, and the script should be available there too\n tmpdir = os.path.join(self.manager.project_path, 'tmp')\n if not os.path.exists(tmpdir):\n os.mkdir(tmpdir)\n \n # create a temporary file in the <project_dir>/tmp\n (fd, f_abspath) = tempfile.mkstemp(dir=tmpdir)\n os.write(fd, self.make_submit_script())\n f_basename = os.path.basename(f_abspath)\n # if the user specified a submit_host then prepare the command\n if submit_host is not None and submit_host != '':\n # ssh gpc01 \"cd $PBS_O_WORKDIR; qsub submit.sh\"\n submit_command = ' '.join([ssh_path, submit_host, '\"cd %s; %s %s\"' % (tmpdir, qsub_path, f_basename)])\n else:\n submit_command = ' '.join([qsub_path, f_abspath])\n \n slog.debug('Submitting: \"%s\"' % submit_command)\n process = subprocess.Popen(submit_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n returncode = process.returncode\n (out, err) = process.communicate()\n \n try:\n # use the whole string as the job id\n self.id = out.strip()\n # qsub should return <integer>.<string>\n split_output = self.id.split('.')\n # this will raise an exception if it isnt an integer\n int(split_output[0])\n except Exception, ex:\n slog.error('Error running qsub!')\n slog.error(' Exception: %s' % str(ex))\n slog.error(' stdout: %s' % out)\n slog.error(' stderr: %s' % 
err)\n slog.debug('Job submit stdout: %s' % out)\n slog.debug('Job submit stderr: %s' % err)\n self.id = None\n return False\n else:\n slog.info('Job submitted with ID %s' % self.id)\n os.remove(f_abspath)\n return True", "def submit(self):\n \n # TODO: send job to scheduler ", "def submit(self) -> None:\n if self._job_id:\n raise JobError('Job has already been submitted!')\n self._job_id = self._backend.run(circuits=self._qobj)", "def submit_jobtree(self, cv_iter):\n jobname = 'dm_{}_G{}'.format(self.mvid, cv_iter)\n jid = sge.qsub(\n gfile,\n jobname,\n project=self.project,\n slots=20,\n memory=40,\n parameters=[self.mvid, '--submit_stage', 'jt', '--cv_iter',\n cv_iter],\n conda_env='cascade_ode',\n prepend_to_path='strDir',\n stderr='{}/{}.error'.format(self.logdir, jobname))\n return jid", "def prepareAndSubmit( jobname, cmd, dep, jobfolder, ntasks, cpus, tlim, env, outfolder=None):\n\n try:\n job = TrufaJob( jobname, cmd, dep, jobfolder, ntasks, cpus, tlim, env, outfolder)\n job.makeOutFolder()\n slurm_id = job.submit()\n except Exception as e:\n logging.error(e)\n sys.exit()\n return slurm_id", "def submit_job(self, jobName: str, jobQueue: str, jobDefinition: str, arrayProperties: Dict = None, dependsOn: List = None, parameters: Dict = None, containerOverrides: Dict = None, nodeOverrides: Dict = None, retryStrategy: Dict = None, timeout: Dict = None) -> Dict:\n pass", "def _submit(self, script):", "def submit_script_prepare(job, filename='submitscript.sge'):\n\n logging.debug(\"Preparing gridengine submit script\")\n\n #NPROC has to be one less for MPI jobs\n nproc = job.nproc\n if job.para_arch == \"MPI\":\n nproc = nproc - 1\n\n env_mod = ''\n #env = turbogo_helpers.check_env()\n #if env:\n # logging.warn(\"Env vars not set. Attempting to fix via submit script\")\n # for key in env:\n # env_mod += \"export {}={}\\n\".format(key, env[key]) \n\n #Set up the parallel preamble. 
No parallel arch info if numforce\n if job.jobtype != 'numforce':\n preamble_template = \"\"\"export PARA_ARCH={para_arch}\\n\"\"\".format(\n para_arch = job.para_arch)\n else:\n preamble_template=''\n\n preamble_template += \"\"\"export MPI_IC_ORDER=\"TCP\"\nexport PARNODES={nproc}\ncat $PE_HOSTFILE | awk '{{for(i=0;i<$2;i++) print $1}}' > hosts_file\nexport HOSTS_FILE=`readlink -f hosts_file`\n\"\"\".format(nproc = nproc)\n\n if job.nproc > 1:\n parallel_preamble = preamble_template\n else:\n parallel_preamble = ''\n\n #set up the job command call itself\n if job.jobtype == 'opt' or job.jobtype == 'optfreq':\n jobcommand = 'jobex'\n jobcommand += ' -c {}'.format(job.iterations)\n if job.ri:\n jobcommand += ' -ri'\n #add making xyz sp geom and opt trajectory xyz files after jobex\n jobcommand += ' > opt.out'\n jobcommand += '\\nt2x > optimization.xyz\\nt2x -c > final_geometry.xyz'\n\n elif job.jobtype == 'aoforce':\n jobcommand = 'aoforce'\n jobcommand += ' > aoforce.out'\n\n elif job.jobtype == 'numforce':\n jobcommand = 'NumForce -central'\n if job.ri:\n jobcommand += ' -ri'\n if job.nproc > 1:\n jobcommand += ' -mfile hosts_file'\n jobcommand += ' > numforce.out'\n\n elif job.jobtype == 'sp':\n if job.ri:\n jobcommand = 'ridft'\n else:\n jobcommand = 'dscf'\n jobcommand += ' > sp.out'\n\n elif job.jobtype == 'ts':\n if job.ri:\n jobcommand = 'ridft\\nrdgrad\\njobex -trans -ri'\n else:\n jobcommand = 'dscf\\ngrad\\njobex -trans'\n if job.iterations:\n jobcommand += ' -c {}'.format(job.iterations)\n jobcommand += ' > ts.out'\n jobcommand += '\\nt2x > optimization.xyz\\nt2x -c > final_geometry.xyz'\n\n logging.debug('Job submit script: {} completed.'.format(\n jobcommand.replace('\\n', ' & ')))\n\n #make one big sumbit script\n #runs the jobcommand\n submit_script = \"\"\"#!/bin/bash\n#$ -cwd\n#$ -V\n#$ -j y\n#$ -o {jobname}.stdout\n#$ -N tm.{jobname}\n#$ -l h_rt={rt}\n#$ -R y\n#$ -pe threaded {nproc}\n{env_mod}\n{parallel_preamble}\nsource $TURBODIR/Config_turbo_env\n\nulimit -s unlimited\n\ntouch startfile\n\n{jobcommand}\n\"\"\".format(\n jobname=turbogo_helpers.slug(job.name),\n nproc=job.nproc,\n parallel_preamble=parallel_preamble,\n jobcommand=jobcommand,\n env_mod=env_mod,\n rt = job.rt\n )\n\n #listify script by lines and write lines to file\n try:\n turbogo_helpers.write_file(filename, submit_script.split('\\n'))\n except turbogo_helpers.FileAccessError:\n logging.warning(\"Error writing submit script to file.\")\n logging.debug('Submit script generated at {}.'.format(filename))\n return submit_script", "def submit_node_driver_baketexture__3_0(rop_node, job_index, dependency_list, validate_only, batch_name=None):\n assert(rop_node.type().name() == \"baketexture::3.0\")\n\n ifd_job_id, job_index = submit_geometry_type_task(rop_node, job_index, dependency_list, validate_only,\n \"soho_diskfile\", \" (ifd)\", batch_name)\n\n job_parm_dict = {}\n handle_parms_job(rop_node, job_parm_dict)\n handle_parms_submit(rop_node, job_parm_dict)\n handle_parms_mantra(rop_node, job_parm_dict)\n handle_parms_mantra_advanced(rop_node, job_parm_dict)\n\n job_parm_dict[\"Plugin\"] = \"Mantra\"\n job_parm_dict[\"Name\"] += \" (mantra)\"\n job_parm_dict[\"IsFrameDependent\"] = \"true\"\n\n job_parm_dict[\"JobDependencies\"] = str(ifd_job_id)\n\n job_parm_dict[\"OutputFilename0\"] = hou_farm_tools.get_expanded_render_path(rop_node, \"vm_uvoutputpicture1\", \"#\")\n\n if batch_name is not None:\n job_parm_dict[\"BatchName\"] = batch_name\n\n plugin_parm_dict = {\n \"SceneFile\": 
hou_farm_tools.get_expanded_render_path(rop_node, \"soho_diskfile\", \"0\"),\n \"Threads\": \"0\",\n \"Version\": hou_farm_tools.get_hou_major_version(),\n \"CommandLineOptions\": hou_farm_tools.get_mantra_commandline_options(rop_node)}\n\n if validate_only:\n # Supply a dummy job id\n job_id = job_index\n else:\n # Send the job to the farm\n submit_filename, plugin_filename = write_job_files(\"mantra\", job_index, job_parm_dict, plugin_parm_dict)\n job_id = submit_job_files_to_deadline(submit_filename, plugin_filename, False) # IFD, no need to submit scene\n\n return job_id, job_index+1", "def submit_job(script, dependency=None, name=None):\n global dry_run\n\n if dependency is None:\n dependency = []\n elif isinstance(dependency, int):\n dependency = [str(dependency)]\n else:\n # dependency is supposed to be a list of integers\n dependency = [str(d) for d in dependency]\n\n # dependency is now a list of strings\n if len(dependency) > 0:\n dependency = [\"--dependency=afterok:\" + \":\".join(dependency)]\n else:\n dependency = []\n\n if name is None:\n job_name = []\n script = script.replace(\"%M\", \"\")\n else:\n script = script.replace(\"%M\", sanitize(name))\n # escape name argument\n escaped_name = re.sub(r'\\s+ ', r'\\ ', name)\n job_name = ['--job-name=%s' % escaped_name]\n\n\n if dry_run:\n id = dry_run\n dry_run += 1\n else:\n while True:\n job_submission = subprocess.Popen([\"bsub\"] + dependency + job_name,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n (stdoutdata, stderrdata) = job_submission.communicate(script)\n if job_submission.returncode == 0:\n break\n time.sleep(1)\n # retry\n \n\n id = int(time.time()) \n # = int(stdoutdata.split(\" \")[-1])\n\n\n if job_dir is not None:\n if name is not None:\n filename = \"%s--slurm%d.sh\" % (sanitize(name), id)\n else:\n filename = \"%d.sh\" % id\n\n with open(os.path.join(job_dir, filename), \"w\") as f:\n f.write(script)\n\n return id", "def submit_job(self, application, job):\n raise NotImplementedError(\n \"Abstract method `LRMS.submit_job()` called \"\n \"- this should have been defined in a derived class.\")", "def submit(job, protocol=None, priority=1, scheduler='swf', domain=None, region=None):\n if scheduler != 'swf':\n raise UnsupportedScheduler(scheduler)\n from mass.scheduler.swf import config\n import boto3\n client = boto3.client(\n 'swf',\n region_name=region or config.REGION,\n config=Config(connect_timeout=config.CONNECT_TIMEOUT,\n read_timeout=config.READ_TIMEOUT))\n handler = InputHandler(protocol)\n\n job_title = job['Job']['title']\n res = client.start_workflow_execution(\n domain=domain or config.DOMAIN,\n workflowId=job_title,\n workflowType=config.WORKFLOW_TYPE_FOR_JOB,\n taskList={'name': config.DECISION_TASK_LIST},\n taskPriority=str(priority),\n input=json.dumps({\n 'protocol': protocol,\n 'body': handler.save(\n data=job,\n genealogy=[job_title]\n )\n }),\n executionStartToCloseTimeout=str(config.WORKFLOW_EXECUTION_START_TO_CLOSE_TIMEOUT),\n tagList=[job_title],\n taskStartToCloseTimeout=str(config.DECISION_TASK_START_TO_CLOSE_TIMEOUT),\n childPolicy=config.WORKFLOW_CHILD_POLICY)\n return job_title, res['runId']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
_getCEName_ This is how you get the name of a CE for a job
def getCEName(self, jobSite): if not jobSite in self.locationDict.keys(): siteInfo = self.locationAction.execute(siteName = jobSite) self.locationDict[jobSite] = siteInfo[0].get('ce_name', None) return self.locationDict[jobSite]
[ "def getJobName(self):\n xpath = self.root_tag + \"/updateParameters\" + self.version_filter + \"/jobName\"\n self.debug(\"getDeveloperEmail(): xpath=\" + xpath + \"\\n\")\n # node_set = self.puke_dom.xml_select( xpath )\n node_set = self.getData(xpath)\n value = \"\"\n for node in node_set:\n # value = str( node.jobName )\n value = node.getValue()\n return value", "def communeName():", "def get_name(self, cid):\n return self.get(cid)[1]", "def cctFileName(self):\n return os.path.basename(self.cctFilePath())", "def check_for_ce(self, cename, entries):\n for entry in entries:\n if 'GlueCE' not in entry.objectClass:\n continue\n if cename not in entry.glue['CEUniqueID']:\n continue\n return entry\n self.failIf(True, msg=\"Expected a CE named %s in the output.\" % cename)", "def course_name(self):\r\n return self._course_name", "def get_name(self):\r\n return self._client_name", "def cctFileName(self):\n p = os.path.basename(self.cctFilePath())\n return p", "def jobname(fallback=UNSPECIFIED):\n if _set_jobname: return _set_jobname\n ret=getenvs(['job','LSB_JOBNAME','PBS_JOBNAME','MOAB_JOBNAME',\n 'LOADL_STEP_NAME','LOADL_JOB_NAME'])\n if ret is not None: return ret\n if _set_default is not None: return _set_default\n if fallback is not UNSPECIFIED: return fallback\n return NONAME", "def get_job_name():\n while True:\n # Set and clean jobname\n job_name = input('Enter the job name: ')\n job_name = clean_filename(job_name)\n\n # Confirm cleaned filename\n confirmation_text = 'Your job will be saved as \"{}\" ok? (Y/N): '.format(job_name)\n confirmed = input(confirmation_text).lower()\n \n if confirmed == 'y':\n clear_terminal()\n return job_name", "def job_owner(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"job_owner\")", "def getRunAfterJobName(self):\n xpath = self.root_tag + \"/updateParameters\" + self.version_filter + \"/runAfterJobName\"\n self.debug(\"getEnv(): xpath=\" + xpath + \"\\n\")\n # node_set = self.puke_dom.xml_select( xpath )\n node_set = self.getData(xpath)\n value = \"\"\n for node in node_set:\n # value = str( node.runAfterJobName )\n value = node.getValue()\n return value", "def get_ch_name(name: str):\n try:\n name.encode('ascii')\n # Not returning None beacause we need a usable name\n except UnicodeEncodeError:\n return name\n con, cur = _get_connection()\n sqlstr = ('select ch_name from hvhnonc_fields where en_name=? limit 1;')\n params = (name,)\n row = cur.execute(sqlstr, params).fetchone()\n try:\n return row[0]\n except Exception as e:\n if name not in ('ID, old_ID'):\n print(e, name, 'get_ch_name')\n return name", "def get_company_name(self):\r\n return self.company_name", "def partname(self):\n return self.edbcomponent.GetComponentDef().GetName()", "def get_name(self):\r\n return self.__nombre", "def ename(self) -> str:\n return self.entity.name", "def get_course_name(course_code):\n\n ans = DatabaseConnector.get_values('SELECT course_name FROM course WHERE course.course_code = \"' + course_code\n + '\";')\n course_name = ans[0][0]\n return \"The course name is \" + course_code + \" \" + course_name", "def _get_condensed_name(self) -> str:\n return f\"{self.get_datetime()}_{self.constellation.name}_{self.product_type.name}_{self.band_combi.name}_{self._job_id}\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
_getClassAds_ Grab classAds from condor_q by parsing its formatted output
def getClassAds(self): constraint = "\"WMAgent_JobID =!= UNDEFINED\"" jobInfo = {} command = ['condor_q', '-constraint', 'WMAgent_JobID =!= UNDEFINED', '-constraint', 'WMAgent_AgentName == \"%s\"' % (self.agent), '-format', '(JobStatus:\%s) ', 'JobStatus', '-format', '(stateTime:\%s) ', 'EnteredCurrentStatus', '-format', '(WMAgentID:\%d):::', 'WMAgent_JobID'] pipe = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = False) stdout, stderr = pipe.communicate() classAdsRaw = stdout.split(':::') if not pipe.returncode == 0: # Then things have gotten bad - condor_q is not responding logging.error("condor_q returned non-zero value %s" % str(pipe.returncode)) logging.error("Skipping classAd processing this round") return None if classAdsRaw == '': # We have no jobs return jobInfo for ad in classAdsRaw: # There should be one for every job if not re.search("\(", ad): # There is no ad. # Don't know what happened here continue statements = ad.split('(') tmpDict = {} for statement in statements: # One for each value if not re.search(':', statement): # Then we have an empty statement continue key = str(statement.split(':')[0]) value = statement.split(':')[1].split(')')[0] tmpDict[key] = value if not 'WMAgentID' in tmpDict.keys(): # Then we have an invalid job somehow logging.error("Invalid job discovered in condor_q") logging.error(tmpDict) continue else: jobInfo[int(tmpDict['WMAgentID'])] = tmpDict logging.info("Retrieved %i classAds" % len(jobInfo)) return jobInfo
[ "def get_attribute(self):\n shop_obj = self.env['sale.shop']\n connection_obj = self.env['ebayerp.osv']\n results = False\n attribute = False\n\n if self:\n print(\"-------self._ids----\", self._ids)\n if isinstance(self, int):\n ids = [self._ids]\n # if isinstance(self, int):\n # ids = [ids]\n attr_set_obj = self\n site_id_value = attr_set_obj.shop_id\n if site_id_value:\n siteid = site_id_value.instance_id.site_id.site\n else:\n siteid = ''\n category_code = attr_set_obj.code\n if category_code:\n search_ebay_true = [attr_set_obj.shop_id.id]\n if search_ebay_true:\n leafcategory = ''\n inst_lnk = shop_obj.browse(search_ebay_true[0]).instance_id\n app_id = inst_lnk.app_id\n if inst_lnk.sandbox:\n server_url = \"http://open.api.sandbox.ebay.com/\"\n else:\n server_url = \"http://open.api.ebay.com/\"\n if app_id and server_url and siteid and category_code:\n concate_url = \"\"\" %sshopping?callname=GetCategoryInfo&appid=%s&siteid=%s&CategoryID=%s&version=743&responseencoding=XML\"\"\" % (\n server_url, app_id, siteid, category_code)\n try:\n urlopen = urllib.request.urlopen(concate_url)\n except Exception as e:\n urlopen = ''\n if urlopen:\n mystring = urlopen.read()\n if mystring:\n response = parseString(mystring)\n if response:\n if response.getElementsByTagName('Ack'):\n if response.getElementsByTagName('Ack')[0].childNodes[0].data == 'Success':\n if response.getElementsByTagName('LeafCategory'):\n leafcategory = \\\n response.getElementsByTagName('LeafCategory')[0].childNodes[0].data\n if leafcategory == 'false':\n raise UserError(\"Warning ! Category is not a Leaf Category\")\n elif leafcategory == 'true':\n leafcategory = 'true'\n else:\n raise Warning(_('Category is Invalid on Current Site'))\n elif response.getElementsByTagName('Ack')[0].childNodes[0].data == 'Failure':\n long_message = response.getElementsByTagName('LongMessage')[0].childNodes[\n 0].data\n raise Warning(_('%s') % long_message)\n if leafcategory == 'true':\n results = connection_obj.call(inst_lnk, 'GetCategory2CS', category_code, siteid)\n\n results1 = connection_obj.call(inst_lnk, 'GetCategoryFeatures', category_code, siteid)\n\n if results1:\n if results1.get('ItemSpecificsEnabled', False) == 'Enabled':\n self.write({'item_specifics': True})\n if results1.get('AdFormatEnabled', False) == 'ClassifiedAdEnabled':\n self.write({'class_ad': True})\n if results1.get('ConditionEnabled', False) == 'Disabled':\n self.write({'condition_enabled': True})\n\n return True", "def print_class_info(class_result: Element) -> None:\n cls = find(class_result, 'Class')\n class_id = get_inner_text(find(cls, 'Id'))\n class_name = get_inner_text(find(cls, 'Name'))\n course = find(class_result, 'Course')\n course_name = get_inner_text(find(course, 'Name'))\n print('Class id: ' + class_id)\n print('Class name: ' + class_name)\n print('Course name: ' + course_name)", "def _cls(self, tag_name, class_name):\n return 'descendant-or-self::node()/%s[contains(concat(\" \", normalize-space(@class), \" \"), \" %s \")]' % (tag_name, class_name)", "def parseBGGXML(bgg_id):\n logging.info(TRACE+'parseBGGXML('+bgg_id+')')\n bgg_game_url = BGG_XML_URI + bgg_id\n result = urllib2.urlopen(bgg_game_url).read()\n try:\n xml = ElementTree.fromstring(result)\n except Exception:\n logging.info(TRACE+'parseBGGXML() error parsing BGG')\n return None \n decoded_result = result.decode(\"utf-8\")\n xml_text = db.Text(decoded_result)\n bgg_data = {'name': findPrimaryName(xml),\n 'description': xml.findtext(\".//description\"),\n 'year_published': 
strToInt(xml.findtext(\".//yearpublished\")),\n 'min_players': strToInt(xml.findtext(\".//minplayers\")),\n 'max_players': strToInt(xml.findtext(\".//maxplayers\")),\n 'playing_time': strToInt(xml.findtext(\".//playingtime\")),\n 'age': strToInt(xml.findtext(\".//age\")),\n 'publishers': \n buildDataList(xml.findall(\".//boardgamepublisher\")),\n 'artists': buildDataList(xml.findall(\".//boardgameartist\")),\n 'designers': \n buildDataList(xml.findall(\".//boardgamedesigner\")), \n 'expansions': \n buildDataList(xml.findall(\".//boardgameexpansion\")),\n 'categories': \n buildDataList(xml.findall(\".//boardgamecategory\")),\n 'mechanics': \n buildDataList(xml.findall(\".//boardgamemechanic\")),\n 'subdomains': \n buildDataList(xml.findall(\".//boardgamesubdomain\")),\n 'image_url': xml.findtext(\".//image\"),\n 'thumbnail_url':xml.findtext(\".//thumbnail\"),\n 'xml_text': xml_text}\n \n return bgg_data", "def _findClassForProxy(xmlName, xmlGroup):\n global sources, filters, writers, rendering, animation, implicit_functions,\\\n piecewise_functions, extended_sources, misc\n if not xmlName:\n return None\n if xmlGroup == \"sources\":\n return sources.__dict__[xmlName]\n elif xmlGroup == \"filters\":\n return filters.__dict__[xmlName]\n elif xmlGroup == \"implicit_functions\":\n return implicit_functions.__dict__[xmlName]\n elif xmlGroup == \"piecewise_functions\":\n return piecewise_functions.__dict__[xmlName]\n elif xmlGroup == \"writers\":\n return writers.__dict__[xmlName]\n elif xmlGroup == \"extended_sources\":\n return extended_sources.__dict__[xmlName]\n elif xmlName in rendering.__dict__:\n return rendering.__dict__[xmlName]\n elif xmlName in animation.__dict__:\n return animation.__dict__[xmlName]\n elif xmlName in misc.__dict__:\n return misc.__dict__[xmlName]\n else:\n return None", "def GetChildClass(self, stag_class):\n return stag_class", "def get_cls_results(det_results, annotations, class_id):\n cls_dets = [img_res[class_id] for img_res in det_results]\n cls_gts = []\n cls_gts_ignore = []\n for ann in annotations:\n gt_inds = ann['labels'] == (class_id + 1)\n cls_gts.append(ann['bboxes'][gt_inds, :])\n\n if ann.get('labels_ignore', None) is not None:\n ignore_inds = ann['labels_ignore'] == (class_id + 1)\n cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :])\n else:\n cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))\n\n return cls_dets, cls_gts, cls_gts_ignore", "def scrap_classes():\n\n config = load_config()\n session = requests.session()\n\n with session.post('https://myclass.apps.binus.ac.id/Auth/Login', data={\n 'Username': config['login']['username'],\n 'Password': config['login']['password'],\n 'btnSubmit': True\n }) as response:\n try:\n assert response.json()['Status']\n except:\n return print('Error: Failed to login to BINUS Classes site!')\n\n with session.get('https://myclass.apps.binus.ac.id/Home/GetViconSchedule') as response:\n result = response.json()\n\n for class_data in result:\n date = class_data['DisplayStartDate']\n time = class_data['StartTime'] + ' - ' + class_data['EndTime']\n\n code = class_data['ClassCode']\n delivery = class_data['DeliveryMode'] + ' - ' + class_data['SsrComponentDescription']\n course = class_data['CourseCode'] + ' - ' + class_data['CourseTitleEn']\n\n week = class_data['WeekSession']\n session = class_data['CourseSessionNumber']\n\n meeting_url = class_data['MeetingUrl']\n meeting_id = class_data['MeetingId']\n meeting_password = class_data['MeetingPassword']\n\n student_class = StudentClass(date, time, code, 
delivery, course, week, session)\n if meeting_url != '-':\n meeting = MeetingInfo(meeting_id, meeting_password, meeting_url)\n student_class.meeting = meeting\n\n student_classes.append(student_class)", "def get_category_specifics(self):\n results = False\n attribute = False\n shop_obj = self.env['sale.shop']\n connection_obj = self.env['ebayerp.osv']\n attribute_obj = self.env['product.attribute']\n attribute_val_obj = self.env['product.attribute.value']\n\n if self:\n if isinstance(self, int):\n ids = [self._ids]\n # if isinstance(self, long ):\n # ids = [ids]\n attr_set_obj = self\n siteid = attr_set_obj.shop_id.instance_id.site_id.site\n category_code = attr_set_obj.code\n if category_code:\n search_ebay_true = [attr_set_obj.shop_id.id]\n if search_ebay_true:\n leafcategory = ''\n inst_lnk = shop_obj.browse(search_ebay_true[0]).instance_id\n app_id = inst_lnk.app_id\n if inst_lnk.sandbox:\n server_url = \"http://open.api.sandbox.ebay.com/\"\n else:\n server_url = \"http://open.api.ebay.com/\"\n if app_id and server_url and siteid and category_code:\n concate_url = \"\"\" %sshopping?callname=GetCategoryInfo&appid=%s&siteid=%s&CategoryID=%s&version=743&responseencoding=XML\"\"\" % (\n server_url, app_id, siteid, category_code)\n try:\n urlopen = urllib.request.urlopen(concate_url)\n except Exception as e:\n urlopen = ''\n if urlopen:\n mystring = urlopen.read()\n if mystring:\n response = parseString(mystring)\n if response:\n if response.getElementsByTagName('Ack'):\n if response.getElementsByTagName('Ack')[0].childNodes[0].data == 'Success':\n if response.getElementsByTagName('LeafCategory'):\n leafcategory = \\\n response.getElementsByTagName('LeafCategory')[0].childNodes[0].data\n if leafcategory == 'false':\n raise Warning(_(\"Category is not a Leaf Category\"))\n elif leafcategory == 'true':\n leafcategory = 'true'\n else:\n raise Warning(_(\"Category is Invalid on Current Site\"))\n elif response.getElementsByTagName('Ack')[0].childNodes[0].data == 'Failure':\n long_message = response.getElementsByTagName('LongMessage')[0].childNodes[\n 0].data\n raise Warning(_(\"%s\") % long_message)\n if leafcategory == 'true':\n results = connection_obj.call(inst_lnk, 'GetCategorySpecifics', category_code, siteid)\n for item in results:\n search_id = attribute_obj.search(\n [('attr_set_id', '=', self[0].id), ('attribute_code', '=', item)])\n if not search_id:\n var = True\n if results[item]:\n if results[item][0] == 'novariation':\n var = False\n att_id = attribute_obj.create({'attr_set_id': self[0].id, 'name': item.encode(\"utf-8\"),\n 'attribute_code': item.encode(\"utf-8\"),\n 'variation_enabled': var})\n if len(results[item]):\n for val in results[item]:\n att_val_id = attribute_val_obj.create(\n {'attribute_id': att_id.id, 'name': val, 'value': val})\n\n return True", "def getAds(self):\n \n ids = [id for id in self.db]\n \n results = etree.Element(\"results\")\n for id in ids:\n if (id.find(\"_design\") != -1):\n continue\n \n result = etree.Element(\"result\")\n data = self.db[id]\n result.set(\"title\", data[\"title\"])\n result.set(\"content\", data[\"content\"])\n result.set(\"href\", data[\"href\"])\n results.append(result)\n\n return results", "def test_get_ad_campaigns(self):\n pass", "def parse_overview_page1(self, response):\n\t\tcomm = response.meta['comm'] # the private/commercial indicator\n\t\t#cityid = response.meta['cityid'] # the id of the city of which we look for the ads (as string)\n\t\t# find the number of pages in total and open all other pages from 1,...,last 
page\n\t\tif len(response.xpath('//li[@class=\"pageno\"]/a[@class=\"nothing\"]/strong')) > 1:\n\t\t\tnumpages = int(response.xpath('//li[@class=\"pageno\"]/a[@class=\"nothing\"]/strong[2]/text()').extract()[0])\n\t\t\tfor pageno in xrange(1,numpages+1):\n\t\t\t\t# we have to re-post our form for the filter settings\n\t\t\t\t#request = FormRequest.from_response(response, formdata={'classtype': 'of', 'comm': str(comm), 'pageno': str(pageno), 'cityid': cityid},\n\t\t\t\t#\t\t\t\t\t\t\t\t\tcallback=self.parse_overview_page2)\n\t\t\t\trequest = FormRequest.from_response(response, formdata={'classtype': 'of', 'comm': str(comm), 'pageno': str(pageno)},\n\t\t\t\t\t\t\t\t\t\t\t\t\tcallback=self.parse_overview_page2)\n\t\t\t\trequest.meta['comm'] = comm\n\t\t\t\tyield request\n\t\t\t\t# find the immoscout ads for this site\n\t\t\t\trequest = scrapy.Request('http://www.quoka.de/qs/qpc/xmlSearch.php?search=&view=quoka&platform=desktop&catid=27_2710&maxresults=20&page=' +str(pageno)+\n\t\t\t\t\t\t\t\t\t\t'&output=json&oe=UTF-8', callback=self.parse_immoscout)\n\t\t\t\trequest.meta['comm'] = comm\n\t\t\t\tyield request\n\t\telse:\n\t\t\t# in this case there is no \"Seite 1 von n\", so we simply scrape this page\n\t\t\trequest = scrapy.Request(response.url, callback=self.parse_overview_page2)\n\t\t\trequest.meta['comm'] = comm\n\t\t\tyield request", "def get_description_data(xml_file):\n soup = bs4.BeautifulSoup(xml_file, 'lxml')\n descs = soup.find_all('description')\n for desc in descs:\n desc_data = str(desc.string)\n # if '.com' in desc_data:\n desc_arr = \"\"\n desc_arr.append(desc_data)", "def getClasses(self, record):\n attributes = self.class_by.split('.')\n classes = []\n\n for attribute in attributes:\n record = record[attribute]\n\n rule_no = 0\n for rule in self.rules:\n if re.search(rule, record) is not None:\n classes.append(self.classes[rule_no])\n rule_no += 1\n if self.multi_match is False:\n break\n\n if self.keep_others is True and len(classes) <= 0:\n classes.append(\"others\")\n\n return classes", "def parse_class(element):\n assert element.tag == 'class'\n style_class = {\n 'name': element.get('type'),\n 'entries': [],\n }\n\n for child in element:\n if child.tag != 'category':\n continue\n style_class['entries'].append(parse_category(child))\n return style_class", "def get_matchAD2GLASS(matchtol=0.5,GLASSids=None,ASTRODEEPids=None,verbose=True):\n if verbose: print(' - Get matches to ASTRODEEP catalog')\n cat = np.genfromtxt('hlsp_glass_hst_wfc3_a2744-fullfov-pa999_ir_v001_glassmaster.txt',dtype=None,names=True)\n\n ADpath = '/Users/kschmidt/work/GLASS/LAEsearchFullGLASS/catalogs/ASTRODEEP/fullrelease/'\n AD_cat = np.genfromtxt(ADpath+'A2744cl_26012016/A2744cl_A.cat',dtype=None,names=True)\n\n if verbose: print(' - Ignore ASTRODEEP objects with MAG_JH140 >= 99.0')\n AD_cat = AD_cat[AD_cat['MAG_JH140'] < 99.0]\n\n AD_radec = SkyCoord(ra=AD_cat['RA']*u.degree, dec=AD_cat['DEC']*u.degree)\n cat_radec = SkyCoord(ra=cat['X_WORLD']*u.degree, dec=cat['Y_WORLD']*u.degree)\n\n if verbose: print(' - Getting sources within the match toleracnce of '+str(matchtol)+' arc seconds')\n AD_idx, d2d, d3d = cat_radec.match_to_catalog_sky(AD_radec)\n\n if verbose: print(' - Printing ID matches:')\n if verbose: print(' id_GLASS id_ASTRODEEP r_match_arcsec ')\n outarr = np.array([])\n for ii, id_GLASS in enumerate(cat['NUMBER']):\n if d2d[ii] < matchtol*u.arcsec:\n id_AD = AD_cat['ID'][AD_idx[ii]]\n\n if ASTRODEEPids is not None:\n if id_AD not in ASTRODEEPids: continue\n\n if GLASSids is not None:\n if 
id_GLASS not in GLASSids: continue\n\n if verbose: print(str(\"%10s\" % id_GLASS)+' '+str(\"%10s\" % id_AD)+\n ' '+str(\"%.6f\" % d2d[ii].arcsec))\n if len(outarr) == 0:\n outarr = np.array([int(id_GLASS),int(id_AD),float(d2d[ii].arcsec)])\n else:\n outarr = np.vstack([ outarr,np.array([int(id_GLASS),int(id_AD),float(d2d[ii].arcsec)]) ])\n\n return outarr", "def for_ads(self):\n return self.active().exclude(ad__isnull=True).distinct()", "def getNodeRG_forCavity(self,cavities_da,cav_name_gap_counter_dict,xal_acc_seq_da,xal_node_da):\t\t\n\t\ttype_name = xal_node_da.stringValue(\"type\")\n\t\tif(type_name != \"RG\"): return None\n\t\trf_cavity_name = xal_acc_seq_da.stringValue(\"id\")\n\t\tattrb_da = xal_acc_seq_da.childAdaptors(\"attributes\")[0]\n\t\txal_rfcavity_da = attrb_da.childAdaptors(\"rfcavity\")[0]\n\t\tE0TL = xal_rfcavity_da.doubleValue(\"amp\")/1.0e+9\n\t\tphase = xal_rfcavity_da.doubleValue(\"phase\")\n\t\tfreq = xal_rfcavity_da.doubleValue(\"freq\")*1.0E+6\n\t\tmode = xal_rfcavity_da.stringValue(\"structureMode\")\n\t\tif(cav_name_gap_counter_dict.has_key(rf_cavity_name)):\n\t\t\tcav_name_gap_counter_dict[rf_cavity_name] += 1\n\t\telse:\n\t\t\tcav_name_gap_counter_dict[rf_cavity_name] = 1\n\t\t\tcavity_da = XmlDataAdaptor(\"Cavity\")\n\t\t\tcavity_da.setValue(\"ampl\",1.0)\n\t\t\tcavity_da.setValue(\"frequency\",\"%12.5e\"%freq)\n\t\t\tcavity_da.setValue(\"name\",rf_cavity_name)\n\t\t\tcavity_da.setValue(\"pos\",0.)\n\t\t\tcavities_da.append(cavity_da)\n\t\t#====== make node from xal node\n\t\tnode_name = xal_node_da.stringValue(\"id\")\n\t\tpos = xal_node_da.doubleValue(\"pos\")\t\t\n\t\txal_attrb_da = xal_node_da.childAdaptors(\"attributes\")[0]\n\t\txal_rf_gap_da = xal_attrb_da.childAdaptors(\"rfgap\")[0]\n\t\tamp_factor = xal_rf_gap_da.doubleValue(\"ampFactor\")\n\t\tphase_factor = xal_rf_gap_da.doubleValue(\"phaseFactor\")\n\t\tE0TL = E0TL*amp_factor\n\t\tphase += phase_factor\t\n\t\txal_aperture_da = xal_attrb_da.childAdaptors(\"aperture\")[0]\n\t\taperture = 2*xal_aperture_da.doubleValue(\"x\")\n\t\tnode_da = XmlDataAdaptor(\"accElement\")\n\t\tnode_da.setValue(\"name\",node_name)\n\t\tnode_da.setValue(\"pos\",pos)\n\t\tnode_da.setValue(\"type\",\"RFGAP\")\n\t\tnode_da.setValue(\"length\",0.0)\n\t\tparameters_da = node_da.createChild(\"parameters\")\n\t\tparameters_da.setValue(\"E0L\",E0TL)\n\t\tparameters_da.setValue(\"E0TL\",E0TL)\n\t\tparameters_da.setValue(\"EzFile\",\"\")\n\t\tparameters_da.setValue(\"aperture\",aperture)\n\t\tparameters_da.setValue(\"aprt_type\",1)\n\t\tparameters_da.setValue(\"cavity\",rf_cavity_name)\n\t\tparameters_da.setValue(\"mode\",mode)\n\t\tparameters_da.setValue(\"phase\",phase)\n\t\tttfs_da = node_da.createChild(\"TTFs\")\n\t\tttfs_da.setValue(\"beta_max\",1.0)\n\t\tttfs_da.setValue(\"beta_min\",0.0)\n\t\tpoly_t_da = ttfs_da.createChild(\"polyT\")\n\t\tpoly_s_da = ttfs_da.createChild(\"polyS\")\n\t\tpoly_tp_da = ttfs_da.createChild(\"polyTP\")\n\t\tpoly_sp_da = ttfs_da.createChild(\"polySP\")\n\t\tpoly_t_da.setValue(\"order\",0)\n\t\tpoly_t_da.setValue(\"pcoefs\",\"1.0 \")\n\t\tpoly_tp_da.setValue(\"order\",0)\n\t\tpoly_tp_da.setValue(\"pcoefs\",\"0 \")\n\t\tpoly_s_da.setValue(\"order\",0)\n\t\tpoly_s_da.setValue(\"pcoefs\",\"0 \")\n\t\tpoly_sp_da.setValue(\"order\",0)\n\t\tpoly_sp_da.setValue(\"pcoefs\",\"0 \")\n\t\treturn node_da", "def load_ontology_classes(self, base_class=None):\n sparql_query = '''\n SELECT DISTINCT ?ont_node ?label\n {\n ?ont_node rdfs:subClassOf* <%s> .\n ?ont_node rdfs:label ?label\n }\n '''\n\n count = 0\n 
qres = self.rdf_graph.query(sparql_query % base_class)\n\n for (ont_node, ont_label) in qres:\n uri = str(ont_node)\n label = str(ont_label)\n self.current_classes[uri] = label\n count +=1\n\n '''\n Add the children too\n '''\n self.get_children(uri=uri)\n\n logger.debug(\"parsed %i classes\"%count)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints exception and details in human readable form. You can specify IO stream object in `output` parameter. By default text is printed to standard output.
def print_exception(self, output=None): if not output: output = sys.stderr text = u"stream failed. reason: %s\n" % self.message text += u"exception: %s: \n" % self.exception.__class__.__name__ text += u"node: %s\n" % self.node try: text += unicode(self.exception) except Exception, e: text += u"<unable to get exception string: %s>" % e text += "\ntraceback\n" try: l = traceback.format_list(traceback.extract_tb(self.traceback)) text += "".join(l) except Exception as e: text += "<unable to get traceback string: %s>" % e text += "\n" if self.inputs: for i, fields in enumerate(self.inputs): text += "input %i:\n" % i input_text = "" for (index, field) in enumerate(fields): input_text += u"% 5d %s (storage:%s analytical:%s)\n" \ % (index, field.name, field.storage_type, field.analytical_type) text += unicode(input_text) else: text += "input: none" text += "\n" if self.output: text += "output:\n" for field in self.output: text += u" %s (storage:%s analytical:%s)\n" \ % (field.name, field.storage_type, field.analytical_type) else: text += "ouput: none" text += "\n" if self.attributes: text += "attributes:\n" for name, attribute in self.attributes.items(): try: value = unicode(attribute) except Exception, e: value = "unable to convert to string (exception: %s)" % e text += " %s: %s\n" % (name, value) else: text += "attributes: none" output.write(text)
[ "def ShowException(self):\n (etype, value, tb) =sys.exc_info()\n # remove myself from traceback\n tblist =traceback.extract_tb(tb)[1:]\n msg = ' '.join(traceback.format_exception_only(etype, value)\n +traceback.format_list(tblist))\n self.output.write_exc(msg)", "def print_exc(self, output_file=sys.stdout):\n our_type, our_exception, our_traceback = sys.exc_info()\n traceback.print_exception(our_type, our_exception, our_traceback, file=output_file)\n if self.nested_type != None:\n print(\"Caused by:\", file=output_file)\n\n if self.nested_exception != None:\n self._print_exc(output_file)", "def display_exceptions(context: ExecutionContext, event: events.Finished) -> None:\n if not event.has_errors:\n return\n\n default.display_section_name(\"EXCEPTIONS\")\n for result in context.results:\n if result.has_errors:\n display_single_exception(context, result)\n if not context.show_errors_tracebacks:\n click.secho(\n \"Add this option to your command line parameters to see full tracebacks: --show-exception-tracebacks\",\n fg=\"magenta\",\n )", "def _print_exc(self, output_file):\n traceback.print_exception(self.nested_type, self.nested_exception, self.nested_traceback, file=output_file)\n if isinstance(self.nested_exception, NestingException):\n print(\"Caused by:\", file=output_file)\n self.nested_exception._print_exc(output_file)", "def print_exception(exc, file=stdout, /) -> Any:\n ...", "def PrintExceptionScreen(self):\r\n if self.ErrorCount > 0:\r\n print('The following issues have occurred: ')\r\n print(self.StdoutMessage())", "def print_exc_info():\n\n import StringIO, traceback\n \n sio = StringIO.StringIO()\n traceback.print_exc(file=sio) #thread-safe print_exception to string\n sio.seek(0, 0)\n \n return sio.read()", "def _error(exc=None):\n if exc is None:\n exc = format_exc()\n print('* confspec:', file=stderr)\n for line in exc.split('\\n'):\n print('* ', line, file=stderr)", "def print_errors(self):\n out = f\"process standard output:\\n{self.stdout_str}\"\n err = f\"process error output:\\n{self.stderr_str}\"\n print(out, file=sys.stderr)\n print(err, file=sys.stderr)", "def ShowSyntaxError(self):\n (etype, value, tb) =sys.exc_info()\n msg = ' '.join(traceback.format_exception_only(etype, value))\n self.output.write_exc(msg)", "def printErrorMsg(text):\r\n\tprint >> stderr, text", "async def error_to_text(req: Request, res: Response, exc: HTTPError):\n res.status_code = exc.status_code\n text = exc.title\n if exc.detail:\n text += f\"\\n{exc.detail}\"\n res.text = text", "def get_exception_text():\n # type: (...) 
-> str\n return ''.join(format_exception(*sys.exc_info()))", "def print_output_error(job):\n out_file = find_latest_log(job.out_file)\n err_file = find_latest_log(job.err_file)\n\n my_parser = kickstart_parser.Parser(out_file)\n my_output = my_parser.parse_stdout_stderr()\n my_task_id = 0\n\n if len(my_output) > 0:\n # Ok, we got valid kickstart records, output stdout and stderr for tasks that failed\n for entry in my_output:\n # Count tasks, the same way as pegasus-monitord for Stampede\n my_task_id = my_task_id + 1\n if not \"derivation\" in entry or not \"transformation\" in entry:\n continue\n if not \"exitcode\" in entry and not \"error\" in entry:\n continue\n if \"exitcode\" in entry:\n try:\n if int(entry[\"exitcode\"]) == 0:\n # Skip tasks with exitcode equals to zero\n continue\n except Exception:\n logger.warn(\"couldn't convert exitcode to integer!\")\n continue\n else:\n # We must have \"error\" in entry\n pass\n # Got a task with a non-zero exitcode\n print_console((\"Task #\" + str(my_task_id) + \" - Summary\").center(80, \"-\"))\n print_console()\n if \"resource\" in entry:\n print_console(\"site : %s\" % (entry[\"resource\"]))\n if \"hostname\" in entry:\n print_console(\"hostname : %s\" % (entry[\"hostname\"]))\n if \"name\" in entry:\n print_console(\"executable : %s\" % (entry[\"name\"]))\n if \"argument-vector\" in entry:\n print_console(\"arguments : %s\" % (entry[\"argument-vector\"]))\n if \"exitcode\" in entry:\n print_console(\"exitcode : %s\" % (entry[\"exitcode\"]))\n else:\n if \"error\" in entry:\n print_console(\"error : %s\" % (entry[\"error\"]))\n if \"cwd\" in entry:\n print_console(\"working dir : %s\" % (entry[\"cwd\"]))\n print_console()\n # Now let's display stdout and stderr\n if \"stdout\" in entry:\n if len(entry[\"stdout\"]) > 0:\n # Something to display\n print_console(\n (\n \"Task #\"\n + str(my_task_id)\n + \" - \"\n + entry[\"transformation\"]\n + \" - \"\n + entry[\"derivation\"]\n + \" - stdout\"\n ).center(80, \"-\")\n )\n print_console()\n print_console(entry[\"stdout\"])\n print_console()\n if \"stderr\" in entry:\n if len(entry[\"stderr\"]) > 0:\n # Something to display\n print_console(\n (\n \"Task #\"\n + str(my_task_id)\n + \" - \"\n + entry[\"transformation\"]\n + \" - \"\n + entry[\"derivation\"]\n + \" - stderr\"\n ).center(80, \"-\")\n )\n print_console()\n print_console(entry[\"stderr\"])\n print_console()\n else:\n # Not able to parse the kickstart output file, let's just dump the out and err files\n\n # Print outfile to screen\n dump_file(out_file)\n\n # Print errfile to screen\n dump_file(err_file)", "def exception(self, s):\n self.error(s)\n type, value, tb = sys.exc_info()\n self.writelines(traceback.format_stack(), 1)\n self.writelines(traceback.format_tb(tb)[1:], 1)\n self.writelines(traceback.format_exception_only(type, value), 1)", "def display_error():\n sys.stderr.write(\n \"\\n\\n###########\\n# ERROR\\n###########\\n#\\n\"\n \"# It would appear that you are trying to perform trail grouping \"\n \"on\\n\"\n \"# CoMetGeNe results different than those used to generate the\\n\"\n \"# pickle files in the pickle/ directory.\\n#\\n\"\n \"# Try the following:\\n\"\n \"# \\t* rename \" + PICKLE_RN_FILENAME + \" to something else\\n\"\n \"# \\t* rename \" + PICKLE_RS_FILENAME + \" to something else\\n\"\n \"# \\t* rename \" + PICKLE_GEN_FILENAME + \" to something else\\n\"\n \"# \\t* rename \" + PICKLE_GENOME_FILENAME + \" to something else and\"\n \"\\n# \\t use the correct file instead (created by CoMetGeNe.py)\\n\\n\"\n )", 
"def displayError(err):\n print(\"\\nError: %s.\" % err)\n displayUsage()", "def http_return_exception(exc_info=None, stream=sys.stdout):\n\n if exc_info == None:\n exc_info = sys.exc_info()\n\n stream.write(\"Content-type: text/html\\n\\n\");\n stream.write(format_exception(exc_info))", "def __str__(self):\n header = \"Traceback\"\n if self.COLORIZE:\n header = Colorize.apply(header, 'traceback-header')\n header = \"{}{} (most recent call last):\".format(self.leading_chars, header)\n steps = \"\\n\".join([str(step) for step in self.steps])\n output = \"\\n\".join([header, steps, str(self.exception)])\n return output" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
mask all the pixels that either contain (bs[i], ls[i]) or the distance from the point to the center of the pixel is less than dist
def ps2maskpix(nside, bs, ls, dist, nest=False): nestin = True npix = healpy.nside2npix(nside) mask = np.ones(npix) pixel_size = pix_size(nside) if not isinstance(dist, np.ndarray): dists = np.ones(len(bs)) * dist else: dists = dist depth_min = min(dists / pixel_size) if depth_min < 2.: vp = np.array(BL2xyz(bs, ls)) vec2pix = lambda x, y, z: healpy.vec2pix(nside, x, y, z, nest=nestin) vec2pix_vec = np.frompyfunc(vec2pix, 3, 1) pixs = np.array(vec2pix_vec(vp[0], vp[1], vp[2]), dtype=int) mask[pixs] = 0. for i in range(len(bs)): if i % 100 == 0 and i > 0: print i depth = np.ceil(dists[i] / pixel_size) neib = get_all_neib(nside, pixs[i], depth=depth, nest=nestin) for pn in neib: vpn = healpy.pix2vec(nside, pn, nest=nestin) if np.arccos(np.dot(vp[:,i], vpn)) < dists[i]: mask[pn] = 0. if nest: return mask else: return nest_array2ring_array(mask) else: inds = range(npix) vecs = np.array(healpy.pix2vec(nside, inds, nest=False)).T for i in range(len(bs)): if i % 100 == 0 and i > 0: print i BL0 = (bs[i], ls[i]) mask *= 1. - mask_circle(nside, dists[i], BL0, inds=inds, nest=nest, vecs=vecs) return mask
[ "def handle_SExtractor_mask(stars, thresh):\r\n mask = np.ones(stars.shape)\r\n mask[stars < thresh] = 0\r\n stars[stars < thresh] = 0\r\n return mask", "def to_apply_mask(img, bbox):\n x1, y1, x2, y2 = bbox\n img[:,y1:y2,x1:x2] = img[:,y1:y2,x1:x2].normal_(0.0, 0.1) \n return img", "def landsat_clean_mask_invalid(dataset):\n data_bands = dataset.drop('pixel_qa')\n return data_bands.where((0 < data_bands) & (data_bands < 10000))", "def findPointSources_deprecated(filtered_img,num_src,mask=True,mask_rad=250):\n temp_data = np.copy(filtered_img)\n pointsrc_coords_x=[]\n pointsrc_coords_y=[]\n if mask == False:\n for i in range(num_src):\n center=np.where(temp_data==np.max(temp_data))\n pointsrc_coords_x=np.append(pointsrc_coords_x,center[0][0])\n pointsrc_coords_y=np.append(pointsrc_coords_y,center[1][0])\n xmin=center[0][0]-10\n xmax=center[0][0]+10\n ymin=center[1][0]-10\n ymax=center[1][0]+10\n temp_data[xmin:xmax,ymin:ymax]=0\n else:\n temp = maskOuterRing(temp_data,mask_rad)\n for i in range(num_src):\n center=np.where(temp==np.max(temp))\n pointsrc_coords_x=np.append(pointsrc_coords_x,center[0][0])\n pointsrc_coords_y=np.append(pointsrc_coords_y,center[1][0])\n xmin=center[0][0]-10\n xmax=center[0][0]+10\n ymin=center[1][0]-10\n ymax=center[1][0]+10\n temp[xmin:xmax,ymin:ymax]=0\n return pointsrc_coords_x,pointsrc_coords_y", "def transform_mask(weights, filt):\r\n stamp_size = weights.shape[0]\r\n antimask = np.zeros(weights.shape)\r\n antimask[weights == 0] = 1\r\n kernel = np.where(filt != 0)[0]\r\n filt_radius = np.max(kernel) - np.min(kernel)\r\n bad_pix = np.where(antimask)\r\n for pixx, pixy, flagged_idx in zip(*bad_pix):\r\n lx = max(0, pixx - filt_radius)\r\n ly = max(0, pixy - filt_radius)\r\n rx = min(pixx + filt_radius, stamp_size)\r\n ry = min(pixy + filt_radius, stamp_size)\r\n antimask[lx:rx, ly:ry, flagged_idx] = 1\r\n\r\n mask = np.abs(antimask - 1)\r\n return mask", "def image_spotselect(CS_mask,N_min = 2):\n S_mask = (CS_mask > 0) \n \n N_spots = sum(S_mask)\n X0,Y0 = where(S_mask)\n close = zeros(N_spots)\n for i in range(N_spots):\n for j in range(N_spots):\n if (i <> j) & (close[i] == 0):\n close[i] = sqrt((X0[i]-X0[j])**2+(Y0[i]-Y0[j])**2) < 4\n S_mask[X0[where(close == 1)],Y0[where(close == 1)]] = 0\n \n S_mask &= (CS_mask >= N_min) # Select spots found in N_min+ images\n \n return S_mask", "def dist_filter(xyz, dists, result):\n i = knn2\n il = 0\n end = xyz.shape[0] - knn2\n while i < end:\n dst = ((xyz[il,0] - xyz[i,0])**2 + \\\n (xyz[il,1] - xyz[i,1])**2 + \\\n (xyz[il,2] - xyz[i,2])**2)**0.5\n\n if dst >= abs(xyz[i,2])*depth_multiplier:\n il = i - knn2 + np.argmin(dists[i-knn2:i+knn2+1])\n result[il] = True\n i += knn2 - 1\n else:\n i += 1\n\n return result", "def _build_multiband_mask(data, tractor, filt2pixscale, fill_value=0.0,\n threshmask=0.01, r50mask=0.05, maxshift=0.0,\n sigmamask=3.0, neighborfactor=1.0, verbose=False):\n import numpy.ma as ma\n from copy import copy\n from skimage.transform import resize\n from legacyhalos.mge import find_galaxy\n from legacyhalos.misc import srcs2image, ellipse_mask\n\n import matplotlib.pyplot as plt\n from astropy.visualization import simple_norm\n\n bands, refband = data['bands'], data['refband']\n #residual_mask = data['residual_mask']\n\n #nbox = 5\n #box = np.arange(nbox)-nbox // 2\n #box = np.meshgrid(np.arange(nbox), np.arange(nbox))[0]-nbox//2\n\n xobj, yobj = np.ogrid[0:data['refband_height'], 0:data['refband_width']]\n dims = data[refband].shape\n assert(dims[0] == dims[1])\n\n # If the row-index of 
the central galaxy is not provided, use the source\n # nearest to the center of the field.\n if 'galaxy_indx' in data.keys():\n galaxy_indx = np.atleast_1d(data['galaxy_indx'])\n else:\n galaxy_indx = np.array([np.argmin((tractor.bx - data['refband_height']/2)**2 +\n (tractor.by - data['refband_width']/2)**2)])\n data['galaxy_indx'] = np.atleast_1d(galaxy_indx)\n data['galaxy_id'] = ''\n\n #print('Import hack!')\n #norm = simple_norm(img, 'log', min_percent=0.05, clip=True)\n #import matplotlib.pyplot as plt ; from astropy.visualization import simple_norm\n\n ## Get the PSF sources.\n #psfindx = np.where(tractor.type == 'PSF')[0]\n #if len(psfindx) > 0:\n # psfsrcs = tractor.copy()\n # psfsrcs.cut(psfindx)\n #else:\n # psfsrcs = None\n\n def tractor2mge(indx, factor=1.0):\n #def tractor2mge(indx, majoraxis=None):\n # Convert a Tractor catalog entry to an MGE object.\n class MGEgalaxy(object):\n pass\n\n if tractor.type[indx] == 'PSF' or tractor.shape_r[indx] < 5:\n pa = tractor.pa_init[indx]\n ba = tractor.ba_init[indx]\n # take away the extra factor of 2 we put in in read_sample()\n r50 = tractor.diam_init[indx] * 60 / 2 / 2 # [arcsec]\n if r50 < 5:\n r50 = 5.0 # minimum size, arcsec\n majoraxis = factor * r50 / filt2pixscale[refband] # [pixels]\n else:\n ee = np.hypot(tractor.shape_e1[indx], tractor.shape_e2[indx])\n ba = (1 - ee) / (1 + ee)\n pa = 180 - (-np.rad2deg(np.arctan2(tractor.shape_e2[indx], tractor.shape_e1[indx]) / 2))\n pa = pa % 180\n #majoraxis = factor * tractor.shape_r[indx] / filt2pixscale[refband] # [pixels]\n\n # can be zero (or very small) if fit as a PSF or REX\n if tractor.shape_r[indx] > 1:\n majoraxis = factor * tractor.shape_r[indx] / filt2pixscale[refband] # [pixels]\n else:\n majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n\n mgegalaxy = MGEgalaxy()\n\n # force the central pixels to be at the center of the mosaic because all\n # MaNGA sources were visually inspected and we want to have consistency\n # between the center used for the IFU and the center used for photometry.\n mgegalaxy.xmed = dims[0] / 2\n mgegalaxy.ymed = dims[0] / 2\n mgegalaxy.xpeak = dims[0] / 2\n mgegalaxy.ypeak = dims[0] / 2\n #mgegalaxy.xmed = tractor.by[indx]\n #mgegalaxy.ymed = tractor.bx[indx]\n #mgegalaxy.xpeak = tractor.by[indx]\n #mgegalaxy.ypeak = tractor.bx[indx]\n mgegalaxy.eps = 1-ba\n mgegalaxy.pa = pa\n mgegalaxy.theta = (270 - pa) % 180\n mgegalaxy.majoraxis = majoraxis\n\n # by default, restore all the pixels within 10% of the nominal IFU\n # footprint, assuming a circular geometry.\n default_majoraxis = 1.1 * MANGA_RADIUS / 2 / filt2pixscale[refband] # [pixels]\n objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n default_majoraxis, default_majoraxis, 0.0, xobj, yobj)\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # mgegalaxy.majoraxis,\n # mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n # np.radians(mgegalaxy.theta-90), xobj, yobj)\n \n return mgegalaxy, objmask\n\n # Now, loop through each 'galaxy_indx' from bright to faint.\n data['mge'] = []\n for ii, central in enumerate(galaxy_indx):\n print('Determing the geometry for galaxy {}/{}.'.format(\n ii+1, len(galaxy_indx)))\n\n #if tractor.ref_cat[galaxy_indx] == 'R1' and tractor.ref_id[galaxy_indx] == 8587006103:\n # neighborfactor = 1.0\n\n # [1] Determine the non-parametricc geometry of the galaxy of interest\n # in the reference band. First, subtract all models except the galaxy\n # and galaxies \"near\" it. 
Also restore the original pixels of the\n # central in case there was a poor deblend.\n largeshift = False\n mge, centralmask = tractor2mge(central, factor=1.0)\n #plt.clf() ; plt.imshow(centralmask, origin='lower') ; plt.savefig('junk-mask.png') ; pdb.set_trace()\n\n iclose = np.where([centralmask[np.int(by), np.int(bx)]\n for by, bx in zip(tractor.by, tractor.bx)])[0]\n \n srcs = tractor.copy()\n srcs.cut(np.delete(np.arange(len(tractor)), iclose))\n model = srcs2image(srcs, data['{}_wcs'.format(refband.lower())],\n band=refband.lower(),\n pixelized_psf=data['{}_psf'.format(refband.lower())])\n\n img = data[refband].data - model\n img[centralmask] = data[refband].data[centralmask]\n\n # the \"residual mask\" is initialized in legacyhalos.io._read_image_data\n # and it includes pixels which are significant residuals (data minus\n # model), pixels with invvar==0, and pixels belonging to maskbits\n # BRIGHT, MEDIUM, CLUSTER, or ALLMASK_[GRZ]\n \n mask = np.logical_or(ma.getmask(data[refband]), data['residual_mask'])\n #mask = np.logical_or(data[refband].mask, data['residual_mask'])\n mask[centralmask] = False\n\n img = ma.masked_array(img, mask)\n ma.set_fill_value(img, fill_value)\n\n mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=False)#, plot=True) ; plt.savefig('desi-users/ioannis/tmp/debug.png')\n\n # force the center\n mgegalaxy.xmed = dims[0] / 2\n mgegalaxy.ymed = dims[0] / 2\n mgegalaxy.xpeak = dims[0] / 2\n mgegalaxy.ypeak = dims[0] / 2\n print('Enforcing galaxy centroid to the center of the mosaic: (x,y)=({:.3f},{:.3f})'.format(\n mgegalaxy.xmed, mgegalaxy.ymed))\n \n #if True:\n # import matplotlib.pyplot as plt\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/debug.png')\n ## #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # pdb.set_trace()\n\n # Did the galaxy position move? If so, revert back to the Tractor geometry.\n if np.abs(mgegalaxy.xmed-mge.xmed) > maxshift or np.abs(mgegalaxy.ymed-mge.ymed) > maxshift:\n print('Large centroid shift! (x,y)=({:.3f},{:.3f})-->({:.3f},{:.3f})'.format(\n mgegalaxy.xmed, mgegalaxy.ymed, mge.xmed, mge.ymed))\n largeshift = True\n\n # For the MaNGA project only, check to make sure the Tractor\n # position isn't far from the center of the mosaic, which can happen\n # near bright stars, e.g., 8133-12705\n mgegalaxy = copy(mge)\n sz = img.shape\n if np.abs(mgegalaxy.xmed-sz[1]/2) > maxshift or np.abs(mgegalaxy.ymed-sz[0]/2) > maxshift:\n print('Large centroid shift in Tractor coordinates! 
(x,y)=({:.3f},{:.3f})-->({:.3f},{:.3f})'.format(\n mgegalaxy.xmed, mgegalaxy.ymed, sz[1]/2, sz[0]/2))\n mgegalaxy.xmed = sz[1]/2\n mgegalaxy.ymed = sz[0]/2\n \n radec_med = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ymed+1, mgegalaxy.xmed+1).vals\n radec_peak = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ypeak+1, mgegalaxy.xpeak+1).vals\n mge = {\n 'largeshift': largeshift,\n 'ra': tractor.ra[central], 'dec': tractor.dec[central],\n 'bx': tractor.bx[central], 'by': tractor.by[central],\n #'mw_transmission_g': tractor.mw_transmission_g[central],\n #'mw_transmission_r': tractor.mw_transmission_r[central],\n #'mw_transmission_z': tractor.mw_transmission_z[central],\n 'ra_moment': radec_med[0], 'dec_moment': radec_med[1],\n #'ra_peak': radec_med[0], 'dec_peak': radec_med[1]\n }\n\n # add the dust\n from legacyhalos.dust import SFDMap, mwdust_transmission\n ebv = SFDMap().ebv(radec_peak[0], radec_peak[1])\n mge['ebv'] = np.float32(ebv)\n for band in ['fuv', 'nuv', 'g', 'r', 'z', 'w1', 'w2', 'w3', 'w4']:\n mge['mw_transmission_{}'.format(band.lower())] = mwdust_transmission(ebv, band, 'N', match_legacy_surveys=True).astype('f4')\n \n for key in ('eps', 'majoraxis', 'pa', 'theta', 'xmed', 'ymed', 'xpeak', 'ypeak'):\n mge[key] = np.float32(getattr(mgegalaxy, key))\n if key == 'pa': # put into range [0-180]\n mge[key] = mge[key] % np.float32(180)\n data['mge'].append(mge)\n\n #if False:\n # #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # plt.clf() ; mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=True, plot=True)\n # plt.savefig('/mnt/legacyhalos-data/debug.png')\n\n # [2] Create the satellite mask in all the bandpasses. Use srcs here,\n # which has had the satellites nearest to the central galaxy trimmed\n # out.\n print('Building the satellite mask.')\n #srcs = tractor.copy()\n satmask = np.zeros(data[refband].shape, bool)\n for filt in bands:\n # do not let GALEX and WISE contribute to the satellite mask\n if data[filt].shape != satmask.shape:\n continue\n \n cenflux = getattr(tractor, 'flux_{}'.format(filt.lower()))[central]\n satflux = getattr(srcs, 'flux_{}'.format(filt.lower()))\n if cenflux <= 0.0:\n print('Central galaxy flux is negative! Proceed with caution...')\n #pdb.set_trace()\n #raise ValueError('Central galaxy flux is negative!')\n \n satindx = np.where(np.logical_or(\n (srcs.type != 'PSF') * (srcs.shape_r > r50mask) *\n (satflux > 0.0) * ((satflux / cenflux) > threshmask),\n srcs.ref_cat == 'R1'))[0]\n #satindx = np.where(srcs.ref_cat == 'R1')[0]\n #if np.isin(central, satindx):\n # satindx = satindx[np.logical_not(np.isin(satindx, central))]\n if len(satindx) == 0:\n #raise ValueError('All satellites have been dropped!')\n print('Warning! 
All satellites have been dropped from band {}!'.format(filt))\n else:\n satsrcs = srcs.copy()\n #satsrcs = tractor.copy()\n satsrcs.cut(satindx)\n satimg = srcs2image(satsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n thissatmask = satimg > sigmamask*data['{}_sigma'.format(filt.lower())]\n #if filt == 'FUV':\n # plt.clf() ; plt.imshow(thissatmask, origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # #plt.clf() ; plt.imshow(data[filt], origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n if satmask.shape != satimg.shape:\n thissatmask = resize(thissatmask*1.0, satmask.shape, mode='reflect') > 0\n\n satmask = np.logical_or(satmask, thissatmask)\n #if True:\n # import matplotlib.pyplot as plt\n ## plt.clf() ; plt.imshow(np.log10(satimg), origin='lower') ; plt.savefig('debug.png')\n # plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/debug.png')\n ### #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # pdb.set_trace()\n\n #print(filt, np.sum(satmask), np.sum(thissatmask))\n\n #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('junk-satmask.png')\n \n # [3] Build the final image (in each filter) for ellipse-fitting. First,\n # subtract out the PSF sources. Then update the mask (but ignore the\n # residual mask). Finally convert to surface brightness.\n #for filt in ['W1']:\n for filt in bands:\n thismask = ma.getmask(data[filt])\n if satmask.shape != thismask.shape:\n _satmask = (resize(satmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n _centralmask = (resize(centralmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n mask = np.logical_or(thismask, _satmask)\n mask[_centralmask] = False\n else:\n mask = np.logical_or(thismask, satmask)\n mask[centralmask] = False\n #if filt == 'W1':\n # plt.imshow(_satmask, origin='lower') ; plt.savefig('junk-satmask-{}.png'.format(filt))\n # plt.imshow(mask, origin='lower') ; plt.savefig('junk-mask-{}.png'.format(filt))\n # pdb.set_trace()\n\n varkey = '{}_var'.format(filt.lower())\n imagekey = '{}_masked'.format(filt.lower())\n psfimgkey = '{}_psfimg'.format(filt.lower())\n thispixscale = filt2pixscale[filt]\n if imagekey not in data.keys():\n data[imagekey], data[varkey], data[psfimgkey] = [], [], []\n\n img = ma.getdata(data[filt]).copy()\n \n # Get the PSF sources.\n psfindx = np.where((tractor.type == 'PSF') * (getattr(tractor, 'flux_{}'.format(filt.lower())) / cenflux > threshmask))[0]\n if len(psfindx) > 0 and filt.upper() != 'W3' and filt.upper() != 'W4':\n psfsrcs = tractor.copy()\n psfsrcs.cut(psfindx)\n else:\n psfsrcs = None\n \n if psfsrcs:\n psfimg = srcs2image(psfsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n if False:\n #import fitsio ; fitsio.write('junk-psf-{}.fits'.format(filt.lower()), data['{}_psf'.format(filt.lower())].img, clobber=True)\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n im = ax1.imshow(np.log10(img), origin='lower') ; fig.colorbar(im, ax=ax1)\n im = ax2.imshow(np.log10(psfimg), origin='lower') ; fig.colorbar(im, ax=ax2)\n im = ax3.imshow(np.log10(data['{}_psf'.format(filt.lower())].img), origin='lower') ; fig.colorbar(im, ax=ax3)\n im = ax4.imshow(img-psfimg, origin='lower') ; fig.colorbar(im, ax=ax4)\n plt.savefig('qa-psf-{}.png'.format(filt.lower()))\n #if filt == 'W4':# or filt == 'r':\n # pdb.set_trace()\n img -= 
psfimg\n else:\n psfimg = np.zeros((2, 2), 'f4')\n\n data[psfimgkey].append(psfimg)\n\n img = ma.masked_array((img / thispixscale**2).astype('f4'), mask) # [nanomaggies/arcsec**2]\n var = data['{}_var_'.format(filt.lower())] / thispixscale**4 # [nanomaggies**2/arcsec**4]\n\n # Fill with zeros, for fun--\n ma.set_fill_value(img, fill_value)\n #if filt == 'r':# or filt == 'r':\n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt.lower()))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt.lower()))\n ## plt.clf() ; plt.imshow(thismask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-thismask-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n \n data[imagekey].append(img)\n data[varkey].append(var)\n\n #test = data['r_masked'][0]\n #plt.clf() ; plt.imshow(np.log(test.clip(test[mgegalaxy.xpeak, mgegalaxy.ypeak]/1e4)), origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Cleanup?\n for filt in bands:\n del data[filt]\n del data['{}_var_'.format(filt.lower())]\n\n return data", "def testFaintNeighborMasking(self):\n \"\"\"\n We create another faint (i.e., undetected) object separated\n from the one of interest, which should be masked.\n \"\"\"\n self.checkCandidateMasking([(self.x+5, self.y, 0.5)], threshold=0.9, pixelThreshold=1.0)", "def _filter_cell_clumps(data, cells, wildcards, distance_threshold=10):\n if np.all(cells==0):\n return np.zeros((1480,1480))\n\n df = (Snake._extract_features(cells, cells, wildcards))\n # add column for [x,y] positions\n df['ij'] = df[['i','j']].values.tolist()\n ij = df['ij'].values.tolist()\n\n # calculate matrix of Euclidean distance between all cells in FOV\n distance = scipy.spatial.distance.cdist(ij, ij, 'euclidean')\n min_dist = np.where(distance>0, distance,distance.max()).min(1)\n # cells (labels) that pass distance threshold from nearest neighbor\n try:\n min_idx = np.hstack(np.argwhere(min_dist > distance_threshold))\n label = df.iloc[min_idx]\n mask = np.isin(cells, np.array(label['label'].values.tolist()))\n filtered_cells = np.multiply(mask.astype(int),cells)\n except:\n filtered_cells = np.zeros((1480,1480))\n\n return filtered_cells", "def fill_holes_per_blob(self, labeled_mask):\n image_cleaned = np.zeros_like(labeled_mask)\n for i in range(1, labeled_mask.max() + 1):\n mask = np.where(labeled_mask == i, 1, 0)\n mask = ndi.binary_fill_holes(mask)\n image_cleaned = image_cleaned + mask * i\n return image_cleaned", "def make_mask(data, xpix, ypix, rmask=15):\r\n mask = np.zeros_like(halpha).astype(np.int) # variavel booleana do tamanho do halpha\r\n xdim, ydim = data.shape\r\n #define um array de x e y\r\n x = np.arange(xdim)\r\n y = np.arange(ydim)\r\n xx, yy = np.meshgrid(x, y) #faz uma imagem das coordenadas\r\n for x0, y0 in zip(xpix, ypix):#loop para cada objeto dessa lista\r\n #x0 é o centro da estrela\r\n r = np.sqrt((xx - x0) ** 2 + (yy - y0) ** 2) # raio em ao x0 e y0\r\n mask[r<=rmask] = 1\r\n return mask", "def autocrop_to_mask(self, all_images,mask, thr=0):\n mask = mask>thr\n rows = np.any(mask, axis=1)\n cols = np.any(mask, axis=0)\n rmin, rmax = np.where(rows)[0][[0, -1]]\n cmin, cmax = np.where(cols)[0][[0, -1]]\n for image in all_images.keys():\n all_images[image]= all_images[image][rmin:rmax,cmin:cmax]\n return all_images", "def maskDead(smr, verbose=False):\n if verbose:\n print('(*) Perform masking of the dead/bad pixel')\n smr.errorType = 'F'\n #\n # 
mask blinded pixels\n #\n smr.blinded = np.empty((smr.numPixels,), dtype=bool)\n smr.blinded[:] = False\n\n id_list = np.array(list(range(10)) + list(range(1024-10, 1024)))\n smr.blinded[0+id_list] = True # channel 1\n smr.blinded[1024+id_list] = True # channel 2\n smr.blinded[2048+id_list] = True # channel 3\n smr.blinded[3072+id_list] = True # channel 4\n smr.blinded[4096+id_list] = True # channel 5\n smr.blinded[5120+id_list] = True # channel 6\n smr.blinded[6144+id_list] = True # channel 7\n smr.blinded[7168+id_list] = True # channel 8\n #\n # mask dead pixels\n #\n i_masked = smr.spectra.mask.sum()\n smr.spectra = ma.masked_equal(smr.spectra, 0, copy=False)\n if verbose:\n masked = smr.spectra.mask.sum()\n print('* Info: masked %6.1f pixels/spectrum with zero signal'\n % ((masked - i_masked) / float(smr.numSpectra)))\n i_masked = masked\n\n smr.spectra = ma.masked_where((smr.spectra / smr.coaddf) >= 65535.,\n smr.spectra, copy=False)\n if verbose:\n masked = smr.spectra.mask.sum()\n print('* Info: masked %6.1f pixels/spectrum with saturated signal'\n % ((masked - i_masked) / float(smr.numSpectra)))\n i_masked = masked", "def compute_mask(self, scale: int, env: Environment):\n n_X = self.hp.cst_env.width*scale\n n_Y = self.hp.cst_env.height*scale\n is_inside = np.zeros((n_X, n_Y))\n for i in range(n_X): # x\n for j in range(n_Y): # y\n is_inside[i, j] = env.is_point_inside(i/scale, j/scale)\n return is_inside", "def filter_density(mask, rad=3, size=5, fn = lambda m,i,j: m[i,j]):\n rows, cols = mask.shape\n X,Y = np.meshgrid(xrange(cols), xrange(rows))\n in_circle = lib.in_circle\n out = np.zeros((rows,cols), np.bool)\n for row,col in locations(mask.shape):\n\tif fn(mask,row,col):\n\t a = in_circle((col,row),rad)\n\t if np.sum(mask*a(X,Y))>size:\n\t\tout[row,col] = True\n return out", "def image_mask(CS_mask,radius = 15):\n from numpy import indices\n w,h = shape(CS_mask)\n x_indices,y_indices = indices((w,h))\n SAXS_mask = sqrt((y_indices-(h-1)/2)**2+(x_indices-(w-1)/2)**2) < radius\n Border_mask = (y_indices<2) | (y_indices>(h-3)) | \\\n (x_indices<2) | (x_indices>(w-3))\n CS_mask *= ~(SAXS_mask | Border_mask)\n return CS_mask", "def make_weight_map(self, masks):\n nrows, ncols = masks.shape[1:]\n masks = (masks > 0).astype(int)\n distMap = np.zeros((nrows * ncols, masks.shape[0]))\n X1, Y1 = np.meshgrid(np.arange(nrows), np.arange(ncols))\n X1, Y1 = np.c_[X1.ravel(), Y1.ravel()].T\n for i, mask in enumerate(masks):\n # find the boundary of each mask,\n # compute the distance of each pixel from this boundary\n bounds = find_boundaries(mask, mode='inner')\n X2, Y2 = np.nonzero(bounds)\n xSum = (X2.reshape(-1, 1) - X1.reshape(1, -1)) ** 2\n ySum = (Y2.reshape(-1, 1) - Y1.reshape(1, -1)) ** 2\n distMap[:, i] = np.sqrt(xSum + ySum).min(axis=0)\n ix = np.arange(distMap.shape[0])\n if distMap.shape[1] == 1:\n d1 = distMap.ravel()\n border_loss_map = self.w0 * np.exp((-1 * (d1) ** 2) / (2 * (self.sigma ** 2)))\n else:\n if distMap.shape[1] == 2:\n d1_ix, d2_ix = np.argpartition(distMap, 1, axis=1)[:, :2].T\n else:\n d1_ix, d2_ix = np.argpartition(distMap, 2, axis=1)[:, :2].T\n d1 = distMap[ix, d1_ix]\n d2 = distMap[ix, d2_ix]\n border_loss_map = self.w0 * np.exp((-1 * (d1 + d2) ** 2) / (2 * (self.sigma ** 2)))\n xBLoss = np.zeros((nrows, ncols))\n xBLoss[X1, Y1] = border_loss_map\n # class weight map\n loss = np.zeros((nrows, ncols))\n w_1 = 1 - masks.sum() / loss.size\n w_0 = 1 - w_1\n loss[masks.sum(0) == 1] = w_1\n loss[masks.sum(0) == 0] = w_0\n ZZ = xBLoss + loss\n return ZZ", "def 
find_unmasked_px(mask, scale):\n h, w = mask.shape\n h_scaled = h // scale\n w_scaled = w // scale\n valid_array = np.zeros((h_scaled, w_scaled), dtype=bool)\n for y in nb.prange(h_scaled):\n st_y = y * scale\n nd_y = st_y + scale\n for x in range(w_scaled):\n st_x = x * scale\n nd_x = st_x + scale\n if np.any(mask[st_y:nd_y, st_x:nd_x]):\n valid_array[y, x] = True\n return valid_array" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return the min index i such that number < array[i] return len(array) if array[1] < number if nearest = True, then return the index of the closet array entry to the number
def findIndex(array, number, nearest=False): if array[0] > number: return 0 elif array[-1] < number: if nearest: return len(array) - 1 else: return len(array) else: imin = 0 imax = len(array) while imax > imin + 1: imed = (imax + imin)/2 if array[imed] < number: imin = imed else: imax = imed if nearest and number < (array[imax] + array[imax - 1])/2: return imax - 1 else: return imax
[ "def _find_nearest(array, value):\n idx = (np.abs(array - value)).argmin()\n if array[idx] > value:\n return idx - 1\n elif array[idx] <= value:\n return idx", "def _find_nearest(array, value):\n idx = (np.abs(array - value)).argmin()\n if array[idx] > value:\n return idx - 1\n elif array[idx] <= value:\n return idx", "def get_index_nearest(array, value):\n return (np.abs(array - value)).argmin()", "def nearest(array,value):\r\n array = np.asarray(array)\r\n idx = (np.abs(array - value)).argmin()\r\n return idx", "def _find_nearest(self, array, value):\n \n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n \n return array[idx], idx", "def find_nearest(array, value):\n idx = (np.abs(array - value)).idxmin() # idxmin instead of argmin\n return array[idx]", "def find_nearest_element(array,value,index=False):\n\t\tidx = n.abs(array-value).argmin()\n\t\treturn (idx,array.flat[idx]) if index else array.flat[idx]", "def find_nearest(array, value):\n idx = (np.abs(array - value)).argmin()\n\n return array[idx]", "def findNearest(self, data_array, val):\n\t\tnearest_val = min(data_array, key=lambda x:abs(x-val))\n\t\tprint(\"nearest val in data array: {}\".format(nearest_val))\n\t\treturn data_array.index(nearest_val)", "def nearest_smallest_element(arr):\n smaller_numbers = []\n\n def nearest(n):\n def find_previous_num():\n for previous_num in reversed(smaller_numbers):\n if previous_num < n:\n return previous_num\n return -1\n\n def append_smaller_number_before_preceding_big(n):\n while len(smaller_numbers) > 0 and smaller_numbers[-1] > n:\n smaller_numbers.pop()\n smaller_numbers.append(n)\n\n previous_num = find_previous_num()\n append_smaller_number_before_preceding_big(n)\n return previous_num\n\n return [nearest(n) for n in arr]", "def find_smallest_greater(an_array: List[int], start: int, end: int, index: int) -> int:\n\tfor i in reversed(range(start, end)):\n\t\tif an_array[i] > an_array[index]:\n\t\t\treturn i", "def nearest (list, value):\n list = remove_out_of_domain(list)\n array = np.asarray(list)\n\n # find index of nearest list to value\n i = (np.abs(array-value)).argmin()\n return array[i]", "def geo_idx(dd, dd_array):\n import numpy as np \n from scipy import stats\n geo_idx = (np.abs(dd_array - dd)).argmin()\n # if distance from closest cell to intended value is 2x the value of the\n # spatial resolution, raise error \n res = stats.mode(np.diff(dd_array))[0][0]\n if np.abs(dd_array[geo_idx] - dd) > (2 * res):\n print('Closet index far from intended value!')\n return \n return geo_idx", "def find_closest(arr, val):\n diff = abs(arr-val)\n ind = int(diff.argmin())\n closest_val = float(arr[ind])\n return closest_val, ind", "def findIndex( value, array ):\n if value < array[0] or value > array[-1]: raise IndexError , \"%s: Out of bound\" % value\n for i, v in enumerate(array):\n if value < v : return i-1\n continue\n raise RuntimeError , \"should not reach here: findIndex( %s, %s)\" % (value, array)", "def get_min(array):\n\n min_val, min_idx = float('inf'), None\n for idx, val in enumerate(array):\n if val < min_val:\n min_val, min_idx = val, idx\n return min_val, min_idx", "def findnearest(particle, particle_array): \r\n\tdist_array = np.sum((particle - particle_array)**2, axis=1)\r\n\treturn np.nanargmin(dist_array)", "def min_index(arr):\n index = 0\n\n for i in range(1, len(arr)):\n if arr[i, 0] < arr[index, 0]:\n index = i\n\n return index", "def min_argmin(array):\n mn = min(array)\n return (mn, array.index(mn))", "def closest_point_finder( point, 
pointsArray ):\n\n mindex = 0\n mindist = 99999999999999999\n for i in range(len(pointsArray)):\n dist = np.linalg.norm( point - pointsArray[i] )\n if dist < mindist:\n mindist = dist\n mindex = i\n return mindex" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert that makefile.yml will be searched in RKD_PATH
def test_loads_from_file_is_searching_in_rkd_path(self): yaml_loader = YamlFileLoader([]) d = tempfile.TemporaryDirectory() os.environ['RKD_PATH'] = d.name with open(d.name + '/makefile.yml', 'w') as f: f.write(''' version: org.riotkit.rkd/yaml/v1 imports: [] tasks: :join:iwa-ait: description: Subscribe to any local section of IWA-AIT, workers have common interest arguments: - not a list ''') try: self.assertRaises(YAMLFileValidationError, lambda: yaml_loader.load_from_file('makefile.yml', 'org.riotkit.rkd/yaml/v1')) finally: d.cleanup() os.environ['RKD_PATH'] = ''
[ "def test_find_path_by_name_founds_path(self):\n\n yaml_loader = YamlFileLoader([])\n\n d = tempfile.TemporaryDirectory()\n os.environ['RKD_PATH'] = d.name\n\n with open(d.name + '/makefile.yml', 'w') as f:\n f.write('''\n version: org.riotkit.rkd/yaml/v1\n imports: []\n tasks: \n :join:iwa-ait:\n description: Subscribe to any local section of IWA-AIT, workers have common interest\n arguments:\n - not a list\n ''')\n\n try:\n path = yaml_loader.find_path_by_name('makefile.yml', '/')\n self.assertTrue(len(path) > 0)\n finally:\n d.cleanup()\n os.environ['RKD_PATH'] = ''", "def testPaths():\n for path in config.main.paths:\n assert(os.path.exists(config.main.paths[path]))", "def check_path():\n root = os.path.abspath(os.path.curdir)\n assert os.path.basename(root) == \"treelite\", \"Must be run on project root.\"", "def is_pkr_path(path):\n from pkr.environment import ENV_FOLDER\n\n return path.is_dir() and len(list(path.glob(\"{}/*/env.yml\".format(ENV_FOLDER)))) > 0", "def test_paths_from_settings():\n import settings_bipype\n\n namespace = settings_bipype.__dict__\n \n variables = { key: namespace[key]\n for key in namespace\n if key.startswith('PATH') }\n \n for var in variables.values():\n assert path_exists(var)", "def test_get_lookup_paths_includes_internal_path_as_well_as_rkd_path(self):\n\n yaml_loader = YamlFileLoader([])\n os.environ['RKD_PATH'] = 'SOME-PATH-THERE'\n\n try:\n paths = yaml_loader.get_lookup_paths('harbor-internal/')\n finally:\n os.environ['RKD_PATH'] = ''\n\n defined_by_rkd_path = paths.index('SOME-PATH-THERE/harbor-internal/')\n\n internal_path = (os.path.realpath(SCRIPT_DIR_PATH) + '/harbor-internal/').replace('test/', '')\n internal_path_index = paths.index(internal_path)\n\n self.assertGreater(defined_by_rkd_path, internal_path_index, msg='defined_by_rkd_path should be favored')", "def check_yaml(recipe):\n\n ## Check yaml keys\n assert (\n \"package\" in recipe and \"version\" in recipe[\"package\"]\n ), \":ggd:check-recipe: must specify 'package:' section with ggd version and package name\"\n assert (\n \"extra\" in recipe\n ), \":ggd:check-recipe: must specify 'extra:' section with author and extra-files\"\n assert (\n \"about\" in recipe and \"summary\" in recipe[\"about\"]\n ), \":ggd:check-recipe: must specify an 'about/summary' section\"\n assert (\n \"identifiers\" in recipe[\"about\"]\n ), \":ggd:check-recipe: must specify an 'identifier' section in about\"\n assert (\n \"genome-build\" in recipe[\"about\"][\"identifiers\"]\n ), \":ggd:check-recipe: must specify 'about:' section with genome-build\"\n assert (\n \"species\" in recipe[\"about\"][\"identifiers\"]\n ), \":ggd:check-recipe: must specify 'about:' section with species\"\n assert (\n \"tags\" in recipe[\"about\"]\n ), \":ggd:check-recipe: must specify 'about:' section with tags\"\n assert \"keywords\" in recipe[\"about\"] and isinstance(\n recipe[\"about\"][\"keywords\"], list\n ), \":ggd:check-recipe: must specify 'about:' section with keywords\"\n\n ##Check tags\n assert (\n \"genomic-coordinate-base\" in recipe[\"about\"][\"tags\"]\n ), \":ggd:check-recipe: must specify a genomic coordinate base for the files created by this recipe\"\n assert (\n \"data-version\" in recipe[\"about\"][\"tags\"]\n ), \":ggd:check-recipe: must specify the data version for the data files created by this recipe\"\n assert (\n \"data-provider\" in recipe[\"about\"][\"tags\"]\n ), \":ggd:check-recipe: must specify the data provider for the files created by this recipe\"\n assert (\n \"ggd-channel\" in 
recipe[\"about\"][\"tags\"]\n ), \":ggd:check-recipe: must specify the specific ggd channel for the recipe in the 'about:tags' section\"\n assert (\n \"file-type\" in recipe[\"about\"][\"tags\"]\n ), \":ggd:check-recipe: The final data file types must be specified in the 'about:tags' section\"\n assert (\n \"final-files\" in recipe[\"about\"][\"tags\"]\n ), \":ggd:check-recipe: All final data file must be specified in the 'about:tags' section\"\n assert (\n \"final-file-sizes\" in recipe[\"about\"][\"tags\"]\n ), \":ggd:check-recipe: The size of each final data file must be specified in the 'about:tags' section\"\n\n species, build, version, name, dp = (\n recipe[\"about\"][\"identifiers\"][\"species\"],\n recipe[\"about\"][\"identifiers\"][\"genome-build\"],\n recipe[\"package\"][\"version\"],\n recipe[\"package\"][\"name\"],\n recipe[\"about\"][\"tags\"][\"data-provider\"].lower(),\n )\n version = version.replace(\" \", \"\")\n version = version.replace(\" \", \"'\")\n\n _check_build(species, build)\n return species, build, version, name, dp", "def repo_check():\n ls = os.listdir('.')\n if '_distro_map.yml' not in ls or '_distro_map.yml' not in ls:\n print(\"The specified docs base directory {} does\"\n \"not appear to be a valid ascii_binder directory.\"\n .format(os.getcwd()))\n return False\n return True", "def test_build_checks_yaml_syntax_error(self): # pylint: disable=C0103\n mydir = os.path.dirname(__file__)\n path = os.path.join(mydir, './build_checks_bad_syntax.yml')\n checks_path = os.path.join(mydir, './checks_bad_syntax.yml')\n\n with assertRaisesRegex(self, build.BuildException, r'Bad yaml syntax.*checks_bad_syntax\\.yml'):\n build.build_package(None, 'test_syntax_error', PACKAGE, [], path, checks_path=checks_path)", "def test_check_recipe_recipe_path():\n\n pytest_enable_socket()\n\n ## Uninstall the already installed recipe\n try:\n sp.check_call([\"conda\", \"uninstall\", \"-y\", \"trial-hg38-gaps-v1\"])\n except Exception as e:\n pass\n\n ## Remove fragment files\n jdict = ggd_jdict = {u'channeldata_version': 1, u'subdirs': [u'noarch'], u'packages': {u'trial-hg38-gaps-v1': \n {u'activate.d': False, u'version': u'1', u'tags': {u'ggd-channel': u'genomics', \n u'data-version': u'11-Mar-2019'}, u'post_link': True, u'binary_prefix': False, u'run_exports': {}, \n u'pre_unlink': False, u'subdirs': [u'noarch'], u'deactivate.d': False, u'reference_package': u'noarch/trial-hg38-gaps-v1-1-0.tar.bz2', \n u'pre_link': False, u'keywords': [u'gaps', u'region'], u'summary': u'hg38 Assembly gaps from USCS', \n u'text_prefix': False, u'identifiers': {u'genome-build': u'hg38', u'species': u'Homo_sapiens'}}}}\n\n\n uninstall.check_for_installation([\"trial-hg38-gaps-v1\"], jdict)\n ## Check that the recipe is not installed \n out = utils.check_output([\"conda\", \"list\", \"trial-hg38-gaps-v1\"])\n assert \"trial-hg38-gaps-v1\" not in out\n\n ## Check that the tar file was deleted. 
If not, remove it\n bz2_file = pytest.global_tarball_testing_file\n try:\n assert not os.path.exists(bz2_file)\n assert not os.path.isfile(bz2_file)\n except AssertionError as e:\n if os.path.exists(bz2_file):\n os.remove(bz2_file)\n else:\n raise e\n \n ## Use the previously created ggd recipe path\n recipe_path = pytest.global_ggd_recipe_path\n try: \n assert not os.path.exists(recipe_path)\n assert not os.path.isdir(recipe_path)\n except AssertionError as e:\n if os.path.exists(recipe_path):\n shutil.rmtree(recipe_path)\n else:\n raise e\n\n ## Add recipe\n test__build_normal_run(add_checksum=False,final_files=True)\n recipe_path = pytest.global_ggd_recipe_path\n\n ### check that the checksum file is empty \n assert os.path.getsize(os.path.join(recipe_path,\"checksums_file.txt\")) == 0\n\n ## SKIP md5sum process. -> This will trigger a checksum of the files, which will fail because there is none and the recipe will be uninstalled\n ### exit with 222\n args = Namespace(command='check-recipe', debug=False, recipe_path=recipe_path, dont_uninstall=True, dont_add_md5sum_for_checksum=True, id=None)\n \n with pytest.raises(SystemExit) as pytest_wrapped_e:\n check_recipe.check_recipe((),args)\n assert \"SystemExit\" in str(pytest_wrapped_e.exconly()) ## test that SystemExit was raised by sys.exit() \n if sys.version_info[0] >= 3:\n assert pytest_wrapped_e.match(\"222\") ## Check that the exit code is 1\n\n ## Check that the recipe was uninstalled\n out = utils.check_output([\"conda\", \"list\", \"trial-hg38-gaps-v1\"])\n assert \"trial-hg38-gaps-v1\" not in out\n out = utils.check_output([\"ggd\", \"show-env\"])\n assert \"ggd_trial_hg38_gaps_v1\" not in out\n conda_root = utils.conda_root()\n assert os.path.exists(os.path.join(conda_root,\"share/ggd/Homo_sapiens/hg38/trial-hg38-gaps-v1/1\")) == False \n\n ## Check that the final files and the file types were added \n yaml_dict = yaml.safe_load(open(os.path.join(recipe_path,\"meta.yaml\")))\n assert len(yaml_dict[\"about\"][\"tags\"][\"final-files\"]) == 2\n assert len(yaml_dict[\"about\"][\"tags\"][\"file-type\"]) == 1\n assert \"trial-hg38-gaps-v1.bed.gz\" in yaml_dict[\"about\"][\"tags\"][\"final-files\"] \n assert \"trial-hg38-gaps-v1.bed.gz.tbi\" in yaml_dict[\"about\"][\"tags\"][\"final-files\"] \n assert \"bed\" in yaml_dict[\"about\"][\"tags\"][\"file-type\"] \n\n\n ## Add md5sum. 
No errors should happend, and the package should not be uninstalled because the dont_uninstall flag is set to True\n args = Namespace(command='check-recipe', debug=False, recipe_path=recipe_path, dont_uninstall=True, dont_add_md5sum_for_checksum=False, id=None)\n \n assert check_recipe.check_recipe((),args) == True \n\n out = utils.check_output([\"conda\", \"list\", \"trial-hg38-gaps-v1\"])\n assert \"trial-hg38-gaps-v1\" in out\n out = utils.check_output([\"ggd\", \"show-env\"])\n assert \"ggd_trial_hg38_gaps_v1\" in out\n conda_root = utils.conda_root()\n assert os.path.exists(os.path.join(conda_root,\"share/ggd/Homo_sapiens/hg38/trial-hg38-gaps-v1/1\")) == True", "def test_check_recipe_package_env_vars():\n\n pytest_enable_socket()\n\n ## Test that an env_var is created for a single installed file and the dir\n recipe = CreateRecipe(\n \"\"\"\n one_file_v1:\n meta.yaml: |\n build:\n binary_relocation: false\n detect_binary_files_with_prefix: false\n noarch: generic\n number: 0\n extra:\n authors: mjc \n extra-files: \n - one_file_v1.bw\n package:\n name: one_file_v1\n version: '1' \n requirements:\n build:\n - gsort\n - htslib\n - zlib\n run:\n - gsort\n - htslib\n - zlib\n source:\n path: .\n about:\n identifiers:\n genome-build: hg19\n species: Homo_sapiens\n keywords:\n - gaps\n - region\n summary: testing env_var for recipe with one file\n tags:\n genomic-coordinate-base: 0-based-inclusive\n data-version: 11-Mar-2019\n file-type: \n - bw\n final-files: \n - one_file_v1.bw\n final-file-sizes:\n one_file_v1.bw: 10.1K\n data-provider: UCSC\n ggd-channel: genomics\n \n recipe.sh: |\n #!/bin/sh\n set -eo pipefail -o nounset\n wget --quiet --no-check-certificate --output-document hg19phastcons.bw http://hgdownload.cse.ucsc.edu/goldenpath/hg19/phastCons100way/hg19.100way.phastCons.bw\n\n post-link.sh: |\n set -eo pipefail -o nounset\n\n if [[ -z $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/one_file_v1/1\n elif [[ $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") == \"base\" ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/one_file_v1/1\n else\n env_dir=$(conda info --envs | grep \"*\" | grep -o \"\\/.*\")\n export CONDA_ROOT=$env_dir\n export RECIPE_DIR=$env_dir/share/ggd/Homo_sapiens/hg19/one_file_v1/1\n fi\n\n PKG_DIR=`find \"$CONDA_SOURCE_PREFIX/pkgs/\" -name \"$PKG_NAME-$PKG_VERSION*\" | grep -v \".tar.bz2\" | grep \"$PKG_VERSION.*$PKG_BUILDNUM$\"`\n\n if [ -d $RECIPE_DIR ]; then\n rm -r $RECIPE_DIR\n fi\n\n mkdir -p $RECIPE_DIR\n\n (cd $RECIPE_DIR && bash $PKG_DIR/info/recipe/recipe.sh)\n\n cd $RECIPE_DIR\n\n ## Iterate over new files and replace file name with data package name and data version \n for f in *; do\n ext=\"${f#*.}\"\n filename=\"{f%%.*}\"\n (mv $f \"one_file_v1.$ext\")\n done\n\n ## Add environment variables \n #### File\n if [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 1 ]] ## If only one file\n then\n recipe_env_file_name=\"ggd_one_file_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(find $RECIPE_DIR -type f -maxdepth 1)\"\n\n elif [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 2 ]] ## If two files\n then\n indexed_file=`find $RECIPE_DIR -type f \\( -name \"*.tbi\" -or -name \"*.fai\" -or -name \"*.bai\" -or -name \"*.crai\" -or -name \"*.gzi\" 
\\) -maxdepth 1`\n if [[ ! -z \"$indexed_file\" ]] ## If index file exists\n then\n recipe_env_file_name=\"ggd_one_file_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(echo $indexed_file | sed 's/\\.[^.]*$//')\" ## remove index extension\n fi \n fi \n\n #### Dir\n recipe_env_dir_name=\"ggd_one_file_v1_dir\"\n recipe_env_dir_name=\"$(echo \"$recipe_env_dir_name\" | sed 's/-/_/g')\"\n\n activate_dir=\"$env_dir/etc/conda/activate.d\"\n deactivate_dir=\"$env_dir/etc/conda/deactivate.d\"\n\n mkdir -p $activate_dir\n mkdir -p $deactivate_dir\n\n echo \"export $recipe_env_dir_name=$RECIPE_DIR\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_dir_name\">> $deactivate_dir/env_vars.sh\n\n #### File\n if [[ ! -z \"${recipe_env_file_name:-}\" ]] ## If the file env variable exists, set the env file var\n then\n echo \"export $recipe_env_file_name=$file_path\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_file_name\">> $deactivate_dir/env_vars.sh\n fi\n\n echo 'Recipe successfully built!'\n\n checksums_file.txt: |\n \n \"\"\", from_string=True)\n\n recipe.write_recipes()\n recipe_dir_path = recipe.recipe_dirs[\"one_file_v1\"] \n args = Namespace(command='check-recipe', debug=False, recipe_path=recipe_dir_path, dont_uninstall=True, dont_add_md5sum_for_checksum=False, id=None)\n assert check_recipe.check_recipe((),args) == True\n ## Test dir and file env_var\n conda_root = utils.conda_root()\n with open(os.path.join(conda_root,\"etc/conda/activate.d/env_vars.sh\")) as env_file:\n env_vars = [x for x in env_file if \"ggd_one_file_v1_dir\" in x or \"ggd_one_file_v1_file\" in x]\n first = False\n second = False\n for x in env_vars:\n if \"ggd_one_file_v1_dir\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/one_file_v1/1\") in x\n first = True\n elif \"ggd_one_file_v1_file\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/one_file_v1/1/one_file_v1.bw\")\n second = True\n else:\n assert False\n assert first == True\n assert second == True\n\n\n args = Namespace(command=\"show-env\", pattern=None)\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.show_env((),args)\n output = temp_stdout.getvalue().strip()\n assert \"$ggd_one_file_v1_file\" in output\n assert \"$ggd_one_file_v1_dir\" in output\n\n ## Test that an env_var is created for the non indexed file when two files are installed with an index present, and the dir\n recipe = CreateRecipe(\n \"\"\"\n two_files_v1:\n meta.yaml: |\n build:\n binary_relocation: false\n detect_binary_files_with_prefix: false\n noarch: generic\n number: 0\n extra:\n authors: mjc \n extra-files: []\n package:\n name: two_files_v1\n version: '1' \n requirements:\n build:\n - gsort\n - htslib\n - zlib\n run:\n - gsort\n - htslib\n - zlib\n source:\n path: .\n about:\n identifiers:\n genome-build: hg19\n species: Homo_sapiens\n keywords:\n - gaps\n - region\n summary: testing env_var for recipe with two files and an index present\n tags:\n genomic-coordinate-base: 0-based-inclusive\n data-version: Today\n data-provider: UCSC\n file-type: \n - bed\n final-files: \n - two_files_v1.bed.gz\n - two_files_v1.bed.gz.tbi\n final-file-sizes:\n two_files_v1.bed.gz: 24.02K\n two_files_v1.bed.gz.tbi: 10.24K\n ggd-channel: genomics\n\n \n recipe.sh: |\n #!/bin/sh\n set -eo pipefail -o nounset\n genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome\n wget --quiet $genome\n\n ## get the file, \n ## unzip 
it, \n ## remove any lines that do not have a scaffolding in the hg19.genom file. (If scaffolding in hg19.genome, grep exists with 0)\n ## add header to the file, and remove the bin column\n ## sort it based on the genome file\n ## bgzip it\n wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\\n | gzip -dc \\\n | awk '{ if (system(\"grep -Fq \" $2 \" hg19.genome\") == 0) print $0}' \\\n | awk -v OFS=\"\\t\" 'BEGIN {print \"#chrom\\tstart\\tend\\tsize\\ttype\\tstrand\"} {print $2,$3,$4,$7,$8,\"+\"}' \\\n | gsort /dev/stdin $genome \\\n | bgzip -c > gaps.bed.gz\n\n tabix gaps.bed.gz\n\n rm hg19.genome\n\n post-link.sh: |\n set -eo pipefail -o nounset\n\n if [[ -z $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/two_files_v1/1\n elif [[ $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") == \"base\" ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/two_files_v1/1\n else\n env_dir=$(conda info --envs | grep \"*\" | grep -o \"\\/.*\")\n export CONDA_ROOT=$env_dir\n export RECIPE_DIR=$env_dir/share/ggd/Homo_sapiens/hg19/two_files_v1/1\n fi\n\n PKG_DIR=`find \"$CONDA_SOURCE_PREFIX/pkgs/\" -name \"$PKG_NAME-$PKG_VERSION*\" | grep -v \".tar.bz2\" | grep \"$PKG_VERSION.*$PKG_BUILDNUM$\"`\n\n if [ -d $RECIPE_DIR ]; then\n rm -r $RECIPE_DIR\n fi\n\n mkdir -p $RECIPE_DIR\n\n (cd $RECIPE_DIR && bash $PKG_DIR/info/recipe/recipe.sh)\n\n cd $RECIPE_DIR\n\n ## Iterate over new files and replace file name with data package name and data version \n for f in *; do\n ext=\"${f#*.}\"\n filename=\"{f%%.*}\"\n (mv $f \"two_files_v1.$ext\")\n done\n\n ## Add environment variables \n #### File\n if [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 1 ]] ## If only one file\n then\n recipe_env_file_name=\"ggd_two_files_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(find $RECIPE_DIR -type f -maxdepth 1)\"\n\n elif [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 2 ]] ## If two files\n then\n indexed_file=`find $RECIPE_DIR -type f \\( -name \"*.tbi\" -or -name \"*.fai\" -or -name \"*.bai\" -or -name \"*.crai\" -or -name \"*.gzi\" \\) -maxdepth 1`\n if [[ ! -z \"$indexed_file\" ]] ## If index file exists\n then\n recipe_env_file_name=\"ggd_two_files_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(echo $indexed_file | sed 's/\\.[^.]*$//')\" ## remove index extension\n fi \n fi \n\n #### Dir\n recipe_env_dir_name=\"ggd_two_files_v1_dir\"\n recipe_env_dir_name=\"$(echo \"$recipe_env_dir_name\" | sed 's/-/_/g')\"\n\n activate_dir=\"$env_dir/etc/conda/activate.d\"\n deactivate_dir=\"$env_dir/etc/conda/deactivate.d\"\n\n mkdir -p $activate_dir\n mkdir -p $deactivate_dir\n\n echo \"export $recipe_env_dir_name=$RECIPE_DIR\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_dir_name\">> $deactivate_dir/env_vars.sh\n\n #### File\n if [[ ! 
-z \"${recipe_env_file_name:-}\" ]] ## If the file env variable exists, set the env file var\n then\n echo \"export $recipe_env_file_name=$file_path\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_file_name\">> $deactivate_dir/env_vars.sh\n fi\n\n echo 'Recipe successfully built!'\n\n checksums_file.txt: |\n \n \"\"\", from_string=True)\n\n recipe.write_recipes()\n recipe_dir_path = recipe.recipe_dirs[\"two_files_v1\"] \n args = Namespace(command='check-recipe', debug=False, recipe_path=recipe_dir_path, dont_uninstall=True, dont_add_md5sum_for_checksum=False, id=None)\n assert check_recipe.check_recipe((),args) == True\n ## Test dir and file env_var\n conda_root = utils.conda_root()\n with open(os.path.join(conda_root,\"etc/conda/activate.d/env_vars.sh\")) as env_file:\n env_vars = [x for x in env_file if \"ggd_two_files_v1_dir\" in x or \"ggd_two_files_v1_file\" in x]\n first = False\n second = False\n for x in env_vars:\n if \"ggd_two_files_v1_dir\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/two_files_v1/1\") in x\n first = True\n elif \"ggd_two_files_v1_file\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/two_files_v1/1/two_files_v1.bed.gz\")\n second = True\n else:\n assert False\n assert first == True\n assert second == True\n\n args = Namespace(command=\"show-env\", pattern=None)\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.show_env((),args)\n output = temp_stdout.getvalue().strip()\n assert \"$ggd_two_files_v1_file\" in output\n assert \"$ggd_two_files_v1_dir\" in output \n\n ## Test that NO env_var is created when two files are installed with no index present, and the dir\n recipe = CreateRecipe(\n \"\"\"\n two_files_noindex_v1:\n meta.yaml: |\n build:\n binary_relocation: false\n detect_binary_files_with_prefix: false\n noarch: generic\n number: 0\n extra:\n authors: mjc \n extra-files: \n - two_files_noindex_v1.genome\n - two_files_noindex_v1.txt.gz\n package:\n name: two_files_noindex_v1\n version: '1' \n requirements:\n build:\n - gsort\n - htslib\n - zlib\n run:\n - gsort\n - htslib\n - zlib\n source:\n path: .\n about:\n identifiers:\n genome-build: hg19\n species: Homo_sapiens\n keywords:\n - gaps\n - region\n summary: testing NO file env_var for recipe with two files and no index\n tags:\n genomic-coordinate-base: 0-based-inclusive\n data-version: Today\n data-provider: UCSC\n file-type: \n - genome\n - txt\n final-files: \n - two_files_noindex_v1.genome\n - two_files_noindex_v1.txt.gz\n final-file-sizes: \n two_files_noindex_v1.genome: 10.01K\n two_files_noindex_v1.txt.gz: 12.41K\n ggd-channel: genomics\n \n recipe.sh: |\n #!/bin/sh\n set -eo pipefail -o nounset\n wget --quiet https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome\n wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\\n | gzip -dc \\\n | awk -v OFS=\"\\t\" 'BEGIN {print \"#chrom\\tstart\\tend\\tsize\\ttype\\tstrand\"} {print $2,$3,$4,$7,$8,\"+\"}' \\\n | bgzip -c > two_files_noindex_v1.txt.gz\n\n post-link.sh: |\n set -eo pipefail -o nounset\n\n if [[ -z $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/two_files_noindex_v1/1\n elif [[ $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") == \"base\" ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export 
RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/two_files_noindex_v1/1\n else\n env_dir=$(conda info --envs | grep \"*\" | grep -o \"\\/.*\")\n export CONDA_ROOT=$env_dir\n export RECIPE_DIR=$env_dir/share/ggd/Homo_sapiens/hg19/two_files_noindex_v1/1\n fi\n\n PKG_DIR=`find \"$CONDA_SOURCE_PREFIX/pkgs/\" -name \"$PKG_NAME-$PKG_VERSION*\" | grep -v \".tar.bz2\" | grep \"$PKG_VERSION.*$PKG_BUILDNUM$\"`\n\n if [ -d $RECIPE_DIR ]; then\n rm -r $RECIPE_DIR\n fi\n\n mkdir -p $RECIPE_DIR\n\n (cd $RECIPE_DIR && bash $PKG_DIR/info/recipe/recipe.sh)\n\n cd $RECIPE_DIR\n\n ## Iterate over new files and replace file name with data package name and data version \n for f in *; do\n ext=\"${f#*.}\"\n filename=\"{f%%.*}\"\n if [[ ! -f \"two_files_noindex_v1.$ext\" ]] \n then\n (mv $f \"two_files_noindex_v1.$ext\")\n fi \n done\n\n ## Add environment variables \n #### File\n if [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 1 ]] ## If only one file\n then\n recipe_env_file_name=\"ggd_two_files_noindex_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(find $RECIPE_DIR -type f -maxdepth 1)\"\n\n elif [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 2 ]] ## If two files\n then\n indexed_file=`find $RECIPE_DIR -type f \\( -name \"*.tbi\" -or -name \"*.fai\" -or -name \"*.bai\" -or -name \"*.crai\" -or -name \"*.gzi\" \\) -maxdepth 1`\n if [[ ! -z \"$indexed_file\" ]] ## If index file exists\n then\n recipe_env_file_name=\"ggd_two_files_noindex_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(echo $indexed_file | sed 's/\\.[^.]*$//')\" ## remove index extension\n fi \n fi \n\n #### Dir\n recipe_env_dir_name=\"ggd_two_files_noindex_v1_dir\"\n recipe_env_dir_name=\"$(echo \"$recipe_env_dir_name\" | sed 's/-/_/g')\"\n\n activate_dir=\"$env_dir/etc/conda/activate.d\"\n deactivate_dir=\"$env_dir/etc/conda/deactivate.d\"\n\n mkdir -p $activate_dir\n mkdir -p $deactivate_dir\n\n echo \"export $recipe_env_dir_name=$RECIPE_DIR\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_dir_name\">> $deactivate_dir/env_vars.sh\n\n #### File\n if [[ ! 
-z \"${recipe_env_file_name:-}\" ]] ## If the file env variable exists, set the env file var\n then\n echo \"export $recipe_env_file_name=$file_path\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_file_name\">> $deactivate_dir/env_vars.sh\n fi\n\n echo 'Recipe successfully built!'\n\n checksums_file.txt: |\n \n \"\"\", from_string=True)\n\n recipe.write_recipes()\n recipe_dir_path = recipe.recipe_dirs[\"two_files_noindex_v1\"] \n args = Namespace(command='check-recipe', debug=False, recipe_path=recipe_dir_path, dont_uninstall=True, dont_add_md5sum_for_checksum=False, id=None)\n assert check_recipe.check_recipe((),args) == True\n ## Test dir and file env_var\n conda_root = utils.conda_root()\n with open(os.path.join(conda_root,\"etc/conda/activate.d/env_vars.sh\")) as env_file:\n env_vars = [x for x in env_file if \"ggd_two_files_noindex_v1_dir\" in x or \"ggd_two_files_noindex_v1_file\" in x]\n first = False\n for x in env_vars:\n if \"ggd_two_files_noindex_v1_dir\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/two_files_noindex_v1/1\") in x\n first = True\n elif \"ggd_two_files_noindex_v1_file\" in x:\n assert False ## There should not be a file env_var made for this package\n else:\n assert False\n assert first == True\n\n args = Namespace(command=\"show-env\", pattern=None)\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.show_env((),args)\n output = temp_stdout.getvalue().strip()\n assert \"$ggd_two_files_noindex_v1_file\" not in output\n assert \"$ggd_two_files_noindex_v1_dir\" in output\n\n ## Test that NO env_var is created when thre+ files are installed, and the dir\n recipe = CreateRecipe(\n \"\"\"\n three_files_v1:\n meta.yaml: |\n build:\n binary_relocation: false\n detect_binary_files_with_prefix: false\n noarch: generic\n number: 0\n extra:\n authors: mjc \n extra-files: \n - three_files_v1.genome\n - three_files_v1.1.txt.gz\n - three_files_v1.2.txt.gz\n package:\n name: three_files_v1\n version: '1' \n requirements:\n build:\n - gsort\n - htslib\n - zlib\n run:\n - gsort\n - htslib\n - zlib\n source:\n path: .\n about:\n identifiers:\n genome-build: hg19\n species: Homo_sapiens\n keywords:\n - gaps\n - region\n summary: testing NO file env_var for recipe with three+ files\n tags:\n genomic-coordinate-base: 0-based-inclusive\n data-version: Today\n data-provider: UCSC\n file-type: \n - txt\n - genome\n final-files: \n - three_files_v1.1.txt.gz\n - three_files_v1.2.txt.gz\n - three_files_v1.genome\n final-file-sizes: \n three_files_v1.1.txt.gz: 24.04K\n three_files_v1.2.txt.gz: 24.04K\n three_files_v1.genome: 10.01K\n ggd-channel: genomics\n \n recipe.sh: |\n #!/bin/sh\n set -eo pipefail -o nounset\n wget --quiet https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome\n wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\\n | gzip -dc \\\n | awk -v OFS=\"\\t\" 'BEGIN {print \"#chrom\\tstart\\tend\\tsize\\ttype\\tstrand\"} {print $2,$3,$4,$7,$8,\"+\"}' \\\n | bgzip -c > gap.txt.gz\n cp gap.txt.gz gaps.1.txt.gz\n mv gap.txt.gz gaps.2.txt.gz\n\n post-link.sh: |\n set -eo pipefail -o nounset\n\n if [[ -z $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/three_files_v1/1\n elif [[ $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") == \"base\" ]]; then\n export CONDA_ROOT=$(conda info --root)\n 
env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/three_files_v1/1\n else\n env_dir=$(conda info --envs | grep \"*\" | grep -o \"\\/.*\")\n export CONDA_ROOT=$env_dir\n export RECIPE_DIR=$env_dir/share/ggd/Homo_sapiens/hg19/three_files_v1/1\n fi\n\n PKG_DIR=`find \"$CONDA_SOURCE_PREFIX/pkgs/\" -name \"$PKG_NAME-$PKG_VERSION*\" | grep -v \".tar.bz2\" | grep \"$PKG_VERSION.*$PKG_BUILDNUM$\"`\n\n if [ -d $RECIPE_DIR ]; then\n rm -r $RECIPE_DIR\n fi\n\n mkdir -p $RECIPE_DIR\n\n (cd $RECIPE_DIR && bash $PKG_DIR/info/recipe/recipe.sh)\n\n cd $RECIPE_DIR\n\n ## Iterate over new files and replace file name with data package name and data version \n for f in *; do\n ext=\"${f#*.}\"\n filename=\"{f%%.*}\"\n if [[ ! -f \"three_files_v1.$ext\" ]] \n then\n (mv $f \"three_files_v1.$ext\")\n fi \n done\n\n ## Add environment variables \n #### File\n if [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 1 ]] ## If only one file\n then\n recipe_env_file_name=\"ggd_three_files_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(find $RECIPE_DIR -type f -maxdepth 1)\"\n\n elif [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 2 ]] ## If two files\n then\n indexed_file=`find $RECIPE_DIR -type f \\( -name \"*.tbi\" -or -name \"*.fai\" -or -name \"*.bai\" -or -name \"*.crai\" -or -name \"*.gzi\" \\) -maxdepth 1`\n if [[ ! -z \"$indexed_file\" ]] ## If index file exists\n then\n recipe_env_file_name=\"ggd_three_files_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(echo $indexed_file | sed 's/\\.[^.]*$//')\" ## remove index extension\n fi \n fi \n\n #### Dir\n recipe_env_dir_name=\"ggd_three_files_v1_dir\"\n recipe_env_dir_name=\"$(echo \"$recipe_env_dir_name\" | sed 's/-/_/g')\"\n\n activate_dir=\"$env_dir/etc/conda/activate.d\"\n deactivate_dir=\"$env_dir/etc/conda/deactivate.d\"\n\n mkdir -p $activate_dir\n mkdir -p $deactivate_dir\n\n echo \"export $recipe_env_dir_name=$RECIPE_DIR\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_dir_name\">> $deactivate_dir/env_vars.sh\n\n #### File\n if [[ ! 
-z \"${recipe_env_file_name:-}\" ]] ## If the file env variable exists, set the env file var\n then\n echo \"export $recipe_env_file_name=$file_path\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_file_name\">> $deactivate_dir/env_vars.sh\n fi\n\n echo 'Recipe successfully built!'\n\n checksums_file.txt: |\n \n \"\"\", from_string=True)\n\n recipe.write_recipes()\n recipe_dir_path = recipe.recipe_dirs[\"three_files_v1\"] \n args = Namespace(command='check-recipe', debug=False, recipe_path=recipe_dir_path, dont_uninstall=True, dont_add_md5sum_for_checksum=False, id=None)\n assert check_recipe.check_recipe((),args) == True\n ## Test dir and file env_var\n conda_root = utils.conda_root()\n with open(os.path.join(conda_root,\"etc/conda/activate.d/env_vars.sh\")) as env_file:\n env_vars = [x for x in env_file if \"ggd_three_files_v1_dir\" in x or \"ggd_three_files_v1_file\" in x]\n first = False\n for x in env_vars:\n if \"ggd_three_files_v1_dir\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/three_files_v1/1\") in x\n first = True\n elif \"ggd_three_files_v1_file\" in x:\n assert False ## There should not be a file env_var made for this package\n else:\n assert False\n assert first == True\n\n args = Namespace(command=\"show-env\", pattern=None)\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.show_env((),args)\n output = temp_stdout.getvalue().strip()\n assert \"$ggd_three_files_v1_file\" not in output\n assert \"$ggd_three_files_v1_dir\" in output", "def test_rpki_download_validator(self):\n test_path = Path('.')\n self.test___init__()\n rpki_path = Path(RPKI_Validator_Wrapper.rpki_package_path)\n\n assert list(rpki_path.glob('rpki-validator*'))\n assert path.exists(rpki_path / 'preconfigured-tals/arin-ripevalidator.tal')\n assert not path.exists(test_path / 'rpki-validator-3-latest-dist.tar.gz')", "def custom_config_path():\n return 'tests/test-config/valid-config.yaml'", "def test_nested_recipes(config_fixture):\n r = Recipes(\n\n \"\"\"\n shallow:\n meta.yaml: |\n package:\n name: shallow\n version: \"0.1\"\n build.sh: |\n #!/bin/bash\n echo \"Shallow Created\"\n pwd\n normal/normal:\n meta.yaml: |\n package:\n name: normal\n version: \"0.1\"\n build:\n skip: true\n requirements:\n build:\n - python 3.6\n build.sh: |\n #!/bin/bash\n echo \"Testing build.sh through python\"\n python -h\n deep/deep/deep:\n meta.yaml: |\n package:\n name: deep\n version: \"0.1\"\n requirements:\n build:\n - python\n run:\n - python\n build.sh: |\n #!/bin/bash\n ## Empty script\n F/I/V/E/deep:\n meta.yaml: |\n package:\n name: fivedeep\n version: \"0.1\"\n requirements:\n build:\n - python 3.6\n run:\n - python 3.6\n \"\"\", from_string=True)\n r.write_recipes()\n\n build_results = build.build_recipes(r.basedir, config_fixture,\n r.recipe_dirnames,\n testonly=False,\n force=False,\n mulled_test=False)\n assert build_results\n\n assert len(list(utils.get_recipes(r.basedir))) == 4\n\n for k, v in r.recipe_dirs.items():\n for i in utils.built_package_paths(v):\n assert os.path.exists(i)\n ensure_missing(i)", "def test_no_meta_yaml(datafiles):\n current_folder = Path.cwd()\n os.chdir(datafiles)\n with pytest.raises(SystemExit):\n project = Project(rules=RULES)\n os.chdir(str(current_folder))", "def test_get_spotdl_path(setup):\n\n assert get_spotdl_path() == Path(setup.directory, \".spotdl\")\n assert os.path.exists(os.path.join(setup.directory, \".spotdl\"))", "def test_source_file(host):\n recon_tools = host.file('/home/recon/.recon_tools')\n\n assert 
recon_tools.exists", "def test_build_yaml_syntax_error(self):\n mydir = os.path.dirname(__file__)\n path = os.path.join(mydir, './build_bad_syntax.yml')\n\n with assertRaisesRegex(self, build.BuildException, r'Bad yaml syntax.*build_bad_syntax\\.yml'):\n build.build_package(None, 'test_syntax_error', PACKAGE, [], path)", "def test_template_lookup_path(self):\n lookup_list = settings.TEMPLATES[0]['DIRS']\n found_path = False\n \n for entry in lookup_list:\n entry_normalised = os.path.normpath(entry)\n \n if entry_normalised == os.path.normpath(settings.TEMPLATE_DIR):\n found_path = True\n \n self.assertTrue(found_path, f\"{FAILURE_HEADER}Your project's templates directory is not listed in the TEMPLATES>DIRS lookup list. Check your settings.py module.{FAILURE_FOOTER}\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that lookup paths include RKD_PATH and internal RKD directories
def test_get_lookup_paths_includes_internal_path_as_well_as_rkd_path(self):
    yaml_loader = YamlFileLoader([])
    os.environ['RKD_PATH'] = 'SOME-PATH-THERE'

    try:
        paths = yaml_loader.get_lookup_paths('harbor-internal/')
    finally:
        os.environ['RKD_PATH'] = ''

    defined_by_rkd_path = paths.index('SOME-PATH-THERE/harbor-internal/')

    internal_path = (os.path.realpath(SCRIPT_DIR_PATH) + '/harbor-internal/').replace('test/', '')
    internal_path_index = paths.index(internal_path)

    self.assertGreater(defined_by_rkd_path, internal_path_index, msg='defined_by_rkd_path should be favored')
[ "def testPaths():\n for path in config.main.paths:\n assert(os.path.exists(config.main.paths[path]))", "def test_template_lookup_path(self):\n lookup_list = settings.TEMPLATES[0]['DIRS']\n found_path = False\n \n for entry in lookup_list:\n entry_normalised = os.path.normpath(entry)\n \n if entry_normalised == os.path.normpath(settings.TEMPLATE_DIR):\n found_path = True\n \n self.assertTrue(found_path, f\"{FAILURE_HEADER}Your project's templates directory is not listed in the TEMPLATES>DIRS lookup list. Check your settings.py module.{FAILURE_FOOTER}\")", "def verify_paths(config=None, output_collection=None, return_missing=False):\n paths = get_paths(config=config, output_collection=output_collection)\n missing = list(filter(lambda p: p and not os.path.exists(p), paths))\n return missing if return_missing else not bool(missing)", "def sanity_check_step(self):\n\n custom_paths = {\n 'files': [\"ipp/lib/intel64/libipp%s\" % y\n for x in [\"ac\", \"cc\", \"ch\", \"core\", \"cv\", \"dc\", \"di\",\n \"i\", \"j\", \"m\", \"r\", \"s\", \"sc\", \"vc\", \"vm\"]\n for y in [\"%s.a\" % x, \"%s.so\" % x]],\n 'dirs': [\"compiler/lib/intel64\", \"ipp/bin\", \"ipp/include\",\n \"ipp/interfaces/data-compression\", \"ipp/tools/intel64\"]\n }\n\n super(EB_ipp, self).sanity_check_step(custom_paths=custom_paths)", "def test_get_path_keyerror(self):\n x = _random_integer()\n y = _random_integer()\n D = {'a': {\n 'b': {\n 'c': {\n 'd': x,\n },\n 'e': {\n 'f': y,\n }\n }\n }}\n path = ['a', 'b', 'c', 'f']\n val = utils.get_path(D, path)\n self.assertIsNone(val)", "def test_check_dir_existence_root_is_wrong(self):\n self.assertFalse(check_dir_existence('/some/wrong/path', self.existing_dirs))", "def test_paths_from_settings():\n import settings_bipype\n\n namespace = settings_bipype.__dict__\n \n variables = { key: namespace[key]\n for key in namespace\n if key.startswith('PATH') }\n \n for var in variables.values():\n assert path_exists(var)", "def _check_input_path(self, input_path):", "def check_file_paths(self):\n if self.version != OUTDATED_WACZ:\n package_files = [item[\"path\"] for item in self.datapackage[\"resources\"]]\n for filepath in pathlib.Path(self.dir.name).glob(\"**/*.*\"):\n filename = os.path.basename(filepath)\n if (\n filename != \"datapackage.json\"\n and filename != \"datapackage-digest.json\"\n ):\n file = str(filepath).split(\"/\")[-2:]\n file = \"/\".join(file)\n if file not in package_files:\n print(\"file %s is not listed in the datapackage\" % file)\n return False\n return True", "def _AssertPathsExist(paths):\n src_root = _GetSrcRootPath()\n for path in paths:\n abspath = os.path.join(src_root, path)\n assert os.path.exists(abspath), (('Path: {} doesn\\'t exist.\\nA valid '\n 'path must exist and be relative to the '\n 'root of source, which is {}. 
For '\n 'example, \\'ios/\\' is a valid path.').\n format(abspath, src_root))", "def test_find_path_by_name_founds_path(self):\n\n yaml_loader = YamlFileLoader([])\n\n d = tempfile.TemporaryDirectory()\n os.environ['RKD_PATH'] = d.name\n\n with open(d.name + '/makefile.yml', 'w') as f:\n f.write('''\n version: org.riotkit.rkd/yaml/v1\n imports: []\n tasks: \n :join:iwa-ait:\n description: Subscribe to any local section of IWA-AIT, workers have common interest\n arguments:\n - not a list\n ''')\n\n try:\n path = yaml_loader.find_path_by_name('makefile.yml', '/')\n self.assertTrue(len(path) > 0)\n finally:\n d.cleanup()\n os.environ['RKD_PATH'] = ''", "def _lookup_paths_in_paths(client_dispatcher: IClientDispatcher, lookup_paths: List[str], target_paths: List[str]):\n client = client_dispatcher.current_client\n\n dirs = []\n files = set()\n\n for p in lookup_paths:\n path = Path(get_relative_paths(client.path, [p])[0])\n if path.is_dir():\n dirs.append(path)\n else:\n files.add(path)\n\n target_dirs = []\n target_files = set()\n\n for p in target_paths:\n path = Path(p)\n if path.is_dir():\n target_dirs.append(path)\n else:\n target_files.add(path)\n\n result = set()\n\n for target_file in target_files:\n if target_file in files or any(d in target_file.parents for d in dirs):\n result.add(str(target_file))\n\n for target_dir in target_dirs:\n if target_dir in dirs or any(target_dir in f.parents for f in files):\n result.add(str(target_dir))\n\n return result", "def check_paths(self):\n self.settings.fileStore = os.path.expandvars(self.settings.fileStore) # to allow things like $HOME or $RMGpy\n self.settings.scratchDirectory = os.path.expandvars(self.settings.scratchDirectory)\n for path in [self.settings.fileStore, self.settings.scratchDirectory]:\n if not os.path.exists(path):\n logging.info(\"Creating directory %s for QM files.\" % os.path.abspath(path))\n # This try/except should be redundant, but some networked file systems\n # seem to be slow or buggy or respond strangely causing problems\n # between checking the path exists and trying to create it.\n try:\n os.makedirs(path)\n except OSError as e:\n logging.warning(\"Error creating directory {0}: {1!r}\".format(path, e))\n logging.warning(\"Checking it already exists...\")\n assert os.path.exists(path), \"Path {0} still doesn't exist?\".format(path)", "def testFilePath(self):\n files = list(File().find())\n for file in files:\n adapter = File().getAssetstoreAdapter(file)\n filesystempath = adapter.fullPath(file)\n filepath = File().getLocalFilePath(file)\n fusepath = File().getGirderMountFilePath(file)\n self.assertTrue(os.path.exists(filesystempath))\n self.assertTrue(os.path.exists(filepath))\n self.assertTrue(os.path.exists(fusepath))\n self.assertEqual(filesystempath, filepath)\n self.assertNotEqual(filesystempath, fusepath)\n self.assertEqual(fusepath[:len(self.mountPath)], self.mountPath)\n with open(filepath) as file1:\n with open(fusepath) as file2:\n self.assertEqual(file1.read(), file2.read())\n subpath = fusepath[len(self.mountPath):].lstrip('/')\n if self.knownPaths.get(subpath):\n with open(fusepath) as file1:\n self.assertEqual(file1.read().strip(), self.knownPaths[subpath])", "def check_path():\n root = os.path.abspath(os.path.curdir)\n assert os.path.basename(root) == \"treelite\", \"Must be run on project root.\"", "def test_find_image_path():\n parts = IMAGE_PATH.parts\n root = parts[0]\n relpath = parts[1:]\n windows_path = PureWindowsPath(\"\")\n relpath = windows_path.joinpath(*relpath)\n\n path = 
find_image_path(str(relpath), root)\n\n assert path == str(IMAGE_PATH)", "def checkSysPath(self):\n coreDir = natlinkcorefunctions.getBaseFolder()\n if coreDir.lower().endswith('core'):\n # check the registry setting:\n try:\n regDict, sectionName = self.getHKLMPythonPathDict()\n except pywintypes.error:\n print \"\"\"PythonPath setting not found in registry\\n\nPlease try to correct this by running the NatLink Config Program (with administration rights)\"\"\"\n return\n except ValueError:\n print \"\"\"NatLink setting not found or wrong in PythonPath setting in registry\\n\nPlease try to correct this by running the NatLink Config Program (with administration rights)\"\"\"\n return\n\n if regDict is None:\n print \"\"\"NatLink setting not found or wrong in PythonPath setting in registry\\n\nPlease try to correct this by running the NatLink Config Program (with administration rights)\"\"\"\n return\n \n section = regDict['NatLink']\n if not section:\n print \"\"\"PythonPath/Natlink setting in registry does exist.\\n\nPlease try to correct this by running the NatLink Config Program (with administration rights)\"\"\"\n return\n setting = section['']\n if setting.lower() == coreDir.lower():\n baseDir = os.path.normpath(os.path.join(coreDir, \"..\"))\n self.InsertToSysPath(coreDir)\n self.InsertToSysPath(baseDir)\n else:\n print \"\"\"PythonPath/Natlink setting in registry does not match this core directory\\n\nregistry: %s\\ncoreDir: %s\\n\nPlease try to correct this by running the NatLink Config Program (with administration rights)\"\"\"% (\n setting, coreDir)\n return\n else:\n baseDir = None\n print 'non expected core directory %s, cannot find baseDirectory\\nTry to run the Config Program with administrator rights'% coreDir\n userDir = self.getUserDirectory()\n # special for other user directories, insert also unimacro for actions etc.\n if userDir: \n self.InsertToSysPath(userDir)\n\n \n includeUnimacro = self.getIncludeUnimacroInPythonPath()\n if includeUnimacro:\n if not baseDir:\n print 'no baseDir found, cannot \"IncludeUnimacroInPythonPath\"'\n return\n unimacroDir = os.path.join(baseDir, '..', '..', 'unimacro')\n unimacroDir = os.path.normpath(unimacroDir)\n if os.path.isdir(unimacroDir):\n self.InsertToSysPath(unimacroDir)\n else:\n print 'no valid UnimacroDir found(%s), cannot \"IncludeUnimacroInPythonPath\"'% \\\n unimacroDir\n return 1", "def _get_existing_paths(self, modulepath):\n path_strings = modulepath.split(\":\")\n return [x for x in path_strings if path.exists(x)]", "def test_search_parents_found():\n with tempfile.TemporaryDirectory() as tempdir:\n root_dir = pathlib.Path(tempdir)\n os.makedirs(str(root_dir / \"a\" / \"b\"))\n _install_conf_py(root_dir)\n assert _search_parents(root_dir / \"a\" / \"b\") == root_dir" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert that makefile.yml will be searched for in RKD_PATH
def test_find_path_by_name_founds_path(self):
    yaml_loader = YamlFileLoader([])

    d = tempfile.TemporaryDirectory()
    os.environ['RKD_PATH'] = d.name

    with open(d.name + '/makefile.yml', 'w') as f:
        f.write('''
version: org.riotkit.rkd/yaml/v1
imports: []
tasks:
    :join:iwa-ait:
        description: Subscribe to any local section of IWA-AIT, workers have common interest
        arguments:
            - not a list
''')

    try:
        path = yaml_loader.find_path_by_name('makefile.yml', '/')
        self.assertTrue(len(path) > 0)
    finally:
        d.cleanup()
        os.environ['RKD_PATH'] = ''
[ "def test_loads_from_file_is_searching_in_rkd_path(self):\n\n yaml_loader = YamlFileLoader([])\n\n d = tempfile.TemporaryDirectory()\n os.environ['RKD_PATH'] = d.name\n\n with open(d.name + '/makefile.yml', 'w') as f:\n f.write('''\nversion: org.riotkit.rkd/yaml/v1\nimports: []\ntasks: \n :join:iwa-ait:\n description: Subscribe to any local section of IWA-AIT, workers have common interest\n arguments:\n - not a list\n ''')\n\n try:\n self.assertRaises(YAMLFileValidationError,\n lambda: yaml_loader.load_from_file('makefile.yml', 'org.riotkit.rkd/yaml/v1'))\n finally:\n d.cleanup()\n os.environ['RKD_PATH'] = ''", "def testPaths():\n for path in config.main.paths:\n assert(os.path.exists(config.main.paths[path]))", "def check_path():\n root = os.path.abspath(os.path.curdir)\n assert os.path.basename(root) == \"treelite\", \"Must be run on project root.\"", "def is_pkr_path(path):\n from pkr.environment import ENV_FOLDER\n\n return path.is_dir() and len(list(path.glob(\"{}/*/env.yml\".format(ENV_FOLDER)))) > 0", "def test_paths_from_settings():\n import settings_bipype\n\n namespace = settings_bipype.__dict__\n \n variables = { key: namespace[key]\n for key in namespace\n if key.startswith('PATH') }\n \n for var in variables.values():\n assert path_exists(var)", "def test_get_lookup_paths_includes_internal_path_as_well_as_rkd_path(self):\n\n yaml_loader = YamlFileLoader([])\n os.environ['RKD_PATH'] = 'SOME-PATH-THERE'\n\n try:\n paths = yaml_loader.get_lookup_paths('harbor-internal/')\n finally:\n os.environ['RKD_PATH'] = ''\n\n defined_by_rkd_path = paths.index('SOME-PATH-THERE/harbor-internal/')\n\n internal_path = (os.path.realpath(SCRIPT_DIR_PATH) + '/harbor-internal/').replace('test/', '')\n internal_path_index = paths.index(internal_path)\n\n self.assertGreater(defined_by_rkd_path, internal_path_index, msg='defined_by_rkd_path should be favored')", "def check_yaml(recipe):\n\n ## Check yaml keys\n assert (\n \"package\" in recipe and \"version\" in recipe[\"package\"]\n ), \":ggd:check-recipe: must specify 'package:' section with ggd version and package name\"\n assert (\n \"extra\" in recipe\n ), \":ggd:check-recipe: must specify 'extra:' section with author and extra-files\"\n assert (\n \"about\" in recipe and \"summary\" in recipe[\"about\"]\n ), \":ggd:check-recipe: must specify an 'about/summary' section\"\n assert (\n \"identifiers\" in recipe[\"about\"]\n ), \":ggd:check-recipe: must specify an 'identifier' section in about\"\n assert (\n \"genome-build\" in recipe[\"about\"][\"identifiers\"]\n ), \":ggd:check-recipe: must specify 'about:' section with genome-build\"\n assert (\n \"species\" in recipe[\"about\"][\"identifiers\"]\n ), \":ggd:check-recipe: must specify 'about:' section with species\"\n assert (\n \"tags\" in recipe[\"about\"]\n ), \":ggd:check-recipe: must specify 'about:' section with tags\"\n assert \"keywords\" in recipe[\"about\"] and isinstance(\n recipe[\"about\"][\"keywords\"], list\n ), \":ggd:check-recipe: must specify 'about:' section with keywords\"\n\n ##Check tags\n assert (\n \"genomic-coordinate-base\" in recipe[\"about\"][\"tags\"]\n ), \":ggd:check-recipe: must specify a genomic coordinate base for the files created by this recipe\"\n assert (\n \"data-version\" in recipe[\"about\"][\"tags\"]\n ), \":ggd:check-recipe: must specify the data version for the data files created by this recipe\"\n assert (\n \"data-provider\" in recipe[\"about\"][\"tags\"]\n ), \":ggd:check-recipe: must specify the data provider for the files created by this 
recipe\"\n assert (\n \"ggd-channel\" in recipe[\"about\"][\"tags\"]\n ), \":ggd:check-recipe: must specify the specific ggd channel for the recipe in the 'about:tags' section\"\n assert (\n \"file-type\" in recipe[\"about\"][\"tags\"]\n ), \":ggd:check-recipe: The final data file types must be specified in the 'about:tags' section\"\n assert (\n \"final-files\" in recipe[\"about\"][\"tags\"]\n ), \":ggd:check-recipe: All final data file must be specified in the 'about:tags' section\"\n assert (\n \"final-file-sizes\" in recipe[\"about\"][\"tags\"]\n ), \":ggd:check-recipe: The size of each final data file must be specified in the 'about:tags' section\"\n\n species, build, version, name, dp = (\n recipe[\"about\"][\"identifiers\"][\"species\"],\n recipe[\"about\"][\"identifiers\"][\"genome-build\"],\n recipe[\"package\"][\"version\"],\n recipe[\"package\"][\"name\"],\n recipe[\"about\"][\"tags\"][\"data-provider\"].lower(),\n )\n version = version.replace(\" \", \"\")\n version = version.replace(\" \", \"'\")\n\n _check_build(species, build)\n return species, build, version, name, dp", "def repo_check():\n ls = os.listdir('.')\n if '_distro_map.yml' not in ls or '_distro_map.yml' not in ls:\n print(\"The specified docs base directory {} does\"\n \"not appear to be a valid ascii_binder directory.\"\n .format(os.getcwd()))\n return False\n return True", "def test_build_checks_yaml_syntax_error(self): # pylint: disable=C0103\n mydir = os.path.dirname(__file__)\n path = os.path.join(mydir, './build_checks_bad_syntax.yml')\n checks_path = os.path.join(mydir, './checks_bad_syntax.yml')\n\n with assertRaisesRegex(self, build.BuildException, r'Bad yaml syntax.*checks_bad_syntax\\.yml'):\n build.build_package(None, 'test_syntax_error', PACKAGE, [], path, checks_path=checks_path)", "def test_check_recipe_recipe_path():\n\n pytest_enable_socket()\n\n ## Uninstall the already installed recipe\n try:\n sp.check_call([\"conda\", \"uninstall\", \"-y\", \"trial-hg38-gaps-v1\"])\n except Exception as e:\n pass\n\n ## Remove fragment files\n jdict = ggd_jdict = {u'channeldata_version': 1, u'subdirs': [u'noarch'], u'packages': {u'trial-hg38-gaps-v1': \n {u'activate.d': False, u'version': u'1', u'tags': {u'ggd-channel': u'genomics', \n u'data-version': u'11-Mar-2019'}, u'post_link': True, u'binary_prefix': False, u'run_exports': {}, \n u'pre_unlink': False, u'subdirs': [u'noarch'], u'deactivate.d': False, u'reference_package': u'noarch/trial-hg38-gaps-v1-1-0.tar.bz2', \n u'pre_link': False, u'keywords': [u'gaps', u'region'], u'summary': u'hg38 Assembly gaps from USCS', \n u'text_prefix': False, u'identifiers': {u'genome-build': u'hg38', u'species': u'Homo_sapiens'}}}}\n\n\n uninstall.check_for_installation([\"trial-hg38-gaps-v1\"], jdict)\n ## Check that the recipe is not installed \n out = utils.check_output([\"conda\", \"list\", \"trial-hg38-gaps-v1\"])\n assert \"trial-hg38-gaps-v1\" not in out\n\n ## Check that the tar file was deleted. 
If not, remove it\n bz2_file = pytest.global_tarball_testing_file\n try:\n assert not os.path.exists(bz2_file)\n assert not os.path.isfile(bz2_file)\n except AssertionError as e:\n if os.path.exists(bz2_file):\n os.remove(bz2_file)\n else:\n raise e\n \n ## Use the previously created ggd recipe path\n recipe_path = pytest.global_ggd_recipe_path\n try: \n assert not os.path.exists(recipe_path)\n assert not os.path.isdir(recipe_path)\n except AssertionError as e:\n if os.path.exists(recipe_path):\n shutil.rmtree(recipe_path)\n else:\n raise e\n\n ## Add recipe\n test__build_normal_run(add_checksum=False,final_files=True)\n recipe_path = pytest.global_ggd_recipe_path\n\n ### check that the checksum file is empty \n assert os.path.getsize(os.path.join(recipe_path,\"checksums_file.txt\")) == 0\n\n ## SKIP md5sum process. -> This will trigger a checksum of the files, which will fail because there is none and the recipe will be uninstalled\n ### exit with 222\n args = Namespace(command='check-recipe', debug=False, recipe_path=recipe_path, dont_uninstall=True, dont_add_md5sum_for_checksum=True, id=None)\n \n with pytest.raises(SystemExit) as pytest_wrapped_e:\n check_recipe.check_recipe((),args)\n assert \"SystemExit\" in str(pytest_wrapped_e.exconly()) ## test that SystemExit was raised by sys.exit() \n if sys.version_info[0] >= 3:\n assert pytest_wrapped_e.match(\"222\") ## Check that the exit code is 1\n\n ## Check that the recipe was uninstalled\n out = utils.check_output([\"conda\", \"list\", \"trial-hg38-gaps-v1\"])\n assert \"trial-hg38-gaps-v1\" not in out\n out = utils.check_output([\"ggd\", \"show-env\"])\n assert \"ggd_trial_hg38_gaps_v1\" not in out\n conda_root = utils.conda_root()\n assert os.path.exists(os.path.join(conda_root,\"share/ggd/Homo_sapiens/hg38/trial-hg38-gaps-v1/1\")) == False \n\n ## Check that the final files and the file types were added \n yaml_dict = yaml.safe_load(open(os.path.join(recipe_path,\"meta.yaml\")))\n assert len(yaml_dict[\"about\"][\"tags\"][\"final-files\"]) == 2\n assert len(yaml_dict[\"about\"][\"tags\"][\"file-type\"]) == 1\n assert \"trial-hg38-gaps-v1.bed.gz\" in yaml_dict[\"about\"][\"tags\"][\"final-files\"] \n assert \"trial-hg38-gaps-v1.bed.gz.tbi\" in yaml_dict[\"about\"][\"tags\"][\"final-files\"] \n assert \"bed\" in yaml_dict[\"about\"][\"tags\"][\"file-type\"] \n\n\n ## Add md5sum. 
No errors should happend, and the package should not be uninstalled because the dont_uninstall flag is set to True\n args = Namespace(command='check-recipe', debug=False, recipe_path=recipe_path, dont_uninstall=True, dont_add_md5sum_for_checksum=False, id=None)\n \n assert check_recipe.check_recipe((),args) == True \n\n out = utils.check_output([\"conda\", \"list\", \"trial-hg38-gaps-v1\"])\n assert \"trial-hg38-gaps-v1\" in out\n out = utils.check_output([\"ggd\", \"show-env\"])\n assert \"ggd_trial_hg38_gaps_v1\" in out\n conda_root = utils.conda_root()\n assert os.path.exists(os.path.join(conda_root,\"share/ggd/Homo_sapiens/hg38/trial-hg38-gaps-v1/1\")) == True", "def test_check_recipe_package_env_vars():\n\n pytest_enable_socket()\n\n ## Test that an env_var is created for a single installed file and the dir\n recipe = CreateRecipe(\n \"\"\"\n one_file_v1:\n meta.yaml: |\n build:\n binary_relocation: false\n detect_binary_files_with_prefix: false\n noarch: generic\n number: 0\n extra:\n authors: mjc \n extra-files: \n - one_file_v1.bw\n package:\n name: one_file_v1\n version: '1' \n requirements:\n build:\n - gsort\n - htslib\n - zlib\n run:\n - gsort\n - htslib\n - zlib\n source:\n path: .\n about:\n identifiers:\n genome-build: hg19\n species: Homo_sapiens\n keywords:\n - gaps\n - region\n summary: testing env_var for recipe with one file\n tags:\n genomic-coordinate-base: 0-based-inclusive\n data-version: 11-Mar-2019\n file-type: \n - bw\n final-files: \n - one_file_v1.bw\n final-file-sizes:\n one_file_v1.bw: 10.1K\n data-provider: UCSC\n ggd-channel: genomics\n \n recipe.sh: |\n #!/bin/sh\n set -eo pipefail -o nounset\n wget --quiet --no-check-certificate --output-document hg19phastcons.bw http://hgdownload.cse.ucsc.edu/goldenpath/hg19/phastCons100way/hg19.100way.phastCons.bw\n\n post-link.sh: |\n set -eo pipefail -o nounset\n\n if [[ -z $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/one_file_v1/1\n elif [[ $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") == \"base\" ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/one_file_v1/1\n else\n env_dir=$(conda info --envs | grep \"*\" | grep -o \"\\/.*\")\n export CONDA_ROOT=$env_dir\n export RECIPE_DIR=$env_dir/share/ggd/Homo_sapiens/hg19/one_file_v1/1\n fi\n\n PKG_DIR=`find \"$CONDA_SOURCE_PREFIX/pkgs/\" -name \"$PKG_NAME-$PKG_VERSION*\" | grep -v \".tar.bz2\" | grep \"$PKG_VERSION.*$PKG_BUILDNUM$\"`\n\n if [ -d $RECIPE_DIR ]; then\n rm -r $RECIPE_DIR\n fi\n\n mkdir -p $RECIPE_DIR\n\n (cd $RECIPE_DIR && bash $PKG_DIR/info/recipe/recipe.sh)\n\n cd $RECIPE_DIR\n\n ## Iterate over new files and replace file name with data package name and data version \n for f in *; do\n ext=\"${f#*.}\"\n filename=\"{f%%.*}\"\n (mv $f \"one_file_v1.$ext\")\n done\n\n ## Add environment variables \n #### File\n if [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 1 ]] ## If only one file\n then\n recipe_env_file_name=\"ggd_one_file_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(find $RECIPE_DIR -type f -maxdepth 1)\"\n\n elif [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 2 ]] ## If two files\n then\n indexed_file=`find $RECIPE_DIR -type f \\( -name \"*.tbi\" -or -name \"*.fai\" -or -name \"*.bai\" -or -name \"*.crai\" -or -name \"*.gzi\" 
\\) -maxdepth 1`\n if [[ ! -z \"$indexed_file\" ]] ## If index file exists\n then\n recipe_env_file_name=\"ggd_one_file_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(echo $indexed_file | sed 's/\\.[^.]*$//')\" ## remove index extension\n fi \n fi \n\n #### Dir\n recipe_env_dir_name=\"ggd_one_file_v1_dir\"\n recipe_env_dir_name=\"$(echo \"$recipe_env_dir_name\" | sed 's/-/_/g')\"\n\n activate_dir=\"$env_dir/etc/conda/activate.d\"\n deactivate_dir=\"$env_dir/etc/conda/deactivate.d\"\n\n mkdir -p $activate_dir\n mkdir -p $deactivate_dir\n\n echo \"export $recipe_env_dir_name=$RECIPE_DIR\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_dir_name\">> $deactivate_dir/env_vars.sh\n\n #### File\n if [[ ! -z \"${recipe_env_file_name:-}\" ]] ## If the file env variable exists, set the env file var\n then\n echo \"export $recipe_env_file_name=$file_path\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_file_name\">> $deactivate_dir/env_vars.sh\n fi\n\n echo 'Recipe successfully built!'\n\n checksums_file.txt: |\n \n \"\"\", from_string=True)\n\n recipe.write_recipes()\n recipe_dir_path = recipe.recipe_dirs[\"one_file_v1\"] \n args = Namespace(command='check-recipe', debug=False, recipe_path=recipe_dir_path, dont_uninstall=True, dont_add_md5sum_for_checksum=False, id=None)\n assert check_recipe.check_recipe((),args) == True\n ## Test dir and file env_var\n conda_root = utils.conda_root()\n with open(os.path.join(conda_root,\"etc/conda/activate.d/env_vars.sh\")) as env_file:\n env_vars = [x for x in env_file if \"ggd_one_file_v1_dir\" in x or \"ggd_one_file_v1_file\" in x]\n first = False\n second = False\n for x in env_vars:\n if \"ggd_one_file_v1_dir\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/one_file_v1/1\") in x\n first = True\n elif \"ggd_one_file_v1_file\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/one_file_v1/1/one_file_v1.bw\")\n second = True\n else:\n assert False\n assert first == True\n assert second == True\n\n\n args = Namespace(command=\"show-env\", pattern=None)\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.show_env((),args)\n output = temp_stdout.getvalue().strip()\n assert \"$ggd_one_file_v1_file\" in output\n assert \"$ggd_one_file_v1_dir\" in output\n\n ## Test that an env_var is created for the non indexed file when two files are installed with an index present, and the dir\n recipe = CreateRecipe(\n \"\"\"\n two_files_v1:\n meta.yaml: |\n build:\n binary_relocation: false\n detect_binary_files_with_prefix: false\n noarch: generic\n number: 0\n extra:\n authors: mjc \n extra-files: []\n package:\n name: two_files_v1\n version: '1' \n requirements:\n build:\n - gsort\n - htslib\n - zlib\n run:\n - gsort\n - htslib\n - zlib\n source:\n path: .\n about:\n identifiers:\n genome-build: hg19\n species: Homo_sapiens\n keywords:\n - gaps\n - region\n summary: testing env_var for recipe with two files and an index present\n tags:\n genomic-coordinate-base: 0-based-inclusive\n data-version: Today\n data-provider: UCSC\n file-type: \n - bed\n final-files: \n - two_files_v1.bed.gz\n - two_files_v1.bed.gz.tbi\n final-file-sizes:\n two_files_v1.bed.gz: 24.02K\n two_files_v1.bed.gz.tbi: 10.24K\n ggd-channel: genomics\n\n \n recipe.sh: |\n #!/bin/sh\n set -eo pipefail -o nounset\n genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome\n wget --quiet $genome\n\n ## get the file, \n ## unzip 
it, \n ## remove any lines that do not have a scaffolding in the hg19.genom file. (If scaffolding in hg19.genome, grep exists with 0)\n ## add header to the file, and remove the bin column\n ## sort it based on the genome file\n ## bgzip it\n wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\\n | gzip -dc \\\n | awk '{ if (system(\"grep -Fq \" $2 \" hg19.genome\") == 0) print $0}' \\\n | awk -v OFS=\"\\t\" 'BEGIN {print \"#chrom\\tstart\\tend\\tsize\\ttype\\tstrand\"} {print $2,$3,$4,$7,$8,\"+\"}' \\\n | gsort /dev/stdin $genome \\\n | bgzip -c > gaps.bed.gz\n\n tabix gaps.bed.gz\n\n rm hg19.genome\n\n post-link.sh: |\n set -eo pipefail -o nounset\n\n if [[ -z $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/two_files_v1/1\n elif [[ $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") == \"base\" ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/two_files_v1/1\n else\n env_dir=$(conda info --envs | grep \"*\" | grep -o \"\\/.*\")\n export CONDA_ROOT=$env_dir\n export RECIPE_DIR=$env_dir/share/ggd/Homo_sapiens/hg19/two_files_v1/1\n fi\n\n PKG_DIR=`find \"$CONDA_SOURCE_PREFIX/pkgs/\" -name \"$PKG_NAME-$PKG_VERSION*\" | grep -v \".tar.bz2\" | grep \"$PKG_VERSION.*$PKG_BUILDNUM$\"`\n\n if [ -d $RECIPE_DIR ]; then\n rm -r $RECIPE_DIR\n fi\n\n mkdir -p $RECIPE_DIR\n\n (cd $RECIPE_DIR && bash $PKG_DIR/info/recipe/recipe.sh)\n\n cd $RECIPE_DIR\n\n ## Iterate over new files and replace file name with data package name and data version \n for f in *; do\n ext=\"${f#*.}\"\n filename=\"{f%%.*}\"\n (mv $f \"two_files_v1.$ext\")\n done\n\n ## Add environment variables \n #### File\n if [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 1 ]] ## If only one file\n then\n recipe_env_file_name=\"ggd_two_files_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(find $RECIPE_DIR -type f -maxdepth 1)\"\n\n elif [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 2 ]] ## If two files\n then\n indexed_file=`find $RECIPE_DIR -type f \\( -name \"*.tbi\" -or -name \"*.fai\" -or -name \"*.bai\" -or -name \"*.crai\" -or -name \"*.gzi\" \\) -maxdepth 1`\n if [[ ! -z \"$indexed_file\" ]] ## If index file exists\n then\n recipe_env_file_name=\"ggd_two_files_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(echo $indexed_file | sed 's/\\.[^.]*$//')\" ## remove index extension\n fi \n fi \n\n #### Dir\n recipe_env_dir_name=\"ggd_two_files_v1_dir\"\n recipe_env_dir_name=\"$(echo \"$recipe_env_dir_name\" | sed 's/-/_/g')\"\n\n activate_dir=\"$env_dir/etc/conda/activate.d\"\n deactivate_dir=\"$env_dir/etc/conda/deactivate.d\"\n\n mkdir -p $activate_dir\n mkdir -p $deactivate_dir\n\n echo \"export $recipe_env_dir_name=$RECIPE_DIR\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_dir_name\">> $deactivate_dir/env_vars.sh\n\n #### File\n if [[ ! 
-z \"${recipe_env_file_name:-}\" ]] ## If the file env variable exists, set the env file var\n then\n echo \"export $recipe_env_file_name=$file_path\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_file_name\">> $deactivate_dir/env_vars.sh\n fi\n\n echo 'Recipe successfully built!'\n\n checksums_file.txt: |\n \n \"\"\", from_string=True)\n\n recipe.write_recipes()\n recipe_dir_path = recipe.recipe_dirs[\"two_files_v1\"] \n args = Namespace(command='check-recipe', debug=False, recipe_path=recipe_dir_path, dont_uninstall=True, dont_add_md5sum_for_checksum=False, id=None)\n assert check_recipe.check_recipe((),args) == True\n ## Test dir and file env_var\n conda_root = utils.conda_root()\n with open(os.path.join(conda_root,\"etc/conda/activate.d/env_vars.sh\")) as env_file:\n env_vars = [x for x in env_file if \"ggd_two_files_v1_dir\" in x or \"ggd_two_files_v1_file\" in x]\n first = False\n second = False\n for x in env_vars:\n if \"ggd_two_files_v1_dir\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/two_files_v1/1\") in x\n first = True\n elif \"ggd_two_files_v1_file\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/two_files_v1/1/two_files_v1.bed.gz\")\n second = True\n else:\n assert False\n assert first == True\n assert second == True\n\n args = Namespace(command=\"show-env\", pattern=None)\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.show_env((),args)\n output = temp_stdout.getvalue().strip()\n assert \"$ggd_two_files_v1_file\" in output\n assert \"$ggd_two_files_v1_dir\" in output \n\n ## Test that NO env_var is created when two files are installed with no index present, and the dir\n recipe = CreateRecipe(\n \"\"\"\n two_files_noindex_v1:\n meta.yaml: |\n build:\n binary_relocation: false\n detect_binary_files_with_prefix: false\n noarch: generic\n number: 0\n extra:\n authors: mjc \n extra-files: \n - two_files_noindex_v1.genome\n - two_files_noindex_v1.txt.gz\n package:\n name: two_files_noindex_v1\n version: '1' \n requirements:\n build:\n - gsort\n - htslib\n - zlib\n run:\n - gsort\n - htslib\n - zlib\n source:\n path: .\n about:\n identifiers:\n genome-build: hg19\n species: Homo_sapiens\n keywords:\n - gaps\n - region\n summary: testing NO file env_var for recipe with two files and no index\n tags:\n genomic-coordinate-base: 0-based-inclusive\n data-version: Today\n data-provider: UCSC\n file-type: \n - genome\n - txt\n final-files: \n - two_files_noindex_v1.genome\n - two_files_noindex_v1.txt.gz\n final-file-sizes: \n two_files_noindex_v1.genome: 10.01K\n two_files_noindex_v1.txt.gz: 12.41K\n ggd-channel: genomics\n \n recipe.sh: |\n #!/bin/sh\n set -eo pipefail -o nounset\n wget --quiet https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome\n wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\\n | gzip -dc \\\n | awk -v OFS=\"\\t\" 'BEGIN {print \"#chrom\\tstart\\tend\\tsize\\ttype\\tstrand\"} {print $2,$3,$4,$7,$8,\"+\"}' \\\n | bgzip -c > two_files_noindex_v1.txt.gz\n\n post-link.sh: |\n set -eo pipefail -o nounset\n\n if [[ -z $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/two_files_noindex_v1/1\n elif [[ $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") == \"base\" ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export 
RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/two_files_noindex_v1/1\n else\n env_dir=$(conda info --envs | grep \"*\" | grep -o \"\\/.*\")\n export CONDA_ROOT=$env_dir\n export RECIPE_DIR=$env_dir/share/ggd/Homo_sapiens/hg19/two_files_noindex_v1/1\n fi\n\n PKG_DIR=`find \"$CONDA_SOURCE_PREFIX/pkgs/\" -name \"$PKG_NAME-$PKG_VERSION*\" | grep -v \".tar.bz2\" | grep \"$PKG_VERSION.*$PKG_BUILDNUM$\"`\n\n if [ -d $RECIPE_DIR ]; then\n rm -r $RECIPE_DIR\n fi\n\n mkdir -p $RECIPE_DIR\n\n (cd $RECIPE_DIR && bash $PKG_DIR/info/recipe/recipe.sh)\n\n cd $RECIPE_DIR\n\n ## Iterate over new files and replace file name with data package name and data version \n for f in *; do\n ext=\"${f#*.}\"\n filename=\"{f%%.*}\"\n if [[ ! -f \"two_files_noindex_v1.$ext\" ]] \n then\n (mv $f \"two_files_noindex_v1.$ext\")\n fi \n done\n\n ## Add environment variables \n #### File\n if [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 1 ]] ## If only one file\n then\n recipe_env_file_name=\"ggd_two_files_noindex_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(find $RECIPE_DIR -type f -maxdepth 1)\"\n\n elif [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 2 ]] ## If two files\n then\n indexed_file=`find $RECIPE_DIR -type f \\( -name \"*.tbi\" -or -name \"*.fai\" -or -name \"*.bai\" -or -name \"*.crai\" -or -name \"*.gzi\" \\) -maxdepth 1`\n if [[ ! -z \"$indexed_file\" ]] ## If index file exists\n then\n recipe_env_file_name=\"ggd_two_files_noindex_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(echo $indexed_file | sed 's/\\.[^.]*$//')\" ## remove index extension\n fi \n fi \n\n #### Dir\n recipe_env_dir_name=\"ggd_two_files_noindex_v1_dir\"\n recipe_env_dir_name=\"$(echo \"$recipe_env_dir_name\" | sed 's/-/_/g')\"\n\n activate_dir=\"$env_dir/etc/conda/activate.d\"\n deactivate_dir=\"$env_dir/etc/conda/deactivate.d\"\n\n mkdir -p $activate_dir\n mkdir -p $deactivate_dir\n\n echo \"export $recipe_env_dir_name=$RECIPE_DIR\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_dir_name\">> $deactivate_dir/env_vars.sh\n\n #### File\n if [[ ! 
-z \"${recipe_env_file_name:-}\" ]] ## If the file env variable exists, set the env file var\n then\n echo \"export $recipe_env_file_name=$file_path\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_file_name\">> $deactivate_dir/env_vars.sh\n fi\n\n echo 'Recipe successfully built!'\n\n checksums_file.txt: |\n \n \"\"\", from_string=True)\n\n recipe.write_recipes()\n recipe_dir_path = recipe.recipe_dirs[\"two_files_noindex_v1\"] \n args = Namespace(command='check-recipe', debug=False, recipe_path=recipe_dir_path, dont_uninstall=True, dont_add_md5sum_for_checksum=False, id=None)\n assert check_recipe.check_recipe((),args) == True\n ## Test dir and file env_var\n conda_root = utils.conda_root()\n with open(os.path.join(conda_root,\"etc/conda/activate.d/env_vars.sh\")) as env_file:\n env_vars = [x for x in env_file if \"ggd_two_files_noindex_v1_dir\" in x or \"ggd_two_files_noindex_v1_file\" in x]\n first = False\n for x in env_vars:\n if \"ggd_two_files_noindex_v1_dir\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/two_files_noindex_v1/1\") in x\n first = True\n elif \"ggd_two_files_noindex_v1_file\" in x:\n assert False ## There should not be a file env_var made for this package\n else:\n assert False\n assert first == True\n\n args = Namespace(command=\"show-env\", pattern=None)\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.show_env((),args)\n output = temp_stdout.getvalue().strip()\n assert \"$ggd_two_files_noindex_v1_file\" not in output\n assert \"$ggd_two_files_noindex_v1_dir\" in output\n\n ## Test that NO env_var is created when thre+ files are installed, and the dir\n recipe = CreateRecipe(\n \"\"\"\n three_files_v1:\n meta.yaml: |\n build:\n binary_relocation: false\n detect_binary_files_with_prefix: false\n noarch: generic\n number: 0\n extra:\n authors: mjc \n extra-files: \n - three_files_v1.genome\n - three_files_v1.1.txt.gz\n - three_files_v1.2.txt.gz\n package:\n name: three_files_v1\n version: '1' \n requirements:\n build:\n - gsort\n - htslib\n - zlib\n run:\n - gsort\n - htslib\n - zlib\n source:\n path: .\n about:\n identifiers:\n genome-build: hg19\n species: Homo_sapiens\n keywords:\n - gaps\n - region\n summary: testing NO file env_var for recipe with three+ files\n tags:\n genomic-coordinate-base: 0-based-inclusive\n data-version: Today\n data-provider: UCSC\n file-type: \n - txt\n - genome\n final-files: \n - three_files_v1.1.txt.gz\n - three_files_v1.2.txt.gz\n - three_files_v1.genome\n final-file-sizes: \n three_files_v1.1.txt.gz: 24.04K\n three_files_v1.2.txt.gz: 24.04K\n three_files_v1.genome: 10.01K\n ggd-channel: genomics\n \n recipe.sh: |\n #!/bin/sh\n set -eo pipefail -o nounset\n wget --quiet https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome\n wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\\n | gzip -dc \\\n | awk -v OFS=\"\\t\" 'BEGIN {print \"#chrom\\tstart\\tend\\tsize\\ttype\\tstrand\"} {print $2,$3,$4,$7,$8,\"+\"}' \\\n | bgzip -c > gap.txt.gz\n cp gap.txt.gz gaps.1.txt.gz\n mv gap.txt.gz gaps.2.txt.gz\n\n post-link.sh: |\n set -eo pipefail -o nounset\n\n if [[ -z $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") ]]; then\n export CONDA_ROOT=$(conda info --root)\n env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/three_files_v1/1\n elif [[ $(conda info --envs | grep \"*\" | grep -o \"\\/.*\") == \"base\" ]]; then\n export CONDA_ROOT=$(conda info --root)\n 
env_dir=$CONDA_ROOT\n export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/three_files_v1/1\n else\n env_dir=$(conda info --envs | grep \"*\" | grep -o \"\\/.*\")\n export CONDA_ROOT=$env_dir\n export RECIPE_DIR=$env_dir/share/ggd/Homo_sapiens/hg19/three_files_v1/1\n fi\n\n PKG_DIR=`find \"$CONDA_SOURCE_PREFIX/pkgs/\" -name \"$PKG_NAME-$PKG_VERSION*\" | grep -v \".tar.bz2\" | grep \"$PKG_VERSION.*$PKG_BUILDNUM$\"`\n\n if [ -d $RECIPE_DIR ]; then\n rm -r $RECIPE_DIR\n fi\n\n mkdir -p $RECIPE_DIR\n\n (cd $RECIPE_DIR && bash $PKG_DIR/info/recipe/recipe.sh)\n\n cd $RECIPE_DIR\n\n ## Iterate over new files and replace file name with data package name and data version \n for f in *; do\n ext=\"${f#*.}\"\n filename=\"{f%%.*}\"\n if [[ ! -f \"three_files_v1.$ext\" ]] \n then\n (mv $f \"three_files_v1.$ext\")\n fi \n done\n\n ## Add environment variables \n #### File\n if [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 1 ]] ## If only one file\n then\n recipe_env_file_name=\"ggd_three_files_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(find $RECIPE_DIR -type f -maxdepth 1)\"\n\n elif [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 2 ]] ## If two files\n then\n indexed_file=`find $RECIPE_DIR -type f \\( -name \"*.tbi\" -or -name \"*.fai\" -or -name \"*.bai\" -or -name \"*.crai\" -or -name \"*.gzi\" \\) -maxdepth 1`\n if [[ ! -z \"$indexed_file\" ]] ## If index file exists\n then\n recipe_env_file_name=\"ggd_three_files_v1_file\"\n recipe_env_file_name=\"$(echo \"$recipe_env_file_name\" | sed 's/-/_/g')\"\n file_path=\"$(echo $indexed_file | sed 's/\\.[^.]*$//')\" ## remove index extension\n fi \n fi \n\n #### Dir\n recipe_env_dir_name=\"ggd_three_files_v1_dir\"\n recipe_env_dir_name=\"$(echo \"$recipe_env_dir_name\" | sed 's/-/_/g')\"\n\n activate_dir=\"$env_dir/etc/conda/activate.d\"\n deactivate_dir=\"$env_dir/etc/conda/deactivate.d\"\n\n mkdir -p $activate_dir\n mkdir -p $deactivate_dir\n\n echo \"export $recipe_env_dir_name=$RECIPE_DIR\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_dir_name\">> $deactivate_dir/env_vars.sh\n\n #### File\n if [[ ! 
-z \"${recipe_env_file_name:-}\" ]] ## If the file env variable exists, set the env file var\n then\n echo \"export $recipe_env_file_name=$file_path\" >> $activate_dir/env_vars.sh\n echo \"unset $recipe_env_file_name\">> $deactivate_dir/env_vars.sh\n fi\n\n echo 'Recipe successfully built!'\n\n checksums_file.txt: |\n \n \"\"\", from_string=True)\n\n recipe.write_recipes()\n recipe_dir_path = recipe.recipe_dirs[\"three_files_v1\"] \n args = Namespace(command='check-recipe', debug=False, recipe_path=recipe_dir_path, dont_uninstall=True, dont_add_md5sum_for_checksum=False, id=None)\n assert check_recipe.check_recipe((),args) == True\n ## Test dir and file env_var\n conda_root = utils.conda_root()\n with open(os.path.join(conda_root,\"etc/conda/activate.d/env_vars.sh\")) as env_file:\n env_vars = [x for x in env_file if \"ggd_three_files_v1_dir\" in x or \"ggd_three_files_v1_file\" in x]\n first = False\n for x in env_vars:\n if \"ggd_three_files_v1_dir\" in x:\n assert os.path.join(conda_root, \"share/ggd/Homo_sapiens/hg19/three_files_v1/1\") in x\n first = True\n elif \"ggd_three_files_v1_file\" in x:\n assert False ## There should not be a file env_var made for this package\n else:\n assert False\n assert first == True\n\n args = Namespace(command=\"show-env\", pattern=None)\n temp_stdout = StringIO()\n with redirect_stdout(temp_stdout):\n show_env.show_env((),args)\n output = temp_stdout.getvalue().strip()\n assert \"$ggd_three_files_v1_file\" not in output\n assert \"$ggd_three_files_v1_dir\" in output", "def test_rpki_download_validator(self):\n test_path = Path('.')\n self.test___init__()\n rpki_path = Path(RPKI_Validator_Wrapper.rpki_package_path)\n\n assert list(rpki_path.glob('rpki-validator*'))\n assert path.exists(rpki_path / 'preconfigured-tals/arin-ripevalidator.tal')\n assert not path.exists(test_path / 'rpki-validator-3-latest-dist.tar.gz')", "def custom_config_path():\n return 'tests/test-config/valid-config.yaml'", "def test_nested_recipes(config_fixture):\n r = Recipes(\n\n \"\"\"\n shallow:\n meta.yaml: |\n package:\n name: shallow\n version: \"0.1\"\n build.sh: |\n #!/bin/bash\n echo \"Shallow Created\"\n pwd\n normal/normal:\n meta.yaml: |\n package:\n name: normal\n version: \"0.1\"\n build:\n skip: true\n requirements:\n build:\n - python 3.6\n build.sh: |\n #!/bin/bash\n echo \"Testing build.sh through python\"\n python -h\n deep/deep/deep:\n meta.yaml: |\n package:\n name: deep\n version: \"0.1\"\n requirements:\n build:\n - python\n run:\n - python\n build.sh: |\n #!/bin/bash\n ## Empty script\n F/I/V/E/deep:\n meta.yaml: |\n package:\n name: fivedeep\n version: \"0.1\"\n requirements:\n build:\n - python 3.6\n run:\n - python 3.6\n \"\"\", from_string=True)\n r.write_recipes()\n\n build_results = build.build_recipes(r.basedir, config_fixture,\n r.recipe_dirnames,\n testonly=False,\n force=False,\n mulled_test=False)\n assert build_results\n\n assert len(list(utils.get_recipes(r.basedir))) == 4\n\n for k, v in r.recipe_dirs.items():\n for i in utils.built_package_paths(v):\n assert os.path.exists(i)\n ensure_missing(i)", "def test_no_meta_yaml(datafiles):\n current_folder = Path.cwd()\n os.chdir(datafiles)\n with pytest.raises(SystemExit):\n project = Project(rules=RULES)\n os.chdir(str(current_folder))", "def test_get_spotdl_path(setup):\n\n assert get_spotdl_path() == Path(setup.directory, \".spotdl\")\n assert os.path.exists(os.path.join(setup.directory, \".spotdl\"))", "def test_source_file(host):\n recon_tools = host.file('/home/recon/.recon_tools')\n\n assert 
recon_tools.exists", "def test_build_yaml_syntax_error(self):\n mydir = os.path.dirname(__file__)\n path = os.path.join(mydir, './build_bad_syntax.yml')\n\n with assertRaisesRegex(self, build.BuildException, r'Bad yaml syntax.*build_bad_syntax\\.yml'):\n build.build_package(None, 'test_syntax_error', PACKAGE, [], path)", "def test_template_lookup_path(self):\n lookup_list = settings.TEMPLATES[0]['DIRS']\n found_path = False\n \n for entry in lookup_list:\n entry_normalised = os.path.normpath(entry)\n \n if entry_normalised == os.path.normpath(settings.TEMPLATE_DIR):\n found_path = True\n \n self.assertTrue(found_path, f\"{FAILURE_HEADER}Your project's templates directory is not listed in the TEMPLATES>DIRS lookup list. Check your settings.py module.{FAILURE_FOOTER}\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that find_path_by_name() returns nothing when the searched file is not found
def test_find_path_by_name_does_not_found_anything(self): yaml_loader = YamlFileLoader([]) self.assertEqual('', yaml_loader.find_path_by_name('some-file-that-does-not-exists', ''))
[ "def nonexistent_path_nonexistent_name(self):\n self.assertIsNone(get_folder_by_path(path=\"/home/not_a_user/\", name=\"not_a_test_folder\"))", "def existing_path_nonexistent_name(self):\n self.assertIsNone(get_folder_by_path(path=self.r_path, name=\"not_a_test_folder\"))", "def test_find_path_by_name_founds_path(self):\n\n yaml_loader = YamlFileLoader([])\n\n d = tempfile.TemporaryDirectory()\n os.environ['RKD_PATH'] = d.name\n\n with open(d.name + '/makefile.yml', 'w') as f:\n f.write('''\n version: org.riotkit.rkd/yaml/v1\n imports: []\n tasks: \n :join:iwa-ait:\n description: Subscribe to any local section of IWA-AIT, workers have common interest\n arguments:\n - not a list\n ''')\n\n try:\n path = yaml_loader.find_path_by_name('makefile.yml', '/')\n self.assertTrue(len(path) > 0)\n finally:\n d.cleanup()\n os.environ['RKD_PATH'] = ''", "def test_if_path_exists_for_empty(self):\n game_area = {}\n score = prepare_gamearea.check_if_path_exists(game_area, (0, 0), (1, 1))\n self.assertFalse(score)", "def test_non_existent_path(self):\n file_filter = Filter()\n files = list(file_filter.apply(directory=self.dir, file_system_objects=['not-existent-file']))\n self.assertEqual(files, [])", "def nonexistent_parent_existing_name(self):\n self.assertIsNone(get_folder_by_parent(parent_fid=2, name=self.r_name))", "def test_nonexistent_fid(self):\n assert get_folder_by_id(fid=3) is None", "def test_nonexistent_fid(self):\n self.assertRaises(AssertionError, lambda: get_subfolders_recursive(999))", "def verify_paths(config=None, output_collection=None, return_missing=False):\n paths = get_paths(config=config, output_collection=output_collection)\n missing = list(filter(lambda p: p and not os.path.exists(p), paths))\n return missing if return_missing else not bool(missing)", "def test_get_path_keyerror(self):\n x = _random_integer()\n y = _random_integer()\n D = {'a': {\n 'b': {\n 'c': {\n 'd': x,\n },\n 'e': {\n 'f': y,\n }\n }\n }}\n path = ['a', 'b', 'c', 'f']\n val = utils.get_path(D, path)\n self.assertIsNone(val)", "def find_path(tree: Person, name: str):\n target = Person(name) # make name into a person for comparison\n\n if tree == target:\n # found\n return [target.name]\n\n else:\n if tree.rel == []: # dead end\n return None\n \n returned = [tree.name] # list of visited nodes/people\n paths = tree.rel # list of paths from tree\n\n for path in paths:\n # only do this if not found yet\n if name not in returned:\n added = find_path(path,name)\n if added is not None:\n # if does not lead to a dead end\n returned.extend(added)\n \n # should only get here when done, if it get here and is not done\n # then skip\n if name not in returned:\n return []\n return returned", "def test_traverse_notfound(self):\n content = self.api.traverse('nowhere')\n self.assertEqual(content, None)", "def test_missing_git_and_slash_url(self, _, __):\n self.assertTrue(detect_repo.check_for_repo_name('fake-path', 'syzkaller'))", "def test_source_path_no_matching_files(self: TestBackupFile) -> None:\n\n backup_file = BackupFile('foo/bar.baz', 99)\n\n file_not_found_error: Optional[FileNotFoundError] = None\n try:\n backup_file.source_path\n except FileNotFoundError as err:\n file_not_found_error = err\n finally:\n self.assertRegex(str(file_not_found_error),\n 'Matching file not found')", "def test_nonexisting_path_raises():\n with pytest.raises(NotADirectoryError, match=\"Definitions directory not found: foo\"):\n nc.Nomenclature(\"foo\")", "def _check_input_path(self, input_path):", "def check_element_exists_by_name(self, 
text):\n try:\n element = self.driver.find_element_by_name(text)\n print(\"Element '\" + text + \"' found\")\n return element\n except NoSuchElementException:\n print(\"No element '\" + text + \"' found\")\n return None", "def check_path(self,path) :\n return self.path == path", "def existing_parent_nonexistent_name(self):\n self.assertIsNone(get_folder_by_parent(parent_fid=self.rid, name=\"not_a_test_folder\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Makes dict from test ids and ranked training ids, labels, scores.
def get_prediction_map(test_ids, train_ids_labels_and_scores, top_k): prediction_map = dict() for test_index, test_id in enumerate(test_ids): hex_test_id = utils.to_hex(test_id) aggregate_scores = {} for _, label, score in train_ids_labels_and_scores[test_index][:top_k]: if label not in aggregate_scores: aggregate_scores[label] = 0 aggregate_scores[label] += score label, score = max( aggregate_scores.items(), key=operator.itemgetter(1)) prediction_map[hex_test_id] = {'score': score, 'class': label} return prediction_map
[ "def calculate_metrics_dict(scores, y, lr_predicted, label):\n X1, X2 = Xy_to_Xn(lr_predicted, y)\n\n return {'cllr' + label: round(calculate_cllr(X1, X2).cllr, 4),\n 'auc' + label: roc_auc_score(y, scores),\n 'accuracy' + label: accuracy_score(y, scores > .5)}", "def map(self, fingerprints_test, fingerprints_train, verbose=False):\n # Get unique fingerprints\n fingerprints_train = np.unique(fingerprints_train)\n fingerprints_test = np.unique(fingerprints_test)\n\n # Create mappings\n mapping_train = dict()\n\n # Loop over fingerprints\n for fp in fingerprints_train:\n # Extract keys\n for key in fp:\n # Add fingerprint to each key\n mapping_train[key] = mapping_train.get(key, set()) | set([fp])\n\n # Refine mapping to fp_test -> set([fps_train labels])\n mapping = dict()\n # Loop over all testing fingerprints\n for i, fp in enumerate(fingerprints_test):\n\n # Print progress if verbose\n if verbose:\n print(\"{}/{}\".format(i+1, fingerprints_test.shape[0]), end='\\r')\n\n # Initialise set\n matches = set()\n\n # Loop over all keys of fingerprint\n for key in fp:\n # Get possible fingerprints\n matches |= mapping_train.get(key, set())\n\n # Initialise highest score\n highest_score = 0\n\n # Loop over all matches\n for match in matches:\n # Get score\n score = fp.compare(match)\n # If larger than highest score, replace match\n if score > highest_score:\n mapping[fp] = match\n highest_score = score\n\n # Return result\n return mapping", "def get_model_scores(pred_boxes):\n model_score = {}\n for img_id, val in pred_boxes.items():\n for score in val['scores']:\n if score not in model_score.keys():\n model_score[score] = [img_id]\n else:\n model_score[score].append(img_id)\n return model_score", "def get_test_scores():\n scores_dict = dict()\n num_scores = input(\"How many test scores would you like to enter? 
\")\n if not num_scores.isnumeric():\n raise ValueError(\"Please enter a positive integer for number of test scores.\")\n else:\n for x in range(0, int(num_scores)):\n score = input(\"Please enter a test score: \")\n if not str(score).isnumeric():\n raise ValueError(\"Scores must be a positive integer\")\n elif float(score) < 0:\n raise ValueError(\"Scores can't be negative\")\n elif float(score) > 100:\n raise ValueError(\"Scores can't be higher than 100\")\n else:\n scores_dict[\"Test \" + str(x+1)] = score\n return scores_dict", "def speaker_dict(self):\n train_dict = collections.defaultdict(list)\n if self._one_hot == True:\n tmp_label = np.argmax(self._train_label, axis=1)\n else:\n tmp_label = self._train_label\n \n for d, l in zip(self._train_data, tmp_label):\n train_dict[l].append(d) \n self._train_dict = [np.vstack(train_dict[k]) for k in range(self._n_class)]\n self._dict = True\n print('Creating dict')", "def user_score_as_dict(first_name, last_name, score):\n return {'first_name': first_name, 'last_name': last_name, 'score': score}", "def _test(tracked_info, X_test, y_test, metrics):\n for k, v in tracked_info.items():\n # test model with best DEF_SCORE\n best_mod_idx = np.argsort(v[VAL_SCORES][DEF_SCORE])[-1]\n test_model = v[MODELS][best_mod_idx]\n v[\"test_scores\"] = _get_metrics(test_model, X_test, y_test, metrics)\n\n return tracked_info", "def predict(self, test_data, actual_labels):\n # Obtain cluster number for every test item\n classes = []\n\n cluster_no = self._min_dist_pos(test_data[0], self._centroids)\n\n for i, item in enumerate(test_data):\n cluster_no = self._min_dist_pos(item, self._centroids)\n classes.append(cluster_no)\n\n evaluator = ClassificationEvaluator(\n pred_labels = np.array(classes),\n actual_labels = actual_labels\n )\n\n metrics = evaluator.evaluate()\n return np.array(classes), metrics", "def confusion_matrix(self, test_x, test_y):\n\n # Create an empty dictionary of dictionary and initialize it to 0\n d = defaultdict(dict)\n for xx in range(10):\n for yy in range(10):\n d[xx][yy] = 0\n\n data_index = 0\n for xx, yy in zip(test_x, test_y):\n # classify the test example\n predicted = self.classify(xx)\n # populate the dictionary\n d[yy][predicted] += 1\n data_index += 1\n if data_index % 100 == 0:\n print(\"%i/%i for confusion matrix\" % (data_index, len(test_x)))\n return d", "def convert(score_sort_toplist, target_label_pairs, test_label_pairs, index2node_mapping, train, total_list):\n tmp_list = []\n if train:\n for i in score_sort_toplist:\n scores = i[0]\n row = i[1]\n gene1 = index2node_mapping[row]\n col = i[2]\n gene2 = index2node_mapping[col]\n prediction_label_pair = (row, col)\n if prediction_label_pair in target_label_pairs:\n if prediction_label_pair in test_label_pairs:\n tmp_list.append([scores, row, col, gene1, gene2, 0, 1, 0])\n else:\n tmp_list.append([scores, row, col, gene1, gene2, 1, 0, 0])\n else:\n tmp_list.append([scores, row, col, gene1, gene2, 0, 0, 1])\n\n else:\n for i in score_sort_toplist:\n scores = i[0]\n row = i[1]\n gene1 = index2node_mapping[row]\n col = i[2]\n gene2 = index2node_mapping[col]\n prediction_label_pair = (row, col)\n if prediction_label_pair in test_label_pairs:\n tmp_list.append([scores, row, col, gene1, gene2, 0, 1, 0])\n else:\n tmp_list.append([scores, row, col, gene1, gene2, 0, 0, 1])\n total_list.extend(tmp_list)", "def getTrainValidationTestDD(self, trainFilename, validationFilename, testFilename, header=0):\n print(\"Reading Train: \", trainFilename)\n traindf = 
pd.read_csv(trainFilename, delimiter=',', low_memory=False, header=header)\n\n print(\"Reading Validate: \", validationFilename)\n validationdf = pd.read_csv(validationFilename, delimiter=',', low_memory=False, header=header)\n\n print(\"Reading Test: \", testFilename)\n testdf = pd.read_csv(testFilename, delimiter=',', low_memory=False, header=header)\n\n # Concat the data vertically\n combined_set = pd.concat([traindf, validationdf, testdf], axis=0)\n # print(combined_set.info())\n dict = {}\n # Loop through all columns in the dataframe\n print(\"Encoding all features in columns\")\n for feature in combined_set.columns:\n\n # Only apply for columns with categorical strings\n if combined_set[feature].dtype == 'object':\n\n original = combined_set[feature]\n # Replace strings with an integer\n combined_set[feature] = pd.Categorical(combined_set[feature]).codes\n\n replaced = combined_set[feature]\n\n # TODO: Need to find a way to speed this up\n if feature == 'bidid':\n colDict = {}\n for i in range(len(original)):\n # print(\"ttt: \", original.iloc[i], \" \", replaced.iloc[i])\n if replaced.iloc[i] not in colDict:\n colDict[replaced.iloc[i]] = original.iloc[i]\n dict[feature] = colDict\n\n train = combined_set[:traindf.shape[0]]\n validation = combined_set[traindf.shape[0]:(traindf.shape[0]+validationdf.shape[0])]\n test = combined_set[(traindf.shape[0]+validationdf.shape[0]):]\n\n print(\"Length of Train: \", train.shape[0])\n print(\"Length of Validation: \", validation.shape[0])\n print(\"Length of Test: \", test.shape[0])\n\n return train, validation, test, dict\n\n # print(\"dict\", dict)", "def createDictLabels(labels):\n\n # Re-arange the Target vectors between [0..nClasses_train]\n labels = labels.numpy()\n unique_labels = np.unique(labels)\n dictLabels = {val: i for i, val in enumerate(unique_labels)}\n dictLabelsInverse = {i: val for i, val in enumerate(unique_labels)}\n return dictLabels,dictLabelsInverse", "def __create_classification_indexes(true_labels: list, predicted_labels: list) -> dict:\n classification_indexes = {}\n for classes in true_labels:\n if classes not in classification_indexes:\n classification_indexes[classes] = len(classification_indexes)\n for classes in predicted_labels:\n if classes not in classification_indexes:\n classification_indexes[classes] = len(classification_indexes)\n return classification_indexes", "def split_train_dev_test(data):\n\n\tsongs = []\n\tverses = []\n\tfor dictio in data:\n\t\tif isinstance(dictio['featuring'],float): # nan is a float, thus if no featuring artists it's a float\n\t\t\tsongs.append(dictio)\n\t\telse:\n\t\t\tverses.append(dictio)\n\trandom.seed(50)\n\trandom.shuffle(songs)\n\ttrain = songs[:int(0.8*len(songs))] # training data consists of 80%\n\tdev = songs[int(0.8*len(songs)):int(0.9*len(songs))] # development data consists of 10%\n\ttest = songs[int(0.9*len(songs)):] # test data consists of 10%\n\t#if add_verses == True:\n\ttrain = train + verses # add verses to the training data\n\trandom.shuffle(train)\n\treturn {\"train\":train,\"dev\":dev,\"test\":test}", "def create_splits(self):\n train_inds, valid_inds, test_inds = gen_rand_split_inds(\n self.NUM_TRAIN_CLASSES, self.NUM_VALID_CLASSES, self.NUM_TEST_CLASSES)\n # \"Variant\" refers to the aircraft model variant (e.g., A330-200) and is\n # used as the class name in the dataset.\n variants_path = os.path.join(self.data_root, 'data', 'variants.txt')\n with tf.io.gfile.GFile(variants_path, 'r') as f:\n variants = [line.strip() for line in f.readlines() if line]\n 
variants = sorted(variants)\n assert len(variants) == (\n self.NUM_TRAIN_CLASSES + self.NUM_VALID_CLASSES + self.NUM_TEST_CLASSES)\n\n splits = {\n 'train': [variants[i] for i in train_inds],\n 'valid': [variants[i] for i in valid_inds],\n 'test': [variants[i] for i in test_inds]\n }\n return splits", "def predict_scores(self, dataset: 'CDataset') -> Dict[int, float]:\n response = json.loads(_handle_rust_str(lib.predict_scores(self.pointer, dataset.pointer)))\n _maybe_raise_error_json(response)\n return dict((int(k), v) for k,v in response.items())", "def split(adata, test_size=0.2, random_state=0):\n all_inds = np.arange(len(adata))\n inds_train, inds_test = train_test_split(\n all_inds, test_size=test_size, random_state=random_state\n )\n split_inds_dict = {\"train\": sorted(inds_train), \"test\": sorted(inds_test)}\n split_inds_dict = {k: [int(i) for i in v] for k, v in split_inds_dict.items()}\n return split_inds_dict, {k: adata[v, :] for k, v in split_inds_dict.items()}", "def train_test_split(keys, validation_size, seed, is_train):\n np.random.seed(seed)\n if is_train:\n valid_keys = np.random.choice(keys, size=int(len(keys) * validation_size), replace=False)\n train_keys = list(set(keys) - set(valid_keys))\n test_keys = []\n else:\n train_keys = []\n valid_keys = []\n test_keys = keys\n return sorted(train_keys), sorted(valid_keys), sorted(test_keys)", "def prepare_imdb_data(data):\n \n # TODO: Combine positive and negative reviews and labels\n data_train = data['train']['pos'] + data['train']['neg']\n data_test = data['test']['pos'] + data['test']['neg']\n labels_train = labels['train']['pos'] + labels['train']['neg']\n labels_test = labels['test']['pos'] + labels['test']['neg']\n \n # TODO: Shuffle reviews and corresponding labels within training and test sets\n data_train, labels_train = shuffle(data_train, labels_train)\n data_test, labels_test = shuffle(data_test, labels_test)\n \n # Return a unified training data, test data, training labels, test labets\n return data_train, data_test, labels_train, labels_test" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets predictions using embedding similarity and local feature reranking.
def get_predictions(model: AbstractEmbeddingModel, rerank: AbstractRerankStrategy, labelmap, num_to_rerank, top_k, distance_func='cosine'): train_image_paths = [ x for x in pathlib.Path( const.INFER_TRAIN_IMAGE_DIR).rglob('*.jpg')] test_image_paths = [ x for x in pathlib.Path( const.INFER_TEST_IMAGE_DIR).rglob('*.jpg')] test_ids, test_embeddings = \ model.extract_global_features(test_image_paths) train_ids, train_embeddings = \ model.extract_global_features(train_image_paths) train_ids_labels_and_scores = [None] * test_embeddings.shape[0] # Using (slow) for-loop, as distance matrix doesn't fit in memory. for test_index in range(test_embeddings.shape[0]): distances = spatial.distance.cdist( test_embeddings[np.newaxis, test_index, :], train_embeddings, distance_func)[0] partition = np.argpartition(distances, num_to_rerank)[:num_to_rerank] nearest = sorted([(train_ids[p], distances[p]) for p in partition], key=lambda x: x[1]) train_ids_labels_and_scores[test_index] = [ (train_id, labelmap[utils.to_hex(train_id)], 1. - cosine_distance) for train_id, cosine_distance in nearest ] del test_embeddings del train_embeddings gc.collect() pre_verification_predictions = get_prediction_map( test_ids, train_ids_labels_and_scores) for test_index, test_id in enumerate(test_ids): train_ids_labels_and_scores[test_index] = \ rerank.rescore_and_rerank( test_id, train_ids_labels_and_scores[test_index]) post_verification_predictions = get_prediction_map( test_ids, train_ids_labels_and_scores, top_k) return pre_verification_predictions, post_verification_predictions
[ "def test_predict():\n recommender = SLIM(alpha=0.1, l1_ratio=1e-3, seed=0)\n utils.test_binary_recommend_ml100k(recommender, 0.1)", "def getPrediction(self):\r\n \treturn self.prediction", "def predictions_relevance(self):\n raise NotImplementedError", "def predict():\n if model:\n\n try:\n incoming_data = request.get_json()\n client_ip = request.environ['REMOTE_ADDR']\n # Keep only the variables contribution to model prediction\n repeat_contact = {key: [value] for key, value in incoming_data.items() if key.lower() not in config.NOT_TO_READ}\n \n with counter.get_lock():\n counter.value += 1\n out = counter.value\n predictions = predict_repeat_contact(repeat_contact, model, features_transform_pipe)\n app.logger.info(f\"The prediction has been served for request id {counter} with client ip {client_ip}\")\n \n # we can store the incoming_data and final predictions in the database \n\n return jsonify(predictions)\n except:\n return jsonify({'trace': traceback.format_exc()})\n else:\n return (\"No model loaded\")", "def predict_rent():\n\n test_X, test_Y, model = train_model()\n predicted_values = model.predict(predicted_values)\n return test_X.as_matrix(), test_Y.as_matrix(), predicted_values", "def predict(user_id, movie_id):\n print_user_info(user_id)\n print_movie_info(movie_id)\n print_actual_rating(user_id, movie_id)\n avg = average_rating(movie_id)\n nearest = nearest_neighbour(user_id, movie_id)\n slope = slope_one(user_id, movie_id)\n hybrid_algorithm(avg, nearest, slope)", "def generate_predictions(self):\n answers = []\n logger.info(\"*** generate predictions ***\")\n logger.info(\"*** eval examples: {} ***\".format(len(self.best_scores)))\n logger.info(\"*** known examples: {} ***\".format(len(self.results)))\n logger.info(\"*** unknown examples: {} ***\".format(len(self.unknown_examples)))\n assert len(self.best_scores) == len(self.slices) + len(self.unknown_examples)\n for id in self.best_scores.keys():\n if id in self.results.keys() and id in self.slices.keys():\n doc_start, index = self.results[id]\n slice: SliceItem = self.slices[id]\n passage_token_start = doc_start + index[0] - len(slice.question.question_tokens) - 2\n passage_token_end = doc_start + index[1] - len(slice.question.question_tokens) - 2\n assert 0 <= passage_token_start < len(slice.question.context.tokens)\n assert 0 < passage_token_end <= len(slice.question.context.tokens)\n answer = \"\".join(slice.question.context.tokens[passage_token_start:passage_token_end])\n else:\n answer = '疫情' # 该样本经过预测没有答案\n slice = self.unknown_examples[id]\n answers.append({'id': id, 'pred': answer, 'label': slice.question.answer})\n return answers", "def predict(self):\n out_array = []\n if len(self.tracks) < 1:\n return np.zeros(0, 4 + self.num_classes)\n for track in self.tracks:\n out_array.append(track.to_cwh())\n out_array = np.array(out_array)\n predictions = self.predictor.get_predictions(out_array)\n\n return np.concatenate([predictions, out_array[:, -1, 4:]], axis=-1)", "def sample_prediction(self):\n\t\tnn_param_set = np.random.choice(self.nn_param_sets, p = self.posterior_weights)\n\t\tself.set_k_weights(nn_param_set)\n\t\treturn self.model.predict(self.x)", "def test_predict():\n\t\n\t# Create a row of data and run prediction.\n\thome = 'Arsenal'\n\taway = 'Chelsea'\n\tstats = pd.read_sql_query(\"select * from stats;\", engine)\n\tmodel = joblib.load('./model.pkl')\n\tresult = prediction.prediction(home, away, stats, model)\n\n\t# Check type of output.\n\tassert isinstance(result, np.ndarray)\n\n\t# Check array 
length.\n\tassert len(result) == 3", "def predict(self, text):", "def do_predictions(self):\n\n self.train_preds = self.tfmodel.predict(self.Data.X_train)\n self.test_preds = self.tfmodel.predict(self.Data.X_test)\n\n self.Helpers.logger.info(\n \"Training predictions: \" + str(self.train_preds))\n self.Helpers.logger.info(\n \"Testing predictions: \" + str(self.test_preds))\n print(\"\")", "def predict():\n # pass the song into the lclf object, like before\n\n # now, convert the results into json!\n\n # return the json data to the endpoint.\n return data", "def predict_qoe(self):\r\n\t\tfor prediction_metric in self.prediction_metrics:\r\n\t\t\tfor service in VIDEO_SERVICES:\r\n\t\t\t\tthese_players = [player for player in self.players if\\\r\n\t\t\t\t\tself.players[player]['service'] == service and self.players[player]['features'][prediction_metric] is not None]\r\n\t\t\t\t# Predictions are run in parallel, since this is fastest\r\n\t\t\t\tall_player_features = [self.players[player][\"features\"][prediction_metric] \\\r\n\t\t\t\t\tfor player in these_players]\r\n\t\t\t\t\r\n\t\t\t\tif all_player_features == []: continue\r\n\r\n\t\t\t\tif not self.use_perfect or prediction_metric != \"buffer\":\r\n\t\t\t\t\t# Call the prediction function\r\n\t\t\t\t\tpredicted_metrics = self.prediction_models[prediction_metric][service](np.array(all_player_features))\r\n\t\t\t\telse: # buffer and we want to use perfect information\r\n\t\t\t\t\t# get the buffers from the zmq stream\r\n\t\t\t\t\tstats_msg = None\r\n\t\t\t\t\twhile True:\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tstats_msg = self.zmq_pull_socket.recv_pyobj(flags=zmq.NOBLOCK)\r\n\t\t\t\t\t\t\t# update players with new info\r\n\t\t\t\t\t\t\tfor player in stats_msg:\r\n\t\t\t\t\t\t\t\tfor k in stats_msg[player]:\r\n\t\t\t\t\t\t\t\t\tself.players[\"10.0.0.{}\".format(player+1)][\"ground_truth_values\"][k].append(\r\n\t\t\t\t\t\t\t\t\t\tstats_msg[player][k])\r\n\t\t\t\t\t\texcept zmq.ZMQError:\r\n\t\t\t\t\t\t\tbreak # No new messages\r\n\t\t\t\t\t# use most recent ground truth info for each player\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tpredicted_metrics = [self.players[player][\"ground_truth_values\"][\"buffer\"][-1] for player in self.players]\r\n\t\t\t\t\texcept IndexError:\r\n\t\t\t\t\t\t# no information yet -- just wait\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t# save predictions for other parts of the pipeline\r\n\t\t\t\tfor predicted_metric, player in zip(predicted_metrics, these_players):\r\n\t\t\t\t\tself.players[player][\"predictions\"][prediction_metric].append((time.time(), predicted_metric))\r\n\t\t\t\t# Log predictions for post-mortem analysis\r\n\t\t\t\tself.log_stat(\"pred\", [(player, prediction_metric, predicted_metric, time.time()) \r\n\t\t\t\t\tfor predicted_metric, player in zip(predicted_metrics, these_players)])", "def predict(self, h_test, r_test, mulit_labels_test):\n assert self.clf is not None, \"The model need to be trained before used for prediction\"\n h_test = h_test.sign()\n\n h_mat = self.h_train.sign()\n r_mat = self.r_train.sign()\n tot_ret_rate = (r_mat.sum(0)/h_mat.sum(0)).A1\n pred = []\n\n ratio = self.ratio\n for i in range(h_test.shape[0]):\n nn = self.clf.kneighbors(h_test[i, :], self.k)[1][0]\n if self.step == 1:\n pred_bsk = 1\n else:\n res_label = 1-self.bsk_label_train[nn]\n res_multi = self.multi_labels_train[nn]\n\n a = res_label.dot(1-res_multi)/len(nn)\n c = res_label.dot(res_multi)/len(nn)\n pred_i = ((1-a)*ratio + (1-c) - np.sqrt((1-a)**2*ratio**2+(1-c)**2+2*(a*c+(a+c)-1)*ratio))/(2*ratio)\n\n if 
mulit_labels_test[i]:\n pred_i = pred_i * ratio\n\n res_h = self.h_train[nn, :].sign()\n res_r = self.r_train[nn, :].sign()\n with np.errstate(divide='ignore',invalid='ignore'):\n pred_prod_i = (res_r.T.dot(1-res_label))/(res_h.T.dot(1-res_label))\n idx = np.isnan(pred_prod_i)\n pred_prod_i[idx] = tot_ret_rate[idx]\n res_h1 = (h_test[i, :] > 1).todense().A1+1\n pred_prod_i = pred_prod_i * res_h1\n idx = (h_test[i, :].todense().A1 > 0)\n pred_prod_i = pred_prod_i[idx] * pred_i\n\n pred.append((pred_i, r_test[i, idx].sum() > 0,\n pred_prod_i, r_test[i, idx].todense().A1 > 0))\n pred_rst = pd.DataFrame(pred, columns=['pred_prob', 'obs', 'pred_prob_prod', 'obs_prod'])\n return pred_rst", "def predict(model, X_test):", "def predict_RF(self):\r\n data = self.data_train1\r\n labels = self.labels_train\r\n data_test = self.data_test1\r\n labels_test = self.labels_test\r\n \r\n model = RandomForestClassifier()\r\n model.fit(data, labels.iloc[:,0])\r\n prediction = model.predict(data_test) \r\n model_score = model.score(data_test, labels_test)\r\n \r\n self.RF_prediction = prediction\r\n self.RF_score = model_score", "def get_predictions(self, inferences):\n return inferences", "def predict(self):\n\t\treturn self.y_pred" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test handling of incomplete pgs. Requires 4 osds.
def test_incomplete_pgs(ctx, config): testdir = teuthology.get_testdir(ctx) if config is None: config = {} assert isinstance(config, dict), \ 'task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) (mon,) = ctx.cluster.only(first_mon).remotes.keys() num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) assert num_osds == 4 manager = ceph_manager.CephManager( mon, ctx=ctx, logger=log.getChild('ceph_manager'), ) while len(manager.get_osd_status()['up']) < 4: time.sleep(10) manager.flush_pg_stats([0, 1, 2, 3]) manager.wait_for_clean() log.info('Testing incomplete pgs...') for i in range(4): manager.set_config( i, osd_recovery_delay_start=1000) # move data off of osd.0, osd.1 manager.raw_cluster_cmd('osd', 'out', '0', '1') manager.flush_pg_stats([0, 1, 2, 3], [0, 1]) manager.wait_for_clean() # lots of objects in rbd (no pg log, will backfill) p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '20', 'write', '-b', '1', '--no-cleanup']) p.wait() # few objects in rbd pool (with pg log, normal recovery) for f in range(1, 20): p = rados_start(testdir, mon, ['-p', 'rbd', 'put', 'foo.%d' % f, '/etc/passwd']) p.wait() # move it back manager.raw_cluster_cmd('osd', 'in', '0', '1') manager.raw_cluster_cmd('osd', 'out', '2', '3') time.sleep(10) manager.flush_pg_stats([0, 1, 2, 3], [2, 3]) time.sleep(10) manager.wait_for_active() assert not manager.is_clean() assert not manager.is_recovered() # kill 2 + 3 log.info('stopping 2,3') manager.kill_osd(2) manager.kill_osd(3) log.info('...') manager.raw_cluster_cmd('osd', 'down', '2', '3') manager.flush_pg_stats([0, 1]) manager.wait_for_active_or_down() assert manager.get_num_down() > 0 # revive 2 + 3 manager.revive_osd(2) manager.revive_osd(3) while len(manager.get_osd_status()['up']) < 4: log.info('waiting a bit...') time.sleep(2) log.info('all are up!') for i in range(4): manager.kick_recovery_wq(i) # cluster must recover manager.wait_for_clean()
[ "def test_parse_phout_incomplete_fields_count(self, remove_data_file):\n\n filename = remove_data_file()\n data = self.set_phout_data()\n data.append(\"a\\tb\")\n self.set_phout_file(filename, data)\n\n # check exception text\n with pytest.raises(\n ValueError, match=r'Incorrect fields count in line 11'):\n phout.parse_phout(filename)", "def test_list_of_non_modulatory_phrases_is_empty_for_pieces_with_heavy_polymodal_frame():\n assert piece3.non_modulatory_phrases == []\n assert piece4.non_modulatory_phrases == []", "def test_full(self):\n with open(os.path.join(RESOURCE_PATH, 'unit_514-2014-351-2-0.mrg'), 'rU') as file_handle:\n parser = GliderParser(self.config, file_handle, self.exception_callback)\n\n particles = parser.get_records(40)\n # requested more than are available in file, should only be 10\n self.assertEquals(len(particles), 31)\n\n self.assertEqual(self.exception_callback_value, [])", "def test_simple_with_bad_eop(self):\r\n log.debug('CAG TEST: BAD END OF PROFILE')\r\n stream_handle = StringIO(TEST_DATA_beop)\r\n parser = CtdpfCklWfpSioParser(self.config, stream_handle,\r\n self.exception_callback)\r\n # next get records\r\n result = parser.get_records(4)\r\n if result:\r\n log.debug('CAG TEST: FAILED TO DETECT BAD END OF PROFILE')\r\n self.fail()\r\n else:\r\n log.debug('CAG TEST: BAD END OF PROFILE DETECTED')\r\n pass", "def test_bad_data(self):\n\n # the first data record in this file is corrupted and will be ignored\n # we expect the first 2 particles to be the metadata particle and the\n # intrument particle from the data record after the corrupted one\n with open(os.path.join(RESOURCE_PATH, '11079419_BAD_PPB_OCR.txt'), 'rU') as file_handle:\n\n log.debug(self.exception_callback_value)\n\n parser = SpkirAbjCsppParser(self._recov_config,\n file_handle,\n self.exception_callback)\n\n particles = parser.get_records(2)\n\n self.assert_particles(particles, 'bad_data_record_recov.yml', RESOURCE_PATH)\n\n with open(os.path.join(RESOURCE_PATH, '11079419_BAD_PPB_OCR.txt'), 'rU') as file_handle:\n\n log.debug(self.exception_callback_value)\n\n parser = SpkirAbjCsppParser(self._telem_config,\n file_handle,\n self.exception_callback)\n\n particles = parser.get_records(2)\n\n self.assert_particles(particles, 'bad_data_record_telem.yml', RESOURCE_PATH)", "def test_bad_sample(self):\n # create some data to parse\n self.clear_async_data()\n\n path = self.create_sample_data('multiple_ctdgv_record.mrg', \"unit_363_2013_245_6_9.mrg\")\n\n # Create and store the new driver state\n state = {\n 'unit_363_2013_245_6_9.mrg': self.get_file_state(path, False, 2506),\n }\n self.driver = self._get_driver_object(memento=state)\n\n self.driver.start_sampling()\n\n # verify data is produced\n self.assert_data(GgldrCtdgvDelayedDataParticle, 'bad_sample_ctdgv_record.mrg.result.yml', count=3, timeout=10)\n self.assert_file_ingested(\"unit_363_2013_245_6_9.mrg\")", "def test_simple_with_no_eop(self):\r\n log.debug('CAG TEST: MISSING END OF PROFILE')\r\n stream_handle = StringIO(TEST_DATA_neop)\r\n parser = CtdpfCklWfpSioParser(self.config, stream_handle,\r\n self.exception_callback)\r\n # next get records\r\n result = parser.get_records(4)\r\n if result:\r\n log.debug('CAG TEST: FAILED TO DETECT MISSING END OF PROFILE')\r\n self.fail()\r\n else:\r\n log.debug('CAG TEST: MISSING END OF PROFILE DETECTED')\r\n pass", "def test_non_modulatory_phrases():\n assert len(piece1.non_modulatory_phrases) == 3\n assert len(piece2.non_modulatory_phrases) == 14\n assert len(piece5.non_modulatory_phrases) == 13\n 
assert len(piece7.non_modulatory_phrases) == 19\n assert len(piece8.non_modulatory_phrases) == 14\n assert len(piece14.non_modulatory_phrases) == 26\n assert len(piece45.non_modulatory_phrases) == 20", "def test_notEnoughData(self):\n self.assertRaises(struct.error, self.getMP, '\\x02\\x00')", "def test_good_geom(self):\n #NOTE Turbomole uses bohr radius: x//0.52917720859 for geom locations\n result=[\n '1.88972613289 3.77945226577 -1.88972613289 C',\n '3.77945226577 5.66917839866 1.88972613289 H'\n ]\n self.assertEqual(check_geom(self.good_geom), result)", "def test_rec_ct_missing_end(self):\n in_file = open(os.path.join(RESOURCE_PATH,\n 'SBE37-IM_20110101_missing_end.hex'), 'r')\n parser = CtdmoGhqrRecoveredCtParser(self.config_rec_ct, in_file, self.exception_callback)\n\n # Not expecting any particles.\n expected_results = []\n\n # Try to get one particle and verify we didn't get any.\n result = parser.get_records(1)\n self.assertEqual(result, expected_results)\n\n in_file.close()\n self.assertEqual(self.exception_callback_value, [])", "def test_bogus_pg83_in_pv(self):\n with self.assertLogs(stor.__name__, 'WARNING'):\n self.assertIsNone(self.dwrap.phys_vols[2].pg83)", "def dummy_no_ephem():", "def check_for_bad_chunks():\n chunked_data = set(s3_list_files(\"CHUNKED_DATA\"))\n bad_chunks = []\n for entry in ChunksRegistry():\n if entry.data_type in CHUNKABLE_FILES and entry.chunk_path not in chunked_data:\n bad_chunks.append(entry)\n print \"bad chunks:\", len(bad_chunks)\n\n # for chunk in bad_chunks:\n # u = chunk.user_id\n # print Study(_id=u.study_id).name", "def test_chunk_case_one_miss_point(num_chunks, chunk_size):\n labels = gen_labels_for_chunks(num_chunks, chunk_size)\n\n assert len(labels) >= 1\n constraints = Constraints(labels[1:])\n with pytest.raises(ValueError) as e:\n constraints.chunks(num_chunks=num_chunks, chunk_size=chunk_size,\n random_state=SEED)\n\n expected_message = (('Not enough possible chunks of %d elements in each'\n ' class to form expected %d chunks - maximum number'\n ' of chunks is %d'\n ) % (chunk_size, num_chunks, num_chunks - 1))\n\n assert str(e.value) == expected_message", "def check_illegal(self):\n for i in range(self.__sample_size):\n j = 0\n while j < self.__dimension.get_dim_size():\n if not (self.get_region(j)[0] < self.__population[i].get_feature(j) < self.get_region(j)[1]):\n break\n else:\n j += 1\n if j == self.__dimension.get_dim_size():\n return False\n return True", "def test_bad_data(self):\n\tself.state = {StateKey.UNPROCESSED_DATA:[[0, len(CtdmoParserUnitTestCase.BAD_TEST_DATA)]],\n\t StateKey.IN_PROCESS_DATA:[],\n\t StateKey.TIMESTAMP:0.0}\n self.stream_handle = StringIO(CtdmoParserUnitTestCase.BAD_TEST_DATA)\n self.parser = CtdmoParser(self.config, self.state, self.stream_handle,\n self.state_callback, self.pub_callback) # last one is the link to the data source\n\n result = self.parser.get_records(1)\n\tself.stream_handle.close()\n self.assert_result(result, [[894,1085,12,1],[1472,1663,12,0],[2297,2487,12,0]],\n\t\t\t [[0,50],[374,507],[894,1085],[1199,1663],[2297,2487]],\n\t\t\t self.timestamp4, self.particle_d_new)", "def test_input_data_is_not_empty(self):\n self.assertTrue(self.data_processor.input_data_frames)", "def test_parse_phout_exceeded_fields_count(self, remove_data_file):\n\n filename = remove_data_file()\n data = self.set_phout_data()\n data.append(\"1\\t2\\t3\\t4\\t5\\t6\\t7\\t8\\t9\\t10\\t11\\t12\\t13\")\n self.set_phout_file(filename, data)\n\n # check exception text\n with pytest.raises(\n ValueError, 
match=r'Incorrect fields count in line 11'):\n phout.parse_phout(filename)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the class initialised correctly. Mainly I want to check that when the class was initialised, it ran tally_hpo_terms() correctly. Check that the counts of the HPO terms used in the probands match what is expected.
def test_setup(self): self.assertEqual(self.graph.total_freq, 3) self.assertEqual(self.graph.get_ids_per_term("HP:0002011"), {'person_02', 'person_03'} ) # check that a redundant term has been added, even though a more specific # descendant term was included self.assertTrue('sample_ids' in self.graph.nodes['HP:0000118']) # Check that we get an error if we look for probands with a term that was # not used in the probands. with self.assertRaises(KeyError): self.graph.nodes["HP:0000001"]['sample_ids'] # but a similar check using the official method returns an empty set self.assertEqual(self.graph.get_ids_per_term("HP:0000001"), set([]))
[ "def test_calculate_217_count(self):\r\n\r\n self.DUT.hazard_rate_type = 1\r\n self.DUT.operating_voltage = 1.25\r\n self.DUT.acvapplied = 0.025\r\n self.DUT.rated_voltage = 3.3\r\n\r\n self.assertFalse(self.DUT.calculate_part())\r\n self.assertEqual(self.DUT.hazard_rate_model['equation'],\r\n 'lambdab * piQ')\r\n self.assertAlmostEqual(self.DUT.hazard_rate_model['lambdab'], 21.0)\r\n self.assertEqual(self.DUT.hazard_rate_model['piQ'], 10.0)\r\n self.assertAlmostEqual(self.DUT.hazard_rate_active, 0.00021)", "def test_calculate_217_count(self):\r\n\r\n self.DUT.hazard_rate_type = 1\r\n self.DUT.operating_voltage = 1.25\r\n self.DUT.acvapplied = 0.025\r\n self.DUT.rated_voltage = 3.3\r\n\r\n self.assertFalse(self.DUT.calculate_part())\r\n self.assertEqual(self.DUT.hazard_rate_model['equation'],\r\n 'lambdab * piQ')\r\n self.assertAlmostEqual(self.DUT.hazard_rate_model['lambdab'], 28.0)\r\n self.assertEqual(self.DUT.hazard_rate_model['piQ'], 10.0)\r\n self.assertAlmostEqual(self.DUT.hazard_rate_active, 0.00028)", "def test_boyer_moore_with_counts(self):\r\n\r\n # example 1\r\n p = 'word'\r\n t = 'there would have been a time for such a word'\r\n lowercase_alphabet = 'abcdefghijklmnopqrstuvwxyz '\r\n p_bm = bm.BoyerMoore(p, lowercase_alphabet)\r\n occurrences, _, _, num_alignments, num_character_comparisons = rau.boyer_moore_with_counts(p, p_bm, t)\r\n print(occurrences, num_alignments, num_character_comparisons)\r\n\r\n self.assertEqual(len(occurrences), 1)\r\n self.assertEqual(occurrences[0], 40)\r\n self.assertEqual(num_alignments, 12)\r\n self.assertEqual(num_character_comparisons, 15)\r\n\r\n #example 2\r\n p = 'needle'\r\n t = 'needle need noodle needle'\r\n p_bm = bm.BoyerMoore(p, lowercase_alphabet)\r\n occurrences, _, _, num_alignments, num_character_comparisons = rau.boyer_moore_with_counts(p, p_bm, t)\r\n print(occurrences, num_alignments, num_character_comparisons)\r\n\r\n self.assertEqual(len(occurrences), 2)\r\n self.assertEqual(occurrences[0], 0)\r\n self.assertEqual(occurrences[1], 19)\r\n self.assertEqual(num_alignments, 5)\r\n self.assertEqual(num_character_comparisons, 18)", "def __init__(self, n, ngram_counts, vocab, unk=False):\n\n self.n = n\n\n self.vocab = vocab\n\n self.V = len(vocab)\n\n self.ngram_counts = ngram_counts\n\n # YOUR CODE HERE\n # START BY MAKING THE RIGHT COUNTS FOR THIS PARTICULAR self.n\n # for unigrams, we only need total word count\n if n == 1:\n self.total_count = sum(self.ngram_counts.values())\n # for bigrams, we need total count wrt each word. In our language, it is history count.\n elif n == 2:\n self.history_count = Counter()\n for k, v in self.ngram_counts.items():\n self.history_count[k[0]] = self.history_count[k[0]] + v\n # since we only count for the first word in the tuple, we will always\n # miss counting </s>. However, since the frequency of </s> is the same\n # as the frequency of <s>, we can simply assign it equal to it.\n self.history_count['</s>'] = self.history_count['<s>']", "def test_basics(self):\n self.report('Testing adding data, evaluation and marginal likelihood.' 
+\n ' Probabilistic test, might fail.')\n num_coeffs_vals = [2, 1, 4, 5] * 5\n num_tests = 0\n num_successes = 0\n for dataset in self.datasets:\n for dist_type in self.dist_types:\n for kernel_type in self.kernel_types[dist_type]:\n curr_num_coeffs = num_coeffs_vals.pop(0)\n curr_gp = build_nngp_with_dataset(dataset, kernel_type, curr_num_coeffs,\n dist_type)\n # Predictions & Marginal likelihood\n curr_preds, _ = curr_gp.eval(dataset[2], 'std')\n curr_gp_err = compute_average_prediction_error(dataset, curr_preds)\n const_err = compute_average_prediction_error(dataset, dataset[1].mean())\n lml = curr_gp.compute_log_marginal_likelihood()\n is_success = curr_gp_err < const_err\n num_tests += 1\n num_successes += is_success\n self.report(('(%s, ntr=%d, nte=%d):: GP-lml=%0.4f, GP-err=%0.4f, ' +\n 'Const-err=%0.4f. succ=%d')%(dataset[-1][:5], len(dataset[0]),\n len(dataset[2]), lml, curr_gp_err, const_err, is_success),\n 'test_result')\n succ_frac = num_successes / float(num_tests)\n self.report('Summary: num_successes / num_floats = %d/%d = %0.4f'%(num_successes,\n num_tests, succ_frac), 'test_result')\n assert succ_frac > 0.5", "def test_heavyhitters_init_wd(self):\n hh1 = HeavyHitters(num_hitters=1000, width=1000, depth=5)\n self.assertEqual(hh1.width, 1000)\n self.assertEqual(hh1.depth, 5)\n self.assertEqual(hh1.confidence, 0.96875)\n self.assertEqual(hh1.error_rate, 0.002)\n self.assertEqual(hh1.elements_added, 0)\n self.assertEqual(hh1.heavy_hitters, dict())\n self.assertEqual(hh1.number_heavy_hitters, 1000)", "def test_heavyhitters_init_ce(self):\n hh1 = HeavyHitters(num_hitters=1000, confidence=0.96875, error_rate=0.002)\n self.assertEqual(hh1.width, 1000)\n self.assertEqual(hh1.depth, 5)\n self.assertEqual(hh1.confidence, 0.96875)\n self.assertEqual(hh1.error_rate, 0.002)\n self.assertEqual(hh1.elements_added, 0)\n self.assertEqual(hh1.heavy_hitters, dict())\n self.assertEqual(hh1.number_heavy_hitters, 1000)", "def test_toCounts(self):\n a = Alphabet('abc')**2\n m = Probs([0.5,0.25,0.25,0.1,0.8,0.1,0.3,0.6,0.1], a)\n obs = m.toCounts(30)\n assert isinstance(obs, Counts)\n exp = Counts([[5.,2.5,2.5,1,8,1,3,6,1]], a)\n self.assertEqual(obs, exp)", "def test_count_all(self):", "def test_init(self):\n self.assertEqual(self.first_menu.calories, 2000)\n self.assertEqual(self.first_menu.proteins, 70)\n self.assertEqual(self.first_menu.fats, 60)\n self.assertEqual(self.first_menu.carbohydrates, 300)\n self.assertEqual(self.first_menu.daily_calories, 0)\n self.assertEqual(self.first_menu.daily_fats, 0)\n self.assertEqual(self.first_menu.daily_proteins, 0)\n self.assertEqual(self.first_menu.daily_carbohydrates, 0)\n self.assertEqual(len(self.first_menu.all_dishes), 6580)\n self.assertEqual(len(self.second_menu.all_dishes), 7344)", "def count_haplotypes(self):\n\n\t\tfor current_SNP, mutation_object in self.instance_mutation_population.iteritems():\n\t\t\t## count the occurrence of each haplotype, naive method\n\t\t\tpresent_haplotypes = mutation_object.get_haplotypes()\n\t\t\tpresent_calls = mutation_object.get_calls()\n\t\t\tpresent_conditions = mutation_object.get_conditions()\n\t\t\tfrequency = collections.Counter(present_haplotypes).most_common()\n\t\t\tmutation_object.set_frequency(frequency)\n\n\t\t\t## determine normal/expanded frequencies\n\t\t\tnormal_alleles = []; expanded_alleles = []\n\t\t\tnormal_calls = []; expanded_calls = []\n\t\t\tfor disease_tuple in present_conditions:\n\t\t\t\tif disease_tuple[1] == 'N': \n\t\t\t\t\tnormal_alleles.append(disease_tuple[0])\n\t\t\t\tif 
disease_tuple[1] == 'X':\n\t\t\t\t\texpanded_alleles.append(disease_tuple[0])\n\n\t\t\tfor call_vector in present_calls:\n\t\t\t\tfor sub_vector in [call_vector[1], call_vector[2]]:\n\t\t\t\t\tif sub_vector[1] == 'N':\n\t\t\t\t\t\tnormal_calls.append(call_vector[0])\n\t\t\t\t\tif sub_vector[1] == 'X':\n\t\t\t\t\t\texpanded_calls.append(call_vector[0])\n\n\t\t\t## count calls\n\t\t\tnormal_callfreq = collections.Counter(normal_calls).most_common()\n\t\t\tnormal_frequency = collections.Counter(normal_alleles).most_common()\n\t\t\texpanded_callfreq = collections.Counter(expanded_calls).most_common()\n\t\t\texpanded_frequency = collections.Counter(expanded_alleles).most_common()\n\t\t\tmutation_object.set_normalfreq(normal_frequency)\n\t\t\tmutation_object.set_expandedfreq(expanded_frequency)\n\t\t\tmutation_object.set_normalcalls(normal_callfreq)\n\t\t\tmutation_object.set_expandedcalls(expanded_callfreq)", "def test_suggestion_rankings(self):\n answers = {\"problem\": \"MALADIES_FONGIQUES\", \"rotation\": [], \"department\": \"01\"}\n engine = Engine(answers, [], [])\n practices = engine.calculate_results()\n suggestions = engine.get_suggestions(practices)\n\n # There should be two practices with weight 1.5\n self.assertEqual(len(suggestions), 3)\n weights = list(map(lambda x: x.weight, suggestions))\n self.assertEqual(len(list(filter(lambda x: x == 1.5, weights))), 2)", "def test_update_counts(self):\n\n # made a restaurant\n restaurant = enterRestaurant('Kate', 91871)\n\n # entered a predicate branch\n enterPredicateBranch(RestaurantPredicate.objects.all()[0].question, 0, 1, 1)\n PB = PredicateBranch.objects.all()[0]\n\n # entered a task\n enterTask(001, True, 1000, 100, RestaurantPredicate.objects.all()[0])\n\n # updated count of total answers and total no's\n updateCounts(PB, Task.objects.all()[0])\n\n # total answer should now be 2\n self.assertEqual(PB.returnedTotal,2)\n\n # entered another task\n enterTask(002, False, 1000, 60, RestaurantPredicate.objects.all()[0])\n\n # updated its counts of total answers and total no's\n updateCounts(PB, Task.objects.all()[1])\n\n # total answers should be 3 and total no's should be 2\n self.assertEqual(PB.returnedTotal,2.6)\n self.assertEqual(PB.returnedNo, 1.6)", "def test_constructor(self): \r\n \r\n self.assertEqual(investment([1,10,100], 1000).positions,[1,10,100])\r\n self.assertEqual(investment([1,10,100], 1000).num_trials, 1000)", "def test_returned_dictionary_count(legal_dict_fixture):\n t = legal_dict_fixture\n print(t)\n assert len(t) == 7", "def test_cars_count_success(self):\r\n self.assertEqual(self.cesar1.cars_count(), 2.0)\r\n self.assertEqual(self.cesar2.cars_count(), 0)\r\n self.assertEqual(self.cesar3.cars_count(), 0)", "def test_non_modulatory_phrases():\n assert len(piece1.non_modulatory_phrases) == 3\n assert len(piece2.non_modulatory_phrases) == 14\n assert len(piece5.non_modulatory_phrases) == 13\n assert len(piece7.non_modulatory_phrases) == 19\n assert len(piece8.non_modulatory_phrases) == 14\n assert len(piece14.non_modulatory_phrases) == 26\n assert len(piece45.non_modulatory_phrases) == 20", "def test_hallucinated_predictions(self):\n self.report('Testing hallucinated predictions for NNGP.')\n num_coeffs_vals = [2, 1, 4, 5] * 5\n for dataset in self.datasets:\n for dist_type in self.dist_types:\n for kernel_type in self.kernel_types[dist_type]:\n curr_num_coeffs = num_coeffs_vals.pop(0)\n curr_gp = build_nngp_with_dataset(dataset, kernel_type, curr_num_coeffs,\n dist_type)\n curr_preds, curr_stds = 
curr_gp.eval(dataset[2][1:], 'std')\n ha_preds, ha_stds = curr_gp.eval_with_hallucinated_observations(\n dataset[2][1:], dataset[0][4:] + dataset[2][:1], 'std')\n assert np.linalg.norm(curr_preds - ha_preds) < _TOL\n assert np.all(curr_stds >= ha_stds)", "def test_logistic_counts():\n\n #Form the count version of the problem\n trials = np.random.binomial(5,0.5,100)+1\n successes = np.random.binomial(trials,0.5,len(trials)) \n n = len(successes)\n p = 2*n\n X = np.random.normal(0,1,n*p).reshape((n,p))\n\n loss = rr.logistic_loglike.linear(X, successes=successes, trials=trials)\n penalty = rr.quadratic_loss(p, coef=1.)\n\n prob1 = rr.container(loss, penalty)\n solver1 = rr.FISTA(prob1)\n solver1.fit()\n solution1 = solver1.composite.coefs\n \n #Form the binary version of the problem\n Ynew = []\n Xnew = []\n\n for i, (s,n) in enumerate(zip(successes,trials)):\n Ynew.append([1]*s + [0]*(n-s))\n for j in range(n):\n Xnew.append(X[i,:])\n Ynew = np.hstack(Ynew)\n Xnew = np.vstack(Xnew)\n\n loss = rr.logistic_loglike.linear(Xnew, successes=Ynew)\n penalty = rr.quadratic_loss(p, coef=1.)\n\n prob2 = rr.container(loss, penalty)\n solver2 = rr.FISTA(prob2)\n solver2.fit()\n solution2 = solver2.composite.coefs\n\n \n npt.assert_array_almost_equal(solution1, solution2, 3)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that get_descendants works correctly
def test_get_descendants(self): # check that a high-level node returns the expected set of nodes self.assertEqual(self.graph.get_descendants("HP:0000118"), \ set(['HP:0000707', 'HP:0002011', 'HP:0000924'])) # check that a terminal node doesn't have any descendants self.assertEqual(self.graph.get_descendants("HP:0000924"), \ set([]))
[ "def test_children_of(self):\n expected = [self.second_category, self.third_category, ]\n self.assertEqual(expected, models.Category.objects.children_of(self.root_category))\n\n expected = [self.third_category, ]\n self.assertEqual(expected, models.Category.objects.children_of(self.second_category))", "def test_children_of_leaf(self):\n expected = []\n self.assertEqual(expected, models.Category.objects.children_of(self.third_category))", "def test_children_generator(self):\n button = self.dlg.by(class_name=\"Button\", name=\"OK\").find()\n children = [child for child in button.iter_children()]\n self.assertEqual(len(children), 1)\n self.assertEqual(children[0].class_name(), \"TextBlock\")", "def test_nonTipChildren(self):\n self.assertEqual(self.Empty.nonTipChildren(), [])\n self.assertEqual(self.Child.nonTipChildren(), [])\n self.assertEqual(self.OneChild.nonTipChildren(), [])\n \n nodes, tree = self.TreeNode, self.TreeRoot\n a = nodes['a']\n b = nodes['b']\n c = nodes['c']\n d = nodes['d']\n e = nodes['e']\n f = nodes['f']\n g = nodes['g']\n h = nodes['h']\n\n self.assertEqual(g.nonTipChildren(), [])\n self.assertEqual(f.nonTipChildren(), [])\n self.assertEqual(e.nonTipChildren(), [])\n self.assertEqual(d.nonTipChildren(), [])\n self.assertEqual(c.nonTipChildren(), [f])\n self.assertEqual(b.nonTipChildren(), [c])\n self.assertEqual(h.nonTipChildren(), [])\n self.assertEqual(a.nonTipChildren(), [b])", "def test_parents(self):\n expected = []\n self.assertEqual(expected, self.root_category.parents())\n\n expected = [self.root_category, ]\n self.assertEqual(expected, self.second_category.parents())\n\n expected = [self.root_category, self.second_category, ]\n self.assertEqual(expected, self.third_category.parents())", "def get_descendants(self) -> List['Node']:\n descendants = self.children[:]\n for child in self.children:\n descendants += child.get_descendants()\n return descendants", "def list_descendants(self):\n return self._list(self.client, descendants_of_group=self.name)", "def test_unpublish_descendants_view(self):\n # Get unpublish page\n response = self.client.get(reverse('wagtailadmin_pages:unpublish', args=(self.test_page.id, )))\n\n # Check that the user received an unpublish confirm page\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'wagtailadmin/pages/confirm_unpublish.html')\n # Check the form contains the checkbox field include_descendants\n self.assertContains(response, '<input id=\"id_include_descendants\" name=\"include_descendants\" type=\"checkbox\">')", "def test_get_tree(self):\n pass", "def test_contains_with_descendants(self):\n\n self.assert_selector(\n self.MARKUP,\n 'body :-soup-contains(\" that \")',\n ['1', '2'],\n flags=util.HTML\n )", "def iter_descendants(self, **kwargs):\n desc_elements = self.element_info.iter_descendants(**kwargs)\n for element_info in desc_elements:\n yield self.backend.generic_wrapper_class(element_info)", "def test_unpublish_descendants_view(self):\n # Get unpublish page\n response = self.client.get(reverse('wagtailadmin_pages:unpublish', args=(self.page.id, )))\n\n # Check that the user received an unpublish confirm page\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'wagtailadmin/pages/confirm_unpublish.html')\n # Check the form does not contain the checkbox field include_descendants\n self.assertNotContains(response, '<input id=\"id_include_descendants\" name=\"include_descendants\" type=\"checkbox\">')", "def all_proper_children(self, obj):\n return 
self.all_children(obj)[1:]", "def test_hierarchy(self):\n expected = [self.root_category, ]\n self.assertEqual(expected, self.root_category.hierarchy())\n\n expected = [self.root_category, self.second_category, ]\n self.assertEqual(expected, self.second_category.hierarchy())\n\n expected = [self.root_category, self.second_category, self.third_category, ]\n self.assertEqual(expected, self.third_category.hierarchy())", "def test_ancestors(self):\n nodes, tree = self.TreeNode, self.TreeRoot\n self.assertEqual(nodes['a'].ancestors(), [])\n self.assertEqual(nodes['b'].ancestors(), [nodes['a']])\n self.assertEqual(nodes['d'].ancestors(), nodes['f'].ancestors())\n self.assertEqual(nodes['g'].ancestors(), \\\n [nodes['f'], nodes['c'], nodes['b'], nodes['a']])", "def test_duplicate_nodes_with_excluded_descendants(self):\n new_channel = testdata.channel()\n\n # simulate a clean, right-after-publish state to ensure only new channel is marked as change\n self.channel.main_tree.changed = False\n self.channel.main_tree.save()\n\n excluded_node_id = self.channel.main_tree.get_children().first().node_id\n\n self.channel.main_tree.copy_to(\n new_channel.main_tree, excluded_descendants={excluded_node_id: True}\n )\n\n self.assertEqual(\n new_channel.main_tree.get_children().last().get_children().count(),\n self.channel.main_tree.get_children().count() - 1,\n )", "def test_tipChildren(self):\n self.assertEqual(self.Empty.tipChildren(), [])\n self.assertEqual(self.Child.tipChildren(), [])\n self.assertEqual(self.OneChild.tipChildren(), [self.Child])\n \n nodes, tree = self.TreeNode, self.TreeRoot\n a = nodes['a']\n b = nodes['b']\n c = nodes['c']\n d = nodes['d']\n e = nodes['e']\n f = nodes['f']\n g = nodes['g']\n h = nodes['h']\n\n self.assertEqual(g.tipChildren(), [])\n self.assertEqual(f.tipChildren(), [g])\n self.assertEqual(e.tipChildren(), [])\n self.assertEqual(d.tipChildren(), [])\n self.assertEqual(c.tipChildren(), [d,e])\n self.assertEqual(b.tipChildren(), [])\n self.assertEqual(h.tipChildren(), [])\n self.assertEqual(a.tipChildren(), [h])", "def descendants(self) -> QuerySet['TreeModel']:\n return self.__class__.objects.exclude(id=self.id).filter(path__ancestor=self.path)", "def descendants(self, **kwargs):\n desc_elements = self.element_info.descendants(**kwargs)\n return [self.backend.generic_wrapper_class(element_info) for element_info in desc_elements]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that find_common_ancestors works correctly
def test_find_common_ancestors(self): # check that two terms on different arms only return their common # ancestors self.assertEqual(self.graph.find_common_ancestors('HP:0000924', \ 'HP:0000707'), set(["HP:0000001", "HP:0000118"])) # check that two identical terms return their list of ancestors self.assertEqual(self.graph.find_common_ancestors('HP:0000707', \ 'HP:0000707'), set(["HP:0000001", "HP:0000118", "HP:0000707"])) # check that if one of the two terms is not in the HPO graph, then we # return an empty set self.assertEqual(self.graph.find_common_ancestors('HP:9999999', \ 'HP:0000707'), set([]))
[ "def test_common_ancestors(self):\n tree = tree_from_tuples(\n (1,\n (3,\n (4, None, None),\n (5, None, None)\n ),\n (6,\n (15, None, None),\n (7,\n None,\n (16, None, None)\n )\n )\n )\n )\n node_15 = tree.right.left\n node_16 = tree.right.right.right\n node_4 = tree.left.left\n assert node_15\n assert node_16\n assert node_4\n self.assertEqual(common_ancestor(node_15, node_16).value, 6)\n self.assertEqual(common_ancestor(node_4, node_16).value, 1)", "def common_ancestors(self, node1, node2):\n node1_path = self.path(node1)\n node2_path = self.path(node2)\n node2_path_names = [node.name for node in node2_path]\n ca = [node for node in node1_path if node.name in node2_path_names]\n return ca", "def test_ancestors(self):\n nodes, tree = self.TreeNode, self.TreeRoot\n self.assertEqual(nodes['a'].ancestors(), [])\n self.assertEqual(nodes['b'].ancestors(), [nodes['a']])\n self.assertEqual(nodes['d'].ancestors(), nodes['f'].ancestors())\n self.assertEqual(nodes['g'].ancestors(), \\\n [nodes['f'], nodes['c'], nodes['b'], nodes['a']])", "def test_Ancestors(self):\n result = self.tx[\"7\"].ancestors()\n tax_ids = [taxon_obj.TaxonId for taxon_obj in result]\n self.assertEqual(tax_ids, [6, 2, 1])", "def common_ancestor(parent_list_0, parent_list_1):\n for b in parent_list_0[::-1]:\n if b in parent_list_1:\n return b\n return None", "def common_ancestor(node_a, node_b):\n ancestors_a = ancestors(node_a)\n ancestors_b = ancestors(node_b)\n lowest_ancestors = ancestors_a if node_a.level > node_b.level else ancestors_b\n for _ in range(abs(node_a.level - node_b.level)):\n next(lowest_ancestors)\n same = (pa for pa, pb in zip(ancestors_a, ancestors_b) if pa == pb)\n return next(same)", "def test_last_common_ancestor(self):\n assert self.tx[9].last_common_ancestor(self.tx[9]) is self.tx[9]\n assert self.tx[9].last_common_ancestor(self.tx[7]) is self.tx[7]\n assert self.tx[9].last_common_ancestor(self.tx[10]) is self.tx[6]\n assert self.tx[9].last_common_ancestor(self.tx[1]) is self.tx[1]", "def test_lastCommonAncestor(self):\n nodes, tree = self.TreeNode, self.TreeRoot\n a = nodes['a']\n b = nodes['b']\n c = nodes['c']\n d = nodes['d']\n e = nodes['e']\n f = nodes['f']\n g = nodes['g']\n h = nodes['h']\n \n self.assertEqual(a.lastCommonAncestor(a), a)\n self.assertEqual(a.lastCommonAncestor(b), a)\n self.assertEqual(a.lastCommonAncestor(g), a)\n self.assertEqual(a.lastCommonAncestor(h), a)\n\n self.assertEqual(b.lastCommonAncestor(g), b)\n self.assertEqual(b.lastCommonAncestor(d), b)\n self.assertEqual(b.lastCommonAncestor(a), a)\n self.assertEqual(b.lastCommonAncestor(h), a)\n\n self.assertEqual(d.lastCommonAncestor(f), c)\n self.assertEqual(d.lastCommonAncestor(g), c)\n self.assertEqual(d.lastCommonAncestor(a), a)\n self.assertEqual(d.lastCommonAncestor(h), a)\n\n self.assertEqual(g.lastCommonAncestor(g), g)\n self.assertEqual(g.lastCommonAncestor(f), f)\n self.assertEqual(g.lastCommonAncestor(e), c)\n self.assertEqual(g.lastCommonAncestor(c), c)\n self.assertEqual(g.lastCommonAncestor(b), b)\n self.assertEqual(g.lastCommonAncestor(a), a)\n self.assertEqual(g.lastCommonAncestor(h), a)\n\n t = TreeNode('h')\n for i in [a,b,c,d,e,f,g,h]:\n self.assertEqual(i.lastCommonAncestor(t), None)\n self.assertEqual(t.lastCommonAncestor(i), None)\n\n u = TreeNode('a', Children=[t])", "def test_parents(self):\n expected = []\n self.assertEqual(expected, self.root_category.parents())\n\n expected = [self.root_category, ]\n self.assertEqual(expected, self.second_category.parents())\n\n expected = [self.root_category, 
self.second_category, ]\n self.assertEqual(expected, self.third_category.parents())", "def get_ancestors(self, **kw):\n return type(self).objects.get_ancestors(self, **kw)", "def get_all_ancestors(node):\n return node.iterancestors()", "def get_ancestor(self, cs1, cs2):\n raise NotImplementedError(\"Abstract method\")", "def check_relatives(self):\n for name in self.people:\n person = self.people[name]\n if person.spouse:\n person.children.update(person.spouse.children)\n for child in person.children:\n child.parents.add(person.spouse)\n for sibling in person.siblings:\n person.parents.update(sibling.parents)\n for parent in person.parents:\n parent.children.add(sibling)\n sibling.parents.update(person.parents)\n for parent in sibling.parents:\n parent.children.add(person)", "def _is_ancestor(d: Path, f: Path) -> bool:\n return str(f.resolve()).startswith(str(d.resolve()))", "def _closest_common_ancestor(*args) -> type:\n cls_list = map(lambda obj: obj if isinstance(obj, type) else type(obj), args)\n mros = [cls.mro() for cls in cls_list]\n base = min(mros, key=len)\n mros.remove(base)\n for cls in base:\n if all(cls in mro for mro in mros):\n return cls\n return None # Note: safeguard, `object` always shared (never called) # pragma: no cover", "def get_ancestors(entity, entity_type, base):\n ancestors = etree.Element('ancestors')\n entity.insert(0, ancestors)\n mixins = ['ancestor_titles', 'genre_groupings']\n for ancestor in etree.ElementTree(entity).xpath('/n:' + entity_type + '/n:ancestor_titles/*', namespaces=NSMAP):\n # keep requests to below 100/min\n sleep(0.6)\n successful = False\n while not successful:\n ancestor_response = get_response(base, mixins, {'pid': ancestor.xpath('n:pid/text()', namespaces=NSMAP)[0]}, '1')\n if ancestor_response.status_code != 200:\n sleep(10)\n elif ancestor_response.status_code == 200:\n response_xml = infoset(ancestor_response)\n ancestors.append(response_xml.xpath('/n:nitro/n:results/n:' + etree.QName(ancestor).localname, namespaces=NSMAP)[0])\n successful = True\n else:\n successful = True\n return entity", "def lowest_common_ancestor_depth(c):\n ancestor1 = np.array(c[0].sentence.xpath.split('/'))\n ancestor2 = np.array(c[1].sentence.xpath.split('/'))\n min_len = min(ancestor1.size, ancestor2.size)\n return min_len - np.argmin(ancestor1[:min_len] == ancestor2[:min_len])", "def get_ancestors(graph: nx.MultiDiGraph, node: str, relations: List[str] = None) -> List[str]:\n seen = []\n nextnodes = [node]\n while len(nextnodes) > 0:\n nn = nextnodes.pop()\n if nn not in seen:\n seen.append(nn)\n nextnodes += get_parents(graph, nn, relations=relations)\n seen.remove(node)\n return seen", "def lowest_common_ancestor(self, node1, node2):\n ca = self.common_ancestors(node1, node2)\n if len(ca) > 0:\n return ca[-1]\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the OpenAQ instance.
def __init__(self, version='v1', **kwargs): self._baseurl = 'https://api.openaq.org' super(OpenAQ, self).__init__(version=version, baseurl=self._baseurl)
[ "def initialize(self):\n try:\n api_key = self._pomodoro_service.get_config(\"task.asana\", \"api_key\")\n self.asana_api = self._get_asana_api(api_key)\n except Exception as ex:\n logger.error(\"Error initializing plugin: {0}\".format(ex))", "def __init__(self):\n global mq_broker_url\n global params\n self.amqp_url = mq_broker_url\n self._connection = pika.BlockingConnection(params)\n self._channel = self.connection.channel()\n self._knownQueues = []\n self._knownExchanges = []", "def _initialize(self):\n n_events = self.n_events\n delta_t = int(self.delta_t)\n if self.mode == \"delta_t\":\n n_events = 0\n elif self.mode == 'n_events':\n n_events = self.n_events\n delta_t = 0\n self.buffer_producer = EventsBufferProducer(\n self._process_batch, event_count=n_events, time_slice_us=delta_t)\n self._event_buffer = deque()\n self.seek_time(self.start_ts)", "def __initialize__(self, agent):\n msg = comm.RequestInitializationMessage(agent_id=agent.agent_id)\n agent.communicate(msg)", "def __init__(self):\n from pokeman import _current_os\n LOGGER.debug('Initializing Pokeman on current os: {OS}'.format(OS=_current_os))\n self.POKER_ID = str(uuid4())\n self.connection_parameters = None\n self.MSCID = 'main_{POKER_ID}'.format(POKER_ID=self.POKER_ID)\n self.connections = {\n 'sync': {\n self.MSCID: None\n },\n 'async': {}\n }\n\n self.MSC = lambda: self.connections['sync'][self.MSCID]\n self._declared = False\n self.channels = []\n self.cleaned_up = False\n _heapq_.ResourceHeapQ.create_database(poker_id=self.POKER_ID)\n atexit.register(self.cleanup)\n LOGGER.debug('Initializing Pokeman on current os: {OS} OK!'.format(OS=_current_os))", "def test_init(self):\n test_seq = 'AAGCTTGAGGTCCAA'\n # Testing init with String sequence\n amp = Amplicon(test_seq)\n self.assertTrue(str(amp.sequence) == test_seq)\n self.assertTrue(amp.fP == None)\n self.assertTrue(amp.rP == None) \n # Testing init with Sequence object sequence\n amp = Amplicon(Sequence(test_seq)) \n self.assertTrue(str(amp.sequence) == test_seq)\n self.assertTrue(amp.fP == None)\n self.assertTrue(amp.rP == None) \n # Testing with forward primer\n amp = Amplicon(Sequence(test_seq), Forward_Primer(\"AAG\", 0))\n self.assertTrue(amp.fP == Forward_Primer(\"AAG\", 0))\n # Testing with reverse primer\n amp = Amplicon(Sequence(test_seq), fP=None, rP=Reverse_Primer(\"TTG\", 1))\n self.assertTrue(amp.fP == None)\n self.assertTrue(amp.rP == Reverse_Primer(\"TTG\", 5))", "def initialize(self):\n if self.dummy:\n self.logger.info('Dummy device initialized')\n else:\n self.rsc = serial.Serial(port=self._port,\n baudrate=self.DEFAULTS['baudrate'],\n timeout=self.DEFAULTS['read_timeout'],\n write_timeout=self.DEFAULTS['write_timeout']\n )\n\n self.logger.info('Initialized device AOTF at port {}.'.format(self._port))\n self._is_initialized = True", "async def init(self):\n pass", "def performOpen(self, **kw):\n # connect through deviceID\n apilevel = 6 # The API level supported by this driver\n (daq, device, props) = zhinst.utils.create_api_session(self.deviceID, apilevel, \n required_devtype='UHF', \n required_options=['AWG'])\n zhinst.utils.api_server_version_check(daq)\n # Create a base configuration: Disable all available outputs, awgs, demods, scopes,...\n zhinst.utils.disable_everything(daq, device)\n self.daq = daq\n self.device = device\n self.props = props", "def __init__ (self):\n # Create a connection to S3\n self.handle = self.connect()", "def __init__(self, amqp_url):\n self._connection = None\n self._channel = None\n self._closing = False\n 
self._consumer_tag = None\n self._url = amqp_url", "def __init__(self, debug=False):\n self._debug = debug\n \n self._environment = {}\n self._parse_agi_environment()", "def initialize(self):\n self._change_state(\"initialize\")", "def _init():\n global _lib, _encoding, _goto_index, _goto_parent, _goto_next_elem\n # Initialize the CODA C library\n clib = _get_c_library_filename()\n _lib = _ffi.dlopen(clib)\n _goto_index = _lib.coda_cursor_goto_record_field_by_index\n _goto_parent = _lib.coda_cursor_goto_parent\n _goto_next_elem = _lib.coda_cursor_goto_next_array_element\n\n # Import constants\n for attrname in dir(_lib):\n attr = getattr(_lib, attrname)\n if isinstance(attr, int):\n globals()[attrname] = attr\n\n if os.getenv('CODA_DEFINITION') is None:\n # Set coda definition path relative to C library\n basename = os.path.basename(clib)\n if platform.system() == \"Windows\":\n dirname = None\n else:\n dirname = os.path.dirname(clib)\n relpath = \"../share/coda/definitions\"\n coda_set_definition_path_conditional(basename, dirname, relpath)\n\n # Set default encoding.\n _encoding = \"ascii\"\n\n init()", "def initialize(self):\n self._initialize_dfa()", "def __init__(self, conf):\n self.conf = conf\n self.q_route_spec = Queue.Queue()", "def __shared_initialize__(self, **kwargs):", "def __init__(self, amqp_url, queue, routing_key, exchange='pika', exchange_type='topic'):\n\n self.exchange = exchange\n self.exchange_type = exchange_type\n self.queue = queue\n self.routing_key = routing_key\n self._connection = None\n self._channel = None\n self._closing = False\n self._consumer_tag = None\n self._url = amqp_url", "def initialize(self):\n self.logger.info(\"Initializing connection to the MQTT broker.\")\n self.client = AWSIoTMQTTClient(self.client_id)\n self.client.configureEndpoint(self.endpoint, portNumber=8883)\n self.client.configureCredentials(CAFilePath=self.root_ca, KeyPath=self.private_key,\n CertificatePath=self.client_certificate)\n self.client.configureConnectDisconnectTimeout(self.conn_disconnect_timeout)\n self.client.configureMQTTOperationTimeout(self.mqtt_oper_timeout)\n if self.client.connect():\n self.logger.info(\"Connected!\")", "def autonomousInit(self) -> None:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Provides data about individual measurements
def measurements(self, **kwargs):
[ "def measurements(self):\n return dict([(x['name'], x) for x in self.meta['measurements']])", "def get_data(self,sensor):\n if sensor.id in self.measurements:\n return self.measurements[sensor.id]\n else: raise Exception(\"Sensor has no measurements available\")", "def get_measurements(self, id, key):\n m = self._get_measurement_raw(id, key)\n m = m.get('body', {}).get('measuregrps', {})\n if not m:\n return\n\n for entry in m:\n # Category 1 is actual measure, as opposed to objective.\n # Skip all others.\n if entry['category'] != 1:\n continue\n date = datetime.datetime.fromtimestamp(entry['date'])\n for measure in entry['measures']:\n name = measure['type']\n name = self.TYPES.get(name, str(name))\n # actual value = value * 10^unit\n val = measure.get('value', 0) * (10 ** measure.get('unit', 0))\n yield date, name, val", "def get_measurement_map(self) -> dict:\n\n data = self.get_map()\n return data[\"measurements\"]", "def getData(self):\n return {\n 'temperature': self.temperature,\n 'humidity': self.humidity,\n 'pressure': self.pressure,\n 'sum': self.getMeasurementSum(),\n 'message': self.message\n }", "def populate_measurements(self):\n self._populate(settings.ROOT_MEASUREMENTS, 'measurement', 'meas')", "def __init__(self, measurements):\n\n self.measurements = measurements", "def get_monitor_data():\n md = {}\n md['time'] = current_milli_time()\n\n return md", "def format_measurements_request(self, data):\n measurements = []\n for sensor_name, sensor_data in data['measurements'].items():\n for name, value in sensor_data.items():\n measurement = {\n 'sensor': sensor_name,\n 'name': name,\n 'value': value,\n }\n measurements.append(measurement)\n\n data['measurements'] = measurements", "def get_data(self):\n\n return self.metric_data", "def measure(self, raw=False):\n data = self.send_cmd(SHT30.MEASURE_CMD, 6); \n\n if raw:\n return data\n\n t_celsius = (((data[0] << 8 | data[1]) * 175) / 0xFFFF) - 45 + self.delta_temp;\n rh = (((data[3] << 8 | data[4]) * 100.0) / 0xFFFF) + self.delta_hum;\n return t_celsius, rh", "def get_terror_waves_info(self):", "def api_measurements(mtype, mdate = None, mhours = None):\n mdatetime = get_datetime(mdate, mhours)\n data = Measurement.all(mtype, mdatetime)\n measurements = [m.to_geojson() for m in data]\n return jsonify(measurements=measurements)", "def get_data(self):\n\t\treturn \"{} {} {}\".format(self.data.latitude, self.data.longitude, self.data.altitude)", "def data(self):\n self.update() # Updates sensor data before new reading\n sensor = OrderedDict()\n sensor['sensorStatus'] = self.status\n sensor['name'] = self.name\n sensor['reading'] = self.__format(self.reading)\n sensor['units'] = self.units\n lower, upper = self.non_critical\n sensor['lowerNC'] = self.__format(lower)\n sensor['upperNC'] = self.__format(upper)\n lower, upper = self.critical\n sensor['lowerCT'] = self.__format(lower)\n sensor['upperCT'] = self.__format(upper)\n lower, upper = self.non_recoverable\n sensor['lowerNR'] = self.__format(lower)\n sensor['upperNR'] = self.__format(upper)\n return sensor", "def show_measurement(self):\n return f\"{int(self.amount)} {self.unit} {self.ingredient.name}\"", "def _calculate_stats(self):\n\n return {\n 'mean': numpy.mean(self.data, axis=self.time_axis),\n 'std': numpy.std(self.data, axis=self.time_axis),\n 'n': self.data.shape[self.time_axis],\n 's1': numpy.sum(self.data, axis=self.time_axis),\n 's2': numpy.sum(self.data ** 2, axis=self.time_axis),\n }", "def __add_measurement(self, name, dtype, nodata, units):\n if name in 
self.measurements.keys():\n raise IndexError('measurement {} already exists'.format(name))\n self.meta['measurements'].append({'name': name,\n 'dtype': dtype,\n 'units': units,\n 'nodata': nodata})", "def generateTelemetry(self) -> SensorData:\n\t\tsd = SensorData(sensorType = SensorData.HUMIDITY_SENSOR_TYPE)\t\t\n\t\tval = self.i2cBus.read_word_data(self.pressureAddr, 0, 0);\n\t\tsd.setValue(float(val))\n\t\treturn sd" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds monthly accrual days to all users who have not yet accrued days in the current month
def accrue_days(): # Get the current month in ISO format today = date.today() current_month = today.strftime('%Y-%m-01T00:00:00.000Z') # Get profiles that have not been updated yet this month profiles = Profile.objects.filter(update_timestamp__lt=current_month) for profile in profiles: # Get the monthly accrual days and max allowable accrual days monthly_accrual_days = profile.annual_accrual_days / 12 max_allowable_accrual_days = profile.max_allowable_accrual_days # Add the monthly accrual days to the remaining accrual days profile.remaining_accrual_days += monthly_accrual_days # If the remaining accrual days exceeds the max, set it to the max if profile.remaining_accrual_days > max_allowable_accrual_days: profile.remaining_accrual_days = max_allowable_accrual_days profile.save()
[ "async def count_monthly_users(self) -> int:\n thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)\n return await self.db_pool.runInteraction(\n \"count_monthly_users\", self._count_users, thirty_days_ago\n )", "def inactive_lost_accounts_last_month(self):\n if not hasattr(self, '_inactive_lost_accounts_last_month'):\n accounts = []\n thirty_one_days_ago = datetime.datetime.now() - datetime.timedelta(31)\n events = LifecycleEvent.objects.filter(\n Q(account__in=self.accounts, type=3, date_created__gte=thirty_one_days_ago) | Q(\n account__in=self.accounts, type=5, date_created__gte=thirty_one_days_ago))\n\n for event in events:\n if event.account not in accounts and event.account.status != 1:\n accounts.append(event.account)\n self._inactive_lost_accounts_last_month = accounts\n\n return self._inactive_lost_accounts_last_month", "def add_monthly_availability():\n input_json = request.json\n year = input_json['year']\n month = input_json['month']\n doctor_id = input_json['doctor_id']\n\n print(\"Quick assigning monthly event for Doctor No.{} on {}-{}\".format(doctor_id,year,month))\n doctor_calendar.insertMonthlyEvents(int(year),int(month),int(doctor_id))\n\n return jsonify(input_json)", "def month_expense(self, month: int, year: int, user_id: int) -> QuerySet:\n return self.by_user(user_id).filter(date__month=month, date__year=year)", "def advance(self):\n max_days = self.months[self.month - 1]\n if self.month == 2 and self.leapyear(self.year):\n max_days += 1\n if self.day == max_days:\n self.day = 1\n if self.month == 12:\n self.month = 1\n self.year += 1\n else:\n self.month += 1\n else:\n self.day += 1", "def set_onboarding_allocated_hours_this_month(self):\n accounts = Client.objects.filter(status=0)\n for account in accounts:\n now = datetime.datetime.now()\n account.onboarding_hours_allocated_this_month_field = account.onboarding_hours_remaining_total()\n account.onboarding_hours_allocated_updated_timestamp = now\n account.save()\n\n return 'set_onboarding_allocated_hours_this_month'", "def last_n_months_expense(self, month_count: int, user_id: int) -> QuerySet:\n last_month = self.last_month()\n start_date: date = date(*year_month_before(month_count), 1)\n return self.by_user(user_id).filter(date__gte=start_date, date__lte=last_month)", "def expire(self, request_user):\n if self.status and \\\n self.status_detail == 'active' and \\\n self.approved:\n self.status_detail = 'expired'\n self.expiration_dt = datetime.now()\n self.save()\n\n memberships = MembershipDefault.objects.filter(\n corporate_membership_id=self.id\n )\n for membership in memberships:\n membership.expire(request_user)\n return True\n return False", "def add_users(self, *users):\n # silently ignores anonymous and inactive users so that any that are\n # legit get updated.\n from common.djangoapps.student.models import CourseAccessRole # lint-amnesty, pylint: disable=redefined-outer-name, reimported\n for user in users:\n if user.is_authenticated and user.is_active and not self.has_user(user):\n entry = CourseAccessRole(user=user, role=self._role_name, course_id=self.course_key, org=self.org)\n entry.save()\n if hasattr(user, '_roles'):\n del user._roles", "def __pay_customer_accruals():\n accrual_payments = get_accrual_payouts()\n\n if not accrual_payments:\n print \"Customer has not yet met the threshold for accrual payout\"\n return\n\n for accrual_payment in accrual_payments:\n invoice_ids = accrual_payment['invoice_ids'].split(',')\n update_invoice_accrual_paid_date(invoice_ids)\n\n 
customer_name = get_customer_name(\n str(accrual_payment['customer_id']).strip()\n )\n\n pay_out_amount = Decimal(\n accrual_payment['total_accrual_amt']\n ).quantize(Decimal('1.00'))\n\n print (\"Congrats {customer} will get a payment in the amount of \"\n \"${amount} from invoice ids {invoice_ids}\".format(\n customer=customer_name,\n amount=pay_out_amount,\n invoice_ids=str(accrual_payment['invoice_ids']).strip()\n ))\n\n sys.exit()", "def recalcuate_days(self, cr, uid, ids, context=None):\n employee_mission_line_obj = self.pool.get('hr.employee.mission.line')\n for rec in self.browse(cr, uid, ids, context=context):\n for line in rec.mission_line:\n new_amount = employee_mission_line_obj.onchange_days(cr, uid, ids, line.days, line.employee_id.id, line.allow_state, rec.mission_id.id, rec.allow_state.id, rec.type_mission)\n employee_mission_line_obj.write(cr, uid, [line.id], new_amount['value'], context=context)\n return True", "def get_users_attempt_per_month_distribution():\n try:\n users_per_month_distribution = list()\n sorted_certifications_data = sorted(certifications_data, key=lambda i: i['startDate'])\n \n for month, attempts in groupby(sorted_certifications_data, key=lambda i: i['startDate'].split(\"T\")[0][:7]):\n distribution = dict()\n distribution[\"month\"] = month\n distribution[\"total_number_of_users\"] = len(list(attempts))\n\n users_per_month_distribution.append(distribution)\n \n return UserAttemptPerMonthDistributionListResponse().dump({\"users_per_month_distribution\": users_per_month_distribution})\n except Exception as e:\n print(\"Error at /api/v1/get-user-attempt-per-month-distribution: \", e)", "def influence_access_timeline(active_users, first_monday, last_monday):\n\n txns = []\n x_data_txns = []\n events_list = []\n feeds_list = []\n for t in TechCashTransaction.objects.filter(timestamp__range=(first_monday, last_monday), location__type=Location.EATERY, user__in=active_users).order_by(\"timestamp\"):\n x_data_txns.append(t.timestamp)\n txns.append(t)\n\n x_data_events = []\n for e in Event.objects.filter(timestamp__range=(first_monday, last_monday)).filter(user__in=active_users).order_by(\"timestamp\"):\n # plot all events by color\n x_data_events.append(e.timestamp)\n events_list.append(e.action) \n \n # find number of unique users that have events\n num_mobile_users = Event.objects.filter(timestamp__range=(first_monday, last_monday)).order_by('user').values('user').distinct().count()\n for u in active_users:\n print \"User %d: %d\"%(u, Event.objects.filter(timestamp__range=(first_monday, last_monday)).filter(user__id=u).count())\n \n x_data_feeds = []\n for e in FeedEvent.objects.filter(timestamp__range=(first_monday, last_monday)).filter(user__in=active_users).order_by(\"timestamp\"):\n x_data_feeds.append(e.timestamp)\n feeds_list.append(e.action)\n\n # find number of unique users that looked at feeds \n num_feed_users = FeedEvent.objects.filter(timestamp__range=(first_monday, last_monday)).order_by('user').values('user').distinct().count()\n for u in active_users:\n print \"User %d: %d\"%(u, FeedEvent.objects.filter(timestamp__range=(first_monday, last_monday)).filter(user__id=u).count())\n \n print \"Num mobile users:\", num_mobile_users\n print \"Num feed users:\", num_feed_users\n\n years = mdates.YearLocator()\n fridays = mdates.WeekdayLocator(byweekday=mdates.FR)\n months = mdates.MonthLocator() # every month\n weekFmt = mdates.DateFormatter('%b-%d')\n days = mdates.DayLocator()\n\n fig1 = plt.figure(figsize=(15,10))\n 
fig1.subplots_adjust(hspace=0.3)\n ax1 = fig1.add_subplot(211)\n ax1.plot(x_data_events, events_list, \".\")\n ax1.set_title(\"Access event time line\")\n # format the ticks\n labels = ax1.get_xticklabels() \n for label in labels: \n label.set_rotation(45) \n ax1.xaxis.set_major_locator(fridays)\n ax1.xaxis.set_major_formatter(weekFmt)\n ax1.xaxis.set_minor_locator(days)\n ax1.autoscale_view()\n # format the coords message box\n #def price(x): return '$%1.2f'%x\n #ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')\n #ax.fmt_ydata = price\n ax1.grid(True)\n\n ax2 = fig1.add_subplot(212)\n ax2.plot(x_data_feeds, feeds_list, \".\")\n ax2.set_title(\"Feed access time line\")\n # format the ticks\n labels = ax2.get_xticklabels() \n for label in labels: \n label.set_rotation(45) \n ax2.xaxis.set_major_locator(fridays)\n ax2.xaxis.set_major_formatter(weekFmt)\n ax2.xaxis.set_minor_locator(days)\n ax2.autoscale_view()\n # format the coords message box\n #def price(x): return '$%1.2f'%x\n #ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')\n #ax.fmt_ydata = price\n ax2.grid(True)\n\n #fig.autofmt_xdate()\n fig1.savefig(PREFIX_IMG+\"influence_timeline.%s\"%img_type, bbox_inches=\"tight\")\n\n fig1.show()", "def striked_off_months(self, joining_date,start_date,end_date,last_date_of_month,month_year_obj):\n fee_month_obj = self.env['fee.month']\n if start_date <= joining_date <= end_date:\n cal_date = joining_date\n else:\n cal_date = start_date\n after_joining_months = []\n cal_month = self.months_between(cal_date, last_date_of_month)\n for count_month in cal_month:\n month_data = fee_month_obj.search([('name', '=', count_month[0]),\n ('year', '=', count_month[1]),\n ('leave_month', '=', False),\n ('batch_id', '=', self.academic_year_id.id)])\n if len(month_data) > 1:\n raise except_orm(_(\"Warning!\"), _(\"multiple month's found !\"))\n if month_data.id:\n after_joining_months.append(month_data)\n if len(after_joining_months) > 0:\n return after_joining_months\n else:\n return month_year_obj", "def IncMonth(self):\n self.month = self.month + 1\n if self.month > 12:\n self.month = 1\n self.year = self.year + 1\n self.set_day = None", "def previous_month_user_emission_info():\n\n current_user = session.get('current_user')\n user_obj = crud.get_user_by_id(current_user)\n month = datetime.now().month\n year = datetime.now().year\n last_month = month - 1\n \n previous_elect_emission = crud.compare_monthly_elect(user_obj.user_id, last_month, year)\n #print(\"SEEE PREVIOS ELECT EMISSSS ------------\", previous_elect_emission)\n previous_month_gas_emit = crud.compare_monthly_nat_gas(user_obj.user_id, last_month, year)\n previous_month_vehicle_emit = crud.compare_monthly_vehicle_emissions(user_obj.user_id, last_month, year)\n previous_month_public_trans_emit = crud.compare_monthly_public_trans(user_obj.user_id, last_month, year)\n\n previous_month_emit_info = {\"labels\": [\"Electricity Emissions\", \"Vehicle Emissions\", \"Natural Gas Emissions\", \"Transit Emissions\"],\n \"data\": [previous_elect_emission, previous_month_vehicle_emit, previous_month_gas_emit, previous_month_public_trans_emit]}\n\n return jsonify(previous_month_emit_info)", "def _add_months(self, source_date, months: int = 1) -> datetime.date:\n\n month = source_date.month - 1 + months\n year = source_date.year + month // 12\n month = month % 12 + 1\n day = min(source_date.day, calendar.monthrange(year, month)[1])\n return datetime.date(year, month, day)", "def user_next_duty(mongo_db, user_dict, update):\n \n user_id = user_dict['id']\n now = 
datetime.datetime.now()\n today = datetime.datetime(now.year, now.month, now.day)\n\n # mongo_db = setup_mongodb()\n duties = mongo_db[\"duties\"]\n\n # Find next uncompleted duty\n duty = duties.find_one({\n 'user': user_id,\n 'isCompleted': False,\n 'date': { '$gte': today }\n }, sort=[('date', 1)])\n\n if duty is None:\n return None\n\n duty_date = duty['date']\n user = User(**user_dict)\n user_text = user.mention_markdown_v2()\n\n if duty_date == today:\n message = fr'📅 {user_text}: Your laundry duty is today'\n elif duty_date == today + datetime.timedelta(days=1):\n message = fr'📅 {user_text}: Your next laundry duty is tomorrow'\n else:\n date = duty['date'].strftime(\"%A %-d %b\")\n message = fr'📅 {user_text}: Your next laundry duty is on {date}'\n\n update.callback_query.message.reply_markdown_v2(message, quote=False)", "def _updateCalendarPermissions(self, account):\n calendar = self.getCalendar(account)\n\n if calendar:\n calendar.setViewers(account, self.getAuthorisedUsers(account))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A dictionary of cookie names and values.
def cookies(self): # TODO:jek: pass this off to the driver? let it use a customized csv # reader to split & unpack? cookie_strings = self.selenium('getCookie').split('; ') cookies = dict() for cookie_string in cookie_strings: if not cookie_string: continue key, val = cookie_string.split('=', 1) cookies[key] = val.strip('"') return cookies
[ "def cookies(self) -> Dict[str, http.cookies.Morsel]:\n if not hasattr(self, \"_cookies\"):\n self._cookies = (\n http.cookies.SimpleCookie()\n ) # type: http.cookies.SimpleCookie\n if \"Cookie\" in self.headers:\n try:\n parsed = parse_cookie(self.headers[\"Cookie\"])\n except Exception:\n pass\n else:\n for k, v in parsed.items():\n try:\n self._cookies[k] = v\n except Exception:\n # SimpleCookie imposes some restrictions on keys;\n # parse_cookie does not. Discard any cookies\n # with disallowed keys.\n pass\n return self._cookies", "def get_cookies():\n # Read all cookie pairs\n try:\n cookie_pairs = os.getenv(\"HTTP_COOKIE\").split()\n except AttributeError:\n cookie_pairs = []\n cookies = {}\n for cookie_pair in cookie_pairs:\n key, val = split2(cookie_pair.strip(), \"=\")\n if cookies in key:\n cookies[key].append(val)\n else:\n cookies[key] = [val, ]\n return cookies", "def cookie_to_dict(cookie):\n cookie_dict = dict()\n C = Cookie.SimpleCookie()\n C.load(cookie)\n print cookie\n print '*', C\n for morsel in C.values():\n cookie_dict[morsel.key] = morsel.value\n return cookie_dict", "def cookies(self):\r\n if not hasattr(self, \"_cookies\"):\r\n self._cookies = Cookie.SimpleCookie()\r\n if \"Cookie\" in self.headers:\r\n try:\r\n self._cookies.load(\r\n native_str(self.headers[\"Cookie\"]))\r\n except Exception:\r\n self._cookies = {}\r\n return self._cookies", "def _get_cookies(headers):\n result = {}\n for cookie_str in headers.getlist('Set-Cookie'):\n cookie = parse_cookie(cookie_str)\n result.update(cookie) # cookies are k => val dicts\n\n return result", "def from_cookiejar(cookiejar):\n cookies = {}\n\n # for cookie in cookiejar:\n # cookies[cookie.name] = cookie.value\n\n for domain, d_cookies in cookiejar._cookies.items():\n for path, p_cookies in d_cookies.items():\n for cookie in list(p_cookies.values()):\n cookies[cookie.name] = cookie.value\n return cookies", "def parseCookies(self, headers):\n cookies = {}\n cookieReg = re.compile(r'^Set-Cookie:\\s*(\\w+)=(\\w*);')\n\n for header in headers:\n m = cookieReg.match(header)\n if m:\n cookies[m.group(1)] = m.group(2)\n\n return cookies", "def parse_cookie(cookie: str) -> Dict[str, str]:\n cookiedict = {}\n for chunk in cookie.split(str(\";\")):\n if str(\"=\") in chunk:\n key, val = chunk.split(str(\"=\"), 1)\n else:\n # Assume an empty name per\n # https://bugzilla.mozilla.org/show_bug.cgi?id=169091\n key, val = str(\"\"), chunk\n key, val = key.strip(), val.strip()\n if key or val:\n # unquote using Python's algorithm.\n cookiedict[key] = _unquote_cookie(val)\n return cookiedict", "def cookies(self) -> dict:\n return self._flask_request.cookies", "def _get_cookie(cls, referer):\r\n return {\"Cookie\" : \"LanguageFilter=\" + cls.SELECTED_LANGUAGE + \"; \"\r\n \"ShowSubtitleDetails=true; \" + \r\n \"ShowSubtitlePreview=false;\",\r\n \"Referer\" : referer}", "def _gen_cookie_headers(self):\n cookie_headers = []\n for name, attrs in self.cookies.items():\n value = str(attrs['value'])\n if '\"' in value:\n value = value.replace('\"', '\\\\\"')\n chunks = ['%s=\"%s\"' % (name, value)]\n for name, val in attrs.items():\n name = name.lower()\n if val is None:\n continue\n if name in ('expires', 'domain', 'path', 'max_age', 'comment'):\n name = name.replace('_', '-')\n chunks.append('%s=%s' % (name, val))\n elif name == 'secure' and val:\n chunks.append(\"secure\")\n cookie_headers.append((\"Set-Cookie\", '; '.join(chunks)))\n return cookie_headers", "def cookies(self):\n l=[]\n 
r=Loader.capi.cppcms_capi_session_cookie_first(self.d)\n while r:\n l.append(Cookie(r))\n r=Loader.capi.cppcms_capi_session_cookie_next(self.d)\n self.check()\n return l", "def _cookie_attrs(self, cookies):\r\n # add cookies in order of most specific (ie. longest) path first\r\n def decreasing_size(a, b): return cmp(len(b.path), len(a.path))\r\n cookies.sort(decreasing_size)\r\n\r\n version_set = False\r\n\r\n attrs = []\r\n for cookie in cookies:\r\n # set version of Cookie header\r\n # XXX\r\n # What should it be if multiple matching Set-Cookie headers have\r\n # different versions themselves?\r\n # Answer: there is no answer; was supposed to be settled by\r\n # RFC 2965 errata, but that may never appear...\r\n version = cookie.version\r\n if not version_set:\r\n version_set = True\r\n if version > 0:\r\n attrs.append(\"$Version=%s\" % version)\r\n\r\n # quote cookie value if necessary\r\n # (not for Netscape protocol, which already has any quotes\r\n # intact, due to the poorly-specified Netscape Cookie: syntax)\r\n if ((cookie.value is not None) and\r\n self.non_word_re.search(cookie.value) and version > 0):\r\n value = self.quote_re.sub(r\"\\\\\\1\", cookie.value)\r\n else:\r\n value = cookie.value\r\n\r\n # add cookie-attributes to be returned in Cookie header\r\n if cookie.value is None:\r\n attrs.append(cookie.name)\r\n else:\r\n attrs.append(\"%s=%s\" % (cookie.name, value))\r\n if version > 0:\r\n if cookie.path_specified:\r\n attrs.append('$Path=\"%s\"' % cookie.path)\r\n if cookie.domain.startswith(\".\"):\r\n domain = cookie.domain\r\n if (not cookie.domain_initial_dot and\r\n domain.startswith(\".\")):\r\n domain = domain[1:]\r\n attrs.append('$Domain=\"%s\"' % domain)\r\n if cookie.port is not None:\r\n p = \"$Port\"\r\n if cookie.port_specified:\r\n p = p + ('=\"%s\"' % cookie.port)\r\n attrs.append(p)\r\n\r\n return attrs", "def __parse_cookies(headers):\n\n cookies = {}\n if 'Set-Cookie' in headers:\n raw_cookies = headers['Set-Cookie'].split(';')\n for cookie in raw_cookies:\n cookie = cookie.split('=', 1)\n if cookie[0].strip() and len(cookie) > 1:\n cookies.update({cookie[0]: cookie[1]})\n return cookies", "def get_cookies():\n\n cookie_jar = {}\n\n with open(BINARYCOOKIES, \"rb\") as cookies_file:\n # Field 1: 4 byte magic number = 'cook'\n file_header = cookies_file.read(4)\n if file_header != b\"cook\":\n print(\"Not a Cookies.binarycookies file.\")\n\n # Field 2: 4 byte int = number of pages\n num_pages = unpack(\">i\", cookies_file.read(4))[0]\n # Field 3: 4 byte int (one for each page) = page length\n page_sizes = [unpack(\">i\", cookies_file.read(4))[0] for n in range(num_pages)]\n\n for page in [cookies_file.read(ps) for ps in page_sizes]:\n # Convert the string to a file with\n page = BytesIO(page)\n\n # Field 1: 4 byte header: '\\x00\\x00\\x01\\x00'\n page.read(4)\n\n # Field 2: 4 byte int: number of cookies\n num_cookies = unpack(\"<i\", page.read(4))[0]\n\n # Field 3: 4 byte int (one for each cookie) = cookie offset\n cookie_offsets = [unpack(\"<i\", page.read(4))[0] for n in range(num_cookies)]\n\n # Field 4: 4 byte footer: '\\x00\\x00\\x00\\x00'\n _page_footer = unpack(\">i\", page.read(4))[0]\n\n for offset in cookie_offsets:\n cookie = {}\n\n # seek to the cookie position in the page\n page.seek(offset)\n\n # Field 1: 4 byte int: cookie size\n # get the cookie length and then the binary cookie content\n cookie_bytes = BytesIO(page.read(unpack(\"<i\", page.read(4))[0]))\n # Field 2: 4 byte: '\\x00\\x00\\x00\\x00'\n cookie_bytes.read(4)\n\n # Field 3: 4 
byte: cookie flags\n cookie[\"flags\"] = unpack(\"<i\", cookie_bytes.read(4))[0]\n # Field 4: 4 byte: '\\x00\\x00\\x00\\x00'\n cookie_bytes.read(4)\n\n # Field 5: 4 byte int: url field offset from cookie start\n # Field 6: 4 byte int: name field offset from cookie start\n # Field 7: 4 byte int: path field offset from cookie start\n # Field 8: 4 byte int: value field offset from cookie start\n offset_values = [\"url\", \"name\", \"path\", \"value\"]\n content_offsets = dict(\n zip(\n offset_values,\n [unpack(\"<i\", cookie_bytes.read(4))[0] for n in offset_values],\n )\n )\n\n # Field 9: 8 byte footer: '\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n _cookie_offset_footer = cookie_bytes.read(8)\n\n # Seconds between Mac Epoch and Unix Epoch\n mac_epoch = int(datetime(2001, 1, 1).strftime(\"%s\"))\n\n # Field 10: 8 byte double: expiry time of cookie\n # Field 11: 8 byte double: last access time of cookie\n # time is in Mac Epoch - we change to Unix Epoch\n cookie[\"expiry_time\"] = (\n unpack(\"<d\", cookie_bytes.read(8))[0] + mac_epoch\n )\n cookie[\"last_access_time\"] = (\n unpack(\"<d\", cookie_bytes.read(8))[0] + mac_epoch\n )\n\n # Field 12: variable length, null-terminated: cookie name\n # Field 13: variable length, null-terminated: cookie value\n # Field 14: variable length, null-terminated: cookie url\n # Field 15: variable length, null-terminated: cookie path\n for k in content_offsets.keys():\n # seek to the offset (-4 because .. ?) and read until we\n # hit the null-termination\n cookie_bytes.seek(content_offsets[k] - 4)\n _byte = cookie_bytes.read(1)\n _value = \"\"\n while unpack(\"<b\", _byte)[0] != 0:\n _value = _value + _byte.decode(\"ascii\")\n _byte = cookie_bytes.read(1)\n cookie[k] = _value\n\n # put the cookie in the jar\n url = cookie.pop(\"url\")\n path = cookie.pop(\"path\")\n name = cookie.pop(\"name\")\n if url in cookie_jar:\n if path in cookie_jar[url]:\n cookie_jar[url][path][name] = cookie\n else:\n cookie_jar[url][path] = {name: cookie}\n else:\n cookie_jar[url] = {path: {name: cookie}}\n\n return cookie_jar", "def verify_cookies(self, cookies):\n if cookies is None:\n cookies = {}\n if isinstance(cookies, Cookie):\n cookies = {\"cookie\": cookies}\n return {k: v for k, v in cookies.iteritems() if v != None}", "def format_response_cookies(self, response: http.Response) -> list[dict]:\n cookie_list = response.cookies.items(multi=True)\n rv = []\n for name, (value, attrs) in cookie_list:\n cookie = {\n \"name\": name,\n \"value\": value,\n \"path\": attrs[\"path\"],\n \"domain\": attrs[\"domain\"],\n \"httpOnly\": \"httpOnly\" in attrs,\n \"secure\": \"secure\" in attrs,\n }\n # TODO: handle expires attribute here.\n # This is not quite trivial because we need to parse random date formats.\n # For now, we just ignore the attribute.\n\n if \"sameSite\" in attrs:\n cookie[\"sameSite\"] = attrs[\"sameSite\"]\n\n rv.append(cookie)\n return rv", "def cookies(*requireds, **defaults):\r\n cookie = Cookie.SimpleCookie()\r\n cookie.load(ctx.env.get('HTTP_COOKIE', ''))\r\n try:\r\n d = storify(cookie, *requireds, **defaults)\r\n for k, v in d.items():\r\n d[k] = v and urllib.unquote(v)\r\n return d\r\n except KeyError:\r\n badrequest()\r\n raise StopIteration", "def cookies(self) -> ConfigNodePropertyArray:\n return self._cookies" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fill all possible fields with key/[value] pairs from values.
def _fill_fields(fields, values): unfilled = [] for name, field_values in values: if len(field_values) == 1: value = field_values[0] else: value = field_values try: fields[name] = value except ValueError: unfilled.append((name, field_values)) return unfilled
[ "def fill(fields, adapter):", "def prepare_values(self, fields, values):\n\n return {\n field['name']: field['type'].prepare_value_for_db(\n field['field'],\n values[field_id] if field_id in values else values[field['name']]\n )\n for field_id, field in fields.items()\n if field_id in values or field['name'] in values\n }", "def _load_values(values, fields):\n for dest, field in fields.items():\n if isinstance(field, models.Var):\n location = field.load_from or dest\n value = values.get(location)\n\n if value is None:\n if field.default is None:\n raise errors.ParseError(\n 'Required field \"{}\" missing.'.format(location),\n location=location,\n )\n\n yield dest, field.default\n\n else:\n value = field.parse(value.strip(), location)\n field.validate(value, location)\n yield dest, value\n elif isinstance(field, dict):\n yield dest, dict(_load_values(values, field))", "def set_fields_from_dict(self):\n for key, value in self.dictionary.items():\n try:\n self.set_field(key, value)\n except KeyError:\n pass", "def shuffle_fields(fields):\n keys, values = zip(*fields.items())\n zipped = list(zip(*values))\n random.shuffle(zipped)\n unzipped = list(zip(*zipped))\n for k, v in zip(keys, unzipped):\n fields[k] = list(v)\n return fields", "def populate(self, **kw):\n for name, value in kw.items():\n try:\n field = self._fields[name]\n except KeyError:\n continue\n\n parsed_value = field.parse_value(value)\n\n setattr(self, name, parsed_value)", "def _set_fields(self, params):\n for key in params:\n if key in self.fields:\n setattr(self, key, params[key])", "def complete_dflt_vals(cfg):\n dflt = cfg['default_params'] # all default params\n for key, entries in cfg.items():\n if key not in _dict_fields:\n continue\n\n logger.debug(\"check for %s defaults\", key)\n dflts = dflt.get(key, {}) # default params for given section\n\n # if not dflts:\n # continue\n logger.info(\"set defaults for %s\", key)\n if dflts:\n logger.debug(\"defaults %s\", dflts)\n\n for name, entry in sorted(entries.items()):\n logger.debug(\"%s:%s\", key, name)\n\n if 'name' not in entry: # set name field if missing\n logger.debug(\"NAME = %r\", name)\n entry['name'] = name\n\n for dkey, dval in dflts.items():\n if dkey not in entry:\n entry[dkey] = dval\n logger.debug(\"%r = %r\", dkey, dval)", "def __init__(self, vals):\n\n self.__vals = vals\n self.__dct = {}\n for (name, val, _) in vals:\n self.__dct[name] = val", "def _setValues(self, dict, *values, **args):\n\n if 'skip' in args:\n skip=args['skip']\n if isinstance(skip, basestring):\n skip=[skip]\n else:\n skip=[]\n\n #extract intersection of dictionary and current columns\n items = [ c.name for c in self.__table__.columns if c.name in dict]\n \n if values:\n #further specify the values (if not in items, it won't be even considered)\n items = [ i for i in items if i in values]\n \n #insert all the attributes as long as not in 'skip'\n for att in items:\n if att not in skip:\n type = self.__table__.columns[att].type.__class__.__name__\n if type == 'String': \n self.__dict__[att] = dict[att]\n elif type == 'Integer' and dict[att]: \n self.__dict__[att] = int(dict[att])\n else: \n #in other cases just use the string or NoneType\n self.__dict__[att] = dict[att]", "def _update_fields(self):\n real_values = {}\n for k, v in self._power_supplies.items():\n real_values[k] = v.read_value()\n new_basis = self._converter.convert_from_xyz(**real_values)\n mode = self.getValue(\"Specification mode\")\n if mode == \"XYZ\":\n names = (\"Field X\", \"Field Y\", \"Field Z\")\n elif mode 
== \"Cylindrical\":\n names = (\"Field magnitude\", \"Phi\", \"Field Z\")\n elif mode == \"Spherical\":\n names = (\"Field magnitude\", \"Theta\", \"Phi\")\n for name, value in zip(names, new_basis):\n self.setValue(name, value)", "def __normalize_fields(self, check_fields: bool = False) -> None:\n field_dict = OrderedDict()\n for k, v in self.fields.items():\n # field names to lower case\n k = k.strip().lower()\n # assert k in BIB_FIELDS, f\"{k} is not a valid field name\"\n # remove redundant curly braces and commas\n v = str(v).strip(\" ,\") # DO NOT strip \"{}\"\n self.__double_braces_flags[k] = False\n braces_count = 0\n while all([v.startswith(\"{\"), v.endswith(\"}\")]) or all(\n [v.startswith('\"'), v.endswith('\"')]\n ):\n v = v[1:-1]\n braces_count += 1\n if braces_count >= 2:\n self.__double_braces_flags[k] = True\n # convert month to number if applicable\n if k.lower().strip() == \"month\" and v.capitalize() in calendar.month_abbr:\n v = strptime(v, \"%b\").tm_mon\n field_dict[k] = v\n self.__fields = field_dict\n if check_fields:\n self.check_required_fields()\n for k, v in self.fields.items():\n self.__setattr__(k, v)", "def field_values_gen(self):\n fvals = FieldValue.objects.filter(event_id=self)\n lut = self.datasheet_id.internal_fieldname_lookup\n for fval in fvals.iterator():\n key = unicode(lut[fval.field_id.internal_name])\n value = (fval.field_value, fval.field_id.datatype.name)\n yield key, value", "def init_mapping(self, values):\n for value, prob in values.iteritems():\n self.set(value, prob)", "def set_fields(source, fields, target):\n for field in fields:\n tag = source.find(field[\"name\"])\n _set_value(tag, field, target)", "def mkRecord(keys, fields):\n d = {}\n for k, v in zip(keys, fields):\n\tif v: d[k] = v\n return d", "def _init_values(self) -> None:\n self._values: Mapping[str, configutils.Values] = {}\n for name, opt in configdata.DATA.items():\n self._values[name] = configutils.Values(opt)", "def _structure_simple_fields(_simple_fields, _payload):\n for _field, _value in _simple_fields.items():\n if _value:\n _payload['data'][_field] = _value\n return _payload", "def accumulate_metadata(\n items: Iterable[Mapping[str, Any]],\n fields: Union[str, Sequence[str], Literal[True]] = True,\n skip_fields: Container[str] = (),\n only_allsame: Union[bool, Literal[\"ignore-missing\"]] = False,\n) -> Dict[str, Any]:\n if isinstance(fields, str):\n fields = (fields,)\n\n all_fields: Dict[str, Any] = {}\n i = 0\n for i, item in enumerate(items):\n for existing_field in all_fields.keys():\n value = item.get(existing_field, None)\n if value is None and only_allsame == \"ignore-missing\":\n continue\n existing_value = all_fields[existing_field]\n if existing_value == value:\n # leave fields that are the same for every item as singletons\n continue\n\n if isinstance(existing_value, _ourlist):\n # we already have a list going; add do it\n existing_value.append(value)\n else:\n if only_allsame:\n # Either `only_allsame is True`, or `only_allsame == \"ignore-missing\"`\n # and the value wasn't missing\n all_fields[existing_field] = None\n else:\n # all prior values for this field were the same (or missing).\n # start a new list collecting them, including Nones at the front\n # for however many items were missing the field.\n all_fields[existing_field] = _ourlist(\n [None] * (i - 1) + [existing_value, value]\n )\n\n if fields is True:\n # want all properties - add in any ones we haven't processed already\n for new_field in item.keys() - all_fields.keys():\n if new_field in 
skip_fields:\n continue\n all_fields[new_field] = item[new_field]\n else:\n # just want some properties\n for field in cast(Iterable[str], fields):\n # ^ cast: pyright isn't smart enough to know the `else` branch means `properties` isn't True\n # https://github.com/microsoft/pyright/issues/1573\n if field not in all_fields.keys():\n try:\n all_fields[field] = item[field]\n except KeyError:\n pass\n\n if only_allsame:\n return {\n field: value for field, value in all_fields.items() if value is not None\n }\n\n return all_fields", "def generate_form_data(self, form):\n\t\tplaceholder_values = {}\n\t\tfor i in form.fields:\n\t\t\tplaceholder_values[i.name] = i.get_placeholder()\n\t\t\n\t\tyield placeholder_values, None, None\n\t\t\n\t\tfor k in placeholder_values:\n\t\t\tfor v in self.vectors:\n\t\t\t\tnx = placeholder_values.copy()\n\t\t\t\tnx[k] = v\n\t\t\t\tyield nx, k, v" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fill form with values, retrying fields that fail with ValueErrors. If multiple passes are required to set all fields in values, the document will be resynchronized between attempts, with wait_for called between each attempt.
def _fill_form_async(form, values, wait_for=None, timeout=None): browser = form.browser unset_count = len(values) while values: values = _fill_fields(form.fields, values) if len(values) == unset_count: # nothing was able to be set raise ValueError("Unable to set fields %s" % ( ', '.join(pair[0] for pair in values))) if wait_for: browser.wait_for(wait_for, timeout) browser.sync_document() # replace *form* with the new lxml element from the refreshed document form = browser.document.xpath(form.fq_xpath)[0] unset_count = len(values)
[ "def fill(self, selector, values):\n if not self.exists(selector):\n raise Exception(\"Can't find form\")\n \n for field in values:\n self.set_field_value(\"%s [name=%s]\" % (selector, field),\n values[field])\n return True", "def _fill_form_item(self, form_info, value):\n while True:\n try:\n form = self._find_element(form_info)\n except NoSuchElementException:\n # Page is probably still loading.\n self.driver.implicitly_wait(1)\n else:\n try:\n # Clear if we can\n form.clear()\n except WebDriverException:\n # Happens on drop down forms\n pass\n form.send_keys(value)\n return", "def post_ad_mandatory_fields_set(self, driver, ad):\n for el in driver.find_elements_by_xpath('//*[@class=\"formgroup-label-mandatory\"]'):\n try:\n for_id = el.get_attribute(\"for\")\n if for_id is not None:\n self.log.debug(\"Detected mandatory field (Name='%s', ID='%s')\", el.text, for_id)\n re_match = re.search(r'.*\\.(.*)_s.*', for_id, re.IGNORECASE)\n if re_match is not None:\n for_id_raw = re_match.group(1)\n use_default = False\n if \"field_\" + for_id_raw in ad:\n try:\n Select(driver.find_element_by_id(for_id)).select_by_visible_text(ad[\"field_\" + for_id_raw])\n except NoSuchElementException:\n self.log.warning(\"Value for combo box '%s' invalid in config, setting to default (first entry)\", for_id_raw)\n use_default = True\n else:\n self.log.warning(\"No value for combo box '%s' defined, setting to default (first entry)\", for_id_raw)\n use_default = True\n if use_default:\n s = Select(driver.find_element_by_id(for_id))\n idx_opt = 0\n value = \"\"\n for o in s.options:\n value = o.get_attribute(\"value\")\n # Skip empty options (defaults?)\n if not value:\n continue\n self.log.debug(\"Value at index %d: %s\", idx_opt, value)\n if value == u\"Bitte wählen\":\n continue\n idx_opt += 1\n self.log.info(\"Setting combo box '%s' to '%s'\", for_id_raw, value)\n s.select_by_value(value)\n self.fake_wait()\n else:\n for_id_raw = for_id\n if \"field_\" + for_id_raw in ad:\n value = ad[\"field_\" + for_id_raw]\n else:\n self.log.debug(\"No value for text field '%s' defined, setting to empty value\", for_id_raw)\n value = 'Nicht angegeben'\n try:\n driver.find_element_by_id(for_id).send_keys(value)\n self.fake_wait()\n except:\n pass\n except NoSuchElementException:\n pass", "def test_populate(self):\n\n class TableForm(forms.Form):\n name = forms.CharField()\n value = forms.IntegerField()\n\n TableFormset = forms.formsets.formset_factory(TableForm, extra=0)\n\n class Table(table_formset.FormsetDataTable):\n formset_class = TableFormset\n\n name = tables.Column('name')\n value = tables.Column('value')\n\n class Meta(object):\n name = 'table'\n\n table = Table(self.request)\n table.data = TEST_DATA_4\n formset = table.get_formset()\n self.assertEqual(2, len(formset))\n form = formset[0]\n form_data = form.initial\n self.assertEqual('object_1', form_data['name'])\n self.assertEqual(2, form_data['value'])", "def validate_finite_values_entity(values: List[Dict], supported_values: List[str] = None,\n invalid_trigger: str = None, key: str = None,\n support_multiple: bool = True, pick_first: bool = False, **kwargs) -> SlotValidationResult:\n\n values_length = len(values)\n filled_count = 0\n filled=False\n partially_filled=False\n trigger=\"\"\n default_response = (False,False,trigger,{})\n\n params={key:[]}\n \n \n\n if values_length==0:\n return build_response(default_response)\n\n for doc in values:\n try:\n if supported_values.index(doc[\"value\"]) >= 0:\n filled_count+=1\n 
params[key].append(doc[\"value\"].upper())\n except:\n trigger = invalid_trigger\n \n \n if filled_count==values_length:\n filled = True\n partially_filled = False\n else:\n partially_filled=True\n\n \n if len(params[key])==0:\n params={}\n elif pick_first and len(params[key])>0:\n params[key]=params[key][0]\n \n response = (filled,partially_filled,trigger,params)\n \n return build_response(response)", "def _fill_form_dict(self, form_dict):\n for form in form_dict:\n form_item = {\n 'class': 'input',\n 'attrib': 'id',\n 'value': form\n }\n self._fill_form_item(form_item, form_dict[form])", "def validate(self):\n\t\tfor key in self._fields:\n\t\t\tsetattr(self, key, self._fields[key])\n\t\tfor key, value in self._fields.iteritems():\n\t\t\tif hasattr(value, 'validate'):\n\t\t\t\tvalue.validate()\n\t\t\telif isinstance(value, list):\n\t\t\t\tfor v in value:\n\t\t\t\t\tif hasattr(v, 'validate'):\n\t\t\t\t\t\tv.validate()", "def fill_form(self, **kwargs):\n suite_account_name = kwargs.get(\"suite_account_name\")\n suite_type = kwargs.get(\"suite_type\")\n suite = kwargs.get(\"suite\")\n billing_address = kwargs.get(\"billing_address\")\n notes = kwargs.get(\"notes\")\n suite_holder = kwargs.get(\"suite_holder\")\n suite_admin = kwargs.get(\"suite_admin\")\n authorized_signers = kwargs.get(\"authorized_signers\")\n\n if suite_account_name is not None:\n self.fill_suite_account_name(suite_account_name)\n if suite_type is not None:\n self.fill_suite_type(suite_type)\n if suite is not None:\n self.fill_suite(suite)\n if billing_address is not None:\n self.fill_billing_address(billing_address)\n if notes is not None:\n self.fill_notes(notes)\n if suite_holder is not None:\n self.fill_suite_holder(suite_holder)\n if suite_admin is not None:\n self.fill_suite_admin(suite_admin)\n if authorized_signers is not None:\n for signer in authorized_signers:\n self.add_authorized_signer(signer)", "def _build_forms_from_get(self):\n \n if self.config_id is None:\n # New form\n \n initial_values = []\n if 'data_file' in self.request.GET:\n initial_values = [{'data_runs': self.request.GET.get('data_file', '')}]\n ScanFormSet = formset_factory(ScanForm,extra=0)\n else:\n ScanFormSet = formset_factory(ScanForm,extra=1)\n self.scans_form = ScanFormSet(initial=initial_values, prefix=\"sf\")\n \n initial_config = {}\n if 'experiment' in self.request.GET:\n initial_config['experiment'] = self.request.GET.get('experiment', '')\n if 'reduction_name' in self.request.GET:\n initial_config['reduction_name'] = self.request.GET.get('reduction_name', '')\n self.config_form = ConfigurationForm(initial=initial_config)\n MaskFormSet = formset_factory(MaskForm,extra=1)\n self.masks_form = MaskFormSet(prefix=\"mf\")\n \n else:\n # Retrieve existing configuration\n reduction_config = get_object_or_404(ReductionConfiguration, pk=self.config_id, owner=self.request.user)\n initial_config = ConfigurationForm.data_from_db(self.request.user, reduction_config)\n \n logger.debug(\"initial_config: %s\" % initial_config)\n ScanFormSet = formset_factory(ScanForm,extra=0)\n initial_values = []\n for item in reduction_config.reductions.all().order_by('timestamp'):\n props = ScanForm.data_from_db(self.request.user, item.pk)\n initial_values.append(props)\n \n \n self.scans_form = ScanFormSet(initial=initial_values, prefix=\"sf\")\n self.config_form = ConfigurationForm(initial=initial_config)\n MaskFormSet = formset_factory(MaskForm,extra=0)\n if initial_config.get('mask'):\n self.masks_form = 
MaskFormSet(initial=initial_config['mask'],prefix=\"mf\")\n else:\n self.masks_form = MaskFormSet(prefix=\"mf\")", "def run_validators(self, value):\n if isinstance(value, dict):\n to_validate = self._read_only_defaults()\n to_validate.update(value)\n else:\n to_validate = value\n super().run_validators(to_validate)", "def _load_values(values, fields):\n for dest, field in fields.items():\n if isinstance(field, models.Var):\n location = field.load_from or dest\n value = values.get(location)\n\n if value is None:\n if field.default is None:\n raise errors.ParseError(\n 'Required field \"{}\" missing.'.format(location),\n location=location,\n )\n\n yield dest, field.default\n\n else:\n value = field.parse(value.strip(), location)\n field.validate(value, location)\n yield dest, value\n elif isinstance(field, dict):\n yield dest, dict(_load_values(values, field))", "def test_updateValues_with_bad_values(self):\n generateConfig()\n with self.assertRaises(ValueError):\n updateValue('size', 'Chaos')\n with self.assertRaises(ValueError):\n updateValue('datacenter', '5')\n with self.assertRaises(ValueError):\n updateValue('universalisupdatefrequency', \"standard\")", "def full_clean(self):\n self._errors = ErrorDict()\n if not self.is_bound: # Stop further processing.\n return\n self.cleaned_data = {}\n # If the form is permitted to be empty, and none of the form data has\n # changed from the initial data, short circuit any validation.\n if self.empty_permitted and not self.has_data():\n return\n self._clean_fields()\n self._clean_form()\n self._post_clean()", "def run_validators(self, value):\n try:\n self.model_field.run_validators(value)\n except ModelValidationError as err:\n raise ValidationError(err.messages)\n except TypeError as err:\n raise ValidationError(err)\n super(DjongoField, self).run_validators(value)", "def test_form_logger_type_automatic_fill(self):\n with self.app.test_client() as client:\n response = client.get('/query')\n biomimic_type_choices = self.db.fetch_biomimic_types() \n for biomimic_type in biomimic_type_choices:\n self.assertIn(self.stringToBytes(biomimic_type[0]), response.data)", "def _check_form_validity(self):\n\n for idsp in self._idsp_input:\n if not idsp.form_is_valid():\n self._invalid_input_eh()\n return\n\n self._valid_input_eh()", "def validate_form(self, dict_items, required):\n messages = []\n for field in required:\n value = dict_items.get(field)\n if value==\"\" or value==None:\n messages.append(\"You must enter a value for %s in body\" % field)\n return messages", "def test_manual_field_validation(self):\n fields = (\"sell_currency\", \"sell_amount\", \"buy_currency\")\n values = (\"INVALID\", \"\", None)\n test_cases = itertools.product(fields, values)\n for field, value in test_cases:\n trade = self.factory.make_trade(save=False)\n setattr(trade, field, value)\n with self.assertRaises(\n ValidationError, msg=f\"Expected {field} with value {value} to raise.\"\n ):\n trade.full_clean()", "def process_formdata(self, valuelist):\n if valuelist:\n if self.is_related:\n self.data = self.datamodel.get_related_interface(self.col_name).get(\n valuelist[0]\n )\n else:\n self.data = self.datamodel.get(valuelist[0])", "def set_errors(self, errors):\n self.errors = errors\n self.fill()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The fastest Selenium locator expression for this element.
def _locator(self):
    try:
        return 'id=' + self.attrib['id']
    except KeyError:
        return 'xpath=' + self.fq_xpath
[ "def find_element_by_xpath(self, xpath):\n raise NotImplementedError", "def byXpath(self, xpath):\r\n return self.find_element((By.XPATH, xpath))", "def _change_to_selenium_elem(self, elem=None, **kwargs):\n if elem:\n soup_element = elem.soup_hoth\n else:\n soup_element = Finders().find(**kwargs).soup_hoth\n if soup_element.has_attr('id'):\n locator = soup_element['id']\n return \"self.get_driver().find_element_by_id(\\'\"+locator+\"\\')\"\n elif soup_element.has_attr('class'):\n locator = soup_element['class'][0]\n return \"self.get_driver().find_element_by_class_name(\\'\"+locator+\"\\')\"\n else:\n def find_by_css(element):\n without_text = str(element).replace(element.get_text(), '')\n tag = without_text.split()[0][1:]\n without_front_tag = without_text.replace(\"<\"+tag+\" \", \"\")\n without_tag = \"\"\n if \"</\"+tag+\">\" in without_front_tag:\n without_tag = without_front_tag.replace(\"</\"+tag+\">\", \"\")\n elif \"/>\" in without_front_tag:\n without_tag = without_front_tag.replace(\"/>\", \"\")\n list_of_params = without_tag.split(\"\\\" \")\n locator = tag\n for each in list_of_params:\n if each[-1] != \"\\\"\":\n locator = locator + \"[\" + each + \"\\\"]\"\n else:\n locator = locator + \"[\" + each + \"]\"\n return \"self.get_driver().find_element_by_css_selector(\\'\"+locator+\"\\')\"\n return find_by_css(soup_element)", "def find_element(self, selector, attribute=\"CSS_SELECTOR\"):\n return self.driver.find_element(getattr(By, attribute.upper()), selector)", "def get_element(self, locator):\n self.logger.debug('function get_element')\n method = locator.getType()\n values = locator.getSelector()\n self.logger.debug(method)\n self.logger.debug(values)\n if type(values) is str or unicode:\n return self.get_element_by_type(method, values)\n elif type(values) is list:\n for value in values:\n try:\n return self.get_element_by_type(method, value)\n except NoSuchElementException:\n pass\n raise NoSuchElementException", "def query_selector(self, selector = \"html\"):\n return self.execute_script('document.querySelector(\\'{selector}\\').outerHTML;')", "def find_elements_by_xpath(self, xpath):\n raise NotImplementedError", "def select_syntax(by=\"ID\"):\n by_dict = {\"ID\": By.ID,\n \"XPATH\": By.XPATH,\n \"CLASS\": By.CLASS_NAME,\n \"LINK_TEXT\": By.LINK_TEXT,\n \"CSS\": By.CSS_SELECTOR}\n return by_dict[\"ID\"] if by is None else by_dict[by] if by in by_dict.keys() else None", "def get_locator (self):\n return self._locator", "def get_by_type(self, locator_type):\r\n locator_type = locator_type.lower()\r\n if locator_type == \"id\":\r\n return By.ID\r\n elif locator_type == \"name\":\r\n return By.NAME\r\n elif locator_type == \"xpath\":\r\n return By.XPATH\r\n elif locator_type == \"css\":\r\n return By.CSS_SELECTOR\r\n elif locator_type == \"classname\":\r\n return By.CLASS_NAME\r\n elif locator_type == \"linktext\":\r\n return By.LINK_TEXT\r\n else:\r\n self.log.error(\"Locator type: \" + locator_type + \r\n \" is not correct/supported\")\r\n return False", "def get_locator(self) -> Any:\n return self.__locator", "def _find(self, by, q, many, visible, nothrow):\n\n # Determine the correct expected condition to wrap\n if many:\n ec = EC.visibility_of_all_elements_located if visible else EC.presence_of_all_elements_located\n else:\n ec = EC.visibility_of_element_located if visible else EC.presence_of_element_located\n\n # Wrap it\n f = None\n\n if type(q) is list: # We have a list of queries, or them together.\n # NOTA BENE: We can't just comma separate the queries because 
this is generic and should support CSS Selectors & XPATHs\n if not q:\n def f(_): return False\n else:\n def f(d): return reduce(lambda acc, v: acc or ec((by, v))(d), q, False)\n else:\n f = ec((by, q)) # Just use the original expected condition.\n\n if not f:\n raise Exception(\"Browser#_find: Programming Error: f is None\")\n\n return f", "def find_elements(self, selector, attribute=\"CSS_SELECTOR\"):\n return self.driver.find_elements(getattr(By, attribute.upper()), selector)", "def _get_selector(self):\n query = self._get_query()\n if self.negate:\n return self._negate_query(query)\n else:\n return query", "def find_element_by_name(self, name):\n return self.find_element_by_xpath('//*[@name=\"%s\"]' % name)", "def get_element(self, locator: str, locator_type: str):\n try:\n by_type = self.get_locator_type(locator_type=locator_type)\n if by_type is not None:\n return self.driver.find_element(by_type, locator)\n else:\n raise InvalidSelectorException\n except Exception as msg:\n self.log.error(\"Exception: \" + str(msg))\n return None", "def locator_number(self):\n return self._locator_number", "def find_element(self, locator, tag=None, required=True, parent=None):\n return self.element_finder.find(locator, tag, True, required, parent)", "def get_one_selector_from_str(raw_selector):\n raw_selector = raw_selector.strip()\n mode = raw_selector[:1]\n de = raw_selector[1:]\n if mode == '#':\n return lambda n: n.attr('id') == de\n elif mode == '.':\n return lambda n: de in n.attr('class')\n else:\n return lambda n: n.name == raw_selector" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Placeholder function for a future one that will calculate the change mesh (if it doesn't exist).
def calculate_change_mesh(self):
[ "def UpdateMesh(self):\r\n pass", "def mesher():\n return Mesher(func=sphere, delta=0.1)", "def reload(self):\n with open(self.filename,'r') as meshfile:\n # scan file until we reach a mesh format declarator\n if not scan_for_keyword(meshfile, \"$meshformat\"):\n return False\n # read mesh format information\n self.meshformat = meshfile.readline()\n #check for end of mesh formatting block\n if meshfile.readline().lower().strip() != \"$endmeshformat\":\n print(\"Can only read ASCII meshes.\")\n return False\n\n if not scan_for_keyword(meshfile, \"$nodes\"):\n return False\n\n self.num_nodes = int(meshfile.readline())\n self.node_positions = np.zeros((self.num_nodes, 3))\n nodeids = [0]*self.num_nodes\n for i in range(self.num_nodes):\n nodeinf = meshfile.readline().split()\n # shift to zero-indexing from gmsh/matlab 1-indexing\n nodeids[i] = int(nodeinf[0]) - 1\n nodex = np.array([float(k) for k in nodeinf[1:]])\n #set axis-aligned bounding box for the mesh\n if (i == 0):\n self.bounding_box[0] = nodex\n self.bounding_box[1] = nodex\n else:\n self.bounding_box[0] = [min(self.bounding_box[0][k],nodex[k]) for k in range(3)]\n self.bounding_box[1] = [max(self.bounding_box[1][k],nodex[k]) for k in range(3)]\n self.node_positions[i] = nodex\n if not scan_for_keyword(meshfile, \"$endnodes\"):\n return False\n if not scan_for_keyword(meshfile, \"$elements\"):\n return False\n\n self.num_elements = int(meshfile.readline())\n #constants given by the file format\n num_infos = 4\n tagidx = 3\n self.element_infos = [[0]*num_infos]*self.num_elements\n self.element_tags = [0]*self.num_elements\n self.num_points = 0\n self.num_lines = 0\n self.num_tris = 0\n self.num_quads = 0\n # self.num_tets = 0\n # self.num_hexas = 0\n # self.num_prisms = 0\n # self.num_pyramids = 0\n self.num_lines3 = 0\n self.num_tris6 = 0\n\n self.points = np.zeros((self.num_elements,2), np.int32)\n self.lines = np.zeros((self.num_elements,3), np.int32)\n self.tris = np.zeros((self.num_elements,4), np.int32)\n self.quads = np.zeros((self.num_elements,5), np.int32)\n # self.tets = np.zeros((self.num_elements,5), np.int32)\n # self.hexas = np.zeros((self.num_elements,9), np.int32)\n # self.prisms = np.zeros((self.num_elements,7), np.int32)\n # self.pyramids = np.zeros((self.num_elements,6), np.int32)\n self.lines3 = np.zeros((self.num_elements,4), np.int32)\n self.tris6 = np.zeros((self.num_elements,7), np.int32)\n\n tokens = []\n tline = meshfile.readline().lower().strip()\n while tline != \"$endelements\":\n if not tline:\n return False\n tokens = tokens + [int(k) for k in tline.split()]\n tline = meshfile.readline().lower().strip()\n for i in range(self.num_elements):\n self.element_infos[i] = [tokens.pop(0) for k in range(num_infos)]\n # I have honestly no clue what this means, but it consumes tokens\n # so it's staying in the code\n self.element_tags[i] = [tokens.pop(0) for k in range(self.element_infos[i][2]-1)]\n # minus 1s to shift from one-indexing to zero-indexing\n element_nodes = [tokens.pop(0)-1 for k in range(NODES_PER_ELEMENT_TYPE[self.element_infos[i][1]-1])]\n\n if self.element_infos[i][1] == 15:\n self.points[self.num_points][0] = nodeids[element_nodes[0]]\n self.points[self.num_points][1] = self.element_infos[i][tagidx]\n self.num_points = self.num_points + 1\n elif self.element_infos[i][1] == 1:\n self.add_line(i, nodeids, element_nodes, 1)\n elif self.element_infos[i][1] == 8:\n self.add_line(i, nodeids, element_nodes, 2)\n elif self.element_infos[i][1] == 2:\n self.add_triangle(i, nodeids, element_nodes, 1)\n 
elif self.element_infos[i][1] == 9:\n self.add_triangle(i, nodeids, element_nodes, 2)\n elif self.element_infos[i][1] == 3:\n for j in range(4):\n self.quads[self.num_quads][j] = nodeids[element_nodes[j]]\n self.quads[self.num_quads][4] = self.element_infos[i][tagidx]\n self.num_quads = self.num_quads + 1\n\n #TODO tetras/hexes/prisms/pyramids\n \n\n return True", "def MFS(mesh,plotx,ploty,plotz,k,incDir,incAmp=1.0,tau=10,frac_samp=2,numSource=0,numSamp=0,offset=0.15):\n mesh.MFS=True\n \n if numSource == 0:\n for d in mesh.dList:\n d.numSource = int(np.ceil(tau**2*k**2*d.area()/(4*np.pi*np.pi)))\n #d.numSource = int(np.ceil(tau*k*d.length()/(2*np.pi)))\n #d.numSamp = int(frac_samp*d.numSource)\n \n def number_of_points(d,N):\n a = d.numelements * N**2\n b = d.edges * (N-2)\n c = d.corners * 3\n d = d.extraordinary_points\n return a-b-c+d \n \n # Singular (source) points\n for d in mesh.dList:\n N=1\n if numSource == 0:\n while number_of_points(d,N) < d.numSource: N+=1\n else:\n while number_of_points(d,N) < numSource: N+=1\n d.numSource = number_of_points(d,N)\n xi1 = np.linspace(d.eList[0].limits[0],d.eList[0].limits[1],N)\n xi2 = np.linspace(d.eList[0].limits[2],d.eList[0].limits[3],N) \n xi1,xi2=np.meshgrid(xi1,xi2)\n xi1=xi1.reshape(-1,) ; xi2=xi2.reshape(-1,) \n souvals = d.eList[0].vals(d.eList[0].limits[0],d.eList[0].limits[2])\n sounorms = d.eList[0].normals(d.eList[0].limits[0],d.eList[0].limits[2])\n for e in d.eList:\n newvals = e.vals(xi1,xi2)\n newnorms = e.normals(xi1,xi2) \n px,py,pz = newvals\n px=px.reshape(-1,1)\n py=py.reshape(-1,1)\n pz=pz.reshape(-1,1)\n qx,qy,qz=newvals\n rx=px-qx ; ry=py-qy ; rz=pz-qz\n rx=np.tril(rx, -1)[:,:-1]+np.triu(rx, 1)[:,1:]\n ry=np.tril(ry, -1)[:,:-1]+np.triu(ry, 1)[:,1:]\n rz=np.tril(rz, -1)[:,:-1]+np.triu(rz, 1)[:,1:]\n r = np.sqrt( rx**2 + ry**2 + rz**2 )\n delete = np.where(np.any(r<1e-10,axis=1))[0]\n newvals = np.delete(newvals,delete[1:],axis=1)\n newnorms = np.delete(newnorms,delete[1:],axis=1)\n px,py,pz = newvals\n px=px.reshape(-1,1)\n py=py.reshape(-1,1)\n pz=pz.reshape(-1,1) \n qx,qy,qz=souvals\n r = np.sqrt( (qx-px)**2 + (qy-py)**2 + (qz-pz)**2 )\n delete = np.where(np.any(r<1e-12,axis=1))[0]\n souvals = np.hstack([souvals,np.delete(newvals,delete,axis=1)])\n sounorms = np.hstack([sounorms,np.delete(newnorms,delete,axis=1)]) \n d.sourceVals = souvals + offset*sounorms\n d.sourceNormals = sounorms\n mesh.sourceVals = np.hstack([d.sourceVals for d in mesh.dList])\n mesh.sourceNormals = np.hstack([d.sourceNormals for d in mesh.dList])\n \n # Sampling points \n for d in mesh.dList:\n N=1\n if numSamp == 0:\n while number_of_points(d,N) < frac_samp*d.numSource: N+=1\n else:\n while number_of_points(d,N) < numSource: N+=1\n d.numSamp = number_of_points(d,N)\n xi1 = np.linspace(d.eList[0].limits[0],d.eList[0].limits[1],N)\n xi2 = np.linspace(d.eList[0].limits[2],d.eList[0].limits[3],N) \n xi1,xi2=np.meshgrid(xi1,xi2)\n xi1=xi1.reshape(-1,) ; xi2=xi2.reshape(-1,) \n sampvals = d.eList[0].vals(d.eList[0].limits[0],d.eList[0].limits[0])\n sampnorms = d.eList[0].normals(d.eList[0].limits[0],d.eList[0].limits[0])\n for e in d.eList:\n newvals = e.vals(xi1,xi2)\n newnorms = e.normals(xi1,xi2) \n px,py,pz = newvals\n px=px.reshape(-1,1)\n py=py.reshape(-1,1)\n pz=pz.reshape(-1,1)\n qx,qy,qz=newvals\n rx=px-qx ; ry=py-qy ; rz=pz-qz\n rx=np.tril(rx, -1)[:,:-1]+np.triu(rx, 1)[:,1:]\n ry=np.tril(ry, -1)[:,:-1]+np.triu(ry, 1)[:,1:]\n rz=np.tril(rz, -1)[:,:-1]+np.triu(rz, 1)[:,1:]\n r = np.sqrt( rx**2 + ry**2 + rz**2 )\n delete = 
np.where(np.any(r<1e-10,axis=1))[0]\n newvals = np.delete(newvals,delete[1:],axis=1)\n newnorms = np.delete(newnorms,delete[1:],axis=1)\n px,py,pz = newvals\n px=px.reshape(-1,1)\n py=py.reshape(-1,1)\n pz=pz.reshape(-1,1) \n qx,qy,qz=sampvals\n r = np.sqrt( (qx-px)**2 + (qy-py)**2 + (qz-pz)**2 )\n delete = np.where(np.any(r<1e-12,axis=1))[0] \n sampvals = np.hstack([sampvals,np.delete(newvals,delete,axis=1)])\n sampnorms = np.hstack([sampnorms,np.delete(newnorms,delete,axis=1)]) \n d.sampVals = sampvals\n d.sampNormals = sampnorms\n mesh.sampVals = np.hstack([d.sampVals for d in mesh.dList])\n mesh.sampNormals = np.hstack([d.sampNormals for d in mesh.dList]) \n \n dphidn = evaluate_dphidn(mesh,k,incAmp,incDir) # derivative phi_inc wrt n\n \n T = evaluate_T(mesh,k)\n\n A = np.dot(T,T.T)\n b = np.sum(-T*dphidn,axis=1)\n\n # Solve for fundamental solution amplitudes\n mesh.amplitudes = np.linalg.solve(A,b)\n\n return get_potentials(np.vstack([plotx,ploty,plotz]),mesh,k,incAmp,incDir)", "def update_mesh(self, obj, depsgraph):\n\n # Get/create the mesh instance and determine if we need\n # to reupload geometry to the GPU for this mesh\n rebuild_geometry = obj.name in self.updated_geometries\n if obj.name not in self.meshes:\n mesh = Mesh(obj.name)\n rebuild_geometry = True\n else:\n mesh = self.meshes[obj.name]\n\n mesh.update(obj)\n\n # If modified - prep the mesh to be copied to the GPU next draw\n if rebuild_geometry:\n mesh.rebuild(obj.evaluated_get(depsgraph))\n\n self.updated_meshes[obj.name] = mesh", "def extract_mesh_deltas(skin_mesh_name=\"\", corrected_mesh_name=\"\"):\n # return mel.eval(\"extractDeltas -s {} -c {}\".format(skin_mesh_name, corrected_mesh_name))\n return cmds.invertShape(skin_mesh_name, corrected_mesh_name)", "def expand(self, mesh: feMesh):", "def _uniform_refine(self):\n raise NotImplementedError(\"Single refine not implemented \" +\n \"for this mesh type!\")", "def asMeshTransformed(*args, **kwargs):\n \n pass", "def boolean_difference_mesh_mesh(A, B):\n pass", "def _generate_mesh(self):\n self._mesh_points = self._make_pos()", "def testMeshIteration(self):\n\t\tvds = [11,22,33];\n\t\tnds = [0, 11, 33, 66];\n\t\tlpos = nds[0:-1];\n\t\trpos = nds[1:];\n\t\tisonbnd = [True, False, False, True];\n\t\tm = Mesh.Mesh(vds)\n\t\tfor l,z in zip(vds, m.Zones()):\n\t\t\tself.assertEqual(l, z.length())\n\t\tfor x,n in zip(nds, m.Nodes()):\n\t\t\tself.assertAlmostEqual(x, n.x() );\n\t\tfor b,n in zip(isonbnd, m.Nodes()):\n\t\t\tself.assertEqual(b, n.onBoundary() );\n\t\tfor x,z in zip(lpos, m.Zones()):\n\t\t\tself.assertAlmostEqual(x, z.getNodeLeft().x() );\n\t\tfor x,z in zip(rpos, m.Zones()):\n\t\t\tself.assertAlmostEqual(x, z.getNodeRight().x() );", "def reconstruct_mesh(self):\n\n # NOTE: Before drawing the skeleton, create the materials once and for all to improve the\n # performance since this is way better than creating a new material per section or segment\n nmv.builders.create_skeleton_materials(builder=self)\n\n # Verify and repair the morphology, if required\n result, stats = nmv.utilities.profile_function(self.verify_morphology_skeleton)\n self.profiling_statistics += stats\n\n # Apply skeleton - based operation, if required, to slightly modify the skeleton\n result, stats = nmv.utilities.profile_function(\n nmv.builders.modify_morphology_skeleton, self)\n self.profiling_statistics += stats\n\n # Build the soma, with the default parameters\n result, stats = nmv.utilities.profile_function(nmv.builders.reconstruct_soma_mesh, self)\n self.profiling_statistics += 
stats\n\n # Build the arbors and connect them to the soma\n if self.options.mesh.soma_connection == nmv.enums.Meshing.SomaConnection.CONNECTED:\n\n # Build the arbors\n result, stats = nmv.utilities.profile_function(self.build_arbors, True)\n self.profiling_statistics += stats\n\n # Connect to the soma\n result, stats = nmv.utilities.profile_function(\n nmv.builders.connect_arbors_to_soma, self)\n self.profiling_statistics += stats\n\n # Build the arbors only without any connection to the soma\n else:\n # Build the arbors\n result, stats = nmv.utilities.profile_function(self.build_arbors, False)\n self.profiling_statistics += stats\n\n # Tessellation\n result, stats = nmv.utilities.profile_function(nmv.builders.decimate_neuron_mesh, self)\n self.profiling_statistics += stats\n\n # Surface roughness\n result, stats = nmv.utilities.profile_function(\n nmv.builders.add_surface_noise_to_arbor, self)\n self.profiling_statistics += stats\n\n # Add the spines\n result, stats = nmv.utilities.profile_function(nmv.builders.add_spines_to_surface, self)\n self.profiling_statistics += stats\n\n # Join all the objects into a single object\n result, stats = nmv.utilities.profile_function(\n nmv.builders.join_mesh_object_into_single_object, self)\n self.profiling_statistics += stats\n\n # Transform to the global coordinates, if required\n result, stats = nmv.utilities.profile_function(\n nmv.builders.transform_to_global_coordinates, self)\n self.profiling_statistics += stats\n\n # Collect the stats. of the mesh\n result, stats = nmv.utilities.profile_function(nmv.builders.collect_mesh_stats, self)\n self.profiling_statistics += stats\n\n # Done\n nmv.logger.header('Mesh Reconstruction Done!')\n nmv.logger.log(self.profiling_statistics)\n\n # Write the stats to file\n nmv.builders.write_statistics_to_file(builder=self, tag='skinning')", "def _update_mayavi(self):\n self._generate_mesh()\n self._mesh.mlab_source.set(x=self._mesh_points[0],\n y=self._mesh_points[1], z=self._mesh_points[2])", "def build_mesh_old(cells, vertices):\n mesh = Mesh()\n editor = MeshEditor()\n dim = len(vertices[0])\n if dim == 2:\n editor.open(mesh, 'triangle', 2, 2)\n else:\n editor.open(mesh, 'tetrahedron', 3, 3)\n editor.init_vertices(len(vertices))\n editor.init_cells(len(cells))\n for i, v in enumerate(vertices):\n editor.add_vertex(i, *v)\n for i, c in enumerate(cells):\n editor.add_cell(i, *c)\n editor.close()\n return mesh", "def _update_mayavi(self):\n self._generate_mesh()\n self._mesh.mlab_source.set(x=self._mesh_points['x'],\n y=self._mesh_points['y'], z=self._mesh_points['z'])", "def polyRemesh(refineThreshold=float, reduceThreshold=float, smoothStrength=float, nodeState=int, interpolationType=int, tessellateBorders=bool, constructionHistory=bool, caching=bool, name=\"string\"):\n pass", "def _orthogonal_meshing(self):\n global sweep_nodes, z_group_recorder\n self.assigned_node_tag = []\n self.previous_node_tag = []\n self.sweep_path_points = []\n\n for i, edge_obj in enumerate(self.multi_span_control_point_list[:-1]):\n start_point_x = edge_obj.node_list[0][0]\n start_point_z = edge_obj.node_list[0][2]\n\n start_edge_line = edge_obj\n end_edge_line = self.multi_span_control_point_list[i + 1]\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # first edge construction line\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # start_point_x = self.mesh_origin[0]\n # start_point_z = self.mesh_origin[2]\n # if skew 
angle of edge line is below threshold for orthogonal, perform mesh as oblique for edge line\n if np.abs(self.skew_1 + self.zeta) < self.skew_threshold[0]:\n # if angle less than threshold, assign nodes of edge member as it is\n current_sweep_nodes = start_edge_line.node_list\n # if curve mesh, rotate the edge sweep nodes\n current_sweep_nodes = self._rotate_edge_sweep_nodes(current_sweep_nodes)\n\n for z_count_int, nodes in enumerate(current_sweep_nodes):\n x_inc = start_point_x\n z_inc = start_point_z\n node_coordinate = [nodes[0] + x_inc, nodes[1], nodes[2] + z_inc]\n self._assign_node_coordinate(\n node_coordinate, z_count_int=z_count_int\n )\n\n # if loop assigned more than two nodes, link nodes as a transverse member\n if z_count_int > 0:\n # run sub procedure to assign\n # self.__assign_transverse_members(pre_node=self.assigned_node_tag[z_count_int - 1],\n # cur_node=self.assigned_node_tag[z_count_int])\n if not self.beam_element_flag:\n # skip and go to next x position\n continue\n if len(self.assigned_node_tag) >= 1:\n self._assign_edge_trans_members(\n self.assigned_node_tag[z_count_int - 1],\n self.assigned_node_tag[z_count_int],\n self.global_edge_count,\n )\n # get and link edge nodes from previous and current as skewed edge member\n self.edge_node_recorder.setdefault(\n self.assigned_node_tag[z_count_int - 1],\n self.global_edge_count,\n )\n self.edge_node_recorder.setdefault(\n self.assigned_node_tag[z_count_int],\n self.global_edge_count,\n )\n\n if len(self.assigned_node_tag) == len(self.noz):\n self.first_connecting_region_nodes = self.assigned_node_tag\n self.global_x_grid_count += 1\n self.assigned_node_tag = [] # reset variable\n # print(\"Edge mesh @ start span completed\")\n else: # perform edge meshing with variable distance between transverse members by looping through all control\n # points of edgecontrolline\n # loop for each control point of edge line with sweep nodes\n for z_count, int_point in enumerate(start_edge_line.node_list):\n # search point on sweep path line whose normal intersects int_point.\n ref_point_x, ref_point_z = self._search_x_point(\n int_point,\n )\n # record points\n self.sweep_path_points.append(\n [ref_point_x, self.y_elevation, ref_point_z]\n )\n # find m' of line between intersect int_point and ref point on sweep path\n m_prime, phi = get_slope(\n [ref_point_x, self.y_elevation, ref_point_z], int_point\n )\n # rotate sweep line such that parallel to m' line\n # if skew is positive, algorithm may mistake first point as orthogonal 90 deg, specify initial m based\n # on zeta\n if self.skew_1 > 0:\n angle = np.arctan(self.zeta / 180 * np.pi)\n else:\n angle = np.pi / 2 - np.abs(phi)\n current_sweep_nodes = self._rotate_sweep_nodes(angle)\n # get z group of first node in current_sweep_nodes - for correct assignment in loop\n z_group = start_edge_line.get_node_group_z(int_point)\n # check angle condition, if skew + zeta (offset from plane)\n if 90 + self.skew_1 + self.zeta > 90:\n sweep_nodes = current_sweep_nodes[z_count:]\n z_group_recorder = list(\n range(z_group, len(current_sweep_nodes))\n )\n elif 90 + self.skew_1 + self.zeta < 90:\n sweep_nodes = current_sweep_nodes[0 : (z_count + 1)]\n z_group_recorder = (\n list(range(0, z_group + 1)) if z_group != 0 else [0]\n )\n\n # on each control point, loop through sweeping nodes to create nodes\n for z_count_int, nodes in enumerate(sweep_nodes):\n x_inc = ref_point_x\n z_inc = ref_point_z\n node_coordinate = [nodes[0] + x_inc, nodes[1], nodes[2] + z_inc]\n\n exist_node, assigned_node = 
self._assign_node_coordinate(\n node_coordinate, z_count_int=z_group_recorder[z_count_int]\n )\n\n if exist_node:\n replace_ind = self.assigned_node_tag.index(assigned_node)\n self.assigned_node_tag = (\n self.assigned_node_tag[:replace_ind]\n + [exist_node]\n + self.assigned_node_tag[replace_ind + 1 :]\n )\n\n # if loop assigned more than two nodes, link nodes as a transverse member\n if not self.beam_element_flag:\n continue\n if z_count_int > 0:\n # run sub procedure to assign\n self._assign_transverse_members(\n pre_node=self.assigned_node_tag[z_count_int - 1],\n cur_node=self.assigned_node_tag[z_count_int],\n )\n\n if not self.beam_element_flag:\n continue\n # if loop is in first step, there is only one column of nodes, skip longitudinal assignment\n if z_count == 0:\n self.previous_node_tag = self.assigned_node_tag\n if z_count > 0:\n for pre_node in self.previous_node_tag:\n for cur_node in self.assigned_node_tag:\n cur_z_group = self.node_spec[cur_node][\"z_group\"]\n prev_z_group = self.node_spec[pre_node][\"z_group\"]\n if cur_z_group == prev_z_group:\n self._assign_longitudinal_members(\n pre_node=pre_node,\n cur_node=cur_node,\n cur_z_group=cur_z_group,\n )\n break # break assign long ele loop (cur node)\n\n # if angle is positive (slope negative), edge nodes located at the first element of list\n if len(self.assigned_node_tag) >= 1:\n if 90 + self.skew_1 + self.zeta > 90:\n self._assign_edge_trans_members(\n self.previous_node_tag[0],\n self.assigned_node_tag[0],\n self.global_edge_count,\n )\n # get and link edge nodes from previous and current as skewed edge member\n self.edge_node_recorder.setdefault(\n self.previous_node_tag[0], self.global_edge_count\n )\n self.edge_node_recorder.setdefault(\n self.assigned_node_tag[0], self.global_edge_count\n )\n elif 90 + self.skew_1 + self.zeta < 90:\n self._assign_edge_trans_members(\n self.previous_node_tag[-1],\n self.assigned_node_tag[-1],\n self.global_edge_count,\n )\n # get and link edge nodes from previous and current as skewed edge member\n self.edge_node_recorder.setdefault(\n self.previous_node_tag[-1], self.global_edge_count\n )\n self.edge_node_recorder.setdefault(\n self.assigned_node_tag[-1], self.global_edge_count\n )\n # update recorder for previous node tag step\n self.previous_node_tag = self.assigned_node_tag\n # update and reset recorders for next column of sweep nodes\n self.global_x_grid_count += 1\n if len(self.assigned_node_tag) == len(self.noz):\n self.first_connecting_region_nodes = self.assigned_node_tag\n self.ortho_previous_node_column = self.assigned_node_tag\n self.assigned_node_tag = []\n\n # print(\"Edge mesh @ start span completed\")\n if i < 1:\n self.global_edge_count += 1\n # --------------------------------------------------------------------------------------------\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # second edge construction line\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # get end point of sweep line = point which sweep path intersects end span construction line\n end_point_x = self.long_dim\n # end_point_z = line_func(self.sweep_path.m,self.sweep_path.c,end_point_x)\n end_point_z = self.sweep_path.get_line_function(end_point_x)\n if np.abs(self.skew_2 + self.zeta) < self.skew_threshold[0]:\n # if angle less than threshold, assign nodes of edge member as it is\n current_sweep_nodes = end_edge_line.node_list\n\n # get angle #TODO not generalized, improve here\n 
current_angle = -self.sweep_path.get_cartesian_angle(end_point_x)\n # rotate all about point x,z\n current_sweep_nodes = self._rotate_points(\n ref_point=current_sweep_nodes[0],\n rotating_point_list=current_sweep_nodes,\n angle=current_angle,\n )\n\n # edge_angle = self.sweep_path.get_cartesian_angle(x=end_point_x)\n # # if curve mesh, rotate the edge sweep nodes\n # #current_sweep_nodes = self._rotate_sweep_nodes(-edge_angle)\n # current_sweep_nodes = self._rotate_edge_sweep_nodes(current_sweep_nodes,angle=-edge_angle)\n\n for z_count_int, nodes in enumerate(current_sweep_nodes):\n x_inc = 0 # end_point_x\n z_inc = 0 # end_point_z\n node_coordinate = [nodes[0] + x_inc, nodes[1], nodes[2] + z_inc]\n self.node_spec.setdefault(\n self.node_counter,\n {\n \"tag\": self.node_counter,\n \"coordinate\": node_coordinate,\n \"x_group\": self.global_x_grid_count,\n \"z_group\": z_count_int,\n },\n )\n\n self.assigned_node_tag.append(self.node_counter)\n self.node_counter += 1\n # if loop assigned more than two nodes, link nodes as a transverse member\n if z_count_int > 0:\n # run sub procedure to assign\n # self.__assign_transverse_members(pre_node=self.assigned_node_tag[z_count_int - 1],\n # cur_node=self.assigned_node_tag[z_count_int])\n if not self.beam_element_flag:\n continue\n if len(self.assigned_node_tag) >= 1:\n self._assign_edge_trans_members(\n self.assigned_node_tag[z_count_int - 1],\n self.assigned_node_tag[z_count_int],\n self.global_edge_count,\n )\n # get and link edge nodes from previous and current as skewed edge member\n self.edge_node_recorder.setdefault(\n self.assigned_node_tag[z_count_int - 1],\n self.global_edge_count,\n )\n self.edge_node_recorder.setdefault(\n self.assigned_node_tag[z_count_int],\n self.global_edge_count,\n )\n # self.end_connecting_region_nodes = self.assigned_node_tag\n if len(self.assigned_node_tag) == len(self.noz):\n self.end_connecting_region_nodes = self.assigned_node_tag\n self.global_x_grid_count += 1\n self.global_edge_count += 1\n else:\n for z_count, int_point in enumerate(end_edge_line.node_list):\n # search point on sweep path line whose normal intersects int_point.\n ref_point_x, ref_point_z = self._search_x_point(\n int_point,\n )\n # record points\n self.sweep_path_points.append(\n [ref_point_x, self.y_elevation, ref_point_z]\n )\n # find m' of line between intersect int_point and ref point on sweep path\n m_prime, phi = get_slope(\n [ref_point_x, self.y_elevation, ref_point_z], int_point\n )\n\n # rotate sweep line such that parallel to m' line\n current_sweep_nodes = self._rotate_sweep_nodes(\n np.pi / 2 - np.abs(phi)\n )\n # get z group of first node in current_sweep_nodes - for correct assignment in loop\n z_group = end_edge_line.get_node_group_z(\n int_point\n ) # extract from class EdgeConstructionLine\n # check\n # condition\n if 90 + self.skew_2 + self.zeta > 90:\n sweep_nodes = current_sweep_nodes[0 : (z_count + 1)]\n z_group_recorder = (\n list(range(0, z_group + 1)) if z_group != 0 else [0]\n )\n elif 90 + self.skew_2 + self.zeta < 90:\n sweep_nodes = current_sweep_nodes[z_count:]\n z_group_recorder = list(\n range(z_group, len(current_sweep_nodes))\n )\n for z_count_int, nodes in enumerate(sweep_nodes):\n x_inc = ref_point_x\n z_inc = ref_point_z\n node_coordinate = [nodes[0] + x_inc, nodes[1], nodes[2] + z_inc]\n\n exist_node, assigned_node = self._assign_node_coordinate(\n node_coordinate, z_count_int=z_group_recorder[z_count_int]\n )\n # if exist_node:\n # i = self.assigned_node_tag.index(assigned_node)\n # 
self.assigned_node_tag = self.assigned_node_tag[:i] + [\n # exist_node] + self.assigned_node_tag[i + 1:]\n\n if not self.beam_element_flag:\n continue\n # if loop assigned more than two nodes, link nodes as a transverse member\n if z_count_int > 0:\n # run sub procedure to assign\n self._assign_transverse_members(\n pre_node=self.assigned_node_tag[z_count_int - 1],\n cur_node=self.assigned_node_tag[z_count_int],\n )\n\n if not self.beam_element_flag:\n continue\n\n # if loop is in first step, there is only one column of nodes, skip longitudinal assignment\n if z_count == 0:\n self.previous_node_tag = self.assigned_node_tag\n if z_count > 0:\n for pre_node in self.previous_node_tag:\n for cur_node in self.assigned_node_tag:\n cur_z_group = self.node_spec[cur_node][\"z_group\"]\n prev_z_group = self.node_spec[pre_node][\"z_group\"]\n if cur_z_group == prev_z_group:\n self._assign_longitudinal_members(\n pre_node=pre_node,\n cur_node=cur_node,\n cur_z_group=cur_z_group,\n )\n break # break assign long ele loop (cur node)\n\n # if angle is positive (slope negative), edge nodes located at the first element of list\n if len(self.assigned_node_tag) >= 1:\n if 90 + self.skew_2 + self.zeta > 90:\n self._assign_edge_trans_members(\n self.previous_node_tag[-1],\n self.assigned_node_tag[-1],\n self.global_edge_count,\n )\n self.edge_node_recorder.setdefault(\n self.previous_node_tag[-1], self.global_edge_count\n )\n self.edge_node_recorder.setdefault(\n self.assigned_node_tag[-1], self.global_edge_count\n )\n elif 90 + self.skew_2 + self.zeta < 90:\n self._assign_edge_trans_members(\n self.previous_node_tag[0],\n self.assigned_node_tag[0],\n self.global_edge_count,\n )\n self.edge_node_recorder.setdefault(\n self.previous_node_tag[0], self.global_edge_count\n )\n self.edge_node_recorder.setdefault(\n self.assigned_node_tag[0], self.global_edge_count\n )\n # update recorder for previous node tag step\n self.previous_node_tag = self.assigned_node_tag\n # update and reset recorders for next column of sweep nodes\n self.global_x_grid_count += 1\n if len(self.assigned_node_tag) == len(self.noz):\n self.end_connecting_region_nodes = self.assigned_node_tag\n self.ortho_previous_node_column = self.assigned_node_tag\n self.assigned_node_tag = []\n self.global_edge_count += 1\n # print(\"Edge mesh @ end span completed\")\n # --------------------------------------------------------------------------------------------\n self.assigned_node_tag = [] # reset\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # remaining distance mesh with uniform spacing\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n x_first = self.first_connecting_region_nodes[0]\n x_second = self.end_connecting_region_nodes[0]\n # loop each point in self.nox\n cor_fir = self.node_spec[x_first][\"coordinate\"]\n cor_sec = self.node_spec[x_second][\"coordinate\"]\n # get x coordinate for uniform region\n if self.transverse_mbr_x_spacing_list:\n raise Exception(\n NameError, \"OrthoMesh can not be paired wit custom spacing\"\n )\n else:\n self.uniform_region_x = np.linspace(\n cor_fir[0], cor_sec[0], self.multi_span_num_points[i]\n )\n\n for z_count, x in enumerate(self.uniform_region_x[1:-1]):\n # get slope, m at current point x\n z = self.sweep_path.get_line_function(x)\n # get sweep nodes\n current_sweep_nodes = self.sweeping_nodes\n # shift all points by +x and +z\n shift_sweep_nodes = [\n [point[0] + x, point[1], point[2] + z]\n for 
point in current_sweep_nodes\n ]\n # get angle #TODO not generalized, improve here\n current_angle = -self.sweep_path.get_cartesian_angle(x)\n # rotate all about point x,z\n current_sweep_nodes = self._rotate_points(\n ref_point=shift_sweep_nodes[0],\n rotating_point_list=shift_sweep_nodes,\n angle=current_angle,\n )\n\n # current_sweep_nodes = self._rotate_edge_sweep_nodes(current_sweep_nodes, angle=-current_angle)\n # rotating sweep nodes about current nox increment point of uniform region\n # if angle less than threshold, assign nodes of edge member as it is\n for z_count_int, nodes in enumerate(current_sweep_nodes):\n node_coordinate = [nodes[0], nodes[1], nodes[2]]\n self._assign_node_coordinate(\n node_coordinate, z_count_int=z_count_int\n )\n\n if not self.beam_element_flag:\n continue\n # if loop assigned more than two nodes, link nodes as a transverse member\n if z_count_int > 0:\n # run sub procedure to assign\n self._assign_transverse_members(\n pre_node=self.assigned_node_tag[z_count_int - 1],\n cur_node=self.assigned_node_tag[z_count_int],\n )\n if not self.beam_element_flag:\n continue\n\n if z_count == 0:\n self.previous_node_tag = self.first_connecting_region_nodes\n elif z_count > 0 and z_count != len(self.uniform_region_x[1:-1]) - 1:\n pass\n for pre_node in self.previous_node_tag:\n for cur_node in self.assigned_node_tag:\n cur_z_group = self.node_spec[cur_node][\"z_group\"]\n prev_z_group = self.node_spec[pre_node][\"z_group\"]\n if cur_z_group == prev_z_group:\n self._assign_longitudinal_members(\n pre_node=pre_node,\n cur_node=cur_node,\n cur_z_group=cur_z_group,\n )\n break # break assign long ele loop (cur node)\n # update and reset recorders for next column of sweep nodes\n self.global_x_grid_count += 1\n # update previous node tag recorder\n if z_count != len(self.uniform_region_x[1:-1]) - 1:\n self.previous_node_tag = self.assigned_node_tag\n self.assigned_node_tag = []\n else:\n self.previous_node_tag = self.assigned_node_tag\n self.assigned_node_tag = self.end_connecting_region_nodes\n\n # Extra step to connect uniform region with nodes along end span edge region\n # if number of transverse in uniform region is 2 or less, assigne the first and end connecting\n # region nodes as long elements\n if len(self.uniform_region_x) <= 2:\n self.previous_node_tag = self.end_connecting_region_nodes\n self.assigned_node_tag = self.first_connecting_region_nodes\n # or else assign the previous node of uniform region to end connecting region node\n for pre_node in self.previous_node_tag:\n if not self.beam_element_flag:\n break\n for cur_node in self.assigned_node_tag:\n cur_z_group = self.node_spec[cur_node][\"z_group\"]\n prev_z_group = self.node_spec[pre_node][\"z_group\"]\n if cur_z_group == prev_z_group:\n self._assign_longitudinal_members(\n pre_node=pre_node,\n cur_node=cur_node,\n cur_z_group=cur_z_group,\n )\n break\n self.assigned_node_tag = []\n self.previous_node_tag = []", "def remesh(self,max_length = None):\n dz = self.dz\n mesh = self.mesh\n bmesh = BoundaryMesh(mesh,'exterior',order=True)\n x = bmesh.coordinates()[:,0]\n\n if max_length == None:\n max_length = np.max(x)*10\n\n pts = sort_boundary_nodes(bmesh)\n\n # Now remove nodes that are plast the cutoff length and\n pt_new = []\n pt_flag = None\n length_flag = True\n xcliff = max_length\n for n in range(len(pts)):\n pt = pts[n]\n # We will stack x points along the calving front if they exceed the distance\n if near(pt[0],0) and pt[1]<self.bed_fun(0.0):\n pt_new.append(pt)\n else:\n if pt[0]<=xcliff:\n if 
len(pt_new)==0:\n pt_new.append(pt)\n else:\n # If there is at least one point, we calculate the distance\n # between the new and old point\n dist = np.sqrt((pt[0]-pt_new[-1][0])**2+(pt[1]-pt_new[-1][1])**2)\n pt_new.append(pt)\n\n pt_new = np.array(pt_new)\n # The characteristic length is the radius so twice the mesh size\n new_mesh = meshGmsh(pt_new.transpose(),dz*2)\n\n\n #mesh = Mesh()\n #with XDMFFile(\"tmp.xdmf\") as infile:\n # infile.read(mesh)\n self.mesh=new_mesh\n self.mesh.bounding_box_tree().build(self.mesh)\n self.generate_function_spaces()\n return self" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists objects related via a given property
def list_related(self, prop="http://www.w3.org/2004/02/skos/core#related"):
    return [e.object for e in Triple.objects.filter(subject=self, predicate__uri=prop)
            .order_by("predicate")]
[ "def get_related_properties(self):\n return []", "def get_objects_list(self) -> list:\n user = self.request.user\n site_name = self.model_admin.admin_site.name\n\n objects = []\n for obj in self.queryset:\n # Investigate the field paths in display_fields:\n # - if the path follows a relation, add a link to each related\n # object that is going to be impacted by the action's changes\n # - if it's a field of the object, get that field's value\n sub_list = []\n for field_path in self.display_fields:\n field = get_fields_from_path(self.opts.model, field_path)[0]\n if field.is_relation:\n related_pks = self.queryset.filter(pk=obj.pk).values_list(\n field.name, flat=True\n ).order_by(field.name)\n for pk in related_pks:\n if not pk:\n # values_list() will also gather None values\n continue # pragma: no cover\n related_obj = field.related_model.objects.get(pk=pk)\n sub_list.append(get_object_link(related_obj, user, site_name))\n else:\n value = display_for_field(getattr(obj, field.name), field, '---')\n verbose_name = field.verbose_name\n if verbose_name == field.name.replace('_', ' '):\n # The field has the default django verbose_name\n verbose_name = verbose_name.title()\n sub_list.append(\"{}: {}\".format(verbose_name, str(value)))\n if self.display_fields:\n links = (get_object_link(obj, user, site_name), sub_list)\n else:\n links = (get_object_link(obj, user, site_name),) # type: ignore[assignment]\n objects.append(links)\n return objects", "def GetRelatedObjects(cybox_object, klass):\n for related in cybox_object.parent.related_objects:\n related_object = related.properties\n if isinstance(related_object, klass):\n yield related_object", "def select_relationships(self,\n label: str = None,\n prop_key: DB_TYPE = None,\n prop_value: DB_TYPE = None,\n query_cond: str = None) -> List[Relationship]:", "def get_relations_of( cls, obj, from_attribute=None ):\n catalog = getUtility( ICatalog )\n intids = getUtility( IIntIds )\n obj_id = intids.getId( obj )\n items = list( catalog.findRelations({\n 'from_id' : obj_id,\n }) )\n items += list( catalog.findRelations({\n 'to_id' : obj_id,\n }))\n if from_attribute:\n condition = lambda r:r.from_attribute==from_attribute and \\\n not r.is_broken()\n items = filter( condition, items )\n return items", "def do_show(self, args, obj):\n id = args.id\n act = getattr(self.endpoint, obj)\n if args.property is None:\n #No property specified, print whole item.\n print act[id]\n else:\n item = act[id]\n for path_section in args.property.split('.'):\n\n # Lookup by object attribute\n if hasattr(item, path_section):\n item = getattr(item, path_section)\n continue\n else:\n try:\n # Lookup by dictionary key\n item = item[path_section]\n continue\n except:\n try:\n # Lookup by list index\n item = item[int(path_section)]\n continue\n except:\n pass\n\n # None of the lookup methods succeeded, so property path must\n # be invalid.\n raise ValueError(\n 'Cannot resolve \"%s\" from property string \"%s\" for'\n ' %s %s' % (\n path_section,\n args.property,\n singularize(obj),\n act[id].name\n )\n )\n\n # Assume the property is JSON and try to pretty-print. 
If that\n # fails, print the item normally\n try:\n print json.dumps(item, sort_keys=True, indent=2,\n separators=(',', ':'))\n except:\n print item", "def related_by_category(obj, count, collected_so_far, mods=[], only_from_same_site=True):\n related = []\n # top objects in given category\n if count > 0:\n from ella.core.models import Listing\n cat = obj.category\n listings = Listing.objects.get_queryset_wrapper(\n category=cat,\n content_types=[ContentType.objects.get_for_model(m) for m in mods],\n exclude=obj\n )\n for l in listings[0:count + len(collected_so_far)]:\n t = l.publishable\n if t not in collected_so_far and t not in related:\n related.append(t)\n count -= 1\n\n if count <= 0:\n return related\n return related", "def relationship(model, type):\n return getattr(model, type + '_entries', None)", "def get_object_list(self, url):\n raise NotImplementedError", "def get_related_pobjs(self, prep_token):\n\n return self.get_related_tokens(prep_token, pobj)", "def get_postrelated_by(obj, **filters):\r\n if getattr(obj, 'postrelated_post', False):\r\n cachekey = u'getpostrelatedby-{}'.format(hash(\r\n frozenset(filters.items())))\r\n\r\n _cache = cache.get(cachekey)\r\n if _cache:\r\n return _cache\r\n\r\n containers = [i.related for i in obj.postrelated_post.filter(**filters)\r\n .order_by('order')]\r\n\r\n cache.set(cachekey, containers, 3600)\r\n return containers\r\n return ''", "def list(self):\n return self._objects", "def get_related_pages(page):\n pass", "def get_related_objects(self, params):\n\n # Get the passed items\n values = params['object_list']\n object_type = params['object_type']\n related_object_type = params['related_object_type']\n\n # Make sure the object type is valid\n action_result = ActionResult(dict(params))\n obj_data = Utils.match_name_to_object(object_type)\n related_obj_data = Utils.match_name_to_object(related_object_type)\n if not obj_data or not related_obj_data:\n action_result.set_status(phantom.APP_ERROR, \"Invalid object type provided!\")\n return action_result\n\n self.save_progress(\"Fetching related [{}] in ThreatQ\".format(obj_data.get('display_name_plural')))\n\n # Convert the input values into a list\n try:\n items = self.get_value_list(values)\n except Exception as e:\n error_msg = self._get_error_message_from_exception(e)\n msg = '{} -- {}'.format(error_msg, traceback.format_exc())\n self.debug_print(msg)\n action_result.set_status(phantom.APP_ERROR, THREATQ_ERR_PARSE_OBJECT_LIST.format(error=error_msg))\n return action_result\n\n results = []\n for index, item in enumerate(items):\n # Add action results\n action_result = ActionResult(dict(params))\n\n base_obj = obj_data.get(\"collection\")\n related_obj = related_obj_data.get(\"collection\")\n\n # Get results from ThreatQ\n self.save_progress(\"Querying for {}'s related {} - {}/{}\".format(\n obj_data.get(\"display_name\"), related_obj_data.get(\"display_name_plural\"), index + 1, len(items)))\n\n try:\n result = self.query_object_details(base_obj, item, exact=True, relationships=False)\n except Exception as e:\n error_msg = self._get_error_message_from_exception(e)\n msg = '{} -- {}'.format(error_msg, traceback.format_exc())\n self.debug_print(msg)\n action_result.set_status(phantom.APP_ERROR, THREATQ_ERR_QUERY_OBJECT_DETAILS.format(error=error_msg))\n results.append(action_result)\n continue\n\n if not result:\n action_result.set_status(phantom.APP_SUCCESS, THREATQ_NO_DATA)\n results.append(action_result)\n continue\n\n related_objects = []\n\n try:\n related_res = self.tq.get(\n 
'/api/{}/{}/{}'.format(base_obj, result[0].oid, related_obj), withp=\"attributes\").get('data', [])\n except Exception as e:\n error_msg = self._get_error_message_from_exception(e)\n msg = '{} -- {}'.format(error_msg, traceback.format_exc())\n self.debug_print(msg)\n action_result.set_status(phantom.APP_ERROR, THREATQ_ERR_GET_RELATED_OBJECTS.format(error=error_msg))\n results.append(action_result)\n continue\n\n for rel in related_res:\n rel_obj = ThreatQObject(self.tq, related_obj)\n rel_obj.fill_from_api_response(rel)\n related_objects.append(rel_obj)\n\n msg = \"ThreatQ found [{}] result(s)\".format(len(related_objects))\n self.save_progress(msg)\n\n # Set the status of the request\n if len(related_objects) == 0:\n action_result.set_status(phantom.APP_SUCCESS, THREATQ_NO_DATA)\n else:\n action_result.set_status(phantom.APP_SUCCESS, msg)\n\n # Add in summary information\n action_result.update_summary({\"total\": len(related_objects)})\n try:\n action_result = self.set_data_response(action_result, related_objects)\n except Exception as e:\n error_msg = self._get_error_message_from_exception(e)\n msg = '{} -- {}'.format(error_msg, traceback.format_exc())\n self.debug_print(msg)\n action_result.set_status(phantom.APP_ERROR, THREATQ_ERR_SET_DATA_RESPONSE.format(error=error_msg))\n\n # Add results\n results.append(action_result)\n\n return results", "def references(self):\n return ( rd for rd in ReferenceDatum.all() if self == rd.property )", "def collect_properties(service_instance, view_ref, obj_type, path_set=None, include_mors=False):\n collector = service_instance.content.propertyCollector\n\n # Create object specification to define the starting point of\n # inventory navigation\n obj_spec = vmodl.query.PropertyCollector.ObjectSpec()\n obj_spec.obj = view_ref\n obj_spec.skip = True\n\n # Create a traversal specification to identify the path for collection\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()\n traversal_spec.name = 'traverseEntities'\n traversal_spec.path = 'view'\n traversal_spec.skip = False\n traversal_spec.type = view_ref.__class__\n obj_spec.selectSet = [traversal_spec]\n\n # Identify the properties to the retrieved\n property_spec = vmodl.query.PropertyCollector.PropertySpec()\n property_spec.type = obj_type\n\n if not path_set:\n property_spec.all = True\n\n property_spec.pathSet = path_set\n\n # Add the object and property specification to the\n # property filter specification\n filter_spec = vmodl.query.PropertyCollector.FilterSpec()\n filter_spec.objectSet = [obj_spec]\n filter_spec.propSet = [property_spec]\n\n # Retrieve properties\n props = collector.RetrieveContents([filter_spec])\n\n data = []\n for obj in props:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n\n if include_mors:\n properties['obj'] = obj.obj\n\n data.append(properties)\n return data", "def get_property_for(self, instance):\n results = {}\n sorted_roles = defaultdict(list)\n for acl in getattr(instance, self.alias, []):\n ac_role = acl.ac_role.name\n person_id = acl.person.id\n if not results.get(acl.ac_role.name, None):\n results[acl.ac_role.name] = {}\n sorted_roles[ac_role].append(acl.person.email)\n results[ac_role][\"{}-email\".format(person_id)] = acl.person.email\n results[ac_role][\"{}-name\".format(person_id)] = acl.person.name\n for role in sorted_roles:\n results[role][\"__sort__\"] = u':'.join(sorted(sorted_roles[role]))\n return results", "def relationship(self) -> List[Relationship]:\n return self._relationship", "def 
test_get_node_relationship_all_using_get(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }