| query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict) |
---|---|---|---|
Just to be sure the constants returned by the generator are reasonable and that the correct number of them is produced.
|
def test_generate_constants( self ) :
print( "test_generate_constants" )
entropy_bits = \
0xd262fbc7cbc7e757d16234bd7e88f12cc5dfef7c2ee82c9a4e289113d83d8724
n_prngs = 19
for integer_width in [ 64, 128, 256 ] :
for n_prngs in [ 7, 19, 31 ] :
constant_generator = generate_constants( integer_width, n_prngs,
entropy_bits )
for _ in range( n_prngs ) :
multiplier, addition, lag, delta = next( constant_generator)
print( multiplier, addition, lag, delta )
try :
multiplier, addition, lag, delta = next( constant_generator)
except StopIteration :
print( "StopIteration -- Proper result" )
print( "success test_generate_constants" )
|
[
"def gen_z_val():\n while True:\n num = random.random()\n f = 1 / (1 + num)\n if random.random() <= f:\n return num",
"def gen_bouncy():\n\tglobal num\n\twhile True:\n\t\tnum+=1\n\t\tif (not is_increasing_number(num) and not is_decreasing_number(num)):\n\t\t\tyield num",
"def next_int(self, min, max):\n\t\treturn min + int((max + 1) * self.__rand.random())",
"def test_generate_integers_with_range_excluding_specific_digits(self):\n for i in range(iterations):\n for not_equal in range(10):\n value = get_random_int(0, 9, not_equal)\n self.assertIsInstance(value, int)\n self.assertGreaterEqual(value, 0)\n self.assertLessEqual(value, 9)\n self.assertNotEqual(value, not_equal)",
"def BuildNumber(self) -> int:",
"def test_id_does_not_increment_when_reading_value(self):\n generator = LocalScanIdGenerator(start=5)\n expected = [5, 5, 5, 5, 5]\n actual = [generator.value for _ in range(5)]\n assert actual == expected",
"def _random_whole(self):\n random_whole = lambda: self._raw_random_whole(self.random_whole_bit_count)\n n = random_whole()\n\n if self.randomize_random_whole_bit_count:\n # modify the number of bits the next call will use\n\n offset = random_whole()\n \n if self.random_whole_bit_count >= 3:\n offset = math.ceil(self._severe_log(offset))\n offset *= -1 if self._random_bool() else 1\n self.random_whole_bit_count += offset\n\n if self.random_whole_bit_count <= 0:\n self.random_whole_bit_count = 1\n return n",
"def generate_numbers(self):\n for i in xrange(624):\n y = (self.MT[i] & self.bitmask_2) + (self.MT[(i + 1 ) % 624] & self.bitmask_3)\n self.MT[i] = self.MT[(i + 397) % 624] ^ (y >> 1)\n if y % 2 != 0:\n self.MT[i] ^= 2567483615",
"def atomic_number(self) -> int:\n return self.random.randint(1, 119)",
"def error_vector_generate_c(self, sequence):\r\n result = ''\r\n for bit in sequence:\r\n rand = random.uniform(0, 1.0)\r\n if rand <= self.probability and bit != '0':\r\n result += '1'\r\n else:\r\n result += '0'\r\n self.e = int(result, 2)",
"def amount(gen, limit=float(\"inf\")):\n size = 0\n for unused in gen:\n size += 1\n if size >= limit:\n break\n return size",
"def next_float(self, min, max):\n\t\treturn min + (max * self.__rand.random())",
"def gas_gen():\r\n\tgas=0\r\n\tgas_presence=0\r\n\tgas_presence=stellagama.dice(2,6)\r\n\tif gas_presence >= 5:\r\n\t\tgas=stellagama.dice(1, 6) - 2\r\n\t\tif gas < 1:\r\n\t\t\tgas = 1\r\n\telse:\r\n\t\tgas=0\r\n\treturn gas #output gas giant number\r",
"def gen_int_param(n, codes):\r\n ret = [0]*n\r\n \r\n ind_max = len(codes)-1\r\n for k in range(n):\r\n ind = random.randint(0, ind_max)\r\n ret[k] = codes[ind]\r\n \r\n return ret",
"def generation_account_number():\n return random.randrange(1111111111, 9999999999)",
"def _normal_random_whole(self):\n n = self._random_whole()\n\n if self.random_whole_bit_count >= 3:\n n = math.ceil(self._severe_log(n))\n return n",
"def main():\n for i in range(10):\n NUM_RANDOM = random.randint(MIN_RANDOM, MAX_RANDOM)\n print(NUM_RANDOM)",
"def get_random_open():\r\n return round((random.uniform(300,1000)),2)",
"def nextPsuedoRandNum(num, length):\n return ((num * 113) + 137) % length"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Just what it says: do we produce a good random table? Real randomness is not the goal of these tests; that is Dieharder's job, applied to the components. This just makes sure nothing stupid is wrong. Dieharder is part of the final acceptance test; these are just simple software checks.
|
def test_generate_random_table( self ) :
print( "\ntest_generate_random_table" )
self.test_name = 'test_generate_random_table'
self.setUp()
str_random_table = generate_random_table( self.the_rnt, 4096, 64 )
# that is strings, so need an integer array
the_program = '\nN_K_RANDOM_BYTES=[\n' + \
str_random_table + ']\n'
N_K_RANDOM_BYTES = convert_string( the_program )
self.assertTrue( count_duplicates( N_K_RANDOM_BYTES ) == 0 )
self.assertTrue( count_zeros( N_K_RANDOM_BYTES ) == 0 )
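count_duplicates and count_zeros are not defined in this excerpt; hypothetical helpers with the behaviour the two assertions rely on could be as simple as:
def count_duplicates( the_table ) :
    # number of entries that repeat; 0 means every value is unique
    return len( the_table ) - len( set( the_table ) )

def count_zeros( the_table ) :
    # number of zero entries in the table
    return sum( 1 for value in the_table if value == 0 )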
|
[
"def test_fair():\n die = Die()\n \n # Set the number of rolls\n rolls = 1000000\n \n # Create a dictionary keep tally\n tally={}\n for i in range(1,7):\n tally[i] =0\n #Roll the dice 'rolls' times\n for i in range(0,rolls):\n tally[die.roll()]+=1\n \n # Assert that the probability is correct\n for i in range(1,7):\n assert tally[i]/rolls == pytest.approx(1/6, 1e-2)",
"def random_good_hues():\n return random.choice(GOOD_HUES)",
"def testRandomMedium():\n simulateRandom(\n maxCaps=6,\n maxSpecs=5,\n maxResources=15,\n runsPerConfig=100,\n numConfigs=100,\n verifyFunc=checkSolve,\n seed=int(time.time())\n )",
"def hashTableRandomTest(size):\n ht = HashTable()\n dic = {}\n time_ht = datetime.timedelta(0)\n time_dic = datetime.timedelta(0)\n\n for i in range(size):\n k, v = random.randint(1,100000), random.randint(-99999,99999)\n d = datetime.datetime.now()\n dic[k] = v\n time_dic += (datetime.datetime.now() - d)\n d = datetime.datetime.now()\n ht[k] = v\n time_ht += (datetime.datetime.now() - d)\n\n correct = True\n if not hashTableCompare(ht, dic):\n print \"Hash table comparison failed, after insertions. ht =\", ht, \" dic =\", dic\n correct = False\n \n keys = dic.keys()\n\n for i in range(size//4):\n index = random.randint(0,len(keys)-1)\n k = keys[index]\n d = datetime.datetime.now()\n if k in ht:\n del dic[k]\n time_dic += (datetime.datetime.now() - d)\n d = datetime.datetime.now()\n del ht[k]\n time_ht += (datetime.datetime.now() - d)\n\n if not hashTableCompare(ht, dic):\n print \"Hash table comparison failed, after deletions. ht =\", ht, \" dic =\", dic\n correct = False\n\n for i in range(size//4):\n k = random.randint(-999,999)\n d = datetime.datetime.now()\n if k in ht:\n del dic[k]\n time_dic += (datetime.datetime.now() - d)\n d = datetime.datetime.now()\n del ht[k]\n time_ht += (datetime.datetime.now() - d)\n\n if not hashTableCompare(ht, dic):\n print \"Hash table comparison failed, after random deletions. ht =\", ht, \" dic =\", dic\n correct = False \n\n print \"Time comparison: HashTable\",time_ht, \"dictionary\", time_dic\n \n return correct",
"def randoms():\n from hypothesis.searchstrategy.misc import RandomStrategy\n return RandomStrategy(integers())",
"def test_rnd_paper_count():\n rnd_entries = rldb.find_all({\n 'source-title': 'Exploration by Random Network Distillation',\n })\n\n assert len(rnd_entries) == (\n 0\n + 6 # Dynamics\n + 6 # PPO\n + 6 # RND\n )",
"def test_random(self):\n\t\tfor _ in range(1000):\n\t\t\tself.assertReadData(rnd.randrange(0, len(self.basis)))",
"def test_linear_probing():\n keys = (12, 44, 13, 88, 23, 94, 11, 39, 20, 16, 5)\n entries = [13, 94, 39, 16, 5, 44, 88, 11, 12, 23, 20]\n hash_table = chap10.SimpleLinearProbeHashTable()\n # Test operations on empty hash table\n with pytest.raises(KeyError):\n hash_table[11] # Accessing empty table\n with pytest.raises(KeyError):\n del hash_table[11] # Deleting item in empty table\n for _ in hash_table:\n pass # Iterate over empty table\n # Verify solution to exercise\n for key in keys:\n hash_table[key] = key # Insert keys\n assert len(hash_table) == 11\n for idx, bucket in enumerate(hash_table):\n assert entries[idx] == bucket # Compare entries to expected\n # Test SimpleHashTable methods\n assert hash_table[13] == 13 # Valid key\n with pytest.raises(KeyError):\n hash_table[111] = 222 # Table is full\n with pytest.raises(KeyError):\n hash_table[3] # Accessed key does not exist\n with pytest.raises(KeyError):\n del hash_table[3] # Deleted key does not exist\n hash_table[12] = 999\n assert hash_table[12] == 999 # Test modifying keys\n del hash_table[12]\n with pytest.raises(KeyError):\n hash_table[12] # Verify key is deleted\n assert len(hash_table) == 10\n assert hash_table[5] == 5\n del hash_table[5]\n assert len(hash_table) == 9\n hash_table[11] = 7\n assert len(hash_table) == 9",
"def run_example():\n num_die_sides = 8\n hand = (4,4)\n \n hand_score = score(hand)\n \n print \"score of hand\", hand_score\n \n print\"-----------expected value-------------\"\n \n held_dice = [4,4] \n num_die_sides = 6 \n num_free_dice= 5\n \n exp_value = expected_value(held_dice, num_die_sides, num_free_dice)\n\n print \"expected value:\", exp_value\n \n print gen_all_holds(hand)\n \n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score",
"def test_top_economical_bowlers_in_2015(self):\n\n \"\"\"First test case.\"\"\"\n expected_output = {'RA Jadeja': 6.0, 'DJ Bravo': 36.0, 'SP Narine': 0.0, 'AD Russell': 6.0, 'PP Chawla': 0.0}\n sql_methods.create_table_populate('resources/matches.csv', self.connection)\n sql_methods.create_table_populate('resources/problem_4/test_1.csv', self.connection, deliveries_table)\n calculated_output = data.top_economical_bowlers_in_2015(self.connection)\n self.assertEqual(expected_output, calculated_output)\n\n \"\"\"Second test case.\"\"\"\n sql_methods.delete_table(self.connection)\n expected_output = {}\n sql_methods.create_table_populate('resources/matches.csv', self.connection)\n sql_methods.create_table_populate('resources/problem_4/test_2.csv', self.connection, deliveries_table)\n calculated_output = data.top_economical_bowlers_in_2015(self.connection)\n self.assertEqual(expected_output, calculated_output)",
"def test_always_valid_roll():\n \n die= Die()\n \n for i in range(10000):\n roll= die.roll()\n \n assert roll> 0 and roll < 7",
"def random():\n positive_t = (random.getrandbits(1)==0)\n while True:\n try:\n # has 50% chance to succeed\n return JacobiQuartic.from_s(fe_random(),positive_t)\n except NotASquare:\n continue",
"def test(numTrials):\n # Your Code Here\n n = 100\n yes = 0\n for i in range(numTrials):\n africa = 0\n europe = 0\n samerica = 0\n asia = 0\n for i in range(n):\n rand = random.random()\n if rand < 0.25:\n africa += 1\n if rand < 0.5 and rand > 0.25:\n europe += 1\n if rand < 0.75 and rand > 0.5:\n samerica += 1\n if rand > 0.75:\n asia += 1\n #print africa, samerica, asia, europe\n if asia >= 30 or africa >= 30 or europe >= 30 or samerica >= 30:\n yes += 1\n prob = float(yes)/float(numTrials)\n return prob",
"def test_random_sample_1962(self):\n\t\t#-Load Random Sample From RAW DATASET-#\n\t\tyears = [1962]\n\t\tobj = self.obj\n\t\trs = import_csv_as_statatypes(TEST_DATA_DIR+\"nberfeenstra_wtf62_random_sample.csv\") \t\t#random sample\n\t\tdel rs['obs']\n\t\tassert_rows_in_df(df=self.obj.raw_data, rows=rs)\n\t\tassert_unique_rows_in_df(df=self.obj.raw_data, rows=rs)",
"def testRandomLarge():\n simulateRandom(\n maxCaps=8,\n maxSpecs=12,\n maxResources=100,\n runsPerConfig=100,\n numConfigs=100,\n verifyFunc=checkValid,\n seed=int(time.time())\n )",
"def test3():\n\n #TODO: Test for a couple normal skier codes.\n #TODO: Then check for corner cases like under certain weights, heights, or under/over ages\n print(\"Not yet implemented\")",
"def test_random(self):\n random_tx_trytes = TransactionTrytes.random()\n self.assertEqual(len(random_tx_trytes), TransactionTrytes.LEN)",
"def _generateTestCase():\n\tN = random.randint(3,_MAX_SAMPLES)\n\t########################################################################\n\t# Choose covariate degeneracy first as it will constrain the types\n\t# of covariates required to manifest it.\n\tdegen = None\n\tcount = None\n\tconst = None # ...unless set otherwise below.\n\tif random.random() < _PROB_COV_DEGEN:\n\t\tdegen = random.choice([\"count\",\"const\"])\n\t\tconst = random.choice([\"num\",\"cat\"])\n\t\tif degen==\"count\":\n\t\t\tcount = random.randint(0,2)\n\t\t\ttypes = random.randint(0,3) # any features will do\n\t\telif const==\"num\":\n\t\t\ttypes = random.randint(0,2) # pair must have a numerical feature\n\t\telse:\n\t\t\ttypes = random.randint(1,3) # pair must have a categorical feature\n\telse: # no degeneracy in this case\n\t\ttypes = random.randint(0,3) # any features will do\n\t########################################################################\n\t# Now generate covariates.\n\t# Both can contain univariate degeneracy.\n\tk = [ random.randint( 1 if random.random() < _PROB_UNI_DEGEN else 2, _MAX_K ) for n in range(2) ]\n\tr = [ random.randint( 1, N ) if random.random() < _PROB_UNI_DEGEN else 0, 0 ]\n\ti = random.randint(0,1) # to insure both parameters not same\n\t# Generate the covariate pair. \n\t# It might contain univariate degeneracy, but it does not yet contain\n\t# any (forced) covariate degeneracy...though some could have occurred\n\t# entirely by chance.\n\tif types == 0: # NN\n\t\tp = CovariatePair( Num(N,rep=r[i]), Num(N,rep=r[1-i]) )\n\telif types == 1: # CN\n\t\tp = CovariatePair( Cat(N,k[0]), Num(N,rep=r[0]) )\n\telif types == 2: # NC\n\t\tp = CovariatePair( Num(N,rep=r[0]), Cat(N,k[0]) )\n\telse: # CC\n\t\tp = CovariatePair( Cat(N,k[i]) , Cat(N,k[1-i]) )\n\t########################################################################\n\t# Now muck up the covariates if degeneracy was selected above.\n\tif degen:\n\t\tif degen==\"count\":\n\t\t\tp.fewSurvivors( count )\n\t\telif const==\"num\":\n\t\t\tp.constScalar()\n\t\telse:\n\t\t\tp.constCategory()\n\t# Return a \"specification\" that contains the covariate data and a\n\t# qualitative description of it with respect to presence/absence of \n\t# degeneracy.\n\treturn {'n':N, \n\t\t'degen':degen,\n\t\t'param':count if degen==\"count\" else const, \n\t\t'types':types, \n\t\t'covar':p }",
"def test_random_init(self, r, p, s, t):\n expected_scores = {\n (C, D): (s, t),\n (D, C): (t, s),\n (D, D): (p, p),\n (C, C): (r, r),\n }\n game = axl.Game(r, s, t, p)\n self.assertEqual(game.scores, expected_scores)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks the LCG crypto function for randomness at different sizes. These are complex and exhaustive tests.
|
def test_lcg_crypto( self ) :
print( "\ntest_lcg_crypto" )
self.test_name = 'test_lcg_crypto'
self.setUp() # setup() after setting test_name
check_function( self, LcgCrypto, self.the_rnt )
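check_function, reproduced verbatim among the negatives below, also rejects all-ones words via count_all_fs, which is not defined in this excerpt either; a hypothetical version consistent with that use:
def count_all_fs( the_table, integer_width ) :
    # count values equal to the all-ones word for this width,
    # e.g. 0xFFFFFFFFFFFFFFFF when integer_width is 64
    all_fs = ( 1 << integer_width ) - 1
    return sum( 1 for value in the_table if value == all_fs )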
|
[
"def check_function( self, the_function, the_rnt ) :\n print( the_function )\n print( \"vec_size int_width statesize p_lvl difficulty \" +\n \"duplicates zeros all ff's elapsed time byterate\" )\n sys.stdout.flush()\n\n function_return = True\n n_samples = self.difficulty * 64 * 1024\n random_table = [ 0 for _ in range( n_samples ) ]\n for n_lcgs in [ 7, 11, 19 ] :\n for integer_width in [ 64, 128 ] :\n for lcg_depth in [ 9, 17 ] :\n for paranoia_level in [ 1, 2 ] :\n beginning_time = int( time.time() )\n the_crypto = the_function( the_rnt, n_lcgs,\n integer_width, lcg_depth,\n paranoia_level )\n for i in range( n_samples ) :\n # this becomes slower over time. Why?\n new_random = the_crypto.next( integer_width,\n paranoia_level )\n random_table[ i ] = new_random\n\n ending_time = int( time.time() )\n \n sys.stdout.flush()\n\n elapsed_time = ending_time - beginning_time \n if elapsed_time == 0 :\n elapsed_time = 1\n byte_rate = ( n_samples * ( integer_width / 8 )) / \\\n elapsed_time\n\n duplicates = count_duplicates( random_table )\n function_return &= duplicates == 0\n\n zeros = count_zeros( random_table )\n function_return &= zeros == 0\n\n # these are not signed numbers, 0xFFFF... is problem\n all_fs = count_all_fs( random_table, integer_width )\n function_return &= all_fs == 0\n\n print( \"%5d %10d %8d %7d %10d %7d %7d %7d %7d %18.2f\" %\n ( n_lcgs, integer_width, lcg_depth, paranoia_level,\n n_samples, duplicates, zeros, all_fs, \n ending_time - beginning_time, byte_rate ) )\n\n sys.stdout.flush()\n\n self.assertTrue( function_return )",
"def test_random_examples(self):\r\n\r\n for n in range(0, 1000):\r\n num1 = random.choices(range(0, 10 ** 3), k=1)\r\n num2 = random.choices(range(0, 10 ** 3), k=1)\r\n\r\n self.assertEqual(gcd_it(num1[0], num2[0]), math.gcd(num1[0], num2[0]))\r\n self.assertEqual(gcd_rec(num1[0], num2[0]), math.gcd(num1[0], num2[0]))",
"def testRandomLarge():\n simulateRandom(\n maxCaps=8,\n maxSpecs=12,\n maxResources=100,\n runsPerConfig=100,\n numConfigs=100,\n verifyFunc=checkValid,\n seed=int(time.time())\n )",
"def test_generate_with_different_length(self):\n pg = PasswordGenerator()\n length = 16\n pg.minlen = length\n pg.maxlen = length\n self.assertEqual(len(pg.generate()), length)",
"def verify(g, h, p, x):\n return pow(g, x, p) == h",
"def test_cltgrng():\n packed = [\n [5, 15, 19], [11, 25, 30, 31], [10, 17, 21, 28], [1, 3, 23],\n [2, 7, 18, 29], [9, 14, 20, 27], [4, 8, 16, 26], [0, 6, 12, 24],\n [13, 22, 26], [10, 14, 24, 28], [2, 13, 15, 19], [4, 6, 9, 27],\n [3, 17, 23, 25], [12, 16, 22, 30], [0, 1, 7, 8], [11, 18, 20, 31],\n [2, 5, 21, 29], [0, 1, 14, 17], [9, 22, 25], [3, 18, 28, 31],\n [7, 21, 24, 29], [4, 5, 6, 16], [8, 13, 20], [11, 15, 19, 26],\n [10, 12, 23, 30], [5, 10, 13, 27], [2, 8, 22, 25], [7, 12, 14, 21],\n [3, 15, 24, 31], [4, 6, 19, 23], [17, 28, 30], [16, 18, 20]]\n urng = LUTOPT.from_packed(packed, init=1)\n grng = CLTGRNG(urng)\n n = len(packed)\n logn = int(np.log2(n))\n\n def tb():\n # Run a few cycles of the URNG to warm it up and fill up the\n # register hierarchy of the GRNG.\n for _ in range(2*logn):\n yield\n\n # Check first 100 outputs match\n results = []\n for i in range(100):\n # Run the hardware simulation for one clock cycle\n yield\n\n # Fetch the URNG value and compute the corresponding Gaussian.\n # Note that we bit-reverse the URNG to correspond to the bit\n # indexing of the hardware.\n x = np.array([int(x) for x in\n bin(int((yield urng.x)))[2:].rjust(n, \"0\")[::-1]])\n for level in range(logn):\n level_n = 2**(logn - level)\n y = np.zeros(level_n//2, dtype=np.int16)\n for pair in range(0, level_n, 2):\n y[pair//2] = x[pair] - x[pair+1]\n x = y\n results.append(x[0])\n\n # Convert grng.x into signed form\n grng_x = (yield grng.x)\n grng_x = grng_x if grng_x < 2**31 else (grng_x - 2**32)\n\n # Once we've collected enough results to compensate for the\n # clock delay, start comparing numbers.\n if len(results) > logn:\n assert grng_x == results[-logn-1]\n\n run_simulation(grng, tb())",
"def test_password_generation_length(self):\n\n # +10 for the minimum password length\n for pw_length in [10+int(100*random.random()) for i in range(100)]:\n password = generate_password(pw_length)\n self.assertTrue(len(password), pw_length)",
"def chi2_is_random(\n data: bytes, \n alpha: int=0.05,\n unit_size_bits: int=8, \n ) -> bool:\n # Build frequency table\n f_obs = []\n for i in range(2**unit_size_bits):\n f_obs.append(0)\n if unit_size_bits == 8:\n for i in range(len(data)):\n f_obs[data[i]] += 1\n else:\n raise Exception(f\"unit_size_bits = {unit_size_bits} not yet implemented\")\n \n return chi2_from_freqs_pval(f_obs) > alpha",
"def TEST_uniform_deterministic() -> None:\n res = list(it.islice(uniform_deterministic(), 0, 6))\n assert res == [.5, .25, .75, .125, .625, .375]\n print(f\"PC:KEYggLG: TEST_uniform_deterministic done\")\n exit(1)",
"def php_mt_rand(s1):\n s1 ^= (s1 >> 11)\n s1 ^= (s1 << 7) & 0x9d2c5680\n s1 ^= (s1 << 15) & 0xefc60000\n s1 ^= (s1 >> 18)\n return s1",
"def test_generate_mandelbrot_data(self):\n compare_data = generate()\n self.assertIsNone(npt.assert_almost_equal(compare_data,EXAMPLE_DATA))",
"def test_prng_crypto( self ) :\n print( \"\\ntest_prng_crypto\" )\n self.test_name = 'test_prng_crypto'\n\n self.setUp() # setup() after setting test_name\n\n check_function( self, PrngCrypto, self.the_rnt )",
"def hash(x):\r\n return (randint(1, 5*c)*x + randint(1, 5*c)) % c",
"def advapi32_CryptGenRandom(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hProv\", \"dwLen\", \"pbBuffer\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def congruentEq():\n \n a = random.randint(start, end)\n b = random.randint(start, end)\n \n m = random.randint(start + 3, end + 3)\n \n c = random.randint(start, end)\n k = random.randrange(start+1, end, 2)\n \n while(math.gcd(c,m) != 1):\n c = random.randint(start, end)\n m = random.randint(start + 3, end + 3)\n \n exp1 = a * c * k\n \n while(exp1 < 100 or exp1 > 500 or math.gcd(a,m) != 1 or a == b):\n a = random.randint(start, end)\n k = random.randrange(start+1, end, 2)\n exp1 = a * c * k\n \n exp2 = b * c * k\n modulo = m * k\n\n print(\"a={}, b={}, m={}, c={}, k={}\".format(a, b, m, c, k))\n print(\"{}*{}*{}x = {}*{}*{} ({}*{})\".format(a, c, k, b, c, k, m, k))\n print(\"{}x = {} ({})\".format(exp1, exp2, modulo))",
"def testRandomMedium():\n simulateRandom(\n maxCaps=6,\n maxSpecs=5,\n maxResources=15,\n runsPerConfig=100,\n numConfigs=100,\n verifyFunc=checkSolve,\n seed=int(time.time())\n )",
"def test_problem11(self):\n blocklen = 16\n for i in range(100):\n guess, real = cryptanalysis.encryption_detection_oracle_ecb_cbc(ciphers.black_box1, blocklen, True)\n self.assertEqual(real, guess)",
"def test_generate_constants( self ) :\n print( \"test_generate_constants\" )\n\n entropy_bits = \\\n 0xd262fbc7cbc7e757d16234bd7e88f12cc5dfef7c2ee82c9a4e289113d83d8724\n n_prngs = 19\n for integer_width in [ 64, 128, 256 ] :\n\n for n_prngs in [ 7, 19, 31 ] :\n constant_generator = generate_constants( integer_width, n_prngs,\n entropy_bits )\n\n for _ in range( n_prngs ) :\n multiplier, addition, lag, delta = next( constant_generator)\n print( multiplier, addition, lag, delta )\n\n try :\n multiplier, addition, lag, delta = next( constant_generator)\n\n except StopIteration :\n print( \"StopIteration -- Proper result\" )\n\n print( \"success test_generate_constants\" )",
"def test_cases_for_hash(self,\n alg: crypto_knowledge.Algorithm\n ) -> Iterator[test_case.TestCase]:\n calc = self.CALCULATE[alg.expression]\n if calc is None:\n return # not implemented yet\n\n short = b'abc'\n hash_short = calc(short)\n long = (b'Hello, world. Here are 16 unprintable bytes: ['\n b'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\x09\\x0a'\n b'\\x80\\x81\\x82\\x83\\xfe\\xff]. '\n b' This message was brought to you by a natural intelligence. '\n b' If you can read this, good luck with your debugging!')\n hash_long = calc(long)\n\n yield self.one_test_case(alg, 'hash_empty', '', [calc(b'')])\n yield self.one_test_case(alg, 'hash_valid_one_shot', '',\n [short.hex(), hash_short])\n for n in [0, 1, 64, len(long) - 1, len(long)]:\n yield self.one_test_case(alg, 'hash_valid_multipart',\n '{} + {}'.format(n, len(long) - n),\n [long[:n].hex(), calc(long[:n]),\n long[n:].hex(), hash_long])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks the PRNG crypto function for randomness at different sizes. These are complex and exhaustive tests.
|
def test_prng_crypto( self ) :
print( "\ntest_prng_crypto" )
self.test_name = 'test_prng_crypto'
self.setUp() # setup() after setting test_name
check_function( self, PrngCrypto, self.the_rnt )
|
[
"def testRandomLarge():\n simulateRandom(\n maxCaps=8,\n maxSpecs=12,\n maxResources=100,\n runsPerConfig=100,\n numConfigs=100,\n verifyFunc=checkValid,\n seed=int(time.time())\n )",
"def check_function( self, the_function, the_rnt ) :\n print( the_function )\n print( \"vec_size int_width statesize p_lvl difficulty \" +\n \"duplicates zeros all ff's elapsed time byterate\" )\n sys.stdout.flush()\n\n function_return = True\n n_samples = self.difficulty * 64 * 1024\n random_table = [ 0 for _ in range( n_samples ) ]\n for n_lcgs in [ 7, 11, 19 ] :\n for integer_width in [ 64, 128 ] :\n for lcg_depth in [ 9, 17 ] :\n for paranoia_level in [ 1, 2 ] :\n beginning_time = int( time.time() )\n the_crypto = the_function( the_rnt, n_lcgs,\n integer_width, lcg_depth,\n paranoia_level )\n for i in range( n_samples ) :\n # this becomes slower over time. Why?\n new_random = the_crypto.next( integer_width,\n paranoia_level )\n random_table[ i ] = new_random\n\n ending_time = int( time.time() )\n \n sys.stdout.flush()\n\n elapsed_time = ending_time - beginning_time \n if elapsed_time == 0 :\n elapsed_time = 1\n byte_rate = ( n_samples * ( integer_width / 8 )) / \\\n elapsed_time\n\n duplicates = count_duplicates( random_table )\n function_return &= duplicates == 0\n\n zeros = count_zeros( random_table )\n function_return &= zeros == 0\n\n # these are not signed numbers, 0xFFFF... is problem\n all_fs = count_all_fs( random_table, integer_width )\n function_return &= all_fs == 0\n\n print( \"%5d %10d %8d %7d %10d %7d %7d %7d %7d %18.2f\" %\n ( n_lcgs, integer_width, lcg_depth, paranoia_level,\n n_samples, duplicates, zeros, all_fs, \n ending_time - beginning_time, byte_rate ) )\n\n sys.stdout.flush()\n\n self.assertTrue( function_return )",
"def test_password_generation_length(self):\n\n # +10 for the minimum password length\n for pw_length in [10+int(100*random.random()) for i in range(100)]:\n password = generate_password(pw_length)\n self.assertTrue(len(password), pw_length)",
"def test_generate_with_different_length(self):\n pg = PasswordGenerator()\n length = 16\n pg.minlen = length\n pg.maxlen = length\n self.assertEqual(len(pg.generate()), length)",
"def test_random_examples(self):\r\n\r\n for n in range(0, 1000):\r\n num1 = random.choices(range(0, 10 ** 3), k=1)\r\n num2 = random.choices(range(0, 10 ** 3), k=1)\r\n\r\n self.assertEqual(gcd_it(num1[0], num2[0]), math.gcd(num1[0], num2[0]))\r\n self.assertEqual(gcd_rec(num1[0], num2[0]), math.gcd(num1[0], num2[0]))",
"def generate_secret(self):\n bits = self.args.get('length')\n # Bits should dividable by 8, because we will ask the os for random\n # bytes and because we can't encode partial bytes. Base32 will cause a\n # 160% inflation of the data and we can't have padding for TOTP secrets\n # so `bits * 1.6` can not be a fraction.\n if (bits % 8 > 0):\n self.msg('not_common_totp_val')\n exit(2)\n if bits not in [80, 160] and not self.args['expert']:\n self.msg('not_common_totp_val')\n exit(2)\n return base64.b32encode(os.urandom(bits // 8)).decode('utf-8')",
"def urandom(size: int) -> str:\n ...",
"def chi2_is_random(\n data: bytes, \n alpha: int=0.05,\n unit_size_bits: int=8, \n ) -> bool:\n # Build frequency table\n f_obs = []\n for i in range(2**unit_size_bits):\n f_obs.append(0)\n if unit_size_bits == 8:\n for i in range(len(data)):\n f_obs[data[i]] += 1\n else:\n raise Exception(f\"unit_size_bits = {unit_size_bits} not yet implemented\")\n \n return chi2_from_freqs_pval(f_obs) > alpha",
"def nextPsuedoRandNum(num, length):\n return ((num * 113) + 137) % length",
"def verify(g, h, p, x):\n return pow(g, x, p) == h",
"def random_cipher():\n return np.random.permutation(26)",
"def rmspp(number, attempts=28):\r\n if number < 2:\r\n return False\r\n if number == 2:\r\n return True\r\n if number % 2 == 0:\r\n return False\r\n # Given an odd integer n, let n = 2**r*s+1, with s odd... \r\n s = number - 1\r\n r = 0\r\n while s % 2 == 0:\r\n r += 1\r\n s /= 2\r\n while attempts:\r\n # ... choose a random integer a with 1 ≤ a ≤ n-1\r\n a = random.randint(1, number-1)\r\n # Unless a**s % n ≠ 1 ...\r\n if mod_exp(a, s, number) != 1:\r\n # ... and a**((2**j)*s) % n ≠ -1 for some 0 ≤ j ≤ r-1 \r\n for j in range(0, r):\r\n if mod_exp(a, (2**j)*s, number) == number-1:\r\n break\r\n else:\r\n return False\r\n attempts -= 1\r\n continue\r\n # A prime will pass the test for all a.\r\n return True",
"def test_newrand_bounded_rand_int(range_, n_pts):\n # XXX: this test is very seed sensitive: either it is wrong (too strict?)\n # or the wrapped RNG is not uniform enough, at least on some platforms.\n set_seed_wrap(42)\n n_iter = 100\n ks_pvals = []\n uniform_dist = stats.uniform(loc=0, scale=range_)\n # perform multiple samplings to make chance of outlier sampling negligible\n for _ in range(n_iter):\n # Deterministic random sampling\n sample = [bounded_rand_int_wrap(range_) for _ in range(n_pts)]\n res = stats.kstest(sample, uniform_dist.cdf)\n ks_pvals.append(res.pvalue)\n # Null hypothesis = samples come from an uniform distribution.\n # Under the null hypothesis, p-values should be uniformly distributed\n # and not concentrated on low values\n # (this may seem counter-intuitive but is backed by multiple refs)\n # So we can do two checks:\n\n # (1) check uniformity of p-values\n uniform_p_vals_dist = stats.uniform(loc=0, scale=1)\n res_pvals = stats.kstest(ks_pvals, uniform_p_vals_dist.cdf)\n assert res_pvals.pvalue > 0.05, (\n \"Null hypothesis rejected: generated random numbers are not uniform.\"\n \" Details: the (meta) p-value of the test of uniform distribution\"\n f\" of p-values is {res_pvals.pvalue} which is not > 0.05\"\n )\n\n # (2) (safety belt) check that 90% of p-values are above 0.05\n min_10pct_pval = np.percentile(ks_pvals, q=10)\n # lower 10th quantile pvalue <= 0.05 means that the test rejects the\n # null hypothesis that the sample came from the uniform distribution\n assert min_10pct_pval > 0.05, (\n \"Null hypothesis rejected: generated random numbers are not uniform. \"\n f\"Details: lower 10th quantile p-value of {min_10pct_pval} not > 0.05.\"\n )",
"def testRandomMedium():\n simulateRandom(\n maxCaps=6,\n maxSpecs=5,\n maxResources=15,\n runsPerConfig=100,\n numConfigs=100,\n verifyFunc=checkSolve,\n seed=int(time.time())\n )",
"def gentempkey(bsize=16):\n args = ['openssl','rand','-base64',str(bsize)]\n pipeline = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n return pipeline.stdout.read()",
"def test_generate_random_table( self ) :\n print( \"\\ntest_generate_random_table\" )\n self.test_name = 'test_generate_random_table'\n\n self.setUp()\n\n str_random_table = generate_random_table( self.the_rnt, 4096, 64 )\n\n # that is strings, so need an integer array\n the_program = '\\nN_K_RANDOM_BYTES=[\\n' + \\\n str_random_table + ']\\n'\n\n N_K_RANDOM_BYTES = convert_string( the_program )\n \n self.assertTrue( count_duplicates( N_K_RANDOM_BYTES ) == 0 )\n self.assertTrue( count_zeros( N_K_RANDOM_BYTES ) == 0 )",
"def get_secure_random_string(size):\r\n value = os.urandom(size)\r\n value = binascii.hexlify(value)\r\n value = value.decode('utf-8')[:size]\r\n return value",
"def test_2(self):\n\t\tself.compare_multi_random(self.files, k=2, n=1)",
"def make_totp_secret():\n return pyotp.random_base32()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Function to calculate the number of days between today and the same day n months ago.
|
def ndays(nmonth=3):
today0 = datetime.now()
year3, month3 = (today0.year, today0.month - nmonth) if today0.month - nmonth >= 1 \
else (today0.year - 1, today0.month - nmonth + 12)
date3 = datetime(year3, month3, today0.day)
ndays = (today0 - date3).days
return ndays
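A worked example of the same arithmetic for a fixed date (illustration only; it assumes from datetime import datetime, which ndays itself also needs):
from datetime import datetime

today0 = datetime(2024, 6, 15)   # pretend "today" is 2024-06-15
date3 = datetime(2024, 3, 15)    # month3 = 6 - 3 = 3, same year
print((today0 - date3).days)     # 92, so ndays(3) would return 92 on that day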
|
[
"def days(n):\n return timedelta(days=n)",
"def previous_days(n, before=None):\n before = before or pendulum.today()\n return (before - before.subtract(days=n)).range('days')",
"def diff_dates():\n return abs((target_date - todays_date).days)",
"def get_days_diff(input_date: tuple) -> int:\n return (datetime.now()-datetime(*input_date)).days",
"def calculate_delta(datum):\r\n date_string = datum.split(\"-\")\r\n d0 = datetime.date(int(date_string[0]), int(date_string[1]), int(date_string[2].split('T')[0]))\r\n d1 = datetime.date.today()\r\n delta = d1 - d0\r\n return delta.days",
"def days_left():\n return str((datetime(2020, 10, 31) - datetime.now()).days)",
"def months_passed(self):\n\n return relativedelta(self.start_date, date.today()).months",
"def get_n_months_after_post(tweet_id,n):\n\n if n <= 0:\n logger.error('Must not set n<=0')\n sys.exit(1)\n start_time_dt = get_posting_datetime(tweet_id)\n start_time_dt = start_time_dt + datetime.timedelta(27*n-1)\n start_time = start_time_dt.strftime(\"%Y-%m-%d\")\n end_time = (start_time_dt + datetime.timedelta(27)).strftime(\"%Y-%m-%d\")\n return start_time,end_time",
"def remaining_days(self) -> float:\n return self.project_days - self.committed_days",
"def ntradingdays():\n return 252*10",
"def delta_days(filename, folder, cfg):\n archives = archives_create_days(folder, cfg['pattern'])\n if archives:\n last_archive_day = list(archives.keys())[-1]\n return (file_create_day(filename) - last_archive_day).days",
"def number_of_days(iteration):\r\n return iteration // 24",
"def countdown():\n wedding_date = Event.objects.order_by('date')[:1].get()\n countdown_to = abs((wedding_date.date - date.today()).days)\n return countdown_to",
"def _get_days_in_months(start_date, end_date, n_months, list_yr_mo):\n if n_months == 1:\n days_in_months = np.array([(end_date - start_date).days])\n else:\n days_in_month_1 = ((start_date + MonthEnd()) - start_date).days\n days_in_month_n = (end_date - (end_date - MonthBegin())).days + 1\n days_in_months = [days_in_month_1]\n for month in list_yr_mo[1:-1]:\n Y, m = list(map(int, month.split(\"-\")))\n days_in_months.append(calendar.monthrange(Y, m)[1])\n days_in_months.append(days_in_month_n)\n return np.array(days_in_months)",
"def date_delta(dt1, dt2):\n delta = dt2 - dt1 \n return abs(delta.days)",
"def getNumDays(self, curDateTime, expDateTime):\n return (expDateTime - curDateTime).days",
"def seconds2days(n):\n days = n / 60 / 60 / 24\n return days",
"def elapsed_days(cls, year):\n months_elapsed = quotient(235 * year - 234, 19)\n parts_elapsed = 12084 + 13753 * months_elapsed\n days = 29 * months_elapsed + quotient(parts_elapsed, 25920)\n return days + 1 if mod(3 * (days + 1), 7) < 3 else days",
"def days(self):\n return len(self.get_dates())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets a random entry from this text file.
|
def getEntry(path):
    l = makeList(path)
    return random.choice(l)
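makeList is not shown here; a minimal sketch with the behaviour getEntry assumes (one entry per non-empty line of the text file), plus the random import the call needs:
import random

def makeList(path):
    # hypothetical helper: one entry per non-empty line of the file
    with open(path) as f:
        return [line.strip() for line in f if line.strip()]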
|
[
"def random_word():\n file_name = \"wordbank.txt\"\n number_of_lines = file_len(file_name) - 1\n target_line = randint(0, number_of_lines)\n target_word = linecache.getline(file_name, target_line)\n return target_word.replace(\"\\n\", \"\")",
"def pick_random_word():\n # open the sowpods dictionary\n with open(\"sowpods.txt\", 'r') as f:\n words = f.readlines()\n # generate a random index\n # -1 because len(words) is not a valid index into the list `words`\n index = random.randint(0, len(words) - 1)\n # print out the word at that index\n word = words[index].strip()\n return word",
"def getRandom():\n alist = []\n with open(\"./wappen.tsv\", encoding=\"utf8\") as coas:\n reader = csv.DictReader(coas, delimiter=\"\\t\")\n for row in reader:\n alist.append(row)\n chosen = random.choice(alist)\n return chosen",
"def random_line(filename):\r\n linecount = count_lines(open(filename))\r\n chosen_line_number = random.randrange(linecount)\r\n return linecache.getline(filename, chosen_line_number)",
"def randomWord(self):\n with open(self.category) as f:\n words = f.read().split()\n word = random.choice(words)\n return word",
"def generate_field():\r\n\timport random\r\n\tmap_choice = [1, 2, 3, 4, 5]\r\n\treturn read_field(('map{}.txt').format(str(random.choice(map_choice))))",
"def random(request):\n\n return entry(request, choice(util.list_entries()))",
"def random_word(self, source=\"words.txt\"):\n # slice to remove newlines from word\n return random.choice(open(source, 'r').readlines())[:-1]",
"def get_random_tweet(self):\n tweets = json.loads(open('data/tweets.json', 'r').read())\n tweet = random.choice(tweets)\n\n if tweet['text'].strip() in self.last_n_tweets:\n return self.get_random_tweet()\n else:\n return tweet",
"def get_random_file(path):\n files = get_files(path)\n\n return random.choice(files)",
"def _random_file(self):\n #track_no = f'{np.random.randint(len(self.file_paths)//3):03}'\n #track_name = f'{track_no}.{part}.h5'\n return random.choice(self.file_paths)",
"def jokes_helper():\n\n resp = open('response_phrases/jokes.txt')\n line = next(resp)\n for num, aline in enumerate(resp):\n if random.randrange(num + 2):\n continue\n if aline != '':\n line = aline\n else:\n line = 'I intend to live forever. So far, so good.'\n return line",
"def PickRandomMatch(self):\n return self.matches[random.choice(list(self.matches.keys()))][0]",
"def load_word():\n f = open('words.txt', 'r')\n words_list = f.readlines()\n f.close()\n\n words_list = words_list[0].split(' ')\n secret_word = random.choice(words_list)\n return secret_word",
"def get_quote(file=\"addons/quotes.csv\"):\n # get length of file\n num_lines = sum(1 for line in open(file))\n # select random row\n index = random.randint(0, num_lines)\n with open(file, 'r', errors='ignore') as f:\n reader = csv.reader(f)\n row = [row for idx, row in enumerate(reader) if idx == index][0]\n return {\"author\": row[0], \"quote\": row[1]}",
"def randomize(self, widget):\r\n f = open(\"dishes.txt\", 'r')\r\n self.dishes = [] \r\n for line in f:\r\n self.dishes.append(line)\r\n f.close()\r\n randomLabel = random.choice(self.dishes) \r\n widget[\"text\"] = randomLabel.strip()",
"def _random_file(self):\n #track_no = f'{np.random.randint(len(self.file_paths)//args.stems):03}'\n #track_name = f'{track_no}.{part}.wav'\n return random.choice(self.file_paths)",
"def getATweet():\n return random.choice(config.Tweets_msgs)",
"def getRandom(self):\n return self.set_list[random.randint(0, len(self.set_list) - 1)]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Draws the overlay as a box.
|
def _overlay_box(self, component, gc):
if self._screen_start and self._screen_end:
with gc:
gc.set_antialias(0)
gc.set_line_width(self.border_size)
gc.set_stroke_color(self.border_color_)
gc.clip_to_rect(component.x, component.y, component.width, component.height)
x, y = self._screen_start
x2, y2 = self._screen_end
rect = (x, y, x2-x+1, y2-y+1)
if self.color != "transparent":
if self.alpha:
color = list(self.color_)
if len(color) == 4:
color[3] = self.alpha
else:
color += [self.alpha]
else:
color = self.color_
gc.set_fill_color(color)
gc.draw_rect(rect)
else:
gc.rect(*rect)
gc.stroke_path()
return
|
[
"def cover_box(self, x, y, width, height):\n pg.draw.rect(self.screen, (255, 255, 255), (x, y, width, height))",
"def _draw_box(self, dc, box):\n CIRCLE_RAD = 1 if self.scale == 1.0 else 3\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n dc.SetPen(wx.Pen(box.color, box.line_width))\n \n w_img, h_img = self.img_bitmap.GetWidth(), self.img_bitmap.GetHeight()\n x1, y1 = int(round(box.x1*w_img)), int(round(box.y1*h_img))\n x2, y2 = int(round(box.x2*w_img)), int(round(box.y2*h_img))\n w, h = abs(x1-x2), abs(y1-y2)\n (ul_x, ul_y), (lr_x, lr_y) = util_gui.get_box_corners((x1,y1),(x2,y2))\n dc.DrawRectangle(ul_x,ul_y,w,h)\n # Draw the 'grabber' circles\n dc.SetPen(wx.Pen(\"Black\", 1))\n dc.SetBrush(wx.Brush(\"White\"))\n dc.DrawCircle(ul_x, ul_y, CIRCLE_RAD) # Upper-Left\n dc.DrawCircle(ul_x+(w/2), ul_y, CIRCLE_RAD) # Top\n dc.DrawCircle(ul_x+w, ul_y, CIRCLE_RAD) # Upper-Right\n dc.DrawCircle(ul_x, ul_y+(h/2), CIRCLE_RAD) # Left\n dc.DrawCircle(ul_x+w, ul_y+(h/2), CIRCLE_RAD) # Right\n dc.DrawCircle(ul_x, ul_y+h, CIRCLE_RAD) # Lower-Left\n dc.DrawCircle(ul_x+(w/2), lr_y, CIRCLE_RAD) # Bottom\n dc.DrawCircle(lr_x, lr_y, CIRCLE_RAD) # Lower-Right\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n \n # Draw the ID numbers (for now)\n #if box.is_contest:\n # dc.SetTextForeground(\"Blue\")\n #else:\n # dc.SetTextForeground(\"Red\")\n #dc.DrawText(str(box.contest_id), ul_x, ul_y)",
"def draw_box(detection, detection_image_cv2):\n\n logging.debug('drawing box for {}'.format(detection['label'].upper()))\n scale = 2\n color = (0, 0, 255)\n cv2.rectangle(\n detection_image_cv2,\n (detection['topleft']['x'], detection['topleft']['y']),\n (detection['bottomright']['x'], detection['bottomright']['y']),\n color,\n scale\n ) \n\n return detection_image_cv2",
"def _add_bbox_overlay(self):\n cv2.rectangle(self.frame, \n tuple(self.bbox[:2]), tuple(self.bbox[2:]), \n (0,255,0), 2)",
"def drawOverlays(self):\r\n\t\tpass",
"def draw(self) -> None:\n if SHOW_OUTLINE:\n pg.draw.rect(self._screen, RED, self._rect, width=1)\n pg.draw.rect(self._screen, self._bg_color, self._rect)\n pg.draw.rect(self._screen, GRAY, self._rect, width=1)\n for _, sb in self._scoreboxes.items():\n sb.draw()\n\n pg.display.update(self._rect)",
"def draw_box(axes, size, view):\n a, b, c = size\n x = a*np.array([+1, -1, +1, -1, +1, -1, +1, -1])\n y = b*np.array([+1, +1, -1, -1, +1, +1, -1, -1])\n z = c*np.array([+1, +1, +1, +1, -1, -1, -1, -1])\n x, y, z = transform_xyz(view, None, x, y, z)\n def _draw(i, j):\n axes.plot([x[i], x[j]], [y[i], y[j]], [z[i], z[j]], color='black')\n _draw(0, 1)\n _draw(0, 2)\n _draw(0, 3)\n _draw(7, 4)\n _draw(7, 5)\n _draw(7, 6)",
"def draw_box(self, boxsize):\n self.go_to(Point(-boxsize, -boxsize))\n self.draw_square(boxsize * 2)\n self.go_home()",
"def __draw (self, display_surface):\n pygame.draw.rect(display_surface, self._colour, (self._rect))",
"def _draw_as_overlay(self, gc, view_bounds=None, mode=\"normal\"):\n # Determine the position we are going to draw at from our alignment\n # corner and the corresponding outer_padding parameters. (Position\n # refers to the lower-left corner of our border.)\n\n # First draw the border, if necesssary. This sort of duplicates\n # the code in PlotComponent._draw_overlay, which is unfortunate;\n # on the other hand, overlays of overlays seem like a rather obscure\n # feature.\n\n with gc:\n if self.inside:\n if self.align == \"ur\":\n self.x -= 5\n self.y -= 5\n elif self.align == \"ul\":\n self.x += 5\n self.y -= 5\n else:\n self.y += self.height + 5\n\n self.x += self.xoffset\n self.y += self.yoffset\n gc.clip_to_rect(int(self.x), int(self.y), int(self.width), int(self.height))\n edge_space = self.border_width + self.border_padding\n icon_width, icon_height = self.icon_bounds\n\n icon_x = self.x + edge_space\n text_x = icon_x + icon_width + self.icon_spacing\n y = self.y2 - edge_space\n\n if self._cached_label_positions is not None:\n if len(self._cached_label_positions) > 0:\n self._cached_label_positions[:, 0] = icon_x\n\n for i, label_name in enumerate(self._cached_label_names):\n # Compute the current label's position\n label_height = self._cached_label_sizes[i][1]\n y -= label_height\n self._cached_label_positions[i][1] = y\n\n # Try to render the icon\n icon_y = y + (label_height - icon_height) / 2\n # plots = self.plots[label_name]\n plots = self._cached_visible_plots[i]\n render_args = (gc, icon_x, icon_y, icon_width, icon_height)\n\n try:\n if isinstance(plots, list) or isinstance(plots, tuple):\n # TODO: How do we determine if a *group* of plots is\n # visible or not? For now, just look at the first one\n # and assume that applies to all of them\n if not plots[0].visible:\n # TODO: the get_alpha() method isn't supported on the Mac kiva backend\n # old_alpha = gc.get_alpha()\n old_alpha = 1.0\n gc.set_alpha(self.invisible_plot_alpha)\n else:\n old_alpha = None\n if len(plots) == 1:\n plots[0]._render_icon(*render_args)\n else:\n self.composite_icon_renderer.render_icon(\n plots, *render_args\n )\n elif plots is not None:\n # Single plot\n if not plots.visible:\n # old_alpha = gc.get_alpha()\n old_alpha = 1.0\n gc.set_alpha(self.invisible_plot_alpha)\n else:\n old_alpha = None\n plots._render_icon(*render_args)\n else:\n old_alpha = None # Or maybe 1.0?\n\n icon_drawn = True\n except:\n icon_drawn = self._render_error(*render_args)\n\n if icon_drawn:\n # Render the text\n gc.translate_ctm(text_x, y)\n gc.set_antialias(0)\n self._cached_labels[i].draw(gc)\n gc.set_antialias(1)\n gc.translate_ctm(-text_x, -y)\n\n # Advance y to the next label's baseline\n y -= self.line_spacing\n if old_alpha is not None:\n gc.set_alpha(old_alpha)\n\n return",
"def draw_box(img, boxes):\n box = ImageDraw.Draw(img)\n for i in range(boxes.shape[0]):\n data = list(boxes[i])\n shape = [data[0], data[1], data[2], data[3]]\n box.rectangle(shape, outline =\"#02d5fa\", width=3)\n return img",
"def draw_rect(surface, fill_color, outline_color, rect, border=1):\n\tsurface.fill(outline_color, rect)\n\tsurface.fill(fill_color, rect.inflate(-border*2, -border*2))",
"def drawBox(width,height,depth,touched):\n\n vertices = (\n (width/2,-height/2,-depth/2),\n (width/2,height/2,-depth/2),\n (-width/2,height/2,-depth/2),\n (-width/2,-height/2,-depth/2),\n (width/2,-height/2,depth/2),\n (width/2,height/2,depth/2),\n (-width/2,-height/2,depth/2),\n (-width/2,height/2,depth/2)\n )\n\n edges = (\n (0,1),\n (0,3),\n (0,4),\n (2,1),\n (2,3),\n (2,7),\n (6,3),\n (6,4),\n (6,7),\n (5,1),\n (5,4),\n (5,7)\n )\n\n surfaces = (\n (0,1,2,3),\n (3,2,7,6),\n (6,7,5,4),\n (4,5,1,0),\n (1,5,7,2),\n (4,0,3,6)\n )\n\n glBegin(GL_QUADS)\n for surface in surfaces:\n x = 0\n for vertex in surface:\n x+=1\n if not(touched):\n glColor3fv((1,1,1)) # white by default\n else: glColor3fv((0,0.5,1)) # blue if touched\n glVertex3fv(vertices[vertex])\n glEnd()\n\n # glBegin(GL_LINES)\n # for edge in edges:\n # for vertex in edge:\n # glColor3fv((1,1,1))\n # glVertex3fv(vertices[vertex])\n # glEnd()",
"def draw_overlay(self, func=None, **kwargs):\n \n # check control\n if self.control is None:\n return\n \n # do not clean if empty\n if func is None and self._dc_overlay_empty:\n return\n \n # clear current tooltip\n self.SetToolTip(\"\")\n \n # make overlay DC\n dc = wx.ClientDC(self)\n odc = wx.DCOverlay(self._dc_overlay, dc)\n odc.Clear()\n \n # draw overlay\n if func is not None:\n canvas = self._make_canvas(dc)\n func(canvas, **kwargs)\n self._dc_overlay_empty = False\n \n # delete DC\n del odc",
"def drawRectangle(self, canvas):",
"def plot_rectangle( xl, yl, xh, yh, **style):\n x0 = (xh+xl)/2.0\n y0 = (yh+yl)/2.0\n lx = (xh-xl)\n ly = (yh-yl)\n plot_box( x0, y0, lx, ly, 0, **style )",
"def _show(self):\n\t\t#print ('showing rect')\n\t\tpg.draw.rect(gameDisplay, self._c, [self._x, self._y, self._w, self._h])",
"def box(self):\n self._write_pos(0, 0, None, \"┌\", None) # ┌╭\n for x in range(1, self.window_size[1]):\n self._write_pos(0, x, None, \"─\", None)\n\n self._write_pos(0, self.window_size[1] - 1, None, \"┐\", None) # ┐╮\n for y in range(1, self.window_size[0]):\n self._write_pos(y, self.window_size[1] - 1, None, \"│\", None)\n\n self._write_pos(\n self.window_size[0], self.window_size[1] - 1, None, \"┘\", None\n ) # ┘╯\n\n for x in range(self.window_size[1] - 2, 0, -1):\n self._write_pos(self.window_size[0], x, None, \"─\", None)\n\n self._write_pos(self.window_size[0], 0, None, \"└\", None) # └╰\n\n for y in range(self.window_size[0] - 1, 0, -1):\n self._write_pos(y, 0, None, \"│\", None)",
"def display_rectangle(self,ob,xmin=0,ymin=0,xmax=1,ymax=1):\n x=ob\n## canvas_color_function=lambda x,y,X,Y:raise\n img_function_name='display_rectangle'\n if x in self.image_hooks:\n if isinstance(x,str):\n x=self.image_hooks[x]\n w=self.image_hooks[x]\n w.display_rectangle(xmin,ymin,xmax,ymax) \n self.itemconfig(x,image=w.Tk)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given start and end points in screen space, returns corresponding low and high points in data space.
|
def _map_coordinate_box(self, start, end):
low = [0,0]
high = [0,0]
for axis_index, mapper in [(0, self.component.x_mapper), \
(1, self.component.y_mapper)]:
# Ignore missing axis mappers (ColorBar instances only have one).
if not mapper:
continue
low_val = mapper.map_data(start[axis_index])
high_val = mapper.map_data(end[axis_index])
if low_val > high_val:
low_val, high_val = high_val, low_val
low[axis_index] = low_val
high[axis_index] = high_val
return low, high
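A stand-in illustration of the per-axis sorting (FakeMapper, FakeComponent and FakeTool are hypothetical stubs, not Chaco classes; map_data is an identity screen-to-data transform here):
class FakeMapper:
    def map_data(self, screen_coord):
        return float(screen_coord)          # identity screen->data for the demo

class FakeComponent:
    x_mapper = FakeMapper()
    y_mapper = FakeMapper()

class FakeTool:
    component = FakeComponent()

low, high = _map_coordinate_box(FakeTool(), (120, 80), (40, 200))
print(low, high)    # [40.0, 80.0] [120.0, 200.0]; each axis sorted into low/high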
|
[
"def start_stop_indices(t_pts, plot_start, plot_stop):\n start_index = (np.fabs(t_pts-plot_start)).argmin() # index in t_pts array \n stop_index = (np.fabs(t_pts-plot_stop)).argmin() # index in t_pts array \n return start_index, stop_index",
"def get_bounds( reads, start_pos_index, end_pos_index ):\n max_low = sys.maxint\n max_high = -sys.maxint\n for read in reads:\n if read[ start_pos_index ] < max_low:\n max_low = read[ start_pos_index ]\n if read[ end_pos_index ] > max_high:\n max_high = read[ end_pos_index ]\n return max_low, max_high",
"def h__getBounds(n1, n2, percent_left_search, percent_right_search):\n # Create array of n1 evenly spaced numbers from 0 to 1 inclusive\n percentiles = np.linspace(0, 1, n1)\n start_percentiles = percentiles - percent_left_search\n stop_percentiles = percentiles + percent_right_search\n\n # An integer array giving the leftmost to navigate before stopping,\n # for each point along n2.\n start_indices = np.floor(start_percentiles * n2)\n stop_indices = np.ceil(stop_percentiles * n2)\n # Truncate any indices pointing outside the range between 0 and n2-1\n start_indices[start_indices < 0] = 0\n stop_indices[stop_indices >= n2] = n2 - 1\n\n return start_indices.astype(np.int), stop_indices.astype(np.int)",
"def __get_range(self):\n return self.high - self.low",
"def hilightRange(start, limit, range):\n\n global brushOv\n\n hilightStart = hilightEnd = None\n\n # Keep the hilight within the span of the data\n if start + range > limit:\n hilightStart = limit - range\n hilightEnd = limit\n else:\n hilightStart = start\n hilightEnd = start + range\n\n brushOv.selected = np.array([hilightStart, hilightEnd])\n\n return hilightStart, hilightEnd",
"def getIndexRange(hist, minValue, maxValue):\n fromIndex = -1\n toIndex = len(hist)\n for index, value in enumerate(hist):\n if value >= minValue and fromIndex < 0:\n fromIndex = index\n if value >= minValue and value <= maxValue and index < len(hist):\n toIndex = index\n return (fromIndex, toIndex)",
"def screen(xs, ys, xbounds, ybounds):\n good_x = np.logical_and(xs >= xbounds[0], xs < xbounds[1])\n good_y = np.logical_and(ys >= ybounds[0], ys < ybounds[1])\n good = np.logical_and(good_x, good_y)\n xs = xs[good]\n ys = ys[good]\n return xs, ys",
"def myfind_peak(xdata,ydata, min_height, min_dis):\n indexes=peakutils.indexes(ydata,thres=min_height, min_dist=min_dis)\n return indexes,xdata[indexes]",
"def line_endpoints(line): \n start = shapely.get_point(line,0)\n end = shapely.get_point(line,-1)\n return start, end",
"def __get_param_range(self, paramLow, paramHigh, step):\n if paramLow == paramHigh:\n return (paramLow, )\n else:\n preAnswer = []\n param = paramLow\n while param <= paramHigh:\n preAnswer.append(param)\n param += step\n if preAnswer[-1] != paramHigh:\n preAnswer.append(paramHigh)\n return tuple(preAnswer)",
"def _get_buffering_subregion_minmax(ip, pmin, pmax, rmax):\n if ip == -1:\n smin, smax = pmin - rmax, pmin\n elif ip == 0:\n smin, smax = pmin, pmax\n elif ip == 1:\n smin, smax = pmax, pmax + rmax\n return smin, smax",
"def search_vert_bounds(signal, x_min, x_max, y, res_y=4):\n indxs = []\n curs = x_min\n\n while curs <= x_max:\n\n if signal[curs] > y + res_y:\n x = curs\n if len(indxs) == 0 and x_min == 0 and signal[0] > y + res_y:\n indxs.append((0, x))\n else:\n\n while curs <= x_max and signal[curs] > y + res_y:\n curs += 1\n indxs.append((x, curs - 1))\n\n curs += 1\n\n return indxs",
"def data_range(self, n=-1):\n if len(self.results['peak_v']) > 0:\n last_v = self.results['peak_v'][n]\n else:\n last_v = self.v_start\n v_index = self.spectrogram._velocity_to_index(last_v)\n start_index = max(0, v_index - self.span)\n end_index = min(v_index + self.span,\n len(self.spectrogram.velocity))\n return (start_index, end_index)",
"def _calculate_startend_sample(delta, starttime, plot_start, plot_end):\n beg_sample = int(-1*starttime/delta + plot_start/delta)\n end_sample = int(-1*starttime/delta + plot_end/delta)\n return beg_sample, end_sample",
"def hsv_color_range(image, points, padding=0):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n color_list = []\n for point in points:\n color = image[point[0], point[1]][:3]\n color_list.append(color)\n color_list = np.asarray(color_list)\n max = np.array([color_list[:, 0].max(), \n color_list[:, 1].max(), \n color_list[:, 2].max()])\n low = np.array([color_list[:, 0].min(), \n color_list[:, 1].min(),\n color_list[:, 2].min(),])\n \n return max, low",
"def get_range_linear(self, models):\n data = self.get_series(models)\n ymin = min([min(d) for d in data])\n ymax = max([max(d) for d in data])\n return (ymin, ymax)",
"def _get_minmax_and_indices(self, min=None, max=None):\n self._get_sort_index()\n s=self['sort_index']\n\n dowhere=False\n if min is not None:\n xmin = min\n dowhere=True\n else:\n xmin = self.x[s[0]]\n\n\n if max is not None:\n xmax = max\n dowhere=True\n else:\n xmax = self.x[s[-1]]\n \n self.dmin = xmin\n self.dmax = xmax\n\n self[self.xpref+'min'] = xmin\n self[self.xpref+'max'] = xmax\n\n if dowhere:\n # where function will preserve order, so subscript with s\n w,=numpy.where( (self.x[s] >= xmin) & (self.x[s] <= xmax) )\n if w.size == 0:\n raise ValueError(\"No data in specified min/max range: [%s,%s]\" % (xmin,xmax))\n self['wsort'] = s[w]\n else:\n self['wsort'] = s",
"def _start_end_site_coordinate(a_k, b_k):\n if a_k < b_k: # A below B so go from top of A to bottom of B\n start_k = a_k + 1\n end_k = b_k\n elif a_k > b_k: # A above B so go from bottom of A to top of B\n start_k = a_k\n end_k = b_k + 1\n else: # A in line with B so go from bottom(top) of A to bottom(top) of B (if k below zero)\n start_k = end_k = max(b_k, 0)\n return start_k, end_k",
"def get_interpolation_points(self):\n return self._lower_points, self._upper_points"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verify that the image 'quacks like a spatialimage'.
|
def is_spatial_image(image: Any) -> bool:
if not isinstance(image, xr.DataArray):
return False
if not set(image.dims).issubset(_supported_dims):
return False
for dim in _spatial_dims.intersection(image.dims):
if not image.coords[dim].dtype == np.float64:
return False
diff = np.diff(image.coords[dim])
if not np.allclose(diff, diff[0]):
return False
if "t" in image.dims:
t_coord = image.coords["t"]
if (
t_coord.dtype.char not in np.typecodes["AllInteger"]
and t_coord.dtype.char not in np.typecodes["AllFloat"]
and t_coord.dtype.char not in np.typecodes["Datetime"]
):
return False
return True
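A quick usage sketch, assuming _supported_dims and _spatial_dims follow the usual spatial-image convention and include "x" and "y": a DataArray with evenly spaced float64 spatial coordinates passes, while a bare ndarray does not.
import numpy as np
import xarray as xr

image = xr.DataArray(
    np.zeros((4, 5), dtype=np.uint8),
    dims=("y", "x"),
    coords={
        "y": np.linspace(0.0, 3.0, 4),   # float64, uniform spacing
        "x": np.linspace(0.0, 4.0, 5),
    },
)
assert is_spatial_image(image)                  # passes every check above
assert not is_spatial_image(np.zeros((4, 5)))   # not an xr.DataArray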
|
[
"def checkForGeom(dataset):\n \n spatial = False\n if \"Shape\" in [f.name for f in arcpy.ListFields(dataset) if f.required]:\n spatial = True\n return spatial",
"def verify_aperture_img_shape(self):\n assert self.tpf[1].header['TDIM5'] == '({},{})'.format(self.tpf[2].header['NAXIS1'], self.tpf[2].header['NAXIS2'])",
"def test_raster_shape(dem_rast):\n width = dem_rast.width\n height = dem_rast.height\n\n assert width == 1147 and height == 974",
"def img_check(img):\n with rasterio.open(img) as src:\n if src.crs.is_valid and src.crs.is_projected and src.crs.is_epsg_code:\n print(\"Input raster is valid and has valid CRS\")\n else:\n print(\"Input raster does not have valid CRS. Exiting the script\")\n # exiting from script\n sys.exit()",
"def _is_image(self, blob):\n if blob.layout != \"NCHW\":\n return False\n channels = blob.shape[1]\n return channels == 3",
"def test_valid_rgb_in_magick(self):\n assert poly.isInMap((205, 201, 201), \"magick\")",
"def is_single_face_valid(img) -> int:\n # TODO stub\n return 0",
"def test_good_geom(self):\n #NOTE Turbomole uses bohr radius: x//0.52917720859 for geom locations\n result=[\n '1.88972613289 3.77945226577 -1.88972613289 C',\n '3.77945226577 5.66917839866 1.88972613289 H'\n ]\n self.assertEqual(check_geom(self.good_geom), result)",
"def test_image(self):\n im = self.camera.acquire_image(0.5)\n\n self.assertEqual(im.dtype, np.int16)\n self.assertTupleEqual(im.shape, (2048, 2048))",
"def test_psf_image_view():\n import time\n t1 = time.time()\n array, _ = galsim.optics.psf(array_shape=testshape)\n image = galsim.optics.psf_image(array_shape=testshape)\n np.testing.assert_array_almost_equal(array.astype(np.float32), image.array, decimal)\n t2 = time.time()\n print 'time for %s = %.2f'%(funcname(),t2-t1)",
"def test_instance(self):\n self.assertTrue(isinstance(self.image,Image))",
"def isGeospatial(path):\n if isinstance(path, rio.io.DatasetReaderBase):\n ds = path\n else:\n try:\n ds = rio.open(path)\n except Exception:\n return False\n if ds.crs or (ds.transform and ds.transform != rio.Affine(1, 0, 0, 0, 1, 0)):\n return True\n if len(ds.gcps[0]) and ds.gcps[1]:\n return True\n return False",
"def _check_image_input(observation_space: spaces.Box, key: str = \"\") -> None:\n if observation_space.dtype != np.uint8:\n warnings.warn(\n f\"It seems that your observation {key} is an image but the `dtype` \"\n \"of your observation_space is not `np.uint8`. \"\n \"If your observation is not an image, we recommend you to flatten the observation \"\n \"to have only a 1D vector\"\n )\n\n if np.any(observation_space.low != 0) or np.any(observation_space.high != 255):\n warnings.warn(\n f\"It seems that your observation space {key} is an image but the \"\n \"upper and lower bounds are not in [0, 255]. \"\n \"Generally, CNN policies assume observations are within that range, \"\n \"so you may encounter an issue if the observation values are not.\"\n )",
"def test_valid_rgb_notin_magick(self):\n assert not poly.isInMap((40, 40, 40), \"magick\")",
"def test_just_inside():\n rmg = RasterModelGrid(4, 5, dx=2.0)\n\n assert_equal(rfuncs.is_coord_on_grid(rmg, (0., 4.)), True)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (8. - 1e-12, 4.)), True)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (3., 0.)), True)\n assert_equal(rfuncs.is_coord_on_grid(rmg, (3., 6. - 1e-12)), True)",
"def _is_tensor_image(img):\n return isinstance(img, (paddle.Tensor, Variable))",
"def check_size(image):\n width = image.width\n height = image.height\n if width == height:\n return True\n else:\n return False",
"def test_undefined_image():\n for i in range(len(types)):\n im1 = galsim.Image(dtype=types[i])\n assert not im1.bounds.isDefined()\n assert im1.array.shape == (1,1)\n assert im1 == im1\n\n im2 = galsim.Image()\n assert not im2.bounds.isDefined()\n assert im2.array.shape == (1,1)\n assert im2 == im2\n if types[i] == np.float32:\n assert im2 == im1\n\n im3 = galsim.Image(array=np.array([[]],dtype=types[i]))\n assert not im3.bounds.isDefined()\n assert im3.array.shape == (1,1)\n assert im3 == im1\n\n im4 = galsim.Image(array=np.array([[]]), dtype=types[i])\n assert not im4.bounds.isDefined()\n assert im4.array.shape == (1,1)\n assert im4 == im1\n\n im5 = galsim.Image(array=np.array([[1]]), dtype=types[i], bounds=galsim.BoundsI())\n assert not im5.bounds.isDefined()\n assert im5.array.shape == (1,1)\n assert im5 == im1\n\n im6 = galsim.Image(array=np.array([[1]], dtype=types[i]), bounds=galsim.BoundsI())\n assert not im6.bounds.isDefined()\n assert im6.array.shape == (1,1)\n assert im6 == im1\n\n im7 = 1.0 * im1\n assert not im7.bounds.isDefined()\n assert im7.array.shape == (1,1)\n if types[i] == np.float64:\n assert im7 == im1\n\n im8 = im1 + 1j * im3\n assert not im8.bounds.isDefined()\n assert im8.array.shape == (1,1)\n if types[i] == np.complex128:\n assert im8 == im1\n\n im9 = galsim.Image(0, 0)\n assert not im9.bounds.isDefined()\n assert im9.array.shape == (1,1)\n assert im9 == im1\n\n im10 = galsim.Image(10, 0)\n assert not im10.bounds.isDefined()\n assert im10.array.shape == (1,1)\n assert im10 == im1\n\n im11 = galsim.Image(0, 19)\n assert not im11.bounds.isDefined()\n assert im11.array.shape == (1,1)\n assert im11 == im1\n\n assert_raises(galsim.GalSimUndefinedBoundsError,im1.setValue, 0, 0, 1)\n assert_raises(galsim.GalSimUndefinedBoundsError,im1.__call__, 0, 0)\n assert_raises(galsim.GalSimUndefinedBoundsError,im1.view().setValue, 0, 0, 1)\n assert_raises(galsim.GalSimUndefinedBoundsError,im1.view().__call__, 0, 0)\n assert_raises(galsim.GalSimUndefinedBoundsError,im1.view().addValue, 0, 0, 1)\n assert_raises(galsim.GalSimUndefinedBoundsError,im1.fill, 3)\n assert_raises(galsim.GalSimUndefinedBoundsError,im1.view().fill, 3)\n assert_raises(galsim.GalSimUndefinedBoundsError,im1.invertSelf)\n\n assert_raises(galsim.GalSimUndefinedBoundsError,im1.__getitem__,galsim.BoundsI(1,2,1,2))\n assert_raises(galsim.GalSimUndefinedBoundsError,im1.subImage,galsim.BoundsI(1,2,1,2))\n\n assert_raises(galsim.GalSimUndefinedBoundsError,im1.setSubImage,galsim.BoundsI(1,2,1,2),\n galsim.Image(2,2, init_value=10))\n assert_raises(galsim.GalSimUndefinedBoundsError,im1.__setitem__,galsim.BoundsI(1,2,1,2),\n galsim.Image(2,2, init_value=10))\n\n im1.scale = 1.\n assert_raises(galsim.GalSimUndefinedBoundsError,im1.calculate_fft)\n assert_raises(galsim.GalSimUndefinedBoundsError,im1.calculate_inverse_fft)\n\n do_pickle(im1.bounds)\n do_pickle(im1)\n do_pickle(im1.view())\n do_pickle(im1.view(make_const=True))",
"def has_gps(img):\n imagen = open(img, 'rb')\n losTags = exifread.process_file(imagen)\n\n return True if 'GPS GPSLongitude' in losTags.keys() else False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert the array-like to a spatial image.
|
def to_spatial_image(
array_like: Any,
dims: Optional[Sequence[Union["t", "z", "y", "x", "c"]]] = None,
scale: Optional[Union[Mapping[Hashable, float]]] = None,
translation: Optional[Union[Mapping[Hashable, float]]] = None,
name: str = default_name,
axis_names: Optional[Union[Mapping[Hashable, str]]] = None,
axis_units: Optional[Union[Mapping[Hashable, str]]] = None,
t_coords: Optional[Sequence[Union[AllInteger, AllFloat, np.datetime64]]] = None,
c_coords: Optional[Sequence[Union[AllInteger, str]]] = None,
) -> SpatialImage:
ndim = array_like.ndim
if dims is None:
if ndim < 4:
dims = ("z", "y", "x")[-ndim:]
elif ndim < 5:
dims = ("z", "y", "x", "c")
logging.info("Assuming channel dimension is last, if not, please specify dims.")
elif ndim < 6:
dims = ("t", "z", "y", "x", "c")
else:
raise ValueError("Unsupported dimension: " + str(ndim))
else:
if not set(dims).issubset(_supported_dims):
raise ValueError("dims not valid for a SpatialImage")
dims = tuple(dims)
if dims not in SpatialImageDataClasses:
raise ValueError("The dims provided are not supported yet")
SIDataClass = SpatialImageDataClasses[dims]
si_kwargs = {
"scale": scale,
"translation": translation,
"name": name,
"axis_names": axis_names,
"axis_units": axis_units,
}
if "c" in dims:
si_kwargs["c_coords"] = c_coords
if "t" in dims:
si_kwargs["t_coords"] = t_coords
image = SIDataClass.new(array_like, **si_kwargs)
return image
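
A hedged usage sketch of the constructor above; the array shape, dims tuple, and the scale/translation keys are illustrative assumptions, and the chosen dims must have an entry in SpatialImageDataClasses:

import numpy as np

# (z, y, x, c) volume with an RGB channel axis; dims are passed explicitly,
# so the channel-dimension heuristic in the function above is not relied upon.
array = np.zeros((2, 64, 64, 3), dtype=np.uint8)
image = to_spatial_image(
    array,
    dims=("z", "y", "x", "c"),
    scale={"z": 2.0, "y": 0.5, "x": 0.5},
    translation={"z": 0.0, "y": 10.0, "x": 10.0},
    name="volume",
)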
|
[
"def from_ndarray(data):\n assert isinstance(data, np.ndarray), \"Input should be a Numpy array\"\n assert len(data.shape) == 3, \"Input data should be of shape (h, w, nc)\"\n h, w, nc = data.shape\n # Create a synthetic gdal dataset\n driver = gdal.GetDriverByName('MEM')\n itemsize = data[0, 0, 0].itemsize\n dtype = data[0, 0, 0].dtype\n gdal_dtype = get_gdal_dtype(itemsize,\n dtype == np.complex64 or dtype == np.complex128,\n signed=False if dtype in (np.uint8, np.uint16) else True)\n ds = driver.Create('', w, h, nc, gdal_dtype)\n for i in range(0, nc):\n ds.GetRasterBand(i + 1).WriteArray(data[:, :, i])\n\n geo_image = GeoImage.from_dataset(ds)\n return geo_image",
"def fromArray(cls, epsg, origin, height, width, array):\n x, y = origin\n\n # We have two cases, a single channel image\n # and a multi band image.\n # this obviously needs to be straightened.\n # takes arrays as rows, cols, numbands\n try:\n rows, cols = array.shape\n num_bands = 1\n except ValueError:\n rows, cols, num_bands = array.shape\n\n # Creating in memory data set\n driver = gdal.GetDriverByName('MEM')\n ds = driver.Create('',\n cols,\n rows,\n num_bands,\n cls._NP2GDAL_CONVERSION[str(array.dtype)])\n ds.SetGeoTransform((x, width, 0, y, 0, height))\n\n # The two cases again, single band\n # or multiband.\n\n if num_bands == 1:\n band = ds.GetRasterBand(1)\n band.WriteArray(array)\n elif num_bands > 1:\n for idx in range(num_bands):\n band = ds.GetRasterBand(idx+1)\n band.WriteArray(array[:, :, idx])\n\n out_srs = osr.SpatialReference()\n out_srs.ImportFromEPSG(epsg)\n ds.SetProjection(out_srs.ExportToWkt())\n band.FlushCache()\n return Raster(ds)",
"def arrayToImage(self, a):\n i=Image.fromstring('L',(a.shape[1],a.shape[0]),\n (a.astype('b')).tostring())\n return i",
"def _togis(self, *args, **kwargs):\n return togis(self, *args, **kwargs)",
"def convert_griddata_to_image(gd):\n return create_image_from_array(gd.data, gd.grid_wcs, gd.polarisation_frame)",
"def np2feature(np_array, output_feature, spatial_ref):\n\t# set projection info using spatial_ref = arcpy.Describe(input).spatialReference\n\tarcpy.da.NumPyArrayToFeatureClass(np_array, output_feature, (\"POINT_X\", \"POINT_Y\"), spatial_ref)\n\treturn",
"def to_java(self, data):\n if self._is_memoryarraylike(data):\n return imglyb.to_imglib(data)\n if self._is_xarraylike(data):\n return self.to_dataset(data)\n return to_java(data)",
"def convert_to_2d(geom):\n from django.contrib.gis.geos import WKBWriter, WKBReader\n wkb_r = WKBReader()\n wkb_w = WKBWriter()\n wkb_w.outdim = 2\n return wkb_r.read(wkb_w.write(geom))",
"def new_numpy_image(self, image):\n try:\n dtype_to_use = self.dtype(image)\n except TypeError:\n dtype_to_use = numpy.dtype('float64')\n return numpy.zeros(self.dims(image), dtype=dtype_to_use)",
"def arr2Geotiff(arr, fname, TL, Res, projEPSG):\n\n\n # Transform array to 64 bit floats (readable in QGIS)\n arr = np.float64(arr)\n # get array dimensions\n rows, cols = np.shape(arr)\n\n # Create gdal driver in the geo-tiff format\n drv = gdal.GetDriverByName(\"GTiff\")\n ds = drv.Create(fname, cols, rows, 1, gdal.GDT_Float64)\n\n # get resolution\n if type(Res) == list or type(Res) == tuple or type(Res) == np.ndarray:\n x_res, y_res = Res\n elif type(Res) == tuple:\n x_res, y_res = Res\n else:\n x_res, y_res = Res, Res\n\n # get coordinates of topleft corner and transform to bottom left corner\n TL_x, TL_y = TL\n BL_x, BL_y = TL_x, TL_y - y_res*arr.shape[0]\n\n # set geometry data (TL coordinates, resolution and skew)\n args = (BL_x, x_res, 0, BL_y, 0, y_res)\n ds.SetGeoTransform(args)\n\n # write band 1 values\n ds.GetRasterBand(1).WriteArray(np.flip(arr, axis = 0))\n ds.GetRasterBand(1).SetNoDataValue(-9999)\n\n # set projection based on given EPSG\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(projEPSG)\n ds.SetProjection(srs.ExportToWkt())\n\n # Close\n ds = None",
"def fancyConvert(image):",
"def rasterToArray(layer):\n\n\t\tlyr_name = os.path.split(layer)[1]\n\n\t\tif layer is not None and layer is not \"\":\n\t\t\tnew_array = gdal.Dataset.ReadAsArray(gdal.Open(layer)).astype(\n\t\t\t\tnp.float32)\n\t\t\tnew_array = np.nan_to_num(new_array)\n\t\telse:\n\t\t\twarnings.warn(\n\t\t\t\t\"Layer {lr} has not been readed. No data will be \"\n\t\t\t\t\"used instead\".format(lr=lyr_name), stacklevel=3)\n\t\t\tnew_array = None\n\n\t\treturn new_array",
"def ndarray2GSLIB(array, data_file, col_name):\n\n if array.ndim not in [1, 2]:\n raise ValueError(\"must use a 2D array\")\n\n with open(data_file, \"w\") as f:\n f.write(data_file + \"\\n\")\n f.write(\"1 \\n\")\n f.write(col_name + \"\\n\")\n\n if array.ndim == 2:\n ny, nx = array.shape\n\n for iy in range(ny):\n for ix in range(nx):\n f.write(str(array[ny - 1 - iy, ix]) + \"\\n\")\n\n elif array.ndim == 1:\n nx = len(array)\n for ix in range(0, nx):\n f.write(str(array[ix]) + \"\\n\")",
"def pixel_to_proj(self, p):\n\n p = np.matrix(p).transpose()\n p = np.vstack((p, np.ones((1, p.shape[1]))))\n out = self.geo_transform[:2,:] * p\n out = out.transpose()\n return np.array(out)",
"def to_numpy(self):\n x, y = self.to_coords()\n numpy_sol = utils.turn_coords_to_numpy(x, y)\n numpy_sol = np.reshape(numpy_sol[:, :, 0], (480, 640, 1))\n return numpy_sol",
"def get_raw_array(raw):\n h, w = raw.sizes.raw_height, raw.sizes.raw_width\n raw_array = np.array(raw.raw_image).reshape((h, w)).astype('float')\n return raw_array",
"def image_obj_to_numpy(img_obj: Any) -> np.ndarray:\r\n if isinstance(img_obj, plt.Figure):\r\n return plt_figure_to_numpy(img_obj)\r\n else:\r\n return np.array(img_obj)",
"def rasterize(gdf_geom, column, shape, trans, dst_crs, dtype=np.uint8, nodata=0):\n raster_data = features.rasterize(\n shapes=[\n (r[1].geometry, r[1][column]) \n for r in gdf_geom.iterrows()\n ],\n out_shape=shape, \n fill=nodata, \n transform=trans,\n )\n return raster_data.astype(dtype)",
"def _make_raster_from_array(base_array, target_raster_path, projected=True):\r\n srs = osr.SpatialReference()\r\n if projected:\r\n srs.ImportFromEPSG(EPSG_CODE) # UTM Zone 10N, unit = meter\r\n project_wkt = srs.ExportToWkt()\r\n\r\n pygeoprocessing.numpy_array_to_raster(\r\n base_array, -1, (1, -1), ORIGIN, project_wkt, target_raster_path)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given the activations, perform the specified dimensionality reduction.
|
def reduce_activations(acts: np.ndarray, reduction: str = 'NMF', dim: int = 6) -> np.ndarray:
reducer = ChannelReducer(dim, reduction)
if reduction == 'NMF':
# NMF requires activations to be positive
acts = get_positive_activations(acts)
return reducer._reducer.fit_transform(acts)
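
A short usage sketch under assumed shapes; ChannelReducer comes from the surrounding module (in lucid-style code it wraps sklearn decompositions), so the exact reducer behaviour is an assumption here:

import numpy as np

# 1000 flattened spatial positions x 512 channels, reduced to 6 NMF factors.
acts = np.random.rand(1000, 512).astype(np.float32)
reduced = reduce_activations(acts, reduction='NMF', dim=6)
# reduced.shape == (1000, 6)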
|
[
"def dimensionality_reduction(samples_data, config):\n\n drs = {\n 'pca': dr_pca,\n 'tsne': dr_tsne,\n 'rfc': dr_rfc,\n 'irfc': dr_irfc\n }\n\n uuids = samples_data.index[samples_data['selected'] == 1].tolist()\n x_train = samples_data.index[samples_data['train'] == 1].tolist()\n x_dev = samples_data.index[samples_data['dev'] == 1].tolist()\n x_test = samples_data.index[samples_data['test'] == 1].tolist()\n\n # Prompts the user to select an action\n dr = interaction.ask_action(constants.msg_dr, set(drs.keys()))\n if dr == 's':\n return None, None\n\n components = interaction.ask_number(constants.msg_components)\n to_cla = interaction.ask_yes_no(constants.msg_cla_clu)\n\n if to_cla:\n data, model = drs[dr].reduce(config, components, None, x_train, x_dev, x_test)\n\n else:\n data, model = drs[dr].reduce(config, components, uuids, None, None, None)\n\n return data, model",
"def dimensionality_reduction_lda(n_components, train_features, labels, test_features=None):\n\n model = LinearDiscriminantAnalysis(n_components=n_components)\n model.fit(train_features, labels)\n\n # Transform the training and test class data with a dim reduction algorithm.\n train_features_reduced = model.transform(train_features)\n if test_features is not None:\n test_features_reduced = model.transform(test_features)\n else:\n test_features_reduced = None\n\n variance = np.sum(model.explained_variance_ratio_)\n print('Variance explained with '+str(n_components)+' components: '+ str(variance))\n\n return train_features_reduced, test_features_reduced",
"def dimensionality_reduction(n_components, train_features, test_features=None, \\\n pca=True):\n if (pca):\n model = PCA(n_components=n_components)\n else:\n model = TruncatedSVD(n_components=n_components)\n model.fit(train_features)\n\n # Transform the training and test class data with a dim reduction algorithm.\n train_features_reduced = model.transform(train_features)\n if test_features is not None:\n test_features_reduced = model.transform(test_features)\n else:\n test_features_reduced = None\n\n variance = np.sum(model.explained_variance_ratio_)\n print('Variance explained with '+str(n_components)+' components: '+ str(variance))\n\n return train_features_reduced, test_features_reduced",
"def reduce_dimensionality(src_dir, model, dst_dir):\r\n desc = src_dir.split('/')[1] #The progress bar will show the name of the current src directory\r\n for f in tqdm(os.listdir(src_dir), desc=desc):\r\n features = deserialize_features(os.path.join(src_dir, f))\r\n reduced_features = model.predict(features)\r\n\r\n file_name = f.split('.')[0]\r\n if desc != \"features_5_pool\":\r\n file_name = file_name[:-2] #We want to serialize the reduced arrays without the index number at the end\r\n\r\n serialize_features(reduced_features, os.path.join(dst_dir, file_name))",
"def specify_dimensionality(self, input_sample, output_dimensionality=torch.tensor([10])):\n if self.dimensionality_configured:\n print(\"Warning - trying to configure dimensionality multiple times on the same network\")\n return\n if self.lr == 0:\n raise Exception('Must set net learning rate before calling specify dimensionality')\n output_nodes = int(list(output_dimensionality)[0])\n output = self(input_sample, configuration_run=True)\n if output is None:\n raise Exception(\"Error: failed to pass input through nn\")\n\n in_layers = Utils.get_flat_number(output)\n\n self.final_layer = nn.Linear(in_layers, output_nodes).to(Config.get_device())\n\n self.dimensionality_configured = True\n self.outputDimensionality = output_dimensionality\n final_params = self.final_layer.parameters()\n full_parameters = self.module_graph.module_graph_root_node.get_parameters({})\n full_parameters.extend(final_params)\n\n self.optimizer = optim.Adam(full_parameters, lr=self.lr, betas=(self.beta1, self.beta2))\n\n self.init_weights()\n self.module_graph.blueprint_genome.weight_init.get_value()(self.final_layer.weight)",
"def apply_attention(input, attention):\n # import pdb\n # pdb.set_trace()\n n, c = input.size()[:2]\n glimpses = attention.size(1)\n\n # flatten the spatial dims into the third dim, since we don't need to care about how they are arranged\n input = input.view(n, c, -1)\n attention = attention.view(n, glimpses, -1)\n s = input.size(2)\n\n # apply a softmax to each attention map separately\n # since softmax only takes 2d inputs, we have to collapse the first two dimensions together\n # so that each glimpse is normalized separately\n attention = attention.view(n * glimpses, -1)\n attention = F.softmax(attention)\n\n # apply the weighting by creating a new dim to tile both tensors over\n target_size = [n, glimpses, c, s]\n input = input.view(n, 1, c, s).expand(*target_size)\n attention = attention.view(n, glimpses, 1, s).expand(*target_size)\n weighted = input * attention\n # sum over only the spatial dimension\n weighted_mean = weighted.sum(dim=3)\n # the shape at this point is (n, glimpses, c, 1)\n return weighted_mean.view(n, -1)",
"def setup_cnn(\n input_dimensions=DEFAULT_INPUT_DIMENSIONS,\n conv_block_layer_counts=DEFAULT_CONV_BLOCK_LAYER_COUNTS,\n conv_layer_channel_counts=DEFAULT_CONV_CHANNEL_COUNTS,\n conv_layer_dropout_rates=DEFAULT_CONV_DROPOUT_RATES,\n conv_layer_filter_sizes=DEFAULT_CONV_FILTER_SIZES,\n dense_layer_neuron_counts=DEFAULT_DENSE_NEURON_COUNTS,\n dense_layer_dropout_rates=DEFAULT_DENSE_DROPOUT_RATES,\n inner_activ_function_name=DEFAULT_INNER_ACTIV_FUNCTION_NAME,\n inner_activ_function_alpha=DEFAULT_INNER_ACTIV_FUNCTION_ALPHA,\n output_activ_function_name=DEFAULT_OUTPUT_ACTIV_FUNCTION_NAME,\n output_activ_function_alpha=DEFAULT_OUTPUT_ACTIV_FUNCTION_ALPHA,\n l1_weight=DEFAULT_L1_WEIGHT, l2_weight=DEFAULT_L2_WEIGHT,\n use_batch_normalization=True):\n\n # TODO(thunderhoser): Allow for tasks other than binary classification.\n assert dense_layer_neuron_counts[-1] == 1\n\n num_conv_layers = len(conv_layer_channel_counts)\n assert numpy.sum(conv_block_layer_counts) == num_conv_layers\n\n num_input_rows = input_dimensions[0]\n num_input_columns = input_dimensions[1]\n num_input_channels = input_dimensions[2]\n\n input_layer_object = keras.layers.Input(\n shape=(num_input_rows, num_input_columns, num_input_channels)\n )\n regularizer_object = utils._get_weight_regularizer(\n l1_weight=l1_weight, l2_weight=l2_weight\n )\n\n layer_object = None\n\n for i in range(num_conv_layers):\n if layer_object is None:\n this_input_layer_object = input_layer_object\n else:\n this_input_layer_object = layer_object\n\n layer_object = _get_2d_conv_layer(\n num_rows_in_filter=conv_layer_filter_sizes[i],\n num_columns_in_filter=conv_layer_filter_sizes[i],\n num_rows_per_stride=1, num_columns_per_stride=1,\n num_filters=conv_layer_channel_counts[i], use_edge_padding=True,\n weight_regularizer=regularizer_object\n )(this_input_layer_object)\n\n layer_object = utils._get_activation_layer(\n function_name=inner_activ_function_name,\n slope_param=inner_activ_function_alpha\n )(layer_object)\n\n if conv_layer_dropout_rates[i] > 0:\n layer_object = utils._get_dropout_layer(\n dropout_fraction=conv_layer_dropout_rates[i]\n )(layer_object)\n\n if use_batch_normalization:\n layer_object = utils._get_batch_norm_layer()(layer_object)\n\n if i + 1 not in numpy.cumsum(conv_block_layer_counts):\n continue\n\n if i == num_conv_layers - 1:\n continue\n\n layer_object = _get_2d_pooling_layer(\n num_rows_in_window=2, num_columns_in_window=2,\n num_rows_per_stride=2, num_columns_per_stride=2, do_max_pooling=True\n )(layer_object)\n\n layer_object = keras.layers.Flatten()(layer_object)\n\n num_dense_layers = len(dense_layer_neuron_counts)\n\n for i in range(num_dense_layers):\n if layer_object is None:\n this_input_layer_object = input_layer_object\n else:\n this_input_layer_object = layer_object\n\n layer_object = utils._get_dense_layer(\n num_output_units=dense_layer_neuron_counts[i],\n weight_regularizer=regularizer_object\n )(this_input_layer_object)\n\n if i == num_dense_layers - 1:\n layer_object = utils._get_activation_layer(\n function_name=output_activ_function_name,\n slope_param=output_activ_function_alpha\n )(layer_object)\n else:\n layer_object = utils._get_activation_layer(\n function_name=inner_activ_function_name,\n slope_param=inner_activ_function_alpha\n )(layer_object)\n\n if dense_layer_dropout_rates[i] > 0:\n layer_object = utils._get_dropout_layer(\n dropout_fraction=dense_layer_dropout_rates[i]\n )(layer_object)\n\n if use_batch_normalization and i != num_dense_layers - 1:\n layer_object = 
utils._get_batch_norm_layer()(layer_object)\n\n model_object = keras.models.Model(\n inputs=input_layer_object, outputs=layer_object\n )\n\n model_object.compile(\n loss=keras.losses.binary_crossentropy,\n optimizer=keras.optimizers.Adam(),\n metrics=METRIC_FUNCTION_LIST\n )\n\n model_object.summary()\n return model_object",
"def SetDimensionInformation(dims):",
"def isometrize(self):\n for idx,w0 in enumerate(self.W[0]):\n temp=np.reshape(w0,[self.d**2,self.Dbond])\n dmin=min(temp.shape)\n Q,R=np.linalg.qr(temp)\n self.W[0][idx]=np.reshape(Q,[self.d,self.d,dmin])\n\n for i in range(1,self.Nlayer):\n for idx,wj in enumerate(self.W[i]):\n temp=np.reshape(wj,[self.Dbond*self.Dbond,wj.shape[2]])\n Q,R=np.linalg.qr(temp)\n self.W[i][idx]=np.reshape(Q,[self.Dbond,self.Dbond,wj.shape[2]])",
"def execute(self, inputs):\n\t\trun = np.vectorize(lambda x: x.execute(inputs))\n\t\tself.output = np.array(list(self.activation(run(self.neurons).reshape(self.shape))))\n\t\treturn self.output",
"def test_01_apply_dilation_to_image(self):\n dilation_image = self.keyword.apply_dilation_to_image(self.processed_image, (1, 1), 0)\n self.assertTrue(isinstance(dilation_image, np.ndarray))\n dilation_image = self.keyword.apply_dilation_to_image(self.processed_image, (1, 1), 1)\n self.assertTrue(isinstance(dilation_image, np.ndarray))\n dilation_image = self.keyword.apply_dilation_to_image(self.processed_image, (1, 1), 2)\n self.assertTrue(isinstance(dilation_image, np.ndarray))",
"def forward(self, tensors,\n mask=None):\n if len(tensors) != self.mixture_size:\n raise ConfigurationError(\"{} tensors were passed, but the module was initialized to \"\n \"mix {} tensors.\".format(len(tensors), self.mixture_size))\n\n # TODO: check why using mask and arranged layernorm\n def _do_layer_norm(tensor, broadcast_mask, num_elements_not_masked):\n tensor_masked = tensor * broadcast_mask\n mean = F.sum(tensor_masked) / num_elements_not_masked\n variance = F.sum(\n ((tensor_masked - mean) * broadcast_mask)**2) / num_elements_not_masked\n return (tensor - mean) / F.sqrt(variance + 1E-12)\n\n normed_weights = F.softmax(self.scalar_parameters[None], axis=1)[0]\n normed_weights = F.split_axis(normed_weights, normed_weights.shape[0],\n axis=0)\n # TODO: remove for-loop by broadcast\n\n if not self.do_layer_norm:\n pieces = []\n for weight, tensor in zip(normed_weights, tensors):\n weight = F.broadcast_to(weight[None, None], tensor.shape)\n pieces.append(weight * tensor)\n gamma = F.broadcast_to(self.gamma[None, None], tensor.shape)\n return gamma * sum(pieces)\n\n else:\n mask_float = mask\n broadcast_mask = F.expand_dims(mask_float, -1)\n input_dim = tensors[0].shape(-1)\n num_elements_not_masked = F.sum(mask_float) * input_dim\n\n pieces = []\n for weight, tensor in zip(normed_weights, tensors):\n weight = F.broadcast_to(weight[None, None], tensor.shape)\n pieces.append(weight * _do_layer_norm(tensor,\n broadcast_mask, num_elements_not_masked))\n gamma = F.broadcast_to(self.gamma[None, None], tensor.shape)\n return gamma * sum(pieces)",
"def test_product_dimension_is_sum_of_dimensions(self, space_args):\n spaces_list = space_args[0]\n result = self.Space(*space_args).dim\n expected = sum(space.dim for space in spaces_list)\n self.assertAllClose(result, expected)",
"def ResNetA3D(x, mdlParams, placeholders=None):\r\n with tf.variable_scope('ResNetA3D'):\r\n with slim.arg_scope([slim.convolution], padding='SAME', activation_fn=tf.nn.relu, weights_initializer=tf.truncated_normal_initializer(stddev=0.01), normalizer_fn=slim.batch_norm, normalizer_params={'is_training':placeholders['train_state'], 'epsilon':0.0001, 'decay':0.9, 'center':True, 'scale':True, 'activation_fn':None, 'updates_collections':tf.GraphKeys.UPDATE_OPS, 'fused': False}):\r\n # Initial part\r\n with tf.variable_scope('Initial'):\r\n layer = slim.convolution(x, 48, 3, stride=1, scope='conv1')\r\n layer = slim.convolution(layer, 64, 3, stride=2, scope='conv2')\r\n layer = slim.convolution(layer, 64, 3, stride=1, scope='conv3')\r\n layer = slim.convolution(layer, 64, 3, stride=1, scope='conv4')\r\n layer = slim.convolution(layer, 64, 3, stride=1, scope='conv5')\r\n # Resnet modules\r\n with tf.variable_scope('Resnet_modules'):\r\n # Initial output feature map size\r\n output_fm = mdlParams['ResNetA3D_FM']\r\n # Iterate through all modules\r\n for i in range(len(mdlParams['ResNetA3D_Size'])):\r\n with tf.variable_scope('Module_%d'%(i)):\r\n # Iterate through all blocks inside the module\r\n for j in range(mdlParams['ResNetA3D_Size'][i]):\r\n with tf.variable_scope('Block_%d'%(j)):\r\n # Set desired output feature map dimension of the block and the desired stride for the first block in the module\r\n if j==0:\r\n output_fm = 2*output_fm\r\n block_stride = mdlParams['ResNetA3D_Stride'][i]\r\n else:\r\n block_stride = 1\r\n layer = resneta_block(layer, output_fm, block_stride)\r\n # GAP for 1D,2D,3D\r\n if len(layer.get_shape().as_list()) == 5:\r\n layer = math_ops.reduce_mean(layer, axis=[1,2,3], keep_dims = False, name='global_pool')\r\n elif len(layer.get_shape().as_list()) == 4:\r\n layer = math_ops.reduce_mean(layer, axis=[1,2], keep_dims = False, name='global_pool')\r\n else:\r\n layer = math_ops.reduce_mean(layer, axis=[1], keep_dims = False, name='global_pool')\r\n # Dense output layer\r\n output = slim.layers.fully_connected(layer, len(mdlParams['tar_range']), activation_fn=None)\r\n return output",
"def _check_dimensionality(self, array, dims_dict):\n if self.feature_type in [FeatureType.DATA, FeatureType.MASK]:\n return self._reshape_array(array, dims_dict)\n elif self.feature_type in [FeatureType.DATA_TIMELESS, FeatureType.MASK_TIMELESS]:\n array = array.squeeze(axis=0)\n return self._reshape_array(array, dims_dict)\n return array",
"def apply_neural_nets(observation_matrix, weights):\n\n # we take the dot-product of the observation matrix and weight array no.1\n hidden_layer_values = np.dot(weights['1'], observation_matrix)\n hidden_layer_values = relu(hidden_layer_values)\n\n output_layer_values = np.dot(hidden_layer_values, weights['2'])\n output_layer_values = sigmoid(output_layer_values)\n return hidden_layer_values, output_layer_values",
"def run_reductions():\n \n params = get_args()\n \n data = dataset_utils.DataCollection()\n \n data.get_datasets_for_reduction(params['datasets_file'])\n \n log = logs.start_pipeline_log(data.red_dir, 'prepare_datasets')\n \n log.info(data.summary())\n print(data.summary())\n \n setup = dataset_utils.build_pipeline_setup(data)\n \n check_sanity(setup,data,log)\n \n run_parallel_data_preparations(setup, data, log)\n \n logs.close_log(log)",
"def reduce_along_dim(img, dim, weights, indices):\n other_dim = abs(dim - 1)\n if other_dim == 0: # resizing image width\n weights = np.tile(weights[np.newaxis, :, :, np.newaxis], (img.shape[other_dim], 1, 1, 3))\n out_img = img[:, indices, :] * weights\n out_img = np.sum(out_img, axis=2)\n else: # resize image height\n weights = np.tile(weights[:, :, np.newaxis, np.newaxis], (1, 1, img.shape[other_dim], 3))\n out_img = img[indices, :, :] * weights\n out_img = np.sum(out_img, axis=1)\n\n return out_img",
"def _apply_dense(self, grad, var):\n raise NotImplementedError()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If any activations are negative, return a twice-as-long positive array instead, with the originally positive values in the first half and the originally negative values in the second half. Essentially, this contains all the information in the original array, but in the form of a positive array, e.g. [-1, 2, 3] -> [0, 2, 3, 1, 0, 0].
|
def get_positive_activations(acts: np.ndarray) -> np.ndarray:
if (acts > 0).all():
return acts
else:
return np.concatenate([np.maximum(0, acts), np.maximum(-acts, 0)], axis=-1)
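
A worked example of the behaviour described in the query; it runs as-is with NumPy:

import numpy as np

get_positive_activations(np.array([1, 2, 3]))    # already positive: returned unchanged
get_positive_activations(np.array([-1, 2, 3]))   # -> array([0, 2, 3, 1, 0, 0])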
|
[
"def reordering(array):\n negative = []\n positive = []\n while array:\n x = array.pop()\n if x < 0:\n negative.append(x)\n else:\n positive.append(x)\n negative.reverse()\n positive.reverse()\n return negative + positive",
"def __neg__(self):\n ret_mat = self.data\n for i in range(0,8):\n ret_mat[i] = -ret_mat[i]\n return ret_mat",
"def neg(x):\r\n return -min_elemwise(x, 0)",
"def getNegativeReturns(self):\n\t\treturn np.array(self.__negativeReturns)",
"def cnot() -> np.ndarray:\n return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])",
"def convert_neg_indices(indices, ndim):\n def canonicalizer(ax):\n return ax + ndim if ax < 0 else ax\n indices = tuple([canonicalizer(axis) for axis in indices])\n return indices",
"def is_all_negative(arr):\n for e in arr:\n if e >= 0:\n return False\n return True",
"def imcomplement(matrix):\n newmatrix = -matrix + np.max(matrix) + np.min(matrix)\n return newmatrix",
"def softsign(self, data):\n return (data / (1 + np.abs(data)))",
"def split_pos_neg(npX, npY):\n iPos = int(np.sum(npY))\n npNeg = np.zeros((npX.shape[0] - iPos, npX.shape[1]), dtype=\"int8\")\n npPos = np.zeros((iPos, npX.shape[1]), dtype=\"int8\")\n\n iIndexPos = 0\n iIndexNeg = 0\n for iIndex in range(npX.shape[0]):\n if npY[iIndex] == 0:\n npNeg[iIndexNeg] = npX[iIndex]\n iIndexNeg += 1\n elif npY[iIndex] == 1:\n npPos[iIndexPos] = npX[iIndex]\n iIndexPos += 1\n return npPos, npNeg",
"def nonz(self, arr: list):\n for i in range(len(arr)):\n if arr[i] == 0:\n continue\n else:\n return arr[i]",
"def abs(self):\n return SparseVector(np.abs(self.data), self.indices, self.size)",
"def inverse(xx: np.ndarray) -> np.ndarray:\n yy = (10 - 1 / 1.1) ** (-1) * (xx + 0.1) ** (-1)\n\n return yy",
"def getPositiveReturns(self):\n\t\treturn np.array(self.__positiveReturns)",
"def filter_zeros(self,array_data):\n array_data[array_data==0]=np.nan\n return array_data",
"def nonzero_values(x):\n return x[x != 0]",
"def _copysign(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n signs_differ = (a < 0) != (b < 0)\n return torch.where(signs_differ, -a, a)",
"def _sample_negatives(\r\n features: torch.FloatTensor, num_negatives: int, attention_mask: Optional[torch.LongTensor] = None\r\n ):\r\n batch_size, sequence_length, hidden_size = features.shape\r\n if sequence_length <= 1:\r\n raise ValueError(\r\n f\"`features should have `sequence_length` > 1, but are of shape (batch_size, sequence_length, hidden_size) = ({batch_size, sequence_length, hidden_size}).\"\r\n )\r\n\r\n features = features.view(-1, hidden_size) # BTC => (BxT)C\r\n\r\n with torch.no_grad():\r\n # get `num_negatives` random vector indices from the same utterance\r\n sampled_negative_indices = []\r\n for batch_idx in range(batch_size):\r\n high = attention_mask[batch_idx].sum() - 1 if attention_mask is not None else sequence_length - 1\r\n sampled_indices_slice = torch.randint(\r\n 0, high, size=(num_negatives * sequence_length,), device=features.device\r\n )\r\n sampled_negative_indices.append(sampled_indices_slice)\r\n\r\n sampled_negative_indices = torch.stack(sampled_negative_indices)\r\n\r\n # generate indices of the positive vectors themselves, repeat them `num_negatives` times\r\n feature_indices = (\r\n torch.arange(sequence_length, device=features.device)[:, None]\r\n .expand(sequence_length, num_negatives)\r\n .flatten()\r\n )\r\n\r\n # avoid sampling the same positive vector, but keep the distribution uniform\r\n sampled_negative_indices[sampled_negative_indices >= feature_indices] += 1\r\n\r\n # correct for batch size\r\n for batch_idx in range(1, batch_size):\r\n sampled_negative_indices[batch_idx] += batch_idx * sequence_length\r\n\r\n # take negative vectors from sampled indices\r\n sampled_negatives = features[sampled_negative_indices.view(-1)]\r\n sampled_negatives = sampled_negatives.view(batch_size, sequence_length, num_negatives, hidden_size).permute(\r\n 2, 0, 1, 3\r\n )\r\n\r\n return sampled_negatives",
"def nonneg(s):\n return filter(lambda x: x>=0, s)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Resolve a Subject by Bank Identification Number (BIN).
|
def by_idin_bin(self, bin):
dao = self.session.query(BankIdentificationNumber)\
.filter(BankIdentificationNumber.bin == bin)\
.one()
return self.dto({
'type': 'idin:bin',
'gsid': dao.gsid.hex,
})
|
[
"def _asn_lookup(asn:str):\n \n response = ''\n \n try: \n \n request = requests.get(f\" https://whois.arin.net/rest/asn/AS{asn}\", headers={\"Content-Type\": 'text/plain', 'Accept': 'text/plain'})\n \n #ARIN doesnt return response codes. If not found, return text/html, else return text/plain\n if request.text.splitlines()[0].startswith('<!DOCTYPE'):\n response = f'\\nASN {asn} not found in the ARIN registery'\n else:\n response = 'ASNumber: ' + request.text.split('ASNumber')[1].split('ARIN')[0]\n \n except (requests.exceptions.ConnectionError, requests.exceptions.InvalidURL):\n pass\n finally:\n return response",
"def find_isbns(self):\n def normalize_isbn(value):\n return ''.join([s for s in value if s.isdigit() or s == 'X'])\n\n text = self.extract_pdf_text()\n matches = re.compile('\\d[\\d\\-X\\ ]+').findall(text)\n matches = [normalize_isbn(value) for value in matches]\n isbns = [isbn for isbn in matches if stdnum.isbn.is_valid(isbn)\n and len(isbn) in (10, 13)\n and isbn not in (\n '0' * 10,\n '0' * 13,\n )]\n return isbns[0] if isbns else None",
"def lookup(number):\n res = requests.get(\"https://api.opencnam.com/v2/phone/%s\" % number)\n cnam = res.content.decode()\n if cnam != \"\":\n return cnam\n else:\n return None",
"def get_isbn_by_asin(self, asin):\n query = \"select isbn from %s where asin = ?\" % self.table_name\n isbn = self.conn.execute(query, (asin,)).fetchone()\n if len(isbn) == 1:\n return isbn\n else:\n return None",
"def read_ATM1b_QFIT_binary(full_filename, SUBSETTER=None):\n # read the input file to get file information\n fd = os.open(os.path.expanduser(full_filename),os.O_RDONLY)\n file_info = os.fstat(fd)\n # open the filename in binary read mode\n fid = os.fdopen(fd, 'rb')\n\n # regular expression pattern for extracting parameters\n rx=re.compile((r'(BLATM1B|ILATM1B|ILNSA1B)_'\n r'((\\d{4})|(\\d{2}))(\\d{2})(\\d{2})'\n r'(.*?)\\.qi$'),re.VERBOSE)\n # extract mission and other parameters from filename\n match_object = rx.match(os.path.basename(full_filename))\n # convert year, month and day to float variables\n year = np.float64(match_object.group(2))\n month = np.float64(match_object.group(5))\n day = np.float64(match_object.group(6))\n # early date strings omitted century and millenia (e.g. 93 for 1993)\n if match_object.group(4):\n year = (year + 1900.0) if (year >= 90) else (year + 2000.0)\n\n # get the number of variables and the endianness of the file\n n_blocks,dtype = get_record_length(fid)\n MAXARG = 14\n # check that the number of blocks per record is less than MAXARG\n if (n_blocks > MAXARG):\n raise Exception('ERROR: Unexpected number of variables')\n # read over header text\n header_count,header_text = read_ATM1b_QFIT_header(fid, n_blocks, dtype)\n\n # number of records to read with and without input subsetter\n if SUBSETTER is None:\n # number of records within file (file size - header size)\n n_records = (file_info.st_size-header_count)//n_blocks//dtype.itemsize\n else:\n # number of records in subsetter\n n_records = len(SUBSETTER)\n # convert from data point indices into binary variable indices\n SUBSETTER = header_count + dtype.itemsize*(np.array(SUBSETTER)*n_blocks)\n\n # read input data\n ATM_L1b_input = read_ATM1b_QFIT_records(fid, n_blocks, n_records, dtype,\n [year, month, day], SUBSETTER=SUBSETTER)\n\n # close the input file\n fid.close()\n # return the data and header text\n return ATM_L1b_input, header_text",
"def __callSearchSubject(self):\r\n idGiven=input(\" Give ID:\")\r\n try:\r\n sub=self.__table.searchSubject(idGiven)\r\n print(\"Subject with this ID is: \"+sub.getName())\r\n except IdNotFound as ex:\r\n print(ex.getErrors())",
"def _num_to_bank_label(num):\n banks = {\n 0: 'A',\n 1: 'B',\n 2: 'C',\n 3: 'D'\n }\n bank = banks[(num - 1) // 64]\n # number = num % 64 if bank == 'A' else (num + 1) % 64\n number = num % 64 or 64\n return \"{}-CH{:02d}\".format(bank, number)",
"def _handle_sscc_urn(self, groups, urn_value):\n self.is_sgtin = False\n serial_number = groups.get('serial_number')\n self._extension_digit = serial_number[:1]\n self._serial_number = serial_number[1:]\n barcode = '%s%s' % (self._extension_digit, self._company_prefix)\n padding_length = 17 - len(barcode)\n serial_number = self._serial_number.zfill(padding_length)\n barcode = '%s%s' % (barcode, serial_number)\n self._sscc18 = calculate_check_digit(barcode)",
"def bin_search_id(boarding_pass: str) -> int:\n rows = boarding_pass[:7]\n seats = boarding_pass[7:]\n low, high = 0, 127\n for step in rows:\n if step == \"F\": # lower half\n high = (high + low) // 2\n elif step == \"B\": # upper half\n low = ((high + low) // 2) + 1\n left, right = 0, 7\n for seat in seats:\n if seat == \"L\": # lower half\n right = (left + right) // 2\n elif seat == \"R\": # upper half\n left = ((left + right) // 2) + 1\n return (low * 8) + left",
"def test_single_binary_package(self, manifest):\n util.create_validate_oci(util.bionic_dpkg_file, \"bionic_3642\",\n [\"--usn-number\", \"3642-1\", \"--usn-oval-release\", \"bionic\"],\n manifest, manifest)",
"def select_book(self, isbn):\n return self.cur.execute('SELECT * FROM books WHERE isbn=?', (isbn,)).fetchone()",
"def lookup_prefix(digits: str) -> Tuple[int, int]:\n return lookup_isbn_prefix(digits)",
"def isbn(self):\n isbns = self._head.get('source', {}).get('isbn', [])\n if not isinstance(isbns, list):\n isbns = [isbns]\n if len(isbns) == 0:\n return None\n else:\n return tuple((i['$'] for i in isbns))",
"def identity_to_codename(identity):\n identity = identity.replace(' ', '_').replace('/', '-')\n return 'B307_gasalarm_{}'.format(identity)",
"def read_uk_biobank_codings(coding_number):\n return lambda: pd.read_csv(\n conf.UK_BIOBANK[f\"CODING_{coding_number}_FILE\"], sep=\"\\t\"\n )",
"def get_isbn_by_asins(self, asins):\n isbns = []\n query = \"select isbn from %s where asin = ?\" % self.table_name\n for asin in asins:\n isbn = self.conn.execute(query, asin).fetchone()\n if len(isbn) == 1:\n isbns.append(isbn)\n return isbns",
"def bid_sub(self):\n return BID(self.data[16:24])",
"def test_iban_fields(self):\r\n valid = {\r\n 'NL02ABNA0123456789': 'NL02ABNA0123456789',\r\n 'NL02 ABNA 0123 4567 89': 'NL02ABNA0123456789',\r\n 'NL02-ABNA-0123-4567-89': 'NL02ABNA0123456789',\r\n\r\n 'NL91ABNA0417164300': 'NL91ABNA0417164300',\r\n 'NL91 ABNA 0417 1643 00': 'NL91ABNA0417164300',\r\n 'NL91-ABNA-0417-1643-00': 'NL91ABNA0417164300',\r\n\r\n 'MU17BOMM0101101030300200000MUR': 'MU17BOMM0101101030300200000MUR',\r\n 'MU17 BOMM 0101 1010 3030 0200 000M UR': 'MU17BOMM0101101030300200000MUR',\r\n 'MU 17BO MM01011010 3030-02 000-00M UR': 'MU17BOMM0101101030300200000MUR',\r\n\r\n 'BE68539007547034': 'BE68539007547034',\r\n 'BE68 5390 0754 7034': 'BE68539007547034',\r\n 'BE-685390075470 34': 'BE68539007547034',\r\n }\r\n\r\n invalid = {\r\n 'NL02ABNA012345678999': ['NL IBANs must contain 18 characters.'],\r\n 'NL02 ABNA 0123 4567 8999': ['NL IBANs must contain 18 characters.'],\r\n\r\n 'NL91ABNB0417164300': ['Not a valid IBAN.'],\r\n 'NL91 ABNB 0417 1643 00': ['Not a valid IBAN.'],\r\n\r\n 'MU17BOMM0101101030300200000MUR12345': [\r\n 'MU IBANs must contain 30 characters.',\r\n 'Ensure this value has at most 34 characters (it has 35).'],\r\n 'MU17 BOMM 0101 1010 3030 0200 000M UR12 345': [\r\n 'MU IBANs must contain 30 characters.',\r\n 'Ensure this value has at most 34 characters (it has 35).'],\r\n\r\n # This IBAN should only be valid only if the Nordea extensions are turned on.\r\n 'EG1100006001880800100014553': ['EG is not a valid country code for IBAN.'],\r\n 'EG11 0000 6001 8808 0010 0014 553': ['EG is not a valid country code for IBAN.']\r\n }\r\n\r\n self.assertFieldOutput(IBANFormField, valid=valid, invalid=invalid)\r\n\r\n # Test valid inputs for model field.\r\n iban_model_field = IBANField()\r\n for input, output in valid.items():\r\n self.assertEqual(iban_model_field.clean(input, None), output)\r\n\r\n # Invalid inputs for model field.\r\n for input, errors in invalid.items():\r\n with self.assertRaises(ValidationError) as context_manager:\r\n iban_model_field.clean(input, None)\r\n # The error messages for models are in a different order.\r\n errors.reverse()\r\n self.assertEqual(context_manager.exception.messages, errors)",
"def binomial_authority(self, binomial_authority):\n\n self._binomial_authority = binomial_authority"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Resolve a Subject by using the fingerprint of an X.509 certificate issued by a trusted Certification Authority (CA).
|
def by_x509_fingerprint(self, fingerprint):
dao = self.session.query(CertificateFingerprint)\
.filter(CertificateFingerprint.fingerprint == fingerprint)\
.one()
return self.dto({
'type': 'x509.fingerprint',
'gsid': dao.gsid.hex,
})
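
For context, a sketch of how such a fingerprint might be computed with the cryptography package before being handed to the resolver; treating the stored value as a hex-encoded SHA-256 digest is an assumption about this codebase, and client.pem is a hypothetical path:

from cryptography import x509
from cryptography.hazmat.primitives import hashes

with open("client.pem", "rb") as f:
    cert = x509.load_pem_x509_certificate(f.read())
fingerprint = cert.fingerprint(hashes.SHA256()).hex()
# subject = resolver.by_x509_fingerprint(fingerprint)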
|
[
"def der_cert_to_subject_hash(der_bytes):\n iterator = ASN1Iterator(der_bytes)\n iterator.step_into() # enter certificate structure\n iterator.step_into() # enter TBSCertificate\n iterator.step_over() # over version\n iterator.step_over() # over serial\n iterator.step_over() # over signature algorithm\n iterator.step_over() # over issuer name\n iterator.step_over() # over validity\n return hashlib.sha256(iterator.contents()).digest()",
"def _calculate_fingerprint(cert):\n x509 = M2Crypto.X509.load_cert_string(cert, M2Crypto.X509.FORMAT_PEM)\n fp = x509.get_fingerprint('sha1')\n fp = ':'.join(fp[pos:pos + 2] for pos in xrange(0, len(fp), 2))\n\n return fp",
"def getCertificateFileName( self, fingerprint ):\n\t\t# Check if the configuration file contains such an item\n\t\tfor configValue in self.config.keys():\n\t\t\tif configValue.startswith('CERTIFICATE'):\n\t\t\t\tcertFilename = self.config[ configValue ]\n\t\t\telse:\n\t\t\t\tcontinue\n\n\t\t\t# Generate a fingerprint from the certificate in the file.\n\t\t\tbuff = self.Security.createCertFingerprint( certFilename )\n\t\t\tif buff == False:\n\t\t\t\t# Could not create fingerprint from configured certificate.\n\t\t\t\treturn False\n\n\t\t\t# Check if the fingerprint is equal to the desired one.\n\t\t\tif fingerprint == buff:\n\t\t\t\treturn certFilename\n\n\t\tself.log( TRACE_ERROR, 'Could not find certificate with fingerprint %s' % fingerprint )\n\t\tself.setError( ING_ERROR_COULD_NOT_VERIFY, 'Could not verify message', IDEAL_PRV_GENERIC_ERROR )\n\n\t\t# By default, report no success.\n\t\treturn False",
"def get_subj_alt_name(peer_cert):\n # Pass the cert to cryptography, which has much better APIs for this.\n if hasattr(peer_cert, \"to_cryptography\"):\n cert = peer_cert.to_cryptography()\n else:\n # This is technically using private APIs, but should work across all\n # relevant versions before PyOpenSSL got a proper API for this.\n cert = _Certificate(openssl_backend, peer_cert._x509)\n\n # We want to find the SAN extension. Ask Cryptography to locate it (it's\n # faster than looping in Python)\n try:\n ext = cert.extensions.get_extension_for_class(\n x509.SubjectAlternativeName\n ).value\n except x509.ExtensionNotFound:\n # No such extension, return the empty list.\n return []\n except (x509.DuplicateExtension, UnsupportedExtension,\n x509.UnsupportedGeneralNameType, UnicodeError) as e:\n # A problem has been found with the quality of the certificate. Assume\n # no SAN field is present.\n log.warning(\n \"A problem was encountered with the certificate that prevented \"\n \"urllib3 from finding the SubjectAlternativeName field. This can \"\n \"affect certificate validation. The error was %s\",\n e,\n )\n return []\n\n # We want to return dNSName and iPAddress fields. We need to cast the IPs\n # back to strings because the match_hostname function wants them as\n # strings.\n # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8\n # decoded. This is pretty frustrating, but that's what the standard library\n # does with certificates, and so we need to attempt to do the same.\n # We also want to skip over names which cannot be idna encoded.\n names = [\n ('DNS', name) for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName))\n if name is not None\n ]\n names.extend(\n ('IP Address', str(name))\n for name in ext.get_values_for_type(x509.IPAddress)\n )\n\n return names",
"def test_inspectCertificate(self):\n c = sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM)\n pk = c.getPublicKey()\n keyHash = pk.keyHash()\n # Maintenance Note: the algorithm used to compute the \"public key hash\"\n # is highly dubious and can differ between underlying versions of\n # OpenSSL (and across versions of Twisted), since it is not actually\n # the hash of the public key by itself. If we can get the appropriate\n # APIs to get the hash of the key itself out of OpenSSL, then we should\n # be able to make it statically declared inline below again rather than\n # computing it here.\n self.assertEqual(\n c.inspect().split('\\n'),\n [\"Certificate For Subject:\",\n \" Common Name: example.twistedmatrix.com\",\n \" Country Name: US\",\n \" Email Address: nobody@twistedmatrix.com\",\n \" Locality Name: Boston\",\n \" Organization Name: Twisted Matrix Labs\",\n \" Organizational Unit Name: Security\",\n \" State Or Province Name: Massachusetts\",\n \"\",\n \"Issuer:\",\n \" Common Name: example.twistedmatrix.com\",\n \" Country Name: US\",\n \" Email Address: nobody@twistedmatrix.com\",\n \" Locality Name: Boston\",\n \" Organization Name: Twisted Matrix Labs\",\n \" Organizational Unit Name: Security\",\n \" State Or Province Name: Massachusetts\",\n \"\",\n \"Serial Number: 12345\",\n \"Digest: C4:96:11:00:30:C3:EC:EE:A3:55:AA:ED:8C:84:85:18\",\n \"Public Key with Hash: \" + keyHash])",
"def lookup_fingerprint(fingerprint, **tags):\n postargs = dict(\n # Identification.\n cid = MD_KEY,\n cvr = \"harmonize.fm/alpha1\",\n \n # The given fingerprint.\n fpt=fingerprint,\n\n # These are required by the license agreement, to help fill out the\n # MusicDNS database.\n art=tags.get('artist', ''),\n ttl=tags.get('title', ''),\n alb=tags.get('album', ''),\n tnm=tags.get('tracknumer', ''),\n gnr=tags.get('genre', ''),\n yrr=tags.get('date', ''),\n brt=tags.get('bitrate', ''),\n fmt=tags.get('format', ''),\n dur=str(int(float(tags.get('duration', '')))),\n\n # Return the metadata?\n rmd='0',\n )\n\n data = urllib.urlencode(postargs)\n response = urllib.urlopen(MD_URL, data)\n\n presp = ElementTree.parse(response)\n el = presp.find('{%s}puid'%MD_NAMESPACE)\n raise RuntimeError()\n\n if el is not None:\n return el.attrib['id']\n\n return None",
"def pem_cert_file_to_subject_hash(pem_filename):\n return der_cert_to_subject_hash(_pem_cert_to_binary(pem_filename))",
"def certify(self, subject, level=SignatureType.Generic_Cert, **prefs):\n hash_algo = prefs.pop('hash', None)\n sig_type = level\n if isinstance(subject, PGPKey):\n sig_type = SignatureType.DirectlyOnKey\n\n sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid, created=prefs.pop('created', None))\n\n # signature options that only make sense in certifications\n usage = prefs.pop('usage', None)\n exportable = prefs.pop('exportable', None)\n\n if usage is not None:\n sig._signature.subpackets.addnew('KeyFlags', hashed=True, flags=usage)\n\n if exportable is not None:\n sig._signature.subpackets.addnew('ExportableCertification', hashed=True, bflag=exportable)\n\n keyfp = self.fingerprint\n if isinstance(subject, PGPKey):\n keyfp = subject.fingerprint\n if isinstance(subject, PGPUID) and subject._parent is not None:\n keyfp = subject._parent.fingerprint\n\n if keyfp == self.fingerprint:\n # signature options that only make sense in self-certifications\n cipher_prefs = prefs.pop('ciphers', None)\n hash_prefs = prefs.pop('hashes', None)\n compression_prefs = prefs.pop('compression', None)\n key_expires = prefs.pop('key_expiration', None)\n keyserver_flags = prefs.pop('keyserver_flags', None)\n keyserver = prefs.pop('keyserver', None)\n primary_uid = prefs.pop('primary', None)\n attested_certifications = prefs.pop('attested_certifications', [])\n\n if key_expires is not None:\n # key expires should be a timedelta, so if it's a datetime, turn it into a timedelta\n if isinstance(key_expires, datetime):\n key_expires = key_expires - self.created\n\n sig._signature.subpackets.addnew('KeyExpirationTime', hashed=True, expires=key_expires)\n\n if cipher_prefs is not None:\n sig._signature.subpackets.addnew('PreferredSymmetricAlgorithms', hashed=True, flags=cipher_prefs)\n\n if hash_prefs:\n sig._signature.subpackets.addnew('PreferredHashAlgorithms', hashed=True, flags=hash_prefs)\n if sig.hash_algorithm is None:\n sig._signature.halg = hash_prefs[0]\n if sig.hash_algorithm is None:\n sig._signature.halg = HashAlgorithm.SHA256\n\n if compression_prefs is not None:\n sig._signature.subpackets.addnew('PreferredCompressionAlgorithms', hashed=True, flags=compression_prefs)\n\n if keyserver_flags is not None:\n sig._signature.subpackets.addnew('KeyServerPreferences', hashed=True, flags=keyserver_flags)\n\n if keyserver is not None:\n sig._signature.subpackets.addnew('PreferredKeyServer', hashed=True, uri=keyserver)\n\n if primary_uid is not None:\n sig._signature.subpackets.addnew('PrimaryUserID', hashed=True, primary=primary_uid)\n\n cert_sigtypes = {SignatureType.Generic_Cert, SignatureType.Persona_Cert,\n SignatureType.Casual_Cert, SignatureType.Positive_Cert,\n SignatureType.CertRevocation}\n # Features is always set on certifications:\n if sig._signature.sigtype in cert_sigtypes:\n sig._signature.subpackets.addnew('Features', hashed=True, flags=Features.pgpy_features)\n\n # If this is an attestation, then we must include a Attested Certifications subpacket:\n if sig._signature.sigtype == SignatureType.Attestation:\n attestations = set()\n for attestation in attested_certifications:\n if isinstance(attestation, PGPSignature) and attestation.type in cert_sigtypes:\n h = sig.hash_algorithm.hasher\n h.update(attestation._signature.canonical_bytes())\n attestations.add(h.digest())\n elif isinstance(attestation, (bytes, bytearray)) and len(attestation) == sig.hash_algorithm.digest_size:\n attestations.add(attestation)\n else:\n warnings.warn(\n 'Attested Certification element 
is neither a PGPSignature certification nor '\n 'a bytes object of size {:d}; ignoring'.format(sig.hash_algorithm.digest_size)\n )\n sig._signature.subpackets.addnew('AttestedCertifications', hashed=True, attested_certifications=b''.join(sorted(attestations)))\n\n else:\n # signature options that only make sense in non-self-certifications\n trust = prefs.pop('trust', None)\n regex = prefs.pop('regex', None)\n\n if trust is not None:\n sig._signature.subpackets.addnew('TrustSignature', hashed=True, level=trust[0], amount=trust[1])\n\n if regex is not None:\n sig._signature.subpackets.addnew('RegularExpression', hashed=True, regex=regex)\n\n return self._sign(subject, sig, **prefs)",
"def extract_subject_from_dn(cert_obj):\n return (\n ','.join(\n '{}={}'.format(\n OID_TO_SHORT_NAME_DICT.get(v.oid.dotted_string, v.oid.dotted_string),\n rdn_escape(v.value)\n ) for v in reversed(list(cert_obj.subject))\n )\n )",
"def set_subject(self):\n self.subject = self.cert.get_subject()",
"def test_record(self):\n\n side_effect = [[\"example.org\"]]\n expected = side_effect[0]\n self.dns_lookup.resolver.resolve = Mock(side_effect=side_effect)\n\n actual = self.dns_lookup.cname_record(self.subject)\n\n self.assertEqual(expected, actual)",
"def load_certs() -> Dict[str, x509.Certificate]:\n re_clean_fname = re.compile(r\"[^A-Za-z0-9_-]\")\n\n eu_url = \"https://ec.europa.eu/information_society/policy/esignature/trusted-list/tl-mp.xml\"\n log.info(\"Downloading EU index from %s\", eu_url)\n eu_tl = load_url(eu_url)\n it_url = eu_tl.get_tsl_pointer_by_territory(\"IT\")\n log.info(\"Downloading IT data from %s\", it_url)\n trust_service_status_list = load_url(it_url)\n\n by_name = defaultdict(list)\n for tsp in trust_service_status_list.trust_service_provider_list.trust_service_provider:\n for tsp_service in tsp.tsp_services.tsp_service:\n si = tsp_service.service_information\n if si.service_status not in (\n \"http://uri.etsi.org/TrstSvc/TrustedList/Svcstatus/recognisedatnationallevel\",\n \"http://uri.etsi.org/TrstSvc/TrustedList/Svcstatus/granted\"):\n continue\n if si.service_type_identifier not in (\n \"http://uri.etsi.org/TrstSvc/Svctype/CA/QC\",):\n continue\n # print(\"identifier\", si.service_type_identifier)\n # print(\"status\", si.service_status)\n cert = []\n sn = []\n for di in si.service_digital_identity.digital_id:\n if di.x509_subject_name is not None:\n sn.append(di.x509_subject_name)\n # if di.x509_ski is not None:\n # print(\" SKI:\", di.x509_ski)\n if di.x509_certificate is not None:\n from cryptography import x509\n from cryptography.hazmat.backends import default_backend\n der = base64.b64decode(di.x509_certificate)\n cert.append(x509.load_der_x509_certificate(der, default_backend()))\n\n if len(cert) == 0:\n raise RuntimeError(\"{} has no certificates\".format(sn))\n elif len(cert) > 1:\n raise RuntimeError(\"{} has {} certificates\".format(sn, len(cert)))\n else:\n from cryptography.x509.oid import NameOID\n cert = cert[0]\n cn = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value\n # print(\"sn\", sn)\n # print(cert)\n # print(\"full cn\", cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME))\n # print(\"cn\", cn)\n fname = re_clean_fname.sub(\"_\", cn)\n by_name[fname].append(cert)\n\n res = {}\n for name, certs in by_name.items():\n if len(certs) == 1:\n if name in res:\n raise RuntimeError(\"{} already in result\".format(name))\n res[name] = certs[0]\n else:\n for idx, cert in enumerate(certs, start=1):\n idxname = name + \"_a38_{}\".format(idx)\n if idxname in res:\n raise RuntimeError(\"{} already in result\".format(name))\n res[idxname] = cert\n return res",
"def lookup_fp(fingerprint, duration):\n import acoustid\n mb_json = acoustid.lookup(mb_apikey, fingerprint, duration)\n # # lookup(apikey, fingerprint, duration): Make a request to the\n # Acoustid API to look up the fingerprint returned by the\n # previous function. An API key is required, as is the length,\n # in seconds, of the source audio. Returns a parsed JSON\n # response.\n result = acoustid.parse_lookup_result(mb_json)\n # # parse_lookup_result(data): Given a parsed JSON response, return\n # an iterator over tuples containing the match score (a float\n # between 0 and 1), the MusicBrainz recording ID, title, and\n # artist name for each match\n return result",
"def mk_temporary_cert(cacert_file, ca_key_file, cn):\n cert_req, pk2 = mk_request(1024, cn=cn)\n if cacert_file and ca_key_file:\n cacert = X509.load_cert(cacert_file)\n pk1 = EVP.load_key(ca_key_file)\n else:\n cacert = None\n pk1 = None\n\n cert = mk_cert()\n cert.set_subject(cert_req.get_subject())\n cert.set_pubkey(cert_req.get_pubkey())\n\n if cacert and pk1:\n cert.set_issuer(cacert.get_issuer())\n cert.sign(pk1, 'sha256')\n else:\n cert.set_issuer(cert.get_subject())\n cert.sign(pk2, 'sha256')\n\n certf = namedtmp()\n certf.write(cert.as_pem())\n certf.write(pk2.as_pem(None))\n certf.flush()\n\n return certf",
"def CertInfo(fname: str) -> RET:\n try:\n with open(fname, encoding = \"ascii\", errors = \"replace\") as f:\n cert_bytes = f.read()\n except Exception:\n return RET(2, '', f'File >>>{fname}<<< not found') # ENOENT /* No such file or directory */\n\n try:\n x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_bytes)\n except Exception:\n return RET(5, '', f'Could not load certificate >>>{fname}<<<') # EIO /* I/O error */\n\n utc_time_notafter = datetime.datetime.strptime(x509.get_notAfter().decode(\"utf-8\"), \"%Y%m%d%H%M%SZ\")\n utc_time_notbefore = datetime.datetime.strptime(x509.get_notBefore().decode(\"utf-8\"), \"%Y%m%d%H%M%SZ\")\n issuer = '/'.join([f'{k.decode(\"utf-8\")}={v.decode(\"utf-8\")}' for k, v in x509.get_issuer().get_components()])\n subject = '/'.join([f'{k.decode(\"utf-8\")}={v.decode(\"utf-8\")}' for k, v in x509.get_subject().get_components()])\n info = f'DN >>> {subject}\\nISSUER >>> {issuer}\\nBEGIN >>> {utc_time_notbefore}\\nEXPIRE >>> {utc_time_notafter}'\n return RET(0, info)",
"def probe_cert(self, domain, **kwargs):\n if \"host\" not in kwargs:\n host = socket.gethostbyname(domain)\n logging.debug('%s resolved to %s', domain, host)\n kwargs[\"host\"] = host\n\n kwargs.setdefault(\"port\", self.PORT)\n kwargs[\"name\"] = self.z_domain\n # TODO: try different methods?\n # pylint: disable=protected-access\n return crypto_util.probe_sni(**kwargs)",
"def getSubject(self, cert):\r\n return self.parseCertAttribute(cert, \"SUBJECT\")",
"def verify(self, subject, signature=None):\n sspairs = []\n\n # some type checking\n if not isinstance(subject, (type(None), PGPMessage, PGPKey, PGPUID, PGPSignature, str, bytes, bytearray)):\n raise TypeError(\"Unexpected subject value: {:s}\".format(str(type(subject))))\n if not isinstance(signature, (type(None), PGPSignature)):\n raise TypeError(\"Unexpected signature value: {:s}\".format(str(type(signature))))\n\n def _filter_sigs(sigs):\n _ids = {self.fingerprint.keyid} | set(self.subkeys)\n for sig in sigs:\n if sig.signer in _ids:\n yield sig\n\n # collect signature(s)\n if signature is None:\n if isinstance(subject, PGPMessage):\n for sig in _filter_sigs(subject.signatures):\n sspairs.append((sig, subject.message))\n\n if isinstance(subject, (PGPUID, PGPKey)):\n sspairs += [ (sig, subject) for sig in _filter_sigs(subject.__sig__) ]\n\n if isinstance(subject, PGPKey):\n # user ids\n for uid in subject.userids:\n for sig in _filter_sigs(uid.__sig__):\n sspairs.append((sig, uid))\n # user attributes\n for ua in subject.userattributes:\n for sig in _filter_sigs(ua.__sig__):\n sspairs.append((sig, ua))\n\n # subkey binding signatures\n for subkey in subject.subkeys.values():\n for sig in _filter_sigs(subkey.__sig__):\n sspairs.append((sig, subkey))\n\n elif signature.signer in {self.fingerprint.keyid} | set(self.subkeys):\n sspairs += [(signature, subject)]\n\n if len(sspairs) == 0:\n raise PGPError(\"No signatures to verify\")\n\n # finally, start verifying signatures\n sigv = SignatureVerification()\n for sig, subj in sspairs:\n if self.fingerprint.keyid != sig.signer and sig.signer in self.subkeys:\n sigv &= self.subkeys[sig.signer].verify(subj, sig)\n\n else:\n if isinstance(subj, PGPKey):\n self_verifying = sig.signer == subj.fingerprint\n else:\n self_verifying = False\n\n subkey_issues = self.check_soundness(self_verifying)\n signature_issues = self.check_primitives()\n\n if self_verifying:\n signature_issues &= ~SecurityIssues.HashFunctionNotCollisionResistant\n\n issues = signature_issues | subkey_issues\n if issues and issues.causes_signature_verify_to_fail:\n sigv.add_sigsubj(sig, self, subj, issues)\n else:\n verified = self._key.verify(sig.hashdata(subj), sig.__sig__, getattr(hashes, sig.hash_algorithm.name)())\n if verified is NotImplemented:\n raise NotImplementedError(sig.key_algorithm)\n\n sigv.add_sigsubj(sig, self, subj, SecurityIssues.WrongSig if not verified else SecurityIssues.OK)\n\n return sigv",
"def getSubject(self):\n subject_st = urllib.unquote(self.path[1:].split('?', 1)[0]).decode('utf8')\n return article.Subject.fromString(subject_st)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This hook is used to add a warehouse to existing companies when the stock module is installed.
|
def create_missing_warehouse(self):
    # Companies that already own a warehouse (archived ones included) are skipped;
    # every remaining company gets a default warehouse named after it.
    company_ids = self.env['res.company'].search([])
    company_with_warehouse = self.env['stock.warehouse'].with_context(active_test=False).search([]).mapped('company_id')
    company_without_warehouse = company_ids - company_with_warehouse
    for company in company_without_warehouse:
        self.env['stock.warehouse'].create({
            'name': company.name,
            'code': company.name[:5],  # warehouse code is a short identifier derived from the company name
            'company_id': company.id,
            'partner_id': company.partner_id.id,
        })
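
A minimal sketch of how a hook like this is typically wired up, assuming the method above is registered on the res.company model and that the module uses Odoo's standard post_init_hook mechanism; the hook function name is illustrative, not taken from the source.

# Hypothetical post_init_hook wiring; 'res.company' as the owning model and the
# hook name below are assumptions, not taken from the code above.
from odoo import api, SUPERUSER_ID

def _post_init_create_missing_warehouse(cr, registry):
    # Build a superuser environment right after module installation and run
    # the warehouse-creation logic once for all existing companies.
    env = api.Environment(cr, SUPERUSER_ID, {})
    env['res.company'].create_missing_warehouse()

The function would then be referenced from the module's __manifest__.py via the post_init_hook key.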
|
[
"def add_to_warehouse(self, warehouse):\n if self.is_assigned():\n assignment = self.current_property()\n assignment.active = False\n assignment.save()\n wf = self.warehousefurniture_set.create(warehouse=warehouse)\n wf.save()\n return wf",
"def add_item_to_warehouse(self):\n if self._name in MyWarehouse.warehouse: # если товар с таким именем уже есть на складе добовляем ему кол-во\n MyWarehouse.warehouse[self._name]['_quantity'] += self._quantity\n else:\n MyWarehouse.warehouse[self._name] = self.__dict__",
"def add_fake_stock(self, api):\n range_start = int(PyMkmHelper.prompt_string(\"Range pid start\"))\n range_end = int(PyMkmHelper.prompt_string(\"Range pid end\"))\n if PyMkmHelper.prompt_bool(\"Sure?\"):\n print(\"Adding fake stock...\")\n product_list = []\n for product_no in range(range_start, range_end):\n product_list.append(\n {\n \"idProduct\": product_no,\n \"idLanguage\": 1,\n \"count\": 1,\n \"price\": 1,\n \"comments\": \"TEST ARTICLE DO NOT BUY\",\n \"condition\": \"PO\",\n \"isFoil\": \"false\",\n }\n )\n\n api.add_stock(product_list)",
"def on_install(self):\n pass",
"def update_warehouses(data):\n warehouse, created = Warehouse.objects.get_or_create(\n id_from_assets=data['warehouse_id']\n )\n warehouse.name = data['warehouse_name']\n warehouse.save()\n return created",
"def addStockToBrewlog(self,username,brewlog,checkStock=0,recipeName=None,process=None,reset=0):\n\n\t\tsys.stderr.write(\"\\nSTART: addStockToBrewlog() %s\\n\" %(brewlog))\n\t\tstatus=0\n\t\ttry:\n\n\t\t\tif not reset:\n\t\t\t\tif not checkStock:\n\t\t\t\t\texistingBrewlog = self.dbWrapper.GqlQuery(\"SELECT * FROM gBrewlogs WHERE owner = :1 AND brewlog = :2\",username,brewlog).fetch(1)\n\t\t\t\t\tif len(existingBrewlog) < 1:\n\t\t\t\t\t\tsys.stderr.write(\"END: addStockToBrewlog() %s .. not existing brewlog ..\\n\" %(brewlog))\n\t\t\t\t\t\treturn {'operation' : 'addStockToBrewlog','status':-1}\n\n\t\t\t\t\trecipeName = existingBrewlog[0].recipe\n\t\t\t\t\tprocess = existingBrewlog[0].process\n\n\n\n\t\t\t\t# workaround for now\n\t\t\t\t# hopAddAt -1 represents the total of this hops and isn't a real hop\n\t\t\t\t# we delete it here and re create based on the >0 hopAddAts\n\t\t\t\tourRecipeIngredients = self.dbWrapper.GqlQuery(\"SELECT * FROM gIngredients WHERE owner = :1 AND recipename = :2 AND ingredientType = :3 AND hopAddAt <= :4\",username,recipeName,\"hops\",0.0).fetch(400)\n\t\t\t\tfor ori in ourRecipeIngredients:\tori.delete()\n\t\t\t\tHOPSUMMARY={}\n\t\t\t\tourRecipeIngredients = self.dbWrapper.GqlQuery(\"SELECT * FROM gIngredients WHERE owner = :1 AND recipename = :2 AND ingredientType = :3 AND hopAddAt > :4\",username,recipeName,\"hops\",0.0).fetch(400)\n\t\t\t\tfor ori in ourRecipeIngredients:\n\t\t\t\t\tif not HOPSUMMARY.has_key(ori.ingredient):\tHOPSUMMARY[ ori.ingredient ] =0\n\t\t\t\t\tHOPSUMMARY[ ori.ingredient ] = HOPSUMMARY[ ori.ingredient ] + ori.qty\n\t\t\t\tfor hs in HOPSUMMARY:\n\t\t\t\t\ting = gIngredients(owner=username)\n\t\t\t\t\ting.db=self.dbWrapper\n\t\t\t\t\ting.recipename=recipeName\n\t\t\t\t\ting.qty = HOPSUMMARY[hs]\n\t\t\t\t\ting.ingredient=hs\t\n\t\t\t\t\ting.ingredientType='hops'\n\t\t\t\t\ting.hopAddAt=float(-1)\n\t\t\t\t\ting.processIngredient = False\n\t\t\t\t\ting.put()\n\t\t\t\n\t\t\t\tourstock=self.takeStock( username,recipeName,existingBrewlog[0].process )\n\t\t\n\n\t\t\t\tfor storeType in ourstock:\n\t\t\t\t\tfor a in ourstock[storeType]:\n\t\t\t\t\t\tsys.stderr.write(\" %s\\n\" %(a))\n\t\t\t\t\t\tfor (pcnt,qty,stocktag,name,purchaseObj) in ourstock[storeType][a]:\n\n\t\t\t\t\t\t\tnewstock=gBrewlogStock(\towner=username,brewlog=brewlog,recipe=recipeName)\n\t\t\t\t\t\t\tnewstock.db=self.dbWrapper\n\t\t\t\t\t\t\tnewstock.qty=qty\n\t\t\t\t\t\t\tnewstock.stock=name\n\t\t\t\t\t\t\tnewstock.cost=purchaseObj.purchaseCost * qty\n\t\t\t\t\t\t\tnewstock.storecategory=purchaseObj.storecategory\n\t\t\t\t\t\t\tnewstock.unit=purchaseObj.unit\n\t\t\t\t\t\t\tnewstock.stocktag=stocktag\n\t\t\t\t\t\t\tnewstock.put()\n\n\n\n\n\t\t\t\"\"\"\n\t\t\tNote: this is a better approach to building in stock into the process because it then generates real stock items.\n\n\t\t\tallocation of stock is limited to what we have above\n\t\t\twhat we have below is suitable for creating a brewlog and resetting a brewlog\n\n\t\t\tNOV2012 need to rework what we have below so that it pulls from teh database not memory.\n\t\t\t\"\"\"\n\t\t\t\n\t\t\tssNum=50000\n\n\n\n\t\t\t# add a gBrewlogStep details\n\t\t\tourSteps = self.dbWrapper.GqlQuery(\"SELECT * FROM gProcess WHERE owner = :1 AND process = :2 AND auto = :3\",username,process,\"gatherthegrain\").fetch(400)\n\t\t\tfor gatherStep in ourSteps:\n\t\t\t\n\n\t\t\t\ttmpSSNUM = self.dbWrapper.GqlQuery(\"SELECT * FROM gProcess WHERE owner = :1 AND process = :2 AND activityNum = :3 AND stepNum = :4 ORDER BY subStepNum 
DESC\",username,process,gatherStep.activityNum,gatherStep.stepNum).fetch(1)\n\t\t\t\tif len(tmpSSNUM) == 0:\n\t\t\t\t\tssNum=-1\n\t\t\t\telse:\n\t\t\t\t\tssNum=int(tmpSSNUM[0].subStepNum)\n\n#April 2016 - 42AG - these steps appeared to be uncompleteable\n#\t\t\t\tourIngs = self.dbWrapper.GqlQuery(\"SELECT * FROM gBrewlogStock WHERE owner = :1 AND brewlog = :2 AND storecategory = :3\",username,brewlog,'fermentables').fetch(5000)\n#\t\t\t\tfor purchaseObj in ourIngs:\n#\t\t\t\t\tssNum=ssNum+1\n#\t\t\t\t\tx=gBrewlogStep(brewlog=brewlog,owner=username,activityNum=gatherStep.activityNum, stepNum=gatherStep.stepNum,subStepNum=ssNum)\n#\t\t\t\t\tx.db=self.dbWrapper\n#\t\t\t\t\tx.stepName=\" %.2f %s of %s (%s)\" %(purchaseObj.qty,purchaseObj.unit,purchaseObj.stock,purchaseObj.stocktag)\n#\t\t\t\t\tx.completed=False\n#\t\t\t\t\tx.stepStartTime=0\n#\t\t\t\t\tx.stepEndTime=0\n#\t\t\t\t\tx.needToComplete=True\n#\t\t\t\t\tx.subStepsCompleted=False\n#\t\t\t\t\tx.compileStep=gatherStep.compileStep\n#\t\t\t\t\tx.put()\n\n\n\n#\n\t\t\t# add a gBrewlogStep details\n\t\t\tourSteps = self.dbWrapper.GqlQuery(\"SELECT * FROM gProcess WHERE owner = :1 AND process = :2 AND auto = :3\",username,process,\"gatherthebottles\").fetch(400)\n\t\t\tfor gatherStep in ourSteps:\n\t\t\t\n\n\t\t\t\ttmpSSNUM = self.dbWrapper.GqlQuery(\"SELECT * FROM gProcess WHERE owner = :1 AND process = :2 AND activityNum = :3 AND stepNum = :4 ORDER BY subStepNum DESC\",username,process,gatherStep.activityNum,gatherStep.stepNum).fetch(1)\n\t\t\t\tif len(tmpSSNUM) == 0:\n\t\t\t\t\tssNum=-1\n\t\t\t\telse:\n\t\t\t\t\tssNum=int(tmpSSNUM[0].subStepNum)\n#\t\t\t\tourIngs = self.dbWrapper.GqlQuery(\"SELECT * FROM gBrewlogStock WHERE owner = :1 AND brewlog = :2 AND subcategory = :3\",username,brewlog,'bottle').fetch(5000)\n#\t\t\t\tfor purchaseObj in ourIngs:\n#\t\t\t\t\tssNum=ssNum+1\n#\t\t\t\t\tx=gBrewlogStep(brewlog=brewlog,owner=username,activityNum=gatherStep.activityNum, stepNum=gatherStep.stepNum,subStepNum=ssNum)\n#\t\t\t\t\tx.db=self.dbWrapper\n#\t\t\t\t\tsys.stderr.write(\"%s\\n\" %(purchaseObj))\n#\t\t\t\t\tsys.stderr.write(\"%s\\n\" %(purchaseObj.qty))\n#\t\t\t\t\tsys.stderr.write(\"%s\\n\" %(purchaseObj.unit))\n#\t\t\t\t\tsys.stderr.write(\"%s\\n\" %(purchaseObj.stock))\n#\t\t\t\t\tsys.stderr.write(\"%s\\n\" %(purchaseObj.stocktag))\n#\t\t\t\t\tx.stepName=\" %.2f %s of %s (%s)\" %(purchaseObj.qty,purchaseObj.unit,purchaseObj.stock,purchaseObj.stocktag)\n#\t\t\t\t\tx.completed=False\n#\t\t\t\t\tx.stepStartTime=0\n#\t\t\t\t\tx.stepEndTime=0\n#\t\t\t\t\tx.needToComplete=True\n#\t\t\t\t\tx.subStepsCompleted=False\n#\t\t\t\t\tx.compileStep=gatherStep.compileStep\n#\t\t\t\t\tx.put()\n\n\n\t\t\t# add a gBrewlogStep details\n\t\t\tourSteps = self.dbWrapper.GqlQuery(\"SELECT * FROM gProcess WHERE owner = :1 AND process = :2 AND auto = :3\",username,process,\"gathertheminikegs\").fetch(400)\n\t\t\tfor gatherStep in ourSteps:\n\t\t\t\n\n\t\t\t\ttmpSSNUM = self.dbWrapper.GqlQuery(\"SELECT * FROM gProcess WHERE owner = :1 AND process = :2 AND activityNum = :3 AND stepNum = :4 ORDER BY subStepNum DESC\",username,process,gatherStep.activityNum,gatherStep.stepNum).fetch(1)\n\t\t\t\tif len(tmpSSNUM) == 0:\n\t\t\t\t\tssNum=-1\n\t\t\t\telse:\n\t\t\t\t\tssNum=int(tmpSSNUM[0].subStepNum)\n#\t\t\t\tourIngs = self.dbWrapper.GqlQuery(\"SELECT * FROM gBrewlogStock WHERE owner = :1 AND brewlog = :2 AND subcategory = :3\",username,brewlog,'keg').fetch(5000)\n#\t\t\t\tfor purchaseObj in 
ourIngs:\n#\t\t\t\t\tssNum=ssNum+1\n#\t\t\t\t\tx=gBrewlogStep(brewlog=brewlog,owner=username,activityNum=gatherStep.activityNum, stepNum=gatherStep.stepNum,subStepNum=ssNum)\n#\t\t\t\t\tx.db=self.dbWrapper\n#\t\t\t\t\tsys.stderr.write(\"%s\\n\" %(purchaseObj))\n#\t\t\t\t\tsys.stderr.write(\"%s\\n\" %(purchaseObj.qty))\n#\t\t\t\t\tsys.stderr.write(\"%s\\n\" %(purchaseObj.unit))\n#\t\t\t\t\tsys.stderr.write(\"%s\\n\" %(purchaseObj.stock))\n#\t\t\t\t\tsys.stderr.write(\"%s\\n\" %(purchaseObj.stocktag))\n#\t\t\t\t\tx.stepName=\" %.2f %s of %s (%s)\" %(purchaseObj.qty,purchaseObj.unit,purchaseObj.stock,purchaseObj.stocktag)\n#\t\t\t\t\tx.completed=False\n#\t\t\t\t\tx.stepStartTime=0\n#\t\t\t\t\tx.stepEndTime=0\n#\t\t\t\t\tx.needToComplete=True\n#\t\t\t\t\tx.subStepsCompleted=False\n#\t\t\t\t\tx.compileStep=gatherStep.compileStep\n#\t\t\t\t\tx.put()\n\n\t\t\t# add a gBrewlogStep details\n\t\t\tourSteps = self.dbWrapper.GqlQuery(\"SELECT * FROM gProcess WHERE owner = :1 AND process = :2 AND auto = :3\",username,process,\"gatherthepolypins\").fetch(400)\n\t\t\tfor gatherStep in ourSteps:\n\t\t\t\n\n\t\t\t\ttmpSSNUM = self.dbWrapper.GqlQuery(\"SELECT * FROM gProcess WHERE owner = :1 AND process = :2 AND activityNum = :3 AND stepNum = :4 ORDER BY subStepNum DESC\",username,process,gatherStep.activityNum,gatherStep.stepNum).fetch(1)\n\t\t\t\tif len(tmpSSNUM) == 0:\n\t\t\t\t\tssNum=-1\n\t\t\t\telse:\n\t\t\t\t\tssNum=int(tmpSSNUM[0].subStepNum)\n#\t\t\t\tourIngs = self.dbWrapper.GqlQuery(\"SELECT * FROM gBrewlogStock WHERE owner = :1 AND brewlog = :2 AND subcategory = :3\",username,brewlog,'polypin').fetch(5000)\n#\t\t\t\tfor purchaseObj in ourIngs:\n#\t\t\t\t\tssNum=ssNum+1\n#\t\t\t\t\tx=gBrewlogStep(brewlog=brewlog,owner=username,activityNum=gatherStep.activityNum, stepNum=gatherStep.stepNum,subStepNum=ssNum)\n#\t\t\t\t\tx.db=self.dbWrapper\n#\t\t\t\t\tsys.stderr.write(\"%s\\n\" %(purchaseObj))\n#\t\t\t\t\tsys.stderr.write(\"%s\\n\" %(purchaseObj.qty))\n#\t\t\t\t\tsys.stderr.write(\"%s\\n\" %(purchaseObj.unit))\n#\t\t\t\t\tsys.stderr.write(\"%s\\n\" %(purchaseObj.stock))\n#\t\t\t\t\tsys.stderr.write(\"%s\\n\" %(purchaseObj.stocktag))\n#\t\t\t\t\tx.stepName=\" %.2f %s of %s (%s)\" %(purchaseObj.qty,purchaseObj.unit,purchaseObj.stock,purchaseObj.stocktag)\n#\t\t\t\t\tx.completed=False\n#\t\t\t\t\tx.stepStartTime=0\n#\t\t\t\t\tx.stepEndTime=0\n#\t\t\t\t\tx.needToComplete=True\n#\t\t\t\t\tx.subStepsCompleted=False\n#\t\t\t\t\tx.compileStep=gatherStep.compileStep\n#\t\t\t\t\tx.put()\n\n\n\t\t\tif reset:\n\t\t\t\tsys.stderr.write(\"END: addStockToBrewlog() %s .. reset..\\n\" %(brewlog))\n\t\t\t\treturn # early\n\n\t\t\tif len(ourstock) < 1:\n\t\t\t\tsys.stderr.write(\"END: addStockToBrewlog() %s .. no stock..\\n\" %(brewlog))\n\t\t\t\treturn {'operation':'addStockToBrewlog','status':-3}\t\t# out of stock or out of date stock\n\n\t\t\t# need to add our stock into the databser\n\n\n\n\t\t\tresult = {}\n\t\t\tfor stockType in ourstock:\n\t\t\t\tresult[stockType] = {}\n\t\t\t\tfor stockItem in ourstock[stockType]:\n\t\t\t\t\tresult[stockType][stockItem]=[]\n\t\t\t\t\tfor (a,b,c,d,e) in ourstock[stockType][stockItem]:\n\t\t\t\t\t\tsubstock={}\n\t\t\t\t\t\tsubstock['percentage']=a\n\t\t\t\t\t\tsubstock['qty']=b\n\t\t\t\t\t\tsubstock['barcode']=c\n\t\t\t\t\t\tsubstock['item']=e.storeitem\n\t\t\t\t\t\tprint stockItem,ourstock[stockType][stockItem]\n\t\t\t\t\t\tresult[stockType][stockItem].append( substock)\n\n\t\t\tsys.stderr.write(\"END: addStockToBrewlog() %s .. 
have stock..\\n\" %(brewlog))\n\t\t\treturn {'operation':'addStockToBrewlog','status' :1,'json' : json.dumps( {\"result\": result})}\n\t\texcept ImportError:\n\t\t\tsys.stderr.write(\"EXCEPTION: addStockToBrewlog() %s .. no stock..\\n\" %(brewlog))\n\t\t\texc_type, exc_value, exc_traceback = sys.exc_info()\n\t\t\tfor e in traceback.format_tb(exc_traceback):\tsys.stderr.write(e)\n\t\t\treturn {'operation':'addStockToBrewlog','status':status }\n\n\t\treturn {'operation':'addStockToBrewlog','status':status}",
"def create_stocks():\n logger.info('Creating Stocks and Stock Data...')\n logger.info('Adding NYSE Stocks...')\n NYSE().save_stocks()\n logger.info('Added NYSE Stocks')\n logger.info('Adding NASDAQ Stocks...')\n NASDAQ().save_stocks()\n logger.info('Added NASDAQ Stocks')",
"def test_add_stock_details_no_stock(self):\n self.assertFalse(self.stock.add_stock_details('NOSTOCKNAME'))",
"def create_company(sender, instance, created, **kwargs):\n if created:\n CompanyAlias.objects.create(company=instance, alias=instance.name)\n CompanyAlias.objects.create(company=instance, alias=instance.ticker)\n StockInformation.objects.create(\n company=instance,\n spot_price=0,\n price_difference=0,\n percent_difference=0,\n volume=0,\n retrieved=datetime.datetime.utcfromtimestamp(0)\n )",
"def add_all_installed_packs_to_config_file(self):\n marketplace_packs = self.get_installed_packs()\n self.update_xsoar_config_data(\n section_name=MARKETPLACE_PACKS_SECTION, data_to_update=marketplace_packs\n )",
"def create_and_assign_lot(self, name):\n self.ensure_one()\n product_id = self.product_id.id\n val = {'product_id': product_id}\n new_lot_id = False\n if name:\n lots = self.env['stock.production.lot'].search(\n ['&', ('name', '=', name), ('product_id', '=', product_id)],\n )\n if lots:\n new_lot_id = lots.ids[0]\n val.update({'name': name})\n\n if not new_lot_id:\n new_lot_id = self.env['stock.production.lot'].create(val).id\n self.write({'pack_lot_ids': [(0, 0, {'lot_id': new_lot_id})]})",
"def install(self):\n modulename = self.space.text0_w(self.w_name)\n self.space.builtin_modules[modulename] = self",
"def test_put_install_item(self):\n pass",
"def onAddIndustry(self, item):\n self.frame.mode.addIndustry(self.slbAmount.position, self.selectedItemData, self.frame.systemID)",
"def generate_shop(self, company):\n self.env['l10n_pe_edi.shop'].create({\n 'name': '%s (%s)' % (company.name, _('Shop')),\n 'code': '0000',\n 'company_id': company.id,\n 'partner_id': company.partner_id.id,\n })\n return True",
"def insert_warehouse(self, w_name, w_description, w_location):\n fields = (w_name, w_description, w_location)\n query = database_queries.insert_warehouse\n self._execute_query(query=query, param=fields)",
"def set_pack_operation_lot(self, picking=None):\n \"\"\"\n TODO we foce core odoo because: we get lot id direct pos operation lot \\n\n And if order return we dont care lots_necessary, auto add back lot ID\n \"\"\"\n\n StockProductionLot = self.env['stock.production.lot']\n PosPackOperationLot = self.env['pos.pack.operation.lot']\n has_wrong_lots = False\n for order in self:\n for move in (picking or self.picking_id).move_lines:\n picking_type = (picking or self.picking_id).picking_type_id\n lots_necessary = True\n if picking_type:\n lots_necessary = picking_type and picking_type.use_existing_lots\n qty_done = 0\n pack_lots = []\n pos_pack_lots = PosPackOperationLot.search([\n ('order_id', '=', order.id),\n ('product_id', '=', move.product_id.id)\n ])\n if pos_pack_lots and (lots_necessary or order.is_return):\n for pos_pack_lot in pos_pack_lots:\n stock_production_lot = StockProductionLot.search([('name', '=', pos_pack_lot.lot_name), ('product_id', '=', move.product_id.id)])\n if stock_production_lot:\n # a serialnumber always has a quantity of 1 product, a lot number takes the full quantity of the order line\n qty = 1.0\n if stock_production_lot.product_id.tracking == 'lot':\n qty = abs(pos_pack_lot.pos_order_line_id.qty)\n qty_done += qty\n if pos_pack_lot.lot_id:\n pack_lots.append({\n 'lot_id': pos_pack_lot.lot_id.id,\n 'qty': qty,\n 'lot_name': pack_lot.lot_id.name\n })\n else:\n pack_lots.append({\n 'lot_id': stock_production_lot.id,\n 'qty': qty,\n 'lot_name': stock_production_lot.name\n })\n else:\n has_wrong_lots = True\n elif move.product_id.tracking == 'none' or not lots_necessary:\n qty_done = move.product_uom_qty\n else:\n has_wrong_lots = True\n for pack_lot in pack_lots:\n lot_id, qty, lot_name = pack_lot['lot_id'], pack_lot['qty'], pack_lot['lot_name']\n self.env['stock.move.line'].create({\n 'picking_id': move.picking_id.id,\n 'move_id': move.id,\n 'product_id': move.product_id.id,\n 'product_uom_id': move.product_uom.id,\n 'qty_done': qty,\n 'location_id': move.location_id.id,\n 'location_dest_id': move.location_dest_id.id,\n 'lot_id': lot_id,\n 'lot_name': lot_name,\n })\n if not pack_lots and not float_is_zero(qty_done, precision_rounding=move.product_uom.rounding):\n if len(move._get_move_lines()) < 2:\n move.quantity_done = qty_done\n else:\n move._set_quantity_done(qty_done)\n return has_wrong_lots",
"def afterUpgrade(self):",
"def build_warehouse(self) -> None:\n # building the warehouses\n self.build_ncbibert_warehouse()\n self.build_biobert_warehouse()\n self.build_bert_warehouse()\n self.build_elmo_warehouse()\n self.build_glove_warehouse()\n\n output_folder = self.output_path\n\n force_folder_to_exist(os.path.join(output_folder, 'bert/bert'))\n os.system(\n 'mv ' + os.path.join(output_folder, 'bert/bert_small') + ' ' + os.path.join(output_folder, 'bert/bert'))\n os.system(\n 'mv ' + os.path.join(output_folder, 'bert/bert_large') + ' ' + os.path.join(output_folder, 'bert/bert'))\n os.system('mv ' + os.path.join(output_folder, 'biobert') + ' ' + os.path.join(output_folder, 'bert'))\n os.system('mv ' + os.path.join(output_folder, 'ncbibert') + ' ' + os.path.join(output_folder, 'bert'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load raw data (image/mask) and resample to fixed resolution.
|
def resample_raw_image(self, mask_fname, patient_folder, binary=True):
m_nii_fname = os.path.join(patient_folder, mask_fname)
new_res = (1.37, 1.37)
    print('Resampling %s to in-plane resolution %s' % (m_nii_fname, str(new_res)))
im_nii = nib.load(m_nii_fname)
    im_data = np.asanyarray(im_nii.dataobj)  # drop-in replacement for get_data(), which was removed in nibabel >= 5
voxel_size = im_nii.header.get_zooms()
scale_vector = [voxel_size[i] / new_res[i] for i in range(len(new_res))]
order = 0 if binary else 1
result = []
for i in range(im_data.shape[-1]):
im = im_data[..., i]
rescaled = transform.rescale(im, scale_vector, order=order, preserve_range=True, mode='constant')
result.append(np.expand_dims(rescaled, axis=-1))
return np.concatenate(result, axis=-1)
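
For context, a self-contained version of the same resampling step, assuming only nibabel, numpy and scikit-image; the file path and the 1.37 mm target resolution are placeholders rather than values taken from the rest of the pipeline.

import nibabel as nib
import numpy as np
from skimage import transform

def resample_volume(nii_path, new_res=(1.37, 1.37), binary=False):
    # Load the volume and compute per-axis scale factors from the stored voxel size.
    nii = nib.load(nii_path)
    data = np.asanyarray(nii.dataobj)
    voxel_size = nii.header.get_zooms()
    scale = [voxel_size[i] / new_res[i] for i in range(len(new_res))]
    # Nearest-neighbour interpolation keeps label masks binary; bilinear is used for images.
    order = 0 if binary else 1
    slices = []
    for i in range(data.shape[-1]):
        rescaled = transform.rescale(data[..., i], scale, order=order,
                                     preserve_range=True, mode='constant')
        slices.append(rescaled[..., np.newaxis])
    return np.concatenate(slices, axis=-1)

The order=0 choice mirrors the method above: resampling a mask with anything other than nearest-neighbour interpolation would introduce non-binary label values.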
|
[
"def resample(path, upscale_factor=2):\n with rasterio.open(path) as dataset:\n\n # resample data to target shape\n data = dataset.read(out_shape=(dataset.count,\n int(dataset.height * upscale_factor),\n int(dataset.width * upscale_factor)),\n resampling=Resampling.mode)\n\n # scale image transform\n transform = dataset.transform * dataset.transform.scale(\n (dataset.width / data.shape[-1]), (dataset.height / data.shape[-2]))\n\n #write new dataset\n metadata = dataset.meta.copy()\n\n metadata.update({\n 'transform': transform,\n \"height\": dataset.height * upscale_factor,\n \"width\": dataset.width * upscale_factor\n })\n\n basename = os.path.splitext(path)[0]\n filename = \"{}_resampled.tif\".format(basename)\n\n with rasterio.open(filename, \"w\", **metadata) as dest:\n dest.write(data)\n\n return filename",
"def load_and_resample(file: str,\n output_path: str,\n naming_scheme: str,\n target_res: int,\n resampling_method: Resampling,\n resampler: str) -> list:\n\n if not os.path.isfile(file):\n print('File ' + file + ' not found.')\n sys.exit()\n\n with rio.open(file) as raw_ds:\n sds_paths = raw_ds.subdatasets\n\n resampled_subdatasets = list(map(\n lambda sds: resample_band(\n rio.open(sds),\n float(target_res),\n resampling_method=resampling_method,\n resampler=resampler\n ),\n sds_paths\n ))\n\n # New instance of GDAL/Rasterio\n with rio.Env():\n # Write an array as a raster band to a new 8-bit file. For\n # the new file's profile, we start with the profile of the source\n # profile = src.profile\n\n # if len(resampled_subdatasets) > 0:\n # all_profile = resampled_subdatasets[0][\"profile\"]\n # all_profile.update(\n # driver=\"GTiff\",\n # count=len(resampled_subdatasets)\n # )\n # else:\n # raise RuntimeError(\"No resampled datasets returned! Check your script!\")\n\n # Read each resampled band and write it to stack\n for ds_num, ds_dict in enumerate(resampled_subdatasets):\n to_create = \"/{}/{}_{}.tiff\".format(output_path, naming_scheme, ds_num)\n ds_dict[\"profile\"].update(driver=\"GTiff\", dtype=rio.uint16)\n write_resampled(ds_dict[\"data\"], to_create, ds_dict[\"profile\"])\n\n return resampled_subdatasets",
"def resampleFluid(resampleWidth=int, resampleDepth=int, resampleHeight=int):\n pass",
"def resample(img, nx=2, ny=2):\n if not isinstance(nx, int) or not isinstance(ny, int):\n raise TypeError('nx and ny must be ints')\n\n block = np.ones((ny, nx))\n img_resampled = np.kron(img, block)\n\n return img_resampled",
"def resample(self, data, length: int, boxcar=True):\n old_length = len(data)\n new_length = length\n if old_length == new_length:\n return data\n if new_length == 0 or old_length == 0:\n return np.array([])\n\n if new_length > old_length:\n # Upsample\n return self._upsample(data, new_length)\n else:\n # Downsample\n if old_length % new_length: \n # Requires upsampling to nearest multiple first, then reducing\n data = self._upsample(data, int(np.ceil(old_length / new_length) * new_length))\n old_length = len(data)\n return self._downsample(data, int(old_length / new_length), boxcar=boxcar)",
"def resample_Raster(input_raster, output_raster, dx, dy, resample='near'):\n cmdString = \"gdalwarp -tr \"+str(dx)+\" \"+str(dy)+\" -et 0.001 -r \"+resample+\" -overwrite \"+\\\n input_raster +\" \"+ output_raster\n callSubprocess(cmdString, \"re-grid raster\")\n #Delete temp file",
"def _resample(self, data, labels):\n X_train, y_train = ADASYN(n_jobs=16).fit_resample(data, labels)\n return X_train, y_train",
"def resampleRecord(data: np.recarray, inRate: float, outRate: float) -> np.recarray:\n if inRate == outRate: return data\n resampleFac = inRate/outRate\n # NOTE: This is done for each measure\n # TODO: Maybe we can make this quicker somehow\n oldX = np.arange(0, len(data))\n newX = np.arange(0, len(data), resampleFac)\n data2 = np.zeros(len(newX), dtype=data.dtype)\n for measure in data.dtype.names:\n data2[measure] = np.interp(newX, oldX, data[measure])\n data = data2\n return data",
"def _resample(image, real_axes, new_real_coords, **interpn_kwargs):\n\n return interpn(points=real_axes, values=image, xi=new_real_coords, **interpn_kwargs)",
"def test_from_raw_default(self):\n raw = TestRawDataset()\n\n with DiffractionDataset.from_raw(raw, filename=self.fname, mode=\"w\") as dataset:\n self.assertSequenceEqual(\n dataset.diffraction_group[\"intensity\"].shape,\n raw.resolution + (len(raw.time_points),),\n )",
"def load_crop(\n band_path: str, bounds: Tuple, resample_flag: bool, scale_factor: float\n) -> np.ndarray:\n # convert bounds from lat/lon to meters\n with rasterio.open(band_path) as src:\n crs_data = src.crs.data\n mins = warp.transform({\"init\": \"epsg:4326\"}, crs_data, [bounds[0]],\n [bounds[1]])\n maxs = warp.transform({\"init\": \"epsg:4326\"}, crs_data, [bounds[2]],\n [bounds[3]])\n # load crop\n with rasterio.open(band_path) as dataset:\n crop = dataset.read(\n 1,\n window=from_bounds(\n mins[0][0], mins[1][0], maxs[0][0], maxs[1][0],\n dataset.transform\n ),\n )\n # upsample bands with GSD > 10m\n if resample_flag:\n crop = cv2.resize(\n crop,\n dsize=(\n int(scale_factor * np.shape(crop)[1]),\n int(scale_factor * np.shape(crop)[0]),\n ),\n interpolation=cv2.INTER_CUBIC,\n )\n return crop",
"def resample(\n self,\n sampling_rate=None,\n variables=None,\n force_dense=False,\n in_place=False,\n kind=\"linear\",\n ):\n return self._densify_and_resample(\n sampling_rate,\n variables,\n force_dense=force_dense,\n in_place=in_place,\n kind=kind,\n resample_dense=True,\n )",
"def resample(self, image: nib.nifti1.Nifti1Image) -> nib.nifti1.Nifti1Image:\n if self.target_shape is not None:\n self.target_shape = np.array(self.target_shape)\n target_affine = rescale_affine(image.affine, image.shape, \n self.target_resolution, new_shape=self.target_shape)\n return resample_img(image, target_affine=target_affine,\n target_shape=self.target_shape, interpolation='nearest')",
"def prepare_anisotropic_dataset(image_dir,\n list_synth_res,\n downsample_image_result_dir,\n resample_image_result_dir,\n labels_dir=None,\n downsample_labels_result_dir=None,\n native_resolution=1,\n slice_thickness=4,\n recompute=True):\n\n # create results dir\n if not os.path.isdir(resample_image_result_dir):\n os.mkdir(resample_image_result_dir)\n if not os.path.isdir(downsample_image_result_dir):\n os.mkdir(downsample_image_result_dir)\n if downsample_labels_result_dir is not None:\n if not os.path.isdir(downsample_labels_result_dir):\n os.mkdir(downsample_labels_result_dir)\n\n # define thickness, which is always the same\n list_thickness = slice_thickness * np.eye(3)\n list_thickness[list_thickness == 0] = native_resolution\n list_thickness = list_thickness.tolist()\n\n # loop over resolution levels\n for synth_res in list_synth_res:\n\n # define blurring res\n list_data_res = synth_res * np.eye(3)\n list_data_res[list_data_res == 0] = native_resolution\n list_data_res = list_data_res.tolist()\n\n # loop over resolution directions\n for (data_res, thickness) in zip(list_data_res, list_thickness):\n res_str = '_'.join(['%d' % r for r in data_res])\n print('\\npreprocess images to ' + res_str.replace('_', '*') + 'mm resolution')\n\n # build path result folders\n im_results_dir = os.path.join(resample_image_result_dir, 'images_' + res_str)\n im_downsample_results_dir = os.path.join(downsample_image_result_dir, 'images_' + res_str)\n if downsample_labels_result_dir is not None:\n labels_results_dir = os.path.join(downsample_labels_result_dir, 'labels_' + res_str)\n else:\n labels_results_dir = None\n\n # downsample datasets\n edit_volumes.simulate_upsampled_anisotropic_images(image_dir,\n im_downsample_results_dir,\n im_results_dir,\n data_res,\n labels_dir=labels_dir,\n downsample_labels_result_dir=labels_results_dir,\n slice_thickness=thickness,\n recompute=recompute)",
"def initialize_resampler(fixed, tx):\n resampler = sitk.ResampleImageFilter()\n resampler.SetReferenceImage(fixed)\n resampler.SetInterpolator(sitk.sitkNearestNeighbor)\n resampler.SetDefaultPixelValue(0)\n resampler.SetTransform(tx)\n return resampler",
"def resample(tar_img, ref_img):\n\n resizeFilter = sitk.ResampleImageFilter()\n resizeFilter.SetNumberOfThreads(num_cores)\n resizeFilter.SetReferenceImage(ref_img)\n\n return resizeFilter.Execute(tar_img)",
"def auto_resample(self):\n if self.effective_particles() < 2.0 / 3.0 * self.num_points:\n self.resample()",
"def resample_images_from_warp_matrix(red_dir):\n\n log = logs.start_stage_log(red_dir, 'resampled_images')\n\n resampled_dir = path.join(red_dir, 'resampled')\n data_dir = path.join(red_dir, 'data')\n\n resimage_list = glob.glob(path.join(resampled_dir, '*.fits'))\n\n for entry in resimage_list:\n matrix_file = path.join(entry, 'warp_matrice_image.npy')\n image_path = path.join(data_dir, path.basename(entry))\n\n (image_header, data_image, mask_image) = load_image_data(image_path,log)\n\n if path.isfile(matrix_file) and type(data_image) == type(np.zeros(1)):\n model = load_resample_model(matrix_file,image_path,log)\n\n resampled_image = stage4.warp_image(data_image,model)\n\n resampled_mask = tf.warp(mask_image, inverse_map=model,\n output_shape=data_image.shape, order=1, mode='constant',\n cval=1, clip=True, preserve_range=True)\n\n output_resampled_image(red_dir, path.basename(image_path),\n image_header, resampled_image, resampled_mask, log)\n\n logs.close_log(log)",
"def resample_image(directory_LaSRC, directory_workspace, file, directory_input, directory_main):\n for name in list_brdf_adjustment:\n dst_transform = resample_band(os.path.join(directory_LaSRC, file+name), os.path.join(directory_workspace, file+name), np.uint16)\n print(os.path.join(directory_LaSRC, file+name))\n\n #For spectral band 9 and 10\n list_path_initial = os.listdir(os.path.join(directory_input, \"GRANULE\"))\n path_initial_2 = os.path.join(directory_input, \"GRANULE\", list_path_initial[0], \"IMG_DATA\")\n print(os.path.join(directory_LaSRC, file+name))\n for name in os.listdir(path_initial_2):\n if name[-7:]==\"B09.jp2\":\n dst_transform = resample_band(os.path.join(path_initial_2, name), os.path.join(directory_main, file+\"_toa_band9.tif\"), np.int16)\n if name[-7:]==\"B10.jp2\":\n dst_transform = resample_band(os.path.join(path_initial_2, name), os.path.join(directory_main, file+\"_toa_band10.tif\"), np.int16)\n return dst_transform",
"def resample(img, sub_rate=0.5):\n\n\tfrom fundamentals import subsample\n\tfrom utilities import get_pixel_size, set_pixel_size\n\n\tif type(img) == str:\n\t\tfrom utilities import get_image\n\t\timg = get_image(img)\n\tnx = img.get_xsize()\n\tny = img.get_ysize()\n\tnz = img.get_zsize()\n\tif( ny == 1): ERROR(\"Only 2D or 3D images allowed\",\"resample\",1)\n\tif sub_rate == 1.0: return img.copy()\n\telif sub_rate < 1.0:\n\t\te = subsample(img, sub_rate)\n\telse: # sub_rate>1\n\t\tnew_nx = int(nx*sub_rate+0.5)\n\t\tnew_ny = int(ny*sub_rate+0.5)\n\t\tif nz==1:\n\t\t\tnew_nz = 1\n\t\telse:\n\t\t\tnew_nz = int(ny*sub_rate+0.5)\n\t\tif ( nx!=ny and nz==1 ):\n\t\t\tnn = max(new_nx, new_ny)\n\t\t\te = Util.pad(img, nn, nn, 1, 0, 0, 0, \"circumference\")\n\t\t\te, kb = prepi(e)\n\t\t\te = Util.window( e.rot_scale_conv_new(0.0, 0.0, 0.0, kb, sub_rate), new_nx, new_ny, 1, 0,0,0)\n\t\t \n\t\telif ((nx!=ny or nx!=nz or ny!=nz) and nz>1):\n\t\t\tnn = max(new_nx, new_ny,new_nz)\n\t\t\te = Util.pad(img, nn, nn, nn, 0, 0, 0, \"circumference\")\n\t\t\te, kb = prepi3D(e)\n\t\t\te = Util.window( e.rot_scale_conv_new_3D(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, kb, sub_rate), new_nx, new_ny, new_nz, 0,0,0)\n\t\telse:\n\t\t\tif nz==1:\n\t\t\t\te, kb = prepi(Util.pad(img, new_nx, new_ny, 1, 0, 0, 0, \"circumference\"))\n\t\t\t\te = e.rot_scale_conv_new(0.0, 0.0, 0.0, kb, sub_rate)\n\t\t\telse:\n\t\t\t\te, kb = prepi3D(Util.pad(img, new_nx, new_ny, new_nz, 0, 0, 0, \"circumference\"))\n\t\t\t\te = e.rot_scale_conv_new_3D(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, kb, sub_rate)\n\n\t# Automatically adjust pixel size for ctf parameters\n\tfrom utilities import get_pixel_size, set_pixel_size\n\tapix = get_pixel_size(e)\n\tapix /= sub_rate\n\tset_pixel_size(e, apix)\n\tcc = e.get_attr_default(\"xform.projection\", None)\n\tif cc:\n\t\tcp = cc.get_params(\"spider\")\n\t\tcp[\"tx\"] *= sub_rate\n\t\tcp[\"ty\"] *= sub_rate\n\t\tfrom utilities import set_params_proj\n\t\tset_params_proj(e, [cp[\"phi\"], cp[\"theta\"], cp[\"psi\"], -cp[\"tx\"], -cp[\"ty\"]]) # have to invert as set inverts them again\n\tcc = e.get_attr_default(\"xform.align2d\", None)\n\tif cc:\n\t\tcp = cc.get_params(\"2D\")\n\t\tcp[\"tx\"] *= sub_rate\n\t\tcp[\"ty\"] *= sub_rate\n\t\tfrom utilities import set_params2D\n\t\tset_params2D(e, [cp[\"alpha\"], cp[\"tx\"], cp[\"ty\"], cp[\"mirror\"], cp[\"scale\"]])\n\n\treturn \te"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the next free index in the given dictionary.
|
def _get_next_free_index(d: dict) -> int:
if _NEXT_FREE_INDEX_KEY not in d:
d[_NEXT_FREE_INDEX_KEY] = 1
next_index = int(d[_NEXT_FREE_INDEX_KEY])
d[_NEXT_FREE_INDEX_KEY] = next_index + 1
return next_index
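
A short usage sketch of the counter above, assuming _NEXT_FREE_INDEX_KEY is a module-level sentinel string that cannot collide with ordinary dictionary keys; the concrete value below is illustrative.

# Illustrative only: the sentinel value is an assumption.
_NEXT_FREE_INDEX_KEY = "__next_free_index__"

registry = {}
first = _get_next_free_index(registry)   # returns 1, stored counter becomes 2
second = _get_next_free_index(registry)  # returns 2, stored counter becomes 3
assert (first, second) == (1, 2)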
|
[
"def next_free_date_index(bit_vector):\n return bit_vector.index(0)",
"def get_index(self, key):\r\n index = self.horner_hash(key)\r\n j = 0\r\n for i in range(0, self.table_size):\r\n j = (index + i ** 2) % self.table_size\r\n if self.hash_table[j] and self.hash_table[j].key == key:\r\n return j\r\n return None",
"def get_index(self, key: str) -> Optional[int]:\r\n i = 0\r\n m = 0\r\n while self.hash_table[self.horner_hash(key) + (i ** 2) - m] is not None:\r\n if self.hash_table[self.horner_hash(key) + (i ** 2) - m][0] != key:\r\n i = i + 1\r\n if self.table_size <= self.horner_hash(key) + (i ** 2) - m:\r\n m = m + self.table_size\r\n continue\r\n return self.horner_hash(key) + (i ** 2) - m\r\n return None",
"def get_next(index: int) -> int:\n return index | (index + 1)",
"def get_next_index(polls):\n new_index = ''\n if polls == []:\n new_index = 1\n else:\n indexes = [\n dictionary['index'] for dictionary in polls\n ]\n new_index = max(indexes) + 1\n return new_index",
"def _get_index(k, size):\n if k < 0: k += size\n if not 0 <= k < size:\n raise KeyError('Index out of bound!')\n return k",
"def __next_index():\r\n return Customer.redis.incr('index')",
"def _get_pos(self, key):\n p = bisect(self.runtime._keys, self.hashi(key))\n if p == len(self.runtime._keys):\n return 0\n else:\n return p",
"def _getitem(self, key):\n\n hsh_idx = self._gethash(key)\n cur_pos = self._data[hsh_idx]\n struct_size = len(self._data)\n\n while cur_pos is not None:\n if cur_pos[0] == key:\n return hsh_idx\n\n else:\n # treat as circullar array\n hsh_idx = (hsh_idx + 1) % struct_size\n cur_pos = self._data[hsh_idx]\n\n return hsh_idx",
"def _free_idx(self, idx):\n assert idx[0] <= idx[-1]\n n = len(idx)\n if self.buffer_tail != self.INVALID_IDX:\n update_tail = ((idx[0] <= idx[-1]) and\n (idx[0] <= self.buffer_tail) and\n (idx[-1] >= self.buffer_tail))\n update_tail |= idx[0] > idx[-1] and (idx[0] <= self.buffer_tail or\n idx[-1] >= self.buffer_tail)\n\n if update_tail:\n i = 0\n while i < n:\n curr_idx = idx[i]\n if self.is_valid_path(curr_idx):\n start_idx = self.get_path_start(curr_idx)\n end_idx = self.get_path_end(curr_idx)\n pathlen = self.get_pathlen(curr_idx)\n\n if start_idx < end_idx:\n self.buffers[self.PATH_START_KEY][start_idx:end_idx +\n 1] = self.INVALID_IDX\n else:\n self.buffers[self.PATH_START_KEY][start_idx:self\n .buffer_size] = self.INVALID_IDX\n self.buffers[self.PATH_START_KEY][0:end_idx +\n 1] = self.INVALID_IDX\n\n self.num_paths -= 1\n i += pathlen + 1\n self.buffer_tail = (end_idx + 1) % self.buffer_size\n else:\n i += 1\n else:\n self.buffer_tail = idx[0]\n return",
"def get_key_index( self , key ):\n index = EclSum.cNamespace().get_general_var_index( self , key )\n if index >= 0:\n return index\n else:\n return None",
"def index(self, key):\n try:\n i = self.sortedKeys.index(key)\n except Exception:\n i = -1\n return i",
"def _get_new_index(self, node: str):\n current_index = self._hash_func(node)\n current_node = self[current_index]\n\n while current_node != node:\n if current_node is None or current_node == \"AVAIL\":\n break\n\n # Linear hashing\n current_index += 1\n if current_index == self.map_size:\n current_index = 0\n\n current_node = self[current_index]\n\n return current_index",
"def find_next_ind(self, nodes, ind, pred):\n beg_node = nodes[ind]\n ind += 1\n sz = len(nodes)\n while ind < sz:\n if pred(beg_node, nodes[ind]):\n break\n ind += 1\n return ind",
"def find_next_ind(self, nodes, ind, pred):\n beg_node = nodes[ind]\n ind += 1\n sz = len(self.nodes)\n while ind < sz:\n if pred(beg_node, nodes[ind]):\n break\n ind += 1\n return ind",
"def _next_free_slot(self, first_hash):\n curr_index = first_hash\n try_number = 0\n tried = []\n #print self._data\n while self._data[curr_index] is not None:\n tried.append(curr_index)\n if try_number + 1 >= self.n_slots // 2:\n #print self._data\n print('Size = ' + str(self.n_slots))\n print('Number of items = ' + str(self.n_items))\n print(\"Failed to find an empty slot...\")\n print('Try number = '+str(try_number))\n print('List of tried slots = '+str(tried))\n print('Current table = '+str(self._data))\n raise ValueError(\"Failed to find an empty slot!!!! \"+\n \"This can happen with quadratic probing \"+\n \"if the table is over half full\")\n else:\n try_number += 1\n curr_index = (first_hash + try_number**2) % self.n_slots\n return curr_index",
"def nextIndex(self, index):\n if index == len(self.directions) - 1: return 0\n return index + 1",
"def get_index(self, token):\n try:\n return self.token_to_index_dict[token]\n except KeyError:\n return self.token_to_index_dict[self.unk]",
"def Map_get_slot(aMap, key, default=None):\n bucket = Map_get_bucket(aMap, key)\n \n for i, kv in enumerate(bucket):\n k, v = kv\n if key == k:\n return i, k, v\n \n return -1, key, default"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get all meta directives available.
|
def get_meta_directives() -> Dict[str, MetaDirective]:
directives = {} # type: Dict[str, MetaDirective]
# Helper functions to implement each meta directive.
def append_action(config_dict: dict, key: str, value: Any) -> None:
if key not in config_dict:
config_dict[key] = []
if not isinstance(config_dict[key], list):
raise ValueError(f"Trying to append to non-list setting {key}")
if not isinstance(value, list):
raise ValueError(f"Trying to append to list {key} with non-list {value}")
config_dict[key] += value
def append_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[Tuple[Any, str]]:
return [replacement_setting, value], "crossappend"
# append depends only on itself
directives['append'] = MetaDirective(action=append_action,
target_settings=lambda key, value: [key],
rename_target=append_rename)
def crossappend_decode(value: Any) -> Tuple[str, list]:
assert isinstance(value, list), "crossappend takes a list of two elements"
assert len(value) == 2, "crossappend takes a list of two elements"
target_setting = value[0] # type: str
append_value = value[1] # type: list
assert isinstance(target_setting, str), "crossappend target setting must be a string"
assert isinstance(append_value, list), "crossappend must append a list"
return target_setting, append_value
# crossappend takes a list that has two elements.
# The first is the target list (the list to append to), and the second is
# a list to append to the target list.
# e.g. if base has ["1"] and crossappend has ["base", ["2", "3"]], then
# the result will be ["1", "2", "3"].
def crossappend_action(config_dict: dict, key: str, value: Any) -> None:
target_setting, append_value = crossappend_decode(value)
config_dict[key] = config_dict[target_setting] + append_value
def crossappend_targets(key: str, value: Any) -> List[str]:
target_setting, append_value = crossappend_decode(value)
return [target_setting]
def crossappend_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[
Tuple[Any, str]]:
crossappend_target, append_value = crossappend_decode(value)
return [replacement_setting if crossappend_target == target_setting else crossappend_target,
append_value], "crossappend"
directives['crossappend'] = MetaDirective(action=crossappend_action,
target_settings=crossappend_targets,
rename_target=crossappend_rename)
def crossappendref_decode(value: Any) -> Tuple[str, str]:
assert isinstance(value, list), "crossappendref takes a list of two elements"
assert len(value) == 2, "crossappendref takes a list of two elements"
target_key = value[0] # type: str
append_key = value[1] # type: str
assert isinstance(target_key, str), "crossappendref target setting must be a string"
assert isinstance(append_key, str), "crossappend append list setting must be a string"
return target_key, append_key
# crossappendref takes a list that has two elements.
# The first is the target list (the list to append to), and the second is
# a setting that contains a list to append.
# e.g. if base has ["1"], app has ["2", "3"], and crossappend has ["base", "app"], the result
# is ["1", "2", "3"].
def crossappendref_action(config_dict: dict, key: str, value: Any) -> None:
target_setting, append_setting = crossappendref_decode(value)
config_dict[key] = config_dict[target_setting] + config_dict[append_setting]
def crossappendref_targets(key: str, value: Any) -> List[str]:
target_setting, append_setting = crossappendref_decode(value)
return [target_setting, append_setting]
def crossappendref_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[
Tuple[Any, str]]:
target, append = crossappendref_decode(value)
def replace_if_target_setting(setting: str) -> str:
"""Helper function to replace the given setting with the
replacement if it is equal to target_setting."""
return replacement_setting if setting == target_setting else setting
return [replace_if_target_setting(target),
replace_if_target_setting(append)], "crossappendref"
directives['crossappendref'] = MetaDirective(action=crossappendref_action,
target_settings=crossappendref_targets,
rename_target=crossappendref_rename)
def prepend_action(config_dict: dict, key: str, value: Any) -> None:
if key not in config_dict:
config_dict[key] = []
if not isinstance(config_dict[key], list):
raise ValueError(f"Trying to prepend to non-list setting {key}")
if not isinstance(value, list):
raise ValueError(f"Trying to prepend to list {key} with non-list {value}")
config_dict[key] = value + config_dict[key]
def prepend_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[Tuple[Any, str]]:
return [replacement_setting, value], "crossprepend"
# prepend depends only on itself
directives['prepend'] = MetaDirective(action=prepend_action,
target_settings=lambda key, value: [key],
rename_target=prepend_rename)
def crossprepend_decode(value: Any) -> Tuple[str, list]:
assert isinstance(value, list), "crossprepend takes a list of two elements"
assert len(value) == 2, "crossprepend takes a list of two elements"
target_setting = value[0] # type: str
prepend_value = value[1] # type: list
assert isinstance(target_setting, str), "crossprepend target setting must be a string"
assert isinstance(prepend_value, list), "crossprepend must prepend a list"
return target_setting, prepend_value
# crossprepend takes a list that has two elements.
# The first is the target list (the list to prepend to), and the second is
# a list to prepend to the target list.
# e.g. if base has ["1"] and crossprepend has ["base", ["2", "3"]], then
# the result will be ["2", "3", "1"].
def crossprepend_action(config_dict: dict, key: str, value: Any) -> None:
target_setting, prepend_value = crossprepend_decode(value)
config_dict[key] = prepend_value + config_dict[target_setting]
def crossprepend_targets(key: str, value: Any) -> List[str]:
target_setting, prepend_value = crossprepend_decode(value)
return [target_setting]
def crossprepend_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[
Tuple[Any, str]]:
crossprepend_target, prepend_value = crossprepend_decode(value)
return [replacement_setting if crossprepend_target == target_setting else crossprepend_target,
prepend_value], "crossprepend"
directives['crossprepend'] = MetaDirective(action=crossprepend_action,
target_settings=crossprepend_targets,
rename_target=crossprepend_rename)
def crossprependref_decode(value: Any) -> Tuple[str, str]:
assert isinstance(value, list), "crossprependref takes a list of two elements"
assert len(value) == 2, "crossprependref takes a list of two elements"
target_key = value[0] # type: str
prepend_key = value[1] # type: str
assert isinstance(target_key, str), "crossprependref target setting must be a string"
assert isinstance(prepend_key, str), "crossprepend prepend list setting must be a string"
return target_key, prepend_key
# crossprependref takes a list that has two elements.
# The first is the target list (the list to prepend to), and the second is
# a setting that contains a list to prepend.
# e.g. if base has ["1"], app has ["2", "3"], and crossprepend has ["base", "app"], the result
# is ["2", "3", "1"].
def crossprependref_action(config_dict: dict, key: str, value: Any) -> None:
target_setting, prepend_setting = crossprependref_decode(value)
config_dict[key] = config_dict[prepend_setting] + config_dict[target_setting]
def crossprependref_targets(key: str, value: Any) -> List[str]:
target_setting, prepend_setting = crossprependref_decode(value)
return [target_setting, prepend_setting]
def crossprependref_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[
Tuple[Any, str]]:
target, prepend = crossprependref_decode(value)
def replace_if_target_setting(setting: str) -> str:
"""Helper function to replace the given setting with the
replacement if it is equal to target_setting."""
return replacement_setting if setting == target_setting else setting
return [replace_if_target_setting(target),
replace_if_target_setting(prepend)], "crossprependref"
directives['crossprependref'] = MetaDirective(action=crossprependref_action,
target_settings=crossprependref_targets,
rename_target=crossprependref_rename)
def subst_str(input_str: str, replacement_func: Callable[[str], str]) -> str:
"""Substitute ${...}"""
return re.sub(__VARIABLE_EXPANSION_REGEX, lambda x: replacement_func(x.group(1)), input_str)
def subst_action(config_dict: dict, key: str, value: Any) -> None:
def perform_subst(value: Union[str, List[str]]) -> Union[str, List[str]]:
"""
Perform substitutions for the given value.
If value is a string, perform substitutions in the string. If value is a list, then perform substitutions
in every string in the list.
:param value: String or list
:return: String or list but with everything substituted.
"""
newval = "" # type: Union[str, List[str]]
if isinstance(value, list):
newval = list(map(lambda input_str: subst_str(input_str, lambda key: config_dict[key]), value))
else:
newval = subst_str(value, lambda key: config_dict[key])
return newval
config_dict[key] = perform_subst(value)
def subst_targets(key: str, value: Any) -> List[str]:
# subst can operate on either a string or a list
# subst_strings is e.g. ["${a} 1", "${b} 2"]
subst_strings = [] # type: List[str]
if isinstance(value, str):
subst_strings.append(value)
elif isinstance(value, list):
for i in value:
assert isinstance(i, str)
subst_strings = value
else:
raise ValueError(f"subst must operate on a str or List[str]; got {value} instead")
output_vars = [] # type: List[str]
for subst_value in subst_strings:
matches = re.finditer(__VARIABLE_EXPANSION_REGEX, subst_value, re.DOTALL)
for match in matches:
output_vars.append(match.group(1))
return output_vars
def subst_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[Tuple[Any, str]]:
assert isinstance(value, str)
if target_setting not in subst_targets(key, value):
return None
new_value = subst_str(value, lambda key: "${" + replacement_setting + "}" if key == target_setting else key)
return new_value, "subst"
directives['subst'] = MetaDirective(action=subst_action,
target_settings=subst_targets,
rename_target=subst_rename)
def crossref_check_and_cast(k: Any) -> str:
if not isinstance(k, str):
raise ValueError("crossref (if used with lists) can only be used only with lists of strings")
return k
def crossref_action(config_dict: dict, key: str, value: Any) -> None:
"""
Copy the contents of the referenced key for use as this key's value.
If the reference is a list, then apply the crossref for each element
of the list.
"""
if isinstance(value, str):
config_dict[key] = config_dict[value]
elif isinstance(value, list):
def check_and_get(k: Any) -> Any:
return config_dict[crossref_check_and_cast(k)]
config_dict[key] = list(map(check_and_get, value))
elif isinstance(value, numbers.Number):
# bools are instances of numbers.Number for some weird reason
raise ValueError("crossref cannot be used with numbers and bools")
else:
raise NotImplementedError("crossref not implemented on other types yet")
def crossref_targets(key: str, value: Any) -> List[str]:
if isinstance(value, str):
return [value]
if isinstance(value, list):
return list(map(crossref_check_and_cast, value))
if isinstance(value, numbers.Number):
# bools are instances of numbers.Number for some weird reason
raise ValueError("crossref cannot be used with numbers and bools")
raise NotImplementedError("crossref not implemented on other types yet")
def crossref_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[
Tuple[Any, str]]:
def change_if_target(x: str) -> str:
if x == target_setting:
return replacement_setting
return x
if isinstance(value, str):
return [change_if_target(value)], "crossref"
if isinstance(value, list):
return list(map(change_if_target, map(crossref_check_and_cast, value))), "crossref"
if isinstance(value, numbers.Number):
# bools are instances of numbers.Number for some weird reason
raise ValueError("crossref cannot be used with numbers and bools")
raise NotImplementedError("crossref not implemented on other types yet")
directives['crossref'] = MetaDirective(action=crossref_action,
target_settings=crossref_targets,
rename_target=crossref_rename)
def transclude_action(config_dict: dict, key: str, value: Any) -> None:
"""Transclude the contents of the file pointed to by value."""
assert isinstance(value, str), "Path to file for transclusion must be a string"
with open(value, "r") as f:
file_contents = str(f.read())
config_dict[key] = file_contents
def transclude_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[
Tuple[Any, str]]:
# This meta directive doesn't depend on any settings
return value, "transclude"
# transclude depends on external files, not other settings.
directives['transclude'] = MetaDirective(action=transclude_action,
target_settings=lambda key, value: [],
rename_target=transclude_rename)
def json2list_action(config_dict: dict, key: str, value: Any) -> None:
"""Turn the value of the key (JSON list) into a list."""
assert isinstance(value, str), "json2list requires a JSON string that is a list"
parsed = json.loads(value)
assert isinstance(parsed, list), "json2list requires a JSON string that is a list"
config_dict[key] = parsed
def json2list_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[
Tuple[Any, str]]:
# This meta directive doesn't depend on any settings
return value, "json2list"
# json2list does not depend on anything
directives['json2list'] = MetaDirective(action=json2list_action,
target_settings=lambda key, value: [],
rename_target=json2list_rename)
def prependlocal_action(config_dict: dict, key: str, value: Any) -> None:
"""Prepend the local path of the config dict."""
if isinstance(value, list):
new_values = []
for v in value:
new_values.append(os.path.join(config_dict[_CONFIG_PATH_KEY], str(v)))
config_dict[key] = new_values
else:
config_dict[key] = os.path.join(config_dict[_CONFIG_PATH_KEY], str(value))
def prependlocal_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[
Tuple[Any, str]]:
        # This meta directive doesn't depend on any settings
return value, "prependlocal"
directives['prependlocal'] = MetaDirective(action=prependlocal_action,
target_settings=lambda key, value: [],
rename_target=prependlocal_rename)
def deepsubst_action(config_dict: dict, key: str, value: Any) -> None:
"""
Perform a deep substitution on the value provided. This will replace any variables that occur in strings
of the form ${...} and will also do a special meta replacement on keys which end in _deepsubst_meta.
"""
def do_subst(oldval: Any) -> Any:
if isinstance(oldval, str):
# This is just regular subst
return subst_str(oldval, lambda key: config_dict[key])
if isinstance(oldval, list):
return list(map(do_subst, oldval))
if isinstance(oldval, dict):
# We need to check for _deepsubst_meta here
newval = {} # type: Dict
for k, v in oldval.items():
if isinstance(k, str):
if k.endswith("_deepsubst_meta"):
base = k.replace("_deepsubst_meta", "")
if base not in oldval:
raise ValueError(f"Deepsubst meta key provided, but there is no matching base key: {k}")
# Note that we don't add the meta back to newval.
else:
meta_key = f"{k}_deepsubst_meta"
if meta_key in oldval:
# Do the deepsubst_meta, whatever it is.
meta = oldval[meta_key]
if meta in DeepSubstMetaDirectives:
if isinstance(v, str):
newval[k] = DeepSubstMetaDirectives[meta](config_dict, v)
else:
raise ValueError(f"Deepsubst metas not supported on non-string values: {v}")
else:
err_keys = ", ".join(DeepSubstMetaDirectives.keys())
raise ValueError(f"Unknown deepsubst_meta type: {meta}. Valid options are [{err_keys}].")
else:
newval[k] = do_subst(v)
else:
# k is not an instance of a string.
# Will this ever happen? It's possible you could have {1: "foo"}...
newval[k] = do_subst(v)
return newval
return oldval
config_dict[key] = do_subst(value)
def deepsubst_targets(key: str, value: Any) -> List[str]:
"""
Look for all substitution targets (${...}) in value and return a list of the targets found.
"""
if isinstance(value, str):
# This is just regular subst
return subst_targets(key, value)
if isinstance(value, (dict, list)):
# Recursively find all strings
def find_strings(x: Union[List, Dict]) -> List[str]:
iterator = x # type: Iterable[Any]
if isinstance(x, dict):
iterator = x.values()
output = [] # type: List
for item in iterator:
if isinstance(item, str):
output.extend([s for s in subst_targets(key, item) if s not in output])
elif isinstance(item, list) or isinstance(item, dict):
output.extend([s for s in find_strings(item) if s not in output])
return output
return find_strings(value)
raise ValueError(f"deepsubst cannot be used with this type: {value}")
def deepsubst_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[Tuple[Any, str]]:
"""
Not implemented.
"""
raise NotImplementedError("Deepsubst does not support rename")
directives['deepsubst'] = MetaDirective(action=deepsubst_action,
target_settings=deepsubst_targets,
rename_target=deepsubst_rename)
return directives
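A minimal sketch of how the directives built above might be consumed, assuming `directives` is the dict returned by this builder and that callers annotate a key with a matching "<key>_meta" entry; the driver function and the config keys below are purely illustrative, not the project's actual resolver.
# Hypothetical driver, for illustration only.
def apply_one_meta(directives, config, key):
    meta_name = config.pop(key + "_meta")          # e.g. "json2list"
    directives[meta_name].action(config, key, config[key])

config = {"synthesis.extra_args": '["-O2", "-g"]',
          "synthesis.extra_args_meta": "json2list"}
apply_one_meta(directives, config, "synthesis.extra_args")
# config["synthesis.extra_args"] is now the parsed list ["-O2", "-g"]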
|
[
"def get_directives(self):\n return self.directives",
"def directives(self):\n directive_sources = chain(hug.defaults.directives.items(), getattr(self, '_directives', {}).items())\n return {'hug_' + directive_name: directive for directive_name, directive in directive_sources}",
"def extract_dc_metadata(soup):\n metatags = [] \n\n for m in soup.find_all('meta'):\n \n # Skip over fields we don't like explicitly\n if m.has_attr('name') is False: continue\n if 'width' in m['content']: continue\n if m['name'] == 'GENERATOR': continue\n\n meta = {}\n meta[m['name']] = m['content']\n metatags.append(meta)\n\n return metatags",
"def get_metas():\n get_video_meta_class_key_values()",
"def parse_dist_meta():\n pats = {re_meta: _add_default, re_doc: _add_doc}\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, NAME, '__about__.py')) as meta_fh:\n distmeta = {}\n for line in meta_fh:\n if line.strip() == '# -eof meta-':\n break\n for pattern, handler in pats.items():\n m = pattern.match(line.strip())\n if m:\n distmeta.update(handler(m))\n return distmeta",
"def asBasicDirectives(self):\n if not self.isRegularDirective():\n return set([])\n if len(self._whitelistedSourceExpressions) <= 1:\n return frozenset((self,))\n directives = set([])\n for srcExpr in self._whitelistedSourceExpressions:\n directives.add(Directive(self._directiveType, (srcExpr,)))\n return directives",
"def _get_base_metas(mcs):\n return mcs._get_bases(mcs, meta=True)",
"def get_all_metadata(self):\n metadata = {}\n for key in self.METADATA_KEYS:\n try:\n val = self.get_metadata(key)\n except MissingMetadataError:\n pass\n else:\n metadata[key] = val\n\n return metadata",
"def meta(self) -> index.MetaIndex:\n return self._filtered('meta')",
"def getSysMetaFields(self):\n res = []\n sysm_proc = self.getBean(self.parsers[0].p[\"systemMetadataProcessor\"])\n for field in sysm_proc.p[\"fieldList\"]:\n fb = self.getBean(field)\n field_name = str(fb.p[\"field_name\"][0])\n res.append(field_name)\n return res",
"def get_metadata(self):\n return meta.get_metadata(self.ast)",
"def info(self):\n self._load_meta()\n cache = dict()\n for key, value in self._meta.items():\n container, _ = value\n cache[key] = container\n return cache",
"def get_all(self):\n with self._db_connection() as connection:\n return self._make_many(connection.get_all_metadata_types())",
"def get_meta (self) :\n return self._meta",
"def _metadata(self):\n return [antenna._metadata for antenna in self]",
"def robo_directives(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RoboDirectiveArgs']]]]:\n return pulumi.get(self, \"robo_directives\")",
"def metset(self):\n return self._metset",
"def metadata_services():\n return app.manager.admin_metadata_services_controller.process_metadata_services()",
"def get_system_cmds(self):\r\n return self.system_commands\r\n #return [cmd for cmd in self.commands if cmd.key.startswith('__')]\r"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Helper function to replace the given setting with the replacement if it is equal to target_setting.
|
def replace_if_target_setting(setting: str) -> str:
return replacement_setting if setting == target_setting else setting
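A small illustrative sketch: in the original, `target_setting` and `replacement_setting` are closure variables of the enclosing rename handler, so the factory below (`make_setting_replacer`) is a made-up stand-in that binds them explicitly.
def make_setting_replacer(target_setting: str, replacement_setting: str):
    def replace_if_target_setting(setting: str) -> str:
        return replacement_setting if setting == target_setting else setting
    return replace_if_target_setting

replace = make_setting_replacer("old.key", "new.key")
assert replace("old.key") == "new.key"      # equal to the target, so replaced
assert replace("other.key") == "other.key"  # anything else passes through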
|
[
"def assume_working_value_setting(self, setting, value, type_, source=\"direct\",\n desired_value=None):\n setting.set_value(value, source)\n self.assertEqual(setting.value, desired_value if desired_value is not None else value)\n self.assert_(isinstance(setting.value, type_))",
"def replace_property(cls, obj, target, replacement):\n Mock.__recall__[(obj,target)]=getattr(obj,target)\n obj.__dict__[target]=property(replacement)",
"def replace(settings, pattern, pattern_type=None, with_value=None):\n\tfilter = settings.format(settings.content)\n\tfilter.replace(pattern, with_value, pattern_type)\n\tsettings.content = filter.content",
"def replace(what: Expression, repl: Expression, target_input: Expression) -> Expression:\n target = copy.deepcopy(target_input)\n return replace_without_copy(what, repl, target)",
"def setTarget(self, target):\n\n if self.type == operation.STORE or self.type == operation.SYSTEM:\n raise Exception, \"Store or system operations don't have a target!\"\n\n elif self.type == operation.CONTROL:\n # Replace the last argument, the target, with a new target.\n\n parts = self.code.split(\",\")\n\n if len(parts) == 1:\n parts = parts[0]\n parts = parts.split()\n\n oldTarget = parts[-1]\n self.code = self.code[:len(self.code) - len(oldTarget)] + target\n\n else:\n # Replace the first argument, the target, with a new target.\n\n parts = self.code.split()\n first = parts[0]\n parts = parts[1].split(\",\")\n\n self.code = first + target + parts[1:]",
"def update_metric(\n target: str, \n support: Union[str,List[str]], \n targetKG: KnowledgeGraph, \n supportKG: KnowledgeGraph, \n targetProp: str,\n supportProp: str,\n strategy: str,\n options: dict={} ) -> None:\n #logging.debug(\"support=\"+repr(support))\n \"\"\"if supportProp not in AvailbleProps:\n raise ValueError(f'supportProp {supportProp} not supported.')\"\"\"\n # Verify strategy validity\n if strategy not in AvailableStrategy:\n raise ValueError(f'Weighting strategy {strategy} not supported.')\n\n # Verify if targetProp exists\n if targetProp not in targetKG.g.nodes[target]: return\n # \n node = targetKG.g.nodes[target]\n old_val = node[targetProp]\n # \n contrib_text = \"\"\n novelty_matters_text = \"\"\n\n if strategy == 'default':\n new_val = 0\n if type(support) == list:\n contributing_nodes=[]\n for cnpt in support:\n if supportProp in supportKG.g.nodes[cnpt]:\n #if debug: logging.info(f\"cnpt={cnpt}, {supportProp}={supportKG.g.nodes[cnpt][supportProp]}\")\n new_val = new_val + supportKG.g.nodes[cnpt][supportProp]\n contributing_nodes.append({cnpt: supportKG.g.nodes[cnpt][supportProp]})\n if len(contributing_nodes) > 0: \n new_val /= len(contributing_nodes)\n contrib_text = f\"from supportKG nodes {repr(contributing_nodes)}\"\n else:\n #No update\n return\n else:\n if supportProp in supportKG.g.nodes[cnpt]:\n new_val = supportKG.g.nodes[support][supportProp]\n contrib_text = f\"from supportKG node {{{support}: {supportKG.g.nodes[support][supportProp]}}}\"\n else:\n #No update\n return\n if ('novelty_matters' in options) and \\\n (options['novelty_matters'] == True) and \\\n ('already_seen' in node) and \\\n ('dfre_category' in node) and \\\n (node['dfre_category'].strip()=='Security') and \\\n (node['already_seen']=='False'):\n\n new_val = new_val/2\n novelty_matters_text = f\", not new: val=val/2\"\n #logging.debug(f\"Novelty matters for node {node}\")\n\n elif strategy == 'string':\n new_val = ''\n if type(support) == list:\n contributing_nodes=[]\n for cnpt in support:\n if supportProp in supportKG.g.nodes[cnpt]:\n new_val = new_val + supportKG.g.nodes[cnpt][supportProp] + \" \"\n contributing_nodes.append({cnpt: supportKG.g.nodes[cnpt][supportProp]})\n if len(contributing_nodes) > 0: \n contrib_text = f\"from supportKG nodes {repr(contributing_nodes)}\"\n else:\n #No update\n return\n else:\n if supportProp in supportKG.g.nodes[cnpt]:\n new_val = supportKG.g.nodes[support][supportProp]\n contrib_text = f\"from supportKG node {{{support}: {supportKG.g.nodes[support][supportProp]}}}\"\n else:\n #No update\n return\n\n elif strategy == 'inc':\n new_val = node[targetProp] + 1\n contrib_text = f\"for target node {{{target}: {node[targetProp]}}}\"\n\n else:\n raise ValueError(f'Weighting strategy {strategy} not supported.')\n\n #logging.debug(f\"New value for targetKG.g.nodes[{target}][{targetProp}]={new_val}\")\n node[targetProp] = new_val\n node[targetProp + '_strat'] = strategy\n if targetProp + '_histo' not in node:\n node[targetProp + '_histo'] = f\"Init val to {new_val}, strategy '{strategy}', contribution from {contrib_text}{novelty_matters_text};\"\n if ('status' in node) and (node['status'] == ''): node['status']='updated'\n elif old_val!=new_val:\n node[targetProp + '_histo'] += f\"Change val from {old_val} to {new_val}, strategy '{strategy}', contribution from {contrib_text}{novelty_matters_text};\"\n if ('status' in node) and (node['status'] == ''): node['status']='updated'",
"def _modify_tensor_quantizers(input_output_tensor_quantizers: TensorQuantizersTupleType, setting_name: str,\n quantizer_setting: bool, modified_tensor_quantizers: Dict[TensorQuantizer, Set]):\n setting_type = get_setting_type(setting_name)\n\n tensor_quantizers_to_modify = _get_tensor_quantizers_to_modify(input_output_tensor_quantizers, setting_name,\n quantizer_setting)\n for tensor_quantizer in tensor_quantizers_to_modify:\n if tensor_quantizer in modified_tensor_quantizers and \\\n setting_type in modified_tensor_quantizers[tensor_quantizer]:\n # Tensor quantizer's setting has already been modified\n if setting_name in [ConfigDictKeys.IS_INPUT_QUANTIZED, ConfigDictKeys.IS_OUTPUT_QUANTIZED]:\n current_setting = tensor_quantizer.enabled\n else:\n current_setting = tensor_quantizer.use_symmetric_encodings\n if current_setting != quantizer_setting:\n logger.error('Conflicting tensor quantizer settings for symmetric encodings')\n raise AssertionError\n else:\n if setting_name in [ConfigDictKeys.IS_INPUT_QUANTIZED, ConfigDictKeys.IS_OUTPUT_QUANTIZED]:\n tensor_quantizer.enabled = quantizer_setting\n else:\n tensor_quantizer.use_symmetric_encodings = quantizer_setting\n if tensor_quantizer not in modified_tensor_quantizers:\n modified_tensor_quantizers[tensor_quantizer] = {setting_type}\n else:\n modified_tensor_quantizers[tensor_quantizer].add(setting_type)",
"def defaultTarget(someTarget):\n\n import stitch.buildfile as buildfile\n buildfile.getCurBuildFile().defaultTarget = someTarget",
"def target_value(self, dct):\n try:\n value = dct[self.source]\n except KeyError:\n # First condition occurs if the configured source key is not in \n # the dictionary; no further manipulation/conversion takes place.\n return self.default\n else:\n # Second condition occurs if the value matches the one configured \n # as `none`; no further manipulation/conversion necessary.\n if value == self.none:\n return None\n # Majority of cases should find their way here. Source key is \n # in the dictionary and the value does not match `none`. All \n # further manipulation/conversion occurs.\n else:\n if self.rstrip is not NotConfigured:\n value = value.rstrip(self.rstrip)\n if self.lstrip is not NotConfigured:\n value = value.lstrip(self.lstrip)\n # Finally, after string manipulation (currently just strip), \n # data type conversion:\n value = self.data_type(value)\n return value",
"def update_override_settings(self, override_settings: dict) -> None:",
"def test_injections_respects_project_settings(self):\n set_app_default_setting('TEST_SETTING', 'foo')\n self.assertEqual(settings.TEST_SETTING, 'bar')",
"def doCopySettingsTo(self,attrArg):\n\ttry:\n\t d_targetReturn = validateAttrArg(attrArg,noneValid=False)\n\t mPlug_target = d_targetReturn['mi_plug']\n\t \n\t if self.isNumeric():\n\t\tif not mPlug_target.isNumeric():\n\t\t raise StandardError, \"source is numeric: '%s' | target is not: '%s'\"%(self.p_combinedShortName,mPlug_target.p_combinedShortName)\n\t\tif self.p_defaultValue is not False:mPlug_target.p_defaultValue = self.p_defaultValue\n\t\tif self.p_minValue is not False:mPlug_target.p_minValue = self.p_minValue\n\t\tif self.p_maxValue is not False:mPlug_target.p_maxValue = self.p_maxValue\n\t\tif self.p_softMax is not False:mPlug_target.p_softMax = self.p_softMax\n\t\tif self.p_softMin is not False:mPlug_target.p_softMin = self.p_softMin\n\t\t\n\t mPlug_target.p_hidden = self.p_hidden\n\t mPlug_target.p_locked = self.p_locked\n\t if mPlug_target.attrType not in ['string','message']:mPlug_target.p_keyable = self.p_keyable\n\t return True\n\texcept Exception,error:\n\t fmt_args = [self.obj.p_nameShort, self.p_nameLong, attrArg, error]\n\t s_errorMsg = \"{0}.{1}.doCopySettingsTo() | attrArg: {2} | error: {3}\".format(*fmt_args)\t \n\t log.error(s_errorMsg)",
"def solve_set_value(self, set_value: Any) -> None:\n self.apply_node_changes()\n\n if self.path_and_name is None:\n raise ValueError(\"path_and_none should not be None\")\n\n setattr(self.parent.object_ref, self.path_and_name.rsplit(\".\")[-1], set_value)",
"def update_settings(**kwds):\n for k,v in iteritems(kwds):\n if v is UNSET:\n if hasattr(settings, k):\n if has_django0:\n delattr(settings._target, k)\n else:\n delattr(settings, k)\n else:\n setattr(settings, k, v)",
"def set_joint_target(self, target, group_id=0):\n try:\n self.group[group_id].set_joint_value_target(self._simplify_joints(target,group_id))\n self.group[group_id].set_planner_id(self.planner)\n except moveit_commander.MoveItCommanderException as e:\n rospy.logerr('Unable to set target and planner: {}'.format(e))",
"def substitute(sentence, word, lang, target):\n conjugated_word = conjugate(str(target), word, lang)\n new_sentence = sentence.replace(target.text, conjugated_word, 1)\n return new_sentence",
"def _process_match_value(self, match, child=False):\n keys = self._match_config_property_keys(match, child=child)\n pattern_value = get_first_defined(self.values, keys, self._default_value)\n if pattern_value:\n match.value = pattern_value",
"def test_override_duplicates(self):\n targets = Table()\n targets['TARGETID'] = [1, 2, 3, 2, 1, 5]\n n = len(targets['TARGETID'])\n orig_subpriority = np.random.random(n)\n targets['SUBPRIORITY'] = orig_subpriority.copy()\n\n override = Table()\n override['TARGETID'] = np.array([3, 2, 20])\n override['SUBPRIORITY'] = np.array([10.0, 20.0, 30.0])\n\n desitarget.subpriority.override_subpriority(targets, override)\n\n # - Check that we overrode correctly; don't juse geomask.match\n # - to avoid circularity of code and test\n for i, tid in enumerate(targets['TARGETID']):\n in_override = np.where(override['TARGETID'] == tid)[0]\n if len(in_override) > 0:\n j = in_override[0]\n self.assertEqual(targets['SUBPRIORITY'][i], override['SUBPRIORITY'][j])\n else:\n self.assertEqual(targets['SUBPRIORITY'][i], orig_subpriority[i])",
"def set(digraph, node, target, old_label=None, new_label=None):\n digraph.set('Peter', 'Pete')\n assert not digraph.has('Peter')\n assert digraph.has('Pete')\n assert digraph.has('Pete', 'Sarah', 'likes')\n\n digraph.set('Pete', 'Sarah', 'likes', 'dislikes')\n assert not digraph.has('Pete', 'Sarah', 'likes')\n assert digraph.has('Pete', 'Sarah', 'dislikes')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Perform substitutions for the given value. If value is a string, perform substitutions in the string. If value is a list, then perform substitutions in every string in the list.
|
def perform_subst(value: Union[str, List[str]]) -> Union[str, List[str]]:
newval = "" # type: Union[str, List[str]]
if isinstance(value, list):
newval = list(map(lambda input_str: subst_str(input_str, lambda key: config_dict[key]), value))
else:
newval = subst_str(value, lambda key: config_dict[key])
return newval
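A toy, self-contained sketch of the same behaviour; `config_dict` and `subst_str` below are simplified stand-ins for the real module-level helpers (the real `subst_str` may differ in detail, but is assumed to replace `${key}` occurrences via the lookup it is given).
import re

config_dict = {"technology.name": "asap7", "vlsi.core.build_dir": "build"}

def subst_str(input_str, lookup):
    # Replace each ${key} with lookup(key) -- a simplified approximation.
    return re.sub(r"\$\{([^}]+)\}", lambda m: str(lookup(m.group(1))), input_str)

def perform_subst(value):
    if isinstance(value, list):
        return [subst_str(s, lambda key: config_dict[key]) for s in value]
    return subst_str(value, lambda key: config_dict[key])

print(perform_subst("${technology.name}/rundir"))          # asap7/rundir
print(perform_subst(["${vlsi.core.build_dir}/syn", "x"]))   # ['build/syn', 'x']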
|
[
"def apply_replaces(value, replaces):\n if isinstance(value, list):\n return list(map(lambda v: apply_replaces(v, replaces), value))\n\n for pattern, replacement in replaces.items():\n value = re.sub(pattern, replacement, value)\n\n return value",
"def _substitute(valueish, replacements, root=()):\n if isinstance(valueish, dict):\n return type(valueish)({\n k: _substitute(v, replacements, root + (k, ))\n for (k, v) in valueish.items()\n })\n elif isinstance(valueish, (tuple, list)):\n return type(valueish)((\n _substitute(v, replacements, root + (ix, ))\n for (ix, v) in enumerate(valueish)))\n else:\n return replacements[root]",
"def deepsubst_targets(key: str, value: Any) -> List[str]:\n if isinstance(value, str):\n # This is just regular subst\n return subst_targets(key, value)\n if isinstance(value, (dict, list)):\n # Recursively find all strings\n def find_strings(x: Union[List, Dict]) -> List[str]:\n iterator = x # type: Iterable[Any]\n if isinstance(x, dict):\n iterator = x.values()\n\n output = [] # type: List\n for item in iterator:\n if isinstance(item, str):\n output.extend([s for s in subst_targets(key, item) if s not in output])\n elif isinstance(item, list) or isinstance(item, dict):\n output.extend([s for s in find_strings(item) if s not in output])\n return output\n\n return find_strings(value)\n raise ValueError(f\"deepsubst cannot be used with this type: {value}\")",
"def substitute_value(d, subst, key):\n if debugging:\n _log.debug(f\"substitute value: d={d} subst={subst} key={key}\")\n # make a scalar into a list of length 1, but remember whether\n # it's a list or not\n if (\n isinstance(d[key], str)\n or isinstance(d[key], int)\n or isinstance(d[key], float)\n ):\n str_values = [d[key]]\n is_list = False\n else:\n try:\n str_values = list(d[key])\n except TypeError:\n str_values = [str(d[key])]\n is_list = True\n # substitute all values in the list, with the result in `new_list`\n num_subst, new_list = 0, []\n for str_value in str_values:\n new_value = None\n if dicty(subst):\n if str_value in subst:\n new_value = subst[str_value]\n # add case-insensitivity\n elif str_value.lower() in subst:\n new_value = subst[str_value.lower()]\n elif subst == cls.SUBST_UNITS:\n if isinstance(\n str_value, str\n ): # make sure it's not already evaluated\n _log.debug(\n f\"Substituting units: set {{'{key}': units('{str_value}')}} in {d}\"\n )\n new_value = cls._build_units(str_value)\n if new_value is None:\n new_list.append(str_value) # unsubstituted value\n else:\n new_list.append(new_value)\n num_subst += 1\n # change input to substituted list (or single value)\n d[key] = new_list if is_list else new_list[0]\n # return True only if all values were substituted\n return num_subst == len(new_list)",
"def register_substitution(name: str, value: any):\n _substitutions[name] = value",
"def substitute(val, s):\n while is_var(val):\n for svar, sval in substitutions(s):\n if val is svar:\n val = sval\n break\n else:\n break\n return val",
"def replace_string_list(l, pattern, new_string):\n for i in range(len(l)):\n if type(l[i]) == str:\n l[i] = l[i].replace(pattern, new_string)\n elif type(l[i]) == list:\n replace_string_list(l[i], pattern, new_string)\n elif type(l[i]) == dict:\n replace_string_dict(l[i], pattern, new_string)",
"def deepsubst_action(config_dict: dict, key: str, value: Any) -> None:\n def do_subst(oldval: Any) -> Any:\n if isinstance(oldval, str):\n # This is just regular subst\n return subst_str(oldval, lambda key: config_dict[key])\n if isinstance(oldval, list):\n return list(map(do_subst, oldval))\n if isinstance(oldval, dict):\n # We need to check for _deepsubst_meta here\n newval = {} # type: Dict\n for k, v in oldval.items():\n if isinstance(k, str):\n if k.endswith(\"_deepsubst_meta\"):\n base = k.replace(\"_deepsubst_meta\", \"\")\n if base not in oldval:\n raise ValueError(f\"Deepsubst meta key provided, but there is no matching base key: {k}\")\n # Note that we don't add the meta back to newval.\n else:\n meta_key = f\"{k}_deepsubst_meta\"\n if meta_key in oldval:\n # Do the deepsubst_meta, whatever it is.\n meta = oldval[meta_key]\n if meta in DeepSubstMetaDirectives:\n if isinstance(v, str):\n newval[k] = DeepSubstMetaDirectives[meta](config_dict, v)\n else:\n raise ValueError(f\"Deepsubst metas not supported on non-string values: {v}\")\n else:\n err_keys = \", \".join(DeepSubstMetaDirectives.keys())\n raise ValueError(f\"Unknown deepsubst_meta type: {meta}. Valid options are [{err_keys}].\")\n else:\n newval[k] = do_subst(v)\n else:\n # k is not an instance of a string.\n # Will this ever happen? It's possible you could have {1: \"foo\"}...\n newval[k] = do_subst(v)\n return newval\n return oldval\n\n config_dict[key] = do_subst(value)",
"def _replace_value(v, rels, dmaap):\n if isinstance(v, six.string_types): # do not try to replace anything that is not a string\n match_on_rels = re.match(template_match_rels, v)\n if match_on_rels:\n # now holds just x,.. of {{x,...}}\n template_identifier = match_on_rels.groups()[0].strip()\n rtpartial = partial(_replace_rels_template, rels)\n return reduce(lambda a, b: a + b, map(rtpartial, template_identifier.split(\",\")), [])\n match_on_dmaap = re.match(template_match_dmaap, v)\n if match_on_dmaap:\n template_identifier = match_on_dmaap.groups()[0].strip()\n \"\"\"\n Here is what Mike said:\n 1) want simple replacement of \"<< >>\" with dmaap key value\n 2) never need to support <<f1,f2>> whereas we do support {{sct1,sct2}}\n The consequence is that if you give the CBS a dmaap key like {\"foo\" : {...}} you are going to get back {...}, but rels always returns [...].\n So now component developers have to possible handle dicts and [], and we have to communicate that to them\n \"\"\"\n return _replace_dmaap_template(dmaap, template_identifier)\n return v # was not a match or was not a string, return value as is",
"def __resolve_value__(value, dict_type):\n t = type(value)\n\n if t is dict:\n value = dict_type(value)\n\n elif t is list:\n value = [__resolve_value__(e, dict_type) for e in value]\n\n return value",
"def substitute(string, substitutions):\n for key, value in substitutions:\n string = re.sub(re.escape(\"{{\" + key + \"}}\"), value, string)\n return string",
"def _multiple_replace(mapping, text):\n pattern = \"|\".join(map(re.escape, mapping.keys()))\n return re.sub(pattern, lambda m: mapping[m.group()], str(text))",
"def prepare_for_saving(self, value):\r\n if isinstance(value, basestring):\r\n return self._escape(value)\r\n else:\r\n return (self._escape(v) for v in value)",
"def multi_replace(text, targets, replacer):\n for t in targets:\n text = text.replace(t, replacer)\n return text",
"def replace_recurse(c, wildcards):\n # print(\"recurse c: \", c)\n\n for k, v in c.items():\n if isinstance(v, dict):\n replace_recurse(c[k], wildcards)\n elif isinstance(v, list):\n for index, item in enumerate(v):\n replace_recurse(item, wildcards)\n elif isinstance (v, str):\n # print(\"key/value : \", k, \"; \", v)\n # print(\"c[k] : \", c[k])\n c[k] = c[k].format(**wildcards)\n # print(\"c[k]2: \", c[k])",
"def substitute(self, substitution_map):\n for element_name in substitution_map:\n assert (is_constant(element_name) or is_variable(element_name)) and \\\n type(substitution_map[element_name]) is Term\n # Task 9.1",
"def substitute(self, substitution_map):\n for element_name in substitution_map:\n assert (is_constant(element_name) or is_variable(element_name)) and \\\n type(substitution_map[element_name]) is Term\n # Task 9.2",
"def value_convert(_dict, fn, traverse_list=True):\n for k in _dict:\n if traverse_list and isinstance(_dict[k], list):\n _dict[k] = [fn(x) for x in _dict[k]]\n else:\n _dict[k] = fn(_dict[k])\n return _dict",
"def multiple_replace(dict, text):\n\n # Create a regular expression from the dictionary keys\n regex = re.compile(\"(%s)\" % \"|\".join(map(re.escape, dict.keys())))\n\n # For each match, look-up corresponding value in dictionary\n return regex.sub(lambda mo: dict[mo.string[mo.start():mo.end()]], text)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Copy the contents of the referenced key for use as this key's value. If the reference is a list, then apply the crossref for each element of the list.
|
def crossref_action(config_dict: dict, key: str, value: Any) -> None:
if isinstance(value, str):
config_dict[key] = config_dict[value]
elif isinstance(value, list):
def check_and_get(k: Any) -> Any:
return config_dict[crossref_check_and_cast(k)]
config_dict[key] = list(map(check_and_get, value))
elif isinstance(value, numbers.Number):
        # bool is a subclass of int, so bools are also instances of numbers.Number and land here
raise ValueError("crossref cannot be used with numbers and bools")
else:
raise NotImplementedError("crossref not implemented on other types yet")
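A minimal usage sketch, assuming `crossref_action` above is in scope; only the single-string form is shown because the list form also needs `crossref_check_and_cast`, which is defined elsewhere. The keys are illustrative.
cfg = {"vlsi.inputs.top_module": "Tile"}
crossref_action(cfg, "synthesis.inputs.top_module", "vlsi.inputs.top_module")
assert cfg["synthesis.inputs.top_module"] == "Tile"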
|
[
"def reference(self):\n for termkey, termval in six.iteritems(self.terms):\n termval.relations.update(\n (relkey, TermList(\n (self.terms.get(x) or Term(x, '', '')\n if not isinstance(x, Term) else x) for x in relval\n )) for relkey, relval in six.iteritems(termval.relations)\n )",
"def references(self, key):\r\n doesReference = {}\r\n for k, e in self.graph.items():\r\n doesReference[k] = self.__references(key, k, e, doesReference)\r\n return doesReference",
"def insert_ref(self, item, key=None, axes=None, copy=True, replace=True):\n _DEPRECATION_ERROR_METHOD(\n self,\n \"insert_ref\",\n \"Use method 'set_construct' or 'set_coordinate_reference' \"\n \"instead.\",\n version=\"3.0.0\",\n removed_at=\"4.0.0\",\n ) # pragma: no cover",
"def selfupdate(self, key_list):\n pass",
"def reset_to_reference_parameters(self):\n for key, value in self.reference_params.items():\n setattr(self, key, value)",
"def unpackRefs(self) -> NamedKeyDict:\n return NamedKeyDict((datasetType, scaffolding.refs[0]) for datasetType, scaffolding in self.items())",
"def push(self, value, reference): # real signature unknown; restored from __doc__\n pass",
"def fill_references(item: Any, references: Dict[str, Any] = None) -> Any:\n if isreference(item):\n if references is None:\n raise ValueError(f\"Found reference {item} but references is None.\")\n if item not in references:\n raise ValueError(f\"Found reference {item} not in references\")\n return references[item]\n if isinstance(item, dict):\n return {key: fill_references(value, references) for key, value in item.items()}\n if isinstance(item, list):\n return [fill_references(it, references) for it in item]\n if isinstance(item, tuple):\n return tuple(fill_references(it, references) for it in item)\n return item",
"def expand_ref(json_obj, definition):\n json_obj = copy(json_obj) # make a copy so we don't change the input json_obj\n if isinstance(json_obj, dict):\n for key in list(json_obj.keys()):\n if key == \"$ref\":\n if json_obj[key].startswith(\"#/definitions/\"):\n concept = json_obj[key].split(\"/\")[-1]\n if concept in definition:\n json_obj.pop(\"$ref\")\n json_obj.update(definition[concept])\n else:\n raise ValueError(\"{} is not defined\".format(json_obj[key]))\n elif isinstance(json_obj[key], dict):\n resolved = expand_ref(json_obj[key], definition)\n json_obj[key] = resolved\n elif isinstance(json_obj[key], list):\n for k, v in enumerate(json_obj[key]):\n resolved = expand_ref(v, definition)\n if resolved:\n json_obj[key][k] = resolved\n elif isinstance(json_obj, list):\n for key, value in enumerate(json_obj):\n resolved = expand_ref(value, definition)\n if resolved:\n json_obj[key] = resolved\n return json_obj",
"def copy(self, new_key=None):\r\n return self.dbobj.copy(new_key=new_key)",
"def propagate_attr(self, key):\n for i in range(len(self.series)):\n self.series[i][key] = self[key]",
"def target_ref(self, value: typing.Union[\"ObjectReference\", dict]):\n if isinstance(value, dict):\n value = typing.cast(\n ObjectReference,\n ObjectReference().from_dict(value),\n )\n self._properties[\"targetRef\"] = value",
"def assign_ref(data):\n data = copy(data)\n operation_length = list()\n pv_dataset = list()\n for _id, pvs in data.items():\n operation_length.append((len(pvs[0]['values']), _id))\n pv_list = list()\n for pv_dict in pvs:\n pv_list.append(pv_dict['name'])\n pv_dataset.append(pv_list)\n\n median_len = np.median([l for l, _id in operation_length])\n\n # Select the ref_len=50 closest to the median bacthes\n # center around the median\n centered = [(abs(l-median_len), _id) for l, _id in operation_length]\n selected = sorted(centered)\n\n med_id = selected[0][1] # 5153\n\n # pop batches without all pvs\n # ids = list(data.keys())\n # for _id in ids:\n # k = len(data[_id])\n # if k != 99:\n # data.pop(_id)\n\n all_ids = list(data.keys())\n for _id in all_ids:\n if _id not in [x[1] for x in selected]:\n _ = data.pop(_id)\n\n data['reference'] = med_id\n\n return data",
"def __remove_reference(self, key, transaction, txn):\n if isinstance(key, tuple):\n #create a byte string key, first validity check in python 3!\n for val in key:\n if sys.version_info[0] >= 3 and isinstance(val, bytes):\n raise DbError(_('An attempt is made to save a reference key '\n 'which is partly bytecode, this is not allowed.\\n'\n 'Key is %s') % str(key))\n key = str(key)\n if isinstance(key, UNITYPE):\n key = key.encode('utf-8')\n if not self.readonly:\n if not transaction.batch:\n old_data = self.reference_map.get(key, txn=txn)\n transaction.add(REFERENCE_KEY, TXNDEL, key, old_data, None)\n #transaction.reference_del.append(str(key))\n self.reference_map.delete(key, txn=txn)",
"def absorb(self, key, ctx):\n self.data[key] = dicts.merge(self.data.get(key), ctx.data)\n return self",
"def copy(self, key):\r\n copy = Set(key=key, db=self.db)\r\n copy.clear()\r\n copy |= self\r\n return copy",
"def nested_assign(self, key_list, value):\n if len(key_list) == 1:\n self[key_list[0]] = value\n elif len(key_list) > 1:\n if key_list[0] not in self:\n self[key_list[0]] = LIVVDict()\n self[key_list[0]].nested_assign(key_list[1:], value)",
"def referencesCopy(self) -> \"SbBool\":\n return _coin.SoField_referencesCopy(self)",
"def _populate_set_refs(self, set_list):\n objects = []\n for s in set_list:\n objects.append({'ref': s['ref']})\n\n if len(objects) > 0:\n obj_data = self.ws.get_objects2({\n 'objects': objects,\n 'no_data': 1\n })['data']\n\n # if ws call worked, then len(obj_data)==len(set_list)\n for k in range(0, len(obj_data)):\n items = []\n for item_ref in obj_data[k]['refs']:\n items.append({'ref': item_ref})\n set_list[k]['items'] = items\n\n return set_list"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Transclude the contents of the file pointed to by value.
|
def transclude_action(config_dict: dict, key: str, value: Any) -> None:
assert isinstance(value, str), "Path to file for transclusion must be a string"
with open(value, "r") as f:
file_contents = str(f.read())
config_dict[key] = file_contents
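A short usage sketch, assuming `transclude_action` above is in scope; it writes a throwaway file and transcludes its contents into the config dict.
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".sdc", delete=False) as f:
    f.write("create_clock -period 1.0 [get_ports clk]\n")
    path = f.name

cfg = {}
transclude_action(cfg, "synthesis.inputs.sdc_contents", path)
assert cfg["synthesis.inputs.sdc_contents"].startswith("create_clock")
os.remove(path)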
|
[
"def expand(self,\n context,\n outputFile,\n outputEncoding=None,\n interpreter=None):\n # This method must wrap outputFile if required by the encoding, and write out\n # any template pre-amble (DTD, Encoding, etc)\n self.expandInline(context, outputFile, interpreter)",
"def original_content(self, filename):\n raise NotImplementedError",
"def recurse_tsv(contents: str, fnc: Callable, delim: str = \"\\t\") -> Tuple[str, bool]:\n modified = False\n new_contents = []\n for line in contents.split(\"\\n\"):\n new_values = []\n for v in line.split(delim):\n new_val, modified_ = fnc(v)\n modified |= modified_\n if modified_:\n new_values.append(new_val)\n else:\n new_values.append(v)\n new_contents.append(delim.join(new_values))\n\n return \"\\n\".join(new_contents), modified",
"def expand(self,\n context,\n outputFile,\n outputEncoding=\"utf-8\",\n interpreter=None):\n # This method must wrap outputFile if required by the encoding, and write out\n # any template pre-amble (DTD, Encoding, etc)\n if isinstance(outputFile, io.TextIOBase):\n encodingFile = outputFile\n else:\n writer = codecs.getwriter(outputEncoding)\n encodingFile = writer(outputFile, errors='xmlcharrefreplace')\n self.expandInline(context, encodingFile, interpreter)",
"def expandContent(self, content):\n self.content += content",
"def set_contents(self, contents=None):\n \n if contents is None and self.__contents is None:\n return\n \n root = self.__container\n root.InitChange()\n try:\n old_contents = self.__contents\n if old_contents is not None:\n root.RemChild(old_contents)\n self.__contents = contents\n if contents:\n #print \"Set %s as contents of %s\" % (contents, root)\n root.AddTail(contents)\n finally:\n root.ExitChange()\n return old_contents",
"def tag_file(self, path, key, value):\n return False",
"def add_control_file_above(self):\n # get the index of the selected item\n idx = self.view.selectedIndexes()[0]\n # get the selected item from the index\n selected_item = idx.model().itemFromIndex(idx)\n new_file_number = min([0, int(float(selected_item.text()))-1])\n # get the parent of the selected item\n parent = selected_item.parent()\n # insert the new file entry\n new_file_entry = [QtGui.QStandardItem(str(new_file_number)),\n QtGui.QStandardItem(\"Right click to browse\")]\n parent.insertRow(idx.row(), new_file_entry)\n # renumber the section\n self.renumber_subsection_keys(parent)\n # add an asterisk to the tab text to indicate the tab contents have changed\n self.update_tab_text()\n return",
"def set_value(\n self,\n item,\n value,\n subgroup=None,\n ignore_type=False,\n allow_undefined=False,\n return_file=False,\n ):\n expect(\n not self._read_only_mode,\n \"Cannot modify case, read_only. \"\n \"Case must be opened with read_only=False and can only be modified within a context manager\",\n )\n\n if item == \"CASEROOT\":\n self._caseroot = value\n result = None\n\n for env_file in self._files:\n result = env_file.set_value(item, value, subgroup, ignore_type)\n if result is not None:\n logger.debug(\"Will rewrite file {} {}\".format(env_file.filename, item))\n return (result, env_file.filename) if return_file else result\n\n if len(self._files) == 1:\n expect(\n allow_undefined or result is not None,\n \"No variable {} found in file {}\".format(item, self._files[0].filename),\n )\n else:\n expect(\n allow_undefined or result is not None,\n \"No variable {} found in case\".format(item),\n )",
"def construct_include_text(loader: _ConfigLoader, node: yaml.Node) -> Any:\n\n filename = os.path.abspath(os.path.join(loader._root, loader.construct_scalar(node)))\n\n with open(filename, 'r') as f:\n return f.read()",
"def generate(self, node, file):\n file.write('<p>')\n super().generate(node, file)\n file.write('</p>')",
"def set_FileContent(self, value):\n super(UploadSessionFinishInputSet, self)._set_input('FileContent', value)",
"def includebase(parser, tocken):\r\n bits = tocken.split_contents()\r\n mitemplate = bits[1]\r\n mitemplate2 = _prefijocomillas(mitemplate, templateCTX.directorio_base + \"/\")\r\n tocken.contents = tocken.contents.replace(mitemplate, mitemplate2)\r\n return template.loader_tags.do_include(parser, tocken)",
"def generate_tag_value_files(self, dir: Path) -> None:\n if not dir.exists():\n raise ValueError(f\"Undefined directory: {str(dir)}\")\n if not dir.is_dir():\n raise NotADirectoryError(str(dir))\n\n externalRefs = list()\n for spdx_dependency in self.dependency_documents:\n file_name = f\"{spdx_dependency.name}.spdx\"\n checksum = SpdxProject.generate_tag_value_file(dir, spdx_dependency, file_name)\n externalRefs.append(\n DependencySpdxDocumentRef(\n name=spdx_dependency.document_name, namespace=spdx_dependency.document_namespace, checksum=checksum\n )\n )\n self.main_document.external_refs = externalRefs\n SpdxProject.generate_tag_value_file(dir, self.main_document, f\"{self.main_document.name}.spdx\")",
"def prepend_content(self, content):\n self.contents = content + self.contents",
"def register_text_content(self, exact_file_name, file_contents):\n self.content_map[exact_file_name] = file_contents",
"def _replace_include_by_content(\n xml_tree, include_element, dependency_content\n ):\n if dependency_content is not None:\n # build the tree of the dependency\n dependency_tree = XSDTree.fromstring(dependency_content)\n # get elements from dependency\n dependency_elements = dependency_tree.getchildren()\n # appends elements from dependency to tree\n for element in dependency_elements:\n xml_tree.getroot().append(element)\n # remove the include element\n include_element.getparent().remove(include_element)",
"def add_input_file_above(self):\n # get the index of the selected item\n idx = self.view.selectedIndexes()[0]\n # get the selected item from the index\n selected_item = idx.model().itemFromIndex(idx)\n new_file_number = min([0, int(float(selected_item.text()))-1])\n # get the parent of the selected item\n parent = selected_item.parent()\n # insert the new file entry\n new_file_entry = [QtGui.QStandardItem(str(new_file_number)),\n QtGui.QStandardItem(\"Right click to browse\")]\n parent.insertRow(idx.row(), new_file_entry)\n # renumber the section\n self.renumber_subsection_keys(parent)\n # add an asterisk to the tab text to indicate the tab contents have changed\n self.update_tab_text()\n return",
"def link_file_content(self):\n return self._link_file_content"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Turn the value of the key (JSON list) into a list.
|
def json2list_action(config_dict: dict, key: str, value: Any) -> None:
assert isinstance(value, str), "json2list requires a JSON string that is a list"
parsed = json.loads(value)
assert isinstance(parsed, list), "json2list requires a JSON string that is a list"
config_dict[key] = parsed
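A tiny usage sketch, assuming `json2list_action` above is in scope (it relies on the module-level `import json`). The key name is illustrative.
cfg = {}
json2list_action(cfg, "synthesis.extra_args", '["-top", "Tile", "-retime"]')
assert cfg["synthesis.extra_args"] == ["-top", "Tile", "-retime"]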
|
[
"def __GetKeyValueAsList(self, key):\n if not self.request.has_key(key):\n return None\n\n key_value = self.request[key]\n # Check that the value is actually a list\n if not isinstance(key_value, list):\n return None\n\n return key_value",
"def _list(key: str, vals: dict) -> list:\n result = vals.get(key, [])\n if not isinstance(result, list):\n result = [result]\n return result",
"def get_value_as_list(self, key):\n value = self.get(key)\n if value is None:\n return []\n if isinstance(value, MetainfoValue):\n return [value]\n return value",
"def _convert_to_list(\n self, data: Dict[str, Union[str, int]]\n ) -> Sequence[Union[str, int]]:\n data.pop(\"id\")\n return list(data.values())",
"def dict_to_list(value, joiner=\"=\"):\n return [list_to_str(items, joiner) for items in value.items()]",
"def get_list(self, key_list):\n return [self.get(key) for key in key_list]",
"def _convertListProperty(model_property, entity):\n return [\n _newKey(old_key)\n for old_key in model_property.get_value_for_datastore(entity) or []]",
"def _to_list(val):\n if isinstance(val, list):\n return val\n else:\n return [val]",
"def kexp_to_list(kexp):\n if kexp[0] != '{' or kexp[-1] != '}':\n raise ParseException(\"kexp_to_list: not a list: {}\".format(kexp))\n kexp = kexp[1:-1]\n return string_to_kexp_strings(kexp)",
"def str_to_list(value):\n if isinstance(value, str):\n return [value]\n else:\n return value",
"def as_list(self, key, default=None):\r\n lines = []\r\n try:\r\n vlist = self[key]\r\n except KeyError:\r\n return default\r\n for val in vlist:\r\n lines.extend(\r\n line.strip() for line in val.splitlines()\r\n if line.strip() and not line.strip().startswith('#'))\r\n return lines",
"def listitems(d):\n return list(iteritems(d))",
"def listvalues(d):\n return list(itervalues(d))",
"def get_list(self, key):\n return self.__settings[key]",
"def cast_list(self, parm, dict_convert='keys'):\n if type(parm)==str:\n rtn.append(parm)\n elif type(parm)==list:\n rtn = parm\n elif type(parm)==dict:\n rtn=[] # none returns empty list\n if dict_convert=='keys': rtn = list(parm)\n if dict_convert=='values': rtn = list(parm.values())\n if dict_convert=='both':\n for n,v in parm.items():\n rtn.append(n)\n rtn.append(v)\n else:\n msg = \"must be list or string,\\n you supplied %s\" %type(filepaths)\n self.log(msg, error=True)\n raise ValueError(msg)\n return rtn",
"def dict_to_list(self):\n\t\tres = []\n\t\tfor k,v in self.get_order().items():\n\t\t\tres.extend([k]*v)\n\t\tself.order = res",
"def _list(x):\n if not isinstance(x, list):\n x = list(x)\n return x",
"def _to_list(e):\n return e if _is_list(e) else [e]",
"def _get_list_value(\r\n self, value: Union[List[Any], tuple, Any]) -> List[Any]:\r\n if isinstance(value, tuple):\r\n return list(value)\r\n if isinstance(value, Array):\r\n return value._value\r\n return value"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prepend the local path of the config dict.
|
def prependlocal_action(config_dict: dict, key: str, value: Any) -> None:
if isinstance(value, list):
new_values = []
for v in value:
new_values.append(os.path.join(config_dict[_CONFIG_PATH_KEY], str(v)))
config_dict[key] = new_values
else:
config_dict[key] = os.path.join(config_dict[_CONFIG_PATH_KEY], str(value))
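A short usage sketch, assuming `prependlocal_action` and `_CONFIG_PATH_KEY` above are in scope, and assuming `_CONFIG_PATH_KEY` holds the directory of the config file that defined the key. The paths are illustrative (POSIX joins shown).
cfg = {_CONFIG_PATH_KEY: "/projects/chip/config"}
prependlocal_action(cfg, "technology.tarball", "asap7.tar.gz")
# cfg["technology.tarball"] -> "/projects/chip/config/asap7.tar.gz"

prependlocal_action(cfg, "synthesis.tcl_hooks", ["pre.tcl", "post.tcl"])
# cfg["synthesis.tcl_hooks"] -> ["/projects/chip/config/pre.tcl",
#                                "/projects/chip/config/post.tcl"]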
|
[
"def prepend_path(path):\n\n environ_path = get_paths()\n if path not in environ_path:\n environ_path.insert(0, path)\n environ[\"PATH\"] = \":\".join(environ_path)",
"def _expandPathInConfig(path,config):\r\n config[path] = os.path.expandvars(config[path])",
"def add_to_default_path(self, to_add):\n default_env_path = self.defaults.env.path\n if default_env_path:\n splitted_paths = default_env_path.split(os.pathsep)\n else:\n splitted_paths = list()\n to_add = qisys.sh.to_native_path(to_add)\n if to_add not in splitted_paths:\n splitted_paths.insert(0, to_add)\n self.defaults.env.path = os.pathsep.join(splitted_paths)",
"def relative_config(config, organisms_dir, samples_dir, data_dir):\n for param in params.ENV_PARAMS:\n config[param] = test.customise_path(\"\", config[param])",
"def opt_config(self, path):\n self['extra_configs'].append(os.path.abspath(path))",
"def _configure_local_paths(local_paths):\n answer = copy(local_paths)\n\n # Ask the user for a repository root.\n while not answer.get('reporoot'):\n logger.info('First, we need to know where you store most code on your '\n 'local machine.')\n logger.info('Other paths (example: toolkit) will derive from this, '\n 'but most are individually configurable.')\n logger.info('The use of ${REPOROOT} in GAPIC YAMLs will point here.')\n logger.info('Note: Use of ~ is fine here.')\n answer['reporoot'] = six.moves.input('Local code path: ')\n answer['reporoot'] = answer['reporoot'].rstrip('/').strip()\n\n # Set up dependent directories.\n reporoot = answer['reporoot']\n for dep in ('api-client-staging', 'googleapis', 'toolkit'):\n location = six.moves.input(\n 'Path for {0} (default: {1}/{0}): '.format(dep, reporoot)\n ).rstrip('/').strip()\n if location:\n answer[dep.replace('-', '_')] = location\n\n # Done; return the answer.\n return answer",
"def set_key_root(self, path):\n self._config['DEFAULT']['key_dir'] = os.path.expanduser(path)",
"def wf_use_local_configs(self, revision_dirname):\n nfconfig_fn = os.path.join(self.outdir, revision_dirname, \"nextflow.config\")\n find_str = \"https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}\"\n repl_str = \"${projectDir}/../configs/\"\n log.debug(f\"Editing 'params.custom_config_base' in '{nfconfig_fn}'\")\n\n # Load the nextflow.config file into memory\n with open(nfconfig_fn, \"r\") as nfconfig_fh:\n nfconfig = nfconfig_fh.read()\n\n # Replace the target string\n log.debug(f\"Replacing '{find_str}' with '{repl_str}'\")\n nfconfig = nfconfig.replace(find_str, repl_str)\n\n # Append the singularity.cacheDir to the end if we need it\n if self.container_system == \"singularity\" and self.container_cache_utilisation == \"copy\":\n nfconfig += (\n f\"\\n\\n// Added by `nf-core download` v{nf_core.__version__} //\\n\"\n + 'singularity.cacheDir = \"${projectDir}/../singularity-images/\"'\n + \"\\n///////////////////////////////////////\"\n )\n\n # Write the file out again\n log.debug(f\"Updating '{nfconfig_fn}'\")\n with open(nfconfig_fn, \"w\") as nfconfig_fh:\n nfconfig_fh.write(nfconfig)",
"def get_config_minimal() -> Path:\n return Path(__file__).parent.parent / \"config_minimal.yaml\"",
"def load_config(self):\n for local_var, config_var in self.from_config.items():\n value = flask.current_app.config.get(config_var)\n if value:\n if \".\" in local_var:\n # this is a dotpath -- needs special handling\n body, tail = local_var.rsplit(\".\", 1)\n obj = getattrd(self, body)\n setattr(obj, tail, value)\n else:\n # just use a normal setattr call\n setattr(self, local_var, value)",
"def redirectConfPath():\n if MYOS == 'Windows':\n return os.environ['APPDATA'].decode(SYSENC)+u\"\\\\litebook_red.ini\"\n else:\n return unicode(os.environ['HOME'],SYSENC)+u\"/.litebook_red.ini\"",
"def prepend_home_dir(filename):\n return os.path.join(os.environ['HOME'], '.ssh', filename) if '/' not in filename else filename",
"def create_local_configs():\r\n print \"Local configuration files will be generated in %(build_path)s\" % env\r\n create_config_files()\r\n _interpolate_templates()",
"def _locater(app):\n return functools.partial(os.path.join, app.confdir)",
"def loadDefaultConfPath():\n global GlobalConfig\n GlobalConfig['path_list']={}\n if MYOS == 'Windows':\n GlobalConfig['path_list']['cache_dir']=os.environ['USERPROFILE'].decode(SYSENC)+u\"\\\\litebook\\\\cache\"\n GlobalConfig['path_list']['bookdb']=os.environ['APPDATA'].decode(SYSENC)+u\"\\\\litebook.bookdb\"\n GlobalConfig['path_list']['conf']=os.environ['APPDATA'].decode(SYSENC)+u\"\\\\litebook.ini\"\n GlobalConfig['path_list']['key_conf']=os.environ['APPDATA'].decode(SYSENC)+u\"\\\\litebook_key.ini\"\n else:\n GlobalConfig['path_list']['cache_dir']=unicode(os.environ['HOME'],SYSENC)+u\"/litebook/cache\"\n GlobalConfig['path_list']['bookdb']=unicode(os.environ['HOME'],SYSENC)+u\"/.litebook.bookdb\"\n GlobalConfig['path_list']['conf']=unicode(os.environ['HOME'],SYSENC)+u\"/.litebook.ini\"\n GlobalConfig['path_list']['key_conf']=unicode(os.environ['HOME'],SYSENC)+u\"/.litebook_key.ini\"",
"def _conf_addloc(app, args):\n conf_fof = get_conf_locations_fof(app.name)\n confloc = load_conf_locations(conf_fof)\n confloc[args.name] = args.location\n save_conf_locations(conf_fof, confloc)",
"def path_prepend(directory):\n os.environ['PATH'] = \":\".join((directory, os.environ['PATH']))",
"def prependExtensionConfig(self, name, config):\n assert isinstance(config, list);\n\n if not name in self.__extensionConfigs:\n self.__extensionConfigs[name] = list();\n\n self.__extensionConfigs[name].insert(0, config);",
"def local_path(self, local_path: str):\n\n self._local_path = local_path"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Perform a deep substitution on the value provided. This will replace any variables that occur in strings of the form ${...} and will also do a special meta replacement on keys which end in _deepsubst_meta.
|
def deepsubst_action(config_dict: dict, key: str, value: Any) -> None:
def do_subst(oldval: Any) -> Any:
if isinstance(oldval, str):
# This is just regular subst
return subst_str(oldval, lambda key: config_dict[key])
if isinstance(oldval, list):
return list(map(do_subst, oldval))
if isinstance(oldval, dict):
# We need to check for _deepsubst_meta here
newval = {} # type: Dict
for k, v in oldval.items():
if isinstance(k, str):
if k.endswith("_deepsubst_meta"):
base = k.replace("_deepsubst_meta", "")
if base not in oldval:
raise ValueError(f"Deepsubst meta key provided, but there is no matching base key: {k}")
# Note that we don't add the meta back to newval.
else:
meta_key = f"{k}_deepsubst_meta"
if meta_key in oldval:
# Do the deepsubst_meta, whatever it is.
meta = oldval[meta_key]
if meta in DeepSubstMetaDirectives:
if isinstance(v, str):
newval[k] = DeepSubstMetaDirectives[meta](config_dict, v)
else:
raise ValueError(f"Deepsubst metas not supported on non-string values: {v}")
else:
err_keys = ", ".join(DeepSubstMetaDirectives.keys())
raise ValueError(f"Unknown deepsubst_meta type: {meta}. Valid options are [{err_keys}].")
else:
newval[k] = do_subst(v)
else:
# k is not an instance of a string.
# Will this ever happen? It's possible you could have {1: "foo"}...
newval[k] = do_subst(v)
return newval
return oldval
config_dict[key] = do_subst(value)
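
# Hedged usage sketch (illustrative, not from the source): the key names below are
# hypothetical, and subst_str is assumed to expand "${key}" into config_dict[key].
example_config = {"technology.name": "example_tech"}
example_value = {"path": "/tools/${technology.name}/lib",
                 "lefs": ["${technology.name}.lef"]}
# deepsubst_action(example_config, "libs", example_value) is expected to store the
# recursively substituted value under example_config["libs"]:
expected_result = {"path": "/tools/example_tech/lib",
                   "lefs": ["example_tech.lef"]}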
|
[
"def deepsubst_targets(key: str, value: Any) -> List[str]:\n if isinstance(value, str):\n # This is just regular subst\n return subst_targets(key, value)\n if isinstance(value, (dict, list)):\n # Recursively find all strings\n def find_strings(x: Union[List, Dict]) -> List[str]:\n iterator = x # type: Iterable[Any]\n if isinstance(x, dict):\n iterator = x.values()\n\n output = [] # type: List\n for item in iterator:\n if isinstance(item, str):\n output.extend([s for s in subst_targets(key, item) if s not in output])\n elif isinstance(item, list) or isinstance(item, dict):\n output.extend([s for s in find_strings(item) if s not in output])\n return output\n\n return find_strings(value)\n raise ValueError(f\"deepsubst cannot be used with this type: {value}\")",
"def substitute(val, s):\n while is_var(val):\n for svar, sval in substitutions(s):\n if val is svar:\n val = sval\n break\n else:\n break\n return val",
"def apply_variable_substitution(subliminal, superliminal, is_render_variables=False):\n keyword = \"variables\"\n merged_variables = dict_util.merge_dicts(subliminal.get(keyword, {}), superliminal.get(keyword, {}), True)\n if is_render_variables:\n for k, v in merged_variables.items():\n if isinstance(v, str) or (not isinstance(v, dict) and not isinstance(v, list)):\n merged_variables[k] = dict_util.replace_placholders_in_string(str(v), merged_variables)\n\n merged_variables = dict_util.replace_placeholders(merged_variables, merged_variables)\n return dict_util.replace_placeholders(subliminal, merged_variables)\n else:\n subliminal[keyword] = merged_variables\n return subliminal",
"def _substitute(valueish, replacements, root=()):\n if isinstance(valueish, dict):\n return type(valueish)({\n k: _substitute(v, replacements, root + (k, ))\n for (k, v) in valueish.items()\n })\n elif isinstance(valueish, (tuple, list)):\n return type(valueish)((\n _substitute(v, replacements, root + (ix, ))\n for (ix, v) in enumerate(valueish)))\n else:\n return replacements[root]",
"def expand_variables(template_str, value_map, transformer=None):\n if template_str is None:\n return None\n else:\n if transformer is None:\n transformer = lambda v: v\n try:\n # Don't bother iterating items for Python 2+3 compatibility.\n transformed_value_map = {k: transformer(value_map[k]) for k in value_map}\n return Template(template_str).substitute(transformed_value_map)\n except Exception as e:\n raise ValueError(\"could not expand variable names in command '%s': %s\" % (template_str, e))",
"def expand_string_variables(value, env: Mapping, warn: bool = True):\n if not isinstance(value, str):\n return value\n def _replace_var(matched):\n default = None\n var = matched.group(1)\n if matched.group(2):\n var = matched.group(2)\n default = matched.group(4)\n found = env.get(var)\n if found is None or found == '':\n found = default\n if found is None and warn:\n logging.getLogger(__name__).warning('Configuration variable not defined: %s', var)\n found = ''\n return found\n return re.sub(r'\\$(?:(\\w+)|\\{([^}]*?)(:-([^}]*))?\\})', _replace_var, value)",
"def expand(val, variables):\n while True:\n m = re.match(r'.*\\$(\\w+).*', val)\n if m is not None and m.lastindex is not None and m.lastindex >= 1:\n varname = m.group(1)\n try:\n v = variables[varname]\n except KeyError:\n v = os.getenv(varname)\n if v is None:\n print(\"Unknown variable '{0}'\".format(varname))\n exit(1)\n val = re.sub(r\"\\$\"+varname, v, val)\n else:\n break\n return val",
"def subst_vars (s, local_vars):\r\n check_environ()\r\n def _subst (match, local_vars=local_vars):\r\n var_name = match.group(1)\r\n if local_vars.has_key(var_name):\r\n return str(local_vars[var_name])\r\n else:\r\n return os.environ[var_name]\r\n\r\n try:\r\n return re.sub(r'\\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)\r\n except KeyError, var:\r\n raise ValueError, \"invalid variable '$%s'\" % var",
"def expand(template, variables=None):\r\n if variables is None:\r\n variables = {}\r\n return patterns.sub(functools.partial(_replace, variables), template)",
"def substitute(s, variables):\n env = variables.copy()\n def repl(m):\n try: return str(eval(m.group(1), env))\n except: return ''\n return re.sub('\\{(.+?)\\}', repl, s)",
"def substitute_vars(cfg):\n for k, v in cfg.items():\n if isinstance(v, str):\n cfg[k] = test_define_value(v)[0]\n elif isinstance(v, dict):\n substitute_vars(v)\n elif isinstance(v, list):\n new_list = []\n for lv in v:\n if isinstance(lv, dict):\n substitute_vars(lv)\n new_list.append(lv)\n elif isinstance(lv, str):\n new_list.append(test_define_value(lv)[0])\n else:\n new_list.append(lv)\n cfg[k] = new_list",
"def substitute_variables(text, variables):\n dollar_pattern = r\"\"\"(?x) # Verbose regex syntax\n \\$ # A dollar sign,\n (?: # then\n (?P<dollar> \\$ ) | # a dollar sign, or\n (?P<word1> \\w+ ) | # a plain word, or\n { # a {-wrapped\n (?P<word2> \\w+ ) # word,\n (?:\n (?P<strict> \\? ) | # strict or\n -(?P<defval> [^}]* ) # defaulted\n )? # maybe\n }\n )\n \"\"\"\n\n def dollar_replace(match):\n \"\"\"Called for each $replacement.\"\"\"\n # Get the one group that matched.\n groups = match.group('dollar', 'word1', 'word2')\n word = next(g for g in groups if g)\n\n if word == \"$\":\n return \"$\"\n elif word in variables:\n return variables[word]\n elif match.group('strict'):\n msg = \"Variable {} is undefined: {!r}\"\n raise NameError(msg.format(word, text))\n else:\n return match.group('defval')\n\n text = re.sub(dollar_pattern, dollar_replace, text)\n return text",
"def shell_expand_variables(template_str, value_map):\n return expand_variables(template_str, value_map, transformer=pipes.quote)",
"def varsub(val):\n _dbg('varsub(): starting with val = %s' % val)\n if isinstance(val, None.__class__):\n return val\n if isinstance(val, int):\n return str(val)\n i = 0\n while i < 100:\n if MARKER == 'dollar':\n s = re.search(r'\\$\\{([^${}]+)\\}', val)\n else:\n s = re.search(r'\\~\\{([^~{}]+)\\}', val)\n try:\n s.group(1)\n except (IndexError, AttributeError):\n break\n _dbg('varsub(): s.group(0) = %s' % s.group(0))\n _dbg('varsub(): s.group(1) = %s' % s.group(1))\n needle = s.group(0).replace('$', r'\\$')\n val = re.sub(needle, __varsub(s.group(1)), val)\n i += 1\n if MARKER == 'dollar':\n # Un-escape $\\\\{ --> ${, and $\\\\\\{ --> $\\{\n val = val.replace(r'$\\\\{', r'${')\n val = val.replace(r'$\\\\\\{', r'$\\{')\n return val",
"def ResolveReferences(self, var_dict, args):\n re_var = re.compile('(\\$\\{[-_a-z0-9A-Z]{1,}\\})')\n\n while True:\n m = re_var.search(args)\n if not m:\n break\n lookup = m.group(0)[2:-1]\n value = var_dict.get(lookup, '')\n args = args[:m.start(0)] + value + args[m.end(0):]\n return args",
"def replace_recurse(c, wildcards):\n # print(\"recurse c: \", c)\n\n for k, v in c.items():\n if isinstance(v, dict):\n replace_recurse(c[k], wildcards)\n elif isinstance(v, list):\n for index, item in enumerate(v):\n replace_recurse(item, wildcards)\n elif isinstance (v, str):\n # print(\"key/value : \", k, \"; \", v)\n # print(\"c[k] : \", c[k])\n c[k] = c[k].format(**wildcards)\n # print(\"c[k]2: \", c[k])",
"def substitute(string, substitutions):\n for key, value in substitutions:\n string = re.sub(re.escape(\"{{\" + key + \"}}\"), value, string)\n return string",
"def substitute_value(d, subst, key):\n if debugging:\n _log.debug(f\"substitute value: d={d} subst={subst} key={key}\")\n # make a scalar into a list of length 1, but remember whether\n # it's a list or not\n if (\n isinstance(d[key], str)\n or isinstance(d[key], int)\n or isinstance(d[key], float)\n ):\n str_values = [d[key]]\n is_list = False\n else:\n try:\n str_values = list(d[key])\n except TypeError:\n str_values = [str(d[key])]\n is_list = True\n # substitute all values in the list, with the result in `new_list`\n num_subst, new_list = 0, []\n for str_value in str_values:\n new_value = None\n if dicty(subst):\n if str_value in subst:\n new_value = subst[str_value]\n # add case-insensitivity\n elif str_value.lower() in subst:\n new_value = subst[str_value.lower()]\n elif subst == cls.SUBST_UNITS:\n if isinstance(\n str_value, str\n ): # make sure it's not already evaluated\n _log.debug(\n f\"Substituting units: set {{'{key}': units('{str_value}')}} in {d}\"\n )\n new_value = cls._build_units(str_value)\n if new_value is None:\n new_list.append(str_value) # unsubstituted value\n else:\n new_list.append(new_value)\n num_subst += 1\n # change input to substituted list (or single value)\n d[key] = new_list if is_list else new_list[0]\n # return True only if all values were substituted\n return num_subst == len(new_list)",
"def expand_vars(self, in_table=None, old_key=None, update=True):\n if in_table:\n t = in_table\n else:\n t = self.data\n if not update:\n t = deepcopy(t)\n for key, value in t.items():\n # If we get a dict, recurse\n if isinstance(value, dict):\n if old_key:\n new_key = '%s.%s' % (old_key, key)\n else:\n new_key = key\n self.expand_vars(in_table=value, old_key=new_key)\n elif isinstance(value, str):\n # If we get string, first replace environment variables\n value = re.sub('\\$([A-z0-9-_]+)', get_env_variable, value)\n t[key] = value\n return t"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Look for all substitution targets (${...}) in value and return a list of the targets found.
|
def deepsubst_targets(key: str, value: Any) -> List[str]:
if isinstance(value, str):
# This is just regular subst
return subst_targets(key, value)
if isinstance(value, (dict, list)):
# Recursively find all strings
def find_strings(x: Union[List, Dict]) -> List[str]:
iterator = x # type: Iterable[Any]
if isinstance(x, dict):
iterator = x.values()
output = [] # type: List
for item in iterator:
if isinstance(item, str):
output.extend([s for s in subst_targets(key, item) if s not in output])
                elif isinstance(item, (list, dict)):
output.extend([s for s in find_strings(item) if s not in output])
return output
return find_strings(value)
raise ValueError(f"deepsubst cannot be used with this type: {value}")
|
[
"def get_ninja_targets(path):\n output = subprocess.check_output([\n CMAKE_EXECUTABLE, '--build', path, '--target', 'help'], cwd=path)\n lines = output.decode().splitlines()\n suffix = ':'\n return [\n line.split(' ')[0][:-len(suffix)]\n for line in lines\n if len(line.split(' ')) == 2 and line.split(' ')[0].endswith(suffix)]",
"def perform_subst(value: Union[str, List[str]]) -> Union[str, List[str]]:\n newval = \"\" # type: Union[str, List[str]]\n\n if isinstance(value, list):\n newval = list(map(lambda input_str: subst_str(input_str, lambda key: config_dict[key]), value))\n else:\n newval = subst_str(value, lambda key: config_dict[key])\n return newval",
"def get_targets(self):\n targets = []\n goto_target = re.compile(r'goto\\s(.*)')\n if_target = re.compile(r'if-\\w+\\s.*,\\s(\\S*)')\n switch_target = re.compile(r'packed-switch\\s.*,\\s(\\S*)')\n # TODO: What about catch?\n ll = self.instructions[-1].strip()\n goto_match = goto_target.match(ll)\n if goto_match:\n targets.append(goto_match.group(1))\n if_match = if_target.match(ll)\n if if_match:\n targets.append(if_match.group(1))\n switch_match = switch_target.match(ll)\n if switch_match:\n # TODO SWITCH CASES ARE BAD\n pass\n return targets",
"def apply_replaces(value, replaces):\n if isinstance(value, list):\n return list(map(lambda v: apply_replaces(v, replaces), value))\n\n for pattern, replacement in replaces.items():\n value = re.sub(pattern, replacement, value)\n\n return value",
"def extract_targets(args: Namespace) -> list:\n if type(args) is not Namespace:\n raise TypeError(\"[!] Invalid type for args\")\n\n list_targets = []\n\n if args.target_file:\n try:\n list_targets = list(\n set(list_targets + filter_targets(extract_targets_from_file(args.target_file)))\n )\n except IOError:\n print(\"[!] An error occurred when the program tries to open the file\")\n except Exception as e:\n print(e)\n\n if args.targets:\n try:\n list_targets = list(set((list_targets + filter_targets(args.targets))))\n except TypeError as e:\n print(e)\n\n return list_targets",
"def multi_replace(text, targets, replacer):\n for t in targets:\n text = text.replace(t, replacer)\n return text",
"def extract_lookups_from_string(value):\n lookups = set()\n for match in LOOKUP_REGEX.finditer(value):\n groupdict = match.groupdict()\n raw = match.groups()[0]\n lookup_type = groupdict[\"type\"]\n lookup_input = groupdict[\"input\"]\n lookups.add(Lookup(lookup_type, lookup_input, raw))\n return lookups",
"def ResolveReferences(self, var_dict, args):\n re_var = re.compile('(\\$\\{[-_a-z0-9A-Z]{1,}\\})')\n\n while True:\n m = re_var.search(args)\n if not m:\n break\n lookup = m.group(0)[2:-1]\n value = var_dict.get(lookup, '')\n args = args[:m.start(0)] + value + args[m.end(0):]\n return args",
"def apply_target(rule, substitutions):\n if rule.arity != len(substitutions):\n raise ValueError(\"Rule (%s) arity does not match substitutions: %s\" %\n (rule, substitutions))\n output = []\n for token in rule.target:\n if is_nt(token):\n index = get_nt_index(token)\n output.append(substitutions[index - 1])\n else:\n output.append(token)\n return \" \".join(output)",
"def find_targets(self):\n\n\t\tself.where_am_i()\n\n\t\ttext = self.text_fragment()\n\n\t\tfor match in re.finditer(r'\\w+', text):\n\t\t\tr_start = match.start() + self.start\n\t\t\tr_end = match.end() + self.start\n\t\t\tself.targets.append(sublime.Region(r_start, r_start+1))\n\t\t\tself.faded.append(sublime.Region(r_start+1,r_end))\n\n\t\tif not self.forward:\n\t\t\tself.targets.reverse()\n\t\t\tself.faded.reverse()\n\n\t\tif self.in_word():\n\t\t\tself.targets.pop(0)\n\t\t\tself.faded.pop(0)",
"def subst_path(self, env, target, source):\n result = []\n for type, value in self.pathlist:\n if type == TYPE_STRING_SUBST:\n value = env.subst(value, target=target, source=source,\n conv=node_conv)\n if SCons.Util.is_Sequence(value):\n result.extend(SCons.Util.flatten(value))\n elif value:\n result.append(value)\n elif type == TYPE_OBJECT:\n value = node_conv(value)\n if value:\n result.append(value)\n elif value:\n result.append(value)\n return tuple(result)",
"def get_targets(self, targets: str, level: str) -> list:\n targets_split = targets.split(\",\")\n targets_list = []\n\n if level == self.NotificationLevel.emergency.value:\n if \"everyone\" in targets_split:\n targets_list.append(HOUSE[NOTIFIER])\n for person, attribute in PERSONS.items():\n targets_list.append(attribute[NOTIFIER])\n else:\n if \"home\" in targets_split:\n targets_list.append(HOUSE[NOTIFIER])\n for person, attribute in PERSONS.items():\n if person in targets_split:\n targets_list.append(attribute[NOTIFIER])\n else:\n if \"everyone\" in targets_split:\n targets_list.append(HOUSE[NOTIFIER])\n for person, attribute in PERSONS.items():\n if self.target_available(person):\n targets_list.append(attribute[NOTIFIER])\n else:\n if \"home\" in targets_split:\n targets_list.append(HOUSE[NOTIFIER])\n for person, attribute in PERSONS.items():\n if person in targets_split and self.target_available(person):\n targets_list.append(attribute[NOTIFIER])\n\n return targets_list",
"async def get_makefile_targets(path):\n output = await check_output([\n CMAKE_EXECUTABLE, '--build', path, '--target', 'help'], cwd=path)\n lines = output.decode().splitlines()\n prefix = '... '\n return [line[len(prefix):] for line in lines if line.startswith(prefix)]",
"def get_named_target_values(self, target):\n return self._g.get_named_target_values(target)",
"def substituteParallelTree(replaces, patterns, tree) :\n if len(replaces) < len(patterns) :\n error(\"substituteParallelTree: mismatch of patterns with replacements\")\n return []\n # traverse tree and try to match subtrees against any of patterns:\n if tree in patterns :\n for i in range(len(patterns)) : # find which one...\n if tree == patterns[i] :\n ans = replaces[i]\n break\n elif isinstance(tree, list) :\n ans = []\n for subtree in tree :\n ans.append(substituteParallelTree(replaces, patterns, subtree))\n else :\n ans = tree\n return ans",
"def substitute_vars(self, substitutions):\n return Type.engine.substitute_in(self, substitutions)",
"def _substitute(valueish, replacements, root=()):\n if isinstance(valueish, dict):\n return type(valueish)({\n k: _substitute(v, replacements, root + (k, ))\n for (k, v) in valueish.items()\n })\n elif isinstance(valueish, (tuple, list)):\n return type(valueish)((\n _substitute(v, replacements, root + (ix, ))\n for (ix, v) in enumerate(valueish)))\n else:\n return replacements[root]",
"def search_match(value):\n names=[]\n\n for name,values in params.iteritems():\n for v in values:\n if v == value: \n names.append(name)\n\n return names",
"def promote_trigger(source: list, trigger: str) -> list:\n result = []\n r = re.compile(trigger)\n for item in source:\n if isinstance(item, str) and r.match(item):\n result = [item] + result\n else:\n result.append(item)\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Unpack the given config_dict, flattening key names recursively.
|
def unpack(config_dict: dict, prefix: str = "") -> dict:
# We don't want an extra "." in the beginning.
real_prefix = "" if prefix == "" else prefix + "."
output_dict = {}
for key, value in config_dict.items():
if isinstance(value, dict):
output_dict.update(unpack(value, real_prefix + key))
else:
output_dict[real_prefix + key] = value
return output_dict
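
# Usage sketch: unpack flattens nested keys into dotted names. The key names used
# here are hypothetical.
nested = {"synthesis": {"clock": {"period": "1ns"}}, "top": "Top"}
assert unpack(nested) == {"synthesis.clock.period": "1ns", "top": "Top"}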
|
[
"def unpack(config_dict, prefix=\"\"):\n # We don't want an extra \".\" in the beginning.\n real_prefix = \"\" if prefix == \"\" else prefix + \".\"\n output_dict = {}\n for key, value in config_dict.items():\n if isinstance(value, dict):\n output_dict.update(unpack(value, real_prefix + key))\n else:\n output_dict[real_prefix + key] = value\n return output_dict",
"def unflatten(flat_dict):\n unflat_dict = {}\n\n for compound_key, value in flat_dict.items():\n curr_dict = unflat_dict\n parts = compound_key.split(\".\")\n for key in parts[:-1]:\n curr_value = curr_dict.get(key)\n if key not in curr_dict:\n curr_dict[key] = {}\n curr_dict = curr_dict[key]\n elif isinstance(curr_value, dict):\n curr_dict = curr_value\n else:\n raise ConfigurationError(\"flattened dictionary is invalid\")\n if not isinstance(curr_dict, dict) or parts[-1] in curr_dict:\n raise ConfigurationError(\"flattened dictionary is invalid\")\n else:\n curr_dict[parts[-1]] = value\n\n return unflat_dict",
"def unflatten_dict(d, *, sep=\".\"):\n res, dotted = partition(lambda i: sep in i[0], d.items())\n res = dict(res)\n for k, val in dotted:\n *parts, name = k.split(sep)\n sub = res\n for part in parts:\n sub = sub.setdefault(part, {})\n _merge(sub, name, val)\n return res",
"def _flatten_params(d):\n flat = {}\n for k, v in d.items():\n if isinstance(v, dict):\n flat.update(_flatten_params(v))\n elif isinstance(v, list):\n # clean up lists to be acceptable output names\n # e.g. goes from [1, 2, 3] to '1_2_3'\n trim_list = str(v)\n trim_list = re.sub(\n r\"[\\[\\]\\s']\", \"\", trim_list\n ) # remove [, ], spaces and quotes\n trim_list = re.sub(\n \"[^A-Za-z0-9|._-]\", \"_\", trim_list\n ) # turn unacceptable characters into underscores\n flat[k] = trim_list\n else:\n flat[k] = re.sub(\n \"[^A-Za-z0-9|._-]\", \"_\", str(v)\n ) # turn unacceptable characters into underscores\n return flat",
"def flatten_pipes_dict(pipes_dict : dict) -> list:\n pipes_list = []\n for ck in pipes_dict.values():\n for mk in ck.values():\n pipes_list += list(mk.values())\n return pipes_list",
"def test_flatten_ignore_keys(self):\n dic = {\n 'a': {'a': [1, 2, 3]},\n 'b': {'b': 'foo', 'c': 'bar'},\n 'c': {'c': [{'foo': 5, 'bar': 6, 'baz': [1, 2, 3]}]}\n }\n expected = {\n 'a_a_0': 1,\n 'a_a_1': 2,\n 'a_a_2': 3\n }\n actual = flatten(dic, root_keys_to_ignore={'b', 'c'})\n self.assertEqual(actual, expected)",
"def _pop_out_yaml_from_config(config):\n for key in config._config.keys():\n config._config[key].pop(\"yaml\", None)\n\n for key in config._subconfigs.keys():\n _pop_out_yaml_from_config(config._subconfigs[key])",
"def _recurse(config, rels, dmaap):\n if isinstance(config, list):\n return [_recurse(item, rels, dmaap) for item in config]\n if isinstance(config, dict):\n for key in config:\n config[key] = _recurse(config[key], rels, dmaap)\n return config\n if isinstance(config, six.string_types):\n return _replace_value(config, rels, dmaap)\n # not a dict, not a list, not a string, nothing to do.\n return config",
"def unpack(input_dict):\n import json\n with open(input_dict) as f:\n out_dict = json.load(f)\n return out_dict",
"def flatten(data: Dict) -> Dict[str, Any]:\n return recursive_flatten(\"\", data)",
"def flatten_dictionary(dictionary, flatten_char = '_'):\n\n\n def inner_function(sub_dict, name_beginning):\n\n inner_dict = {}\n\n for temp_key in sub_dict.keys():\n\n if name_beginning != '':\n new_name_beginning = name_beginning + flatten_char + temp_key\n else:\n new_name_beginning = temp_key\n\n if type(sub_dict[temp_key]) == dict:\n\n new_dictionary = inner_function(sub_dict[temp_key], new_name_beginning)\n for temp_inner_key in new_dictionary.keys():\n inner_dict[temp_inner_key] = new_dictionary[temp_inner_key]\n\n else:\n inner_dict[new_name_beginning] = sub_dict[temp_key]\n\n return inner_dict\n\n flattened_dictionary = inner_function(dictionary, '')\n return flattened_dictionary",
"def flatten_dict(dictionary, keys=tuple()):\n result = {}\n for name in dictionary:\n if isinstance(dictionary[name],dict):\n flat = flatten_dict(dictionary[name],keys=keys + (str(name),))\n result.update(flat)\n else:\n result[keys + (str(name),)] = dictionary[name]\n return result",
"def _collapse_dotdict(dic, first_level_keys):\n _dic = defaultdict(dict)\n for key in dic:\n entry_processed = False\n for first_level_key in first_level_keys:\n if key.startswith(first_level_key):\n if key == first_level_key:\n # TypeError is raised if value not dict\n _dic[first_level_key].update(dic[key])\n entry_processed = True\n elif key.startswith(first_level_key + '.'):\n if key[len(first_level_key) + 1:]:\n _key = key[len(first_level_key) + 1:]\n _dic[first_level_key][_key] = dic[key]\n entry_processed = True\n if not entry_processed:\n raise ValueError(f\"Invalid entry for key '{key}'.\")\n return _dic",
"def unpack_dic(dic):\n for k, v in dic.items():\n print k + ':', v",
"def flatten_dict(d, level=None, sep='.'):\n def items():\n # somehow this function prevents a proper traceback - so I put a check a few lines below.\n for key, value in d.items():\n if (level is None or level>0) and isinstance(value, dict):\n for subkey, subvalue in flatten_dict(value, level=level-1 if level is not None else None, sep=sep).items():\n yield key + sep + subkey, subvalue\n else:\n yield key, value\n\n if d is None:\n raise ValueError('dictionary to be flattened is empty')\n return dict(items())",
"def flatten_dict(nested_dict, flattening_key='.'):\n\toutput = {}\n\tfor k, v in nested_dict.items():\n\t\tif isinstance(v, dict):\n\t\t\tfor k2, v2 in v.items():\n\t\t\t\tif isinstance(v2, dict):\n\t\t\t\t\traise NotImplementedError('Cannot flatten triple nested dicts')\n\t\t\t\tflat_key = k + flattening_key + k2\n\t\t\t\toutput[flat_key] = v2\n\t\telse:\n\t\t\toutput[k] = v\n\treturn output",
"def deepsubst_action(config_dict: dict, key: str, value: Any) -> None:\n def do_subst(oldval: Any) -> Any:\n if isinstance(oldval, str):\n # This is just regular subst\n return subst_str(oldval, lambda key: config_dict[key])\n if isinstance(oldval, list):\n return list(map(do_subst, oldval))\n if isinstance(oldval, dict):\n # We need to check for _deepsubst_meta here\n newval = {} # type: Dict\n for k, v in oldval.items():\n if isinstance(k, str):\n if k.endswith(\"_deepsubst_meta\"):\n base = k.replace(\"_deepsubst_meta\", \"\")\n if base not in oldval:\n raise ValueError(f\"Deepsubst meta key provided, but there is no matching base key: {k}\")\n # Note that we don't add the meta back to newval.\n else:\n meta_key = f\"{k}_deepsubst_meta\"\n if meta_key in oldval:\n # Do the deepsubst_meta, whatever it is.\n meta = oldval[meta_key]\n if meta in DeepSubstMetaDirectives:\n if isinstance(v, str):\n newval[k] = DeepSubstMetaDirectives[meta](config_dict, v)\n else:\n raise ValueError(f\"Deepsubst metas not supported on non-string values: {v}\")\n else:\n err_keys = \", \".join(DeepSubstMetaDirectives.keys())\n raise ValueError(f\"Unknown deepsubst_meta type: {meta}. Valid options are [{err_keys}].\")\n else:\n newval[k] = do_subst(v)\n else:\n # k is not an instance of a string.\n # Will this ever happen? It's possible you could have {1: \"foo\"}...\n newval[k] = do_subst(v)\n return newval\n return oldval\n\n config_dict[key] = do_subst(value)",
"def flattenSpiderConfig(run_spider_config) :\n\tspider_config_flat = {}\n\tfor conf_class, conf_set in run_spider_config.iteritems() :\n\t\tif conf_class != \"_id\" :\n\t\t\tfor conf_field, conf_data in conf_set.iteritems() : \n\t\t\t\tspider_config_flat[conf_field] = conf_data\n\treturn spider_config_flat",
"def _convert_config(cfg: 'OmegaConf'):\n if not _HAS_HYDRA:\n logging.error(\"This function requires Hydra/Omegaconf and it was not installed.\")\n exit(1)\n\n # Get rid of cls -> _target_.\n if 'cls' in cfg and '_target_' not in cfg:\n cfg._target_ = cfg.pop('cls')\n\n # Get rid of params.\n if 'params' in cfg:\n params = cfg.pop('params')\n for param_key, param_val in params.items():\n cfg[param_key] = param_val\n\n # Recursion.\n try:\n for _, sub_cfg in cfg.items():\n if isinstance(sub_cfg, DictConfig):\n _convert_config(sub_cfg)\n except omegaconf_errors.OmegaConfBaseException as e:\n logging.warning(f\"Skipped conversion for config/subconfig:\\n{cfg}\\n Reason: {e}.\")",
"def nested_dict_to_dunder(d):\n d_copy = d.copy()\n for key, value in d.items():\n if isinstance(d[key], dict):\n for k_inner, v_inner in d[key].items():\n d_copy[key + '__' + k_inner] = v_inner\n return d_copy"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Expand the meta directives for the given config dict and return a new dictionary containing the updated settings with respect to the base config_dict.
|
def update_and_expand_meta(config_dict: dict, meta_dict: dict) -> dict:
assert isinstance(config_dict, dict)
assert isinstance(meta_dict, dict)
newdict = deepdict(config_dict)
# Find meta directives.
meta_dict = deepdict(meta_dict) # create a copy so we can remove items.
meta_dict_keys = list(meta_dict.keys())
meta_keys = filter(lambda k: k.endswith("_meta"), meta_dict_keys)
# Update current config path to match meta dict's (used by prependlocal/deepsubst_local)
if _CONFIG_PATH_KEY in meta_dict_keys:
newdict[_CONFIG_PATH_KEY] = meta_dict[_CONFIG_PATH_KEY]
# Deal with meta directives.
meta_len = len("_meta")
for meta_key in meta_keys:
setting = meta_key[:-meta_len]
meta_type_from_dict = meta_dict[meta_key] # type: Union[str, List[str]]
meta_directives = [] # type: List[str]
if isinstance(meta_type_from_dict, str):
meta_directives = [meta_type_from_dict]
else:
if not isinstance(meta_type_from_dict, list):
raise ValueError("A meta directive must either be a string or a list of strings")
meta_directives = meta_type_from_dict
# Process each meta type in order.
seen_lazy = False # type: bool
for meta_type in meta_directives:
if not isinstance(meta_type, str):
raise TypeError("meta_type was not a string: " + repr(meta_type))
# If it's a lazy meta, skip it for now since they are lazily
# processed at the very end.
if meta_type.startswith("dynamic"):
raise ValueError(
f"Found meta type {meta_type}. "
"Dynamic meta directives were renamed to lazy meta directives after issue #134. "
"Please change your metas from dynamic* to lazy*")
if meta_type.startswith("lazy"):
lazy_base_meta_type = meta_type[len("lazy"):]
if lazy_base_meta_type not in get_meta_directives():
raise ValueError(f"The type of lazy meta variable {meta_key} is not supported ({meta_type})" % (meta_key, meta_type))
if seen_lazy:
raise ValueError("Multiple lazy directives in a single directive array not supported yet")
seen_lazy = True
update_dict = {} # type: dict
# Check if this lazy meta references itself by checking if any of its targets is itself.
targets = get_meta_directives()[lazy_base_meta_type].target_settings(setting, meta_dict[setting])
                if setting in targets:
# If it does, rename this lazy meta to reference a new base.
# e.g. if a (dict 2) -> a (dict 1), rename "a (dict 1)" to a_1.
next_index = _get_next_free_index(newdict)
new_base_setting = f"{setting}_{next_index}"
new_value_meta = get_meta_directives()[lazy_base_meta_type].rename_target(setting,
meta_dict[setting],
setting,
new_base_setting) # type: Optional[Tuple[Any, str]]
if new_value_meta is None:
raise ValueError(
f"Failed to rename lazy setting which depends on itself ({setting})")
new_value, new_meta = new_value_meta
# Rename base setting to new_base_setting, and add the new setting.
update_dict.update({
new_base_setting: newdict[setting],
setting: new_value,
setting + "_meta": "lazy" + new_meta # these are lazy metas
})
if setting + "_meta" in newdict:
update_dict.update({
new_base_setting + "_meta": newdict[setting + "_meta"]
})
else:
# Store it into newdict and skip processing now.
update_dict.update({
setting: meta_dict[setting],
setting + "_meta": meta_type
})
newdict.update(update_dict)
continue
if seen_lazy:
raise ValueError("Cannot use a non-lazy meta directive after a lazy one")
try:
meta_func = get_meta_directives()[meta_type].action
except KeyError as exc:
raise ValueError(f"The type of meta variable {meta_key} is not supported ({meta_type})") from exc
meta_func(newdict, setting, meta_dict[setting])
# Update meta_dict if there are multiple meta directives.
meta_dict[setting] = newdict[setting]
del meta_dict[meta_key]
del meta_dict[setting]
newdict.update(deepdict(meta_dict)) # Update everything else.
return newdict
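
# Hedged sketch of the calling convention, assuming the "append" directive provided
# by get_meta_directives(); the setting names here are hypothetical.
example_base = {"synthesis.inputs": ["top.v"]}
example_meta = {"synthesis.inputs": ["extra.v"], "synthesis.inputs_meta": "append"}
# update_and_expand_meta(example_base, example_meta) is expected to apply the append
# directive and consume the _meta key, yielding:
expected = {"synthesis.inputs": ["top.v", "extra.v"]}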
|
[
"def get_meta_directives() -> Dict[str, MetaDirective]:\n directives = {} # type: Dict[str, MetaDirective]\n\n # Helper functions to implement each meta directive.\n def append_action(config_dict: dict, key: str, value: Any) -> None:\n if key not in config_dict:\n config_dict[key] = []\n\n if not isinstance(config_dict[key], list):\n raise ValueError(f\"Trying to append to non-list setting {key}\")\n if not isinstance(value, list):\n raise ValueError(f\"Trying to append to list {key} with non-list {value}\")\n config_dict[key] += value\n\n def append_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[Tuple[Any, str]]:\n return [replacement_setting, value], \"crossappend\"\n\n # append depends only on itself\n directives['append'] = MetaDirective(action=append_action,\n target_settings=lambda key, value: [key],\n rename_target=append_rename)\n\n def crossappend_decode(value: Any) -> Tuple[str, list]:\n assert isinstance(value, list), \"crossappend takes a list of two elements\"\n assert len(value) == 2, \"crossappend takes a list of two elements\"\n target_setting = value[0] # type: str\n append_value = value[1] # type: list\n assert isinstance(target_setting, str), \"crossappend target setting must be a string\"\n assert isinstance(append_value, list), \"crossappend must append a list\"\n return target_setting, append_value\n\n # crossappend takes a list that has two elements.\n # The first is the target list (the list to append to), and the second is\n # a list to append to the target list.\n # e.g. if base has [\"1\"] and crossappend has [\"base\", [\"2\", \"3\"]], then\n # the result will be [\"1\", \"2\", \"3\"].\n def crossappend_action(config_dict: dict, key: str, value: Any) -> None:\n target_setting, append_value = crossappend_decode(value)\n config_dict[key] = config_dict[target_setting] + append_value\n\n def crossappend_targets(key: str, value: Any) -> List[str]:\n target_setting, append_value = crossappend_decode(value)\n return [target_setting]\n\n def crossappend_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[\n Tuple[Any, str]]:\n crossappend_target, append_value = crossappend_decode(value)\n return [replacement_setting if crossappend_target == target_setting else crossappend_target,\n append_value], \"crossappend\"\n\n directives['crossappend'] = MetaDirective(action=crossappend_action,\n target_settings=crossappend_targets,\n rename_target=crossappend_rename)\n\n def crossappendref_decode(value: Any) -> Tuple[str, str]:\n assert isinstance(value, list), \"crossappendref takes a list of two elements\"\n assert len(value) == 2, \"crossappendref takes a list of two elements\"\n target_key = value[0] # type: str\n append_key = value[1] # type: str\n assert isinstance(target_key, str), \"crossappendref target setting must be a string\"\n assert isinstance(append_key, str), \"crossappend append list setting must be a string\"\n return target_key, append_key\n\n # crossappendref takes a list that has two elements.\n # The first is the target list (the list to append to), and the second is\n # a setting that contains a list to append.\n # e.g. 
if base has [\"1\"], app has [\"2\", \"3\"], and crossappend has [\"base\", \"app\"], the result\n # is [\"1\", \"2\", \"3\"].\n def crossappendref_action(config_dict: dict, key: str, value: Any) -> None:\n target_setting, append_setting = crossappendref_decode(value)\n config_dict[key] = config_dict[target_setting] + config_dict[append_setting]\n\n def crossappendref_targets(key: str, value: Any) -> List[str]:\n target_setting, append_setting = crossappendref_decode(value)\n return [target_setting, append_setting]\n\n def crossappendref_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[\n Tuple[Any, str]]:\n target, append = crossappendref_decode(value)\n\n def replace_if_target_setting(setting: str) -> str:\n \"\"\"Helper function to replace the given setting with the\n replacement if it is equal to target_setting.\"\"\"\n return replacement_setting if setting == target_setting else setting\n\n return [replace_if_target_setting(target),\n replace_if_target_setting(append)], \"crossappendref\"\n\n directives['crossappendref'] = MetaDirective(action=crossappendref_action,\n target_settings=crossappendref_targets,\n rename_target=crossappendref_rename)\n\n def prepend_action(config_dict: dict, key: str, value: Any) -> None:\n if key not in config_dict:\n config_dict[key] = []\n\n if not isinstance(config_dict[key], list):\n raise ValueError(f\"Trying to prepend to non-list setting {key}\")\n if not isinstance(value, list):\n raise ValueError(f\"Trying to prepend to list {key} with non-list {value}\")\n config_dict[key] = value + config_dict[key]\n\n def prepend_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[Tuple[Any, str]]:\n return [replacement_setting, value], \"crossprepend\"\n\n # prepend depends only on itself\n directives['prepend'] = MetaDirective(action=prepend_action,\n target_settings=lambda key, value: [key],\n rename_target=prepend_rename)\n\n def crossprepend_decode(value: Any) -> Tuple[str, list]:\n assert isinstance(value, list), \"crossprepend takes a list of two elements\"\n assert len(value) == 2, \"crossprepend takes a list of two elements\"\n target_setting = value[0] # type: str\n prepend_value = value[1] # type: list\n assert isinstance(target_setting, str), \"crossprepend target setting must be a string\"\n assert isinstance(prepend_value, list), \"crossprepend must prepend a list\"\n return target_setting, prepend_value\n\n # crossprepend takes a list that has two elements.\n # The first is the target list (the list to prepend to), and the second is\n # a list to prepend to the target list.\n # e.g. 
if base has [\"1\"] and crossprepend has [\"base\", [\"2\", \"3\"]], then\n # the result will be [\"2\", \"3\", \"1\"].\n def crossprepend_action(config_dict: dict, key: str, value: Any) -> None:\n target_setting, prepend_value = crossprepend_decode(value)\n config_dict[key] = prepend_value + config_dict[target_setting]\n\n def crossprepend_targets(key: str, value: Any) -> List[str]:\n target_setting, prepend_value = crossprepend_decode(value)\n return [target_setting]\n\n def crossprepend_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[\n Tuple[Any, str]]:\n crossprepend_target, prepend_value = crossprepend_decode(value)\n return [replacement_setting if crossprepend_target == target_setting else crossprepend_target,\n prepend_value], \"crossprepend\"\n\n directives['crossprepend'] = MetaDirective(action=crossprepend_action,\n target_settings=crossprepend_targets,\n rename_target=crossprepend_rename)\n\n def crossprependref_decode(value: Any) -> Tuple[str, str]:\n assert isinstance(value, list), \"crossprependref takes a list of two elements\"\n assert len(value) == 2, \"crossprependref takes a list of two elements\"\n target_key = value[0] # type: str\n prepend_key = value[1] # type: str\n assert isinstance(target_key, str), \"crossprependref target setting must be a string\"\n assert isinstance(prepend_key, str), \"crossprepend prepend list setting must be a string\"\n return target_key, prepend_key\n\n # crossprependref takes a list that has two elements.\n # The first is the target list (the list to prepend to), and the second is\n # a setting that contains a list to prepend.\n # e.g. if base has [\"1\"], app has [\"2\", \"3\"], and crossprepend has [\"base\", \"app\"], the result\n # is [\"2\", \"3\", \"1\"].\n def crossprependref_action(config_dict: dict, key: str, value: Any) -> None:\n target_setting, prepend_setting = crossprependref_decode(value)\n config_dict[key] = config_dict[prepend_setting] + config_dict[target_setting]\n\n def crossprependref_targets(key: str, value: Any) -> List[str]:\n target_setting, prepend_setting = crossprependref_decode(value)\n return [target_setting, prepend_setting]\n\n def crossprependref_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[\n Tuple[Any, str]]:\n target, prepend = crossprependref_decode(value)\n\n def replace_if_target_setting(setting: str) -> str:\n \"\"\"Helper function to replace the given setting with the\n replacement if it is equal to target_setting.\"\"\"\n return replacement_setting if setting == target_setting else setting\n\n return [replace_if_target_setting(target),\n replace_if_target_setting(prepend)], \"crossprependref\"\n\n directives['crossprependref'] = MetaDirective(action=crossprependref_action,\n target_settings=crossprependref_targets,\n rename_target=crossprependref_rename)\n\n def subst_str(input_str: str, replacement_func: Callable[[str], str]) -> str:\n \"\"\"Substitute ${...}\"\"\"\n return re.sub(__VARIABLE_EXPANSION_REGEX, lambda x: replacement_func(x.group(1)), input_str)\n\n def subst_action(config_dict: dict, key: str, value: Any) -> None:\n def perform_subst(value: Union[str, List[str]]) -> Union[str, List[str]]:\n \"\"\"\n Perform substitutions for the given value.\n If value is a string, perform substitutions in the string. 
If value is a list, then perform substitutions\n in every string in the list.\n :param value: String or list\n :return: String or list but with everything substituted.\n \"\"\"\n newval = \"\" # type: Union[str, List[str]]\n\n if isinstance(value, list):\n newval = list(map(lambda input_str: subst_str(input_str, lambda key: config_dict[key]), value))\n else:\n newval = subst_str(value, lambda key: config_dict[key])\n return newval\n\n config_dict[key] = perform_subst(value)\n\n def subst_targets(key: str, value: Any) -> List[str]:\n # subst can operate on either a string or a list\n\n # subst_strings is e.g. [\"${a} 1\", \"${b} 2\"]\n subst_strings = [] # type: List[str]\n if isinstance(value, str):\n subst_strings.append(value)\n elif isinstance(value, list):\n for i in value:\n assert isinstance(i, str)\n subst_strings = value\n else:\n raise ValueError(f\"subst must operate on a str or List[str]; got {value} instead\")\n\n output_vars = [] # type: List[str]\n\n for subst_value in subst_strings:\n matches = re.finditer(__VARIABLE_EXPANSION_REGEX, subst_value, re.DOTALL)\n for match in matches:\n output_vars.append(match.group(1))\n\n return output_vars\n\n def subst_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[Tuple[Any, str]]:\n assert isinstance(value, str)\n\n if target_setting not in subst_targets(key, value):\n return None\n\n new_value = subst_str(value, lambda key: \"${\" + replacement_setting + \"}\" if key == target_setting else key)\n return new_value, \"subst\"\n\n directives['subst'] = MetaDirective(action=subst_action,\n target_settings=subst_targets,\n rename_target=subst_rename)\n\n def crossref_check_and_cast(k: Any) -> str:\n if not isinstance(k, str):\n raise ValueError(\"crossref (if used with lists) can only be used only with lists of strings\")\n return k\n\n def crossref_action(config_dict: dict, key: str, value: Any) -> None:\n \"\"\"\n Copy the contents of the referenced key for use as this key's value.\n If the reference is a list, then apply the crossref for each element\n of the list.\n \"\"\"\n if isinstance(value, str):\n config_dict[key] = config_dict[value]\n elif isinstance(value, list):\n def check_and_get(k: Any) -> Any:\n return config_dict[crossref_check_and_cast(k)]\n\n config_dict[key] = list(map(check_and_get, value))\n elif isinstance(value, numbers.Number):\n # bools are instances of numbers.Number for some weird reason\n raise ValueError(\"crossref cannot be used with numbers and bools\")\n else:\n raise NotImplementedError(\"crossref not implemented on other types yet\")\n\n def crossref_targets(key: str, value: Any) -> List[str]:\n if isinstance(value, str):\n return [value]\n if isinstance(value, list):\n return list(map(crossref_check_and_cast, value))\n if isinstance(value, numbers.Number):\n # bools are instances of numbers.Number for some weird reason\n raise ValueError(\"crossref cannot be used with numbers and bools\")\n raise NotImplementedError(\"crossref not implemented on other types yet\")\n\n def crossref_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[\n Tuple[Any, str]]:\n def change_if_target(x: str) -> str:\n if x == target_setting:\n return replacement_setting\n return x\n\n if isinstance(value, str):\n return [change_if_target(value)], \"crossref\"\n if isinstance(value, list):\n return list(map(change_if_target, map(crossref_check_and_cast, value))), \"crossref\"\n if isinstance(value, numbers.Number):\n # bools are instances of numbers.Number 
for some weird reason\n raise ValueError(\"crossref cannot be used with numbers and bools\")\n raise NotImplementedError(\"crossref not implemented on other types yet\")\n\n directives['crossref'] = MetaDirective(action=crossref_action,\n target_settings=crossref_targets,\n rename_target=crossref_rename)\n\n def transclude_action(config_dict: dict, key: str, value: Any) -> None:\n \"\"\"Transclude the contents of the file pointed to by value.\"\"\"\n assert isinstance(value, str), \"Path to file for transclusion must be a string\"\n with open(value, \"r\") as f:\n file_contents = str(f.read())\n config_dict[key] = file_contents\n\n def transclude_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[\n Tuple[Any, str]]:\n # This meta directive doesn't depend on any settings\n return value, \"transclude\"\n\n # transclude depends on external files, not other settings.\n directives['transclude'] = MetaDirective(action=transclude_action,\n target_settings=lambda key, value: [],\n rename_target=transclude_rename)\n\n def json2list_action(config_dict: dict, key: str, value: Any) -> None:\n \"\"\"Turn the value of the key (JSON list) into a list.\"\"\"\n assert isinstance(value, str), \"json2list requires a JSON string that is a list\"\n parsed = json.loads(value)\n assert isinstance(parsed, list), \"json2list requires a JSON string that is a list\"\n config_dict[key] = parsed\n\n def json2list_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[\n Tuple[Any, str]]:\n # This meta directive doesn't depend on any settings\n return value, \"json2list\"\n\n # json2list does not depend on anything\n directives['json2list'] = MetaDirective(action=json2list_action,\n target_settings=lambda key, value: [],\n rename_target=json2list_rename)\n\n def prependlocal_action(config_dict: dict, key: str, value: Any) -> None:\n \"\"\"Prepend the local path of the config dict.\"\"\"\n if isinstance(value, list):\n new_values = []\n for v in value:\n new_values.append(os.path.join(config_dict[_CONFIG_PATH_KEY], str(v)))\n config_dict[key] = new_values\n else:\n config_dict[key] = os.path.join(config_dict[_CONFIG_PATH_KEY], str(value))\n\n def prependlocal_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[\n Tuple[Any, str]]:\n # This metal directive doesn't depend on any settings\n return value, \"prependlocal\"\n\n directives['prependlocal'] = MetaDirective(action=prependlocal_action,\n target_settings=lambda key, value: [],\n rename_target=prependlocal_rename)\n\n def deepsubst_action(config_dict: dict, key: str, value: Any) -> None:\n \"\"\"\n Perform a deep substitution on the value provided. 
This will replace any variables that occur in strings\n of the form ${...} and will also do a special meta replacement on keys which end in _deepsubst_meta.\n \"\"\"\n def do_subst(oldval: Any) -> Any:\n if isinstance(oldval, str):\n # This is just regular subst\n return subst_str(oldval, lambda key: config_dict[key])\n if isinstance(oldval, list):\n return list(map(do_subst, oldval))\n if isinstance(oldval, dict):\n # We need to check for _deepsubst_meta here\n newval = {} # type: Dict\n for k, v in oldval.items():\n if isinstance(k, str):\n if k.endswith(\"_deepsubst_meta\"):\n base = k.replace(\"_deepsubst_meta\", \"\")\n if base not in oldval:\n raise ValueError(f\"Deepsubst meta key provided, but there is no matching base key: {k}\")\n # Note that we don't add the meta back to newval.\n else:\n meta_key = f\"{k}_deepsubst_meta\"\n if meta_key in oldval:\n # Do the deepsubst_meta, whatever it is.\n meta = oldval[meta_key]\n if meta in DeepSubstMetaDirectives:\n if isinstance(v, str):\n newval[k] = DeepSubstMetaDirectives[meta](config_dict, v)\n else:\n raise ValueError(f\"Deepsubst metas not supported on non-string values: {v}\")\n else:\n err_keys = \", \".join(DeepSubstMetaDirectives.keys())\n raise ValueError(f\"Unknown deepsubst_meta type: {meta}. Valid options are [{err_keys}].\")\n else:\n newval[k] = do_subst(v)\n else:\n # k is not an instance of a string.\n # Will this ever happen? It's possible you could have {1: \"foo\"}...\n newval[k] = do_subst(v)\n return newval\n return oldval\n\n config_dict[key] = do_subst(value)\n\n def deepsubst_targets(key: str, value: Any) -> List[str]:\n \"\"\"\n Look for all substitution targets (${...}) in value and return a list of the targets found.\n \"\"\"\n if isinstance(value, str):\n # This is just regular subst\n return subst_targets(key, value)\n if isinstance(value, (dict, list)):\n # Recursively find all strings\n def find_strings(x: Union[List, Dict]) -> List[str]:\n iterator = x # type: Iterable[Any]\n if isinstance(x, dict):\n iterator = x.values()\n\n output = [] # type: List\n for item in iterator:\n if isinstance(item, str):\n output.extend([s for s in subst_targets(key, item) if s not in output])\n elif isinstance(item, list) or isinstance(item, dict):\n output.extend([s for s in find_strings(item) if s not in output])\n return output\n\n return find_strings(value)\n raise ValueError(f\"deepsubst cannot be used with this type: {value}\")\n\n def deepsubst_rename(key: str, value: Any, target_setting: str, replacement_setting: str) -> Optional[Tuple[Any, str]]:\n \"\"\"\n Not implemented.\n \"\"\"\n raise NotImplementedError(\"Deepsubst does not support rename\")\n\n directives['deepsubst'] = MetaDirective(action=deepsubst_action,\n target_settings=deepsubst_targets,\n rename_target=deepsubst_rename)\n\n return directives",
"def _merge_conf_dicts(base, new_layer):\n for (section, items) in new_layer.items():\n if STANZA_MAGIC_KEY in items:\n magic_op = items[STANZA_MAGIC_KEY]\n if STANZA_OP_DROP in magic_op:\n # If this section exist in a parent (base), then drop it now\n if section in base:\n del base[section]\n continue # pragma: no cover (peephole optimization)\n if section in base:\n # TODO: Support other magic here...\n # Rip all the comments out of the new_layer, and prepend them (sequentially) to base\n comments = _extract_comments(items)\n if comments:\n inject_section_comments(base[section], prepend=comments)\n base[section].update(items)\n else:\n # TODO: Support other magic here too..., though with no parent info\n base[section] = items\n # Nothing to return, base is updated in-place",
"def expand_dict(doc, path, includes, current, cls=dict):\n cp = cls()\n # first merge any includes includes into cp\n templates: List[Mapping] = []\n assert isinstance(current, Mapping), current\n for (key, value) in current.items():\n if not isinstance(key, str):\n cp[key] = value\n continue\n if key.startswith(\"+\"):\n if key == mergeStrategyKey:\n cp[key] = value\n continue\n mergeKey = parse_merge_key(key)\n if not mergeKey:\n cp[key] = value\n continue\n foundTemplate = has_template(doc, mergeKey, value, path, cls)\n if not foundTemplate:\n includes.setdefault(path, []).append(_MissingInclude(mergeKey, value))\n cp[key] = value\n continue\n includes.setdefault(path, []).append((mergeKey, value))\n template = get_template(doc, mergeKey, value, path, cls, includes)\n if isinstance(template, Mapping):\n templates.append(template)\n elif mergeKey.include and template is None:\n continue # include path not found\n else:\n if len(current) > 1: # XXX include merge directive keys in count\n raise UnfurlError(\n f\"can not merge {mergeKey} with non-map value of type {type(template)}: {template}\"\n )\n else:\n return template # current dict is replaced with a value\n # elif key.startswith(\"q+\"):\n # cp[key[2:]] = value\n elif isinstance(value, Mapping):\n cp[key] = expand_dict(doc, path + (key,), includes, value, cls)\n elif isinstance(value, list):\n cp[key] = list(expand_list(doc, path + (key,), includes, value, cls))\n else:\n cp[key] = value\n\n if templates:\n accum = templates.pop(0)\n templates.append(cp)\n while templates:\n cls = getattr(templates[0], \"mapCtor\", cls)\n accum = merge_dicts(accum, templates.pop(0), cls)\n return accum\n else:\n return cp\n # e,g, merge_dicts(merge_dicts(a, b), cp)\n # return includes, reduce(lambda accum, next: merge_dicts(accum, next, cls), templates, {}), cp",
"def merge_with_master_config(self, config, defaults={}, delete_orphan_fields=False) -> dict:\n if isinstance(config, str):\n import json\n config = json.loads(config)\n properties = self.all_properties()\n config['fields'] = config.get('fields', dict())\n fields = config['fields']\n\n d_color = defaults.get('color', 'white')\n d_icon = defaults.get('icon', 'icons:default')\n\n if delete_orphan_fields:\n exist = {p.name() for p in properties}\n unexist = set(fields.keys()) - exist\n for name in unexist:\n del fields[name]\n\n for p in properties:\n field = fields.get(p.name(), {'show_in_search': False,\n 'combine_fields': False,\n 'number_of_rules': 0,\n 'glossaries': [],\n 'use_in_network_search': False,\n 'case_sensitive': False,\n 'show_as_link': 'text',\n 'blacklists': [],\n 'show_in_result': 'no',\n 'rule_extractor_enabled': False,\n 'search_importance': 1,\n 'group_name': '',\n 'show_in_facets': False,\n 'predefined_extractor': 'none',\n 'rule_extraction_target': ''})\n config['fields'][p.name()] = field\n field['screen_label'] = ' '.join(p.label())\n field['description'] = '\\n'.join(p.definition())\n field['name'] = p.name()\n\n # color\n if 'color' not in field:\n color = self.__merge_close_ancestor_color(p, fields, attr='color')\n field['color'] = color if color else d_color\n # icon\n if 'icon' not in field:\n icon = self.__merge_close_ancestor_color(p, fields, attr='icon')\n field['icon'] = icon if icon else d_icon\n # type\n if isinstance(p, OntologyObjectProperty):\n field['type'] = 'kg_id'\n else:\n try:\n field['type'] = self.__merge_xsd_to_type(next(iter(p.included_ranges())))\n except StopIteration:\n field['type'] = None\n return config",
"def apply_patch_to_config(\n config: dict,\n patch: dict\n ):\n from meerschaum.utils.packages import cascadict\n base = cascadict.CascaDict(config)\n new = base.cascade(patch)\n return new.copy_flat()",
"def extend_config_reference(config):\n def _parse_reference(keys, r):\n if hasattr(r, '__getitem__'):\n try:\n v = r.__getitem__(keys)\n return v\n except (KeyError, TypeError, IndexError):\n pass\n if isinstance(keys, tuple):\n v = _parse_reference(keys[0], r)\n if v is not None:\n if len(keys) == 1:\n return v\n return _parse_reference(keys[1:], v)\n return None\n\n def _sub_reference(cf, ori):\n it = cf.keys() if isinstance(cf, dict) else range(len(cf))\n for k in it:\n v = cf[k]\n if isinstance(v, (dict, list)):\n v = _sub_reference(v, ori)\n else:\n r = _parse_reference(v, ori)\n if r is not None:\n v = r\n cf[k] = v\n return cf\n\n replace = copy.deepcopy(config)\n return _sub_reference(replace, replace)",
"def _expand_variables(self, config, hostname):\n\n if 'hostname' in config:\n config['hostname'] = config['hostname'].replace('%h', hostname)\n else:\n config['hostname'] = hostname\n\n if 'port' in config:\n port = config['port']\n else:\n port = SSH_PORT\n\n user = os.getenv('USER')\n if 'user' in config:\n remoteuser = config['user']\n else:\n remoteuser = user\n\n host = socket.gethostname().split('.')[0]\n fqdn = LazyFqdn(config, host)\n homedir = os.path.expanduser('~')\n replacements = {'controlpath':\n [\n ('%h', config['hostname']),\n ('%l', fqdn),\n ('%L', host),\n ('%n', hostname),\n ('%p', port),\n ('%r', remoteuser),\n ('%u', user)\n ],\n 'identityfile':\n [\n ('~', homedir),\n ('%d', homedir),\n ('%h', config['hostname']),\n ('%l', fqdn),\n ('%u', user),\n ('%r', remoteuser)\n ],\n 'proxycommand':\n [\n ('%h', config['hostname']),\n ('%p', port),\n ('%r', remoteuser)\n ]\n }\n\n for k in config:\n if k in replacements:\n for find, replace in replacements[k]:\n if isinstance(config[k], list):\n for item in range(len(config[k])):\n if find in config[k][item]:\n config[k][item] = config[k][item].\\\n replace(find, str(replace))\n else:\n if find in config[k]:\n config[k] = config[k].replace(find, str(replace))\n return config",
"def merge_config(config):\r\n for key, value in config.items():\r\n if \".\" not in key:\r\n if isinstance(value, dict) and key in global_config:\r\n global_config[key].update(value)\r\n else:\r\n global_config[key] = value\r\n else:\r\n sub_keys = key.split('.')\r\n assert (\r\n sub_keys[0] in global_config\r\n ), \"the sub_keys can only be one of global_config: {}, but get: {}, please check your running command\".format(\r\n global_config.keys(), sub_keys[0])\r\n assert (\r\n sub_keys[1] in global_config[sub_keys[0]]\r\n ), \"the sub_keys can only be one of global_config: {}, but get: {}, please check your running command\".format(\r\n global_config[sub_keys[0]].keys(), sub_keys[1])\r\n cur = global_config[sub_keys[0]]\r\n for idx, sub_key in enumerate(sub_keys[1:]):\r\n if idx == len(sub_keys) - 2:\r\n cur[sub_key] = value\r\n else:\r\n cur = cur[sub_key]",
"def resolve_override(config, rels=[], dmaap={}):\n # use deepcopy to make sure that config is not touched\n return _recurse(copy.deepcopy(config), rels, dmaap)",
"def unpack(config_dict, prefix=\"\"):\n # We don't want an extra \".\" in the beginning.\n real_prefix = \"\" if prefix == \"\" else prefix + \".\"\n output_dict = {}\n for key, value in config_dict.items():\n if isinstance(value, dict):\n output_dict.update(unpack(value, real_prefix + key))\n else:\n output_dict[real_prefix + key] = value\n return output_dict",
"def expand_generators(config):\n\n to_delete = []\n for d, key, value in IOTools.nested_iter(config):\n if isinstance(value, str):\n if value.startswith(\"generate=\"):\n expression = re.sub(\"^generate=\\s*\", \"\", value)\n if expression.startswith(\"'\") and expression.startswith(\"'\"):\n expression = expression[1:-1]\n try:\n argument_list = eval(expression)\n except SyntaxError as ex:\n raise ValueError(\n \"error occured while evaluating generator \"\n \"expression {}: {}\".format(expression, ex))\n if isinstance(d, list):\n d.extend(argument_list)\n to_delete.append((d, key))\n else:\n d[key] = argument_list\n\n for d, key in to_delete[::-1]:\n del d[key]\n\n return config",
"def update_config_with_dvc_params(base_config):\n params = yaml.safe_load(open(\"params.yaml\"))\n\n if params is None:\n return base_config\n\n def _update(config, params):\n for key, value in params.items():\n if isinstance(value, dict):\n config[key] = _update(config.get(key, {}), value)\n else:\n config[key] = value\n return config\n\n return _update(base_config, params)",
"async def _apply_dynamic_config(self, dynamic_config, jupyterhub_config=None):\n to_set = self.flatten_dict_for_kv(dynamic_config, prefix=self.kv_traefik_prefix)\n if jupyterhub_config:\n to_set.update(\n self.flatten_dict_for_kv(\n jupyterhub_config, prefix=self.kv_jupyterhub_prefix\n )\n )\n self.log.debug(\"Setting key-value config %s\", to_set)\n await self._kv_atomic_set(to_set)",
"def extend_config(config, parameters):\n for namespace, cfg in parameters.items():\n # Allow one nesting\n if namespace not in config and isinstance(cfg, dict):\n for name, value in cfg.items():\n fullname = '%s_%s' % (namespace, name)\n config[fullname] = value\n else:\n config[namespace] = cfg",
"def update_config(self):\n for key_name, entry in self.config.config.items():\n self.update_config_entry(key_name, entry)",
"def update(self, config: dict):\n for key, value in config.items():\n setattr(self, key, value)",
"def adapt_clix_meta_yaml(raw: os.PathLike | StringIO | str, adapted: os.PathLike):\n from ..indices import generic # pylint: disable=import-outside-toplevel\n\n # freq_names = {\"annual\": \"A\", \"seasonal\": \"Q\", \"monthly\": \"M\", \"weekly\": \"W\"}\n freq_defs = {\"annual\": \"YS\", \"seasonal\": \"QS-DEC\", \"monthly\": \"MS\", \"weekly\": \"W\"}\n\n if isinstance(raw, os.PathLike):\n with open(raw) as f:\n yml = safe_load(f)\n else:\n yml = safe_load(raw)\n\n yml[\"realm\"] = \"atmos\"\n yml[\n \"doc\"\n ] = \"\"\" ===================\n CF Standard indices\n ===================\n\n Indicators found here are defined by the `clix-meta project`_. Adapted documentation from that repository follows:\n\n The repository aims to provide a platform for thinking about, and developing,\n a unified view of metadata elements required to describe climate indices (aka climate indicators).\n\n To facilitate data exchange and dissemination the metadata should, as far as possible,\n follow the Climate and Forecasting (CF) Conventions. Considering the very rich and diverse flora of\n climate indices this is however not always possible. By collecting a wide range of different indices\n it is easier to discover any common patterns and features that are currently not well covered by the\n CF Conventions. Currently identified issues frequently relate to standard_name or/and cell_methods\n which both are controlled vocabularies of the CF Conventions.\n\n .. _clix-meta project: https://github.com/clix-meta/clix-meta\n\"\"\"\n yml[\"references\"] = \"clix-meta https://github.com/clix-meta/clix-meta\"\n\n remove_ids = []\n rename_ids = {}\n for cmid, data in yml[\"indices\"].items():\n if \"reference\" in data:\n data[\"references\"] = data.pop(\"reference\")\n\n index_function = data.pop(\"index_function\")\n\n data[\"compute\"] = index_function[\"name\"]\n if getattr(generic, data[\"compute\"], None) is None:\n remove_ids.append(cmid)\n print(\n f\"Indicator {cmid} uses non-implemented function {data['compute']}, removing.\"\n )\n continue\n\n if (data[\"output\"].get(\"standard_name\") or \"\").startswith(\n \"number_of_days\"\n ) or cmid == \"nzero\":\n remove_ids.append(cmid)\n print(\n f\"Indicator {cmid} has a 'number_of_days' standard name\"\n \" and xclim disagrees with the CF conventions on the correct output units, removing.\"\n )\n continue\n\n if (data[\"output\"].get(\"standard_name\") or \"\").endswith(\"precipitation_amount\"):\n remove_ids.append(cmid)\n print(\n f\"Indicator {cmid} has a 'precipitation_amount' standard name\"\n \" and clix-meta has incoherent output units, removing.\"\n )\n continue\n\n rename_params = {}\n if index_function[\"parameters\"]:\n data[\"parameters\"] = index_function[\"parameters\"]\n for name, param in data[\"parameters\"].copy().items():\n if param[\"kind\"] in [\"operator\", \"reducer\"]:\n # Compatibility with xclim `op` notation for comparison symbols\n if name == \"condition\":\n data[\"parameters\"][\"op\"] = param[param[\"kind\"]]\n del data[\"parameters\"][name]\n else:\n data[\"parameters\"][name] = param[param[\"kind\"]]\n else: # kind = quantified\n if param.get(\"proposed_standard_name\") == \"temporal_window_size\":\n # Window, nothing to do.\n del data[\"parameters\"][name]\n elif isinstance(param[\"data\"], dict):\n # No value\n data[\"parameters\"][name] = {\n \"description\": param.get(\n \"long_name\",\n param.get(\n \"proposed_standard_name\", param.get(\"standard_name\")\n ).replace(\"_\", \" \"),\n ),\n \"units\": param[\"units\"],\n 
}\n rename_params[\n f\"{{{name}}}\"\n ] = f\"{{{list(param['data'].keys())[0]}}}\"\n else:\n # Value\n data[\"parameters\"][name] = f\"{param['data']} {param['units']}\"\n\n period = data.pop(\"default_period\")\n # data[\"allowed_periods\"] = [freq_names[per] for per in period[\"allowed\"].keys()]\n data.setdefault(\"parameters\", {})[\"freq\"] = {\"default\": freq_defs[period]}\n\n attrs = {}\n output = data.pop(\"output\")\n for attr, val in output.items():\n if val is None:\n continue\n if attr == \"cell_methods\":\n methods = []\n for i, cell_method in enumerate(val):\n # Construct cell_method string\n cm = \"\".join(\n [f\"{dim}: {meth}\" for dim, meth in cell_method.items()]\n )\n\n # If cell_method seems to be describing input data, and not the operation, skip.\n if i == 0:\n if cm in [ICM.get(v) for v in data[\"input\"].values()]:\n continue\n\n methods.append(cm)\n\n val = \" \".join(methods)\n\n elif attr in [\"var_name\", \"long_name\"]:\n for new, old in rename_params.items():\n val = val.replace(old, new)\n attrs[attr] = val\n data[\"cf_attrs\"] = [attrs]\n\n del data[\"ET\"]\n\n if \"{\" in cmid:\n rename_ids[cmid] = cmid.replace(\"{\", \"\").replace(\"}\", \"\")\n\n for old, new in rename_ids.items():\n yml[\"indices\"][new] = yml[\"indices\"].pop(old)\n\n for cmid in remove_ids:\n del yml[\"indices\"][cmid]\n\n yml[\"indicators\"] = yml.pop(\"indices\")\n\n with open(adapted, \"w\") as f:\n safe_dump(yml, f)",
"def _config_update_puppet(self, config_uuid, config_dict, force=False,\n host_uuids=None):\n host_updated = False\n\n personalities = config_dict['personalities']\n if not host_uuids:\n hosts = self.dbapi.ihost_get_list()\n else:\n hosts = [self.dbapi.ihost_get(host_uuid) for host_uuid in host_uuids]\n\n for host in hosts:\n if host.personality in personalities:\n # Never generate hieradata for uninventoried hosts, as their\n # interface config will be incomplete.\n valid_inventory_states = [\n constants.INV_STATE_INITIAL_INVENTORIED,\n constants.INV_STATE_REINSTALLING\n ]\n if host.inv_state not in valid_inventory_states:\n LOG.info(\n \"Cannot generate the configuration for %s, \"\n \"the host is not inventoried yet.\" % host.hostname)\n # We will allow controller nodes to re-generate manifests\n # when in an \"provisioning\" state. This will allow for\n # example the ntp configuration to be changed on an CPE\n # node before the \"worker_config_complete\" has been\n # executed.\n elif (force or\n host.invprovision in [constants.PROVISIONED, constants.UPGRADING] or\n (host.invprovision == constants.PROVISIONING and\n host.personality == constants.CONTROLLER)):\n if host.software_load == tsc.SW_VERSION:\n # We will not generate the hieradata in runtime here if the\n # software load of the host is different from the active\n # controller. The Hieradata of a host during an upgrade/rollback\n # will be saved by update_host_config_upgrade() to the\n # directory of the host's software load.\n self._puppet.update_host_config(host, config_uuid)\n host_updated = True\n else:\n LOG.info(\n \"Cannot regenerate the configuration for %s, \"\n \"the node is not ready. invprovision=%s\" %\n (host.hostname, host.invprovision))\n\n # ensure the system configuration is also updated if hosts require\n # a reconfiguration\n if host_updated:\n self._puppet.update_system_config()\n self._puppet.update_secure_system_config()",
"def config_dict(config):\n return dict((key, getattr(config, key)) for key in config.values)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Internal keys that shouldn't show up in any final config.
|
def internal_keys() -> Set[str]:
return {_CONFIG_PATH_KEY, _NEXT_FREE_INDEX_KEY}
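A minimal standalone sketch (not part of the original record) of how such internal keys might be stripped before emitting a final config. The constant values and the strip_internal helper below are hypothetical, added only for illustration.

from typing import Set

# Hypothetical stand-ins for the module-level constants referenced above.
_CONFIG_PATH_KEY = "_config_path"
_NEXT_FREE_INDEX_KEY = "_next_free_index"

def internal_keys() -> Set[str]:
    return {_CONFIG_PATH_KEY, _NEXT_FREE_INDEX_KEY}

def strip_internal(config: dict) -> dict:
    # Drop bookkeeping keys before emitting a final config.
    return {k: v for k, v in config.items() if k not in internal_keys()}

print(strip_internal({"top.module": "chip", "_config_path": "cfg.yml"}))
# -> {'top.module': 'chip'}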
|
[
"def valid_config_keys():\n click.echo(', '.join(get_class_properties(PipelineConfig)))",
"def _pullup_keys(self):\n for k in [\n \"definitions\",\n \"providers\",\n \"handlers\",\n \"remote_vars\",\n \"template_vars\",\n \"terraform_vars\",\n \"worker_options\",\n ]:\n if self.tf:\n setattr(self, f\"{k}_odict\", self.tf.get(k, dict()))\n else:\n setattr(self, f\"{k}_odict\", None)",
"def _keep_printable_keys(d):\n pass",
"def AUTHORIZED_KEYS(self):\n\t\treturn \"{} {}\".format(self.keytype, base64.b64encode(self.pubkey).decode('ascii'))",
"def additional_project_configuration_keys():\n return {'post-clean', 'configure-steps', 'environment', 'type', 'root'}",
"def key_is_deprecated(self, full_key):\n if full_key in self.__dict__[Map.DEPRECATED_KEYS]:\n print(\"Deprecated config key (ignoring): {}\".format(full_key))\n return True\n return False",
"def pypeit_file_keys(self):\n return super().pypeit_file_keys() + ['cenwave','lampstat01']",
"def keys(self):\n # FOR WHEN YOU MOVE TO PYTHON 3.5\n # return list(self.service_options |\n # {*self.container.keys()} |\n # {*self.host_config.keys()})\n # TODO: this is wrong\n return list(self.service_options |\n self.container.keys() |\n self.host_config.keys())",
"def _setall_init(self):\n for k, v in self._dict.iteritems():\n if k in PseudoDotDict._reserved:\n raise Exception('%s is a reserved key' % k)\n else:\n setattr(self, k, v)",
"def get_required_keys(self):\n return self.REQUIRED_KEYS",
"def test_keys_not_in_protected_keys(self):\n assert self.fh.keys() not in set(_PROTECTED_KEYS)\n\n for x in iter(self.fh):\n assert x not in _PROTECTED_KEYS",
"def _setall_init(self):\n for k, v in self.iteritems():\n if k in DotDict._reserved:\n raise Exception('%s is a reserved key' % k)\n else:\n setattr(self, k, v)",
"def test_missing_keys(self):\n self.assertEqual(None, tsig_keys.check({}))",
"def dKeys(self, value):\n\t\traise AttributeError('You are not allowed to modify the keys this way')",
"def clean_invalid_keys(self):\n\n keys = self._doc_type.mapping.properties.properties._d_.keys()\n\n to_delete = []\n\n for key in self._d_.keys():\n if key not in keys:\n to_delete.append(key)\n\n for key in to_delete:\n del self._d_[key]",
"def __init__(self): # type: () -> None\n self.secrets = {} # type: Dict[Text, Text]",
"def setKeyPath(object):\n pass",
"def test_get_kv_config(self):\n pass",
"def remove_internals(self):\n self.values = {value for value in self.values if not value.internal}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the database (get_config) in JSON form as a string.
|
def get_database_json(self) -> str:
# The cls=HammerJSONEncoder enables writing Decimals
return json.dumps(self.get_config(), cls=HammerJSONEncoder, sort_keys=True, indent=4, separators=(',', ': '))
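A self-contained sketch of the Decimal-aware serialization the comment above refers to. DecimalJSONEncoder is a hypothetical stand-in for HammerJSONEncoder, shown only to illustrate why a custom cls= is needed; the config keys are made up.

import json
from decimal import Decimal

class DecimalJSONEncoder(json.JSONEncoder):
    # Illustrative stand-in: serialize Decimal values, which json.dumps rejects by default.
    def default(self, o):
        if isinstance(o, Decimal):
            return float(o)
        return super().default(o)

config = {"timing.slack": Decimal("0.125"), "top": "chip"}
print(json.dumps(config, cls=DecimalJSONEncoder, sort_keys=True, indent=4,
                 separators=(',', ': ')))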
|
[
"def get_db_info() -> dict:\n env = os.environ['FLASK_ENV']\n if env == Environments.PRODUCTION.value:\n return {\n 'dbname': os.environ['DBNAME'],\n 'user': os.environ['DBUSER'],\n 'host': os.environ['DBHOST'],\n 'password': os.environ['DBPASSWORD'],\n 'port': os.environ['DBPORT'],\n }\n joined = os.path.join(fs.get_absolute_path(), '../')\n # TODO please add condition checks for other environments\n return json.loads(fs.get_file_contents(joined + f'secrets/{env}.db.json'))",
"def json(self):\n return json.dumps(self.conf, indent=4, separators=(',', ': ')) + '\\n'",
"def read_config(self):\n # path to config.json. Always one directory above 'ritly' package\n CONFIG_PATH = os.path.join(os.path.dirname(__file__), \"..\", \"config.json\")\n\n with open(CONFIG_PATH) as json_file: \n data = json.load(json_file)\n # connect to database 'linkstore' with given config info\n db = mysql.connector.connect(\n host=data['host'],\n user=data['user'],\n passwd=data['passwd'],\n database=\"linkstore\"\n )\n \n return db",
"def read_database_name():\n with open(\"model/database_name.json\") as json_file:\n database = json.load(json_file)\n return database[\"DATABASE\"]",
"def get_database_data(self):\r\n pass",
"def get_configuration_dict(self):\n return self.json",
"def get_current_grype_db_metadata(self) -> json:\n return self._get_metadata_file_contents(self.METADATA_FILE_NAME)",
"def db_data():\n\n # A constant that defines the record fields that we wish to retrieve.\n FIELDS = {\n '_id': False, 'branch': True, 'area': True, 'region': True,\n 'modules_passed': True, 'modules_failed': True, 'modules_in_progress': True,\n 'modules_overdue': True\n }\n\n # Open a connection to MongoDB using a with statement such that the\n # connection will be closed as soon as we exit the with statement\n with MongoClient(MONGO_URI) as conn:\n # Define which collection we wish to access\n collection = conn[DBS_NAME][COLLECTION_NAME]\n # Retrieve a result set only with the fields defined in FIELDS\n # and limit the the results to 55000\n projects = collection.find(projection=FIELDS, limit=55000)\n # Convert projects to a list in a JSON object and return the JSON data\n return json.dumps(list(projects))",
"def get_db_info():\n db_info = db_specification()\n\n server = db_info.get(\"server\")\n port = db_info.get(\"port\")\n repository = db_info.get(\"repository\")\n\n connection = test_db_connection(server, port, repository)\n if connection == False:\n return get_db_info()\n else:\n return db_info",
"def dump_db():\n\ttable_list = []\n\tfor table in db.metadata.tables.items():\n\t\ttable_list.append(table[0])\n\tdb_dict = dict.fromkeys(table_list)\n\tfor table in table_list:\n\t\tdb_dict[table] = []\n\t\tquery = db.engine.execute(\n\t\t\tf'SELECT * FROM {table}'\n\t\t)\n\t\tfor row in query:\n\t\t\tdb_dict[f\"{table}\"].append(list(row))\n\treturn jsonify(db_dict)",
"def get_config(self) -> bytes:\n return self.config",
"def load_backend() -> json:\n\treturn _load_config(\"env.json\")",
"async def __get_main_database_configuration(self) -> dict:\n mongo_url: str = await self.__get_database_connection_url(host=\"MONGO_MAIN_HOST\",\n port=\"MONGO_MAIN_PORT\",\n database=\"MONGO_MAIN_DATABASE_NAME\",\n username=\"MONGO_MAIN_DATABASE_USERNAME_FILE\",\n password=\"MONGO_MAIN_DATABASE_PASSWORD_FILE\",\n authentication_database=\"MONGO_MAIN_DATABASE_NAME\"\n )\n return {\n \"url\": mongo_url,\n \"path\": \"/app/mongodb-migrations/main\"\n }",
"def get_config():\n with open(\"config.json\", \"r\") as f:\n data = f.read()\n return json.loads(data)",
"def get_config() -> str:\n\n config = {\"ops\": get_supported_ops(), \"initializer\": \"scalar\", \"min_nodes\": 2}\n return json.dumps(config)",
"def databases(self) -> dict:\n return self.config[\"databases\"]",
"def load_database( filename: str) -> dict:\n with open(filename, 'r') as f:\n db = json.load(f)\n \n # Umwandlung nicht benötigt\n\n return db",
"def get_db_details(self):\n props = {}\n props['path'] = self.path\n props['size'] = self.__get_size()\n props['indexes'] = self.indexes_names.keys()\n props['cdb_environment'] = cdb_environment\n return props",
"def get_json(self) -> str:\n return json.dumps(self._raw_meta)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieve a key, first trying the suffixed variant and falling back to the base key if the suffixed one is not found.
|
def get_setting_suffix(self, key: str, suffix: str, nullvalue: Any = None, check_type: bool = True) -> Any:
    default = key
    override = default + "_" + suffix
    value = None
    try:
        value = self.get_config()[override]
    except KeyError:
        try:
            value = self.get_config()[default]
        except KeyError:
            raise KeyError(f"Both base key: {default} and overridden key: {override} are missing.")
    if default not in self.defaults:
        self.logger.warning(f"Base key: {default} does not have a default implementation")
    if check_type:
        self.check_setting(default)
    return nullvalue if value is None else value
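A free-standing sketch of the same lookup order, assuming a plain dict in place of get_config(); the keys used below are hypothetical.

from typing import Any

def lookup_with_suffix(config: dict, key: str, suffix: str, nullvalue: Any = None) -> Any:
    # Prefer the suffixed override; fall back to the base key.
    override = key + "_" + suffix
    if override in config:
        value = config[override]
    elif key in config:
        value = config[key]
    else:
        raise KeyError(f"Both base key: {key} and overridden key: {override} are missing.")
    return nullvalue if value is None else value

cfg = {"par.clock_period": "1 ns", "par.clock_period_fast": "0.5 ns"}
print(lookup_with_suffix(cfg, "par.clock_period", "fast"))  # -> 0.5 ns (override wins)
print(lookup_with_suffix(cfg, "par.clock_period", "slow"))  # -> 1 ns (falls back to base)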
|
[
"def get_bucket_key(bucket_name, uri):\n pos = uri.find(bucket_name) + len(bucket_name) + 1\n return uri[pos:]",
"def lookup(self, subcmd_prefix):\n for subcmd_name in self.subcmds.keys():\n if subcmd_name.startswith(subcmd_prefix) \\\n and len(subcmd_prefix) >= self.subcmds[subcmd_name]['min']:\n return self.subcmds[subcmd_name]\n return None",
"def _substitute(self, key):\n stripped_key = key[2:-1]\n if stripped_key in self:\n return self[stripped_key]\n else:\n return key",
"def key_filename(key, source):\n if key == None or key.startswith(\"/\"):\n raise ValueError\n suffix = \".txt\"\n flat = re.sub(r'/', '-', key) + suffix\n if flat == suffix:\n # top; old scheme would yield \".txt\"\n # try to employ some information from source\n if not source or source == \"/\":\n # nothing meaningfull can be derived, \n return \"root\" + suffix\n else:\n return os.path.basename(strip_trailing(source, \"/\")) + suffix\n else:\n return flat",
"def get_key(key_path):\n current = config\n for key in key_path:\n if key in current:\n current = current[key]\n else:\n return None\n\n return current",
"def findParentSuffix(self, suffix):\n rdns = ldap.explode_dn(suffix)\n del rdns[0]\n\n while len(rdns) > 0:\n suffix = ','.join(rdns)\n try:\n mapent = self.getMTEntry(suffix)\n return suffix\n except NoSuchEntryError:\n del rdns[0]\n\n return \"\"",
"def _get_value(obj, key, default=missing):\n if \".\" in key:\n return _get_value_for_keys(obj, key.split(\".\"), default)\n else:\n return _get_value_for_key(obj, key, default)",
"def find_key(params, partial_key):\n return next(v for k, v in params.items() if partial_key in k)",
"def get_full_path(self, key):\n return os.path.join(self.base_path, key)",
"def fullKeyName(self, key : str, *, ext : str = None) -> str:\n if self._path is None or key is None:\n return None\n key = str(key)\n _log.verify( len(key) > 0, \"'key' cannot be empty\")\n\n sub, _ = os.path.split(key)\n _log.verify( len(sub) == 0, \"Key '%s' contains directory information\", key)\n\n _log.verify( key[0] != \"!\", \"Key '%s' cannot start with '!' (this symbol indicates the temp directory)\", key)\n _log.verify( key[0] != \"~\", \"Key '%s' cannot start with '~' (this symbol indicates the user's directory)\", key)\n\n ext = self._convert_ext( ext if not ext is None else self._ext )\n if len(ext) > 0 and key[-len(ext):] != ext:\n return self._path + key + ext\n return self._path + key",
"def findSubKey(self, root_key, sub_key):\n\n import _winreg\n\n index = 0\n found_key = False\n\n try:\n key_ref = _winreg.OpenKey(root_key, sub_key, 0, _winreg.KEY_READ)\n except EnvironmentError:\n return None\n else:\n return key_ref",
"def _get(self, resource, *args):\n for key in args[:-1]:\n try:\n return resource[key]\n except KeyError:\n pass\n try:\n return resource[args[-1]]\n except KeyError:\n log.error('Resource does not contain `%s`:\\n%s', args[-1], pformat(resource))\n return ''",
"def get_setting(cls, settings, key):\n part1, _, part2 = key.partition('/')\n if part2:\n value = settings[part1][part2]\n else:\n value = settings[part1]\n return value",
"def get_specific_key(problem_id, version, key):\n return 'do some magic!'",
"def __getitem__(self, key):\r\n try:\r\n result = super().__getitem__(key)\r\n except KeyError as ex:\r\n result = self.alias(key)\r\n if result is None:\r\n raise ex\r\n return result",
"def GetKeyByPath(self, key_path):\n return None",
"def get_key(name, key=None):\n if key is None:\n fname = '.{}'.format(name)\n if exists(fname):\n with open(fname, 'rt') as f:\n key = f.readline().strip()\n else:\n print '{key} arg is not provided and .{key} file does not exist'.format(\n key=name\n )\n exit()\n return key",
"def get_default(self, key: str) -> Any:\n try:\n return self.get(key)\n except KeyError as e:\n last_dot_index = key.rfind(\".\")\n if last_dot_index < 0:\n raise e\n parent = key[:last_dot_index]\n field = key[last_dot_index + 1 :]\n while True:\n # self.log(\"Looking up {}/{}\".format(parent, field))\n try:\n parent_type = self.get(parent + \".\" + \"type\")\n # found a type -> go to this type and lookup there\n new_key = parent_type + \".\" + field\n last_dot_index = new_key.rfind(\".\")\n parent = new_key[:last_dot_index]\n field = new_key[last_dot_index + 1 :]\n except KeyError:\n # no type found -> go up hierarchy\n last_dot_index = parent.rfind(\".\")\n if last_dot_index < 0:\n raise e\n field = parent[last_dot_index + 1 :] + \".\" + field\n parent = parent[:last_dot_index]\n continue\n try:\n value = self.get(parent + \".\" + field)\n # uncomment this to see where defaults are taken from\n # self.log(\n # \"Using value of {}={} for key {}\".format(\n # parent + \".\" + field, value, key\n # )\n # )\n return value\n except KeyError:\n # try further\n continue",
"def getFileBySuffix(self, suffix=None):\n lst = self.getFilesBySuffix(suffix)\n if len(lst) == 0:\n return None\n else:\n return lst[0]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks a setting for correct typing.
|
def check_setting(self, key: str, cfg: Optional[dict] = None) -> bool:
# Ignore all builtins
if any(key in unpack(builtin) for builtin in self.builtins):
return True
if cfg is None:
cfg = self.get_config()
if key not in self.get_config_types():
self.logger.warning(f"Key {key} is not associated with a type")
return True
try:
exp_value_type = parse_setting_type(self.get_config_types()[key])
except ValueError as ve:
raise ValueError(f'Key {key} has an invalid outer type: perhaps you have "List" instead of "list" or "Dict" instead of "dict"?') from ve
value = cfg[key]
if value is None and not exp_value_type.optional:
raise TypeError(f"Key {key} is missing and non-optional")
if value is None and exp_value_type.optional:
return True
if exp_value_type.primary == NamedType.ANY:
return True
value_type_primary = type(value).__name__
if value_type_primary != exp_value_type.primary.value:
raise TypeError(f"Expected primary type {exp_value_type.primary.value} for {key}, got type {value_type_primary}")
if isinstance(value, list) and len(value) > 0:
if exp_value_type.secondary == NamedType.ANY:
return True
contained_val = value[0]
value_type_secondary = type(contained_val).__name__
if value_type_secondary != exp_value_type.secondary.value:
raise TypeError(f"Expected secondary type {exp_value_type.secondary.value} for {key}, got type {value_type_secondary}")
if isinstance(contained_val, dict) and len(contained_val) > 0:
k, v = list(contained_val.items())[0]
k_type = type(k).__name__
v_type = type(v).__name__
if exp_value_type.tertiary_k != NamedType.ANY and k_type != exp_value_type.tertiary_k.value:
raise TypeError(f"Expected tertiary key type {exp_value_type.tertiary_k.value} for {key}, got type {k_type}")
if exp_value_type.tertiary_v != NamedType.ANY and v_type != exp_value_type.tertiary_v.value:
raise TypeError(f"Expected tertiary value type {exp_value_type.tertiary_v.value} for {key}, got type {v_type}")
return True
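A simplified, self-contained sketch of the primary-type comparison at the core of this check. It does not reproduce the optional/secondary/tertiary handling above, and the key names are hypothetical.

def check_primary_type(key: str, value, expected_type_name: str) -> bool:
    # The runtime type name of the value must match the declared type string.
    actual = type(value).__name__
    if actual != expected_type_name:
        raise TypeError(f"Expected primary type {expected_type_name} for {key}, got type {actual}")
    return True

print(check_primary_type("vlsi.inputs.clocks", ["clk"], "list"))             # True
print(check_primary_type("vlsi.inputs.supplies", {"VDD": "0.8 V"}, "dict"))  # True
# check_primary_type("vlsi.core.max_threads", "8", "int")  # would raise TypeError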
|
[
"def _type_check(self, key):\n if self._type == \"I\" and isinstance(key,str):\n raise TypeError(\"STDict keys is set as type int()\")\n\n elif self._type == \"S\" and isinstance(key,int):\n raise TypeError(\"STDict keys is set as type str()\")\n else:\n return",
"def run_settings_check(self, key, setting):\n self.assertTrue(type(setting) is dict)\n\n name = setting.get('name', None)\n\n self.assertIsNotNone(name)\n self.assertIn('django.utils.functional.lazy', str(type(name)))\n\n description = setting.get('description', None)\n\n self.assertIsNotNone(description)\n self.assertIn('django.utils.functional.lazy', str(type(description)))\n\n if key != key.upper():\n raise ValueError(f\"Setting key '{key}' is not uppercase\") # pragma: no cover\n\n # Check that only allowed keys are provided\n allowed_keys = [\n 'name',\n 'description',\n 'default',\n 'validator',\n 'hidden',\n 'choices',\n 'units',\n 'requires_restart',\n 'after_save',\n 'before_save',\n ]\n\n for k in setting.keys():\n self.assertIn(k, allowed_keys)\n\n # Check default value for boolean settings\n validator = setting.get('validator', None)\n\n if validator is bool:\n default = setting.get('default', None)\n\n # Default value *must* be supplied for boolean setting!\n self.assertIsNotNone(default)\n\n # Default value for boolean must itself be a boolean\n self.assertIn(default, [True, False])",
"def test_adjust_bool_keyval(self):\n self.setting.value = u\"true\"\n self.assertRaises(ValueError, lambda: self.setting.adjust_value_to_type(\"keyval list\"))",
"async def commonspam(self, ctx, setting = None):\n if setting is None:\n await ctx.send(\"Please specify a setting! (on | off)\")\n else:\n if setting.lower() == \"on\" or setting.lower() == \"off\":\n pass\n else:\n await ctx.send(\"Please specify a *correct* setting! (on | off)\")",
"def test_int_direct(self):\n for source in (\"direct\", \"default\"):\n self.assertEqual(self.setting.detect_type(1, source), \"int\")",
"def get_setting_type(setting_name: str) -> str:\n\n if setting_name in [ConfigDictKeys.IS_INPUT_QUANTIZED, ConfigDictKeys.IS_OUTPUT_QUANTIZED]:\n return ConfigDictKeys.IS_QUANTIZED\n if setting_name == ConfigDictKeys.IS_SYMMETRIC:\n return ConfigDictKeys.IS_SYMMETRIC\n if setting_name == ConfigDictKeys.ENCODING_CONSTRAINTS:\n return ConfigDictKeys.ENCODING_CONSTRAINTS\n error_msg = f'Unrecognized quantizer setter name {setting_name}'\n logger.error(error_msg)\n raise AssertionError(error_msg)",
"def test_adjust_bool_keyval(self):\n self.setting.value = u\"true\"\n self.setting.adjust_value_to_type(\"keyval list\")\n self.assertEqual(self.setting.value, u\"true\")\n self.assert_(isinstance(self.setting.value, unicode))",
"def test_adjust_bool_conffile(self):\n self.setting.value = u\"true\"\n self.setting.adjust_value_to_type(\"conf file\")\n self.assertEqual(self.setting.value, u\"true\")\n self.assert_(isinstance(self.setting.value, unicode))",
"def test_string_direct(self):\n for source in (\"direct\", \"default\"):\n self.assertEqual(self.setting.detect_type(u\"Hello\", source), \"unicode\")\n self.assertEqual(self.setting.detect_type(\"Hello\", source), \"unicode\")",
"def validateAndSet(self):\n\n text = self.edit.text()\n try:\n val = self.setting.fromText(text)\n styleClear(self.edit)\n self.emit( qt4.SIGNAL('settingChanged'), self, self.setting, val )\n\n except utils.InvalidType:\n styleError(self.edit)",
"def type_callback(self):\n print 'Stranger is typing...'",
"def assume_wrong_type_error(self, setting, value):\n self.assertRaises(settings.SettingWrongTypeError,\n lambda: setting.set_value(value, \"default\"))",
"def validate_settings(self):\n pass",
"def validateAndSet(self):\n\n text = self.text()\n try:\n val = self.setting.fromText(text)\n styleClear(self)\n self.emit( qt4.SIGNAL('settingChanged'), self, self.setting, val )\n\n except utils.InvalidType:\n styleError(self)",
"def assume_wrong_type_error(self, setting, value, source=\"direct\"):\n self.assertRaises(settings.SettingWrongTypeError, lambda: setting.set_value(value, source))",
"def test_adjust_bool_keyval(self):\n self.setting.value = u\"no\"\n self.setting.adjust_value_to_type(\"keyval list\")\n self.assertEqual(self.setting.value, False)",
"def test_adjust_bool_conffile(self):\n self.setting.value = u\"true\"\n self.assertRaises(ValueError, lambda: self.setting.adjust_value_to_type(\"conf file\"))",
"def test_bool(self):\n self.assume_wrong_type_error(self.string_setting, False)\n self.assume_wrong_type_error(self.int_setting, False)\n self.assume_wrong_type_error(self.float_setting, False)\n self.assume_working_value_setting(self.bool_setting, False, bool)\n if self.list_setting:\n self.assume_wrong_type_error(self.list_setting, False)",
"def test_adjust_int_conffile(self):\n self.setting.value = u\"2\"\n self.setting.adjust_value_to_type(\"conf file\")\n self.assertEqual(self.setting.value, u\"2\")\n self.assert_(isinstance(self.setting.value, unicode))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update the core config with the given core config.
|
def update_core(self, core_config: List[dict], core_config_types: List[dict]) -> None:
self.core = core_config
self.update_defaults(core_config)
self.update_types(core_config_types, True)
self.__config_cache_dirty = True
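A minimal stand-in showing the shared pattern behind update_core, update_tools, update_technology and update_environment: replace the stored list and mark the merged-config cache dirty. The defaults/type bookkeeping is omitted and TinyDatabase is a hypothetical class used only for this sketch.

from typing import List

class TinyDatabase:
    def __init__(self) -> None:
        self.core: List[dict] = []
        self._config_cache_dirty = False

    def update_core(self, core_config: List[dict]) -> None:
        self.core = core_config
        self._config_cache_dirty = True  # force the merged config view to be rebuilt

db = TinyDatabase()
db.update_core([{"vlsi.core.build_system": "make"}])
print(db.core, db._config_cache_dirty)  # -> [{'vlsi.core.build_system': 'make'}] True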
|
[
"def reload_core_config(opp):\n opp.services.call(ha.DOMAIN, SERVICE_RELOAD_CORE_CONFIG)",
"def _update_central_core_config(\n self, enabled: bool, delete_backups: bool\n ) -> json_api.system_settings.SystemSettings:\n api_endpoint = ApiEndpoints.central_core.settings_update\n request_obj = api_endpoint.load_request(enabled=enabled, delete_backups=delete_backups)\n return api_endpoint.perform_request(http=self.auth.http, request_obj=request_obj)",
"def update_config(self):\n for key_name, entry in self.config.config.items():\n self.update_config_entry(key_name, entry)",
"def update_config():\n g.config = app.config",
"def __update_config(self):\n os.chdir(str(self.__kernel_source_path))\n\n # Could get running config from /proc/config.gz but I'll just copy the newest one in /boot\n # The newest config we have\n src = self.__install_path / self.__current_kernels[0].config\n dest = Path(os.getcwd() + \"/.config\")\n\n script_info(f\"Copying {src.absolute()} to {dest.absolute()}\")\n shutil.copy(src, dest)\n\n script_info(f\"Creating a new config using .config as a base\")\n try:\n subprocess.run([\"make\", \"oldconfig\"], check=True)\n except CalledProcessError as err:\n error_and_exit(err)",
"def config():\n update_config_cli()",
"def _update_config(self, config_file, disable_parent_task_update=False, *args, **kwargs):\n config = interface.get_config(config_file)\n #Update global configuration here for printing everything in run() function\n #self.global_config = update(self.global_config, config)\n if not config:\n return kwargs\n if not config.has_section(self._config_section):\n return kwargs\n params = self.get_params()\n param_values = {x[0]:x[1] for x in self.get_param_values(params, args, kwargs)}\n for key, value in self.get_params():\n new_value = None\n # Got a command line option => override config file\n if value.default != param_values.get(key, None):\n new_value = param_values.get(key, None)\n logger.debug(\"option '{0}'; got value '{1}' from command line, overriding configuration file setting default '{2}' for task class '{3}'\".format(key, new_value, value.default, self.__class__))\n else:\n if config.has_key(self._config_section, key):\n new_value = config.get(self._config_section, key)\n if config.has_section(self._config_section, self._config_subsection):\n if config.has_key(self._config_section, key, self._config_subsection):\n new_value = config.get(self._config_section, key, self._config_subsection)\n logger.debug(\"Reading config file, setting '{0}' to '{1}' for task class '{2}'\".format(key, new_value, self.__class__))\n\n if new_value:\n if key == \"parent_task\" and disable_parent_task_update:\n logger.debug(\"disable_parent_task_update set; not updating '{0}' for task class '{1}'\".format(key, self.__class__))\n else:\n kwargs[key] = new_value\n logger.debug(\"Updating config, setting '{0}' to '{1}' for task class '{2}'\".format(key, new_value, self.__class__))\n else:\n pass\n logger.debug(\"Using default value '{0}' for '{1}' for task class '{2}'\".format(value.default, key, self.__class__))\n return kwargs",
"def updateSlaveCoreSettings(self, data, slaveCoreId):\n return self.session.request('replication/cores/slaves/%s/settings'\n % (slaveCoreId), 'PUT',\n self.getXML(data, 'updateCoreSettingsRequest'))",
"def update_config(self, config):\n self._accuracy_aware_algo.update_config(config)",
"def update(self, config: dict):\n for key, value in config.items():\n setattr(self, key, value)",
"def reload_kubeconfig(self) -> None:\n kube_config.load_kube_config(\n config_file=self.kubeconfig_path,\n context=self.kubecontext\n )\n self.core = kube_client.CoreV1Api()",
"def updated(self, newConfiguration):",
"def _update_project_config(self, path):\n projects_path = list(set(CONF.get('main', 'projects_path', [])))\n projects_path = list(projects_path)\n projects_path.append(path)\n CONF.set('main', 'projects_path', projects_path)\n self.load_projects()\n self.update_status('')",
"def update_coreset(config, shared, task_id, data, mnet, hnet, device, logger,\n allowed_outputs, hhnet=None, method='bbb'):\n assert method in ['bbb', 'avb']\n assert hhnet is None or method != 'bbb'\n\n if config.coreset_size == -1:\n return\n\n if config.per_task_coreset or not hasattr(shared, 'coreset'):\n num_new_samples = config.coreset_size\n else:\n # How many samples to be replaced.\n num_replace = config.coreset_size // (task_id+1)\n num_new_samples = num_replace\n\n # Pick random samples from the training set as new coreset.\n batch = data.next_train_batch(num_new_samples, return_ids=True)\n new_inputs = data.input_to_torch_tensor(batch[0], device,\n mode='train')\n new_targets = data.output_to_torch_tensor(batch[1], device,\n mode='train')\n #_, new_labels = torch.max(new_targets, 1)\n #new_labels = new_labels.detach().cpu().numpy()\n\n if config.per_task_coreset or not hasattr(shared, 'coreset'):\n\n # Add samples to existing coreset.\n if hasattr(shared, 'coreset'):\n assert np.all(np.equal(list(shared.coreset.shape[1:]),\n list(new_inputs.shape[1:])))\n shared.coreset = torch.cat([shared.coreset, new_inputs], dim=0)\n shared.coreset_targets = torch.cat([shared.coreset_targets,\n new_targets], dim=0)\n #shared.coreset_labels = np.concatenate([shared.coreset_labels,\n # new_labels])\n shared.task_ident = np.concatenate([shared.task_ident,\n np.ones(num_new_samples) * task_id])\n shared.sample_ids = np.concatenate([shared.sample_ids, batch[2]])\n else:\n shared.coreset = new_inputs\n shared.coreset_targets = new_targets\n #shared.coreset_labels = new_labels\n shared.task_ident = np.ones(num_new_samples) * task_id\n shared.sample_ids = batch[2]\n\n logger.debug('%d training samples from task %d have been added to ' \\\n % (num_new_samples, task_id+1) + 'the coreset.')\n else:\n assert hasattr(shared, 'coreset')\n\n logger.debug('%d/%d samples in the coreset will be replaced by ' \\\n % (num_replace, config.coreset_size) +\n 'samples from task %d.' % (task_id+1))\n\n if 'regression' in shared.experiment_type:\n raise NotImplementedError()\n\n if method == 'bbb':\n ents = calc_batch_uncertainty(config, shared, task_id,\n shared.coreset, mnet, hnet, data, config.val_sample_size,\n mnet_weights=None, allowed_outputs=allowed_outputs,\n disable_lrt=config.disable_lrt_test)\n else:\n ents = pcutils.calc_batch_uncertainty(config, shared, task_id,\n device, shared.coreset, mnet, hnet, hhnet, data,\n config.val_sample_size, hnet_theta=None,\n allowed_outputs=allowed_outputs)\n\n # We replace those samples in the coreset that achieve high entropy\n # under the current model.\n replace_inds = np.argsort(ents)[-num_replace:]\n\n assert np.all(np.equal(list(shared.coreset.shape[1:]),\n list(new_inputs.shape[1:])))\n shared.coreset[replace_inds, :] = new_inputs\n shared.coreset_targets[replace_inds, :] = new_targets\n #shared.coreset_labels[replace_inds] = new_labels\n shared.task_ident[replace_inds] = np.ones(num_replace) * task_id\n shared.sample_ids[replace_inds] = batch[2]",
"def update(self, settings):\n update_config = settings.configuration\n self.configuration.update(update_config)",
"def update_lvm_config(self, context):\n return self.call(context, self.make_msg('update_lvm_config'))",
"def update_plugin_config(self):\n conf_dict = {}\n # conf_dict['bridge'] = self.bridge\n conf_dict['bridge_serial'] = self.bridge.get('serialNumber','')\n conf_dict['bridge_user'] = self.bridge.get('username','')\n conf_dict['bridge_ip'] = self.bridge.get('ip','')\n conf_dict['bridge_port'] = self.bridge.get('port','')\n self.update_config_section(conf_dict)\n return",
"def core_reload(self, core, verbose=False):\n params = {\n 'action': 'RELOAD',\n 'wt': 'json'\n }\n\n if core is not None:\n params['core'] = core\n else:\n print('Solr reload: missing mandatory argument \"core\"')\n return\n\n if verbose:\n print('Solr core_reload:')\n\n self._get('admin/cores', params, verbose)",
"def _update_runtime_properties():\n # Override any values in `config` with values in `additional_config`\n config = inputs['config']\n additional_config = inputs['additional_config']\n _dict_merge(config, additional_config)\n\n ctx.instance.runtime_properties['config'] = config\n ctx.instance.update()\n ctx.logger.debug('Updated {0}: {1}'.format(ctx.instance.id, config))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update the tools config with the given tools config.
|
def update_tools(self, tools_config: List[dict], tool_config_types: List[dict]) -> None:
self.tools = tools_config
self.update_defaults(tools_config)
self.update_types(tool_config_types, True)
self.__config_cache_dirty = True
|
[
"def update_tool_configs(self) -> None:\n tools = reduce(lambda a, b: a + b, list(self.tool_configs.values()))\n self.database.update_tools(tools)",
"def update_cache(self, tools: Dict[str, Union[ToolInfo, str]]):\r\n with self.db.transaction():\r\n self.db.insert_tool_info([tools.get(i) for i in tools.keys()])\r\n self._handle_cache_queue()",
"def run_vmwaretoolsupdate():\n run(\"sudo /usr/bin/vmware-config-tools.pl -d\")",
"def switch_tools(self, tools):\n self.toolbar.pack_forget()\n self.toolbar.destroy()\n self.remove_binds()\n if tools == \"CreationTools\":\n self.toolbar = CreationTools(self, **FRAME_OPTIONS)\n elif tools == \"MovingTools\":\n self.toolbar = MovingTools(self, **FRAME_OPTIONS)\n self.add_bind(tools)\n elif tools == \"DeletingTools\":\n self.toolbar = DeletingTools(self, **FRAME_OPTIONS)\n self.add_bind(tools)\n elif tools == \"ModifyingTools\":\n self.toolbar = ModifyingTools(self, **FRAME_OPTIONS)\n elif tools == \"CustomisingTools\":\n self.toolbar = CustomisingTools(self, **FRAME_OPTIONS)\n self.add_bind(tools)\n elif tools == \"ModelTools\":\n self.toolbar = ModelTools(self, **FRAME_OPTIONS)\n else:\n raise ValueError(\"No such tools.\")\n self.toolbar.pack(fill=tk.BOTH, expand=tk.NO)",
"def install_tools(self, tools):\n\n def bin_path(tool):\n binary = os.path.join(ASSETS_PATH, 'binaries', self.abi, tool)\n if not os.path.isfile(binary):\n binary = os.path.join(ASSETS_PATH, 'binaries', 'scripts', tool)\n return binary\n\n tools = sorted(set(tools) - self._installed_tools)\n\n # TODO: compute the checksum of the tool + install location and keep\n # that in _installed_tools, so we are sure to be correct\n for tool in tools:\n self.target.install(bin_path(tool))\n self._installed_tools.add(tool)",
"def config():\n update_config_cli()",
"def _update_linter_config(self, linter_config):\n for linter, tool_config in linter_config.items():\n if self._config_update(linter, tool_config):\n self._data['linters'][linter] = tool_config",
"def reload( self, tool_id ):\n if tool_id not in self.tools_by_id:\n raise ToolNotFoundException( \"No tool with id %s\" % tool_id )\n old_tool = self.tools_by_id[ tool_id ]\n new_tool = self.load_tool( old_tool.config_file )\n # Replace old_tool with new_tool in self.tool_panel\n tool_key = 'tool_' + tool_id\n for key, val in self.tool_panel.items():\n if key == tool_key:\n self.tool_panel[ key ] = new_tool\n break\n elif key.startswith( 'section' ):\n section = val\n for section_key, section_val in section.elems.items():\n if section_key == tool_key:\n self.tool_panel[ key ].elems[ section_key ] = new_tool\n break\n self.tools_by_id[ tool_id ] = new_tool\n log.debug( \"Reloaded tool %s %s\" %( old_tool.id, old_tool.version ) )",
"def init_tools( self, config_filename ):\n def load_tool( elem, panel_dict ):\n try:\n path = elem.get( \"file\" )\n tool = self.load_tool( os.path.join( self.tool_root_dir, path ) )\n self.tools_by_id[ tool.id ] = tool\n key = 'tool_' + tool.id\n panel_dict[ key ] = tool\n log.debug( \"Loaded tool: %s %s\" % ( tool.id, tool.version ) )\n except:\n log.exception( \"error reading tool from path: %s\" % path )\n def load_workflow( elem, panel_dict ):\n try:\n # TODO: should id be encoded?\n workflow_id = elem.get( 'id' )\n workflow = self.load_workflow( workflow_id )\n self.workflows_by_id[ workflow_id ] = workflow\n key = 'workflow_' + workflow_id\n panel_dict[ key ] = workflow\n log.debug( \"Loaded workflow: %s %s\" % ( workflow_id, workflow.name ) )\n except:\n log.exception( \"error loading workflow: %s\" % workflow_id )\n def load_label( elem, panel_dict ):\n label = ToolSectionLabel( elem )\n key = 'label_' + label.id\n panel_dict[ key ] = label\n def load_section( elem, panel_dict ):\n section = ToolSection( elem )\n log.debug( \"Loading section: %s\" % section.name )\n for section_elem in elem:\n if section_elem.tag == 'tool':\n load_tool( section_elem, section.elems )\n elif section_elem.tag == 'workflow':\n load_workflow( section_elem, section.elems )\n elif section_elem.tag == 'label':\n load_label( section_elem, section.elems )\n key = 'section_' + section.id\n panel_dict[ key ] = section\n \n log.info(\"parsing the tool configuration\")\n tree = util.parse_xml( config_filename )\n root = tree.getroot()\n for elem in root:\n if elem.tag == 'tool':\n load_tool( elem, self.tool_panel )\n elif elem.tag == 'workflow':\n load_workflow( elem, self.tool_panel )\n elif elem.tag == 'section' :\n load_section( elem, self.tool_panel )\n elif elem.tag == 'label':\n load_label( elem, self.tool_panel )",
"def reload( self, tool_id ):\n if tool_id not in self.tools_and_sections_by_id:\n raise ToolNotFoundException( \"No tool with id %s\" % tool_id )\n old_tool, section = self.tools_and_sections_by_id[ tool_id ]\n new_tool = Tool( old_tool.config_file )\n log.debug( \"Reloaded tool %s\", old_tool.id )\n # Is there a potential sync problem here? This should be roughly \n # atomic. Too many indexes for tools...\n section.tools[ section.tools.index( old_tool ) ] = new_tool\n self.tools_by_id[ tool_id ] = new_tool\n self.tools_and_sections_by_id[ tool_id ] = new_tool, section",
"def update_technology(self, technology_config: List[dict], technology_config_types: List[dict]) -> None:\n self.technology = technology_config\n self.update_defaults(technology_config)\n self.update_types(technology_config_types, True)\n self.__config_cache_dirty = True",
"def update_config(self):\n for key_name, entry in self.config.config.items():\n self.update_config_entry(key_name, entry)",
"def configure(home=None, config=None):\n global freebaseToolsHome, freebaseToolsConfig\n if home is not None:\n freebaseToolsHome = home\n if config is not None:\n freebaseToolsConfig = config",
"def saveToolSettings():\n pass",
"def toolChanged(self, tool: ghidra.framework.plugintool.PluginTool) -> None:\n ...",
"def upgrade_tools(self, execution_type=None,\n installer_options=None, **kwargs):\n pass",
"def apply_config_changes(cfg):\n # Safety checks\n if type(cfg['warnings/filters']) != list:\n cfg['warnings/filters'] = []\n \n # Adhoc changes\n if type(cfg['gui/dynazoom']) is str:\n cfg['gui/dynazoom'] = [ cfg['gui/dynazoom'], '' ]\n\n for i in range(8):\n t = \"render/light%s\"%i\n try:\n cfg[t] = dict(cfg[t])\n except:\n pass\n\n # Rename settings\n for old,new in [\n ('history','gui/history'),\n ]:\n if old in cfg.keys():\n if new not in cfg.keys():\n cfg[new] = cfg[old]\n del cfg[old]\n\n # Delete settings\n for key in [\n 'input/timeout','filterwarnings',\n 'render/ambient','render/diffuse','render/specular','render/emission',\n 'render/material','canvas/propcolors','Save changes',\n ]:\n if key in cfg.keys():\n print(\"DELETING CONFIG VARIABLE %s\" % key)\n del cfg[key]",
"def add_tools_to_pipeline(pipeline,\n map_tool_to_runner,\n config=None,\n input_files=None,\n **kwargs):\n tool_functions = build_tool_functions(map_tool_to_runner, config)\n\n if \"input\" not in config:\n raise KeyError(\"configuration file requires an 'input' section\")\n\n if config[\"input\"] is None:\n raise ValueError(\"input section is empty\")\n\n input_regex = config[\"input\"].pop(\"regex\", None)\n input_alias = config[\"input\"].pop(\"alias\", None)\n input_group_regex = config[\"input\"].pop(\"group_regex\", None)\n input_group_alias = config[\"input\"].pop(\"group_alias\", \"\\\\1\")\n\n is_test = \"is_test\" in config\n\n # update selected fields for testing purposes\n if \"test\" in config[\"input\"]:\n config[\"input\"].update(config[\"input\"][\"test\"])\n del config[\"input\"][\"test\"]\n\n config_files = expand_globs(config[\"input\"], is_test=is_test)\n\n if input_group_regex:\n config_files = group_files(config_files,\n input_group_regex,\n input_group_alias)\n\n input_combos = build_combinations(config_files)\n tool_runners = []\n\n ignore = config[\"setup\"].get(\"ignore\", [])\n ignore.extend(config[\"input\"].get(\"ignore\", []))\n\n make_unique = check_unique(tool_functions,\n input_combos=input_combos,\n input_regex=input_regex,\n input_alias=input_alias,\n is_test=is_test)\n\n suffix = None\n\n for toolf, input_files in itertools.product(tool_functions, input_combos):\n\n # create a copy of the task function and give it its unique name\n # by mangling it with the input_files\n taskf = copy.copy(toolf)\n\n taskf.register_input(input_files,\n regex=input_regex,\n alias=input_alias,\n make_unique=make_unique,\n is_test=is_test)\n\n if \"name\" in input_files:\n # create copy of input_files without name, do\n # not modify original as different tools require\n # the 'name'\n input_files = dict([(x, y) for x, y in list(input_files.items())\n if x != \"name\"])\n\n result_dir = os.path.join(taskf.__name__ + \".dir\")\n\n found = False\n\n for i in IOTools.val2list(ignore):\n if i in result_dir:\n P.get_logger().warn(\n \"the following task will be ignored: \"\n \"{} matching {}\".format(\n result_dir, i))\n found = True\n if found:\n continue\n\n output, multiple_outputs, flexible_outputs, _suffix = \\\n build_output(taskf, result_dir)\n if suffix is None:\n suffix = _suffix\n elif suffix != _suffix:\n raise ValueError(\n \"tools produce output files of different type, \"\n \"got {}, expected {}\".format(_suffix, suffix))\n\n tool_task = pipeline.merge(\n task_func=taskf,\n input=list(input_files.values()),\n output=output,\n **kwargs).mkdir(result_dir)\n\n # if there are multilpe output files, split the task so that\n # each output file will be processed separately further down the\n # pipeline.\n if multiple_outputs:\n f = EmptyRunner()\n f.__name__ = taskf.__name__ + \"_split\"\n tool_task = pipeline.split(\n task_func=f,\n input=tool_task,\n output=output)\n\n tool_runners.append(tool_task)\n\n # convenience target\n f = EmptyRunner()\n f.__name__ = \"tools\"\n pipeline.merge(task_func=f,\n input=tool_runners,\n output=None)\n\n return suffix, tool_runners",
"def add_bind(self, tools):\n if tools == \"MovingTools\":\n self.canvas.add_moving_bind()\n elif tools == \"DeletingTools\":\n self.canvas.add_deleting_bind()\n elif tools == \"CustomisingTools\":\n self.canvas.add_selecting_bind()\n else:\n raise ValueError(\"No such tools.\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update the technology config with the given technology config.
|
def update_technology(self, technology_config: List[dict], technology_config_types: List[dict]) -> None:
self.technology = technology_config
self.update_defaults(technology_config)
self.update_types(technology_config_types, True)
self.__config_cache_dirty = True
|
[
"def update(self, name, config, etag):\n response = self._session.put(\n path=self._session.urljoin(self.RESOURCE_PATH, name).format(base_api=self.base_api),\n headers={\n 'Accept': self._accept_header(),\n 'Content-Type': 'application/json',\n 'If-Match': etag,\n },\n data=json.dumps(config),\n )\n\n etag = response.headers['ETag']\n return TemplateConfig(session=self._session, data=response.json(), etag=etag)",
"def update_config(self):\n for key_name, entry in self.config.config.items():\n self.update_config_entry(key_name, entry)",
"def updated(self, newConfiguration):",
"def update_config():\n g.config = app.config",
"def update(self, config: dict):\n for key, value in config.items():\n setattr(self, key, value)",
"def update_security_info(self, config_dict):\n\n # update WPS information\n self.update_wps_configuration()\n\n if 'wpa_passphrase' in config_dict and config_dict['wpa_passphrase']:\n self.configuration_dict['wpa_key_mgmt'] = \"WPA-PSK\"\n self.configuration_dict['wpa_pairwise'] = \"TKIP CCMP\"\n self.configuration_dict['wpa'] = '3'",
"def upsert_config(config: TgcliConfiguration):\n configs = __read_config_files__()\n configs[config.name] = config\n save_configs(configs)",
"def update_security_feature_config(self, context):\n return self.call(context, self.make_msg('update_security_feature_config'))",
"def apply_config(self, config):\n raise NotImplementedError",
"def update_technology_params(self, tech_dict, params_dict, params_key, params_value):\n if params_key not in tech_dict:\n print(\"ERROR: \\\"{}\\\" not in technology data structure... Passing...\".format(params_key))\n return\n elif type(params_value) == str or type(params_value) == int or type(params_value) == float or type(params_value) == bool:\n tech_dict[params_key] = params_value\n elif type(params_value) == dict:\n for k, v in params_value.items():\n self.update_technology_params(tech_dict[params_key], params_value, k, v)\n elif type(params_value) == list:\n for i in range(0, len(params_value)):\n for k, v in params_value[i].items():\n self.update_technology_params(tech_dict[params_key][i], params_value[i], k, v)",
"def update(self, connector):\n response = requests.put('{}/{}/config'.format(self.base_endpoint,\n connector['config']['name']), json=connector['config'])\n response.raise_for_status()\n if self.verbose:\n print('[-] Updated connector: \"{}\"'.format(connector['config']['name']))",
"def _update_runtime_properties():\n # Override any values in `config` with values in `additional_config`\n config = inputs['config']\n additional_config = inputs['additional_config']\n _dict_merge(config, additional_config)\n\n ctx.instance.runtime_properties['config'] = config\n ctx.instance.update()\n ctx.logger.debug('Updated {0}: {1}'.format(ctx.instance.id, config))",
"def update_config(self, config):\n old_input_device = False\n if hasattr(self, \"_config\"):\n old_input_device = self._config[\"audio_device\"]\n\n if self._is_activated:\n self.deactivate()\n self._config = self.AUDIO_CONFIG_SCHEMA.fget()(config)\n if len(self._callbacks) != 0:\n self.activate()\n if (\n old_input_device\n and self._config[\"audio_device\"] is not old_input_device\n ):\n self._ledfx.events.fire_event(\n AudioDeviceChangeEvent(\n self.input_devices()[self._config[\"audio_device\"]]\n )\n )",
"def test_update_router_config(self):\n old_config = self.create_router_config(\n label='old', config={'test': 'pass'})\n resp = yield self.post('/routers/', old_config)\n router_id = (yield resp.json())['result']['id']\n\n router_config = yield self.api.router_store.get_router_config(\n router_id)\n old_config['id'] = router_id\n self.assertEqual(router_config, old_config)\n router_worker = self.api.service.namedServices[router_id]\n router_worker_config = old_config['config']\n for k, v in router_worker_config.items():\n self.assertEqual(router_worker.config[k], router_worker_config[k])\n\n update = {'config': {'test': 'pass', 'new': 'new'}}\n new_config = deepcopy(old_config)\n new_config.update(update)\n self.assertEqual(new_config['label'], 'old')\n resp = yield self.patch_request(\n '/routers/{}'.format(router_id), update)\n\n yield self.assert_response(\n resp, http.OK, 'router updated', new_config)\n\n router_config = yield self.api.router_store.get_router_config(\n router_id)\n self.assertEqual(router_config, new_config)\n router_worker = self.api.service.namedServices[router_id]\n router_worker_config = new_config['config']\n for k, v in router_worker_config.items():\n self.assertEqual(router_worker.config[k], router_worker_config[k])\n\n router_worker = self.api.service.namedServices[router_id]",
"def update_wps_configuration(self):\n\n # enable WPS\n self.configuration_dict['wps_state'] = '2'\n self.configuration_dict['ap_setup_locked'] = '1'\n self.configuration_dict['uuid'] = '12345678-9abc-def0-1234-56789abcdef0'\n self.configuration_dict['device_name'] = 'Wireless AP'\n self.configuration_dict['manufacturer'] = 'Company'\n self.configuration_dict['model_name'] = 'WAP'\n self.configuration_dict['model_number'] = '123'\n self.configuration_dict['serial_number'] = '12345'\n self.configuration_dict['device_type'] = '6-0050F204-1'\n self.configuration_dict['os_version'] = '01020300'\n self.configuration_dict['config_methods'] =\\\n 'label virtual_display virtual_push_button keypad'\n self.configuration_dict['eap_server'] = '1'",
"def apply_config_changes(cfg):\n # Safety checks\n if type(cfg['warnings/filters']) != list:\n cfg['warnings/filters'] = []\n \n # Adhoc changes\n if type(cfg['gui/dynazoom']) is str:\n cfg['gui/dynazoom'] = [ cfg['gui/dynazoom'], '' ]\n\n for i in range(8):\n t = \"render/light%s\"%i\n try:\n cfg[t] = dict(cfg[t])\n except:\n pass\n\n # Rename settings\n for old,new in [\n ('history','gui/history'),\n ]:\n if old in cfg.keys():\n if new not in cfg.keys():\n cfg[new] = cfg[old]\n del cfg[old]\n\n # Delete settings\n for key in [\n 'input/timeout','filterwarnings',\n 'render/ambient','render/diffuse','render/specular','render/emission',\n 'render/material','canvas/propcolors','Save changes',\n ]:\n if key in cfg.keys():\n print(\"DELETING CONFIG VARIABLE %s\" % key)\n del cfg[key]",
"def update_config(self, config):\n self._accuracy_aware_algo.update_config(config)",
"def update_lvm_config(self, context):\n return self.call(context, self.make_msg('update_lvm_config'))",
"def update_infos(self, project, infor, newvalue):\n self.configp.set(project, infor, newvalue)\n\n with open(self.dumpyfile, 'w+') as dumpyfile:\n self.configp.write(dumpyfile)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update the environment config with the given environment config.
|
def update_environment(self, environment_config: List[dict]) -> None:
self.environment = environment_config
self.__config_cache_dirty = True
|
[
"def update_config():\n g.config = app.config",
"def update_config(self):\n for key_name, entry in self.config.config.items():\n self.update_config_entry(key_name, entry)",
"def update_environment(environment_id, file):\n _confirm_account()\n\n evolv_client = EvolvClient(EVOLV_CONFIG)\n environment = evolv_client.get_environment(environment_id, account_id=EVOLV_ACCOUNT_ID)\n if not environment:\n raise Exception(\"Failed to retrieve the previous environments.\")\n\n response = evolv_client.update_environment(environment_id=environment_id, name=environment['name'],\n content=file.read().decode('utf-8'),\n content_type=APPLICATION_YAML\n if '.yml' in file.name else APPLICATION_JSON,\n account_id=EVOLV_ACCOUNT_ID)\n _print_dict(response)",
"def update(env, zname, **fter):\n a = _amod_settings(env, zname, update_one, fter)\n logger.info('update(): %r', a)",
"def update_environment(self, environment_id, new_name):\n\n return self.murano_client.environments.update(environment_id, new_name)",
"def __update_config(self):\n os.chdir(str(self.__kernel_source_path))\n\n # Could get running config from /proc/config.gz but I'll just copy the newest one in /boot\n # The newest config we have\n src = self.__install_path / self.__current_kernels[0].config\n dest = Path(os.getcwd() + \"/.config\")\n\n script_info(f\"Copying {src.absolute()} to {dest.absolute()}\")\n shutil.copy(src, dest)\n\n script_info(f\"Creating a new config using .config as a base\")\n try:\n subprocess.run([\"make\", \"oldconfig\"], check=True)\n except CalledProcessError as err:\n error_and_exit(err)",
"def update_config(\n config: dict, new_config: dict, scope: str | None, force_local: bool = False\n) -> None:\n if scope and type(config[\"journals\"][scope]) is dict: # Update to journal specific\n config[\"journals\"][scope].update(new_config)\n elif scope and force_local: # Convert to dict\n config[\"journals\"][scope] = {\"journal\": config[\"journals\"][scope]}\n config[\"journals\"][scope].update(new_config)\n else:\n config.update(new_config)",
"def set_config():\n env = get_current_environment()\n app.config.from_object('server.config.{0}Config'.format(env))",
"def upsert_config(config: TgcliConfiguration):\n configs = __read_config_files__()\n configs[config.name] = config\n save_configs(configs)",
"def put_asg_launch_config(self, environment=None, asgname=None, data={}, **kwargs):\n if environment is None or asgname is None:\n raise SyntaxError('Either environment or asgname has not been specified')\n request_endpoint = '/api/v1/asgs/%s/launch-config?environment=%s' % (asgname, environment)\n return self.query(query_endpoint=request_endpoint, query_type='PUT', data=data, **kwargs)",
"def update(self, config: dict):\n for key, value in config.items():\n setattr(self, key, value)",
"def _add_env_kvs(config, env):\n for k, val in config[env].iteritems():\n config[k] = val\n return config",
"def set_config(self, config):\n for key in config.keys():\n self.config[key] = config[key]",
"def put_environmenttype_config(self, environmenttype=None, expected_version=None, data={}, **kwargs):\n if environmenttype is None:\n raise SyntaxError('Environment type has not been specified')\n if expected_version is None:\n headers = ''\n else:\n headers = {'expected-version':expected_version}\n request_endpoint = '/api/v1/config/environment-types/%s' % environmenttype\n return self.query(query_endpoint=request_endpoint, query_type='PUT', headers=headers, data=data, **kwargs)",
"def apply_env():\n\n for node, val in SUPPORTED_ENV.iteritems():\n for param in val:\n env_var = (CONFIG['env_pfx'] + '_' + node + '_' + param).upper()\n env_value = os.environ.get(env_var)\n if env_value is not None:\n CONFIG[node][param] = env_value",
"def update_config(self, mode):\n with utils.environment_edit_in_place('/etc/environment') as env:\n key = 'HADOOP_CONF_DIR' if mode == 'mapreduce' else 'PIG_HOME'\n env['PIG_CLASSPATH'] = env[key]",
"def load_config_from_env(self):\n app_envs = filter(\n lambda s: s.startswith(\n '{}_'.format(self.name.upper())), os.environ.keys())\n for env_key in app_envs:\n if os.environ[env_key]:\n self.config[env_key] = os.environ[env_key]",
"def _update_runtime_properties():\n # Override any values in `config` with values in `additional_config`\n config = inputs['config']\n additional_config = inputs['additional_config']\n _dict_merge(config, additional_config)\n\n ctx.instance.runtime_properties['config'] = config\n ctx.instance.update()\n ctx.logger.debug('Updated {0}: {1}'.format(ctx.instance.id, config))",
"def set_config(**kwargs) -> None:\n _conf.update(kwargs)",
"def update_package_config():\n try:\n import importlib\n import sys\n import json\n\n path = importlib.machinery.PathFinder().find_spec('sentinelhub', sys.path[1:]).submodule_search_locations[0]\n old_config_filename = os.path.join(path, 'config.json')\n\n with open(old_config_filename, 'r') as file:\n old_config = json.load(file)\n\n from sentinelhub.config import SHConfig\n\n config = SHConfig()\n for attr, value in old_config.items():\n if hasattr(config, attr) and not getattr(config, attr):\n setattr(config, attr, value)\n\n config.save()\n\n except BaseException:\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update the project config with the given project config.
|
def update_project(self, project_config: List[dict]) -> None:
self.project = project_config
self.__config_cache_dirty = True
|
[
"def _update_project_config(self, path):\n projects_path = list(set(CONF.get('main', 'projects_path', [])))\n projects_path = list(projects_path)\n projects_path.append(path)\n CONF.set('main', 'projects_path', projects_path)\n self.load_projects()\n self.update_status('')",
"def update_infos(self, project, infor, newvalue):\n self.configp.set(project, infor, newvalue)\n\n with open(self.dumpyfile, 'w+') as dumpyfile:\n self.configp.write(dumpyfile)",
"def update_config():\n g.config = app.config",
"def update_config(self):\n for key_name, entry in self.config.config.items():\n self.update_config_entry(key_name, entry)",
"def update(self, config: dict):\n for key, value in config.items():\n setattr(self, key, value)",
"def config():\n update_config_cli()",
"def upsert_config(config: TgcliConfiguration):\n configs = __read_config_files__()\n configs[config.name] = config\n save_configs(configs)",
"def update(self, settings):\n update_config = settings.configuration\n self.configuration.update(update_config)",
"def update(self, name, config, etag):\n response = self._session.put(\n path=self._session.urljoin(self.RESOURCE_PATH, name).format(base_api=self.base_api),\n headers={\n 'Accept': self._accept_header(),\n 'Content-Type': 'application/json',\n 'If-Match': etag,\n },\n data=json.dumps(config),\n )\n\n etag = response.headers['ETag']\n return TemplateConfig(session=self._session, data=response.json(), etag=etag)",
"def updated(self, newConfiguration):",
"def _update_config(self, config_file, disable_parent_task_update=False, *args, **kwargs):\n config = interface.get_config(config_file)\n #Update global configuration here for printing everything in run() function\n #self.global_config = update(self.global_config, config)\n if not config:\n return kwargs\n if not config.has_section(self._config_section):\n return kwargs\n params = self.get_params()\n param_values = {x[0]:x[1] for x in self.get_param_values(params, args, kwargs)}\n for key, value in self.get_params():\n new_value = None\n # Got a command line option => override config file\n if value.default != param_values.get(key, None):\n new_value = param_values.get(key, None)\n logger.debug(\"option '{0}'; got value '{1}' from command line, overriding configuration file setting default '{2}' for task class '{3}'\".format(key, new_value, value.default, self.__class__))\n else:\n if config.has_key(self._config_section, key):\n new_value = config.get(self._config_section, key)\n if config.has_section(self._config_section, self._config_subsection):\n if config.has_key(self._config_section, key, self._config_subsection):\n new_value = config.get(self._config_section, key, self._config_subsection)\n logger.debug(\"Reading config file, setting '{0}' to '{1}' for task class '{2}'\".format(key, new_value, self.__class__))\n\n if new_value:\n if key == \"parent_task\" and disable_parent_task_update:\n logger.debug(\"disable_parent_task_update set; not updating '{0}' for task class '{1}'\".format(key, self.__class__))\n else:\n kwargs[key] = new_value\n logger.debug(\"Updating config, setting '{0}' to '{1}' for task class '{2}'\".format(key, new_value, self.__class__))\n else:\n pass\n logger.debug(\"Using default value '{0}' for '{1}' for task class '{2}'\".format(value.default, key, self.__class__))\n return kwargs",
"def update_file():\n with open(CONFIG_PATH, \"w\") as configfile:\n config.write(configfile)",
"def apply_config(self, config):\n raise NotImplementedError",
"def update_config(\n config: dict, new_config: dict, scope: str | None, force_local: bool = False\n) -> None:\n if scope and type(config[\"journals\"][scope]) is dict: # Update to journal specific\n config[\"journals\"][scope].update(new_config)\n elif scope and force_local: # Convert to dict\n config[\"journals\"][scope] = {\"journal\": config[\"journals\"][scope]}\n config[\"journals\"][scope].update(new_config)\n else:\n config.update(new_config)",
"def StoreConfig(self, cnxn, config):\n # TODO(jrobbins): Convert default template index values into foreign\n # key references. Updating an entire config might require (1) adding\n # new templates, (2) updating the config with new foreign key values,\n # and finally (3) deleting only the specific templates that should be\n # deleted.\n self.projectissueconfig_tbl.InsertRow(\n cnxn, replace=True,\n project_id=config.project_id,\n statuses_offer_merge=' '.join(config.statuses_offer_merge),\n exclusive_label_prefixes=' '.join(config.exclusive_label_prefixes),\n default_template_for_developers=config.default_template_for_developers,\n default_template_for_users=config.default_template_for_users,\n default_col_spec=config.default_col_spec,\n default_sort_spec=config.default_sort_spec,\n default_x_attr=config.default_x_attr,\n default_y_attr=config.default_y_attr,\n member_default_query=config.member_default_query,\n custom_issue_entry_url=config.custom_issue_entry_url,\n commit=False)\n\n self._UpdateTemplates(cnxn, config)\n self._UpdateWellKnownLabels(cnxn, config)\n self._UpdateWellKnownStatuses(cnxn, config)\n cnxn.Commit()",
"def _load_project_config(self):\n if self._project_config is not None:\n return\n\n query = \"SELECT config_json FROM project_config WHERE config_site = ?\"\n with self.localdb as cursor:\n cursor.execute(query, (self.wikiid,))\n results = cursor.fetchall()\n if not results:\n self._project_config = {\"defaults\": {}, \"projects\": {}}\n return\n raw = json.loads(results[0][0])\n\n config = {\"defaults\": raw[\"defaults\"], \"projects\": {}}\n for project in raw[\"projects\"]:\n config[\"projects\"][project[\"name\"]] = project\n self._project_config = config",
"def edit_config(self, config_name: str=\"default_config\"):\n subprocess.run([\"nano\", f\"{PM_PATH}/{config_name}.toml\"])",
"def set_config(self, config):\n for key in config.keys():\n self.config[key] = config[key]",
"def __update_config(self):\n os.chdir(str(self.__kernel_source_path))\n\n # Could get running config from /proc/config.gz but I'll just copy the newest one in /boot\n # The newest config we have\n src = self.__install_path / self.__current_kernels[0].config\n dest = Path(os.getcwd() + \"/.config\")\n\n script_info(f\"Copying {src.absolute()} to {dest.absolute()}\")\n shutil.copy(src, dest)\n\n script_info(f\"Creating a new config using .config as a base\")\n try:\n subprocess.run([\"make\", \"oldconfig\"], check=True)\n except CalledProcessError as err:\n error_and_exit(err)",
"def update_from_file(self):\n\n with open(self._path, 'r') as file:\n self._config.read_file(file)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update the builtins config with the given builtins config.
|
def update_builtins(self, builtins_config: List[dict]) -> None:
self.builtins = builtins_config
self.__config_cache_dirty = True
|
[
"def load_builtins_and_core(cls, database: hammer_config.HammerDatabase) -> None:\n\n # Load in builtins.\n builtins_yml = resources.files(\"hammer.config\") / \"builtins.yml\"\n database.update_builtins([\n hammer_config.load_config_from_string(builtins_yml.read_text(), True),\n HammerVLSISettings.get_config()\n ])\n\n # Read in core and vendor-common defaults.\n # TODO: vendor-common defaults should be in respective vendor plugin packages\n # and considered tool configs instead\n core_defaults = [] # type: List[dict]\n core_defaults_types = [] # type: List[dict]\n vendors = [\"cadence\", \"synopsys\", \"mentor\", \"openroad\"]\n for pkg in [\"hammer.config\"] + list(map(lambda v: \"hammer.common.\" + v, vendors)):\n config, types = hammer_config.load_config_from_defaults(pkg, types=True)\n core_defaults.extend(config)\n core_defaults_types.extend(types)\n database.update_core(core_defaults, core_defaults_types)",
"def update_config():\n g.config = app.config",
"def update_core(self, core_config: List[dict], core_config_types: List[dict]) -> None:\n self.core = core_config\n self.update_defaults(core_config)\n self.update_types(core_config_types, True)\n self.__config_cache_dirty = True",
"def update_config(self):\n for key_name, entry in self.config.config.items():\n self.update_config_entry(key_name, entry)",
"def config():\n update_config_cli()",
"def reload_config():\n config.reload_config(\"utilipy\")",
"def install_builtins(namespace=None):\n\n if not namespace:\n namespace = __builtin__\n\n if not isinstance(namespace, dict):\n namespace = namespace.__dict__\n\n _globals = globals()\n\n for name in __all__:\n if name in _globals:\n namespace[name] = _globals[name]",
"def _reload_shortcuts(self, shortcuts_dict):\n _ = shortcuts_dict # Keep Pylint happy",
"def handle_version_admin(self, cms_config):\n for versionable in cms_config.versioning:\n register_versionadmin_proxy(versionable)",
"def apply(self):\n\n # Clear\n pyzo.config.shellConfigs2 = []\n\n # Set new versions. Note that although we recreate the list,\n # the list is filled with the orignal structs, so having a\n # reference to such a struct (as the shell has) will enable\n # you to keep track of any made changes.\n for i in range(self._tabs.count()):\n w = self._tabs.widget(i)\n pyzo.config.shellConfigs2.append(w.getInfo())",
"def update():\n update_code()\n update_env()\n symlink()\n set_current()\n permissions()",
"def update_hooks(*loaders):\n global _NATIVE_HOOK\n from importlib.machinery import FileFinder\n \n if loaders:\n for i, hook in enumerate(sys.path_hooks):\n __closure__ = getattr(hook, '__closure__', None)\n if __closure__ and issubclass(__closure__[0].cell_contents, FileFinder):\n _NATIVE_HOOK = globals().get('_NATIVE_HOOK', (i, hook))\n sys.path_hooks[i] = FileFinder.path_hook(*(\n (loader, loader.EXTENSION_SUFFIXES) for loader in loaders\n ), *_NATIVE_HOOK[1].__closure__[1].cell_contents)\n else:\n sys.path_hooks[_NATIVE_HOOK[0]] = _NATIVE_HOOK[1]\n \n \"\"\"https://docs.python.org/3/library/sys.html#sys.path_importer_cache\"\"\"\n sys.path_importer_cache.clear()",
"def update(self, new_mapping):\n for option, value in new_mapping.items():\n if isinstance(value, dict):\n if hasattr(self, option):\n sublevel_config = getattr(self, option)\n else:\n sublevel_config = DocconvertConfiguration(level=option)\n sublevel_config.update(value)\n value = sublevel_config\n setattr(self, option, value)",
"def update_tools(self, tools_config: List[dict], tool_config_types: List[dict]) -> None:\n self.tools = tools_config\n self.update_defaults(tools_config)\n self.update_types(tool_config_types, True)\n self.__config_cache_dirty = True",
"def apply_config_changes(cfg):\n # Safety checks\n if type(cfg['warnings/filters']) != list:\n cfg['warnings/filters'] = []\n \n # Adhoc changes\n if type(cfg['gui/dynazoom']) is str:\n cfg['gui/dynazoom'] = [ cfg['gui/dynazoom'], '' ]\n\n for i in range(8):\n t = \"render/light%s\"%i\n try:\n cfg[t] = dict(cfg[t])\n except:\n pass\n\n # Rename settings\n for old,new in [\n ('history','gui/history'),\n ]:\n if old in cfg.keys():\n if new not in cfg.keys():\n cfg[new] = cfg[old]\n del cfg[old]\n\n # Delete settings\n for key in [\n 'input/timeout','filterwarnings',\n 'render/ambient','render/diffuse','render/specular','render/emission',\n 'render/material','canvas/propcolors','Save changes',\n ]:\n if key in cfg.keys():\n print(\"DELETING CONFIG VARIABLE %s\" % key)\n del cfg[key]",
"def reload_settings():\n refresh_config()",
"def _update_class_for_magic_builtins( self, obj, name):\r\n if not (name.startswith('__') and name.endswith('__') and len(name) > 4):\r\n return\r\n original = getattr(obj.__class__, name)\r\n def updated(self, *kargs, **kwargs):\r\n if (hasattr(self, '__dict__') and type(self.__dict__) is dict and\r\n name in self.__dict__):\r\n return self.__dict__[name](*kargs, **kwargs)\r\n else:\r\n return original(self, *kargs, **kwargs)\r\n setattr(obj.__class__, name, updated)\r\n if _get_code(updated) != _get_code(original):\r\n self._create_placeholder_mock_for_proper_teardown(\r\n obj.__class__, name, original)",
"def apply_config(self, config):\n raise NotImplementedError",
"def update(env, zname, **fter):\n a = _amod_settings(env, zname, update_one, fter)\n logger.info('update(): %r', a)",
"def run_builtin(self, builtin):\n # self.logger.debug('Execute builtin {}', builtin)\n xbmc.executebuiltin(builtin)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update the default configs with the given config list. This dict gets updated with each additional defaults config file.
|
def update_defaults(self, default_configs: List[dict]) -> None:
for c in default_configs:
self.defaults = add_dicts(self.defaults, unpack(c))
|
[
"def load_defaults(defaults_file: list = []):\n cfg = Config(\"configs/default.yaml\")\n # cfg = cfg.update_config(Config(\"configs/dataset.yaml\"))\n for file in defaults_file:\n print(file)\n cfg = deep_update(cfg, Config(file))\n \n cfg = Opts(cfg).parse_args()\n \n cfg = load_enviroment_path(cfg)\n return cfg",
"def update_config(self):\n for key_name, entry in self.config.config.items():\n self.update_config_entry(key_name, entry)",
"def setDefaults(self, defaults=()):\n for key in defaults:\n self._setattr(key, getattr(multiconfig.DefaultConfig, key))",
"def update_defaults(configurator, new_defaults):\n policy_defaults = DEFAULT_VALUES[policy_type(configurator)]\n for key, value in new_defaults.items():\n if key in policy_defaults:\n continue\n configurator.defaults[key] = value",
"def load_config(defaults,config=None,**kwargs):\n\n o = {}\n for item in defaults:\n \n item_list = [None,None,'',None,str]\n item_list[:len(item)] = item \n key, value, comment, groupname, item_type = item_list\n del comment\n if len(item) == 1:\n raise Exception('Option tuple must have at least one element.')\n \n if value is None and (item_type == list or item_type == dict):\n value = item_type()\n \n keypath = key.split('.')\n\n if len(keypath) > 1:\n groupname = keypath[0]\n key = keypath[1]\n \n if groupname:\n group = o.setdefault(groupname,{})\n group[key] = value\n else:\n o[key] = value\n \n update_dict(o,config)\n update_dict(o,kwargs)\n\n return o",
"def load_config():\n config = {}\n for path in CONFIGS:\n try:\n config = parse_config(path)\n break\n except: pass\n\n # merge and override tuples from DEFAULTS by config\n return dict(DEFAULTS, **config)",
"def load_config_with_defaults(cfg_filename, cfg_default_filename):\r\n cfg_defaults = load_config(cfg_default_filename)\r\n cfg = load_config(cfg_filename)\r\n if cfg_filename != cfg_default_filename:\r\n merge_dict(cfg_defaults, cfg)\r\n return cfg",
"def combine_configs(paths, updates):\n configs = []\n for path in paths:\n with open(path) as f:\n configs.append(yaml.load(f))\n return reduce(dict_merge, configs + [updates])",
"def fill_missing(self, default, config):\n if isinstance(default, dict):\n # go over all the keys in the default\n for key in default.keys():\n # if the key is not in the config, add it\n if key not in config:\n config[key] = default[key]\n else:\n config[key] = self.fill_missing(default[key], config[key])\n return config",
"def merge_config(default, override, prefix=None):\n result = dict()\n for k, v in default.items():\n result[k] = v\n\n prefixed_key = \"%s.%s\" % (prefix, k) if prefix else k\n if isinstance(v, dict):\n result[k] = merge_config(v, override[k] if k in override else dict(), prefixed_key)\n else:\n if k in override:\n result[k] = override[k]\n\n return result",
"def update_default_config(config_key, config_value, url=None, db_name=None):\n from pymongo import MongoClient\n if url is None:\n url = 'mongodb://localhost:27017'\n if db_name is None:\n db_name = 'TEST_DB'\n\n client = MongoClient(url)\n db = client[db_name]\n db.runs.update_many({config_key: {\"$exists\": False}}, {'$set': {config_key: config_value}})",
"def override(self, override_config):\n\n for key, new_value in override_config.iteritems():\n if isinstance(new_value, Config):\n cur_value = self.get(key, None)\n if isinstance(cur_value, Config):\n cur_value.override(new_value)\n else:\n self._set(key, new_value)\n else:\n self._set(key, new_value)",
"def propagate_defaults(self, requiredvars, config, defaultsection=None):\n for option, infodic in requiredvars.items():\n if 'section' in infodic:\n section = infodic['section']\n else:\n section = defaultsection\n\n default = infodic['default']\n\n if not config.has_section(section):\n config.add_section(section)\n\n if not config.has_option(section, option):\n config.set(section, option, default)",
"def test_multi_file_dict_list_config(self):\n class MySchema(Schema):\n foo = ListOption(\n item=DictOption(spec={\n 'bar': IntOption(),\n 'baz': IntOption(),\n }, strict=True))\n\n config1 = StringIO('[__main__]\\nfoo=mydict\\n[mydict]\\nbar=1\\nbaz=1')\n expected_values = {'__main__': {'foo': [{'bar': 1, 'baz': 1}]}}\n\n parser = SchemaConfigParser(MySchema())\n parser.readfp(config1)\n self.assertEqual(parser.values(), expected_values)\n\n # override used dictionaries\n config2 = StringIO('[__main__]\\nfoo=otherdict\\n[otherdict]\\nbar=2')\n expected_values = {'__main__': {'foo': [{'bar': 2, 'baz': 0}]}}\n parser.readfp(config2)\n self.assertEqual(parser.values(), expected_values)\n\n # override existing dictionaries\n config3 = StringIO('[otherdict]\\nbaz=3')\n expected_values = {'__main__': {'foo': [{'bar': 2, 'baz': 3}]}}\n parser.readfp(config3)\n self.assertEqual(parser.values(), expected_values)\n\n # reuse existing dict\n config4 = StringIO('[__main__]\\nfoo=mydict\\n otherdict')\n expected_values = {'__main__': {'foo': [{'bar': 1, 'baz': 1},\n {'bar': 2, 'baz': 3}]}}\n parser.readfp(config4)\n self.assertEqual(parser.values(), expected_values)",
"def restore_defaults(self):\n\n # Set default values for each of the pysat provided values. Set\n # all but the last parameter directly. Set last using __setitem__\n # to trigger a file write.\n keys = list(self.defaults.keys())\n for key in keys:\n self.data[key] = self.defaults[key]\n\n # Trigger a file write\n self.store()\n\n return",
"def __SetMissingDefaultConfigValues(self, config={}):\n config = super(DfpClient, self)._SetMissingDefaultConfigValues(config)\n default_config = {\n 'home': DfpClient.home,\n 'log_home': os.path.join(DfpClient.home, 'logs')\n }\n for key in default_config:\n if key not in config:\n config[key] = default_config[key]\n return config",
"def merge_config_files(fnames):\n def _load_yaml(fname):\n with open(fname) as in_handle:\n config = yaml.load(in_handle)\n\n return config\n\n out = _load_yaml(fnames[0])\n for fname in fnames[1:]:\n cur = _load_yaml(fname)\n for k, v in cur.iteritems():\n if k in out and isinstance(out[k], dict):\n out[k].update(v)\n else:\n out[k] = v\n\n return out",
"def add_setting_defaults(newdefaults):\n sph_settings_defaults.update(newdefaults)",
"def update_defaults(self):\r\n # setting names\r\n settings_names = (\"CMDSET_CHARACTER\", \"CMDSET_PLAYER\",\r\n \"BASE_PLAYER_TYPECLASS\", \"BASE_OBJECT_TYPECLASS\",\r\n \"BASE_CHARACTER_TYPECLASS\", \"BASE_ROOM_TYPECLASS\",\r\n \"BASE_EXIT_TYPECLASS\", \"BASE_SCRIPT_TYPECLASS\",\r\n \"BASE_CHANNEL_TYPECLASS\")\r\n # get previous and current settings so they can be compared\r\n settings_compare = zip([ServerConfig.objects.conf(name) for name in settings_names],\r\n [settings.__getattr__(name) for name in settings_names])\r\n mismatches = [i for i, tup in enumerate(settings_compare) if tup[0] and tup[1] and tup[0] != tup[1]]\r\n if len(mismatches): # can't use any() since mismatches may be [0] which reads as False for any()\r\n # we have a changed default. Import relevant objects and\r\n # run the update\r\n from src.objects.models import ObjectDB\r\n from src.comms.models import ChannelDB\r\n #from src.players.models import PlayerDB\r\n for i, prev, curr in ((i, tup[0], tup[1]) for i, tup in enumerate(settings_compare) if i in mismatches):\r\n # update the database\r\n print \" %s:\\n '%s' changed to '%s'. Updating unchanged entries in database ...\" % (settings_names[i], prev, curr)\r\n if i == 0:\r\n [obj.__setattr__(\"cmdset_storage\", curr) for obj in ObjectDB.objects.filter(db_cmdset_storage__exact=prev)]\r\n if i == 1:\r\n [ply.__setattr__(\"cmdset_storage\", curr) for ply in PlayerDB.objects.filter(db_cmdset_storage__exact=prev)]\r\n if i == 2:\r\n [ply.__setattr__(\"typeclass_path\", curr) for ply in PlayerDB.objects.filter(db_typeclass_path__exact=prev)]\r\n if i in (3, 4, 5, 6):\r\n [obj.__setattr__(\"typeclass_path\", curr) for obj in ObjectDB.objects.filter(db_typeclass_path__exact=prev)]\r\n if i == 7:\r\n [scr.__setattr__(\"typeclass_path\", curr) for scr in ScriptDB.objects.filter(db_typeclass_path__exact=prev)]\r\n if i == 8:\r\n [scr.__setattr__(\"typeclass_path\", curr) for scr in ChannelDB.objects.filter(db_typeclass_path__exact=prev)]\r\n # store the new default and clean caches\r\n ServerConfig.objects.conf(settings_names[i], curr)\r\n ObjectDB.flush_instance_cache()\r\n PlayerDB.flush_instance_cache()\r\n ScriptDB.flush_instance_cache()\r\n ChannelDB.flush_instance_cache()\r\n # if this is the first start we might not have a \"previous\"\r\n # setup saved. Store it now.\r\n [ServerConfig.objects.conf(settings_names[i], tup[1])\r\n for i, tup in enumerate(settings_compare) if not tup[0]]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
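The `update_defaults` document above folds each defaults file into one flat dict via `unpack` and `add_dicts`, neither of which appears in this row. A minimal self-contained sketch of the flatten-then-merge pattern they suggest; the helper bodies and the sample configs are illustrative assumptions, not the library's actual code:

```python
from typing import Any, Dict, List

def unpack(config: Dict[str, Any], prefix: str = "") -> Dict[str, Any]:
    # Hypothetical helper: flatten nested dicts into dotted keys,
    # e.g. {"tool": {"name": "yosys"}} -> {"tool.name": "yosys"}.
    flat: Dict[str, Any] = {}
    for key, value in config.items():
        dotted = f"{prefix}.{key}" if prefix else key
        if isinstance(value, dict):
            flat.update(unpack(value, dotted))
        else:
            flat[dotted] = value
    return flat

def add_dicts(base: Dict[str, Any], overlay: Dict[str, Any]) -> Dict[str, Any]:
    # Hypothetical helper: later configs win over earlier ones.
    merged = dict(base)
    merged.update(overlay)
    return merged

defaults: Dict[str, Any] = {}
default_configs: List[dict] = [
    {"tool": {"name": "yosys"}},             # earlier defaults file
    {"tool": {"threads": 4}, "top": "cpu"},  # later defaults file extends/overrides
]
for c in default_configs:
    defaults = add_dicts(defaults, unpack(c))
print(defaults)  # {'tool.name': 'yosys', 'tool.threads': 4, 'top': 'cpu'}
```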
Update the types config with the given types config.
|
def update_types(self, config_types: List[dict], check_type: bool = True) -> None:
loaded_cfg = combine_configs(config_types)
self.__config_types.update(loaded_cfg)
if check_type:
for k, v in loaded_cfg.items():
if not self.has_setting(k):
self.logger.warning(f"Key {k} has a type {v} is not yet implemented")
elif k != "_config_path":
self.check_setting(k)
|
[
"def update_technology(self, technology_config: List[dict], technology_config_types: List[dict]) -> None:\n self.technology = technology_config\n self.update_defaults(technology_config)\n self.update_types(technology_config_types, True)\n self.__config_cache_dirty = True",
"def update_core(self, core_config: List[dict], core_config_types: List[dict]) -> None:\n self.core = core_config\n self.update_defaults(core_config)\n self.update_types(core_config_types, True)\n self.__config_cache_dirty = True",
"def alter_config_and_load_prorietary_datatypes( app, datatypes_config, relative_install_dir, deactivate=False, override=True ):\n tree, error_message = xml_util.parse_xml( datatypes_config )\n if tree is None:\n return None, None\n datatypes_config_root = tree.getroot()\n registration = datatypes_config_root.find( 'registration' )\n if registration is None:\n # We have valid XML, but not a valid proprietary datatypes definition.\n return None, None\n sniffers = datatypes_config_root.find( 'sniffers' )\n converter_path, display_path = get_converter_and_display_paths( registration, relative_install_dir )\n if converter_path:\n # Path to datatype converters\n registration.attrib[ 'proprietary_converter_path' ] = converter_path\n if display_path:\n # Path to datatype display applications\n registration.attrib[ 'proprietary_display_path' ] = display_path\n relative_path_to_datatype_file_name = None\n datatype_files = datatypes_config_root.find( 'datatype_files' )\n datatype_class_modules = []\n if datatype_files is not None:\n # The <datatype_files> tag set contains any number of <datatype_file> tags.\n # <datatype_files>\n # <datatype_file name=\"gmap.py\"/>\n # <datatype_file name=\"metagenomics.py\"/>\n # </datatype_files>\n # We'll add attributes to the datatype tag sets so that the modules can be properly imported by the datatypes registry.\n for elem in datatype_files.findall( 'datatype_file' ):\n datatype_file_name = elem.get( 'name', None )\n if datatype_file_name:\n # Find the file in the installed repository.\n for root, dirs, files in os.walk( relative_install_dir ):\n if root.find( '.hg' ) < 0:\n for name in files:\n if name == datatype_file_name:\n datatype_class_modules.append( os.path.join( root, name ) )\n break\n break\n if datatype_class_modules:\n for relative_path_to_datatype_file_name in datatype_class_modules:\n datatype_file_name_path, datatype_file_name = os.path.split( relative_path_to_datatype_file_name )\n for elem in registration.findall( 'datatype' ):\n # Handle 'type' attribute which should be something like one of the following:\n # type=\"gmap:GmapDB\"\n # type=\"galaxy.datatypes.gmap:GmapDB\"\n dtype = elem.get( 'type', None )\n if dtype:\n fields = dtype.split( ':' )\n proprietary_datatype_module = fields[ 0 ]\n if proprietary_datatype_module.find( '.' ) >= 0:\n # Handle the case where datatype_module is \"galaxy.datatypes.gmap\".\n proprietary_datatype_module = proprietary_datatype_module.split( '.' 
)[ -1 ]\n # The value of proprietary_path must be an absolute path due to job_working_directory.\n elem.attrib[ 'proprietary_path' ] = os.path.abspath( datatype_file_name_path )\n elem.attrib[ 'proprietary_datatype_module' ] = proprietary_datatype_module\n # Temporarily persist the proprietary datatypes configuration file so it can be loaded into the datatypes registry.\n fd, proprietary_datatypes_config = tempfile.mkstemp( prefix=\"tmp-toolshed-acalpd\" )\n os.write( fd, '<?xml version=\"1.0\"?>\\n' )\n os.write( fd, '<datatypes>\\n' )\n os.write( fd, '%s' % xml_util.xml_to_string( registration ) )\n if sniffers is not None:\n os.write( fd, '%s' % xml_util.xml_to_string( sniffers ) )\n os.write( fd, '</datatypes>\\n' )\n os.close( fd )\n os.chmod( proprietary_datatypes_config, 0644 )\n # Load proprietary datatypes\n app.datatypes_registry.load_datatypes( root_dir=app.config.root, config=proprietary_datatypes_config, deactivate=deactivate, override=override )\n if deactivate:\n # Reload the upload tool to eliminate deactivated datatype extensions from the file_type select list.\n tool_util.reload_upload_tools( app )\n else:\n append_to_datatypes_registry_upload_file_formats( app, registration )\n tool_util.reload_upload_tools( app )\n if datatype_files is not None:\n try:\n os.unlink( proprietary_datatypes_config )\n except:\n pass\n return converter_path, display_path",
"def register_type(self, key, *types):\n assert key in self._config\n self._types[key] = set(types)",
"def reload_types(self):\n current_version = self._type_system_version()\n if Manager._type_registry_cache:\n cached_registry, version = Manager._type_registry_cache\n if current_version == version:\n log.debug(\n 'using cached type registry, version: %s', current_version)\n self.type_registry = cached_registry.clone()\n return\n\n self.type_registry = TypeRegistry()\n registry = self.type_registry\n\n for type_id, bases, attrs in self.get_type_hierarchy():\n try:\n cls = registry.get_class_by_id(type_id)\n\n if not registry.is_dynamic_type(cls):\n cls = None\n except UnknownType:\n cls = None\n\n if cls is None:\n bases = tuple(registry.get_class_by_id(base) for base in bases)\n registry.create_type(str(type_id), bases, attrs)\n\n registry._types_in_db.add(type_id)\n\n Manager._type_registry_cache = (\n self.type_registry.clone(),\n current_version\n )",
"def _setDataTypes(self, datatypes):\r\n \r\n self._dataTypes.clear()\r\n for dataType in datatypes:\r\n self.addDataType(dataType)",
"def update_config(self):\n for key_name, entry in self.config.config.items():\n self.update_config_entry(key_name, entry)",
"def addTypes(self,*types):\n types = tuple([t for t in iterTypes(types) if t not in self.types])\n refs = len([t for t in types if issubclass(t,Entity)])\n\n if (refs or self.isReference) and refs<>len(types):\n raise TypeError(\"Cannot mix entity and value types in one role\")\n if len(self.types+types)>1:\n raise TypeError(\"Multiple value types not allowed in one role\")\n if refs:\n self.isReference = True\n self._setattr('types',self.types + types)\n self.setDoc() # update the doc string",
"def update_mimetypes_mapping(cls) -> None:\n for mimetypes_mapping in cls.get_mimetypes_mapping():\n # INFO - G.M - 2019-11-22 - mimetype are added as strict to force override of default\n # system/mimetype lib value, which is needed for type like .obj where system type can be\n # \"text/plain\" or \"application/octet-stream\"\n mimetypes_storage.add_type( # type: ignore\n type=mimetypes_mapping.mimetype, ext=mimetypes_mapping.file_extension, strict=True\n )",
"def webhook_types(self, webhook_types):\n\n self._webhook_types = webhook_types",
"def SetTypeFilter(self, type_strings):\n self.types = type_strings\n #refesh tree and list\n self.RefreshTree()\n self.RefreshNameList()",
"def pool_types(self, pool_types):\n\n self._pool_types = pool_types",
"def update(self, keywords, drawTypes):\n\n\t\tfor k, v in keywords.iteritems():\n\t\t\tself.var[k] = v\n\t\t\t#print 'deb:settings_update var %s= %s' %(k, self.var[k]) #--------------\n\t\tfor t, v in drawTypes.iteritems():\n\t\t\tself.drawTypes[t] = v\n\t\t\t#print 'deb:settings_update drawType %s= %s' %(t, self.drawTypes[t]) #--------------\n\n\t\tself.drawTypes['arc'] = self.drawTypes['line']\n\t\tself.drawTypes['circle'] = self.drawTypes['line']\n\t\tself.drawTypes['ellipse'] = self.drawTypes['line']\n\t\tself.drawTypes['trace'] = self.drawTypes['solid']\n\t\tself.drawTypes['insert'] = self.drawTypes['block']\n\t\t#self.drawTypes['vport'] = self.drawTypes['view']\n\n\t\t#print 'deb:self.drawTypes', self.drawTypes #---------------",
"def re_index_types(self):\n\n for (index, atom_type) in enumerate(self.atom_types, 1):\n atom_type.index = index\n\n for (index, bond_type) in enumerate(self.bond_types, 1):\n bond_type.index = index\n\n for (index, angle_type) in enumerate(self.angle_types, 1):\n angle_type.index = index\n\n index = 1\n for dihedral_type in self.dihedral_types:\n if isinstance(dihedral_type.index, list):\n for i in range(len(dihedral_type.index)):\n dihedral_type.index[i] = index\n index += 1\n else:\n dihedral_type.index = index\n index += 1\n\n for (index, improper_type) in enumerate(self.improper_types, 1):\n improper_type.index = index",
"def update_type_collections(self, ctx, params):\n # ctx is the context object\n # return variables are: output\n #BEGIN update_type_collections\n message = update_type_collections(self.ws, self.re_api_url, ctx['token'])\n logging.info(message)\n output = self.make_report(message, params['workspace_id'])\n\n #END update_type_collections\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method update_type_collections return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]",
"def update_tools(self, tools_config: List[dict], tool_config_types: List[dict]) -> None:\n self.tools = tools_config\n self.update_defaults(tools_config)\n self.update_types(tool_config_types, True)\n self.__config_cache_dirty = True",
"def applyType(self, typedef):\n # Convert to protobuf as old type and re-interpret as new type\n old_message_type = self.message_type\n json_data = self._text_editor.getText().tostring()\n protobuf_data = blackboxprotobuf.protobuf_from_json(json_data, old_message_type)\n new_json, message_type = blackboxprotobuf.protobuf_to_json(str(protobuf_data), typedef)\n\n # Should exception out before now if there is an issue\n # Set the message type and reparse with the new type\n self.message_type = message_type\n self._text_editor.setText(str(new_json))\n\n message_hash = self.getMessageHash()\n self._extender.known_types[message_hash] = message_type",
"def editType(self, typedef):\n # TODO this is kind of an ugly hack. Should redo how these are referenced\n # probably means rewriting a bunch of the editor\n old_source = self._source_typedef\n if old_source is None:\n old_source = {}\n old_source.clear()\n old_source.update(typedef)\n self.applyType(old_source)",
"def update_xlsx(self, types):\n\n reset_freq = True\n cdd = self.list()\n for project in cdd:\n if types == \"vindex\":\n print(f\"*UPDATING VINDEX for {cdd[project]}...\")\n in_fn = os.path.join(cdd[project], in_vindex)\n t = ExcelTool(self.conf_fn, in_fn, self.out_dir)\n if reset_freq:\n t.reset_freq(\"vindex\")\n reset_freq = False\n t.vindex_from_conf()\n elif types == \"translate\":\n in_fn = os.path.join(cdd[project], in_vfix)\n print(f\"*UPDATING TRANSLATION LIST from '{in_fn}'\")\n t = ExcelTool(self.conf_fn, in_fn, self.out_dir)\n if reset_freq:\n t.reset_freq(\"translate\")\n reset_freq = False\n t.translate_from_conf()\n else:\n raise TypeError(\"Unknown type\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
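The `update_types` document above leans on `combine_configs`, `has_setting` and `check_setting`, none of which is shown in this row. A standalone sketch of the same combine-then-validate flow, with a plain dict lookup standing in for the missing helpers; everything here is an illustrative assumption:

```python
from typing import Any, Dict, List

def combine_configs(configs: List[dict]) -> Dict[str, Any]:
    # Assumed semantics: a simple later-wins merge of the type declarations.
    merged: Dict[str, Any] = {}
    for cfg in configs:
        merged.update(cfg)
    return merged

settings: Dict[str, Any] = {"core.max_threads": 8}  # values already loaded
config_types: Dict[str, str] = {}                   # accumulated type declarations

def update_types(new_types: List[dict], check_type: bool = True) -> None:
    loaded = combine_configs(new_types)
    config_types.update(loaded)
    if check_type:
        for key, declared in loaded.items():
            if key not in settings:
                print(f"warning: {key} is declared as {declared} but has no value yet")

update_types([{"core.max_threads": "int"}, {"core.technology": "str"}])
# -> warning: core.technology is declared as str but has no value yet
```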
Load config from a string by loading it and unpacking it.
|
def load_config_from_string(contents: str, is_yaml: bool, path: str = "unspecified") -> dict:
unpacked = unpack(load_yaml(contents) if is_yaml else json.loads(contents))
unpacked[_CONFIG_PATH_KEY] = path
return unpacked
|
[
"def init_from_string(config_str):\n from StringIO import StringIO\n mconf = ConfigParser()\n mconf.readfp(StringIO(config_str))\n init_from_config(mconf)",
"def loadFromString(self, cfg_string):\n import StringIO\n fp = StringIO.StringIO(cfg_string)\n self.readfp(fp)\n fp.close()\n self.fileName = None\n self.fileMtime = time.time()\n return True",
"def parseErlangConfig(string):\n try:\n config = erlangConfig.parseString(string)\n # Convert to plain dict (so it can be pickled when using\n # multiprocessing).\n config = convertToDict(config)\n return config\n except ParseException, err:\n logging.error(err.line)\n logging.error(\" \"*(err.column-1) + \"^\")\n logging.error(err)\n raise",
"def loads(self, string, serializer_name):\r\n serializer = self._settings_global_serializers[serializer_name]\r\n raw_settings = serializer.loads(string)\r\n self.__do_load(raw_settings)",
"def load_from_string(cls, yaml_str):\n return cls(**cls._get_client_kwargs_from_yaml(yaml_str))",
"def parseErlangConfig(str):\n try:\n config = erlangConfig.parseString(str)\n # Convert to plain dict (so it can be pickled when using\n # multiprocessing).\n config = convertToDict(config)\n return config\n except ParseException, err:\n #logging.error(err.line)\n #logging.error(\" \"*(err.column-1) + \"^\")\n #logging.error(err)\n #raise\n return []",
"def _parse_config(self):\n for line in self.config_str.split('\\n'):\n line = line.strip()\n if line and line[0] != '#':\n key, value = line.split('=')\n key = key.strip()\n value = value.strip()\n \n if not Config.RE_HAS_VAR_REF.match(value):\n # no variable references e.g. ${var}, so we evaluate\n # the expression to get value into the correct \n # Python type\n try:\n value = eval(value.strip(), self._globals, self._locals)\n # we evaluated it successfully so record it as such\n self._evaluated.append(key)\n except SyntaxError:\n # if there is a syntax error we'll just use the \n # variable as a string\n value = value.strip()\n self[key.strip()] = value\n\n self._check_dependencies()\n\n # finished parsing lets evaluate each variable\n while len(self.keys()) != len(self._evaluated):\n for var_name in self.keys():\n self._get_var_value(var_name)",
"def load_config(filename=None):\n try:\n with _config_stream(filename) as handle:\n filename = handle.name\n return deserialize_config(handle.read())\n except (OSError, toml.TomlDecodeError, UnicodeDecodeError) as exc:\n raise ConfigError(\"Error loading configuration from {}\".format(filename)) from exc",
"def loadFromString(self, xmlstring):\n self.fileName = None\n self.fileMtime = time.time()\n\n try:\n self._xml = ElementTree.XML(xmlstring)\n except Exception, e:\n raise ConfigFileNotValid(\"%s\" % e)\n\n self._loadSettings()\n return True",
"def LoadConfigFromString(json_string):\n config_dict = json.loads(json_string, object_hook=_DecodeDict)\n\n # Use standard defaults, but allow the config to override.\n defaults = DefaultSettings()\n defaults.update(config_dict.pop(DEFAULT_BUILD_CONFIG))\n\n _UpdateConfig(defaults)\n\n templates = config_dict.pop('_templates', None)\n\n site_params = DefaultSiteParameters()\n site_params.update(config_dict.pop('_site_params', {}))\n\n defaultBuildConfig = BuildConfig(**defaults)\n\n builds = {n: _CreateBuildConfig(n, defaultBuildConfig, v, templates)\n for n, v in config_dict.iteritems()}\n\n # config is the struct that holds the complete cbuildbot config.\n result = SiteConfig(defaults=defaults, templates=templates,\n site_params=site_params)\n result.update(builds)\n\n return result",
"def from_yaml_string(yaml_str):\n return from_dict(yaml.load(yaml_str))",
"def load_config():\n return config.load_config({})",
"def load_from(self, path):\n self.config.read(path)",
"def from_text(stream,raise_on_error=False):\n if not isinstance(stream, Loader):\n stream = StreamParsingLoader(stream)\n \n return Configuration().load(stream, raise_on_error)",
"def load(self):\n self.config.read(\"config.py\")\n pass",
"def load_config(msg):\n import ujson as json\n try:\n config = json.loads(msg)\n except (OSError, ValueError):\n print(\"Couldn't load config from JSON.\")\n else:\n set_relay(config['power'])",
"def from_bytes(self, bytes_data: bytes) -> \"Config\":\n return self.from_str(bytes_data.decode(\"utf8\"))",
"def loads(self, s):\n\n raise NotImplementedError()",
"def load_message_string(string):\n return pickle.loads(string)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
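For the `load_config_from_string` document above, `unpack`, `load_yaml` and the exact `_CONFIG_PATH_KEY` value are not visible in this row. A stripped-down usage sketch that keeps only the YAML/JSON branch and the path bookkeeping; the key name is an assumption, and PyYAML is assumed to be installed:

```python
import json

import yaml  # PyYAML, assumed available

_CONFIG_PATH_KEY = "_config_path"  # assumed name of the bookkeeping key

def load_config_from_string(contents: str, is_yaml: bool, path: str = "unspecified") -> dict:
    # Simplified: the real helper also flattens nested keys via unpack().
    data = yaml.safe_load(contents) if is_yaml else json.loads(contents)
    data[_CONFIG_PATH_KEY] = path
    return data

print(load_config_from_string('{"top": "cpu"}', is_yaml=False, path="defaults.json"))
# {'top': 'cpu', '_config_path': 'defaults.json'}
print(load_config_from_string("top: cpu\nthreads: 4\n", is_yaml=True, path="defaults.yml"))
# {'top': 'cpu', 'threads': 4, '_config_path': 'defaults.yml'}
```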
Load config from a package's defaults.
|
def load_config_from_defaults(package: str, types: bool = False) -> Tuple[List[dict], List[dict]]:
package_path = importlib.resources.files(package)
json_file = package_path / "defaults.json"
json_types_file = package_path / "defaults_types.json"
yaml_file = package_path / "defaults.yml"
yaml_types_file = package_path / "defaults_types.yml"
config_list: List[dict] = []
config_types_list: List[dict] = []
if json_file.is_file():
config_list.append(load_config_from_string(json_file.read_text(), False, str(package_path)))
if json_types_file.is_file() and types:
config_types_list.append(load_config_from_string(json_types_file.read_text(), False, str(package_path)))
if yaml_file.is_file():
config_list.append(load_config_from_string(yaml_file.read_text(), True, str(package_path)))
if yaml_types_file.is_file() and types:
config_types_list.append(load_config_from_string(yaml_types_file.read_text(), True, str(package_path)))
return (config_list, config_types_list)
|
[
"def _load_defaults(self):\n module = self._do_import(self._defaults_module_path)\n self._defaults = {\n k: v for k, v in module.__dict__.items()\n if k.isupper() # ignore anything that doesn't look like a setting\n }",
"def load_defaults(defaults_file: list = []):\n cfg = Config(\"configs/default.yaml\")\n # cfg = cfg.update_config(Config(\"configs/dataset.yaml\"))\n for file in defaults_file:\n print(file)\n cfg = deep_update(cfg, Config(file))\n \n cfg = Opts(cfg).parse_args()\n \n cfg = load_enviroment_path(cfg)\n return cfg",
"def load_defaults(self):\n try:\n f = open(\"defaults.json\")\n options = f.read()\n f.close()\n except IOError:\n self.log.error(\"Could not load defaults file.\")\n self.inform.emit(\"ERROR: Could not load defaults file.\")\n return\n\n try:\n defaults = json.loads(options)\n except:\n e = sys.exc_info()[0]\n App.log.error(str(e))\n self.inform.emit(\"ERROR: Failed to parse defaults file.\")\n return\n self.defaults.update(defaults)",
"def load_config():\n return config.load_config({})",
"def load_config_with_defaults(cfg_filename, cfg_default_filename):\r\n cfg_defaults = load_config(cfg_default_filename)\r\n cfg = load_config(cfg_filename)\r\n if cfg_filename != cfg_default_filename:\r\n merge_dict(cfg_defaults, cfg)\r\n return cfg",
"def load_config():\n config = {}\n for path in CONFIGS:\n try:\n config = parse_config(path)\n break\n except: pass\n\n # merge and override tuples from DEFAULTS by config\n return dict(DEFAULTS, **config)",
"def load_config(defaults,config=None,**kwargs):\n\n o = {}\n for item in defaults:\n \n item_list = [None,None,'',None,str]\n item_list[:len(item)] = item \n key, value, comment, groupname, item_type = item_list\n del comment\n if len(item) == 1:\n raise Exception('Option tuple must have at least one element.')\n \n if value is None and (item_type == list or item_type == dict):\n value = item_type()\n \n keypath = key.split('.')\n\n if len(keypath) > 1:\n groupname = keypath[0]\n key = keypath[1]\n \n if groupname:\n group = o.setdefault(groupname,{})\n group[key] = value\n else:\n o[key] = value\n \n update_dict(o,config)\n update_dict(o,kwargs)\n\n return o",
"def loadConfig(self):\n pass",
"def load_config():\n f = open(os.path.expanduser(\"~/.baas/python_test_config.yaml\"), 'r')\n config = yaml.load(f)\n f.close()\n return config",
"def read_config():\n\n with open(PYDOCMD_CONFIG) as fp:\n config = yaml.load(fp)\n return default_config(config)",
"def init(args):\n Configuration.load_config(vars(args).get(\"config\"))",
"def load(self):\n self.config.read(\"config.py\")\n pass",
"def load_or_import_from_config(key, app=None, default=None):\n app = app or current_app\n value = app.config.get(key)\n return obj_or_import_string(value, default=default)",
"def parse_config(path, default_config):\n with open(path) as f:\n config = json.load(f)\n\n if not isinstance(config, dict):\n raise json.JSONDecodeError(\"It's not a dict\")\n\n if not config:\n return default_config\n\n for k in default_config.keys():\n if k in config:\n default_config[k] = config[k]\n\n return default_config",
"def _load_raw_config(config_file=None):\n\n if config_file is None:\n # see if location is overridden via the environment\n config_file = os.environ.get(\"ADAM_CONFIG\", None)\n\n if config_file is None:\n # get the default location (if exists)\n config_dir = next(xdgb.load_config_paths(\"adam\"), None)\n if config_dir is not None:\n def_config_file = os.path.join(config_dir, ADAM_CONFIG_FN)\n if os.path.exists(def_config_file):\n config_file = def_config_file\n\n if config_file is None:\n return \"\", {'envs': {}}\n\n # Load the config file (if we have it)\n with open(config_file) as fp:\n return config_file, yaml.safe_load(fp)",
"def __init__(self, config, defaults={}):\n self._defaults = defaults\n\n # Check and fix paths in default options\n for id, (val, type) in self._defaults.items() :\n if type == 'src':\n val = ConfigFile.abspath(val)\n if not self.check(id, val): continue # Don't use wrong paths\n\n # Insert into dictionary if is a valid one.\n self._defaults[id] = [val, type]\n\n # Add defaults to options.\n self._opts.update(self._defaults)\n\n # Open and read config file\n self._config = ConfigFile(config)\n self.read()",
"def _load_configurations(self):\n with open(self.config_file) as f:\n configs = f.read()\n config = ConfigParser.RawConfigParser(allow_no_value=True)\n config.readfp(io.BytesIO(configs))\n self.config = config\n #\n self.cert_file = self.config.get(\"cert-paths\", \"cert_file\")",
"def load_config(path):\n config = get_default_config()\n\n if not os.path.exists(path):\n print (\"WARN: no config file could be found at %s\" % path)\n else:\n try:\n with open(path, \"r\") as f:\n config_file = yaml.safe_load(f)\n config = merge_config(config, config_file if config_file else {})\n except Exception as e:\n print (\"WARN: invalid configuration file at %s: %s\" % (path, e))\n\n return config",
"def load_configuration(app):\n app.config.from_pyfile(\"../../../config/service_config.py\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
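The `load_config_from_defaults` document above probes a package for `defaults.*` and `defaults_types.*` files via `importlib.resources`. A runnable sketch of just that probing pattern, JSON branch only; the demo package name and the bookkeeping key are illustrative, and a YAML branch would mirror the JSON one with `yaml.safe_load`:

```python
import importlib.resources
import json
from typing import List

def load_package_defaults(package: str) -> List[dict]:
    # Look for a defaults.json shipped alongside the package's Python sources.
    pkg_path = importlib.resources.files(package)
    found: List[dict] = []
    candidate = pkg_path / "defaults.json"
    if candidate.is_file():
        cfg = json.loads(candidate.read_text())
        cfg["_config_path"] = str(pkg_path)  # assumed bookkeeping key, as above
        found.append(cfg)
    return found

# Any installed package works as a demo target; one that ships no defaults.json
# simply yields an empty list.
print(load_package_defaults("json"))  # [] -- the stdlib json package has no defaults.json
```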
Parses a configuration type.
|
def parse_setting_type(setting_type: str) -> ConfigType:
m_prim = re.search(PRIMARY_REGEX, setting_type)
m_sec = re.search(INNER_REGEX, setting_type)
if m_prim is None:
raise RuntimeError("Not a valid configuration type")
primary_type = m_prim.group(0)
if primary_type == "Optional":
if m_sec is None:
raise RuntimeError("Not a valid inner configuration type")
opt_type = m_sec.group(1)
recursive_type = parse_setting_type(opt_type)
return ConfigType(
NamedType(recursive_type.primary),
optional=True,
secondary=NamedType(recursive_type.secondary),
tertiary_k=NamedType(recursive_type.tertiary_k),
tertiary_v=NamedType(recursive_type.tertiary_v)
)
if primary_type == "list":
if m_sec is None:
raise RuntimeError("Not a valid inner configuration type")
secondary_type_full = m_sec.group(1)
m_sec_flat = re.search(PRIMARY_REGEX, secondary_type_full)
if m_sec_flat is None:
raise RuntimeError("Not a valid inner configuration type")
secondary_type_flat = m_sec_flat.group(0)
if secondary_type_flat == "dict":
m_sec_inner = re.search(DICT_REGEX, secondary_type_full)
if m_sec_inner is None:
raise RuntimeError("Not a valid inner dictionary type")
tertiary_k, tertiary_v = m_sec_inner.groups()[:2]
return ConfigType(
NamedType(primary_type),
secondary=NamedType(secondary_type_flat),
tertiary_k=NamedType(tertiary_k),
tertiary_v=NamedType(tertiary_v)
)
else:
return ConfigType(NamedType(primary_type), secondary=NamedType(secondary_type_flat))
return ConfigType(NamedType(primary_type))
|
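The `parse_setting_type` document above depends on `PRIMARY_REGEX`, `INNER_REGEX`, `DICT_REGEX`, `NamedType` and `ConfigType`, none of which appears in this row. A minimal self-contained sketch of the same recursive, regex-driven parsing, using invented regexes and a plain dataclass in place of those types (illustrative only):

```python
import re
from dataclasses import dataclass
from typing import Optional

PRIMARY_REGEX = r"^\w+"                      # leading type name: "list", "dict", "Optional", "int", ...
INNER_REGEX = r"^\w+\[(.*)\]$"               # whatever sits inside the outermost brackets
DICT_REGEX = r"^dict\[(\w+)\s*,\s*(\w+)\]$"  # key and value names of a flat dict

@dataclass
class ParsedType:
    primary: str
    secondary: Optional[str] = None
    key: Optional[str] = None
    value: Optional[str] = None
    optional: bool = False

def parse(setting_type: str) -> ParsedType:
    primary = re.search(PRIMARY_REGEX, setting_type).group(0)
    inner = re.search(INNER_REGEX, setting_type)
    if primary == "Optional" and inner is not None:
        nested = parse(inner.group(1))
        return ParsedType(nested.primary, nested.secondary, nested.key, nested.value, optional=True)
    if primary == "list" and inner is not None:
        inner_text = inner.group(1)
        m_dict = re.search(DICT_REGEX, inner_text)
        if m_dict:
            return ParsedType("list", "dict", *m_dict.groups())
        return ParsedType("list", re.search(PRIMARY_REGEX, inner_text).group(0))
    return ParsedType(primary)

print(parse("int"))                   # bare primary type
print(parse("list[dict[str, str]]"))  # list of dicts keyed and valued by str
print(parse("Optional[list[str]]"))   # optional list of str
```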
[
"def parse_config(self) -> None:\n # ToDO add support for Lists, Dicts, Datetimes, and Bool\n for s_name, sect_info in self.raw_cfg.items():\n sect = getattr(self, s_name.lower())\n for a_name, attribute in sect_info.items():\n if type(getattr(sect, a_name.lower())) == int:\n setattr(sect, a_name.lower(), int(attribute))\n else:\n setattr(sect, a_name.lower(), attribute)\n self.raw_cfg = None\n return",
"def _parse_type(self, basetype):\n typ_len = ''\n typ_precision = ''\n\n # The Length and the precision of the Datatype should be separate.\n # The Format we getting from database is: numeric(1,1)\n # So, we need to separate Length: 1, Precision: 1\n\n if basetype != '' and basetype.find(\"(\") > 0:\n substr = basetype[basetype.find(\"(\") + 1:len(\n basetype) - 1]\n typlen = substr.split(\",\")\n typ_len = typlen[0]\n if len(typlen) > 1:\n typ_precision = typlen[1]\n else:\n typ_precision = ''\n\n return {'typlen': typ_len, 'precision': typ_precision}",
"def _parse_type(\n type_ast: dict\n) -> Union[\"ListTypeNode\", \"NonNullTypeNode\", \"NamedTypeNode\"]:\n if type_ast[\"kind\"] == \"ListType\":\n return ListTypeNode(\n type=_parse_type(type_ast[\"type\"]),\n location=_parse_location(type_ast[\"loc\"]),\n )\n if type_ast[\"kind\"] == \"NonNullType\":\n return NonNullTypeNode(\n type=_parse_type(type_ast[\"type\"]),\n location=_parse_location(type_ast[\"loc\"]),\n )\n return _parse_named_type(type_ast)",
"def parse_type(type_name):\n for name, type_object in _type_definitions:\n if type_name == name:\n return type_object\n raise Exception(\"unknown type '%s'\" % type_name)",
"def get_parser(typ: Type[P]) -> Callable[[str], P]:\n try:\n return cast(\n Callable[[str], P],\n {\n str: parse_str,\n bool: parse_bool,\n int: parse_int,\n tuple: parse_tuple,\n list: parse_list,\n set: parse_set,\n }[typ],\n )\n except KeyError:\n raise NotImplementedError(\"Unsupported setting type: %r\", typ)",
"def parse(type_str: str) -> 'CommitType':\n try:\n return CommitType[type_str.upper()]\n except KeyError as e:\n raise ValueError(f'Unknown commit type: {type_str}. {e}')",
"def _parse_config(self):\n for line in self.config_str.split('\\n'):\n line = line.strip()\n if line and line[0] != '#':\n key, value = line.split('=')\n key = key.strip()\n value = value.strip()\n \n if not Config.RE_HAS_VAR_REF.match(value):\n # no variable references e.g. ${var}, so we evaluate\n # the expression to get value into the correct \n # Python type\n try:\n value = eval(value.strip(), self._globals, self._locals)\n # we evaluated it successfully so record it as such\n self._evaluated.append(key)\n except SyntaxError:\n # if there is a syntax error we'll just use the \n # variable as a string\n value = value.strip()\n self[key.strip()] = value\n\n self._check_dependencies()\n\n # finished parsing lets evaluate each variable\n while len(self.keys()) != len(self._evaluated):\n for var_name in self.keys():\n self._get_var_value(var_name)",
"def string_parser(self, string):\n\n # converts string into a list\n if ', ' in string:\n config = []\n # converts each item in the list into its respective types\n for item in string.split(', '):\n config.append(self.string_parser(item))\n return config\n # converts string to boolean\n elif string == 'True':\n return True\n elif string == 'False':\n return False\n # converts string to int\n elif string.count('.') == 0:\n try:\n return int(string)\n except ValueError:\n pass\n # converts string to float\n else:\n try:\n return float(string)\n except ValueError:\n pass\n\n # does not convert string if already is a string\n return string",
"def parseDispatchConfig(self, config):\n if not os.path.exists(config):\n return\n f = open(config, 'r')\n for l in f.readlines():\n l = l.strip()\n if l == \"\" or l.startswith('#'):\n continue\n t = l.split('=')\n _type = t[0]\n _map = t[1]\n if _type == 'server':\n _entry = _map.split('/')\n _path = _entry[1].strip()\n _addr_and_port = _entry[2].strip().split('#')\n _addr = _addr_and_port[0]\n if not is_address_validate(_addr):\n continue\n _port = \"53\"\n if len(_addr_and_port) == 2:\n _port = _addr_and_port[1]\n\n _port = int(_port)\n self.serverMap[_path] = (_addr, _port)\n\n if _type == 'address':\n _entry = _map.split('/')\n _path = _entry[1].strip()\n _addr = _entry[2].strip()\n if not is_address_validate(_addr):\n continue\n self.addressMap[_path] = _addr",
"def build_from_config(config):\n if 'type' not in config:\n assert 'eval' in config, 'eval not in config {}' .format(str(config))\n return eval(config['eval'])\n for k in config.keys():\n if isinstance(config[k], dict):\n config[k] = build_from_config(config[k])\n return parse_class(config.pop('type'))(**config)",
"def parse_config_value(value):\n if value == '':\n return None\n elif value.lower() == 'true':\n return True\n elif value.lower() == 'false':\n return False\n\n try:\n return int(value)\n except ValueError:\n pass\n\n try:\n return float(value)\n except ValueError:\n pass\n\n return value",
"def string_parser(string):\n\n # converts string into a list\n if ', ' in string:\n config = []\n # converts each item in the list into its respective types\n for item in string.split(', '):\n config.append(string_parser(item))\n return config\n # converts string to boolean\n elif string == 'True':\n return True\n elif string == 'False':\n return False\n # converts string to int\n elif string.count('.') == 0:\n try:\n return int(string)\n except ValueError:\n pass\n # converts string to float\n else:\n try:\n return float(string)\n except ValueError:\n pass\n\n # does not convert string if already is a string\n return string",
"def load_from_ini(self, path):\n config = configparser.ConfigParser(interpolation=None)\n config.optionxform = str # preserve case when reading option names\n config.read(path)\n\n section_rule = config['rule']\n\n # is this a supported type?\n if section_rule['type'] != self.type:\n raise InvalidHuntTypeError(section_rule['type'])\n\n self.enabled = section_rule.getboolean('enabled')\n\n # if we don't pass the name then we create it from the name of the ini file\n self.name = section_rule.get(\n 'name', \n fallback=(os.path.splitext(os.path.basename(path))[0]).replace('_', ' ').title())\n\n self.description = section_rule['description']\n # if we don't pass an alert type then we default to the type field\n self.alert_type = section_rule.get('alert_type', fallback=f'hunter - {self.type}')\n self.analysis_mode = section_rule.get('analysis_mode', fallback=ANALYSIS_MODE_CORRELATION)\n\n # frequency can be either a timedelta or a crontab entry\n self.frequency = None\n if ':' in section_rule['frequency']:\n self.frequency = create_timedelta(section_rule['frequency'])\n\n # suppression must be either empty for a time range\n self.suppression = None\n if 'suppression' in section_rule and section_rule['suppression']:\n self.suppression = create_timedelta(section_rule['suppression'])\n\n self.cron_schedule = None\n if self.frequency is None:\n self.cron_schedule = section_rule.get('cron_schedule', fallback=section_rule['frequency'])\n # make sure this crontab entry parses\n croniter(self.cron_schedule)\n\n self.tags = [_.strip() for _ in section_rule['tags'].split(',') if _]\n self.queue = section_rule['queue'] if 'queue' in section_rule else QUEUE_DEFAULT\n\n self.ini_path = path\n self.last_mtime = os.path.getmtime(path)\n return config",
"def parse_types(node):\n return {'ntypes': int(node.text)}",
"def parse(text):\n\n config = configparser.ConfigParser(\n delimiters=('='),\n # We will interpret the options starting with _ as comments\n # to speed up parsing. If these options are ever meaningful\n # that can change.\n comment_prefixes=('//', '_'),\n strict=False\n )\n config.optionxform = str\n\n config.read_string(text)\n\n return config",
"def supports_configuration_record_type(self, configuration_record_type):\n return # boolean",
"def parse_config(config):\n source = config.get('source')\n dest = config.get('dest')\n anonymization_type = config.get('anonymization')\n masked_fields = config.get('include')\n suppressed_fields = config.get('exclude')\n include_rest = config.get('include_rest')\n sensitive = config.get('sensitive')\n\n if not source:\n raise ConfigParserError(\"source error: source not defined. Please check config.\")\n if not dest:\n raise ConfigParserError(\"destination error: dest not defined. Please check config.\")\n if not masked_fields:\n warnings.warn(\"no masked fields included in config. No data will be anonymized\", Warning)\n\n reader_type = source.get('type')\n writer_type = dest.get('type')\n\n if not reader_type:\n raise ConfigParserError(\"source error: source type not defined. Please check config.\")\n\n if not writer_type:\n raise ConfigParserError(\"destination error: dest type not defined. Please check config.\")\n\n Config = collections.namedtuple('Config', 'source dest anonymization_type masked_fields suppressed_fields include_rest sensitive')\n config = Config(source, dest, anonymization_type, masked_fields, suppressed_fields, include_rest, sensitive)\n return config",
"def parse_type(ty):\n ident_re = re.compile(r'^(?:\\w|_)+$')\n token_re = re.compile(r'(?s)\\s*((?:\\w|_)+|.)\\s*')\n tokens = token_re.finditer(ty)\n\n current = None\n\n def consume(token=None):\n nonlocal current\n assert token is None or token == current\n current = next(tokens, None)\n if current is not None:\n current = current.group(1)\n\n consume(None) # load the first token into `current`\n\n def is_ident():\n \"\"\"True if the current token is an identifier\"\"\"\n return current is not None and ident_re.match(current) is not None\n\n def parse_params():\n params = []\n while current != '>':\n params.append(parse_ty())\n if current == ',':\n consume(',')\n return params\n\n def parse_ty():\n if not is_ident():\n raise ValueError(\"parse error in type {!r}\".format(ty))\n name = current\n consume()\n if current == '<':\n consume('<')\n params = parse_params()\n if current != '>':\n raise ValueError(\"parse error in type {!r} (expected `>`)\".format(ty))\n consume('>')\n return Type(name, params)\n return Type(name)\n\n result = parse_ty()\n if current is not None:\n raise ValueError(\"parse error in type {!r} (extra stuff at end)\".format(ty))\n return result",
"def read_ini(self, filename = '../src/initialize.ini'):\n\t\tconfig = configparser.ConfigParser()\n\t\tconfig.optionxform=str # preserves uppercase for keys\n\n\t\tif os.path.isfile(filename):\n\t\t\tconfig.read(filename)\n\t\telse: \n\t\t\tprint(\"\\nNo .ini in folder.\\nProceed with default tedlium-configurations.\\n\")\n\t\t\treturn\n\n\t\tfor entry in config['NUMERICAL_VALUES']:\n\t\t\ttry:\n\t\t\t\tself.params[entry]['value'] = self.params[entry]['type'](config['NUMERICAL_VALUES'][entry])\n\t\t\texcept: \n\t\t\t\tif config['NUMERICAL_VALUES'][entry] == 'None':\n\t\t\t\t\tself.params[entry]['value'] = None\n\t\t\t\telse: \n\t\t\t\t\tprint(\"Unexpected error: \", sys.exc_info()[0])\n\n\t\tfor entry in config['LITERAL_VALUES']:\n\t\t\tif not config['LITERAL_VALUES'][entry] == 'None':\n\t\t\t\tself.params[entry]['value'] = self.params[entry]['type'](config['LITERAL_VALUES'][entry])\n\t\t\telse:\n\t\t\t\tself.params[entry]['value'] = None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Chop a message into chunks of at most 1024 bytes each, including the TIMEOUT prefix and the ADD/AYE endings
|
from math import ceil


def prepare_command(message: str, timeout='5'):
    timeout = str(timeout)  # normalize so timeout.encode() works for int or str values
    packets = []
bodysize = 1024 - len(f'TIMEOUT {timeout} ') - len(' ADD')
chunks = ceil(len(message) / bodysize)
for chunk in range(0, chunks - 1):
data = b'TIMEOUT ' + timeout.encode() + b' '
data += message[:bodysize].encode()
data += b' ADD' # Signal more will follow
message = message[bodysize:]
packets.append(data)
# Last chunk
data = b'TIMEOUT ' + str(timeout).encode() + b' '
data += message[:bodysize].encode()
data += b' AYE' # Signal end
packets.append(data)
return packets
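A brief usage sketch of the chunking above; the 2500-character payload and the printout are illustrative only and assume the function (plus math.ceil) is importable as written:

long_message = 'x' * 2500                    # illustrative payload, not from the original
packets = prepare_command(long_message, timeout='5')
for p in packets:
    assert len(p) <= 1024                    # every frame respects the 1024-byte budget
    print(len(p), p[:10], p[-4:])            # b'TIMEOUT 5 ' ... b' ADD' or b' AYE'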
|
[
"def chunk_message(self, msg):\n prev = 0\n while prev < len(msg):\n next = min(prev + self.maxMsgSize, len(msg))\n yield msg[prev:next]\n prev = next",
"def test_multibyte_delim():\n\n delim = b'\\r\\n'\n for with_delim in (True, False):\n if with_delim:\n cond_delim = b'\\r\\n'\n else:\n cond_delim = b''\n\n empty = b''\n small_one = b'1'\n big_two = b'2' * 2048\n for ms in (3, 5, 1024, None):\n x, y = socket.socketpair()\n bs = BufferedSocket(x)\n\n y.sendall(empty + delim)\n y.sendall(small_one + delim)\n y.sendall(big_two + delim)\n\n kwargs = {'maxsize': ms, 'with_delimiter': with_delim}\n assert bs.recv_until(delim, **kwargs) == empty + cond_delim\n assert bs.recv_until(delim, **kwargs) == small_one + cond_delim\n try:\n assert bs.recv_until(delim, **kwargs) == big_two + cond_delim\n except MessageTooLong:\n if ms is None:\n assert False, 'unexpected MessageTooLong'\n else:\n if ms is not None:\n assert False, 'expected MessageTooLong'\n\n return",
"def get_chunks(self, fixed_limit=None, encoding=\"unicode\"):\n\n # Socket is disconnected.\n if not self.connected:\n return\n\n # Recv chunks until network buffer is empty.\n repeat = 1\n wait = 0.2\n chunk_no = 0\n max_buf = self.max_buf\n max_chunks = self.max_chunks\n if fixed_limit is not None:\n max_buf = fixed_limit\n max_chunks = fixed_limit\n\n while repeat:\n chunk_size = self.chunk_size\n while True:\n # Don't exceed buffer size.\n buf_len = len(self.buf)\n if buf_len >= max_buf:\n break\n remaining = max_buf - buf_len\n if remaining < chunk_size:\n chunk_size = remaining\n\n # Don't allow non-blocking sockets to be\n # DoSed by multiple small replies.\n if chunk_no >= max_chunks and not self.blocking:\n break\n\n try:\n chunk = self.s.recv(chunk_size)\n except socket.timeout as e:\n self.debug_print(\"Get chunks timed out.\")\n self.debug_print(e)\n\n # Timeout on blocking sockets.\n err = e.args[0]\n self.debug_print(err)\n if err == \"timed out\":\n repeat = 0\n break\n except ssl.SSLError as e:\n # Will block on non-blocking SSL sockets.\n if e.errno == ssl.SSL_ERROR_WANT_READ:\n self.debug_print(\"SSL_ERROR_WANT_READ\")\n break\n else:\n self.debug_print(\"Get chunks ssl error\")\n self.close()\n return\n except socket.error as e:\n # Will block on nonblocking non-SSL sockets.\n err = e.args[0]\n if err == errno.EAGAIN or err == errno.EWOULDBLOCK:\n break\n else:\n # Connection closed or other problem.\n self.debug_print(\"get chunks other closing\")\n self.close()\n return\n else:\n if chunk == b\"\":\n self.close()\n return\n\n # Avoid decoding errors.\n self.buf += chunk\n\n # Otherwise the loop will be endless.\n if self.blocking:\n break\n\n # Used to avoid DoS of small packets.\n chunk_no += 1\n\n # Repeat is already set -- manual skip.\n if not repeat:\n break\n else:\n repeat = 0\n\n # Block until there's a full reply or there's a timeout.\n if self.blocking:\n if fixed_limit is None:\n # Partial response.\n if self.delimiter not in self.buf:\n repeat = 1\n time.sleep(wait)",
"def receive_messages(self, batch_size):",
"def __recv_timeout(self, the_connection, timeout = 1):\n total_data=[];\n data='';\n \n begin=time.time()\n while True:\n if (total_data and time.time()-begin > timeout) or time.time()-begin > timeout*2:\n break\n try:\n data = the_connection.recv(4096)\n if data:\n total_data.append(data)\n begin=time.time()\n else:\n time.sleep(0.1)\n except:\n pass\n \n return b''.join(total_data)",
"def split_eap_message(eap_messages: bytes) -> list:\n if len(eap_messages) < 253:\n return [eap_messages]\n _stop = len(eap_messages)\n _step = 253\n return [eap_messages[pos:pos+_step] for pos in range(0, _stop, _step)]",
"def read_message(self):\n while True:\n if self.next_chunk_size is None:\n chunk_size = self.wire.read(2)\n self.next_chunk_size, = struct_unpack(\">H\", chunk_size)\n if self.next_chunk_size:\n chunk_data = self.wire.read(self.next_chunk_size)\n self.next_chunk_size = None\n self.data_buffer.append(chunk_data)\n else:\n self.next_chunk_size = None\n break\n buffer = UnpackableBuffer(b\"\".join(self.data_buffer))\n self.data_buffer = []\n unpacker = Unpacker(buffer, self.packstream_version)\n return unpacker.unpack_message()",
"async def test_message_is_truncated(\n hass: HomeAssistant, mock_imap_protocol: MagicMock, caplog: pytest.LogCaptureFixture\n) -> None:\n event_called = async_capture_events(hass, \"imap_content\")\n\n config = MOCK_CONFIG.copy()\n\n # Mock the max message size to test it is truncated\n config[\"max_message_size\"] = 3\n config_entry = MockConfigEntry(domain=DOMAIN, data=config)\n config_entry.add_to_hass(hass)\n assert await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n # Make sure we have had one update (when polling)\n async_fire_time_changed(hass, utcnow() + timedelta(seconds=5))\n await hass.async_block_till_done()\n state = hass.states.get(\"sensor.imap_email_email_com\")\n # We should have received one message\n assert state is not None\n assert state.state == \"1\"\n assert len(event_called) == 1\n\n event_data = event_called[0].data\n assert len(event_data[\"text\"]) == 3",
"def test_bytes_first_too_big(self):\n actions = ['chimpanzees', 'hi', 'ho']\n chunks = bulk_chunks(actions, bytes_per_chunk=6)\n self.assertEqual(list(chunks), [['chimpanzees'], ['hi', 'ho']])",
"def chunk_messages(report):\n msg = \"\"\n for line in report.splitlines():\n msg += line + \"\\n\"\n if len(msg) > 3500:\n yield msg\n msg = \"\"\n yield msg",
"def receive_bytes(self, size):\n time_start = datetime.now()\n total_data = \"\"\n last_read = \"\"\n while True:\n last_read = self.request.recv(size)\n total_data += last_read\n size -= len(last_read)\n if size <= 0:\n break\n else:\n time.sleep(0.01)\n time_now = datetime.now()\n time_diff = time_now - time_start\n if time_diff.seconds >= 5:\n raise DataReadTimeoutException()\n return total_data",
"def shorten_msg(msg,max_len):\n if len(msg) > max_len:\n h0=floor(max_len/2)\n h1=max_len-h0\n msg=msg[:h0]+\"\\n ---skipped--- \\n\" + msg[ - h1:]\n return msg",
"def message_reader(data_socket, num_lines, num_runs):\n\n byte_message = b''\n\n while num_lines > 0:\n byte_holder = next_byte(data_socket)\n byte_message += byte_holder\n if byte_holder == b'\\x0a':\n num_lines -= 1\n print(byte_message.decode())\n\n f = open((str(num_runs) + '.txt'), 'w+')\n f.write(byte_message.decode('ASCII'))\n f.close()\n num_runs += 1",
"def read_n_bytes(resp, sock, n, deadline):\n buf = b''\n while len(buf) < n:\n buf += read_by_deadline(resp, sock, deadline, n - len(buf))\n return buf",
"def publish_chunks(self):\n try:\n udp_dest = (self.udp_audio_host, self.udp_audio_port)\n\n while not self._exit_requested:\n chunk = self.chunk_queue.get()\n if chunk:\n # MQTT output\n with io.BytesIO() as wav_buffer:\n wav_file: wave.Wave_write = wave.open(wav_buffer, \"wb\")\n with wav_file:\n wav_file.setframerate(self.sample_rate)\n wav_file.setsampwidth(self.sample_width)\n wav_file.setnchannels(self.channels)\n wav_file.writeframes(chunk)\n\n wav_bytes = wav_buffer.getvalue()\n\n if self.udp_output:\n # UDP output\n self.udp_socket.sendto(wav_bytes, udp_dest)\n else:\n # Publish to output site_id\n self.publish(\n AudioFrame(wav_bytes=wav_bytes),\n site_id=self.output_site_id,\n )\n if self._dump_file is not None:\n # print(\"tell is\", self._dump_file.tell(), end=' ') \n # write_wave( self._dump_file, wav_bytes, remove_header=True)\n if USE_SOUNDFILE:\n\t\t\t\t\t\t\t\t# soultion soundfile\n self._dump_file.write(np.frombuffer(wav_bytes[44:], np.int16))\t# removing header!\n else:\n\t\t\t\t\t\t\t\t# Solution wave, this write always the latest buffer and doesnt happend it!\n self._dump_file.writeframesraw(wav_bytes[44:]) # removing header!\t\t\t\t\t\n if self.enable_summary:\n self.summary_frames_left -= 1\n if self.summary_frames_left > 0:\n continue\n\n self.summary_frames_left = self.summary_skip_frames\n if not self.vad:\n # Create voice activity detector\n self.vad = webrtcvad.Vad()\n self.vad.set_mode(self.vad_mode)\n # webrtcvad needs 16-bit 16Khz mono\n # TODO: would be possible to split here if demux is not selected? this would avoid resampling,\n # which is called continuously. (uncomment this code). With the switch --demux a proper channel\n # is produced\n # with io.BytesIO(wav_bytes) as wav_io:\n # with wave.open(wav_io, \"rb\") as wav_file:\n # if (wav_file.getframerate() != 16000) or \\\n # (wav_file.getsampwidth() != 2) or \\\n # (wav_file.getnchannels() != 1):\n # print(\"Need Resample: sr={}, width={}, n_ch={}\".format(wav_file.getframerate(),\n # wav_file.getsampwidth(),\n # wav_file.getnchannels()))\n # else:\n # print(\"No resample\")\n # webrtcvad needs 16-bit 16Khz mono\n self.vad_audio_data += self.maybe_convert_wav(\n wav_bytes, sample_rate=16000, sample_width=2, channels=1\n )\n is_speech = False\n # Process in chunks of 30ms for webrtcvad\n while len(self.vad_audio_data) >= self.vad_chunk_size:\n vad_chunk = self.vad_audio_data[: self.vad_chunk_size]\n self.vad_audio_data = self.vad_audio_data[\n self.vad_chunk_size :\n ]\n # Speech in any chunk counts as speech\n is_speech = is_speech or self.vad.is_speech(\n vad_chunk, 16000\n )\n # Publish audio summary\n self.publish(\n AudioSummary(\n debiased_energy=AudioSummary.get_debiased_energy(chunk),\n is_speech=is_speech,\n ),\n site_id=self.output_site_id,\n )\n\n except Exception as e:\n _LOGGER.exception(\"publish_chunks\")\n self.publish(\n AudioRecordError(\n error=str(e), context=\"publish_chunks\", site_id=self.site_id\n )\n )",
"def readMultiple(self, timeout=60):\n deadline = time.time() + timeout\n allpackets = []\n while time.time() < deadline:\n allpackets.append(self.read())\n time.sleep(1.0)\n\n return allpackets",
"def readexactly(sock, numbytes):\n bytes_received = b\"\"\n count = 0\n while count < numbytes:\n byte = sock.recv(1)\n if byte:\n count += 1\n bytes_received += byte\n else:\n raise asyncio.streams.IncompleteReadError(bytes_received, numbytes-count)\n\n return bytes_received",
"def test_big_chunks(self):\n self.assertEqual(_crc16.crc16xmodem(b'A' * 16 * 1024 * 1024), 0xbf75)",
"def test_receive_message_random_section(self):\n\n random.seed(a=0)\n payload = b''.join(\n [struct.pack('!B', random.randint(0, 255)) for i in range(1000)])\n\n compress = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED,\n -zlib.MAX_WBITS)\n compressed_payload = compress.compress(payload)\n compressed_payload += compress.flush(zlib.Z_SYNC_FLUSH)\n compressed_payload = compressed_payload[:-4]\n\n # Fragment the compressed payload into lots of frames.\n bytes_chunked = 0\n data = b''\n frame_count = 0\n\n chunk_sizes = []\n\n while bytes_chunked < len(compressed_payload):\n # Make sure that\n # - the length of chunks are equal or less than 125 so that we can\n # use 1 octet length header format for all frames.\n # - at least 10 chunks are created.\n chunk_size = random.randint(\n 1,\n min(125,\n len(compressed_payload) // 10,\n len(compressed_payload) - bytes_chunked))\n chunk_sizes.append(chunk_size)\n chunk = compressed_payload[bytes_chunked:bytes_chunked +\n chunk_size]\n bytes_chunked += chunk_size\n\n first_octet = 0x00\n if len(data) == 0:\n first_octet = first_octet | 0x42\n if bytes_chunked == len(compressed_payload):\n first_octet = first_octet | 0x80\n\n data += b'%c%c' % (first_octet, chunk_size | 0x80)\n data += _mask_hybi(chunk)\n\n frame_count += 1\n\n self.assertTrue(len(chunk_sizes) > 10)\n\n # Close frame\n data += b'\\x88\\x8a' + _mask_hybi(struct.pack('!H', 1000) + b'Good bye')\n\n extension = common.ExtensionParameter(\n common.PERMESSAGE_DEFLATE_EXTENSION)\n request = _create_request_from_rawdata(\n data, permessage_deflate_request=extension)\n self.assertEqual(payload, msgutil.receive_message(request))\n\n self.assertEqual(None, msgutil.receive_message(request))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Wrapper for selenium.webdriver.get to handle WebDriverException when "Failed to decode response from marionette" is raised
|
def get(self, url: str):
try:
super(Browser, self).get(url)
except (WebDriverException, NoSuchWindowException) as e:
if 'Message: Failed to decode response from marionette' in str(e) or \
'Message: Browsing context has been discarded' in str(e):
self.reset()
logging.info('Marionette exception encountered. Resetting browser object.')
self.get(url)
else:
logging.error(str(e))
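A hedged sketch of the surrounding class this get() presumably belongs to; the reset() body below (quit the dead session, re-run the Firefox driver initializer) is an assumption made for illustration, not taken from the snippet:

import logging  # used by the get() override shown above

from selenium import webdriver
from selenium.common.exceptions import WebDriverException, NoSuchWindowException


class Browser(webdriver.Firefox):
    """Firefox driver that recovers from marionette crashes by rebuilding itself."""

    def reset(self):
        # Assumption: discard the broken session and start a fresh one in place.
        try:
            self.quit()
        except WebDriverException:
            pass  # the old session may already be gone
        super(Browser, self).__init__()

    # ... the get(url) override shown above goes here ...

Note that the retry is unbounded: if reset() cannot recover, get() will keep recursing for as long as the same marionette error keeps appearing.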
|
[
"def test_get_gets_fake_url_without_internet():\n try:\n icl_b._get(\"http://fakeurl\")\n except Exception as e:\n assert isinstance(e, icl_e.ItaCovidLibConnectionError)",
"def test_get_html_not_defined(self):\n url = \"http://thispagedoesnotexists.com\"\n response, rtime = get_html(url)\n self.assertTrue(response[\"connected\"] == False)\n self.assertTrue(response[\"status_code\"][1] == None)",
"async def test_browser_session_fail():\n session = HTMLSession()\n with pytest.raises(RuntimeError):\n session.browser",
"def request_selenium(self, url, button=None, iframe=None, body=None):\n\n try:\n self.driver.get(url)\n print(\"request_selenium(): \", url)\n # select iframe\n if iframe:\n iframe_params = (iframe[\"element\"], iframe[\"by\"]) if isinstance(iframe, dict) else (iframe, None)\n self.wait_locate(*iframe_params)\n self.switch_frame(*iframe_params)\n\n # click button\n if button:\n button_params = (button[\"element\"], button[\"by\"]) if isinstance(button, dict) else (button, None)\n # btn = \\\n self.wait_click(*button_params)\n # btn.click()\n\n # make sure to select html body before returning driver.page_source\n self.driver.switch_to.default_content()\n\n # wait until html body is fully loaded\n if body:\n if isinstance(body, int):\n time.sleep(body)\n else:\n body_params = (body[\"element\"], body[\"by\"]) if isinstance(body, dict) else (body, None)\n self.wait_locate(*body_params)\n\n # print(\"request_selenium(): Active window handles:\", self.driver.window_handles)\n return self.driver.page_source\n\n except Exception as e:\n print(\"Selenium error\", e)\n return \"\"",
"def test_get_method_exception(self):\n with self.assertRaises(WechatException):\n RequestUtil.get('')",
"def __ie_confirm_cert_exception(self):\n js_cmd = \"javascript:document.getElementById('overridelink').click();\"\n try:\n self.set_page_load_timeout(1)\n super(Remote, self).get(js_cmd)\n except selenium_ex.TimeoutException:\n # \"Certificate Error\" page is not present, moving on\n pass\n finally:\n self.set_page_load_timeout(self.PAGE_LOAD_TIMEOUT)",
"def get_page(_driver, _url):\n _driver.get(_url)\n time.sleep(5)\n return BeautifulSoup(_driver.page_source, 'html.parser')",
"def force_get(self, url):\n try:\n # Ignore all popup windows and force to load the url.\n original_url = self.current_url\n\n # If original_url equal to url, that will lead EC.url_changed() never\n # return True!\n if original_url == url:\n condition = EC.url_changed_to(url)\n else:\n condition = EC.url_changed(original_url)\n\n for i in range(0, 3): # Try three times\n self.execute_script(\"window.onbeforeunload = function(e){};\")\n self.get(url)\n\n # Next code statements are just use for Chrome browser.\n # It will not ensure the url be success navigated to, so we\n # will try 3 times until failed.\n try:\n WebDriverWait(self, 10).until(condition)\n break\n except TimeoutException:\n pass\n except TimeoutException as e:\n # Stop the page loading if timeout already happened.\n self.execute_script(\"window.stop()\")\n\n return self",
"def test_get_html_404(self):\n url = \"http://httpbin.org/status/404\"\n response, rtime = get_html(url)\n self.assertTrue(response[\"connected\"])\n self.assertTrue(response[\"status_code\"][1] == 404)",
"def test_no_browser():\n\n transport = validating_transport(requests=[Request()] * 2, responses=[get_discovery_response()] * 2)\n credential = InteractiveBrowserCredential(client_id=\"client-id\", _server_class=Mock(), transport=transport)\n with patch(InteractiveBrowserCredential.__module__ + \"._open_browser\", lambda _: False):\n with pytest.raises(CredentialUnavailableError, match=r\".*browser.*\"):\n credential.get_token(\"scope\")",
"def get_error_page(self, url, method=None, timeout=0):\n self.url = url\n self.method = method\n self.query.client = FakeHTTPClient(self.status, url)\n return fail(self.error or Exception(self.response))",
"def get_html(url): \n \n return requests.get(url)",
"def _get_browser(self):\n return self._get('/get_browser').text",
"def test_get_html_200(self):\n url = \"http://google.fi\"\n response, rtime = get_html(url)\n self.assertTrue(response[\"connected\"])\n self.assertTrue(response[\"status_code\"][1] == 200)",
"def _get_monitor_agent_plugin(self, url):\n try:\n response = requests.get(url, timeout=self.options['timeout'])\n except Exception:\n raise base.BlackbirdPluginError(\n 'Maybe, fluentd doesn\\'t load \"monitor_agent\" plugin.'\n )\n\n try:\n return response.json()\n except Exception:\n raise base.BlackbirdPluginError(\n 'Response format is not json. Maybe, you specify invalid URI.'\n )",
"def fetchPage(driver,url):\n\tdriver.get('about:blank');\n\ttry:\n\t\tdriver.get(url)\n\texcept Exception as e:\n\t\tprint(('Could not fetch page. %s:%s)'%(e.__class__.__name__,e)))\n\ttry:\n\t\ttitle=driver.execute_script(\"return arguments[0].text\",driver.find_element_by_xpath('//title'))\n\t\tif \"404 Not Found\" in title:\n\t\t\traise Exception\n\texcept:\n\t\treturn False;\n\treturn True",
"def __request_page(self, url):\n r = self.session.get(url)\n try:\n r.html.render(timeout=30, scrolldown=True)\n if \"It’s currently a bit busy\" in r.html.text:\n raise ValueError(\"\\'It’s currently a bit busy\\\" page is shown\")\n r.close()\n return r\n except Exception as err:\n r.close()\n raise err",
"def test_check_network_error(self, mock_get):\n mock_get.side_effect = requests.exceptions.ConnectionError\n with self.assertRaises(WebMonitorException):\n self._web_monitor.check()",
"def load(self, url):\n self.browser.get(url)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add new custom types that can be interpolated by this object. This method expects a dict that maps types (the keys) to their custom wrapper classes (the values). The wrapper classes must be descendants of the Esc class.
|
def add_types(self, new_types):
self.type_map.update(new_types)
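A self-contained, hedged sketch of how add_types could be exercised; the Esc base class and the Interpolator host below are minimal stand-ins invented for illustration, since only the method itself appears in the snippet:

class Esc:
    """Minimal stand-in for the wrapper base class named in the description."""
    def __init__(self, value):
        self.value = value

    def escaped(self):
        return str(self.value)


class EscUpper(Esc):
    """Hypothetical custom wrapper: renders its value upper-cased."""
    def escaped(self):
        return str(self.value).upper()


class Interpolator:
    """Minimal host object exposing the add_types() method shown above."""
    def __init__(self):
        self.type_map = {str: Esc}

    def add_types(self, new_types):
        self.type_map.update(new_types)

    def wrap(self, value):
        wrapper = self.type_map.get(type(value), Esc)
        return wrapper(value).escaped()


interp = Interpolator()
interp.add_types({bool: EscUpper})   # register a custom wrapper for bool values
print(interp.wrap(True))             # -> TRUE
print(interp.wrap('hello'))          # -> hello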
|
[
"def register(cls):\n if not hasattr(cls, \"__fromjson__\") or not hasattr(cls, \"__tojson__\"):\n raise KeyError(\"register: registered types must have a __fromjson__ method\")\n k = clsKey(cls)\n if k in _types:\n raise Exception(\"tinyjson: mutliple attempts to register class %s\" % k)\n _types[k] = cls",
"def addTypes(self,*types):\n types = tuple([t for t in iterTypes(types) if t not in self.types])\n refs = len([t for t in types if issubclass(t,Entity)])\n\n if (refs or self.isReference) and refs<>len(types):\n raise TypeError(\"Cannot mix entity and value types in one role\")\n if len(self.types+types)>1:\n raise TypeError(\"Multiple value types not allowed in one role\")\n if refs:\n self.isReference = True\n self._setattr('types',self.types + types)\n self.setDoc() # update the doc string",
"def with_types(cls, key_type, value_type):\n result = cls()\n result._types = (key_type, value_type)\n return result",
"def register_type(self, key, *types):\n assert key in self._config\n self._types[key] = set(types)",
"def add_constants( self, **keywds ):\n tmpl = 'boost::python::scope().attr(\"%(name)s\") = %(value)s;'\n for name, value in keywds.items():\n if not isinstance( value, types.StringTypes ):\n value = str( value )\n self.add_registration_code( tmpl % dict( name=name, value=value) )",
"def addType(self, type):\n\t\tself.types.append(type)",
"def add_type(type_, func=None):\n def _check_type(type_):\n if not (isinstance(type_, python.class_types) or\n hasattr(type_, '__call__')):\n raise TypeError(r'Unable to add '%r' as a custom type (expected a '\n 'class or callable)' % (type_,))\n\n if isinstance(type_, list):\n type_ = tuple(type_)\n\n if type_ in TYPE_MAP:\n raise KeyError('Type %r already exists' % (type_,))\n\n if isinstance(type_, types.TupleType):\n for x in type_:\n _check_type(x)\n else:\n _check_type(type_)\n\n TYPE_MAP[type_] = func",
"def add_convenience_methods(cls, type_dict):\n for nm, value in CLASS_METHODS.get(cls.__name__, ()):\n type_dict[nm] = value\n\n try:\n for cls in CLASS_ABC[cls.__name__]:\n cls.register(cls)\n del CLASS_ABC[cls.__name__]\n except KeyError:\n pass",
"def add_field_type(self, **field_kwargs: Any) -> None:\n self._post_field(\"add-field-type\", **field_kwargs)",
"def InjectCustomKeys(self, keys, change):\n for key in keys:\n self._dict[str(key)] = change",
"def add_type_definitions(self):\n return self._sub(\n self._regexes['includes_end'],\n self._type_def_code)",
"def register_domain_type(domain_class, type_key):",
"def _wrap(name: str, ttype: int) -> dict:\n return {\n 'name': name,\n 'tag_type': ttype,\n }",
"def typesystem(**kwargs):\n def decorate(cls):\n for key, value in kwargs.items():\n if isinstance(value, (Descriptor, ConstantDescriptor)):\n value.name = key\n setattr(cls, key, value)\n else:\n setattr(cls, key, value(key))\n return cls\n return decorate",
"def add_types(self, functor, argtypes):\n key = (functor, len(argtypes))\n if key in self._types:\n raise ValueError(\"A type definition already exists for '%s/%s'.\"\n % (functor, len(argtypes)))\n else:\n self._types[key] = argtypes",
"def add_type_dict_for_context(self, var_dict):\n import traceback\n\n func_name = traceback.extract_stack(None, 2)[0][2]\n\n self.__add_type_dict_for_context(var_dict, func_name)",
"def add_or_type(self, or_base, or_decorators):\n or_decorators = _remove_blanks_repeats(or_decorators, ['', or_base])\n self._or_types.append((or_base, or_decorators))",
"def register_wrapper(cls):\n for wrapped in cls._WRAPPED:\n if wrapped in cls._WRAPPERS:\n LOGGER.warn('{} is already registered to {}.'.format(wrapped, cls._WRAPPERS[wrapped]))\n\n if LANTZ_BUILDING_DOCS:\n cls._WRAPPERS[wrapped] = type(wrapped.__name__ + 'Wrapped',\n (cls, ), {'_IS_LANTZ_WRAPPER': True})\n else:\n cls._WRAPPERS[wrapped] = type(wrapped.__name__ + 'Wrapped',\n (cls, wrapped), {'_IS_LANTZ_WRAPPER': True})\n\n return cls",
"def map_type(self, es_type, json_type, json_format=None, es_props=None):\n assert json_type in ['string', 'boolean', 'number', 'integer']\n\n stored_mapping = {\n 'type': es_type,\n 'props': es_props\n }\n\n if json_type == 'string' and json_format:\n self.__formats_map[json_format] = stored_mapping\n else:\n self.__types_map[json_type] = stored_mapping\n return self",
"def addSyntheticTemplate(self, templates, class_id) -> retval:\n ..."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Committing a change made by a SQL query
|
def __commit_query(self, SQLquery):
try:
cursor = self.cnx.cursor()
#execute the SQL change
        if self.debug:
print("Executing following SQL command : " + SQLquery + " on db : " + self.dbname)
cursor.execute(SQLquery)
#commit change in db
self.cnx.commit()
return 0
    except Exception:
self.cnx.rollback()
        if self.debug:
print("Error executing : " + SQLquery + " on db : " + self.dbname)
return 1
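The method runs a pre-built SQL string; when the change carries user-supplied values, a parameterized variant like the hedged sketch below (assuming self.cnx is any DB-API 2.0 connection, e.g. sqlite3 or mysql-connector) lets the driver do the quoting. The name __commit_parameterized and its signature are assumptions, not part of the original class:

def __commit_parameterized(self, sql, params=()):
    # Assumed variant of __commit_query: same commit/rollback flow, but values
    # are bound by the driver instead of concatenated into the SQL string.
    # Placeholder style depends on the driver: "?" for sqlite3, "%s" for mysql-connector.
    try:
        cursor = self.cnx.cursor()
        if self.debug:
            print("Executing : " + sql + " with params " + repr(params) + " on db : " + self.dbname)
        cursor.execute(sql, params)
        self.cnx.commit()
        return 0
    except Exception:
        self.cnx.rollback()
        if self.debug:
            print("Error executing : " + sql + " on db : " + self.dbname)
        return 1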
|
[
"def _do_commit(self):\n self.backend.commit()",
"def _commit(self):\n pass",
"def execute_commit(self, sql, data):\n self.execute(sql, data)\n self.commit()",
"def commit(self):\n\t\tself.dbConnection.commit()",
"def commit(self, session):\n session.commit()",
"def commit(self):\n \"\"\" Default to commit after every transaction\n Will check instance variable to decide if a commit is needed\n \"\"\"\n try:\n self.cursor.execute(\"COMMIT\")\n self.cursor.close()\n self.cursor = None\n except AttributeError:\n logging.error(\"No Open Cursor to do Commit\")\n except Exception as e:\n logging.error(e)",
"def commit(self):\n\t\tdel self.transaction_log[:] \n\t\tself.transaction_mode = False",
"def commit(self):\n self._check_closed()\n self._trans_id = self.__session.send_commit()",
"def c_commit(self, args):\n log.info('forcing commit')\n self.db.commit()",
"def commit(t):\n return Action(COMMIT, t)",
"def commit_transaction(self):\n self.tx.commit()",
"def commit(self):\n result = self.lastTransaction.commit()\n self.lastTransaction = None\n return result",
"def commit(self):\n\t\tif self._status != self._IN_PROGRESS:\n\t\t\traise ValueError(\"Batch must be in progress to commit()\")\n\n\t\ttry:\n\t\t\tfrom viur.xeno.databases import dbinterface\n\t\t\tdbinterface.transaction_commit()\n\t\tfinally:\n\t\t\tself._status = self._FINISHED",
"def commit_database():\n global _conn\n _conn.commit()",
"def update(self, sql):",
"def run_query(self, query):\n self.cur.execute(query)\n self.dbConn.commit()\n return",
"def commit(self):\n if self.dbh:\n self.dbh.commit()\n self._feedlgr.commit()",
"def commit(self):\n try:\n self.session.commit()\n except Exception: # pragma: no cover\n self.session.rollback()\n raise",
"def commit(self):\n if self._dblog:\n self._feedlgr.commit()",
"def edit_job(self,query_data):\n self.cur.execute(query_data)\n self.conn.commit()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
populate a table with the provided values
|
def populate_table(self, table, values):
    # to be completed according to sqlite3 requirements
if self.platform == STATIC.PLATFORM_WINDOWS:
for value in values:
#print (str(value.MONTH) + " " + value.FLOW + " " + value.CONTRIB + " ")
# rev ex rox
table[STATIC.equivData["Rev"]][STATIC.equivFlow[value.FLOW]][STATIC.equivYield[value.CONTRIB]][value.MONTH] = value.REV_EX_ROX
# rpk
table[STATIC.equivData["RPK"]][STATIC.equivFlow[value.FLOW]][STATIC.equivYield[value.CONTRIB]][value.MONTH] = value.RPK
# ask
table[STATIC.equivData["ASK"]][STATIC.equivFlow[value.FLOW]][STATIC.equivYield[value.CONTRIB]][value.MONTH] = value.ASK
else:
for value in values:
# rev ex rox
table[STATIC.equivData["Rev"]][STATIC.equivFlow[value[2]]][STATIC.equivYield[value[1]]][value[0]] = value[4]
# rpk
table[STATIC.equivData["RPK"]][STATIC.equivFlow[value[2]]][STATIC.equivYield[value[1]]][value[0]] = value[5]
# ask
table[STATIC.equivData["ASK"]][STATIC.equivFlow[value[2]]][STATIC.equivYield[value[1]]][value[0]] = value[6]
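A small, hedged illustration of the nested [data][flow][yield][month] layout this method writes into; the STATIC-style lookup dicts and the sample row below are made-up stand-ins, kept only to show the indexing pattern of the non-Windows branch:

# Hypothetical stand-ins for the STATIC lookup tables used above.
equivData = {"Rev": 0, "RPK": 1, "ASK": 2}
equivFlow = {"DOM": 0, "INT": 1}
equivYield = {"LOW": 0, "HIGH": 1}

# table[data][flow][yield][month], twelve months per innermost list
table = [[[[0.0] * 12 for _ in equivYield] for _ in equivFlow] for _ in equivData]

# One row in the non-Windows tuple form: (MONTH, CONTRIB, FLOW, ..., REV_EX_ROX, RPK, ASK)
row = (3, "LOW", "DOM", None, 1250.0, 9800.0, 11200.0)

table[equivData["Rev"]][equivFlow[row[2]]][equivYield[row[1]]][row[0]] = row[4]
table[equivData["RPK"]][equivFlow[row[2]]][equivYield[row[1]]][row[0]] = row[5]
table[equivData["ASK"]][equivFlow[row[2]]][equivYield[row[1]]][row[0]] = row[6]

print(table[equivData["Rev"]][0][0][3])   # -> 1250.0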
|
[
"def fill_table(self, table: ttk.Treeview, data: dict, **kwds) -> None:\r\n assert len(data) > 0, 'wrong data passes to the table'\r\n tag = kwds.get('tag', 'default')\r\n for values in data:\r\n table.insert('', END, values=(values,), tags=tag)",
"def _fill_table(self, table, gen) -> None:\n seq_table = self._table_map[table]\n seq_table.table.put_value(next(gen))",
"def fill_table(table, proba_preds, preds, ids, tests):\n \"\"\"\n \"\"\"\n pi_scores = []\n pi_preds = []\n pi_tests = []\n def is_in_set(x):\n if x['ID'] in ids:\n return True\n else: \n return False\n table['take'] = table.apply(is_in_set, axis=1)\n table = table[table['take']]\n for i in table['ID'].values:\n index = ids.index(i)\n pi_scores.append(proba_preds[index]) #pi = repermuté dans le sens de table\n pi_preds.append(preds[index])\n pi_tests.append(tests[index])\n table['proba_preds'] = pi_scores\n table['prediction'] = pi_preds\n table['test'] = pi_tests\n return table",
"def CreateTable(self, param):\n pass",
"def populate_geoprice_tables(val):\n price_val = format_price(val) \n price = Price(price_val)\n #logger.debug(\"Formatted price info..\")\n try:\n if type(price.product_uuid) is float and np.isnan(price.product_uuid):\n raise Exception(\"Product UUID needs to be generated!\")\n except Exception as e:\n return False\n #logger.info(\"Saving All...\")\n if price.save_all_batch():\n #logger.debug(\"Loaded tables for: {}\".format(val['product_uuid']))\n pass",
"def make_table(src):\n table = DataTable(source=src, columns=[TableColumn(field=c, title=c) for c in src.column_names], width=800)\n return table",
"def create_table_for(self, model):",
"def post(self, table):\n pagecount.IncrPageCount(\"export.PopulateTable.attempt\", 1)\n verify_dig_sig(self.request, \"PopulateTable\")\n\n table_version = str(utils.get_last_arg(self.request, \"tv\", \"\"))\n if len(table_version) > 0:\n verify_table_name(table_version)\n source = get_model(table, \"PopulateTable\")\n destination = type(table + table_version, (source,), {})\n else:\n destination = get_model(table, \"PopulateTable\")\n\n # handle reference properties\n def ref_property_UserInfo(field):\n rmodel = type('UserInfo' + table_version, (models.UserInfo,), {})\n return rmodel.get_by_key_name(field)\n\n def nop(v):\n \"\"\" this is used for unknown field types \"\"\"\n return v\n\n def str_to_datetime(datetimestring):\n \"\"\" convert string to a real DateTime object \"\"\"\n # dont need milliseconds here\n ar = datetimestring.split(\".\")\n datetime_format = \"%Y-%m-%d %H:%M:%S\"\n return datetime.strptime(ar[0], datetime_format)\n\n def str_to_date(datestring):\n \"\"\" convert string to a real Date object \"\"\"\n date_format = \"%Y-%m-%d\"\n return datetime.strptime(datestring, date_format).date()\n\n try:\n reset = int(utils.get_last_arg(self.request, \"reset\", \"0\"))\n except:\n pagecount.IncrPageCount(\"export.%s.nonIntLimit\" % \"PopulateTable\", 1)\n raise Fail(\"invalid &reset signal\")\n\n if reset == 1:\n \"\"\" we should only see this with a first batch of records \"\"\"\n logging.info(\"export.PopulateTable reset signal recvd for %s%s\" \n % (table, table_version))\n self.response.out.write(\n \"PopulateTable: reset signal recvd, clearing all rows\\n\")\n pagecount.IncrPageCount(\"export.%s.reset\" % \"PopulateTable\", 1)\n while True:\n query = destination.all()\n # cannot delete more than 500 entities in a single call\n # and if there are a lot here we are going to timeout\n # anyway but better to try and fail than risk duplicating\n results = query.fetch(500)\n if results:\n logging.info(\"export.PopulateTable deleting %d from %s%s\" % \n (len(results), table, table_version))\n self.response.out.write(\"PopulateTable: deleting %d from %s%s\\n\" \n % (len(results), table, table_version))\n db.delete(results)\n else:\n logging.info(\"export.PopulateTable %s%s reset complete\" % \n (table, table_version))\n self.response.out.write(\"PopulateTable: %s%s reset complete\\n\" % \n (table, table_version))\n break\n\n # one record per line\n rows = self.request.get(\"row\").split(\"\\n\")\n\n # the first row is a header\n header = rows.pop(0).split(\"\\t\")\n\n field_type = []\n for field in header:\n # we are going to want to remember a function for each field type\n # but for now all we are doing is initializing the list\n field_type.append(None)\n \n limit = get_limit(self.request, \"PopulateTable\")\n logging.info(\"export.PopulateTable write to %s%s\" % (table, table_version))\n\n written = 0\n row_number = 0\n for row in rows:\n row_number += 1\n # all of our kind of lines should start \"row=\"\n if len(row) > ROW_MARKER_LEN and row[0:ROW_MARKER_LEN] == ROW_MARKER:\n fields = row[ROW_MARKER_LEN:].split(\"\\t\")\n for i, field in enumerate(fields):\n if i == 0:\n # on the first column (key) we only instantiate our kind of record\n try:\n # it could be a named key\n if not str(field)[0].isdigit():\n record = destination(key_name = str(field))\n else:\n record = destination()\n except:\n record = destination()\n else:\n if field is None or len(strip(field)) < 1:\n # no field/field value, nothing to do\n continue\n\n if field_type[i] != None:\n # we think we already 
know what kind of field this is \n try:\n # but we could be wrong\n setattr(record, header[i], field_type[i](field))\n except:\n # nothing we can really do about it now except carry on\n # and see if we can still make this a good record\n logging.warning(\n \"export.PopulateTable %s = %s not set in row %d of %s%s\" % \n (header[i], field, row_number, table, table_version))\n self.response.out.write(\"field %s = %s not set in row %d of %s%s\\n\" % \n (header[i], field, row_number, table, table_version))\n pass\n else:\n # on the first row of the file\n # we dont know what type of field this is\n # but we can try them all until we succeed\n # and remember which one worked for subsequent rows\n n = 0\n while n < MAX_FIELD_TYPES:\n if n == FIELD_TYPE_REF:\n if table != \"UserInterest\" or header[i] != \"user\":\n continue\n setattr(record, header[i], ref_property_UserInfo(field))\n field_type[i] = ref_property_UserInfo\n break\n elif n == FIELD_TYPE_DATETIME:\n try:\n setattr(record, header[i], str_to_datetime(field))\n field_type[i] = str_to_datetime\n break\n except:\n pass\n elif n == FIELD_TYPE_DATE:\n try:\n setattr(record, header[i], str_to_date(field))\n field_type[i] = str_to_date\n break\n except:\n pass\n elif n == FIELD_TYPE_STR:\n try:\n setattr(record, header[i], field)\n field_type[i] = str\n break\n except:\n pass\n elif n == FIELD_TYPE_BOOL:\n try:\n setattr(record, header[i], bool(field))\n field_type[i] = bool\n break\n except:\n pass\n elif n == FIELD_TYPE_INT:\n try:\n setattr(record, header[i], int(field))\n field_type[i] = int\n break\n except:\n pass\n elif n == FIELD_TYPE_LONG:\n try:\n setattr(record, header[i], long(field))\n field_type[i] = long\n break\n except:\n pass\n elif n == FIELD_TYPE_FLOAT:\n try:\n setattr(record, header[i], float(field))\n field_type[i] = float\n break\n except:\n pass\n n += 1\n if n >= MAX_FIELD_TYPES:\n logging.warning(\n \"export.PopulateTable unknown field type %s in %s%s\" % \n (header[i], table, table_version))\n self.response.out.write(\"unknown field type %s in %s%s\\n\" % \n (header[i], table, table_version))\n field_type[i] = nop\n else:\n logging.debug(\"%s is type %d\\n\" % (header[i], n))\n\n # end-of for each field\n try:\n # ready to attempt a put\n record.put()\n written += 1\n if written >= limit:\n break\n except:\n logging.error(\"export.PopulateTable put failed at row %d in %s%s\" % \n (row_number, table, table_version))\n self.response.out.write(\"put failed at row %d in %s%s\\n\" % \n (row_number, table, table_version))\n\n # end-of for each row\n logging.info(\"export.PopulateTable wrote %d rows to %s%s\" % \n (written, table, table_version))\n self.response.out.write(\"wrote %d rows to %s%s\\n\" % \n (written, table, table_version))\n pagecount.IncrPageCount(\"export.PopulateTable.success\", 1)",
"def __create_tables(self):\r\n self.__create_lines_table()\r\n self.__create_stops_table()\r\n self.__create_buses_table()",
"def populate_table(self):\n self.leader_table.setRowCount(10)\n db_access = DbMethods()\n scores_list = db_access.select_best_players()\n for i in range(len(scores_list)):\n username, score = scores_list[i]\n item_user = QTableWidgetItem(username)\n item_score = QTableWidgetItem(str(score))\n self.leader_table.setItem(i, 0, item_user)\n self.leader_table.setItem(i, 1, item_score)",
"def init_data_table(self):\n\n table_header = self.dataframe.columns.tolist()\n table_data = self.dataframe.values.tolist()\n\n return [table_header] + table_data",
"def generate_table(self, rows):\r\n table = PrettyTable(**self.kwargs)\r\n for row in self.rows:\r\n if len(row[0]) < self.max_row_width:\r\n appends = self.max_row_width - len(row[0])\r\n for i in range(1,appends):\r\n row[0].append(\"-\")\r\n\r\n if row[1] == True:\r\n self.make_fields_unique(row[0])\r\n table.field_names = row[0]\r\n else:\r\n table.add_row(row[0])\r\n return table",
"def make_table(title, primary, secondary, count):\n table = document.add_table(rows=1, cols=4, style='Normal Table')\n hdr_cells = table.rows[0].cells\n hdr_cells[0].paragraphs[0].add_run(title).bold = True\n headers = ['Freq', 'Percent', 'Cum.']\n for position, header in enumerate(headers):\n hdr_cells[position + 1].paragraphs[0].add_run(header).underline = True\n total = 0\n for key, value in primary.items():\n row_cells = table.add_row().cells\n row_cells[0].text = secondary[key]\n row_cells[1].text = str(value)\n total += int(value)\n row_cells[2].text = str(round(float(value / count) * 100, 0)) + '%'\n row_cells[3].text = str(round(float(total / count) * 100, 0)) + '%'\n set_col_widths(table, 3, 1)",
"def create_table() -> list: \n \n # Table size\n columns = range(random.randint(2,8))\n rows = range(random.randint(2,8))\n \n table = [[generate_data() for row in rows] \n for column in columns]\n return table",
"def create_table(file_name):\n csv_data = pandas.read_csv(file_name)\n csv_data = csv_data.values.tolist()\n table_name_from_file = file_name.split('/')[8][:-4]\n\n # Here we call the subroutine as the last\n table_definition(table_name_from_file)\n # Loop as the next manner over the rows\n for row in csv_data:\n ins = table_definition.table_define.insert().values(\n restaurant_id=row[0], name=row[1], rate=row[2],\n site=row[3], email=row[3], city=row[4], state=row[5],\n lat=row[6], lngtd=row[7],\n )\n conn = engine.connect()\n conn.execute(ins)",
"def fill(datatype, source=None):\n if source:\n response = read(source)\n if failed(response):\n return response\n table = response[\"table\"]\n grid = grids.table_to_grid(config.prefixes, config.fields, table)\n response = fill_rows(datatype, grid[\"rows\"])\n response[\"table\"] = table\n response[\"grid\"] = grid\n else:\n response = fill_rows(datatype)\n return response",
"def gen_table(self,\n parent: html.element,\n tabid: str,\n attrdct: dict,\n data: dataelements.record) -> typing.Tuple[html.table, typing.List[HTMLField]]:\n if self._dct_lst is None:\n return None, None\n tab = html.table(parent, tabid, attrdct)\n print(\"GENTABBY {}\".format(data))\n inp_lst = []\n for varname, dct in self._dct_lst:\n print(\"GENF {}: {}\".format(varname, dct))\n row = html.tr(tab, \"{}-row{}\".format(tabid, varname), None)\n # left column: variable name: use a th\n th = html.th(row, \"{}-rowh{}\".format(tabid, varname), None)\n html.textnode(th, varname)\n # now the data field: use a td\n datvar = None if data is None else data[varname]\n td = html.td(row, \"{}-rowd{}\".format(tabid, varname), None)\n inp = self._input_el(td, dct, datvar)\n if inp is not None:\n inp_lst.append(inp)\n return tab, inp_lst",
"def fill_QA_definition_table(self):\n qa_def = self.cmbQADef.currentText()\n index = self.cmbQADef.findText(qa_def, QtCore.Qt.MatchFixedString)\n\n # Set number of entries\n rows, cols = self.qa_analytics.qa_defs[index].shape\n self.tvQADef.setRowCount(rows)\n self.tvQADef.setColumnCount(cols)\n\n for row in range(rows):\n for col in range(cols):\n # Insert item on QA def TableView\n item = str(self.qa_analytics.qa_defs[index].iloc[row, col])\n self.tvQADef.setItem(row, col,\n QtWidgets.QTableWidgetItem(item))\n\n self.tvQADef.resizeColumnsToContents()",
"def insert_medical_table(self):\n med_table = self.generate_medical_table_items()\n med_table_flat = [item for col in list(zip(*med_table)) for item in col]\n\n med_cells = self.generate_medical_table_cells()\n med_cells = [''.join(cell) for cell in med_cells]\n\n for cell, val in zip(med_cells, med_table_flat):\n self.sheet[cell] = val\n\n self.insert_total_medical_row()\n self.format_medical_items_as_table()\n self.format_medical_table()",
"def create_table(small_dict):\r\n keys, values = tuple(zip(*small_dict.items()))\r\n table = tabulate(\r\n [values],\r\n headers=keys,\r\n tablefmt=\"pipe\",\r\n floatfmt=\".3f\",\r\n stralign=\"center\",\r\n numalign=\"center\",\r\n )\r\n return table"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Builds a ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional details.
|
def _build_parameters(
self,
parameter_container: ParameterContainer,
domain: Domain,
*,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
):
validator: Validator = self.get_validator(
domain=domain,
variables=variables,
parameters=parameters,
)
batch_id: str = self.get_batch_id(variables=variables)
metric_computation_result: Dict[
str, Union[Any, Number, Dict[str, Any]]
] = self.get_metric(
batch_id=batch_id,
validator=validator,
metric_name=self._metric_name,
metric_domain_kwargs=self._metric_domain_kwargs,
metric_value_kwargs=self._metric_value_kwargs,
enforce_numeric_metric=self._enforce_numeric_metric,
replace_nan_with_zero=self._replace_nan_with_zero,
domain=domain,
variables=variables,
parameters=parameters,
)
parameter_values: Dict[str, Any] = {
f"$parameter.{self.parameter_name}": metric_computation_result,
}
build_parameter_container(
parameter_container=parameter_container, parameter_values=parameter_values
)
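Schematically (and not the actual great_expectations API), the mapping handed to build_parameter_container pairs a fully-qualified "$parameter.<name>" key with the raw metric computation result; the names and values below are made up for illustration:

parameter_name = "my_metric"                      # illustrative parameter name
metric_computation_result = {                     # illustrative result shape
    "value": 42,
    "details": {"metric_configuration": {"metric_name": "column.max"}},
}
parameter_values = {
    f"$parameter.{parameter_name}": metric_computation_result,
}
print(parameter_values)  # {'$parameter.my_metric': {'value': 42, 'details': {...}}}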
|
[
"def setup_Component_with_parameters():\n comp = setup_Component_all_keywords()\n\n comp._unfreeze()\n # Need to set up attribute parameters\n comp.new_par1 = 1.5\n comp.new_par2 = 3\n comp.new_par3 = None\n comp.this_par = \"test_val\"\n comp.that_par = \"\\\"txt_string\\\"\"\n # also need to categorize them as when created\n comp.parameter_names = [\"new_par1\", \"new_par2\", \"new_par3\",\n \"this_par\", \"that_par\"]\n comp.parameter_defaults = {\"new_par1\": 5.1,\n \"new_par2\": 9,\n \"new_par3\": None,\n \"this_par\": \"conga\",\n \"that_par\": \"\\\"txt\\\"\"}\n comp.parameter_comments = {\"new_par1\": \"This is important\",\n \"new_par2\": \"This is less important\",\n \"this_par\": \"!\",\n \"that_par\": \"\"}\n comp.parameter_types = {\"new_par1\": \"double\",\n \"new_par2\": \"int\",\n \"this_par\": \"\",\n \"that_par\": \"string\"}\n comp.parameter_units = {\"new_par1\": \"m\",\n \"new_par2\": \"AA\",\n \"this_par\": \"\",\n \"that_par\": \"1\"}\n comp.line_limit = 117\n comp._freeze()\n\n return comp",
"def add_parameter(root, parameter_name, fieldnames, values, attriutes=None):\n if attriutes is None:\n attriutes = [None] * len(fieldnames)\n\n for s in root:\n if s.tag == \"parameter\" and s.attrib[\"name\"] == parameter_name:\n break\n else:\n node = etree.Element(\"parameter\", name=parameter_name)\n for fieldname, value, attriute in zip(fieldnames, values, attriutes):\n ele = etree.SubElement(node, fieldname)\n ele.text = value\n if attriute is not None:\n ele.set(attriute[0], attriute[1])\n root.insert(len(root), node)",
"def buildParameter(self, name, value, type):\n # type: (str, str, int) -> IParameter",
"def as_parameter(self):\n from pyqtgraph.parametertree.Parameter import PARAM_TYPES\n\n parameter_dicts = []\n for name, parameter in inspect.signature(self._func).parameters.items():\n if getattr(parameter.annotation, '__name__', None) in PARAM_TYPES:\n parameter_dict = dict()\n parameter_dict.update(self.opts.get(name, {}))\n parameter_dict['name'] = name\n parameter_dict[\n 'default'] = parameter.default if parameter.default is not inspect.Parameter.empty else None\n parameter_dict['value'] = self.filled_values[\n name] if name in self.filled_values else parameter_dict['default']\n\n parameter_dict['type'] = getattr(self.input_types[name], '__name__', None)\n if name in self.limits:\n parameter_dict['limits'] = self.limits[name]\n parameter_dict['units'] = self.units.get(name)\n parameter_dict['fixed'] = self.fixed.get(name)\n parameter_dict['fixable'] = self.fixable.get(name)\n parameter_dict['visible'] = self.visible.get(name, True)\n parameter_dict.update(self.opts.get(name, {}))\n\n parameter_dicts.append(parameter_dict)\n\n elif getattr(self.input_types[name], \"__name__\", None) == \"Enum\":\n parameter_dict = dict()\n parameter_dict['name'] = name\n parameter_dict['value'] = self.filled_values[\n name] if name in self.filled_values else parameter.default\n parameter_dict['values'] = self.limits.get(name) or [\"---\"],\n parameter_dict['default'] = parameter.default\n parameter_dict['type'] = \"list\",\n if name in self.limits:\n parameter_dict['limits'] = self.limits[name]\n parameter_dict['units'] = self.units.get(name)\n parameter_dict['fixed'] = self.fixed.get(name) # TODO: Does this need a default value\n parameter_dict['fixable'] = self.fixable.get(name)\n parameter_dict['visible'] = self.visible.get(name, True) # TODO: should we store the defaults at top?\n parameter_dict.update(self.opts.get(name, {}))\n\n parameter_dicts.append(parameter_dict)\n return parameter_dicts",
"def _build_param_dict(self):\n\n # TODO: leave this regex here for now - we've only tested against a simulator\n # the real instrument might give us floats, then we'll need this\n # FLOAT_REGEX = r'((?:[+-]?[0-9]|[1-9][0-9])+\\.[0-9]+)'\n\n int_regex = r'([+-]?[0-9]+)'\n\n # Add parameter handlers to parameter dict.\n self._param_dict.add(Parameter.ENDPOINT,\n r'NOT USED',\n None,\n str,\n type=ParameterDictType.STRING,\n display_name=\"Endpoint\",\n description='IP address of the system running the UltraGrid receiver process.',\n startup_param=False,\n direct_access=False,\n default_value=DEFAULT_ENDPOINT,\n visibility=ParameterDictVisibility.READ_ONLY)\n\n self._param_dict.add(Parameter.PAN_POSITION,\n r'\"pan\": ' + int_regex,\n lambda match: float(match.group(1)),\n str,\n type=ParameterDictType.FLOAT,\n display_name=\"Pan\",\n range=(45, 315),\n description='Camera pan position: (45 - 315)',\n startup_param=False,\n direct_access=False,\n default_value=180.0,\n units=Units.DEGREE_PLANE_ANGLE,\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.TILT_POSITION,\n r'\"tilt\": ' + int_regex,\n lambda match: float(match.group(1)),\n str,\n type=ParameterDictType.FLOAT,\n display_name=\"Tilt\",\n description='Camera tilt position: (50 - 140)',\n range=(50, 140),\n startup_param=False,\n direct_access=False,\n default_value=90.0,\n units=Units.DEGREE_PLANE_ANGLE,\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.PAN_TILT_SPEED,\n r'NOT USED',\n None,\n str,\n type=ParameterDictType.FLOAT,\n display_name=\"Speed\",\n range=(0.5, 40),\n description='Pan-Tilt speed, in 0.5 deg/s increments: (0.5 - 40)',\n startup_param=False,\n direct_access=False,\n default_value=10.0,\n units=Units.DEGREE_PLANE_ANGLE_PER_SECOND,\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.HEADING,\n r'\"heading\": ' + int_regex,\n lambda match: float(match.group(1)),\n str,\n type=ParameterDictType.FLOAT,\n display_name=\"Heading\",\n range=(0, 360),\n description='Heading relative to magnetic North: (0 - 360)',\n startup_param=False,\n direct_access=False,\n default_value=0.0,\n units=Units.DEGREE_PLANE_ANGLE,\n visibility=ParameterDictVisibility.READ_ONLY)\n\n self._param_dict.add(Parameter.PITCH,\n r'\"pitch\": ' + int_regex,\n lambda match: float(match.group(1)),\n str,\n type=ParameterDictType.FLOAT,\n display_name=\"Pitch\",\n range=(-90, 90),\n description='Gravity referenced pitch angle. 
Negative values are up, '\n 'positive values are down: (-90 - 90)',\n startup_param=False,\n direct_access=False,\n default_value=0.0,\n units=Units.DEGREE_PLANE_ANGLE,\n visibility=ParameterDictVisibility.READ_ONLY)\n\n self._param_dict.add(Parameter.LIGHT_1_LEVEL,\n r'\"intensity\": \\[([\\d]+), ([\\d]+)\\]',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Light 1 Level\",\n range=(0, 100),\n description='Relative intensity of light 1: (0 - 100)',\n startup_param=False,\n direct_access=False,\n default_value=50,\n units=Units.PERCENT,\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.LIGHT_2_LEVEL,\n r'\"intensity\": \\[([\\d]+), ([\\d]+)\\]',\n lambda match: int(match.group(2)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Light 2 Level\",\n range=(0, 100),\n description='Relative intensity of light 2: (0 - 100)',\n startup_param=False,\n direct_access=False,\n default_value=50,\n units=Units.PERCENT,\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.ZOOM_LEVEL,\n r'\"zoom\": ' + int_regex,\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Zoom Level\",\n range=(0, 7),\n description='Zoom level in steps relative to the current setting: (+/- integer value)',\n startup_param=False,\n direct_access=False,\n default_value=0,\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.LASERS_STATE,\n r'\"laser\": \"(on|off)\"',\n lambda match: match.group(1),\n str,\n type=ParameterDictType.STRING,\n display_name=\"Lasers State\",\n range={'On': 'on', 'Off': 'off'},\n description='Lasers state: (on | off)',\n startup_param=False,\n direct_access=False,\n default_value='off',\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.STATUS_INTERVAL,\n r'NOT USED',\n None,\n str,\n type=ParameterDictType.STRING,\n display_name=\"Acquire Status Interval\",\n description='Driver parameter used for acquire status schedule.',\n startup_param=False,\n direct_access=False,\n default_value='00:00:00',\n units=ParameterUnit.TIME_INTERVAL,\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.ELEMENTAL_IP_ADDRESS,\n r'NOT USED',\n None,\n str,\n type=ParameterDictType.STRING,\n display_name=\"Elemental IP Address\",\n description='IP Address of the elemental live server running the video archive process.',\n startup_param=False,\n direct_access=False,\n default_value='209.124.182.238',\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.OUTPUT_GROUP_ID,\n r'NOT USED',\n None,\n int,\n type=ParameterDictType.INT,\n display_name=\"Output Group ID\",\n description='Output group ID for the archive video output streams being recorded '\n 'by elemental.',\n startup_param=False,\n direct_access=False,\n default_value=27,\n range=(1, 65536),\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.set_default(Parameter.STATUS_INTERVAL)\n self._param_dict.set_default(Parameter.ENDPOINT)\n self._param_dict.set_default(Parameter.PAN_TILT_SPEED)\n self._param_dict.set_default(Parameter.ELEMENTAL_IP_ADDRESS)\n self._param_dict.set_default(Parameter.OUTPUT_GROUP_ID)",
"def _process_parameter(self, item):\n a_param = nodes.Parameter()\n logger = logging.getLogger(self.__class__.__name__)\n\n # In a Full CWMP-DM XML, Parameters always have a @name, @access, and syntax\n a_param.set_name(item[\"@name\"])\n a_param.set_access(item[\"@access\"])\n\n # In a Full CWMP-DM XML, Parameters never have a @base\n if \"@base\" in item:\n a_param.set_base(item[\"@base\"])\n\n if \"@activeNotify\" in item:\n a_param.set_active_notify(item[\"@activeNotify\"])\n\n if \"@forcedInform\" in item:\n a_param.set_forced_inform(item[\"@forcedInform\"])\n\n if \"description\" in item:\n a_param.set_description(item[\"description\"])\n\n # In a Full CWMP-DM XML, Parameters always have a @name and @access\n logger.debug(\n \"Processing Parameter: \\\"{}\\\" with \\\"{}\\\" Access\"\n .format(a_param.get_name(), a_param.get_access))\n\n a_param.set_syntax(self._process_syntax(a_param.get_name(), item[\"syntax\"]))\n\n return a_param",
"def init_coupled_parameters(self):\n params=NamedObjects(scenario=self,cast_value=cast_to_parameter)\n # All of the current known options:\n # params['Tau']=1\n # params['TauFlow']=1\n # params['Velocity']=1\n if self.model.mdu.get_bool('physics','Salinity'):\n params['salinity']=1 \n if self.model.mdu.get_bool('physics','Temperature'):\n params['temp']=1 \n params['vwind']=1\n #params['winddir']=1\n #params['rain']=1\n return params",
"def build_parameters(self) -> None:\n if not self.query_string:\n return\n\n query_parser = CardQueryParser()\n try:\n self.root_parameter = query_parser.parse(self.query_string)\n # TODO\n self.sort_params = []\n except (ParseError, ValueError) as error:\n self.error_message = str(error)",
"def _parse_parameter_nodes(self, parameter_protos: list):\n for parameter in parameter_protos:\n name = parameter.get('name')\n if not name:\n logger.warning(\"Finding a parameter with an empty name will not be saved.\")\n continue\n node = Node(name=name, node_id=name)\n # Note: Display parameter as Const\n node.type = NodeType.CONST.value\n self._append_node(node)",
"def mkParams(self):\n parameters = lmfit.Parameters()\n for parameterName in self.parameterNames:\n parameters.add(parameterName,\n min=self.lower,\n max=self.upper,\n value=self.value)\n return parameters",
"def parameterDescription(self, identifier, activityDescription, name, valueType, description=None, unit=None,\n ucd=None, utype=None, min=None, max=None, options=None, default=None,\n other_attributes=None):\n if other_attributes is None:\n other_attributes = {}\n if description is not None:\n other_attributes.update({VOPROV['description']: description})\n if unit is not None:\n other_attributes.update({VOPROV['unit']: unit})\n if ucd is not None:\n other_attributes.update({VOPROV['ucd']: ucd})\n if utype is not None:\n other_attributes.update({VOPROV['utype']: utype})\n if min is not None:\n other_attributes.update({VOPROV['min']: min})\n if max is not None:\n other_attributes.update({VOPROV['max']: max})\n if options is not None:\n other_attributes.update({VOPROV['options']: options})\n if default is not None:\n other_attributes.update({VOPROV['default']: default})\n if len(other_attributes) == 0:\n other_attributes = None\n self.relate(identifier, activityDescription)\n return self.new_record(\n VOPROV_PARAMETER_DESCRIPTION, identifier, {\n VOPROV_ATTR_NAME: name,\n VOPROV_ATTR_VALUE_TYPE: valueType\n },\n other_attributes\n )",
"def get_params(self):\r\n def gen_param_name_copy(param, keys=(), values=(), array_type=None):\r\n \"\"\"\r\n Create a param with the original scope (of varargs) as parent.\r\n \"\"\"\r\n if isinstance(self.var_args, pr.Array):\r\n parent = self.var_args.parent\r\n start_pos = self.var_args.start_pos\r\n else:\r\n parent = self.decorated\r\n start_pos = 0, 0\r\n\r\n new_param = copy.copy(param)\r\n new_param.is_generated = True\r\n if parent is not None:\r\n new_param.parent = parent\r\n\r\n # create an Array (-> needed for *args/**kwargs tuples/dicts)\r\n arr = pr.Array(self._sub_module, start_pos, array_type, parent)\r\n arr.values = values\r\n key_stmts = []\r\n for key in keys:\r\n stmt = pr.Statement(self._sub_module, [], start_pos, None)\r\n stmt._commands = [key]\r\n key_stmts.append(stmt)\r\n arr.keys = key_stmts\r\n arr.type = array_type\r\n\r\n new_param._commands = [arr]\r\n\r\n name = copy.copy(param.get_name())\r\n name.parent = new_param\r\n return name\r\n\r\n result = []\r\n start_offset = 0\r\n if isinstance(self.decorated, InstanceElement):\r\n # Care for self -> just exclude it and add the instance\r\n start_offset = 1\r\n self_name = copy.copy(self.decorated.params[0].get_name())\r\n self_name.parent = self.decorated.instance\r\n result.append(self_name)\r\n\r\n param_dict = {}\r\n for param in self.decorated.params:\r\n param_dict[str(param.get_name())] = param\r\n # There may be calls, which don't fit all the params, this just ignores\r\n # it.\r\n var_arg_iterator = self.get_var_args_iterator()\r\n\r\n non_matching_keys = []\r\n keys_used = set()\r\n keys_only = False\r\n for param in self.decorated.params[start_offset:]:\r\n # The value and key can both be null. There, the defaults apply.\r\n # args / kwargs will just be empty arrays / dicts, respectively.\r\n # Wrong value count is just ignored. If you try to test cases that\r\n # are not allowed in Python, Jedi will maybe not show any\r\n # completions.\r\n key, value = next(var_arg_iterator, (None, None))\r\n while key:\r\n keys_only = True\r\n try:\r\n key_param = param_dict[str(key)]\r\n except KeyError:\r\n non_matching_keys.append((key, value))\r\n else:\r\n keys_used.add(str(key))\r\n result.append(gen_param_name_copy(key_param,\r\n values=[value]))\r\n key, value = next(var_arg_iterator, (None, None))\r\n\r\n commands = param.get_commands()\r\n keys = []\r\n values = []\r\n array_type = None\r\n ignore_creation = False\r\n if commands[0] == '*':\r\n # *args param\r\n array_type = pr.Array.TUPLE\r\n if value:\r\n values.append(value)\r\n for key, value in var_arg_iterator:\r\n # Iterate until a key argument is found.\r\n if key:\r\n var_arg_iterator.push_back((key, value))\r\n break\r\n values.append(value)\r\n elif commands[0] == '**':\r\n # **kwargs param\r\n array_type = pr.Array.DICT\r\n if non_matching_keys:\r\n keys, values = zip(*non_matching_keys)\r\n elif not keys_only:\r\n # normal param\r\n if value is not None:\r\n values = [value]\r\n else:\r\n if param.assignment_details:\r\n # No value: return the default values.\r\n ignore_creation = True\r\n result.append(param.get_name())\r\n param.is_generated = True\r\n else:\r\n # If there is no assignment detail, that means there is\r\n # no assignment, just the result. 
Therefore nothing has\r\n # to be returned.\r\n values = []\r\n\r\n # Just ignore all the params that are without a key, after one\r\n # keyword argument was set.\r\n if not ignore_creation and (not keys_only or commands[0] == '**'):\r\n keys_used.add(str(key))\r\n result.append(gen_param_name_copy(param, keys=keys,\r\n values=values, array_type=array_type))\r\n\r\n if keys_only:\r\n # sometimes param arguments are not completely written (which would\r\n # create an Exception, but we have to handle that).\r\n for k in set(param_dict) - keys_used:\r\n result.append(gen_param_name_copy(param_dict[k]))\r\n return result",
"def make_parameters(original_parameter, vars_to_ignore=[]):\n\n if hasattr(original_parameter, 'custom_diags'):\n with open(original_parameter.custom_diags) as json_file:\n json_data = json.loads(json_file.read())\n else:\n json_data = {'': []} # the first key doesn't hold any value, so it's ''\n for set_num in original_parameter.sets:\n default_set_runs = _get_default_diags(set_num)\n for _, set_runs in default_set_runs.iteritems():\n for single_run in set_runs:\n json_data[''].append(single_run)\n parameters = []\n for key in json_data:\n for single_run in json_data[key]:\n p = ACMEParameter()\n for attr_name in single_run:\n setattr(p, attr_name, single_run[attr_name])\n\n # Add attributes of original_parameter to p\n for var in original_parameter.__dict__:\n if var not in vars_to_ignore:\n p.__dict__[var] = original_parameter.__dict__[var]\n p.check_values()\n parameters.append(p)\n return parameters",
"def generative_parameters(self):\n pass",
"def parameter_declaration(\n identifier, name=None, data_type=DT_STRING, description=None, index=0,\n required=True, values=None, parent=None, default_value=None,\n as_const=None\n ):\n if identifier is None:\n raise InvalidParameterError('missing identifier')\n if not data_type in DATA_TYPES:\n raise InvalidParameterError('invalid parameter data type \\'{}\\''.format(data_type))\n para = {\n LABEL_ID: identifier,\n LABEL_NAME: name if not name is None else identifier,\n LABEL_DATATYPE: data_type,\n LABEL_INDEX: index,\n LABEL_REQUIRED: required\n }\n # Set optional properties\n if not description is None:\n para[LABEL_DESCRIPTION] = description\n else:\n para[LABEL_DESCRIPTION] = para[LABEL_NAME]\n if not values is None:\n para[LABEL_VALUES] = values\n if not parent is None:\n para[LABEL_PARENT] = parent\n if not as_const is None:\n para[LABEL_AS] = as_const\n if not default_value is None:\n para[LABEL_DEFAULT] = default_value\n return para",
"def make_param_list(self):\n self.params, self.bn_layers = {}, {}\n\n for key in list(self.layers.keys()):\n self.params[key] = []\n self.bn_layers[key] = []\n for layer in self.layers[key]:\n if layer.get_params():\n self.params[key] += layer.get_params()\n if layer.__class__.__name__ == 'BatchNormLayer':\n self.bn_layers[key].append(layer)",
"def __init__(self):\n self.parameters = {}",
"def newPanelParameter(self, **attrlinks):\n return PanelParameter(self, **attrlinks)",
"def createBuilder(self, *args, **kwargs):\n paramBuilder = ParameterBuilder(*args, **kwargs)\n paramBuilder.associateParameterDefinitionCollection(self)\n return paramBuilder"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Capture comment and redirect to movie page.
|
def post_movie_comment():
    today = date.today()
    comment_date = "%d %s %d" % (today.day, month_name[today.month],
                                 today.year)
    comment = Comment(comment_date, request.form["name"],
                      request.form["text"])
    title_id = int(request.form["title_id"])
    movie = Movie.select(graph, title_id).first()
    comment.subject.add(movie)
    graph.create(comment)
    return redirect("/movie/%s" % title_id)
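A small, hypothetical wiring sketch for the view above, assuming a Flask app object and the imports the snippet implies (date, month_name, request, redirect, and the py2neo OGM classes); the URL rule itself is made up.
# Hypothetical route registration; '/movie/comment' is an assumed URL, not from the source.
app.add_url_rule('/movie/comment', view_func=post_movie_comment, methods=['POST'])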
|
[
"def post_reply(assignment_name, file_name, comment_id):\n\t\t# grab user input from submitted form\n\t\tcomment_data = request.form['comment']\n\t\tcomment = Comment(file_name, comment_id, comment_data)\n\t\t# apply filter to comment\n\t\tcomment.apply_filter()\n\t\t# propogate changes to db\n\t\tdb.session.add(comment)\n\t\tdb.session.commit()\n\t\t# re-display the file page.\n\t\treturn get_file(assignment_name, file_name)",
"def comment(self, comment):\r\n\r\n if not isinstance(comment, basestring):\r\n comment = str(comment)\r\n\r\n # Remote phone comment\r\n if self._name != 'Main':\r\n comment = self._name + \" : \" + comment\r\n\r\n if self.result:\r\n # clear current test step for performance measuring\r\n self.result.clearCurrentTestStep()\r\n\r\n debug.brf(comment)\r\n if core.FW_conf['blackbox'] != None and core.FW_conf['blackbox'].isVideoRecorderAvailable():\r\n core.FW_conf['blackbox'].videoRecording_SetText(comment)\r\n\r\n if self.result:\r\n # FIXME: Remove list approach from addStepComment\r\n comment = [comment]\r\n self.result.addStepComment(comment)",
"def post_comment_pl():\r\n\tinsert_comment(request.form['Username'], request.form['Comment'])\r\n\t\r\n\treturn redirect(url_for('Pl'))",
"def capture():\r\n (x, y) = global_camera.get_coordinates()\r\n # Label snapshot image with the x- and y-coordinates:\r\n path = \"/capture/X{}Y{}.jpeg\".format(x,y)\r\n return redirect(path)",
"def movie_comments(self, movie_id):\n self.endpoint = 'movie_comments.json'\n self.payload = {'movie_id': movie_id}\n return self.__make_request()",
"def show_comment_form(self, req, page):\n page_id = self.env.get_real_filename(page)[:-4]\n ajax_mode = req.args.get('mode') == 'ajax'\n target = req.args.get('target')\n page_comment_mode = not target\n\n form_error = preview = None\n title = req.form.get('title', '').strip()\n if 'author' in req.form:\n author = req.form['author']\n else:\n author = req.session.get('author', '')\n if 'author_mail' in req.form:\n author_mail = req.form['author_mail']\n else:\n author_mail = req.session.get('author_mail', '')\n comment_body = req.form.get('comment_body', '')\n fields = (title, author, author_mail, comment_body)\n\n if req.method == 'POST':\n if req.form.get('preview'):\n preview = Comment(page_id, target, title, author, author_mail,\n comment_body)\n # 'homepage' is a forbidden field to thwart bots\n elif req.form.get('homepage') or self.antispam.is_spam(fields):\n form_error = 'Your text contains blocked URLs or words.'\n else:\n if not all(fields):\n form_error = 'You have to fill out all fields.'\n elif _mail_re.search(author_mail) is None:\n form_error = 'You have to provide a valid e-mail address.'\n elif len(comment_body) < 20:\n form_error = 'You comment is too short ' \\\n '(must have at least 20 characters).'\n else:\n # '|none' can stay since it doesn't include comments\n self.cache.pop(page_id + '|inline', None)\n self.cache.pop(page_id + '|bottom', None)\n comment = Comment(page_id, target,\n title, author, author_mail,\n comment_body)\n comment.save()\n req.session['author'] = author\n req.session['author_mail'] = author_mail\n if ajax_mode:\n return JSONResponse({'posted': True, 'error': False,\n 'commentID': comment.comment_id})\n return RedirectResponse(comment.url)\n\n output = render_template(req, '_commentform.html', {\n 'ajax_mode': ajax_mode,\n 'preview': preview,\n 'suggest_url': '@edit/%s/' % page,\n 'comments_form': {\n 'target': target,\n 'title': title,\n 'author': author,\n 'author_mail': author_mail,\n 'comment_body': comment_body,\n 'error': form_error\n }\n })\n\n if ajax_mode:\n return JSONResponse({\n 'body': output,\n 'error': bool(form_error),\n 'posted': False\n })\n return Response(render_template(req, 'commentform.html', {\n 'form': output\n }))",
"def test_attach_movie_to_ticket_comment(self):\n\n # login to the website\n self.utils.account.login_as(self.username,self.password)\n\n po = self.catalog.load_pageobject('SupportTicketNewPage')\n po.goto_page()\n\n # name, email and description are required\n data = {\n 'problem' : \"hubcheck test ticket\\n%s\" % (self.fnbase),\n }\n\n # submit the ticket\n po.submit_ticket(data)\n\n po = self.catalog.load_pageobject('SupportTicketSavePage')\n self.ticket_number = po.get_ticket_number()\n\n info = po.get_error_info()\n assert len(info) == 0, \"received unexpected error: %s\" % (info)\n assert self.ticket_number is not None, \"no ticket number returned\"\n assert int(self.ticket_number) > 0, \\\n \"invalid ticket number returned: %s\" % (self.ticket_number)\n\n\n # attach a movie image to a comment\n uploadfilename = 'movie1.mpg'\n uploadfilepath = os.path.join(self.datadir,'images',uploadfilename)\n\n po = self.catalog.load_pageobject('SupportTicketViewPage',\n self.ticket_number)\n po.goto_page()\n comment_data = {\n 'comment' : 'attaching a movie file',\n 'upload' : uploadfilepath,\n }\n po.add_comment(comment_data)\n\n\n # check if the movie was uploaded\n comment = po.get_nth_comment(-1)\n moviesrc = comment.download_attachment(uploadfilename)\n\n # not sure how to really download movie files yet.\n # so we assume that as long as opening the image didn't\n # cause an error, the test passed.\n\n assert re.search(uploadfilename,moviesrc) is not None, \\\n \"After uploading a movie to support ticket\" \\\n + \" #%s, could not download movie %s\" \\\n % (self.ticket_number,uploadfilename)",
"def comment(thread_uid):\n thread = storage.get_thread(thread_uid)\n if not thread:\n abort(404)\n\n text = request.form.get('text') or ''\n if not text:\n return error('comment:text')\n\n storage.add_comment(thread_uid, g.username, text)\n flash('Your comment successfully added!', 'success')\n\n return redirect(url_for('comments', thread_uid=thread_uid))",
"def record_screen(self, device):\n print(f'🎥 Started recording! Press Control-C to stop …')\n destination = self.perform_screen_recording(device)\n\n print('')\n print('👍 Recording completed')\n time.sleep(0.3)\n\n print('⏳ Converting video to GIF …')\n time.sleep(1)\n\n return destination",
"def show_trailer(self):\n webbrowser.open(self.youtube)",
"def addcomment(request, song_id):\r\n if request.method == 'POST':\r\n comment = request.POST['Comment'].strip()\r\n song = get_object_or_404(Song, id=song_id)\r\n if comment:\r\n form = SongComment(comment = request.POST['Comment'], song = song, user = request.user)\r\n form.save()\r\n return HttpResponseRedirect(song.get_absolute_url())",
"def send_mail_for_user_about_new_comment(comment, video, author=None):\n\n if ALLOWED_HOSTS:\n host = 'http://' + ALLOWED_HOSTS[0]\n else:\n host = 'http://localhost:8000'\n if author:\n context = {'author': author, 'host': host, 'comment': comment, 'video': video}\n else:\n context = {'author': video.author, 'host': host, 'comment': comment, 'video': video}\n author = video.author\n subject = render_to_string('email/new_comment_subject.txt')\n body = render_to_string('email/new_comment_body.txt', context)\n if author.send_messages:\n author.email_user(subject, body)",
"def post_comment(assignment_name, file_name):\n\t\t# grab user input from submitted form\n\t\tcomment_data = request.form['comment']\n\t\tcomment = Comment(file_name, None, comment_data)\n\t\t# apply filter to comment\n\t\tcomment.apply_filter()\n\t\t# propogate changes to db\n\t\tdb.session.add(comment)\n\t\tdb.session.commit()\n\t\t# re-display the file page.\n\t\treturn get_file(assignment_name, file_name)",
"async def add_comment(\n request: Request,\n event_id: int,\n session: Session = Depends(get_db),\n) -> Response:\n form = await request.form()\n data = {\n \"user_id\": get_current_user(session).id,\n \"event_id\": event_id,\n \"content\": form[\"comment\"],\n \"time\": dt.now(),\n }\n create_model(session, Comment, **data)\n path = router.url_path_for(\"view_comments\", event_id=str(event_id))\n return RedirectResponse(path, status_code=status.HTTP_303_SEE_OTHER)",
"def play_movie(self, url):\n self.open_url(url)",
"def main_video():\n annotate_movie(\"project_video.mp4\", \"annotated_project_video.mp4\")\n # annotate_movie(\"challenge_video.mp4\", \"annotated_challenge_video.mp4\")",
"def comment( self, comment ) :\n return self.client.commentonwiki( self.project, self, comment )",
"def video_feed():\n return Response(gen(Camera_person_online()),\n mimetype='multipart/x-mixed-replace; boundary=frame')",
"def preview_capture_example():"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check whether the journal's check_sequence is defined.
|
def _check_journal_seq(self, journal_id, context=None):
    if not journal_id.check_sequence:
        raise osv.except_osv(_('Warning'), _('Please add "Check Sequence" for journal %s') % journal_id.name)
    return True
|
[
"def _is_sequence(self, ddl):\n m_seqs = self._find_seq.search(ddl)\n return m_seqs is not None",
"def hasSequence(self):\n if os.path.exists(self.sequencePath):\n return True\n else:\n return False",
"def check_sequence(sequencer: dict) -> str:\n sequence = get_real_key(sequencer, \"sequence\")\n if not sequence or not isinstance(sequencer[sequence], list):\n return \"no steps\"\n if len(sequencer[sequence]) == 0:\n return \"no steps\"\n return \"\"",
"def check_seq(self):\n nuc_list = ['A', 'T', 'C', 'G']\n global check, error_details\n for row_index, row in self.primer_df.iterrows():\n for letter in row['Primer_seq'].strip():\n if letter not in nuc_list:\n check += 1\n error = \"Invalid DNA primer sequence, see row %s in file\" % (row_index + 4)\n error_details.append(error)",
"def supports_sequence_rule_lookup(self):\n return False",
"def _valid_seq(self, seq):\n if self.filter_AA and self.filter_minlength:\n forbidden_AAs = re.search(r'[BXZOUJ]', seq)\n if len(seq) >= int(self.minlength) and not forbidden_AAs:\n return True\n elif self.filter_AA and not self.filter_minlength:\n forbidden_AAs = re.search(r'[BXZOUJ]', seq)\n if not forbidden_AAs:\n return True\n elif not self.filter_AA and self.filter_minlength:\n if seq >= int(self.minlength):\n return True\n else:\n return False",
"def check_sequence(self) -> None:\n if not isinstance(self, SequenceType):\n raise UnexpectedTypeError(SequenceType, self)",
"def supports_sequence_rule_bank(self):\n return False",
"def supports_sequence_rule_search(self):\n return False",
"def supports_sequence_rule_notification(self):\n return False",
"def accesses_seq(self, node) -> bool:\n if (\n isinstance(node, ast.Subscript)\n and self.id.id in self.__get_slice_id(node)\n and node.value.id == self.seq.id\n ):\n self.uses_seq = True\n return True",
"def supports_sequence_rule_enabler_lookup(self):\n return False",
"def sequence_complete(self, prefix: List[int]) -> bool:\n return (len(prefix) == self.sequence_length or\n # Do not implicitly convert a numpy array to bool.\n (len(prefix) > 0 and prefix[-1] == 0)) # pylint: disable=g-explicit-length-test",
"def supports_sequence_rule_admin(self):\n return False",
"def _CheckSequence(self, newseq, oldseq, checklen=True):\n if checklen and len(newseq) <> len(oldseq):\n return True\n if type(newseq) is types.DictType:\n for key in newseq:\n if key == '_snapshot':\n continue\n if key not in oldseq:\n return True\n if self._CheckItem(newseq[key], oldseq[key]):\n return True\n else:\n for k in range(len(newseq)):\n if self._CheckItem(newseq[k], oldseq[k]):\n return True\n return 0",
"def _check(self, seq_nums):\n # hand back\n seq_nums = sorted(seq_nums)\n max_needed = self.calculate_max_seq_num()\n if len(seq_nums) > max_needed + 1:\n raise ValueError(f\"too many seg_nums: {len(seq_nums)}\")\n return len(seq_nums) == max_needed + 1",
"def supports_sequence_rule_smart_bank(self):\n return False",
"def check_macro(self):\n lines = self.macro.split('\\n')\n checks = ['run(\"Bio-Formats Importer\"', 'open', 'saveAs']\n for check in checks:\n if any([line.startswith(check) for line in lines]):\n print(Fore.RED + f'Macro contains {check} command. Make sure that the macro is prepared properly.' + Fore.RESET)",
"def check_sequence(self, sequence: list) -> bool:\n\n state = self.initial_state\n for symbol in sequence:\n if symbol not in self.alphabet:\n raise SymbolNotInAlphabetError(symbol, self.alphabet)\n\n state, state_changed = self.__get_next_state(symbol, state)\n if not state_changed:\n raise TransitionNotFoundError(symbol, state)\n\n if state not in self.final_states:\n return False\n\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Changing wizard state to "reprint"
|
def reprint_new_next(self, cr, uid, ids, context=None):
    this = self.browse(cr, uid, ids)[0]
    self.write(cr, uid, ids, {'state': 'reprint'}, context=context)
    return {
        'type': 'ir.actions.act_window',
        'res_model': 'account.check.print.wizard',
        'view_mode': 'form',
        'view_type': 'form',
        'res_id': this.id,
        'views': [(False, 'form')],
        'target': 'new',
    }
|
[
"def on_wizard_finish(self, wizard):\r\n pass",
"def finish_printing():\n set_extruder_temp(0,0)\n set_extruder_temp(0,1)\n set_bed_temp()\n drop_bed(100)\n home_axis('x')\n disable_motors()",
"def ToggleProgressPrinting():\n SetProgressPrintingEnabled(not GetProgressPrintingIsEnabled())",
"def restart(self):\n\n if self.SaveButton.isChecked():\n self.saveData()\n self.MeasurementCard.CloseTask(self.MeasurementTask)\n self.MeasurementCard.CloseTask(self.WritingTask)\n self.timer.stop()\n self.closeStage()\n\n self.Main()",
"def restart_decision_tree(self):\n self.dt = DecisionTree(type=self.sex)\n self.load_ref_img_list()\n self.next_question()",
"def reprint_work(self, index):\n QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))\n mod = index.model()\n row = index.row()\n self.tracking = mod.data(mod.index(row, 0)).toString()\n print_ = QtGui.QPrintPreviewDialog()\n print_.paintRequested.connect(self.print_preview)\n print_.exec_()\n QtGui.QApplication.restoreOverrideCursor()",
"def restart_trial(self):\n self.clock_widget.reset_clock()\n self.clock_widget.start_clock()\n self.button_next.setText(\"Stop\")\n self.button_next.setStyleSheet(\"background-color : red;\")\n self.button_retry.setEnabled(False)\n self.button_next.setEnabled(True)\n self.clock_widget.select_enabled = False\n\n self.update_status()",
"def srt_changed(self, state):\n self.skip_rigid_transformation_new = (state == QtCore.Qt.Checked)",
"def refresh(self):\n self.action = None\n self.check_clocks()\n if self.action is not None:\n self.step(self.action)",
"def reset_draft(self):\n self.state = 'draft'",
"def respond_to(self):\n return 'wizard'",
"def render_next_step(self, form, **kwargs):\n # get the form instance based on the data from the storage backend\n # (if available).\n\n # check citizen status\n if(self.steps.current=='0'):\n data=self.get_cleaned_data_for_step('0')\n if(data[\"citizenship\"]=='No'):\n return redirect(reverse('registrationIssue'))\n # run default render_next_step\n next_step = self.steps.next\n new_form = self.get_form(\n next_step,\n data=self.storage.get_step_data(next_step),\n files=self.storage.get_step_files(next_step),\n )\n\n # change the stored current step\n self.storage.current_step = next_step\n return self.render(new_form, **kwargs)",
"def setMigrating(state): # @NoSelf",
"def update(self):\n self.step += 1\n self.step %= 4\n self.l4.setText('waiting ' + self.step * '.')\n self.b1.setEnabled(False)\n self.b2.setEnabled(False)\n self.transaction.setEnabled(False)\n\n if self.transactionIsVisible:\n self.lTransaction.setVisible(False)\n self.transactionTable.setVisible(False)\n self.transactionIsVisible = False\n \n self.deactivateButton.emit()",
"def data_recording_enabled(self, state):\n if state:\n self.nottreal.view.wizard_window.command.log_msgs.show()\n else:\n self.nottreal.view.wizard_window.command.log_msgs.hide()",
"def restore(self):\n\t\tself.label[\"text\"] = \"ABRA CADABRA, Hello there!!!!!\"\n\t\tself.clearBtn[\"state\"] = \"normal\"\n\t\tself.restoreBtn[\"state\"] = \"disabled\"",
"def to_prev_screen(self) -> None:\n if self.game_mode == 'comp' and self.num_players == 2:\n self.reset_num_screen()\n self.parent.current = 'menu'\n elif self.game_mode == 'game' or (self.game_mode == 'comp' and self.num_players > 2):\n self.reset_num_screen()\n self.parent.current = 'number'\n elif self.game_mode == 'solo':\n self.reset_goal_screen()\n self.parent.current = 'goal'\n self.clear_widgets(self.children[:-2])",
"def wizard(self) :\n\n\t\t# Variables\n\t\tprint(\"Complete list of state variables, separated by commas :\")\n\t\tself.states = input().replace(\" \", \"\").split(\",\")\n\t\tself.N_states = len(self.states)\n\t\tself.states_map = { s : idx for s, idx in zip(self.states, range(self.N_states)) }\n\n\t\t# Initial condition for each variable\n\t\tprint(\"\\nInitial conditions (integers) :\")\n\t\tself.initconds = { s : int(input(\"%s : \" % s)) for s in self.states }\n\n\t\t# Parameters\n\t\tprint(\"\\nComplete list of parameters, separated by commas :\")\n\t\tparams = input().replace(\" \", \"\").split(\",\")\n\n\t\t# Value of each parameter\n\t\tprint(\"\\nValues of parameters :\")\n\t\tself.parameters = { p : input(\"%s : \" % p) for p in params }\n\n\t\t# State transitions\n\t\tevent = []\n\t\tself.events = []\n\t\tprint(\"\\nEvents, as \\\"<rate>, <state_change>, ...\\\" lists, with commas between state changes and X+1, Y-1 as example changes :\")\n\t\twhile True :\n\n\t\t\t# Grab user input of one event\n\t\t\tevent = input().split(\",\")\n\t\t\tif event == [\"\"] : # if they hit Enter\n\t\t\t\tbreak # stop reading in events\n\n\t\t\tthisevent = {}\n\t\t\tfor e in event[1:] :\n\t\t\t\tif \"+\" in e :\n\t\t\t\t\tst, quant = e.split(\"+\")\n\t\t\t\t\tquant = int(quant)\n\t\t\t\telif \"-\" in e :\n\t\t\t\t\tst, quant = e.split(\"-\")\n\t\t\t\t\tquant = -int(quant)\n\t\t\t\telse :\n\t\t\t\t\traise helpers.InvalidModel(\"The syntax of this event was not recognised.\")\n\t\t\t\tthisevent[st.strip()] = quant\n\n\t\t\tself.events.append([event[0].strip(), thisevent])\n\n\t\t# Model variables\n\t\tself.build()",
"def doBeforeTrialResponse(self):\n if(self.trialDrawStage == 0):\n self.drawBackground(\"White\")\n self.fixationStimuli.draw()\n self.trialDrawStage = 1\n self.task.refreshWindow()\n if(self.trialDrawStage == 1 and self.getTime()>=self.currentTrialFixation):\n self.drawBackground(\"White\")\n self.arrowsStimuli.draw()\n self.fixationStimuli.draw()\n self.trialDrawStage = 2\n self.enableResponse()\n self.task.refreshWindow()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method creates a new Check Payment or updates the Check No.
|
def check_payment(self, cr, uid, ids, context=None):
data = self.browse(cr, uid, ids, context=context)[0]
check_log_pool = self.pool.get('check.log')
sequence_pool = self.pool.get('ir.sequence')
move_pool = self.pool.get('account.move')
voucher_pool = self.pool.get('account.voucher')
move_line_pool = self.pool.get('account.move.line')
voucher_id = (data.payment_id and data.payment_id.id) or (context['active_model'] == 'account.move' and self.check_move_data(cr, uid, ids, context=context))
if not data.payment_id: data.write({'payment_id':voucher_id})
if data.new_no:
voucher = voucher_pool.browse(cr, uid, voucher_id, context=context)
journal_id=voucher and (voucher.pay_journal_id or voucher.journal_id)
if self._check_journal_seq(journal_id, context=context):
chk_log_ids = check_log_pool.search(cr,uid,[('name','=',voucher.id),('status','=','active')], context=context)
if data.state == 'reprint':
check_log_pool.write(cr,uid,chk_log_ids, {'status': data.status}, context=context)
sequence_pool.write(cr, uid, [journal_id.check_sequence.id], {'number_next_actual':int(data.new_no)}, context=context)
next_seq = sequence_pool.get_id(cr, uid, journal_id.check_sequence.id, context=context)
voucher_pool.write(cr, uid,[voucher.id],{'amount_in_word': amount_to_text_ar(voucher.amount, 'ar'),'chk_seq': next_seq, 'chk_status':True, 'date_due': (voucher.date_due or voucher.date)}, context=context)
if data.state == 'update':
check_log_pool.write(cr,uid,chk_log_ids, {'check_no': next_seq}, context=context)
else:
check_log_pool.create(cr, uid,{'name': voucher.id, 'status': 'active', 'check_no': next_seq, 'journal_id':journal_id.id}, context=context)
move_pool.write(cr, uid,[voucher.move_id.id], {'ref' : next_seq or ' '}, context=context)
lines = move_line_pool.search(cr, uid,[('move_id','=',voucher.move_id.id)], context=context)
move_line_pool.write(cr, uid,lines, {'ref' : next_seq or ' '}, context=context)
if data.state != 'update':
return self.print_report(cr, uid, ids, context=context)
return {'type':'ir.actions.act_window_close'}
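A minimal usage sketch, not part of the original wizard code: it only illustrates the context contract visible above, where check_payment falls back to check_move_data when no payment_id is set and context['active_model'] is 'account.move'. The wizard model name is taken from reprint_new_next above; wiz_id and move_id are hypothetical placeholders.
# Hypothetical invocation of the wizard from a posted journal entry.
# Only the context keys actually read above ('active_model', 'active_id') are assumed.
wizard_pool = self.pool.get('account.check.print.wizard')
ctx = dict(context or {}, active_model='account.move', active_id=move_id)
wizard_pool.check_payment(cr, uid, [wiz_id], context=ctx)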
|
[
"def stepCreateCheckPayment(self, sequence=None, sequence_list=None, **kwd):\n self.check_payment = self.check_payment_module.newContent(id = 'check_payment',\n portal_type = 'Check Payment',\n destination_payment_value = self.bank_account_1,\n # aggregate_value = self.check_1,\n resource_value = self.currency_1,\n aggregate_free_text = \"0000050\",\n description = \"test\",\n # source_value = self.bi_counter,\n start_date = DateTime().Date(),\n source_total_asset_price = 20000.0,\n unique_per_account=True)\n # call set source to go into the interaction workflow to update local roles\n self.check_payment._setSource(self.bi_counter.getRelativeUrl())\n self.assertNotEqual(self.check_payment, None)\n self.assertEqual(self.check_payment.getTotalPrice(fast=0), 0.0)\n self.assertEqual(self.check_payment.getDestinationPayment(), self.bank_account_1.getRelativeUrl())\n self.assertEqual(self.check_payment.getAggregateFreeText(), self.check_1.getReference())\n self.assertEqual(self.check_payment.getSourceTotalAssetPrice(), 20000.0)\n self.assertEqual(self.check_payment.getSource(), self.bi_counter.getRelativeUrl())\n # set source reference\n self.setDocumentSourceReference(self.check_payment)\n # check source reference\n self.assertNotEqual(self.check_payment.getSourceReference(), '')\n self.assertNotEqual(self.check_payment.getSourceReference(), None)\n # the initial state must be draft\n self.assertEqual(self.check_payment.getSimulationState(), 'draft')\n\n # source reference must be automatically generated\n self.check_payment.setSourceReference(self.check_payment.Baobab_getUniqueReference())\n self.assertNotEqual(self.check_payment.getSourceReference(), None)\n self.assertNotEqual(self.check_payment.getSourceReference(), '')",
"def post(self, request):\n return payment_detail.create(request=request)",
"def save(self, *args, **kwargs):\n super(FioPayment, self).save(*args, **kwargs)\n\n if self.order:\n self.order.update_paid_status()",
"def post(self, request):\n return payment_method.create(request=request)",
"def create_pnr(RequestBody={}):\n # -------*******------- Fetch Payment Information from \"payment\" Table -------*******-------\n try:\n # if common pnr made false, then the pnr will only create for B2C with payment check\n commonPNR = True\n # define result placeholder\n result = None\n\n # create connection\n con = connect_db()\n # cursor\n cur = con.cursor(\n cursor_factory=DictCursor\n )\n\n # payment check holder\n passedPaymentCheck = False\n # payment id placeholder\n paymentID = None\n\n if commonPNR == True:\n\n # ------- Select Payment from Database -------\n\n if not RequestBody[\"DataSource\"] in [\"B2B\", \"B2B_AGENT\", \"B2B_ADMIN\"] and RequestBody[\"TransactionID\"] is not None and not RequestBody[\"TransactionID\"] == \"\":\n\n try:\n # Get payment ID\n cur.execute(\n \"SELECT id FROM payment WHERE tran_id='{}'\".format(\n RequestBody.get(\"TransactionID\", None)\n )\n )\n\n payment = cur.fetchone()\n\n # assign payment id\n paymentID = payment[\"id\"]\n passedPaymentCheck = True\n\n except Exception as E:\n raise Exception(\n get_exception_message(\n Ex=E, file=__file__, parent=inspect.stack()[0][3], line=inspect.stack()[\n 0][2],\n msg=\"Failed to SELECT payment from 'payment' Table!\"\n )\n )\n \n else:\n\n try:\n # Get payment ID\n cur.execute(\n \"SELECT id FROM payment WHERE tran_id='{}'\".format(\n RequestBody.get(\"TransactionID\", None)\n )\n )\n\n payment = cur.fetchone()\n\n # assign payment id\n paymentID = payment[\"id\"]\n passedPaymentCheck = True\n\n except Exception as E:\n raise Exception(\n get_exception_message(\n Ex=E, file=__file__, parent=inspect.stack()[0][3], line=inspect.stack()[0][2],\n msg=\"Failed to SELECT payment from 'payment' Table!\"\n )\n )\n\n # ----------------------- Create PNR -----------------------\n def call_pnr_api(request_body=None, api_data=None):\n \"\"\"\n call the pnr api and returns pnr response\n \"\"\"\n headers = {\n 'content-type': \"application/json\",\n 'x-api-key': api_data[\"APIkey\"]\n }\n params = {\n 'body': request_body\n }\n\n pnr_api_response = requests.post(\n api_data[\"APIendpoint\"], request_body, headers=headers\n )\n\n print(\"XXXXXXXXXXXXXXXXXXX\", pnr_api_response)\n\n response = pnr_api_response.json()\n\n # return the response\n return response\n\n\n if passedPaymentCheck == True:\n # Get PNR API Information\n pnr_api_info_data_body = {\n \"APImoduleName\": \"TicketReservation\"\n }\n pnrAPIdata = get_skytrip_apis(RequestBody=pnr_api_info_data_body)\n\n # call for pnr creation\n pnr_response = call_pnr_api(\n request_body=RequestBody,\n api_data=pnrAPIdata\n )\n\n return pnr_response\n\n\n else:\n raise Exception(\n get_exception_message(\n Ex=None, file=__file__, parent=inspect.stack()[0][3], line=inspect.stack()[0][2],\n msg=\"Failed payment check! Can't create PNR!\"\n )\n )\n\n\n # ------- Handle Exceptions -------\n except Exception as E:\n raise Exception(\n get_exception_message(\n Ex=E, file=__file__, parent=inspect.stack()[0][3], line=inspect.stack()[0][2],\n msg=\"Failed to Create PNR!\"\n )\n )",
"def free_payment(self):\n self.payment = Payment(profile=self.invoice.profile,\n amount=self.invoice.total,\n provider=self.provider,\n invoice=self.invoice,\n created=timezone.now()\n )\n self.payment.save()\n self.transaction_submitted = True\n\n self.payment.success = True\n self.payment.transaction = f\"{self.payment.uuid}-free\"\n self.payment.payee_full_name = \" \".join([self.invoice.profile.user.first_name, self.invoice.profile.user.last_name])\n self.payment.save()\n \n self.update_invoice_status(Invoice.InvoiceStatus.COMPLETE)\n\n self.create_receipts(self.invoice.order_items.all())",
"def create_payment_using_magento_data(self, payment_data):\n Payment = Pool().get('sale.payment')\n MagentoPaymentGateway = Pool().get('magento.instance.payment_gateway')\n\n magento_gateway = MagentoPaymentGateway.find_using_magento_data({\n 'name': payment_data['method']\n })\n\n if magento_gateway is None:\n return\n\n if payment_data['amount_paid']:\n payment, = Payment.create([{\n 'sale': self.id,\n 'gateway': magento_gateway.gateway.id,\n 'magento_id': payment_data['payment_id'],\n 'amount': Decimal(payment_data['amount_paid']),\n 'credit_account': self.party.account_receivable.id,\n 'payment_transactions': [('create', [{\n 'party': self.party.id,\n 'address': self.invoice_address.id,\n 'state': 'completed',\n 'gateway': magento_gateway.gateway.id,\n 'amount': Decimal(payment_data['amount_paid']),\n 'credit_account': self.party.account_receivable.id,\n }])]\n }])\n\n for transaction in payment.payment_transactions:\n transaction.safe_post()",
"def test_ach_or_check_number_required(self):\n main_transaction = Transaction.objects.create(account=self.account,\n balance_delta=25)\n entry = BankSpendingEntry(\n check_number=None, ach_payment=None, memo='no check or ach',\n main_transaction=main_transaction, date=datetime.date.today())\n self.assertRaises(ValidationError, entry.save)",
"def create_payment_model(self):\n self.payment = Payment(profile=self.invoice.profile,\n amount=self.invoice.total,\n provider=self.provider,\n invoice=self.invoice,\n created=timezone.now()\n )\n self.payment.result['account_number'] = self.payment_info.cleaned_data.get('card_number')[-4:]\n self.payment.payee_full_name = self.payment_info.cleaned_data.get('full_name')\n self.payment.payee_company = self.billing_address.cleaned_data.get('company')\n\n billing_address = self.billing_address.save(commit=False)\n billing_address, created = self.invoice.profile.get_or_create_address(billing_address)\n if created:\n billing_address.profile = self.invoice.profile\n billing_address.save()\n\n self.payment.billing_address = billing_address\n self.payment.save()",
"def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n if self.order:\n self.order.update_paid_status()",
"def test_ach_xor_check_number(self):\n main_transaction = Transaction.objects.create(account=self.account,\n balance_delta=25)\n entry = BankSpendingEntry(\n check_number=\"23\", ach_payment=True, memo='check AND ach',\n main_transaction=main_transaction, date=datetime.date.today())\n self.assertRaises(ValidationError, entry.save)",
"def post(self, request, *args, **kwargs):\n logger.info(\n f\"Got notification from Yandex.Checkout, payload: {request.data}\")\n\n payment_object = request.data.get('object')\n if not payment_object:\n return Response({\"msg\": \"No object data passed\"},\n status=status.HTTP_400_BAD_REQUEST)\n external_id = payment_object.get('id')\n\n saved = payment_object[\"payment_method\"][\"saved\"]\n\n if saved: # Recurrent payment, use Subscription\n try:\n payment_method_id = payment_object[\"payment_method\"][\"id\"]\n subscription = Subscription.objects.get(\n external_id=external_id)\n subscription.is_active = True\n subscription.is_trial = False\n subscription.external_id = payment_method_id\n subscription.save()\n # TODO: schedule monthly payment task here\n except Subscription.DoesNotExist:\n return Response({\"msg\": \"No such subscription\"},\n status=status.HTTP_400_BAD_REQUEST)\n else: # normal payment, use Payment\n try:\n afi_payment = Payment.objects.get(external_id=external_id)\n afi_payment.status = Payment.STATUS.PAID\n afi_payment.save()\n except Payment.DoesNotExist:\n return Response({\"msg\": \"No such payment\"},\n status=status.HTTP_400_BAD_REQUEST)\n\n return Response({\"msg\": \"Got it!\"}, status=status.HTTP_200_OK)",
"def stepValidateAnotherCheckPaymentWorks(self, sequence=None, sequence_list=None, **kwd):\n self.createAnotherCheckPayment(sequence=sequence, will_fail=0, number=\"0000051\")",
"def auto_update_paid_object(self, request, payment):\n try:\n from tendenci.apps.notifications import models as notification\n except:\n notification = None\n from tendenci.apps.perms.utils import get_notice_recipients\n\n # approve it\n if self.renewal:\n self.approve_renewal(request)\n else:\n params = {'create_new': False,\n 'assign_to_user': None}\n if self.anonymous_creator:\n [assign_to_user] = User.objects.filter(\n first_name=self.anonymous_creator.first_name,\n last_name=self.anonymous_creator.last_name,\n email=self.anonymous_creator.email\n )[:1] or [None]\n if assign_to_user:\n params['assign_to_user'] = assign_to_user\n params['create_new'] = False\n else:\n params['create_new'] = True\n\n self.approve_join(request, **params)\n\n # send notification to administrators\n recipients = get_notice_recipients('module',\n 'corporate_memberships',\n 'corporatemembershiprecipients')\n if recipients:\n if notification:\n extra_context = {\n 'object': self,\n 'request': request,\n }\n notification.send_emails(recipients,\n 'corp_memb_paid',\n extra_context)",
"def receive_paycheck(self):\n self.money += self.salary #Adds paycheck to money available\n self.__time_money_last_increased = time.time() #Resets the time salary was last given",
"def action_post(self):\n res = super(AccountPayment, self).action_post()\n for rec in self:\n invoice = rec.move_id\n if invoice.book_issue_id and invoice.payment_state == \"paid\":\n invoice.book_issue_id.state = \"paid\"\n return res",
"def post(self):\n for rec in self:\n # code start\n# total = 0.0\n# for line in rec.invoice_lines:\n# if line.allocation < 0:\n# raise ValidationError(_(\"Negative allocation amount not allowed!\"))\n# if line.allocation > line.open_amount:\n# raise UserError(\"Allocation amount %s is greater then open amount %s of Invoice.\" % (line.allocation, line.open_amount))\n# total += line.allocation\n# if line.open_amount != line.invoice_id.residual:\n# raise UserError(\"Due amount changed.\\n Please click 'Update Invoice' button to update amount\")\n# \n# if total > rec.amount:\n# raise UserError(\"Total allocation %s is more then payment amount %s\" % (total, rec.amount))\n amt = 0\n if rec.invoice_lines:\n \n for line in rec.invoice_lines:\n amt += line.allocation\n # if rec.amount < amt:\n # raise ValidationError((\"Payment amount must be greater then or equal to '%s'\") %(amt))\n # if rec.amount > amt:\n # for line in rec.invoice_lines:\n # line.allocation = line.allocation + (rec.amount - amt)\n # break\n return super(account_payment,self).post()",
"def validate_payment(request):\n data = request.data\n flwref = data.get('flwRef')\n otp = data.get('otp')\n property = None\n purpose = None\n serializer = PaymentValidationSerializer(data=data)\n if serializer.is_valid():\n purpose = str(serializer.validated_data.get('purpose'))\n property_id = serializer.validated_data.get('property_id')\n else:\n return Response({'errors': serializer.errors},\n status=status.HTTP_400_BAD_REQUEST)\n if purpose == 'Buying':\n property = get_object_or_404(Property, pk=property_id)\n resp = TransactionServices.validate_card_payment(flwref, otp)\n if resp.get('status') == 'error':\n return Response(\n {'message': resp.get('message')},\n status=status.HTTP_400_BAD_REQUEST\n )\n else:\n txRef = resp['data']['tx']['txRef']\n verify_resp = TransactionServices.verify_payment(txRef)\n email = verify_resp['data']['custemail']\n user = User.objects.get(email=email)\n if verify_resp.get('data').get('status') == 'successful':\n save_card = verify_resp['data']['meta'][0]['metavalue']\n message = verify_resp['data']['vbvmessage']\n if int(save_card) == 1:\n message += TransactionServices.save_card(verify_resp)\n data = resp.get('data').get('tx')\n references = {k: v for k, v in data.items() if k.endswith('Ref')}\n amount = data.get('amount', 0)\n save_deposit(purpose,\n references,\n amount,\n user,\n property)\n else:\n return Response({'message': verify_resp.get('message')},\n status=status.HTTP_400_BAD_REQUEST)\n return Response({'message': resp['message']}, status=resp['status_code'])",
"def paymentDetailSaveHandler(sender, instance, created, **kwargs):\n if created:\n instance.game_played.game.increment_sellcount(price=instance.cost)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method checks several constraints before printing a check from a Journal Entry: 1. The move state must be posted. 2. The move's journal must allow check writing. 3. The cheque must be paid from a cash account. 4. The move lines must have a partner_id (beneficiary). 5. The cheque must be paid to only one partner.
|
def check_move_data(self, cr, uid, ids, context=None):
    move_line_pool = self.pool.get('account.move.line')
    move = self.pool.get('account.move').browse(cr, uid, context.get('active_id',[]), context=context)
    # 1. The journal entry must be posted.
    if move.state != 'posted':
        raise osv.except_osv(_('Warning'), _('Payment is not posted. Please Validate Payment First!'))
    # 2. The journal must allow check writing.
    if not move.journal_id.allow_check_writing:
        raise osv.except_osv(_('Warning'), _("Current journal doesn't allow check writing"))
    account_ids = self.pool.get('account.account').search(cr, uid, [('type','=','liquidity')], context=context)
    move_line = move_line_pool.search(cr, uid, [('move_id','=',context.get('active_id',[]))], context=context)
    # 3. The cheque must be paid from a cash (liquidity) account.
    credit_lines = move_line_pool.search(cr, uid, [('move_id','=',context.get('active_id',[])),('credit','>',0),('account_id','not in',account_ids)], context=context)
    if credit_lines:
        raise osv.except_osv(_('Warning'), _('Can not pay with check without cash account!!'))
    # 4. Every debit line must carry a partner (the beneficiary).
    debit_lines = move_line_pool.search(cr, uid, [('move_id','=',context.get('active_id',[])),('debit','>',0),('partner_id','=',False)], context=context)
    if debit_lines:
        raise osv.except_osv(_('Warning'), _('Can not create new check without partner!!'))
    # 5. The cheque must be paid to a single partner only.
    partners = move_line_pool.read(cr, uid, move_line, ['partner_id'], context=context)
    partner_ids = [part['partner_id'] for part in partners]
    if len(set(partner_ids)) > 1:
        raise osv.except_osv(_('Warning'), _('Can not create new check for multiple partner!!'))
    return self.new_check(cr, uid, ids, context=context)
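A hedged sketch of how the guard above surfaces its constraints, assuming an osv-style cr/uid environment; wiz_id and draft_move_id are hypothetical, and the wizard model name is taken from reprint_new_next earlier in this row set. Each violated rule raises osv.except_osv; otherwise control passes to new_check().
# Hypothetical: a draft (unposted) journal entry trips constraint 1 above.
wizard_pool = self.pool.get('account.check.print.wizard')
try:
    wizard_pool.check_move_data(cr, uid, [wiz_id],
                                context={'active_id': draft_move_id})
except osv.except_osv:
    pass  # 'Payment is not posted. Please Validate Payment First!'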
|
[
"def check_payment(self, cr, uid, ids, context=None):\n data = self.browse(cr, uid, ids, context=context)[0]\n check_log_pool = self.pool.get('check.log')\n sequence_pool = self.pool.get('ir.sequence')\n move_pool = self.pool.get('account.move') \n voucher_pool = self.pool.get('account.voucher')\n move_line_pool = self.pool.get('account.move.line')\n voucher_id = (data.payment_id and data.payment_id.id) or (context['active_model'] == 'account.move' and self.check_move_data(cr, uid, ids, context=context))\n if not data.payment_id: data.write({'payment_id':voucher_id})\n if data.new_no:\n voucher = voucher_pool.browse(cr, uid, voucher_id, context=context)\n journal_id=voucher and (voucher.pay_journal_id or voucher.journal_id)\n if self._check_journal_seq(journal_id, context=context):\n chk_log_ids = check_log_pool.search(cr,uid,[('name','=',voucher.id),('status','=','active')], context=context)\n if data.state == 'reprint':\n check_log_pool.write(cr,uid,chk_log_ids, {'status': data.status}, context=context)\n\n sequence_pool.write(cr, uid, [journal_id.check_sequence.id], {'number_next_actual':int(data.new_no)}, context=context)\n next_seq = sequence_pool.get_id(cr, uid, journal_id.check_sequence.id, context=context)\n voucher_pool.write(cr, uid,[voucher.id],{'amount_in_word': amount_to_text_ar(voucher.amount, 'ar'),'chk_seq': next_seq, 'chk_status':True, 'date_due': (voucher.date_due or voucher.date)}, context=context)\n if data.state == 'update':\n check_log_pool.write(cr,uid,chk_log_ids, {'check_no': next_seq}, context=context)\n else: \n check_log_pool.create(cr, uid,{'name': voucher.id, 'status': 'active', 'check_no': next_seq, 'journal_id':journal_id.id}, context=context)\n move_pool.write(cr, uid,[voucher.move_id.id], {'ref' : next_seq or ' '}, context=context)\n lines = move_line_pool.search(cr, uid,[('move_id','=',voucher.move_id.id)], context=context)\n move_line_pool.write(cr, uid,lines, {'ref' : next_seq or ' '}, context=context)\n if data.state != 'update':\n return self.print_report(cr, uid, ids, context=context)\n return {'type':'ir.actions.act_window_close'}",
"def post(self):\n for rec in self:\n partner_balance = rec.partner_id.debit - rec.partner_id.credit\n prevent_out_payment = self.env['ir.config_parameter'].sudo().get_param('payment_control.prevent_supplier_outstanding_payment')\n if prevent_out_payment and self.payment_type == 'outbound' and rec.amount and partner_balance < rec.amount :\n raise ValidationError(_(\"The payment amount is greater than the partner balance \"))\n return super(account_payment, self).post()",
"def _check_report_lines(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n\n for item in self.browse(cr, uid, ids, context):\n ## Browse partner record lines to check if all are correct (all fields filled)\n for partner_record in item.partner_record_ids:\n if not partner_record.partner_record_ok:\n raise osv.except_osv(_('Error!'), _(\"All partner records fields (country, VAT number) must be filled.\"))\n if partner_record.total_operation_amount < 0:\n raise osv.except_osv(_('Error!'), _(\"All amounts must be positives\"))\n\n for partner_record in item.partner_refund_ids:\n if not partner_record.partner_refund_ok:\n raise osv.except_osv(_('Error!'), _(\"All partner refunds fields (country, VAT number) must be filled.\"))\n if partner_record.total_operation_amount < 0 or partner_record.total_origin_amount < 0:\n raise osv.except_osv(_('Error!'), _(\"All amounts must be positives\"))\n\n return True",
"def confirm(self, cr, uid, ids, context=None):\n for r in self.browse(cr, uid, ids, context=context):\n if not r.line_ids:\n raise orm.except_orm(_('Warning'), _('The employees should be entered!'))\n for l in r.line_ids:\n if l.amounts_value <= 0:\n raise orm.except_orm(_('Warning'), _('The final amount for employee should be greater than Zero; kindly check the red lines!'))\n if l.state == 'draft':\n raise orm.except_orm(_('Warning'), _('The state of additional allowance details for employee should be in the confirm state!'))\n l.write({'state':'implement'}, context=context)\n return self.write(cr, uid, ids, {'state':'confirm'}, context=context)",
"def post(self):\n AccountMove = self.env['account.move'].with_context(default_type='entry')\n for rec in self:\n if rec.state not in ['draft', 'pdc']:\n raise UserError(_(\"Only a draft payment can be posted.\"))\n\n if any(inv.state != 'posted' for inv in rec.invoice_ids):\n raise ValidationError(_(\"The payment cannot be processed because the invoice is not open!\"))\n\n # keep the name in case of a payment reset to draft\n if not rec.name:\n # Use the right sequence to set the name\n if rec.payment_type == 'transfer':\n sequence_code = 'account.payment.transfer'\n else:\n if rec.partner_type == 'customer':\n if rec.payment_type == 'inbound':\n sequence_code = 'account.payment.customer.invoice'\n if rec.payment_type == 'outbound':\n sequence_code = 'account.payment.customer.refund'\n if rec.partner_type == 'supplier':\n if rec.payment_type == 'inbound':\n sequence_code = 'account.payment.supplier.refund'\n if rec.payment_type == 'outbound':\n sequence_code = 'account.payment.supplier.invoice'\n rec.name = self.env['ir.sequence'].next_by_code(sequence_code, sequence_date=rec.payment_date)\n if not rec.name and rec.payment_type != 'transfer':\n raise UserError(_(\"You have to define a sequence for %s in your company.\") % (sequence_code,))\n\n # moves = AccountMove.create(rec._prepare_payment_moves())\n amount = rec.amount * (rec.payment_type in ('outbound', 'transfer') and 1 or -1)\n # print(\"Attempt\")\n if rec.payment_type != 'transfer':\n moves = AccountMove.create(rec._create_payment_entry(amount))\n else:\n moves = AccountMove.create(rec._prepare_payment_moves())\n # print(\"Attempt Success\")\n moves.filtered(lambda move: move.journal_id.post_at != 'bank_rec').post()\n # Update the state / move before performing any reconciliation.\n move_name = self._get_move_name_transfer_separator().join(moves.mapped('name'))\n rec.write({'state': 'posted', 'move_name': move_name})\n if rec.payment_type in ('inbound', 'outbound'):\n # ==== 'inbound' / 'outbound' ====\n if rec.invoice_ids:\n (moves[0] + rec.invoice_ids + rec.payment_crdr_inv_line_ids.filtered(lambda l: l.allocation > 0.0).mapped('invoice_id')).line_ids \\\n .filtered(lambda line: not line.reconciled and line.account_id == rec.destination_account_id) \\\n .reconcile()\n elif rec.payment_type == 'transfer':\n # ==== 'transfer' ====\n (moves + rec.payment_crdr_inv_line_ids.filtered(lambda l: l.allocation > 0.0).mapped('invoice_id')).line_ids \\\n .filtered(lambda line: line.account_id == rec.company_id.transfer_account_id) \\\n .reconcile()\n\n return True",
"def post(self):\n if self.invoice_id and self.invoice_id.revenue_type == 'month_revenue':\n if self.invoice_id.month_revenue_date > str(fields.datetime.now()):\n raise ValidationError(\n _(\"Date Specified in invoice '%s' not come Yet!!\") % (self.invoice_id.month_revenue_date))\n else:\n super(accountMove, self).post()\n self.invoice_id.state = 'posted'\n return\n\n\n else:\n super( accountMove,self).post()\n if self.invoice_id and self.invoice_id.revenue_type == 'deferred_revenue':\n is_all_move_invoice_posted = self.env['account.move'].search([('invoice_id','=',self.invoice_id.id),('state','!=','posted')])\n if len(is_all_move_invoice_posted) == 0:\n self.invoice_id.state = 'posted'",
"def post(self):\n AccountMove = self.env['account.move'].with_context(default_type='entry')\n for rec in self:\n\n if rec.state != 'approve':\n raise UserError(_(\"Only a draft payment can be posted.\"))\n\n if any(inv.state != 'posted' for inv in rec.invoice_ids):\n raise ValidationError(_(\"The payment cannot be processed because the invoice is not open!\"))\n\n # keep the name in case of a payment reset to draft\n if not rec.name:\n # Use the right sequence to set the name\n if rec.payment_type == 'transfer':\n sequence_code = 'account.payment.transfer'\n else:\n if rec.partner_type == 'customer':\n if rec.payment_type == 'inbound':\n sequence_code = 'account.payment.customer.invoice'\n if rec.payment_type == 'outbound':\n sequence_code = 'account.payment.customer.refund'\n if rec.partner_type == 'supplier':\n if rec.payment_type == 'inbound':\n sequence_code = 'account.payment.supplier.refund'\n if rec.payment_type == 'outbound':\n sequence_code = 'account.payment.supplier.invoice'\n rec.name = self.env['ir.sequence'].next_by_code(sequence_code, sequence_date=rec.payment_date)\n if not rec.name and rec.payment_type != 'transfer':\n raise UserError(_(\"You have to define a sequence for %s in your company.\") % (sequence_code,))\n\n moves = AccountMove.create(rec._prepare_payment_moves())\n moves.filtered(lambda move: move.journal_id.post_at != 'bank_rec').post()\n\n # Update the state / move before performing any reconciliation.\n move_name = self._get_move_name_transfer_separator().join(moves.mapped('name'))\n rec.write({'state': 'posted', 'move_name': move_name})\n\n if rec.payment_type in ('inbound', 'outbound'):\n # ==== 'inbound' / 'outbound' ====\n if rec.invoice_ids:\n (moves[0] + rec.invoice_ids).line_ids \\\n .filtered(lambda line: not line.reconciled and line.account_id == rec.destination_account_id)\\\n .reconcile()\n elif rec.payment_type == 'transfer':\n # ==== 'transfer' ====\n moves.mapped('line_ids')\\\n .filtered(lambda line: line.account_id == rec.company_id.transfer_account_id)\\\n .reconcile()\n\n return True",
"def action_move_create(self, cr, uid, ids, context=None):\n\t\tait_obj = self.pool.get('account.invoice.tax')\n\t\tcur_obj = self.pool.get('res.currency')\n\t\tperiod_obj = self.pool.get('account.period')\n\t\tpayment_term_obj = self.pool.get('account.payment.term')\n\t\tjournal_obj = self.pool.get('account.journal')\n\t\tmove_obj = self.pool.get('account.move')\n\t\tif context is None:\n\t\t\tcontext = {}\n\t\tfor inv in self.browse(cr, uid, ids, context=context):\n\t\t\tif not inv.journal_id.sequence_id:\n\t\t\t\traise osv.except_osv(_('Error!'), _('Please define sequence on the journal related to this invoice.'))\n\t\t\tif not inv.invoice_line:\n\t\t\t\traise osv.except_osv(_('No Invoice Lines!'), _('Please create some invoice lines.'))\n\t\t\tif inv.move_id:\n\t\t\t\tcontinue\n\n\t\t\tctx = context.copy()\n\t\t\tctx.update({'lang': inv.partner_id.lang})\n\t\t\tif not inv.date_invoice:\n\t\t\t\tself.write(cr, uid, [inv.id], {'date_invoice': fields.date.context_today(self,cr,uid,context=context)}, context=ctx)\n\t\t\tcompany_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id\n\t\t\t# create the analytical lines\n\t\t\t# one move line per invoice line\n\t\t\timl = self._get_analytic_lines(cr, uid, inv.id, context=ctx)\n\t\t\t# check if taxes are all computed\n\t\t\tcompute_taxes = ait_obj.compute(cr, uid, inv.id, context=ctx)\n\t\t\tself.check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)\n\t\t\t# I disabled the check_total feature\n\t\t\tgroup_check_total_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'group_supplier_inv_check_total')[1]\n\t\t\tgroup_check_total = self.pool.get('res.groups').browse(cr, uid, group_check_total_id, context=context)\n\t\t\tif group_check_total and uid in [x.id for x in group_check_total.users]:\n\t\t\t\tif (inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding/2.0)):\n\t\t\t\t\traise osv.except_osv(_('Bad Total!'), _('Please verify the price of the invoice!\\nThe encoded total does not match the computed total.'))\n\n\t\t\tif inv.payment_term:\n\t\t\t\ttotal_fixed = total_percent = 0\n\t\t\t\tfor line in inv.payment_term.line_ids:\n\t\t\t\t\tif line.value == 'fixed':\n\t\t\t\t\t\ttotal_fixed += line.value_amount\n\t\t\t\t\tif line.value == 'procent':\n\t\t\t\t\t\ttotal_percent += line.value_amount\n\t\t\t\ttotal_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)\n\t\t\t\tif (total_fixed + total_percent) > 100:\n\t\t\t\t\traise osv.except_osv(_('Error!'), _(\"Cannot create the invoice.\\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. 
In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'.\"))\n\n\t\t\t# one move line per tax line\n\t\t\timl += ait_obj.move_line_get(cr, uid, inv.id)\n\n\t\t\tentry_type = ''\n\t\t\tif inv.type in ('in_invoice', 'in_refund'):\n\t\t\t\tref = inv.reference\n\t\t\t\tentry_type = 'journal_pur_voucher'\n\t\t\t\tif inv.type == 'in_refund':\n\t\t\t\t\tentry_type = 'cont_voucher'\n\t\t\telse:\n\t\t\t\tref = self._convert_ref(cr, uid, inv.number)\n\t\t\t\tentry_type = 'journal_sale_vou'\n\t\t\t\tif inv.type == 'out_refund':\n\t\t\t\t\tentry_type = 'cont_voucher'\n\t\t\tdiff_currency_p = inv.currency_id.id <> company_currency or inv.use_kmk_ar_ap\n\t\t\t# create one move line for the total and possibly adjust the other lines amount\n\t\t\ttotal = 0\n\t\t\ttotal_currency = 0\n\n\t\t\ttotal, total_currency, iml = self.compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)\n\t\t\tacc_id = inv.account_id.id\n\t\t\t\n\t\t\tname = inv['name'] or inv['supplier_invoice_number'] or '/'\n\t\t\ttotlines = False\n\t\t\tif inv.payment_term:\n\t\t\t\ttotlines = payment_term_obj.compute(cr,\n\t\t\t\t\t\tuid, inv.payment_term.id, total, inv.date_invoice or False, context=ctx)\n\t\t\tif totlines:\n\t\t\t\tres_amount_currency = total_currency\n\t\t\t\ti = 0\n\t\t\t\tctx.update({'date': inv.date_invoice})\n\t\t\t\tfor t in totlines:\n\t\t\t\t\tif inv.currency_id.id != company_currency:\n\t\t\t\t\t\tif inv.use_kmk_ar_ap:\n\t\t\t\t\t\t\tamount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t[1], context=ctx)\n\t\t\t\t\t\telse: \n\t\t\t\t\t\t\tamount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t[1], context=ctx)\n\t\t\t\t\telse:\n\t\t\t\t\t\tamount_currency = False\n\n\t\t\t\t\t# last line add the diff\n\t\t\t\t\tres_amount_currency -= amount_currency or 0\n\t\t\t\t\ti += 1\n\t\t\t\t\tif i == len(totlines):\n\t\t\t\t\t\tamount_currency += res_amount_currency\n\n\t\t\t\t\tcurrency_p = (inv.use_kmk_ar_ap and inv.company_id.tax_base_currency.id) \\\n\t\t\t\t\t\t\tor (inv.currency_id.id != inv.company_id.currency_id.id and not inv.use_kmk_ar_ap and inv.company_id.currency_id.id) \\\n\t\t\t\t\t\t\tor False\n\n\t\t\t\t\timl.append({\n\t\t\t\t\t\t'type': 'dest',\n\t\t\t\t\t\t'name': name,\n\t\t\t\t\t\t'price': t[1],\n\t\t\t\t\t\t'account_id': acc_id,\n\t\t\t\t\t\t'date_maturity': t[0],\n\t\t\t\t\t\t'amount_currency': diff_currency_p \\\n\t\t\t\t\t\t\t\tand amount_currency or False,\n\t\t\t\t\t\t'currency_id': currency_p,\n\t\t\t\t\t\t'ref': ref,\n\t\t\t\t\t})\n\t\t\telse:\n\t\t\t\tcurrency_p = (inv.use_kmk_ar_ap and inv.company_id.tax_base_currency.id) \\\n\t\t\t\t\t\t\tor (inv.currency_id.id != inv.company_id.currency_id.id and not inv.use_kmk_ar_ap and inv.company_id.currency_id.id) \\\n\t\t\t\t\t\t\tor False\n\n\t\t\t\timl.append({\n\t\t\t\t\t'type': 'dest',\n\t\t\t\t\t'name': name,\n\t\t\t\t\t'price': total,\n\t\t\t\t\t'account_id': acc_id,\n\t\t\t\t\t'date_maturity': inv.date_due or False,\n\t\t\t\t\t'amount_currency': diff_currency_p \\\n\t\t\t\t\t\t\tand total_currency or False,\n\t\t\t\t\t'currency_id': currency_p or False,\n\t\t\t\t\t'ref': ref\n\t\t\t})\n\n\t\t\tdate = inv.date_invoice or time.strftime('%Y-%m-%d')\n\n\t\t\tpart = self.pool.get(\"res.partner\")._find_accounting_partner(inv.partner_id)\n\n\t\t\tline = map(lambda x:(0,0,self.line_get_convert(cr, uid, x, part.id, date, context=ctx)),iml)\n\t\t\tline = self.group_lines(cr, uid, iml, line, inv)\n\n\t\t\tjournal_id = 
inv.journal_id.id\n\t\t\tjournal = journal_obj.browse(cr, uid, journal_id, context=ctx)\n\t\t\tif journal.centralisation:\n\t\t\t\traise osv.except_osv(_('User Error!'),\n\t\t\t\t\t\t_('You cannot create an invoice on a centralized journal. Uncheck the centralized counterpart box in the related journal from the configuration menu.'))\n\n\t\t\tline = self.finalize_invoice_move_lines(cr, uid, inv, line)\n\t\t\t\n\t\t\tall_taxes = self.pool.get('account.tax').search(cr,uid,[])\n\t\t\tcodes = [t.tax_code_id and t.tax_code_id.id for t in self.pool.get('account.tax').browse(cr,uid,all_taxes)] + [t.ref_tax_code_id and t.ref_tax_code_id.id for t in self.pool.get('account.tax').browse(cr,uid,all_taxes)]\n\t\t\tcodes = list(set(codes))\n\t\t\t\t\t\n\t\t\tline_temp = []\n\t\t\tfor mvl_temp in line:\n\t\t\t\t\n\t\t\t\tif 'tax_code_id' in mvl_temp[2] and mvl_temp[2]['tax_code_id'] in codes:\n\t\t\t\t\tdummy_data = mvl_temp[2].copy()\n\t\t\t\t\tdummy_data.update({\n\t\t\t\t\t\t'faktur_pajak_source' :tuple(account.invoice,inv.id),\n\t\t\t\t\t\t'faktur_pajak_no'\t : inv.nomor_faktur_id and inv.nomor_faktur_id.name or ''\n\t\t\t\t\t\t})\n\t\t\t\t\tline_temp.append((0,0,dummy_data))\n\t\t\t\telse:\n\t\t\t\t\tline_temp.append(mvl_temp)\n\t\t\tline = line_temp\n\n\t\t\tmove = {\n\t\t\t\t'ref': inv.reference and inv.reference or inv.name,\n\t\t\t\t'line_id': line,\n\t\t\t\t'journal_id': journal_id,\n\t\t\t\t'date': date,\n\t\t\t\t'narration': inv.comment,\n\t\t\t\t'company_id': inv.company_id.id,\n\t\t\t}\n\t\t\tperiod_id = inv.period_id and inv.period_id.id or False\n\t\t\tctx.update(company_id=inv.company_id.id,\n\t\t\t\t\t account_period_prefer_normal=True)\n\t\t\tif not period_id:\n\t\t\t\tperiod_ids = period_obj.find(cr, uid, inv.date_invoice, context=ctx)\n\t\t\t\tperiod_id = period_ids and period_ids[0] or False\n\t\t\tif period_id:\n\t\t\t\tmove['period_id'] = period_id\n\t\t\t\tfor i in line:\n\t\t\t\t\ti[2]['period_id'] = period_id\n\n\t\t\tctx.update(invoice=inv)\n\t\t\tmove_id = move_obj.create(cr, uid, move, context=ctx)\n\t\t\tnew_move_name = move_obj.browse(cr, uid, move_id, context=ctx).name\n\t\t\t# make the invoice point to that move\n\t\t\n\t\t\tself.write(cr, uid, [inv.id], {'move_id': move_id,'period_id':period_id, 'move_name':new_move_name}, context=ctx)\n\t\t\t# Pass invoice in context in method post: used if you want to get the same\n\t\t\t# account move reference when creating the same invoice after a cancelled one:\n\t\t\t# link to account_move post\n\t\t\tmove_obj.post(cr, uid, [move_id], context=ctx)\n\t\tself._log_event(cr, uid, ids)\n\t\treturn True",
"def _check_cylinder_deposit(self):\n for rec in self:\n deposit_amount = 0.0\n if rec.state == 'draft':\n cancel_connection_ids = self.search([\n ('id', '!=', rec.id),\n ('new_connection_id', '=', rec.new_connection_id.id),\n ('state', '=', 'draft')])\n if cancel_connection_ids:\n raise ValidationError(_(\n \" %s Cancel record already exists for Customer %s !!\")\n % (cancel_connection_ids[0].new_connection_id.number,\n rec.partner_id.name))\n if rec.connection_history_id:\n if rec.cylinder_qty <= 0:\n raise ValidationError(_(\"Cylinder Qty should not \"\n \"be less than or equal to Zero ! \"))\n elif rec.security_deposit_amount < 0:\n raise ValidationError(_(\"Security Deposit Amount should not\"\n \" be negative value ! \"))\n if rec.cylinder_qty > self.remaining_cancel_qty():\n raise ValidationError(_(\"Cylinder Qty should not \"\n \"be greater than %s Qty !!\")\n % (self.remaining_cancel_qty()))\n deposit_amount = \\\n (rec.connection_history_id.security_deposit_amount /\n rec.connection_history_id.qty) * rec.cylinder_qty\n if rec.security_deposit_amount > deposit_amount:\n raise ValidationError(\n _(\"Security Deposit Amount \"\n \"should not be greater than %s Amount !!\")\n % deposit_amount)",
"def finalize_invoice_move_lines(self, cr, uid, invoice_browse, move_lines):\n\t\tres = super(account_invoice,self).finalize_invoice_move_lines(cr, uid, invoice_browse, move_lines)\n\t\tmoves = False\n\t\tif invoice_browse.separate_tax:\n\t\t\taccount_pool = self.pool.get('account.account')\n\t\t\tcur_obj = self.pool.get('res.currency')\n\t\t\taccount_ids = [x[2]['account_id'] for x in res]\n\t\t\trec_payable_id = account_pool.search(cr,uid,[('id','in',account_ids),('type','in',('payable','receivable'))])\n\t\t\n\t\t\tif not rec_payable_id and invoice_browse.type =='out_invoice':\n\t\t\t\traise osv.except_osv(_('No Receivable Account Defined!'), _('There is no Receivable Account Defined on this transaction, please check your account configuration.'))\n\t\t\telif not rec_payable_id and invoice_browse.type =='in_invoice':\n\t\t\t\traise osv.except_osv(_('No Payable Account Defined!'), _('There is no Payable Account Defined on this transaction, please check your account configuration.'))\n\t\t\tmoves =[]\n\t\t\tmoves_ar_ap = False\n\t\t\ttotal_tax_amt_currency=0.0\n\t\t\ttotal_trans_amt_currency = 0.0\n\t\t\ttotal_trans_amt_currency2 = 0.0\n\t\t\ttotal_tax = 0.0\n\t\t\tall_taxes = self.pool.get('account.tax').search(cr,uid,[])\n\t\t\tcodes = [t.tax_code_id and t.tax_code_id.id for t in self.pool.get('account.tax').browse(cr,uid,all_taxes)] + [t.ref_tax_code_id and t.ref_tax_code_id.id for t in self.pool.get('account.tax').browse(cr,uid,all_taxes)]\n\t\t\tcodes = list(set(codes))\n\t\t\tbase_codes = [t.tax_code_id and t.base_code_id.id for t in self.pool.get('account.tax').browse(cr,uid,all_taxes)] + [t.ref_tax_code_id and t.ref_base_code_id.id for t in self.pool.get('account.tax').browse(cr,uid,all_taxes)]\n\t\t\tbase_codes = list(set(base_codes))\n\n\t\t\tfound_tax = False\n\t\t\ttemp = []\n\t\t\ti=0\n\t\t\tfor line in res:\n\t\t\t\ti+=1\n\t\t\t\tsign = invoice_browse.type =='out_invoice' and -1 or 1\n\t\t\t\tposition = line[2]['credit'] !=0.0 and -1 or 1\n\n\t\t\t\ttm = line[2]['debit']!=0.0 and line[2]['debit'] or line[2]['credit']\n\t\t\t\tif line[2]['tax_amount'] and ( line[2]['tax_code_id'] in codes):\n\t\t\t\t\ttotal_tax += position * sign * tm\n\t\t\t\t\ttotal_tax_amt_currency -= sign * position * line[2]['amount_currency']\n\t\t\t\t\tfound_tax = True\n\t\t\t\t\t\n\t\t\t\tif line[2]['account_id'] not in rec_payable_id:\n\t\t\t\t\tif line[2]['debit']!=False or line[2]['credit']!=False:\n\t\t\t\t\t\tmoves.append(line)\n\t\t\t\t\t\ttotal_trans_amt_currency2 += sign*(line[2]['amount_currency'] or 0.0)\t\n\t\t\t\t\tif line[2]['tax_amount'] and line[2]['tax_code_id'] in base_codes:\n\t\t\t\t\t\ttemp.append(line)\n\t\t\t\telse:\n\t\t\t\t\tmoves_ar_ap = line\n\t\t\t\t\ttotal_trans_amt_currency += line[2]['amount_currency']\n\t\t\tfound_not_zero = False\n\t\t\tfor x in temp:\n\t\t\t\tif x[2]['debit']!=False or x[2]['credit']!=False:\n\t\t\t\t\tfound_not_zero = True\n\t\t\t\t\n\t\t\t# print \"moves_ar_ap-----------\",moves_ar_ap\n\t\t\t# if moves_ar_ap and invoice_browse.use_kmk_ar_ap:\n\t\t\t# \tt_moves_arp_ap=moves_ar_ap[2].copy()\n\t\t\t# \tamt = t_moves_arp_ap['debit'] not in (0.0,False) and t_moves_arp_ap['debit'] or (-1 * t_moves_arp_ap['credit'])\n\t\t\t# \tcur_obj =self.pool.get('res.currency')\n\t\t\t# \tcontext_rate = {}\n\t\t\t# \tcontext_rate.update({'date':invoice_browse.date_invoice or time.strftime('%Y-%m-%d'),'reverse':False,'trans_currency':invoice_browse.currency_id and invoice_browse.currency_id.id or False})\n\t\t\t# \tamount_currency = cur_obj.computerate(cr, uid, 
invoice_browse.currency_id.id,invoice_browse.company_id.tax_base_currency.id , amt, context=context_rate)\n\n\t\t\t# \tt_moves_arp_ap.update({'amount_currency':amount_currency,'currency_id':invoice_browse.company_id and invoice_browse.company_id.tax_base_currency.id})\n\t\t\t# \tmoves_ar_ap = (0,0,t_moves_arp_ap)\n\t\t\t\n\t\t\tprint \"moves_ar_ap-----------\",total_tax,moves_ar_ap[2]['debit'],moves_ar_ap[2]['credit']\n\t\t\tif moves_ar_ap and total_tax > 0.0 and found_tax and found_not_zero:\n\t\t\t\ttemp = moves_ar_ap[2].copy()\n\t\t\t\ttemp2 = moves_ar_ap[2].copy()\n\t\t\t\tdebit = moves_ar_ap[2]['debit']>0.0 and moves_ar_ap[2]['debit'] - total_tax or moves_ar_ap[2]['debit']\n\t\t\t\tcredit = moves_ar_ap[2]['credit']>0.0 and moves_ar_ap[2]['credit'] - total_tax or moves_ar_ap[2]['credit']\n\t\t\t\tdebit2 = moves_ar_ap[2]['debit']>0.0 and total_tax or 0.0\n\t\t\t\tcredit2 = moves_ar_ap[2]['credit']>0.0 and total_tax or 0.0\n\n\t\t\t\t# if invoice_browse.currency_id.id != invoice_browse.company_id.currency_id.id or invoice_browse.currency_tax_id.id !=invoice_browse.company_id.currency_id.id or invoice_browse.use_kmk_ar_ap:\n\t\t\t\t# \ttemp.update({\n\t\t\t\t# \t\t'amount_currency':(invoice_browse.currency_id.id != invoice_browse.company_id.currency_id.id or invoice_browse.use_kmk_ar_ap) and (total_trans_amt_currency-total_tax_amt_currency) or False,\n\t\t\t\t# \t\t'currency_id':(invoice_browse.currency_id.id != invoice_browse.company_id.currency_id.id and not invoice_browse.use_kmk_ar_ap and invoice_browse.currency_id.id) or (invoice_browse.use_kmk_ar_ap and invoice_browse.currency_tax_id and invoice_browse.currency_tax_id.id) or False,\n\t\t\t\t# \t\t})\n\n\t\t\t\t# \ttemp2.update({\n\t\t\t\t# \t\t'amount_currency':total_tax_amt_currency,\n\t\t\t\t# \t\t'ar_ap_tax':True,\n\t\t\t\t# \t\t'currency_id':invoice_browse.currency_tax_id and invoice_browse.currency_tax_id.id or invoice_browse.currency_id.id,})\n\t\t\t\t\n\t\t\t\tis_kmk_tax = invoice_browse.currency_tax_id.id == invoice_browse.company_id.tax_base_currency.id\n\t\t\t\tif is_kmk_tax:\n\t\t\t\t\tif invoice_browse.currency_id.id == invoice_browse.company_id.currency_id.id and invoice_browse.use_kmk_ar_ap:\n\t\t\t\t\t\ttemp.update({\n\t\t\t\t\t\t\t'amount_currency':(total_trans_amt_currency2-total_tax_amt_currency),\n\t\t\t\t\t\t\t'currency_id':invoice_browse.currency_tax_id.id,\n\t\t\t\t\t\t\t})\n\t\t\t\t\telif invoice_browse.currency_id.id != invoice_browse.company_id.currency_id.id:\n\t\t\t\t\t\tif invoice_browse.use_kmk_ar_ap:\n\t\t\t\t\t\t\ttemp.update({\n\t\t\t\t\t\t\t\t'amount_currency':(total_trans_amt_currency-total_tax_amt_currency),\n\t\t\t\t\t\t\t\t'currency_id': invoice_browse.currency_tax_id.id,\n\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttemp.update({\n\t\t\t\t\t\t\t\t'amount_currency':(total_trans_amt_currency-total_tax_amt_currency),\n\t\t\t\t\t\t\t\t'currency_id': invoice_browse.currency_id.id!=invoice_browse.company_id.currency_id.id and invoice_browse.currency_id.id or False,\n\t\t\t\t\t\t\t\t})\n\n\t\t\t\t\ttemp2.update({\n\t\t\t\t\t\t'amount_currency':total_tax_amt_currency,\n\t\t\t\t\t\t'ar_ap_tax':True,\n\t\t\t\t\t\t'currency_id': invoice_browse.currency_tax_id.id,})\n\t\t\t\telse:\n\t\t\t\t\ttemp.update({\n\t\t\t\t\t\t'amount_currency':invoice_browse.currency_id.id != invoice_browse.company_id.currency_id.id and (total_trans_amt_currency-total_tax_amt_currency) or 0.0,\n\t\t\t\t\t\t'currency_id':invoice_browse.currency_id.id!=invoice_browse.company_id.currency_id.id and invoice_browse.currency_id.id 
or False,\n\t\t\t\t\t\t})\n\t\t\t\t\ttemp2.update({\n\t\t\t\t\t\t'amount_currency':total_tax_amt_currency,\n\t\t\t\t\t\t'ar_ap_tax':True,\n\t\t\t\t\t\t'currency_id':invoice_browse.currency_id.id!=invoice_browse.company_id.currency_id.id and invoice_browse.currency_id.id or False,})\n\n\n\n\t\t\t\ttemp.update({'debit':abs(debit),'credit':abs(credit),})\n\t\t\t\ttemp2.update({'debit':abs(debit2),'credit':abs(credit2)})\n\n\t\t\t\tmoves.append((0,0,temp))\n\t\t\t\tmoves.append((0,0,temp2))\n\t\t\telif moves_ar_ap and not found_tax:\n\t\t\t\tmoves.append(moves_ar_ap)\n\t\t\telif moves_ar_ap and found_tax and not found_not_zero:\n\t\t\t\tmoves.append(moves_ar_ap)\n\t\t\telse:\n\t\t\t\tmoves.append(moves_ar_ap)\n\t\t\treturn moves\n\t\telse:\n\t\t\treturn res",
"def check_budget(self):\n if self.invoice_line_ids:\n for line in self.invoice_line_ids:\n if line.budget_confirm_id.state in ('waiting_valid','complete','unvalid') and line.account_budget_required == True:\n line.budget_confirm_id.check_budget_invoice()\n self.change_state()\n else:\n raise ValidationError(_(\"You must enter at least one line in Bill Information!!!\"))",
"def done(self,cr,uid,ids,context={}):\n for fees in self.browse(cr, uid, ids, context=context):\n contract = fees.contract_id\n voucher_id = super(contract_co_operative_fees, self).create_invoice(cr, uid, ids, context)\n fees.write({'state':'done'})\n \"\"\"user_obj = self.pool.get('res.users')\n voucher_obj = self.pool.get('account.voucher')\n voucher_line_obj = self.pool.get('account.voucher.line')\n\t\n for fees in self.browse(cr, uid, ids, context=context):\n\t \n contract = fees.contract_id\n \n voucher_id = voucher_obj.create(cr, uid, {\n 'contract_id': fees.contract_id.id,\n 'amount': fees.fees_amount,\n 'type': 'purchase',\n 'date': time.strftime('%Y-%m-%d'),\n 'partner_id': contract.partner_id.id , \n #'journal_id': 67,\n 'reference': contract.name+\"/\"+ fees.name,\n 'state': 'draft',\n # 'name':'Project fees:'+fees.name +'project :'+contract.department_id.name,\n # 'currency_id':contract.currency_id.id,\n })\n voucher_obj.write(cr,uid,[voucher_id],{'amount': fees.fees_amount}, context=context)\n \n \n vocher_line_id = voucher_line_obj.create(cr, uid, {\n 'amount': fees.fees_amount,\n 'voucher_id': voucher_id,\n 'type': 'dr',\n 'account_id': contract.contract_account.id,\n 'name': fees.name,\n })\n contract.write({'voucher_ids': [(4, voucher_id)]}, context=context)\n fees.write({'state':'done'})\n\t print \"voucher id:\",voucher_id\n\t print \"amount:\",fees.fees_amount\n\n \n Workflow function to change the state to confirm.\n \n @return: True\n \"\"\"\n currency_obj = self.pool.get('res.currency')\n new_amount = 0.0\n for fees in self.browse(cr, uid, ids):\n \n contract_currency = contract.currency_id.id\n euro_id = currency_obj.search(cr, uid, [('name','=','EUR')],limit=1)\n curren = currency_obj.browse(cr, uid, euro_id)\n new_amount = currency_obj.compute(cr, uid, contract_currency, curren[0].id, fees.fees_amount, fees.fees_date) \n all_amount = contract.fees_total_amount + fees.fees_amount\n if all_amount > contract.contract_amount :\n raise osv.except_osv(_('Amount exceed !'), _('The total fees amount well be more than the contract amount ..'))\n else:\n contract.write({'fees_total_amount': all_amount}) \n self.write(cr,uid,ids,{'fees_amount_in_euro':new_amount })\n\n return True",
"def process_reconciliation(self, cr, uid, id, mv_line_dicts, context=None):\n if context is None:\n context = {}\n st_line = self.browse(cr, uid, id, context=context)\n company_currency = st_line.journal_id.company_id.currency_id\n statement_currency = st_line.journal_id.currency or company_currency\n bs_obj = self.pool.get('account.bank.statement')\n am_obj = self.pool.get('account.move')\n aml_obj = self.pool.get('account.move.line')\n currency_obj = self.pool.get('res.currency')\n\n # Checks\n if st_line.journal_entry_id.id:\n raise osv.except_osv(_('Error!'), _('The bank statement line was already reconciled.'))\n for mv_line_dict in mv_line_dicts:\n for field in ['debit', 'credit', 'amount_currency']:\n if field not in mv_line_dict:\n mv_line_dict[field] = 0.0\n if mv_line_dict.get('counterpart_move_line_id'):\n mv_line = aml_obj.browse(cr, uid, mv_line_dict.get('counterpart_move_line_id'), context=context)\n if mv_line.reconcile_id:\n raise osv.except_osv(_('Error!'), _('A selected move line was already reconciled.'))\n\n # Create the move\n move_name = (st_line.statement_id.name or st_line.name) + \"/\" + str(st_line.sequence)\n move_vals = bs_obj._prepare_move(cr, uid, st_line, move_name, context=context)\n move_id = am_obj.create(cr, uid, move_vals, context=context)\n\n # Create the move line for the statement line\n if st_line.statement_id.currency.id != company_currency.id:\n if st_line.currency_id == company_currency:\n amount = st_line.amount_currency\n else:\n ctx = context.copy()\n ctx['date'] = st_line.date\n amount = currency_obj.compute(cr, uid, st_line.statement_id.currency.id, company_currency.id, st_line.amount, context=ctx)\n else:\n amount = st_line.amount\n bank_st_move_vals = bs_obj._prepare_bank_move_line(cr, uid, st_line, move_id, amount, company_currency.id, context=context)\n aml_obj.create(cr, uid, bank_st_move_vals, context=context)\n # Complete the dicts\n st_line_currency = st_line.currency_id or statement_currency\n st_line_currency_rate = st_line.currency_id and (st_line.amount_currency / st_line.amount) or False\n to_create = []\n for mv_line_dict in mv_line_dicts:\n if mv_line_dict.get('is_tax_line'):\n continue\n mv_line_dict['ref'] = move_name\n mv_line_dict['move_id'] = move_id\n mv_line_dict['period_id'] = st_line.statement_id.period_id.id\n mv_line_dict['journal_id'] = st_line.journal_id.id\n mv_line_dict['company_id'] = st_line.company_id.id\n mv_line_dict['statement_id'] = st_line.statement_id.id\n if mv_line_dict.get('counterpart_move_line_id'):\n mv_line = aml_obj.browse(cr, uid, mv_line_dict['counterpart_move_line_id'], context=context)\n mv_line_dict['partner_id'] = mv_line.partner_id.id or st_line.partner_id.id\n mv_line_dict['account_id'] = mv_line.account_id.id\n if st_line_currency.id != company_currency.id:\n ctx = context.copy()\n ctx['date'] = st_line.date\n mv_line_dict['amount_currency'] = mv_line_dict['debit'] - mv_line_dict['credit']\n mv_line_dict['currency_id'] = st_line_currency.id\n if st_line.currency_id and statement_currency.id == company_currency.id and st_line_currency_rate:\n debit_at_current_rate = self.pool.get('res.currency').round(cr, uid, company_currency, mv_line_dict['debit'] / st_line_currency_rate)\n credit_at_current_rate = self.pool.get('res.currency').round(cr, uid, company_currency, mv_line_dict['credit'] / st_line_currency_rate)\n elif st_line.currency_id and st_line_currency_rate:\n debit_at_current_rate = currency_obj.compute(cr, uid, statement_currency.id, company_currency.id, mv_line_dict['debit'] / 
st_line_currency_rate, context=ctx)\n credit_at_current_rate = currency_obj.compute(cr, uid, statement_currency.id, company_currency.id, mv_line_dict['credit'] / st_line_currency_rate, context=ctx)\n else:\n debit_at_current_rate = currency_obj.compute(cr, uid, st_line_currency.id, company_currency.id, mv_line_dict['debit'], context=ctx)\n credit_at_current_rate = currency_obj.compute(cr, uid, st_line_currency.id, company_currency.id, mv_line_dict['credit'], context=ctx)\n if mv_line_dict.get('counterpart_move_line_id'):\n #post an account line that use the same currency rate than the counterpart (to balance the account) and post the difference in another line\n ctx['date'] = mv_line.date\n if mv_line.currency_id.id == mv_line_dict['currency_id'] \\\n and float_is_zero(abs(mv_line.amount_currency) - abs(mv_line_dict['amount_currency']), precision_rounding=mv_line.currency_id.rounding):\n debit_at_old_rate = mv_line.credit\n credit_at_old_rate = mv_line.debit\n else:\n debit_at_old_rate = currency_obj.compute(cr, uid, st_line_currency.id, company_currency.id, mv_line_dict['debit'], context=ctx)\n credit_at_old_rate = currency_obj.compute(cr, uid, st_line_currency.id, company_currency.id, mv_line_dict['credit'], context=ctx)\n mv_line_dict['credit'] = credit_at_old_rate\n mv_line_dict['debit'] = debit_at_old_rate\n if debit_at_old_rate - debit_at_current_rate:\n currency_diff = debit_at_current_rate - debit_at_old_rate\n to_create.append(self.get_currency_rate_line(cr, uid, st_line, -currency_diff, move_id, context=context))\n if credit_at_old_rate - credit_at_current_rate:\n currency_diff = credit_at_current_rate - credit_at_old_rate\n to_create.append(self.get_currency_rate_line(cr, uid, st_line, currency_diff, move_id, context=context))\n if mv_line.currency_id and mv_line_dict['currency_id'] == mv_line.currency_id.id:\n amount_unreconciled = mv_line.amount_residual_currency\n else:\n amount_unreconciled = currency_obj.compute(cr, uid, company_currency.id, mv_line_dict['currency_id'] , mv_line.amount_residual, context=ctx)\n if float_is_zero(mv_line_dict['amount_currency'] + amount_unreconciled, precision_rounding=mv_line.currency_id.rounding):\n amount = mv_line_dict['debit'] or mv_line_dict['credit']\n sign = -1 if mv_line_dict['debit'] else 1\n currency_rate_difference = sign * (mv_line.amount_residual - amount)\n if not company_currency.is_zero(currency_rate_difference):\n exchange_lines = self._get_exchange_lines(cr, uid, st_line, mv_line, currency_rate_difference, mv_line_dict['currency_id'], move_id, context=context)\n for exchange_line in exchange_lines:\n to_create.append(exchange_line)\n\n else:\n mv_line_dict['debit'] = debit_at_current_rate\n mv_line_dict['credit'] = credit_at_current_rate\n elif statement_currency.id != company_currency.id:\n #statement is in foreign currency but the transaction is in company currency\n prorata_factor = (mv_line_dict['debit'] - mv_line_dict['credit']) / st_line.amount_currency\n mv_line_dict['amount_currency'] = prorata_factor * st_line.amount\n to_create.append(mv_line_dict)\n # If the reconciliation is performed in another currency than the company currency, the amounts are converted to get the right debit/credit.\n # If there is more than 1 debit and 1 credit, this can induce a rounding error, which we put in the foreign exchane gain/loss account.\n if st_line_currency.id != company_currency.id:\n diff_amount = bank_st_move_vals['debit'] - bank_st_move_vals['credit'] \\\n + sum(aml['debit'] for aml in to_create) - sum(aml['credit'] for aml 
in to_create)\n if not company_currency.is_zero(diff_amount):\n diff_aml = self.get_currency_rate_line(cr, uid, st_line, diff_amount, move_id, context=context)\n diff_aml['name'] = _('Rounding error from currency conversion')\n to_create.append(diff_aml)\n # Create move lines\n move_line_pairs_to_reconcile = []\n for mv_line_dict in to_create:\n counterpart_move_line_id = None # NB : this attribute is irrelevant for aml_obj.create() and needs to be removed from the dict\n if mv_line_dict.get('counterpart_move_line_id'):\n counterpart_move_line_id = mv_line_dict['counterpart_move_line_id']\n del mv_line_dict['counterpart_move_line_id']\n new_aml_id = aml_obj.create(cr, uid, mv_line_dict, context=context)\n if counterpart_move_line_id != None:\n move_line_pairs_to_reconcile.append([new_aml_id, counterpart_move_line_id])\n # Reconcile\n for pair in move_line_pairs_to_reconcile:\n aml_obj.reconcile_partial(cr, uid, pair, context=context)\n # Mark the statement line as reconciled\n self.write(cr, uid, id, {'journal_entry_id': move_id}, context=context)\n if st_line.statement_id.to_partner:\n self.pool.get('account.move').write(cr, uid, move_id, {'partner_id': st_line.statement_id.partner_id.id}, context)",
"def mission_approved(self, cr, uid, ids, context=None):\n voucher_obj = self.pool.get('account.voucher')\n voucher_line_obj = self.pool.get('account.voucher.line')\n account_period_obj = self.pool.get('account.period')\n for mission in self.browse(cr, uid, ids, context=context):\n mission_amount = mission.mission_fee\n if mission_amount <= 0:\n raise osv.except_osv(_('Warning!'),_('Mission fee should be more than zero'))\n \n if mission.mission_id.fees_account_id and mission.mission_id.journal_id and mission.mission_id.account_analytic_id:\n date = time.strftime('%Y-%m-%d')\n period = account_period_obj.find(cr, uid, dt=date, context={'company_id':mission.company_id.id})[0]\n voucher_dict = {\n 'company_id':mission.company_id.id,\n 'journal_id':mission.mission_id.journal_id.id,\n 'account_id':mission.mission_id.fees_account_id.id,\n 'period_id': period,\n 'name': mission.name + ' - ' + mission.start_date,\n 'amount':mission_amount,\n 'type':'purchase',\n 'date': date,\n 'reference':'HR/Mission Fees/' + mission.name + ' - ' + mission.start_date,\n \t\t\t\t\t'department_id': mission.department_id.id,\n\t\t\t\t\t'currency': mission.mission_id.fees_currency_id.id,\n }\n voucher = voucher_obj.create(cr, uid, voucher_dict, context=context)\n voucher_line_dict = {\n 'voucher_id':voucher,\n 'account_id':mission.mission_id.fees_account_id.id,\n 'account_analytic_id':mission.mission_id.account_analytic_id.id,\n 'amount':mission_amount,\n 'type':'dr',\n }\n voucher_line_obj.create(cr, uid, voucher_line_dict, context=context)\n \n vouch = voucher_obj.browse(cr, uid, voucher, context=context)\n self.create_grant_rights(cr,uid,ids,context=context)\n return self.write(cr, uid, ids, {'state':'approved', 'voucher_number': vouch.number, })\n else:\n raise osv.except_osv(_('Error!'),_(\"Please enter mission accounting details at the configuration of the mission destination\"))\n \n self.create_grant_rights(cr,uid,ids,context=context)\n return self.write(cr, uid, ids, {'state':'approved'}, context=context)",
"def check_spare_invoice(self, cr ,uid ,ids , context=None):\n \n for rec in self.browse(cr , uid ,ids):\n approved=False\n for quote in rec.q_ids:\n if quote.state == 'done':\n approved=True\n if not approved:\n raise osv.except_osv( _('No approved Invoice!'), _('There is No Invoice approved.'))\n return False\n \n return True",
"def post(self):\n for rec in self:\n # code start\n# total = 0.0\n# for line in rec.invoice_lines:\n# if line.allocation < 0:\n# raise ValidationError(_(\"Negative allocation amount not allowed!\"))\n# if line.allocation > line.open_amount:\n# raise UserError(\"Allocation amount %s is greater then open amount %s of Invoice.\" % (line.allocation, line.open_amount))\n# total += line.allocation\n# if line.open_amount != line.invoice_id.residual:\n# raise UserError(\"Due amount changed.\\n Please click 'Update Invoice' button to update amount\")\n# \n# if total > rec.amount:\n# raise UserError(\"Total allocation %s is more then payment amount %s\" % (total, rec.amount))\n amt = 0\n if rec.invoice_lines:\n \n for line in rec.invoice_lines:\n amt += line.allocation\n # if rec.amount < amt:\n # raise ValidationError((\"Payment amount must be greater then or equal to '%s'\") %(amt))\n # if rec.amount > amt:\n # for line in rec.invoice_lines:\n # line.allocation = line.allocation + (rec.amount - amt)\n # break\n return super(account_payment,self).post()",
"def write(self, cr, uid, ids, vals, context=None):\n write_boolean = super(employee_mission_line, self).write(cr, uid, ids, vals, context=context)\n for line in self.browse(cr, uid, ids, context=context):\n total_days = 0\n for allow_state in line.allow_state:\n total_days += allow_state.day_state \n if total_days > line.days:\n raise osv.except_osv(_('Warning!'),_('The total days of allowance state for the employee %s is more than mission days for the same employee.')%(line.employee_id.name))\n \n return write_boolean",
"def print_checks(self):\n self = self.filtered(lambda r: r.payment_method_id.code == 'check_printing' and r.state != 'reconciled')\n\n if len(self) == 0:\n raise UserError(_(\"Payments to print as a checks must have 'Check' selected as payment method and \"\n \"not have already been reconciled\"))\n if any(payment.journal_id != self[0].journal_id for payment in self):\n raise UserError(_(\"In order to print multiple checks at once, they must belong to the same bank journal.\"))\n\n self.filtered(lambda r: r.state == 'draft').post()\n self.write({'state': 'sent'})\n\n if not self[0].journal_id.check_manual_sequencing:\n is_printed = False\n if self.check_number != 0:\n is_printed = True\n return {\n 'name': _('Print Check Report'),\n 'type': 'ir.actions.act_window',\n 'res_model': 'wiz.print.check',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'target': 'new',\n 'context': {\n 'payment_ids': self.ids,\n 'default_next_check_number': self._get_next_check_no()[0],\n\t\t 'default_preprinted': is_printed,\n }\n }\n else:\n return self.do_print_checks()",
"def mission_approved_old(self, cr, uid, ids, context=None):\n voucher_obj = self.pool.get('account.voucher')\n voucher_line_obj = self.pool.get('account.voucher.line')\n account_period_obj = self.pool.get('account.period')\n emp_obj = self.pool.get('hr.employee')\n payroll_obj = self.pool.get('payroll')\n user = self.pool.get('res.users').browse(cr, uid, uid, context = context)\n \tjournal_id = user.company_id.hr_journal_id and user.company_id.hr_journal_id.id or False\n analytic_id = user.company_id.hr_analytic_account_id and user.company_id.hr_analytic_account_id.id or False\n for mission in self.browse(cr, uid, ids, context=context):\n employees_dic = {}\n note = ' \\n' + u'الموظفين:' \n total_amount = tax_amount = stamp_amount = 0.0\n for line in mission.mission_line:\n total_amount += line.mission_amounts\n tax_amount += line.tax\n stamp_amount += line.stamp\n employees_dic[line.employee_id] = line.mission_amounts\n note += ' \\n' + line.employee_id.name \n\n lines = emp_obj.get_emp_analytic(cr, uid, employees_dic, {'allow_deduct_id': mission.mission_id.allowance_id.id})\n \n print \"--------------------note\", note\n for line in lines:\n line['allow_deduct_id'] = mission.mission_id.allowance_id.id\n reference = 'HR/Mission Allowance/' + mission.name + ' - ' + mission.start_date\n narration = 'HR/Mission Allowance/' + mission.name + ' - ' + mission.start_date\n narration += note\n ## if total amount more than 0 then create voucher or continue\n if total_amount > 0:\n voucher = payroll_obj.create_payment(cr, uid, ids, {'reference':reference, 'lines':lines,\n 'tax_amount':tax_amount, 'stamp_amount':stamp_amount,\n 'narration':narration,'department_id':mission.department_id.id,\n 'model':'account.voucher'}, context=context)\n if voucher:\n vouch = voucher_obj.browse(cr, uid, voucher, context=context)\n return self.write(cr, uid, ids, {'voucher_number': voucher})\n '''mission_amount = 0.0\n stamp = 0.0\n for emp_mission_amount in mission.mission_line:\n mission_amount += emp_mission_amount.mission_amounts\n stamp += emp_mission_amount.stamp\n if mission.mission_id.allowance_id.account_id and journal_id and analytic_id:\n date = time.strftime('%Y-%m-%d')\n period = account_period_obj.find(cr, uid, dt=date, context={'company_id':mission.company_id.id})[0]\n voucher_dict = {\n 'company_id':mission.company_id.id,\n 'journal_id':journal_id,\n 'account_id':mission.mission_id.allowance_id.account_id.id,\n 'period_id': period,\n 'name': mission.name + ' - ' + mission.start_date,\n 'amount':mission_amount-stamp,\n 'type':'purchase',\n 'date': date,\n 'reference':'HR/Mission/' + mission.name + ' - ' + mission.start_date,\n \t\t\t\t\t'department_id': mission.department_id.id,\n\t\t\t\t\t'currency': mission.mission_id.fees_currency_id.id,\n }\n voucher = voucher_obj.create(cr, uid, voucher_dict, context=context)\n\n voucher_line_dict = {\n 'voucher_id':voucher,\n 'account_id':mission.mission_id.allowance_id.account_id.id,\n 'account_analytic_id':analytic_id,\n 'amount':mission_amount,\n 'type':'dr',\n }\n voucher_line_obj.create(cr, uid, voucher_line_dict, context=context)\n if stamp:\n if mission.company_id.stamp_account_id.id:\n fees_voucher_line = {\n 'voucher_id':voucher,\n 'account_id':mission.mission_id.allowance_id.account_id.id,\n 'amount':-stamp,\n 'type':'dr',\n }\n voucher_line_obj.create(cr, uid, fees_voucher_line, context=context)\n else:\n raise osv.except_osv(_('Error!'),_(\"Please enter stamp account in HR settings\"))\n vouch = voucher_obj.browse(cr, uid, voucher, context=context)\n 
return self.write(cr, uid, ids, {'state':'approved', 'voucher_number': vouch.number, })\n else:\n raise osv.except_osv(_('Error!'),_(\"Please enter mission accounting details\"))'''\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method deletes a printed check: it clears the chk_seq value on the payment and sets the check's status in the check log to "delete".
|
def do_delete(self, cr, uid, ids, context=None):
    data = self.browse(cr, uid, ids, context=context)[0]
    voucher_pool = self.pool.get('account.voucher')
    move_pool = self.pool.get('account.move')
    move_line_pool = self.pool.get('account.move.line')
    check_log_pool = self.pool.get('check.log')
    voucher = voucher_pool.browse(cr, uid, data.payment_id.id, context=context)
    next_seq = voucher.number
    # Find the active check-log entry attached to this voucher.
    chk_log_ids = check_log_pool.search(cr, uid, [('name', '=', voucher.id), ('status', '=', 'active')], context=context)
    # Clear the printed check sequence on the payment and reset its check status.
    voucher_pool.write(cr, uid, [voucher.id], {'chk_seq': '', 'chk_status': True, 'date_due': (voucher.date_due or voucher.date)}, context=context)
    if chk_log_ids:
        # Mark the check as deleted in the check log.
        check_log_pool.write(cr, uid, chk_log_ids, {'status': 'delete', 'deleted': True}, context=context)
    # Point the journal entry and its lines back at the voucher number.
    move_pool.write(cr, uid, [voucher.move_id.id], {'ref': next_seq or ''}, context=context)
    lines = move_line_pool.search(cr, uid, [('move_id', '=', voucher.move_id.id)], context=context)
    move_line_pool.write(cr, uid, lines, {'ref': next_seq or ' '}, context=context)
    return {'type': 'ir.actions.act_window_close'}
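
# Usage sketch (not part of the original wizard; model and field names are
# assumptions for illustration only): in the legacy OpenERP 7 API the wizard
# would typically be created against the payment to clean up and then have
# do_delete() invoked, e.g. from a button handler or a test.
def _example_delete_printed_check(self, cr, uid, voucher_id, context=None):
    # 'check.delete' and 'payment_id' are hypothetical names.
    wiz_pool = self.pool.get('check.delete')
    wiz_id = wiz_pool.create(cr, uid, {'payment_id': voucher_id}, context=context)
    return wiz_pool.do_delete(cr, uid, [wiz_id], context=context)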
|
[
"def delete_check(self, entity, check):\r\n uri = \"/%s/%s/checks/%s\" % (self.uri_base, utils.get_id(entity),\r\n utils.get_id(check))\r\n resp, resp_body = self.api.method_delete(uri)",
"def delete_check(self, entity, check):\r\n return self._entity_manager.delete_check(entity, check)",
"def check_payment(self, cr, uid, ids, context=None):\n data = self.browse(cr, uid, ids, context=context)[0]\n check_log_pool = self.pool.get('check.log')\n sequence_pool = self.pool.get('ir.sequence')\n move_pool = self.pool.get('account.move') \n voucher_pool = self.pool.get('account.voucher')\n move_line_pool = self.pool.get('account.move.line')\n voucher_id = (data.payment_id and data.payment_id.id) or (context['active_model'] == 'account.move' and self.check_move_data(cr, uid, ids, context=context))\n if not data.payment_id: data.write({'payment_id':voucher_id})\n if data.new_no:\n voucher = voucher_pool.browse(cr, uid, voucher_id, context=context)\n journal_id=voucher and (voucher.pay_journal_id or voucher.journal_id)\n if self._check_journal_seq(journal_id, context=context):\n chk_log_ids = check_log_pool.search(cr,uid,[('name','=',voucher.id),('status','=','active')], context=context)\n if data.state == 'reprint':\n check_log_pool.write(cr,uid,chk_log_ids, {'status': data.status}, context=context)\n\n sequence_pool.write(cr, uid, [journal_id.check_sequence.id], {'number_next_actual':int(data.new_no)}, context=context)\n next_seq = sequence_pool.get_id(cr, uid, journal_id.check_sequence.id, context=context)\n voucher_pool.write(cr, uid,[voucher.id],{'amount_in_word': amount_to_text_ar(voucher.amount, 'ar'),'chk_seq': next_seq, 'chk_status':True, 'date_due': (voucher.date_due or voucher.date)}, context=context)\n if data.state == 'update':\n check_log_pool.write(cr,uid,chk_log_ids, {'check_no': next_seq}, context=context)\n else: \n check_log_pool.create(cr, uid,{'name': voucher.id, 'status': 'active', 'check_no': next_seq, 'journal_id':journal_id.id}, context=context)\n move_pool.write(cr, uid,[voucher.move_id.id], {'ref' : next_seq or ' '}, context=context)\n lines = move_line_pool.search(cr, uid,[('move_id','=',voucher.move_id.id)], context=context)\n move_line_pool.write(cr, uid,lines, {'ref' : next_seq or ' '}, context=context)\n if data.state != 'update':\n return self.print_report(cr, uid, ids, context=context)\n return {'type':'ir.actions.act_window_close'}",
"def test_delete_checker_result(self):\n pass",
"def print_check(self, cr, uid, ids, context=None):\n if context == None:\n context = {}\n value = {}\n model_data = self.pool.get('ir.model.data')\n check_layout_report = {\n 'top': 'account.print.check.top.jam',\n 'middle': 'account.print.check.middle.jam',\n 'bottom': 'account.print.check.bottom.jam',\n }\n check = self.browse(cr, uid, ids[0], context=context)\n if check.check_number or check.journal_id.use_preprint_check:\n check_layout = check.company_id.check_layout\n value = {\n 'type': 'ir.actions.report.xml',\n 'report_name': check_layout_report[check_layout],\n 'datas': {\n 'model': 'account.voucher',\n 'id': ids and ids[0] or False,\n 'ids': ids and ids or [],\n 'report_type': 'pdf'\n },\n 'nodestroy': True\n }\n else:\n form_view = model_data.get_object_reference(cr, uid, 'account_check_writing_jam', 'view_account_check_write')\n value = {\n 'name': _('Print Check'),\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'res_model': 'account.check.write',\n 'views': [(form_view and form_view[1] or False, 'form'), (False, 'tree')],\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'context': context,\n }\n return value",
"def delete_record():",
"def delete_all_checkpoints(self, path):\n self.log.info(\"Airavata.checkpounts.delete_all_checkpoints: ('%s')\", path)",
"def _delete_element(self, checkbox, del_button, element_name):\n if not self.s.is_checked(checkbox):\n self.s.click_and_wait(checkbox)\n\n self.s.choose_ok_on_next_confirmation()\n self.s.click_and_wait(del_button)\n\n time.sleep(1)\n if not self.s.is_confirmation_present(5):\n raise Exception(\"No dialog confirmation for deleting %s appears on checkbox(%s)/del_button(%s)\" % (element_name, checkbox, del_button))\n\n self.s.get_confirmation()\n \n alt = self.s.get_alert()\n if alt:\n logging.info('alert get %s'%alt)\n raise Exception('alert get [%s]'%alt)",
"def delete_all_checkpoints(self, path):\n #TODO",
"def test_delete_muveto_pmt_item(self):\n pass",
"def test_eliminacion(self):\n S2 = Sprint.objects.get(nombre= 'Sprint 2')\n S2.delete()\n\n print('Eliminacion de Sprints ejecutada correctamente.')",
"def print_checks(self):\n self = self.filtered(lambda r: r.payment_method_id.code ==\n 'check_printing' and r.state != 'reconciled')\n if len(self) == 0:\n raise UserError(_(\"Payments to print as a checks must have 'Check' selected as payment method and \"\n \"not have already been reconciled\"))\n if any(payment.journal_id != self[0].journal_id for payment in self):\n raise UserError(\n _(\"In order to print multiple checks at once, they must belong to the same bank journal.\"))\n if not self[0].journal_id.check_manual_sequencing:\n is_printed = False\n if self.check_number != 0:\n is_printed = True\n last_printed_check = self.search([\n ('journal_id', '=', self[0].journal_id.id),\n ('check_number', '!=', 0)], order=\"check_number desc\", limit=1)\n next_check_number = last_printed_check and last_printed_check.check_number\n if not is_printed:\n next_check_number = last_printed_check and last_printed_check.check_number + 1 or 1\n return {\n 'name': _('Print Check Report'),\n 'type': 'ir.actions.act_window',\n 'res_model': 'wiz.print.check',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'target': 'new',\n 'context': {\n 'payment_ids': self.ids,\n 'default_next_check_number': next_check_number,\n 'default_preprinted': is_printed,\n }\n }\n else:\n return self.do_print_checks()",
"def deletedetails(name):\r\n message=db.deleteinfo(name)\r\n #print(message)\r\n eel.deleted(message)\r\n #print(name)\r",
"def print_checks(self):\n self = self.filtered(lambda r: r.payment_method_id.code == 'check_printing' and r.state != 'reconciled')\n\n if len(self) == 0:\n raise UserError(_(\"Payments to print as a checks must have 'Check' selected as payment method and \"\n \"not have already been reconciled\"))\n if any(payment.journal_id != self[0].journal_id for payment in self):\n raise UserError(_(\"In order to print multiple checks at once, they must belong to the same bank journal.\"))\n\n self.filtered(lambda r: r.state == 'draft').post()\n self.write({'state': 'sent'})\n\n if not self[0].journal_id.check_manual_sequencing:\n is_printed = False\n if self.check_number != 0:\n is_printed = True\n return {\n 'name': _('Print Check Report'),\n 'type': 'ir.actions.act_window',\n 'res_model': 'wiz.print.check',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'target': 'new',\n 'context': {\n 'payment_ids': self.ids,\n 'default_next_check_number': self._get_next_check_no()[0],\n\t\t 'default_preprinted': is_printed,\n }\n }\n else:\n return self.do_print_checks()",
"def stepCreateCheckPayment(self, sequence=None, sequence_list=None, **kwd):\n self.check_payment = self.check_payment_module.newContent(id = 'check_payment',\n portal_type = 'Check Payment',\n destination_payment_value = self.bank_account_1,\n # aggregate_value = self.check_1,\n resource_value = self.currency_1,\n aggregate_free_text = \"0000050\",\n description = \"test\",\n # source_value = self.bi_counter,\n start_date = DateTime().Date(),\n source_total_asset_price = 20000.0,\n unique_per_account=True)\n # call set source to go into the interaction workflow to update local roles\n self.check_payment._setSource(self.bi_counter.getRelativeUrl())\n self.assertNotEqual(self.check_payment, None)\n self.assertEqual(self.check_payment.getTotalPrice(fast=0), 0.0)\n self.assertEqual(self.check_payment.getDestinationPayment(), self.bank_account_1.getRelativeUrl())\n self.assertEqual(self.check_payment.getAggregateFreeText(), self.check_1.getReference())\n self.assertEqual(self.check_payment.getSourceTotalAssetPrice(), 20000.0)\n self.assertEqual(self.check_payment.getSource(), self.bi_counter.getRelativeUrl())\n # set source reference\n self.setDocumentSourceReference(self.check_payment)\n # check source reference\n self.assertNotEqual(self.check_payment.getSourceReference(), '')\n self.assertNotEqual(self.check_payment.getSourceReference(), None)\n # the initial state must be draft\n self.assertEqual(self.check_payment.getSimulationState(), 'draft')\n\n # source reference must be automatically generated\n self.check_payment.setSourceReference(self.check_payment.Baobab_getUniqueReference())\n self.assertNotEqual(self.check_payment.getSourceReference(), None)\n self.assertNotEqual(self.check_payment.getSourceReference(), '')",
"def test_delete_escalation(self):\n pass",
"def _delete_unfunded(self):\n\n order_id = digest(json.dumps(self.contract, indent=4)).encode(\"hex\")\n if self.is_purchase:\n file_path = DATA_FOLDER + \"purchases/in progress/\" + order_id + \".json\"\n self.db.Purchases().delete_purchase(order_id)\n else:\n file_path = DATA_FOLDER + \"store/listings/in progress/\" + order_id + \".json\"\n self.db.Sales().delete_sale(order_id)\n if os.path.exists(file_path):\n os.remove(file_path)",
"def test_deleteVoucher(amount: int = 10) -> 'reference_number':\r\n\r\n # SetUp\r\n _, result = u.getVouchers()\r\n if len(result) == 0:\r\n u.createVoucher(amount)\r\n time.sleep(3)\r\n _, result = u.getVouchers()\r\n\r\n # Action\r\n status, result = u.deleteVoucher(result[0][\"voucherid\"])\r\n\r\n # Assertion\r\n AssertResultIsRefNum(status, result)",
"def checklist_delete(user, user_id, checklist_id):\n\n checklist = Checklist.query.get(checklist_id)\n\n if checklist.owner_id != user.id:\n return abort(401, description=\"You do not have permission to delete this checklist.\")\n\n if not checklist:\n return abort(404, description=\"Checklist not found.\")\n\n db.session.delete(checklist)\n db.session.commit()\n\n return jsonify(\"the following checklist was deleted\", checklist_schema.dump(checklist))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
rest_framework can't deal with ManyToMany relations that have a through table. In xos, most of the through tables we have use defaults or blank fields, so there's no reason why we shouldn't be able to save these objects. So, let's strip out these m2m relations and deal with them ourselves.
|
def NEED_TO_UPDATE_save_object(self, obj, **kwargs):
    obj._complex_m2m_data = {}
    if getattr(obj, '_m2m_data', None):
        for relatedObject in obj._meta.get_all_related_many_to_many_objects():
            if relatedObject.field.rel.through._meta.auto_created:
                # These are non-through ManyToMany relations and
                # can be updated just fine
                continue
            # Pull the through-table relation out of _m2m_data so the stock
            # ModelSerializer.save_object() does not try to assign it directly.
            fieldName = relatedObject.get_accessor_name()
            if fieldName in obj._m2m_data.keys():
                obj._complex_m2m_data[fieldName] = (relatedObject, obj._m2m_data[fieldName])
                del obj._m2m_data[fieldName]

    serializers.ModelSerializer.save_object(self, obj, **kwargs)

    for (accessor, stuff) in obj._complex_m2m_data.items():
        (relatedObject, data) = stuff
        through = relatedObject.field.rel.through
        local_fieldName = relatedObject.field.m2m_reverse_field_name()
        remote_fieldName = relatedObject.field.m2m_field_name()

        # get the current set of existing relations
        existing = through.objects.filter(**{local_fieldName: obj})

        data_ids = [item.id for item in data]
        existing_ids = [getattr(item, remote_fieldName).id for item in existing]

        #print "data_ids", data_ids
        #print "existing_ids", existing_ids

        # remove relations that are in 'existing' but not in 'data'
        for item in list(existing):
            if getattr(item, remote_fieldName).id not in data_ids:
                print "delete", getattr(item, remote_fieldName)
                item.delete()  # (purge=True)

        # add relations that are in 'data' but not in 'existing'
        for item in data:
            if item.id not in existing_ids:
                #print "add", item
                newModel = through(**{local_fieldName: obj, remote_fieldName: item})
                newModel.save()
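
# Illustrative usage sketch (not part of the original serializer). In DRF 2.x-style
# serializers, restore_object() stashes posted m2m values on obj._m2m_data before
# save_object() runs; this shows the shape of data the hook above expects once it is
# wired in as the serializer's save_object(). The 'sites' accessor and the serializer
# and instance names are hypothetical xos-style names used only for illustration.
def _example_save_with_through_m2m(serializer, deployment, sites):
    # accessor name -> list of related model instances that should end up linked
    deployment._m2m_data = {'sites': list(sites)}
    # saves the object, then adds/removes through-table rows to match 'sites'
    serializer.NEED_TO_UPDATE_save_object(deployment)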
|
[
"def save_m2m(self, bundle):\n\n\n for field_name, field_object in self.fields.items():\n\n if not getattr(field_object, 'is_m2m', False):\n continue\n\n if not field_object.attribute:\n continue\n\n if field_object.readonly:\n continue\n # Get the manager.\n related_mngr = None\n if isinstance(field_object.attribute, basestring):\n related_mngr = getattr(bundle.obj, field_object.attribute)\n\n elif callable(field_object.attribute):\n related_mngr = field_object.attribute(bundle)\n\n if not related_mngr:\n continue\n\n if field_name not in bundle.data:\n continue\n\n new = []\n existing = []\n\n existing_objects = {}\n for obj in related_mngr.all():\n existing_objects[obj.id] = False\n\n related_objs = []\n\n for related_bundle in bundle.data[field_name]:\n if related_bundle.obj.id is None:\n new.append(related_bundle)\n continue\n if related_bundle.obj.id in existing_objects.keys():\n existing_objects[related_bundle.obj.id] = True\n existing.append(related_bundle)\n continue\n # We have an id, but we're not existing... odd.\n new.append(related_bundle)\n\n related_mngr.add(*[n.obj for n in new])\n\n to_delete = filter(lambda o: existing_objects[o] == False, existing_objects.keys())\n if len(to_delete) > 0:\n delete_on_unlink = getattr(field_object, \"delete_on_unlink\", False)\n if delete_on_unlink == True:\n #TODO: Soft delete, if enabled.\n\n for obj in related_mngr.filter(id__in=to_delete):\n if hasattr(obj, 'soft_delete'):\n obj.soft_delete()\n else:\n obj.delete()\n else:\n for a in related_mngr.filter(id__in=to_delete):\n related_mngr.remove(a)\n\n for related_bundle in existing:\n pass\n # related_bundle.obj.save()",
"def handle_m2m_field(self, obj, field):\n if hasattr(field, \"_priv_name\") or field.rel.through._meta.auto_created:\n fname = (field._priv_name if hasattr(field, \"_priv_name\")\n else field.name)\n if fname in self.relations:\n # perform full serialization of M2M\n serializer = Serializer()\n options = {}\n if isinstance(self.relations, dict):\n if isinstance(self.relations[fname], dict):\n options = self.relations[fname]\n self._fields[fname] = [\n serializer.serialize([related], **options)[0]\n for related in getattr(obj, fname).iterator()]\n else:\n # emulate the original behaviour and serialize to a list of \n # primary key values\n if self.use_natural_keys and hasattr(field.rel.to, 'natural_key'):\n m2m_value = lambda value: value.natural_key()\n else:\n m2m_value = lambda value: smart_unicode(\n value._get_pk_val(), strings_only=True)\n self._fields[fname] = [m2m_value(related)\n for related in getattr(obj, fname).iterator()]",
"def _post_process_reverse_relations(self, obj: Object):\n # check added fields\n for field in obj.fields.values():\n if isinstance(field, RelatedObjectField) and field.link_type == LINK_TYPES.INNER:\n if field.reverse_field and field.reverse_field.name not in field.obj.fields:\n field.obj.fields[field.reverse_field.name] = field.reverse_field\n self.update(field.obj)",
"def _construct_many_to_many_relationship_artifacts(required=False):\n return schemas_artifacts.types.ManyToManyRelationshipPropertyArtifacts(\n type=types.PropertyType.RELATIONSHIP,\n schema={}, # type: ignore\n sub_type=types.RelationshipType.MANY_TO_MANY,\n parent=\"RefModel\",\n backref_property=None,\n kwargs=None,\n write_only=None,\n description=None,\n required=required,\n secondary=\"secondary_1\",\n )",
"def Rfc2889oneToMany(self):\n\t\tfrom ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.rfc2889onetomany.rfc2889onetomany import Rfc2889oneToMany\n\t\treturn Rfc2889oneToMany(self)",
"def many_to_many(self):\n return make_immutable_fields_list(\n \"many_to_many\", (f for f in self._get_fields(reverse=False)\n if f.is_relation and f.many_to_many))",
"def _save_m2m(self, instance, load_data=None):\n\n def _save_from_data(instance, load_data):\n model_opts = instance._meta\n for f in chain(model_opts.many_to_many, model_opts.private_fields):\n if f.name in self.related_fields:\n rel = self.related_fields[f.name]\n if rel.many:\n m2m_items = load_data[f.name]\n f.save_form_data(instance, m2m_items)\n\n if not self.many:\n _save_from_data(instance, load_data)\n else:\n for data in load_data:\n _save_from_data(instance, data)",
"def after_get_relationship(self, obj, related_objects, relationship_field, related_type_, related_id_field,\n view_kwargs):\n raise NotImplementedError",
"def test_nested_relationship_many_many(session, oso, tag_nested_many_many_test_fixture):\n # TODO This direction doesn't work, because tag in user.tags is a concrete object.\n # allow(user, \"read\", post: Post) if tag in post.tags and tag in user.tags;\n oso.load_str(\n \"\"\"\n allow(user, \"read\", post: Post) if tag in post.tags and user in tag.users;\n \"\"\"\n )\n\n posts = authorize_model(\n oso, tag_nested_many_many_test_fixture[\"user\"], \"read\", session, Post\n )\n # TODO (dhatch): Check that this SQL query is correct, seems right from results.\n print_query(posts)\n assert tag_nested_many_many_test_fixture[\"user_eng_post\"] in posts\n assert tag_nested_many_many_test_fixture[\"user_user_post\"] in posts\n assert not tag_nested_many_many_test_fixture[\"random_post\"] in posts\n assert not tag_nested_many_many_test_fixture[\"not_tagged_post\"] in posts\n assert tag_nested_many_many_test_fixture[\"all_tagged_post\"] in posts\n\n posts = authorize_model(\n oso, tag_nested_many_many_test_fixture[\"other_user\"], \"read\", session, Post\n )\n assert not tag_nested_many_many_test_fixture[\"user_eng_post\"] in posts\n assert not tag_nested_many_many_test_fixture[\"user_user_post\"] in posts\n assert tag_nested_many_many_test_fixture[\"random_post\"] in posts\n assert not tag_nested_many_many_test_fixture[\"not_tagged_post\"] in posts\n assert tag_nested_many_many_test_fixture[\"all_tagged_post\"] in posts",
"def test_database_many_to_many_relationship(engine, sessionmaker):\n # Defining specification\n spec = {\n \"components\": {\n \"schemas\": {\n \"RefTable\": {\n \"properties\": {\n \"id\": {\"type\": \"integer\", \"x-primary-key\": True},\n \"name\": {\"type\": \"string\"},\n },\n \"x-tablename\": \"ref_table\",\n \"x-backref\": \"tables\",\n \"x-secondary\": \"association\",\n \"type\": \"object\",\n },\n \"Table\": {\n \"properties\": {\n \"id\": {\"type\": \"integer\", \"x-primary-key\": True},\n \"name\": {\"type\": \"string\"},\n \"ref_tables\": {\n \"type\": \"array\",\n \"items\": {\"$ref\": \"#/components/schemas/RefTable\"},\n },\n },\n \"x-tablename\": \"table\",\n \"type\": \"object\",\n },\n }\n }\n }\n # Creating model factory\n base = declarative.declarative_base()\n model_factory = open_alchemy.init_model_factory(spec=spec, base=base)\n model = model_factory(name=\"Table\")\n ref_model = model_factory(name=\"RefTable\")\n\n # Creating models\n base.metadata.create_all(engine)\n # Creating instance of model and ref_model\n ref_model_instance = ref_model(id=11, name=\"ref table name 1\")\n model_instance = model(id=12, name=\"table name 1\", ref_tables=[ref_model_instance])\n session = sessionmaker()\n session.add(ref_model_instance)\n session.add(model_instance)\n session.flush()\n\n # Querying session\n queried_model = session.query(model).first()\n assert queried_model.id == 12\n assert queried_model.name == \"table name 1\"\n assert len(queried_model.ref_tables) == 1\n assert queried_model.ref_tables[0].id == 11\n assert queried_model.ref_tables[0].name == \"ref table name 1\"\n queried_ref_model = session.query(ref_model).first()\n assert queried_ref_model.id == 11\n assert queried_ref_model.name == \"ref table name 1\"\n assert len(queried_ref_model.tables) == 1\n assert queried_ref_model.tables[0].id == 12\n assert queried_ref_model.tables[0].name == \"table name 1\"",
"def _get_many_to_many(*, schema: oa_types.Schema, schemas: oa_types.Schemas, **_):\n items_schema = peek.items(schema=schema, schemas=schemas)\n assert items_schema is not None\n\n parent = _get_parent(schema=items_schema, schemas=schemas)\n\n return types.ManyToManyRelationshipPropertyArtifacts(\n type=oa_types.PropertyType.RELATIONSHIP,\n sub_type=oa_types.RelationshipType.MANY_TO_MANY,\n schema=_calculate_one_to_x_schema(\n parent=parent, schema=schema, schemas=schemas\n ),\n required=False, # to be fixed on calling function\n parent=parent,\n backref_property=_get_backref_property(schema=items_schema, schemas=schemas),\n kwargs=_get_kwargs(parent=parent, schema=items_schema, schemas=schemas),\n write_only=_get_write_only(parent=parent, schema=schema, schemas=schemas),\n description=_get_description(parent=parent, schema=schema, schemas=schemas),\n secondary=_get_secondary(schema=items_schema, schemas=schemas),\n )",
"def create_forward_edge_to_edge_manager(superclass, rel, reverse):\n\n class EdgeRelatedManager(superclass):\n def __init__(self, instance=None):\n super().__init__()\n\n self.instance = instance\n\n if not reverse:\n self.model = rel.model\n self.query_field_name = rel.field.related_query_name()\n self.prefetch_cache_name = rel.field.name\n self.source_field_name = rel.field.e2e_field_name()\n self.target_field_name = rel.field.e2e_reverse_field_name()\n self.symmetrical = rel.symmetrical\n else:\n self.model = rel.related_model\n self.query_field_name = rel.field.name\n self.prefetch_cache_name = rel.field.related_query_name()\n self.source_field_name = rel.field.e2e_reverse_field_name()\n self.target_field_name = rel.field.e2e_field_name()\n self.symmetrical = False\n\n self.through = rel.through\n self.reverse = reverse\n\n self.source_field = self.through._meta.get_field(self.source_field_name)\n self.target_field = self.through._meta.get_field(self.target_field_name)\n\n self.core_filters = {}\n self.pk_field_names = {}\n for lh_field, rh_field in self.source_field.related_fields:\n core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name)\n self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)\n self.pk_field_names[lh_field.name] = rh_field.name\n\n self.related_val = self.source_field.get_foreign_related_value(instance)\n if None in self.related_val:\n raise ValueError('\"%r\" needs to have a value for field \"%s\" before '\n 'this many-to-many relationship can be used.' %\n (instance, self.pk_field_names[self.source_field_name]))\n # Even if this relation is not to pk, we require still pk value.\n # The wish is that the instance has been already saved to DB,\n # although having a pk value isn't a guarantee of that.\n if instance.pk is None:\n raise ValueError(\"%r instance needs to have a primary key value before \"\n \"a many-to-many relationship can be used.\" %\n instance.__class__.__name__)\n\n def __call__(self, *, manager):\n manager = getattr(self.model, manager)\n manager_class = create_forward_edge_to_edge_manager(manager.__class__, rel, reverse)\n return manager_class(instance=self.instance)\n do_not_call_in_templates = True\n\n def _build_remove_filters(self, removed_vals):\n filters = Q(**{self.source_field_name: self.related_val})\n # No need to add a subquery condition if removed_vals is a QuerySet without\n # filters.\n removed_vals_filters = (not isinstance(removed_vals, QuerySet) or\n removed_vals._has_filters())\n if removed_vals_filters:\n filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})\n if self.symmetrical:\n symmetrical_filters = Q(**{self.target_field_name: self.related_val})\n if removed_vals_filters:\n symmetrical_filters &= Q(\n **{'%s__in' % self.source_field_name: removed_vals})\n filters |= symmetrical_filters\n return filters\n\n def _apply_rel_filters(self, queryset):\n \"\"\"\n Filter the queryset for the instance this manager is bound to.\n \"\"\"\n queryset._add_hints(instance=self.instance)\n if self._db:\n queryset = queryset.using(self._db)\n return queryset._next_is_sticky().filter(**self.core_filters)\n\n def _remove_prefetched_objects(self):\n try:\n self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)\n except (AttributeError, KeyError):\n pass # nothing to clear from cache\n\n def get_queryset(self):\n try:\n return self.instance._prefetched_objects_cache[self.prefetch_cache_name]\n except (AttributeError, KeyError):\n queryset = super().get_queryset()\n return 
self._apply_rel_filters(queryset)\n\n def get_prefetch_queryset(self, instances, queryset=None):\n if queryset is None:\n queryset = super().get_queryset()\n\n queryset._add_hints(instance=instances[0])\n queryset = queryset.using(queryset._db or self._db)\n\n query = {'%s__in' % self.query_field_name: instances}\n queryset = queryset._next_is_sticky().filter(**query)\n\n # M2M: need to annotate the query in order to get the primary model\n # that the secondary model was actually related to. We know that\n # there will already be a join on the join table, so we can just add\n # the select.\n\n # For non-autocreated 'through' models, can't assume we are\n # dealing with PK values.\n fk = self.through._meta.get_field(self.source_field_name)\n join_table = fk.model._meta.db_table\n connection = connections[queryset.db]\n qn = connection.ops.quote_name\n queryset = queryset.extra(select={\n '_prefetch_related_val_%s' % f.attname:\n '%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})\n return (\n queryset,\n lambda result: tuple(\n getattr(result, '_prefetch_related_val_%s' % f.attname)\n for f in fk.local_related_fields\n ),\n lambda inst: tuple(\n f.get_db_prep_value(getattr(inst, f.attname), connection)\n for f in fk.foreign_related_fields\n ),\n False,\n self.prefetch_cache_name,\n False,\n )\n\n def add(self, *objs, through_defaults=None):\n self._remove_prefetched_objects()\n db = router.db_for_write(self.through, instance=self.instance)\n with transaction.atomic(using=db, savepoint=False):\n self._add_items(\n self.source_field_name, self.target_field_name, *objs,\n through_defaults=through_defaults,\n )\n # If this is a symmetrical m2m relation to self, add the mirror\n # entry in the m2m table. `through_defaults` aren't used here\n # because of the system check error fields.E332: Many-to-many\n # fields with intermediate tables must not be symmetrical.\n if self.symmetrical:\n self._add_items(self.target_field_name, self.source_field_name, *objs)\n add.alters_data = True\n\n def remove(self, *objs):\n self._remove_prefetched_objects()\n self._remove_items(self.source_field_name, self.target_field_name, *objs)\n remove.alters_data = True\n\n def clear(self):\n db = router.db_for_write(self.through, instance=self.instance)\n with transaction.atomic(using=db, savepoint=False):\n signals.e2e_changed.send(\n sender=self.through, action=\"pre_clear\",\n instance=self.instance, reverse=self.reverse,\n model=self.model, pk_set=None, using=db,\n )\n self._remove_prefetched_objects()\n filters = self._build_remove_filters(super().get_queryset().using(db))\n self.through._default_manager.using(db).filter(filters).delete()\n\n signals.e2e_changed.send(\n sender=self.through, action=\"post_clear\",\n instance=self.instance, reverse=self.reverse,\n model=self.model, pk_set=None, using=db,\n )\n clear.alters_data = True\n\n def set(self, objs, *, clear=False, through_defaults=None):\n # Force evaluation of `objs` in case it's a queryset whose value\n # could be affected by `manager.clear()`. 
Refs #19816.\n objs = tuple(objs)\n\n db = router.db_for_write(self.through, instance=self.instance)\n with transaction.atomic(using=db, savepoint=False):\n if clear:\n self.clear()\n self.add(*objs, through_defaults=through_defaults)\n else:\n old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))\n\n new_objs = []\n for obj in objs:\n fk_val = (\n self.target_field.get_foreign_related_value(obj)[0]\n if isinstance(obj, self.model) else obj\n )\n if fk_val in old_ids:\n old_ids.remove(fk_val)\n else:\n new_objs.append(obj)\n\n self.remove(*old_ids)\n self.add(*new_objs, through_defaults=through_defaults)\n set.alters_data = True\n\n def create(self, *, through_defaults=None, **kwargs):\n db = router.db_for_write(self.instance.__class__, instance=self.instance)\n new_obj = super(EdgeRelatedManager, self.db_manager(db)).create(**kwargs)\n self.add(new_obj, through_defaults=through_defaults)\n return new_obj\n create.alters_data = True\n\n def get_or_create(self, *, through_defaults=None, **kwargs):\n db = router.db_for_write(self.instance.__class__, instance=self.instance)\n obj, created = super(EdgeRelatedManager, self.db_manager(db)).get_or_create(**kwargs)\n # We only need to add() if created because if we got an object back\n # from get() then the relationship already exists.\n if created:\n self.add(obj, through_defaults=through_defaults)\n return obj, created\n get_or_create.alters_data = True\n\n def update_or_create(self, *, through_defaults=None, **kwargs):\n db = router.db_for_write(self.instance.__class__, instance=self.instance)\n obj, created = super(EdgeRelatedManager, self.db_manager(db)).update_or_create(**kwargs)\n # We only need to add() if created because if we got an object back\n # from get() then the relationship already exists.\n if created:\n self.add(obj, through_defaults=through_defaults)\n return obj, created\n update_or_create.alters_data = True\n\n def _add_items(self, source_field_name, target_field_name, *objs, through_defaults=None):\n # source_field_name: the PK fieldname in join table for the source object\n # target_field_name: the PK fieldname in join table for the target object\n # *objs - objects to add. 
Either object instances, or primary keys of object instances.\n through_defaults = through_defaults or {}\n\n # If there aren't any objects, there is nothing to do.\n from django.db.models import Model\n if objs:\n new_ids = set()\n for obj in objs:\n if isinstance(obj, self.model):\n if not router.allow_relation(obj, self.instance):\n raise ValueError(\n 'Cannot add \"%r\": instance is on database \"%s\", value is on database \"%s\"' %\n (obj, self.instance._state.db, obj._state.db)\n )\n fk_val = self.through._meta.get_field(\n target_field_name).get_foreign_related_value(obj)[0]\n if fk_val is None:\n raise ValueError(\n 'Cannot add \"%r\": the value for field \"%s\" is None' %\n (obj, target_field_name)\n )\n new_ids.add(fk_val)\n elif isinstance(obj, Model):\n raise TypeError(\n \"'%s' instance expected, got %r\" %\n (self.model._meta.object_name, obj)\n )\n else:\n new_ids.add(obj)\n\n db = router.db_for_write(self.through, instance=self.instance)\n vals = (self.through._default_manager.using(db)\n .values_list(target_field_name, flat=True)\n .filter(**{\n source_field_name: self.related_val[0],\n '%s__in' % target_field_name: new_ids,\n }))\n new_ids.difference_update(vals)\n\n with transaction.atomic(using=db, savepoint=False):\n if self.reverse or source_field_name == self.source_field_name:\n # Don't send the signal when we are inserting the\n # duplicate data row for symmetrical reverse entries.\n signals.e2e_changed.send(\n sender=self.through, action='pre_add',\n instance=self.instance, reverse=self.reverse,\n model=self.model, pk_set=new_ids, using=db,\n )\n\n # Add the ones that aren't there already\n self.through._default_manager.using(db).bulk_create([\n self.through(**through_defaults, **{\n '%s_id' % source_field_name: self.related_val[0],\n '%s_id' % target_field_name: obj_id,\n })\n for obj_id in new_ids\n ])\n\n if self.reverse or source_field_name == self.source_field_name:\n # Don't send the signal when we are inserting the\n # duplicate data row for symmetrical reverse entries.\n signals.e2e_changed.send(\n sender=self.through, action='post_add',\n instance=self.instance, reverse=self.reverse,\n model=self.model, pk_set=new_ids, using=db,\n )\n\n def _remove_items(self, source_field_name, target_field_name, *objs):\n # source_field_name: the PK colname in join table for the source object\n # target_field_name: the PK colname in join table for the target object\n # *objs - objects to remove. 
Either object instances, or primary\n # keys of object instances.\n if not objs:\n return\n\n # Check that all the objects are of the right type\n old_ids = set()\n for obj in objs:\n if isinstance(obj, self.model):\n fk_val = self.target_field.get_foreign_related_value(obj)[0]\n old_ids.add(fk_val)\n else:\n old_ids.add(obj)\n\n db = router.db_for_write(self.through, instance=self.instance)\n with transaction.atomic(using=db, savepoint=False):\n # Send a signal to the other end if need be.\n signals.e2e_changed.send(\n sender=self.through, action=\"pre_remove\",\n instance=self.instance, reverse=self.reverse,\n model=self.model, pk_set=old_ids, using=db,\n )\n target_model_qs = super().get_queryset()\n if target_model_qs._has_filters():\n old_vals = target_model_qs.using(db).filter(**{\n '%s__in' % self.target_field.target_field.attname: old_ids})\n else:\n old_vals = old_ids\n filters = self._build_remove_filters(old_vals)\n self.through._default_manager.using(db).filter(filters).delete()\n\n signals.e2e_changed.send(\n sender=self.through, action=\"post_remove\",\n instance=self.instance, reverse=self.reverse,\n model=self.model, pk_set=old_ids, using=db,\n )\n\n return EdgeRelatedManager",
"def backfillRelations(project, flat_type):\n if flat_type == State:\n # Fill in media relations.\n relations = []\n for obj in State.objects.filter(project=project):\n for media in obj.polymorphic.association.media.all():\n media_states = State.media.through(\n state_id=obj.id,\n media_id=media.media_polymorphic.id,\n )\n relations.append(media_states)\n if len(relations) > 1000:\n State.media.through.objects.bulk_create(relations)\n logger.info(f\"Created {len(relations)} many-to-many relations from State to Media...\")\n relations = []\n State.media.through.objects.bulk_create(relations)\n logger.info(f\"Created {len(relations)} many-to-many relations from State to Media...\")\n\n # Fill in localization relations.\n relations = []\n for obj in State.objects.filter(project=project):\n if isinstance(obj.polymorphic.association, LocalizationAssociation):\n for localization in obj.polymorphic.association.localizations.all():\n localization_states = State.localizations.through(\n state_id=obj.id,\n localization_id=localization.localization_polymorphic.id,\n )\n relations.append(localization_states)\n if len(relations) > 1000:\n State.localizations.through.objects.bulk_create(relations)\n logger.info(f\"Created {len(relations)} many-to-many relations from State to Localization...\")\n relations = []\n State.localizations.through.objects.bulk_create(relations)\n logger.info(f\"Created {len(relations)} many-to-many relations from State to Localization...\")\n\n if flat_type == Leaf:\n # Fill in parent relations.\n leaves = []\n for obj in Leaf.objects.filter(project=project).iterator():\n if obj.polymorphic.parent:\n obj.parent = obj.polymorphic.parent.leaf_polymorphic\n leaves.append(obj)\n if len(leaves) > 1000:\n Leaf.objects.bulk_update(leaves, ['parent'])\n logger.info(f\"Updated {len(leaves)} parent relations for Leaf...\")\n leaves = []\n Leaf.objects.bulk_update(leaves, ['parent'])\n logger.info(f\"Updated {len(leaves)} parent relations for Leaf...\")",
"def forwards(self, orm):\r\n\r\n for tag in orm.Tag.objects.filter(slug__isnull=True):\r\n tag.save()\r\n\r\n for tag in orm.Tag.objects.filter(slug=''):\r\n tag.save()",
"def test_portals_id_members_rel_fk_put(self):\n pass",
"def extract_manytomany_values(self, values, model):\n\n manytomany_values = {}\n\n for name, value in values.items():\n model_field = model._meta.get_field(name)\n if isinstance(model_field, ManyToManyField):\n manytomany_values[name] = values[name]\n\n for name in manytomany_values.keys():\n del values[name]\n\n return values, manytomany_values",
"def _construct_one_to_many_relationship_artifacts(required=False):\n return schemas_artifacts.types.OneToManyRelationshipPropertyArtifacts(\n type=types.PropertyType.RELATIONSHIP,\n schema={}, # type: ignore\n sub_type=types.RelationshipType.ONE_TO_MANY,\n parent=\"RefModel\",\n backref_property=None,\n kwargs=None,\n write_only=None,\n description=None,\n required=required,\n foreign_key=\"foreign.key\",\n foreign_key_property=\"foreign_key\",\n )",
"def hydrate_m2m(self, bundle):\n\n old_data = bundle.data.copy()\n\n m2m_bundle = super(ExtendedModelResource, self).hydrate_m2m(bundle)\n\n # Drop fields that havn't got blank=True set. Otherwise we'll wipe them.\n for field_name, field_obj in m2m_bundle.data.items():\n if field_name not in old_data.keys() and self.fields[field_name].blank == False:\n del m2m_bundle.data[field_name]\n del old_data\n return m2m_bundle",
"def _doManyById(self, model, idList, field):\r\n xpDict = self.getMap(idList, userModel=model)\r\n for idTarget in xpDict.keys():\r\n addModel = xpDict[idTarget]\r\n field.add(addModel)",
"async def insert_with_relations(entity, models: List[Dict]) -> None:"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reads the table with the help of the data_manager module. Returns the title (str) of the item with the given id (str), or None in case of a non-existing id.
|
def get_title_by_id(id_):
sales_table = data_manager.get_table_from_file("sales/sales.csv")
title = get_title_by_id_from_table(sales_table, id_)
return title
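The lookup itself is delegated to a table-level helper that is not shown here. A minimal sketch of what such a get_title_by_id_from_table helper could look like, assuming the id is stored in the first column and the title in the second (the column positions are assumptions):

def get_title_by_id_from_table(table, id_):
    ID_INDEX = 0
    TITLE_INDEX = 1
    for row in table:
        # compare the given id against the id column of each row
        if row[ID_INDEX] == id_:
            return row[TITLE_INDEX]
    # no row matched the given id
    return None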
|
[
"def find_item_title(item_id):\n if item_id == 'UNKNOWN_ID':\n return None\n if item_id in ITEM_CACHE['titles']:\n return ITEM_CACHE['titles'][item_id]\n item_obj = ff_utils.get_metadata(item_id, ff_env='data', add_on='frame=object')\n title = item_obj.get('display_title')\n ITEM_CACHE['titles'][item_id] = title\n return title",
"def FetchById( id ):\n\tresult = None\n\t\n\ttry:\n\t\tLog.info(('SHOPS-Fetch-Id:', 'Trying to grab data from table using Id'))\n\t\tquery = \"SELECT * FROM shop WHERE id = %s;\"\n\t\tdb.cursor.execute( query, ( id, ) )\n\t\tresult = db.cursor.fetchone()\n\t\tLog.info(('SHOPS-Fetch-Id:', 'Successfully grabbed data'))\n\t\t\n\texcept Error as e:\n\t\tLog.error(('SHOPS-Fetch-Id:', e))\n\t\tLog.info(('SHOPS-Fetch-Id:', query))\n\t\tLog.info(('SHOPS-Fetch-Id:', 'Failed to grab data'))\n\treturn result",
"def by_id(self, id_):\n with self.cursor() as cursor:\n cursor.execute(\"SELECT TASK, NAME, DESCRIPTION FROM TASK WHERE\"\n \" TASK = ?\", (id_,))\n row = cursor.fetchone()\n if not row:\n raise KeyError(\"No Task with id: {}\".format(id_))\n return Task.map_row(row)",
"def row(self,id):\n\t\tresult=self.where('id =' + str(id))\n\t\tif len(result) == 0:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn autoRow(result[0].content,self)",
"def read(self, id):",
"def load(self, _id):\n raise NotImplementedError(\"load item by id\")",
"def get_item(id='', title='', category_id='', category_name=''):\n if id:\n # search by id\n if session.query(Item.id).filter_by(id=id).scalar() is None:\n # return None if it doesn't exist\n return None\n else:\n return session.query(Item).filter_by(id=id).one()\n elif title and (category_id or category_name):\n if not category_id:\n category_id = category.get_category(name=category_name).id\n # search by name\n if (session.query(Item.id)\n .filter_by(title=title,\n category_id=category_id).scalar()) is None:\n # return None if it doesn't exist\n return None\n else:\n return (session.query(Item)\n .filter_by(title=title,\n category_id=category_id).one())\n else:\n dprint(1, \"Insufficient parameters passed to get_item.\"\n \"id=%s, title=%s, category_id=%s\" % (id, title, category_id))\n return None",
"def get_row_from_id(conn, table_name, id):\r\n c = conn.cursor()\r\n print(c)\r\n c.execute(\"SELECT * FROM {tn} WHERE Student_ID={id}\"\r\n .format(tn=table_name, id=id))\r\n row_info = [str(val) for val in c.fetchall()[0]]\r\n print(row_info)\r\n return row_info",
"def __getitem__(self, id):\n # First, check to see if enough time has passed since my\n # last query.\n self.limiter.wait()\n \n try:\n handle = NCBI.efetch(\n db=\"pubmed\", id=id, retmode='text', rettype='medlars')\n except IOError, x:\n # raise a KeyError instead of an IOError\n # XXX I really should distinguish between a real IOError and\n # if the id is not in the database.\n raise KeyError, x\n if self.parser is not None:\n return self.parser.parse(handle)\n return handle.read()",
"def load_items(id_lv, table_name, additional_fields={}):\n\n db = db_handler.get_dataset()\n return db[table_name].find(\n id_lv=id_lv,\n **additional_fields,\n order_by='cislo_zaznamu'\n )",
"def get_title_text(doc_id):\n data=read_data(\"doc-data.json\")\n\n text = data.get(doc_id).get(\"Text\")\n\n title = data.get(doc_id).get(\"Title\")\n\n return title[0] + text",
"def read_table_item(table, pk_name, pk_value):\n response = table.get_item(Key={pk_name: pk_value})\n\n return response",
"def get(self, id: int) -> Optional[Item]:\n return self.session.query(Item).get(id)",
"def getId(self, id):\n return self.getDataset().find_one({'_id': id})",
"def get(self, id):\n\n\t\tself._fp = open(self._filename, 'r')\n\t\tself._fp.seek(self._offset[id])\n\t\theader = self._fp.readline()\n\t\tm = re.search(r'>\\s*(\\S+)\\s*(.*)', header)\n\t\tid = m[1]\n\t\tdesc = m[2]\n\t\tseq = []\n\t\twhile True:\n\t\t\tline = self._fp.readline()\n\t\t\tif line[0:1] == '>': break\n\t\t\tif line == '': break\n\t\t\tline = line.replace(' ', '')\n\t\t\tseq.append(line.strip())\n\t\tself._fp.close()\n\t\treturn FASTA_entry(id, desc, \"\".join(seq))",
"def get_food_item_by_item_id(self, item_id):\n sql = 'SELECT * FROM {} WHERE id={}'.format(FoodItem.DB_TABLE_NAME, item_id)\n row = self._query_db(sql, (), True)\n return self.__parse_food_item(row)",
"def readById(self, id):\n print(\"Searching for the restaurant with ID: \" + str(id))\n result = session.query(Restaurant).filter_by(id=id).one()\n return result",
"def getMovieTitle(movieID):\n f = open(\"ml-100k/u.item\")\n\n for line in f.readlines():\n d = line.split(\"|\")\n if int(d[0]) == movieID:\n return d[1]\n f.close()",
"def get_title_by_imdbid(imdb_id):\r\n\r\n url = f\"https://api.themoviedb.org/3/find/{imdb_id}?api_key={TMDB_API_KEY}&external_source=imdb_id&\"\r\n\r\n response = requests.get(url)\r\n\r\n return json.loads(response.text)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reads the table with the help of the data_manager module. Returns the _id_ of the item that was sold most recently.
|
def get_item_id_sold_last():
# your code
sales_table = data_manager.get_table_from_file("sales/sales.csv")
item_id = get_item_id_sold_last_from_table(sales_table)
return item_id
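The date comparison happens in a helper that is not shown here. A sketch of one possible get_item_id_sold_last_from_table, assuming the year, month and day of the sale are stored in columns 5, 3 and 4 (treat these indices, and the table layout, as assumptions):

def get_item_id_sold_last_from_table(table):
    ID_INDEX = 0
    MONTH_INDEX = 3
    DAY_INDEX = 4
    YEAR_INDEX = 5
    # the row with the largest (year, month, day) triple is the most recent sale
    latest_row = max(
        table,
        key=lambda row: (int(row[YEAR_INDEX]), int(row[MONTH_INDEX]), int(row[DAY_INDEX])),
    )
    return latest_row[ID_INDEX]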
|
[
"def get_item_id_sold_last():\n table = data_manager.get_table_from_file(sales_file)\n\n recently_sold = (0, 0)\n\n for line, games in enumerate(table):\n if len(games[3]) == 1:\n month = '0' + str(games[3])\n else:\n month = str(games[3])\n\n if len(games[4]) == 1:\n day = '0' + str(games[4])\n else:\n day = str(games[4])\n\n sold_date = str(games[5]) + month + day\n\n if int(sold_date) > int(recently_sold[0]):\n recently_sold = (sold_date, line)\n\n line_with_search_line = recently_sold[1]\n return table[line_with_search_line][0]",
"def get_most_sold_item(df):\n return get_most_of_by(df, 'Item', 'Units')",
"def get_most_sold_item(df):\n\n\n top = df.groupby('Item').Units.sum().nlargest(1)\n\n return list(top.items())[0]",
"def read_last_price(table,db='rofex.db',conn=None):\n table = rename_table(table)\n if conn == None:\n conn = make_connection(db)\n query = 'SELECT LA_price FROM \"{}\" ORDER BY date DESC LIMIT 1'.format(table)\n c = conn.cursor()\n value = c.execute(query).fetchone()\n return value[0]",
"def get_recent_item(date):\n logger.debug(\"Requested the recent item added on %s\", date)\n return spark_query_engine.get_recent_item_api(date)",
"def get_item_table(self) -> ItemTable:\n\t\treturn self.item_table",
"def read_table_item(table, pk_name, pk_value):\n response = table.get_item(Key={pk_name: pk_value})\n\n return response",
"def getLastIdBaseline(self):\n try:\n cursor = self.db.cursor()\n sql = \"SELECT MAX(id) FROM baseline;\"\n cursor.execute(sql)\n resultset = cursor.fetchall()\n return resultset\n except:\n return \"Error!\"",
"def get_items_sold(self, item, time_format = 'unix'):\n if not item.isdigit():\n item = self.get_item_id(item)\n\n time_data = time.time()\n if time_format == 'local':\n time_data = time.strftime(\"%d:%m:%y %H:%M:%S\", time.localtime(time_data))\n\n\n data = self.methods.get_world_market_sub_list(item)['detailList']\n items_sold = data[0]['totalTradeCount']\n\n return (time_data, items_sold)",
"def item_id(self):\r\n return self.content['item_id']",
"def item_id(self):\n return self.content[\"item_id\"]",
"def getLastRowID(self): \n return self.lastRowID",
"def get_food_item_by_item_id(self, item_id):\n sql = 'SELECT * FROM {} WHERE id={}'.format(FoodItem.DB_TABLE_NAME, item_id)\n row = self._query_db(sql, (), True)\n return self.__parse_food_item(row)",
"def FetchById( id ):\n\tresult = None\n\t\n\ttry:\n\t\tLog.info(('SHOPS-Fetch-Id:', 'Trying to grab data from table using Id'))\n\t\tquery = \"SELECT * FROM shop WHERE id = %s;\"\n\t\tdb.cursor.execute( query, ( id, ) )\n\t\tresult = db.cursor.fetchone()\n\t\tLog.info(('SHOPS-Fetch-Id:', 'Successfully grabbed data'))\n\t\t\n\texcept Error as e:\n\t\tLog.error(('SHOPS-Fetch-Id:', e))\n\t\tLog.info(('SHOPS-Fetch-Id:', query))\n\t\tLog.info(('SHOPS-Fetch-Id:', 'Failed to grab data'))\n\treturn result",
"def get_last_trade_date():\n df = sf.get_stock('IBM')\n return df.index.max()",
"def get_sale_by_id(self, sale_id):\n self.prod_id = sale_id\n self.cursor.execute(\n \"Select * from sales where sales_id = %s\",\n (self.prod_id,)\n )\n sale = self.cursor.fetchone()\n return sale",
"def get_sale_by_id(id):\n \n return Sale.query.filter(Sale.id==id).first()",
"def get_last_processed_id(connection):\n query = 'select id from last_processed_id;'\n res = connection.execute(text(query)).fetchall()\n if res:\n first_id = res[-1][0]\n else:\n query = 'select id from public.raw_data limit 1'\n res = connection.execute(text(query)).fetchall()\n if not res:\n raise Exception('Tables last_processed_id and raw_data is empty, run fill_db.py !')\n first_id = res[0][0]\n\n print('first_id', first_id)\n return first_id",
"def get_free_id(self, table_name):\n\n self.cursor.execute(\"SELECT Id FROM \" + table_name + \";\")\n content = self.cursor.fetchall()\n ids = [list(id_)[0] for id_ in content]\n\n if ids:\n\n free_ids = [item for item in range(1, len(ids) + 1) if item not in ids]\n\n if free_ids:\n\n return free_ids[0]\n\n else:\n\n return ids[-1] + 1\n\n return 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reads the table of sales with the help of the data_manager module. Returns the sum of the prices of the items in the item_ids.
|
def get_the_sum_of_prices(item_ids):
# your code
table = data_manager.get_table_from_file("sales/sales.csv")
return get_the_sum_of_prices_from_table(table, item_ids)
|
[
"def get_the_sum_of_prices_from_table(table, item_ids):\n\n # your code\n ID_INDEX = 0\n PRICE_INDEX = 2\n sum_of_prices = 0\n for row in table:\n for single_id in item_ids:\n if single_id == row[ID_INDEX]:\n sum_of_prices += int(row[PRICE_INDEX])\n return sum_of_prices",
"def all_sales():\n return [\n {\n \"sale_id\": 1,\n \"product\": \"Samsung Flatscreen Tv\",\n \"quantity\": 2,\n \"price\": 4500000\n },\n {\n \"sale_id\": 2,\n \"product\": \"Toshiba Flatscreen Tv\",\n \"quantity\": 6,\n \"price\": 9000000\n },\n {\n \"sale_id\": 3,\n \"product\": \"LG Flatscreen Tv\",\n \"quantity\": 12,\n \"price\": 1500000\n },\n {\n \"sale_id\": 4,\n \"product\": \"Sony Flatscreen Tv\",\n \"quantity\": 1,\n \"price\": 500000\n },\n {\n \"sale_id\": 5,\n \"product\": \"Hisense Flatscreen Tv\",\n \"quantity\": 2,\n \"price\": 800000\n },\n ]",
"def get_sales():\n sales = []\n file = open('sales.csv')\n for line in file:\n # using unpacking syntax\n id, date, total_price, num_products, product_id, employee_id = line.split(\n ',')\n\n sales.append(dict_from_entries(sales_keys, (\n int(id),\n date,\n float(total_price),\n int(num_products),\n int(product_id),\n int(employee_id.rstrip()),\n )))\n\n file.close()\n return tuple(sales)",
"def get_sales_by_user_id(self, user_id):\n self.cursor.execute(\n \"Select * from sales where user_id = %s\",\n (user_id,)\n )\n sale = self.cursor.fetchall()\n return sale",
"def get_all_sales(self):\n self.cursor.execute(\"Select * from sales\")\n sales = self.cursor.fetchall()\n return sales",
"def get_all_sales_ids_for_customer_ids():\n\n # your code\n sales_table = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_all_sales_ids_for_customer_ids_from_table(sales_table)",
"def get_sale_by_prod_id(self, prod_id):\n self.cursor.execute(\"Select * from sales where product_id = %s\",\n (prod_id,))\n sales = self.cursor.fetchall()\n if sales:\n return sales",
"def get_item_id_sold_last():\n\n # your code\n sales_table = data_manager.get_table_from_file(\"sales/sales.csv\")\n item_id = get_item_id_sold_last_from_table(sales_table)\n return item_id",
"def calc_total_price(items):\n total_price = 0\n for item in items:\n total_price += item.get('price') * item.get('quantity')\n return total_price",
"def calculate_stock_price(items):\n total = 0\n for key in items:\n print key\n print \"price: %s\" % prices[key]\n print \"stock: %s\" % stock[key]\n total += prices[key] * stock[key]\n return total",
"def get_sales(start_date: datetime.datetime, end_date: datetime.datetime, seller_skus: set) -> List:\n\n print(\"getting sales data...\")\n interval = create_date_interval(start_date, end_date)\n\n return _get_sales(interval, Granularity.HOUR, seller_skus)",
"def calculate_item_total(order_items):\n item_total = 0\n\n for order in order_items:\n item_total += order.get('price') * order.get('quantity')\n\n return item_total",
"def readFromDtb(self) -> list:\n\n self.cursor.execute('SELECT Expense, Price, MoreInfo FROM ' + self.table)\n return self.cursor.fetchall()",
"def get_items_sold(self, item, time_format = 'unix'):\n if not item.isdigit():\n item = self.get_item_id(item)\n\n time_data = time.time()\n if time_format == 'local':\n time_data = time.strftime(\"%d:%m:%y %H:%M:%S\", time.localtime(time_data))\n\n\n data = self.methods.get_world_market_sub_list(item)['detailList']\n items_sold = data[0]['totalTradeCount']\n\n return (time_data, items_sold)",
"def get_item_data(items, locations):\n items = list(items)\n if type(items) is list:\n for i in range(len(items)):\n items[i] = \",\".join(items[i])\n items = \",\".join(items)\n url = \"https://albion-online-data.com/api/v2/stats/prices/{}?locations={}\".format(\n items, \",\".join(locations)\n )\n return get_request(url)",
"def calculateAmount(self) -> float:\n\n amount = 0\n for item in self.items:\n amount += item.price\n return amount",
"def ItemPrice(request, item_id):\n return _SetField(models.Item, float, request, item_id)",
"def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict): \n res = {}\n for line in self.browse(cr, uid, ids):\n res[line.id] = line.price_unit * line.product_qty\n return res",
"def export_sales_to_csv():\n with open('sold_items.csv', 'w', newline='') as csvfile:\n sold_writer = csv.writer(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)\n for item in sold_items:\n sold_writer.writerow(item)\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the sum of the prices of the items in the item_ids.
|
def get_the_sum_of_prices_from_table(table, item_ids):
# your code
ID_INDEX = 0
PRICE_INDEX = 2
sum_of_prices = 0
for row in table:
for single_id in item_ids:
if single_id == row[ID_INDEX]:
sum_of_prices += int(row[PRICE_INDEX])
return sum_of_prices
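A quick usage sketch with a hand-built table; the rows below are made-up sample data, not taken from the real sales CSV:

sample_table = [
    ["kH14Bf", "Age of Empires", "120", "6", "11", "2017", "cid1"],
    ["Q2mH7d", "Civilization VI", "200", "1", "3", "2018", "cid2"],
]
# the prices of the two selected ids are summed: 120 + 200 = 320
print(get_the_sum_of_prices_from_table(sample_table, ["kH14Bf", "Q2mH7d"]))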
|
[
"def get_the_sum_of_prices(item_ids):\n\n # your code\n table = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_the_sum_of_prices_from_table(table, item_ids)",
"def calc_total_price(items):\n total_price = 0\n for item in items:\n total_price += item.get('price') * item.get('quantity')\n return total_price",
"def calculateAmount(self) -> float:\n\n amount = 0\n for item in self.items:\n amount += item.price\n return amount",
"def calculate_item_total(order_items):\n item_total = 0\n\n for order in order_items:\n item_total += order.get('price') * order.get('quantity')\n\n return item_total",
"def ItemPrice(request, item_id):\n return _SetField(models.Item, float, request, item_id)",
"def lineitem_price(self):\n price = Decimal(\"0.00\")\n for li in self.lineitems.all():\n price += li.total\n return price",
"def calculate_stock_price(items):\n total = 0\n for key in items:\n print key\n print \"price: %s\" % prices[key]\n print \"stock: %s\" % stock[key]\n total += prices[key] * stock[key]\n return total",
"def get_item_price(self, item):\n if not item.isdigit():\n item = self.get_item_id(item)\n\n data = self.methods.get_world_market_sub_list(item)['detailList']\n item_value = data[0]['pricePerOne']\n\n return item_value",
"def total(self):\n total = 0.0 \n for vinylid in self.cart:\n total += self.get_price(vinylid) \n return total",
"def subtotal_calc(selected_products):\n subtotal = 0\n for product in selected_products:\n price = product[\"price\"]\n subtotal = price + subtotal\n return subtotal",
"def insurance_price(self, items: Iterable[Item]) -> int:\n total_price: float = sum(\n self.__templates_repository.get_template(item).props.CreditsPrice\n for item in items\n )\n total_price *= self.__insurance_price_multiplier\n total_price -= total_price * min(self.standing.current_standing, 0.5)\n return int(total_price)",
"def _get_price(self):\n return sum((cart_line.price for cart_line in self.values()))",
"def sum_all(everything):\n price = 0\n for item in everything:\n price += item[1]\n return price",
"def get_total_price(receipt):\n return sum(price for name, price in receipt)",
"def calculate_total_price(prices, discount):\n \n sum_prices = 0\n\n for price in prices:\n dis = discount/100\n pricedis = price - price * dis\n print(pricedis)\n sum_prices = sum_prices + pricedis\n print(sum)\n return math.floor(sum_prices)",
"def sumBy(items, function):\n return __builtin__.sum(map(function, items))",
"def cost_of(amount, item, hours, products):\n for items in products:\n if items[0] == item:\n return float(items[2]) * float(amount) * float(hours)",
"def get_cost(prices, item, quantity):\n return quantity * prices[item]",
"def total_items(country='USA'):\n # Calculate the total for a given country\n items_total = session.query(func.sum(Items.UnitPrice * Items.Quantity)).\\\n filter(Invoices.InvoiceId == Items.InvoiceId).\\\n filter(Invoices.BillingCountry == country).scalar()\n\n return jsonify(float(items_total))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reads the sales table with the help of the data_manager module. Returns the customer_id that belongs to the given sale_id or None if no such sale_id is in the table.
|
def get_customer_id_by_sale_id(sale_id):
table = data_manager.get_table_from_file("sales/sales.csv")
return get_customer_id_by_sale_id_from_table(table, sale_id)
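A sketch of the table-level helper this delegates to, assuming the sale id sits in the first column and the customer id in the last one (both positions are assumptions):

def get_customer_id_by_sale_id_from_table(table, sale_id):
    ID_INDEX = 0
    CUSTOMER_ID_INDEX = -1
    for row in table:
        if row[ID_INDEX] == sale_id:
            return row[CUSTOMER_ID_INDEX]
    # no sale with the given id exists in the table
    return None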
|
[
"def get_sale_by_id(self, sale_id):\n self.prod_id = sale_id\n self.cursor.execute(\n \"Select * from sales where sales_id = %s\",\n (self.prod_id,)\n )\n sale = self.cursor.fetchone()\n return sale",
"def get_sale(sales_id):\n #use Sales instance to call get_sale function\n a_single_sale = Sales.get_sale(sales_id)\n if a_single_sale:\n return a_single_sale, 200\n else:\n raise InvalidUsage('There is no sale record matching that ID', status_code=404)",
"def get_customer_id_by_sale_id_from_table(table, sale_id):\n\n for element in table:\n if element[0] == sale_id:\n return element[6]",
"def get_sale_by_prod_id(self, prod_id):\n self.cursor.execute(\"Select * from sales where product_id = %s\",\n (prod_id,))\n sales = self.cursor.fetchall()\n if sales:\n return sales",
"def get_sale_by_id(id):\n \n return Sale.query.filter(Sale.id==id).first()",
"def get_all_sales_ids_for_customer_ids():\n\n # your code\n sales_table = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_all_sales_ids_for_customer_ids_from_table(sales_table)",
"def get_sales_by_user_id(self, user_id):\n self.cursor.execute(\n \"Select * from sales where user_id = %s\",\n (user_id,)\n )\n sale = self.cursor.fetchall()\n return sale",
"def get_sales_by_customer(entity):\n \n return Sale.query.filter(Sale.entity_id==entity.id).all().order_by(\"date\")",
"def sale_get_by_id(current_user, sale_id):\n sale = Sale.query.filter_by(id=sale_id).first()\n sale_json = {\n \"id\": sale.id,\n \"product\": Product.query.filter_by(id=sale.product_id),\n \"quantity\": sale.quantity,\n \"sellingPrice\": sale.sellingPrice,\n \"created_on\": sale.created_on,\n }\n return jsonify(sale_json), 200",
"def get_sales():\n all_sales = Sales.get_all_sales()\n if all_sales:\n return all_sales, 200\n else:\n raise InvalidUsage('No sales added yet', status_code=404)",
"def get_sale_by_invoice(invoice_no):\n \n return Sale.query.filter(Sale.invoice_no==invoice_no).first()",
"def find(customer_id):\n if not Customer.data:\n return None\n customers = [customer for customer in Customer.data if customer.id == customer_id]\n if customers:\n return customers[0]\n return None",
"def find_using_magento_id(cls, order_id):\n # each sale has to be unique in an channel of magento\n sales = cls.search([\n ('magento_id', '=', order_id),\n ('channel', '=',\n Transaction().context['current_channel'])\n ])\n return sales and sales[0] or None",
"def get_customer_from_id(customer_id):\n with DATABASE.transaction():\n try:\n return Customer.get(Customer.customer_id == customer_id)\n except DoesNotExist:\n return False",
"def add_sale(self, *args):\n total_sale = args[0]\n sold_by = args[1]\n\n add_sale = \"\"\"\n INSERT INTO sales(total_sale, sold_by)\\\n VALUES ('{}', '{}')\n RETURNING sale_id;\n \"\"\".format(total_sale, sold_by)\n cursor.execute(add_sale)\n sale_id = cursor.fetchone()[0]\n return sale_id",
"def search_customer(customer_id):\n\n try:\n logging.debug(f\"Searching database for customer_id: {customer_id}\")\n return Customer.get(Customer.customer_id == customer_id)\n except DoesNotExist:\n logging.debug(f\"Unable to find customer with id: {customer_id}\")\n raise DoesNotExist",
"async def get_customer_by_id(self,id):\n async with self._db.acquire() as conn:\n data= await conn.execute(Customer.select().where((Customer.c.customer_id == id)))\n return await data.fetchone()",
"def get_all_sales(self):\n self.cursor.execute(\"Select * from sales\")\n sales = self.cursor.fetchall()\n return sales",
"def get_sales():\n sales = []\n file = open('sales.csv')\n for line in file:\n # using unpacking syntax\n id, date, total_price, num_products, product_id, employee_id = line.split(\n ',')\n\n sales.append(dict_from_entries(sales_keys, (\n int(id),\n date,\n float(total_price),\n int(num_products),\n int(product_id),\n int(employee_id.rstrip()),\n )))\n\n file.close()\n return tuple(sales)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a set of customer_ids that are present in the table.
|
def get_all_customer_ids_from_table(table):
all_id = set()
for row in table:
all_id.add(str(row[-1]))
ui.print_result(all_id, "All customers ID: ")
return all_id
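For completeness, a sketch of the file-reading wrapper that would typically sit in front of this helper, following the same data_manager pattern as the other controller functions (the file path is an assumption):

def get_all_customer_ids():
    table = data_manager.get_table_from_file("sales/sales.csv")
    return get_all_customer_ids_from_table(table)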
|
[
"def get_all_customer_ids_from_table(table):\n customer_ids = set()\n for row in table:\n id_customer = str(row[0])\n customer_ids.add(id_customer)\n\n return customer_ids # sales_comtroller print the table of this set",
"def load_customer_ids(data_main):\n data_main = data_main.dropna()\n return data_main['customer_id'].drop_duplicates().to_list()",
"def get_all_sales_ids_for_customer_ids():\n\n # your code\n sales_table = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_all_sales_ids_for_customer_ids_from_table(sales_table)",
"def ids(self) -> set():\n return set(self.values_list(\"id\", flat=True))",
"def fetch_customer_ids_from_csv(source: CsvUrlMemberSource) -> Set[int]:\n r = requests.get(source.url)\n\n if r.status_code != 200:\n return set()\n\n ids = set()\n for row in csv.reader(StringIO(r.text)):\n if len(row) <= source.customer_id_column_index:\n logger.warning(f\"Member CSV row is {len(row) + 1} rows. \"\n f\"Expected customer ID at column index \"\n f\"{source.customer_id_column_index}\")\n else:\n try:\n ids.add(int(row[source.customer_id_column_index].strip()))\n except ValueError:\n pass\n return ids",
"def get_all_customers():\n\n # This relies on access to the global dictionary `customers`\n\n return list(customers.values())",
"def search_customers(customer_ids):\n logger.info('Searching for customer ids %s', customer_ids)\n query = [search_customer(customer_id) for customer_id in customer_ids]\n logger.info('Customer ids found with query.')\n return query",
"def fetch_employee_ids() -> set:\n\n employee_user_ids = set()\n\n with BehaviourDatabaseCursor() as db_cursor:\n db_cursor.execute(\n \"SELECT user_id FROM employees;\"\n )\n result_rows = db_cursor.fetchall()\n\n for row in result_rows:\n employee_user_ids.add(row[0])\n\n return employee_user_ids",
"def fetch_customer_ids(\n sources: List[MemberSource],\n limit: int\n) -> Set[int]:\n json_sources = [s for s in sources if isinstance(s, JsonUrlMemberSource)]\n csv_sources = [s for s in sources if isinstance(s, CsvUrlMemberSource)]\n\n json_ids = set(flatten(\n fetch_customer_ids_from_json(s) for s in json_sources\n ))\n csv_ids = set(flatten(\n fetch_customer_ids_from_csv(s) for s in csv_sources\n ))\n\n return set(list(json_ids | csv_ids)[:limit])",
"def check_if_selection_in_set(self, selection_set):\n selection_set = self.__make_list_distinct(selection_set)\n\n ids = []\n for selection_set_id, current_selection_set in self.__selection_sets.iteritems():\n if len([sv for sv in selection_set if sv in current_selection_set]) > 0:\n ids.append(selection_set_id)\n\n return ids",
"def ineligible_collection_ids(self, cutoff_timestamp):\n result = self._connection.fetch_all_rows(\n _ineligible_query, [cutoff_timestamp, ]\n )\n return [collection_id for (collection_id, ) in result]",
"def _get_all_cids(ibs):\n all_cids = ibs.db.get_all_rowids(CHIP_TABLE)\n return all_cids",
"def retrieveCellIds(self, listOfPoints, containedOnly=False):\n cellIds = []\n for cntb, bound in enumerate(listOfPoints):\n cellIds.append([])\n for point in bound:\n cellIds[cntb].extend(self.gridContainer['vertexToCellIds'][tuple(point)])\n if cntb == 0:\n previousSet = set(cellIds[cntb])\n if containedOnly:\n previousSet = set(previousSet).intersection(cellIds[cntb])\n else:\n previousSet.update(cellIds[cntb])\n\n return list(set(previousSet))",
"def feature_ids(self) -> Set[int]:\n return set(self._query_json(\"feature_ids\"))",
"def get_unique_countries():\n\n return set(TABLE_BETS['country'].unique())",
"def get_used_kit_ids(cursor):\n cursor.execute(\"select supplied_kit_id from ag_kit\")\n kits = set([i[0] for i in cursor.fetchall()])\n return kits",
"def fetch_customer_ids_from_json(source: JsonUrlMemberSource) -> Set[int]:\n r = requests.get(source.url)\n\n if r.status_code != 200:\n logger.warning(\n f\"Non-200 status code ({r.status_code}) from URL {source.url}\"\n )\n return set()\n\n data = r.json()\n\n try:\n return set(v for v in [int_or_none(v) for v in data] if v is not None)\n except TypeError:\n logger.warning(\n f\"Tried to iterate JSON from URL ({source.url}) and failed.\",\n exc_info=True\n )\n return set()",
"def retrieve_selected_rows(input_table_name, input_id_column):\r\n\t# Add a reference to the source data table in the script.\r\n\tsourceTable = Document.Data.Tables[input_table_name]\r\n\r\n\t# Create a cursor for the table column to get the values from, it uses the IDColumn property to identify in which line it should add the comment.\r\n\tsource_cursor = DataValueCursor.CreateFormatted(sourceTable.Columns[input_id_column])\r\n\r\n\t# Retrieve the marking selection (in other words, which lines/rows were selected?)\r\n\tmarkings = Document.ActiveMarkingSelectionReference.GetSelection(sourceTable)\r\n\r\n\t# Create a List object to store the retrieved data marking selection (which lines/rows were selected)\r\n\tids = []\r\n\r\n\t# Iterate through the source data table rows to retrieve the unique id of the marked/selected rows\r\n\tfor row in sourceTable.GetRows(markings.AsIndexSet(),source_cursor):\r\n\t\tvalue = source_cursor.CurrentValue # Get the value of the current row\r\n\t\tif value not in ids:\r\n\t\t\tids.append(value)\r\n\r\n\treturn ids",
"def getActiveIds(self):\n ret = []\n for client in self.clients:\n if self.isClientActive(self.getClient(client)):\n ret.append(client)\n return ret"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reads the customer-sales association table with the help of the data_manager module. Returns the sale ids grouped by customer id.
|
def get_all_sales_ids_for_customer_ids():
# your code
sales_table = data_manager.get_table_from_file("sales/sales.csv")
return get_all_sales_ids_for_customer_ids_from_table(sales_table)
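The grouping itself happens in a helper that is not shown. A minimal sketch, assuming the sale id is stored in the first column and the customer id in the last one; the column positions and the dict-of-lists return shape are assumptions:

def get_all_sales_ids_for_customer_ids_from_table(table):
    SALE_ID_INDEX = 0
    CUSTOMER_ID_INDEX = -1
    sales_by_customer = {}
    for row in table:
        # collect every sale id under the customer it belongs to
        sales_by_customer.setdefault(row[CUSTOMER_ID_INDEX], []).append(row[SALE_ID_INDEX])
    return sales_by_customer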
|
[
"def fetchAllCustomers():\n return CustomerDao().fetch_all_customers()",
"def get_associations(customer_id):\n customer = CustomerController.get(customer_id)\n\n if not customer:\n return jsonify(error='Customer not found'), 500\n\n associations = CustomerController.get_associations(customer).all()\n\n return jsonify(associations=serialize_sqla(associations))",
"def get_customers():\n try:\n openConnection\n with conn.cursor() as cur:\n result = cur.run_query('SELECT * FROM customer')\n cur.close()\n conn.close()\n except:\n return Exception\n customers = []\n for row in result:\n if row[0] == 1:\n continue\n customer = {'id': row[0], 'name':row[1], 'credit': 0, 'rfid': row[2]}\n customers.append(customer)\n return customers",
"def _fetch_chorizon(cokeys):\n keys = ','.join([str(k) for k in cokeys])\n query = 'SELECT cokey, chkey, hzname, hzdepb_r, hzdept_r, hzthk_r, ' \\\n 'dbthirdbar_r, ksat_r, sandtotal_r, claytotal_r, ' \\\n 'om_r, cec7_r, awc_l, fraggt10_r, frag3to10_r, ' \\\n 'desgnmaster, sieveno10_r, wthirdbar_r, wfifteenbar_r, ' \\\n 'sandvf_r, ll_r ' \\\n 'FROM chorizon ' \\\n 'WHERE cokey IN (%s) ORDER BY cokey' % keys\n\n xml = _makeSOAPrequest(query)\n return _extract_table(xml)",
"def _load_categories(self):\n categories = db.fetchall(\n \"category\",\n \"codename name is_base_expense aliases\".split()\n )\n categories = self._fill_aliases(categories)\n\n return categories",
"def get_customers(self, email_id):\n querystring = {'load_relations': '[\"Contact\"]', 'Contact.email': email_id}\n customers = self.request(action='customer', **querystring)\n return customers if 'Customer' in customers else None",
"def get_all_customers():\n\n # This relies on access to the global dictionary `customers`\n\n return list(customers.values())",
"def run(self, diseases, efos):\n\t\tfile = open(postgap.Globals.DATABASES_DIR+\"/Phewas_Catalog.txt\")\n\t\tres = [ self.get_association(line, diseases, efos) for line in file ]\n\t\tres = filter(lambda X: X is not None, res)\n\n\t\tif postgap.Globals.DEBUG:\n\t\t\tprint \"\\tFound %i GWAS SNPs associated to diseases (%s) or EFO IDs (%s) in Phewas Catalog\" % (len(res), \", \".join(diseases), \", \".join(efos))\n\n\t\treturn res",
"def get_all_customer_ids_from_table(table):\n customer_ids = set()\n for row in table:\n id_customer = str(row[0])\n customer_ids.add(id_customer)\n\n return customer_ids # sales_comtroller print the table of this set",
"def load_category_table():\n global UCDCategories\n f = open(os.path.join(os.path.dirname(__file__), CATEGORY_FILE), 'rb')\n UCDCategories = load(f)\n f.close()",
"def fetch_customers(self, all=False, page_num=0, limit=10):\n #query all the customers\n customers = self.query_object(\"Customer\")\n\n return customers",
"def load_customer_ids(data_main):\n data_main = data_main.dropna()\n return data_main['customer_id'].drop_duplicates().to_list()",
"def get_colleague_associated_data(self):\n colleagues = self.get_all_colleague_data()\n loci = self.get_all_collegue_locus()\n relations = self.get_all_colleague_relation()\n result = self.association_helper(colleagues, loci, relations)\n return result",
"def run(self, diseases, efos):\n\t\tfile = open(postgap.Globals.DATABASES_DIR+\"/GWAS_DB.txt\")\n\n\t\t# Note the format of the EFO strings is modified in this file format, so we need to change the queries\n\t\tefos2 = [re.sub(\"_\", \"ID:\", efo) for efo in efos]\n\t\tres = [ self.get_association(line, diseases, efos) for line in file ]\n\t\tres = filter(lambda X: X is not None, res)\n\n\t\tif postgap.Globals.DEBUG:\n\t\t\tprint \"\\tFound %i GWAS SNPs associated to diseases (%s) or EFO IDs (%s) in GWAS DB\" % (len(res), \", \".join(diseases), \", \".join(efos))\n\n\t\treturn res",
"def load_companies():\n\n\twith open('./seed_data/companies.tsv', 'r+') as data:\n\t\tfor row in data:\n\t\t\trow = row.rstrip()\n\n\t\t\tname, address, hr_contact_info = row.split('\\t')\n\t\t\tcompanies = Companies(name=name,\n\t\t\t\t\t\t\t\taddress=address,\n\t\t\t\t\t\t\t\thr_contact_info=hr_contact_info)\n\t\t\tdb.session.add(companies)\n\t\t\tdb.session.commit()",
"def test_get_role_associates(self):\n pass",
"def get_acc_info(self):\n acc_info=self.connection.get_accounts()\n self.db.update_account_info_table(self.account_id, acc_info)",
"def extractCustomer(path,filename):\r\n \r\n cust_cols = {\"LoyaltyCardNum\":np.int64,\r\n \"HouseholdNum\":np.int64,\r\n \"MemberFavStore\":np.int64,\r\n \"City\":str,\r\n \"State\":str,\r\n \"ZipCode\":str,\r\n \"ExtraCol\":str\r\n }\r\n cust_list = pd.read_csv(path + filename,\r\n sep = '|',\r\n header = None,\r\n encoding='latin_1',\r\n quoting=3,\r\n names=cust_cols.keys())\r\n cust_list.isna().sum()\r\n cust_list.fillna({'LoyaltyCardNum':-999,\r\n 'HouseholdNum':-999,\r\n 'MemberFavStore':-999},inplace=True)\r\n cust_list = cust_list.astype(cust_cols)\r\n cust_list = cust_list.drop(['ExtraCol'],axis = 1)\r\n return (cust_list)",
"def load_data(self, sheet_names):\n # print self.work_sheet\n\n static_rownum = 10\n for sheet_name, sheet_rows in self.work_sheet.items():\n dataset_acronyms = list()\n temp = sheet_rows[:]\n for row_id, row in enumerate(temp):\n temp_row = [cell.value for cell in row]\n\n if sheet_name == sheet_names[0]:\n if 'ObjectCategory_table' in temp_row:\n cur_table = sheet_rows[row_id + 4:]\n for row_id, row in enumerate(cur_table):\n row_id = row_id + static_rownum - 2\n if all('' == cell.value for cell in row):\n break\n obj_cat = SqlAlchemy.ObjectCategories()\n if row[0].value == \"\":\n raise Exception('Error in {} row of \"ObjectCategory_table\" of sheet {}\\nField named \"ObjectCategoryName\" is empty.\\nThis field should not be empty.\\nPlease fill this field to a value\\n\\n'\n .format(row_id, sheet_names[0]))\n obj_cat.ObjectCategoryName = row[0].value\n obj_cat.CategoryDefinition = row[1].value\n self.setup.push_data(obj_cat)\n break\n\n if sheet_name == sheet_names[1]:\n if 'AttributeCategory_table' in temp_row:\n cur_table = sheet_rows[row_id + 3:]\n for row_id, row in enumerate(cur_table):\n row_id = row_id + static_rownum - 3\n\n if all('' == cell.value for cell in row):\n break\n attrib_cat = SqlAlchemy.AttributeCategories()\n if row[0].value == \"\":\n raise Exception('Error in {} row of \"AttributeCategory_table\" of sheet \"{}\"\\nField named \"AttributeCategoryName\" is empty.\\nThis field should not be empty.\\nPlease fill this field to a value\\n\\n'\n .format(row_id, sheet_names[1]))\n attrib_cat.AttributeCategoryName = row[0].value\n attrib_cat.CategoryDefinition = row[1].value\n self.setup.push_data(attrib_cat)\n break\n\n if sheet_name == sheet_names[2]:\n temp_row = [cell.value for cell in row]\n if 'ResourceTypes_table' in temp_row:\n cur_table = sheet_rows[row_id + 4:]\n temp_org = cur_table[:]\n for row_id, row in enumerate(cur_table):\n temp_row = [cell.value for cell in row]\n row_id = row_id + static_rownum\n if all('' == cell.value for cell in row):\n break\n data_struct = SqlAlchemy.ResourceTypes()\n\n if row[0].value == \"\":\n raise Exception('Error in {} row of \"ResourceTypes_table\" of sheet \"{}\"\\nField named \"ResourceType\" is empty.\\nThis field should not be empty.\\nPlease fill this field to a value\\n\\n'\n .format(row_id, sheet_names[2]))\n if row[1].value == \"\":\n raise Exception('Error in {} row of \"ResourceTypes_table\" of sheet \"{}\"\\nField named \"ResourceTypeAcronym\" is empty.\\nThis field should not be empty.\\nPlease fill this field to a value\\n\\n'\n .format(row_id, sheet_names[2]))\n\n if row[3].value == \"\":\n raise Exception('Error in {} row of \"ResourceTypes_table\" of sheet \"{}\"\\nField named \"MethodName\" is empty.\\nThis field should not be empty.\\nPlease fill this field to a value\\n\\n'\n .format(row_id, sheet_names[3]))\n\n if not row[0].value:\n\n\n raise Exception('Error in \"ResourceTypes_table\" of sheet \"{}\"\\nField named \"ResourceType\" is empty.\\nThis field should not be empty.\\nPlease fill this field to a value\\n\\n'\n .format(sheet_names[2]))\n # raise Exception('Empty Field found in ResourceType Column in ResourceTypes table')\n existname = None\n try:\n existname = self.__session.query(SqlAlchemy.ResourceTypes).filter(\n SqlAlchemy.ResourceTypes.ResourceType == row[0].value\n ).first().ResourceTypeAcronym\n self.datasetAcronym = existname\n define.datasetName = existname\n except:\n existname = None\n\n if existname == None:\n data_struct.ResourceType = row[0].value\n if row[1].value:\n 
data_struct.ResourceTypeAcronym = row[1].value\n self.datasetAcronym = row[1].value\n define.datasetName = row[1].value\n else:\n raise Exception('Error in \"ResourceTypes_table\" of sheet \"{}\"\\nField named \"ResourceTypeAcronym\" is empty.\\nThis field should not be empty.\\nPlease fill this field to a value\\n\\n'\n .format(sheet_names[2]))\n # raise Exception('Empty Fields found in ResourceTypeAcronym Column in ResourceTypes table')\n\n try:\n data_struct.MethodID = self.__session.query(SqlAlchemy.Methods).filter(\n SqlAlchemy.Methods.MethodName == row[3].value\n ).first().MethodID\n except Exception as e:\n print e\n raise Exception('Error in sheet {}\\ncould not find {} in Methods\\n\\n'\n .format(sheet_names[2], row[3].value))\n data_struct.Description = row[4].value\n self.setup.push_data(data_struct)\n\n # Storing ResourceTypeAcronym in Dict.\n if not row[1].value in dataset_acronyms:\n dataset_acronyms.append(row[1].value)\n\n if 'ObjectTypes_table' in temp_row:\n cur_table = sheet_rows[row_id + 5:]\n for row_id, row in enumerate(cur_table):\n row_id = row_id + static_rownum + 8\n if all('' == cell.value for cell in row):\n break\n # Loading main ObjectTypes into the database\n obj_type = SqlAlchemy.ObjectTypes()\n\n # Raise an error if the user leaves the required field \"ObjectType\" empty\n if row[0].value == '':\n raise Exception('Error in ObjectTypes_table\\'s {} row of sheet {}\\nField named \"ObjectType\" is empty.\\nThis field should not be empty.\\nPlease fill this field to a value\\n\\n'\n .format(row_id, sheet_names[2], row[0].value))\n\n # Raise an error if the user leaves the required field \"ObjectTypology\" empty\n if row[1].value == '':\n raise Exception('Error in {} row of \"ObjectTypes_table\" of sheet {}\\nField named \"ObjectTypology\" which \"ObjectType\" is {} is empty.\\nThis field should not be empty.\\nPlease fill this field to a value\\n\\n'\n .format(row_id, sheet_names[2], row[0].value))\n # Raise an error if the user leaves the required field \"DatasetAcronym\" empty\n if row[2].value == '':\n raise Exception('Error in {} row of \"ObjectTypes_table\" of sheet {}\\n\"ResourceTypeAcronym\" field which \"ObjectType\" is {} is empty.\\nThis field should not be empty.\\nPlease fill this field to a value\\n\\n'\n .format(row_id, sheet_names[2],row[0].value))\n\n obj_type.ObjectType = row[0].value\n obj_type.ObjectTypologyCV = row[1].value\n\n try:\n if row[2].value:\n obj_type.ResourceTypeID = self.__session.query(SqlAlchemy.ResourceTypes).filter(\n SqlAlchemy.ResourceTypes.ResourceTypeAcronym == row[2].value\n ).first().ResourceTypeID\n except Exception as e:\n print e\n raise Exception('Error in sheet {}\\ncould not find {} in ResourceTypes\\n\\n'\n .format(sheet_names[2], row[2].value))\n\n try:\n if row[3].value:\n obj_type.ObjectTypeCV = self.__session.query(SqlAlchemy.CV_ObjectType).filter(\n SqlAlchemy.CV_ObjectType.Name == row[3].value\n ).first().Name\n except Exception as e:\n print e\n raise Exception('Error in sheet {}\\ncould not find {} in ObjectTypeCV\\n\\n'\n .format(sheet_names[2], row[3].value))\n\n obj_type.Layout = row[4].value\n\n\n try:\n if row[5].value:\n obj_type.ObjectCategoryID = self.__session.query(SqlAlchemy.ObjectCategories).filter(\n SqlAlchemy.ObjectCategories.ObjectCategoryName == row[5].value\n ).first().ObjectCategoryID\n except Exception as e:\n print e\n raise Exception('Error in sheet {}\\ncould not find {} in ObjectCategory\\n\\n'\n .format(sheet_names[2], row[5].value))\n\n if row[6]:\n obj_type.Description = 
row[6].value\n\n self.setup.push_data(obj_type)\n # Creating dummy attributes for corresponding object type\n obj = self.create_dummy_attrib(['ObjectTypeInstances', \"The purpose of this \"\n \"Attribute is to connect\"\n \" and help query all \"\n \"the instances that \"\n \"belong to one \"\n \"ObjectType\"])\n self.setup.push_data(obj)\n\n\n break\n\n if sheet_name == sheet_names[3]:\n if 'Attributes_table' in temp_row:\n cur_table = sheet_rows[row_id + 5:]\n for row_id, row in enumerate(cur_table):\n row_id = row_id + static_rownum\n if all('' == cell.value for cell in row):\n break\n #print row\n attrib = SqlAlchemy.Attributes()\n if row[0].value == \"\":\n raise Exception('Error in {} row of \"Attributes_table\" of sheet \"{}\"\\nField named \"ObjectType\" is empty.\\nThis field should not be empty.\\nPlease fill this field to a value\\n\\n'\n .format(row_id, sheet_names[3]))\n\n if row[1].value == \"\":\n raise Exception('Error in {} row of \"Attributes_table\" of sheet \"{}\"\\nField named \"AttributeName\" is empty.\\nThis field should not be empty.\\nPlease fill this field to a value\\n\\n'\n .format(row_id, sheet_names[3]))\n\n if row[4].value == \"\":\n raise Exception('Error in {} row of \"Attributes_table\" of sheet \"{}\"\\nField named \"AttributeUnit\" is empty.\\nThis field should not be empty.\\nPlease fill this field to a value\\n\\n'\n .format(row_id, sheet_names[3]))\n\n if row[6].value == \"\":\n raise Exception('Error in {} row of \"Attributes_table\" of sheet \"{}\"\\nField named \"AttributeDataTypeCV\" is empty.\\nThis field should not be empty.\\nPlease fill this field to a value\\n\\n'\n .format(row_id, sheet_names[3]))\n\n if row[1].value:\n attrib.AttributeName = row[1].value\n else:\n raise Exception('Error in \"Attributes_table\" of sheet \"{}\"\\nField named \"AttributeName\" is empty.\\nThis field should not be empty.\\nPlease fill this field to a value\\n\\n'\n .format(sheet_names[3]))\n # raise Exception('Empty field found in AttributeName \\n\\n Column of Attributes table')\n try:\n if row[0]:\n try:\n ResourceTypeID = self.__session.query(SqlAlchemy.ResourceTypes).filter(\n SqlAlchemy.ResourceTypes.ResourceTypeAcronym == self.datasetAcronym\n ).first().ResourceTypeID\n attrib.ObjectTypeID = self.__session.query(SqlAlchemy.ObjectTypes).filter(\n and_(\n SqlAlchemy.ObjectTypes.ObjectType == row[0].value,\n SqlAlchemy.ObjectTypes.ResourceTypeID == ResourceTypeID\n )\n ).first().ObjectTypeID\n except Exception as e:\n print e\n raise Exception(e.message)\n else:\n raise Exception('Empty field found in ObjectType Column of Attributes table \\n\\n')\n except Exception as e:\n print e\n raise Exception('Error in sheet {}\\n could not find {} in ObjectTypes\\n\\n'\n .format(sheet_names[3], row[0].value))\n\n if row[3].value:\n attrib.AttributeNameCV = row[3].value\n\n # if row[2].value:\n # attrib.UnitName = row[3].value\n # else:\n # raise Exception('Empty field found in AttributeUnit column of Attributes sheet')\n\n if row[5].value:\n try:\n attrib.UnitNameCV = self.__session.query(SqlAlchemy.CV_Units).filter(\n SqlAlchemy.CV_Units.Name == row[5].value\n ).first().Name\n except Exception as e:\n print e\n raise Exception(\"Error in sheet '{}'\\n could not find '{}' in CV_Units table\\n\\n\"\n .format(sheet_names[3], row[5].value))\n\n if row[4].value:\n attrib.UnitName = row[4].value\n # try:\n # attrib.UnitName = self.__session.query(SqlAlchemy.CV_Units).filter(\n # SqlAlchemy.CV_Units.Name == row[3].value\n # ).first().Name\n # except Exception as e:\n # 
print e\n # raise Exception('Error in sheet {}\\ncould not find {} in AttributeTypes\\n\\n'\n # .format(sheet_names[3], row[3].value))\n else:\n raise Exception('Error in \"Attributes_table\" of sheet \"{}\"\\nField named \"AttributeUnit\" is empty.\\nThis field should not be empty.\\nPlease fill this field to a value'\n .format(sheet_names[3]))\n # raise Exception('Empty field found in AttributeDataTypeCV column of Attributes table')\n if row[6].value:\n try:\n attrib.AttributeDataTypeCV = self.__session.query(SqlAlchemy.CV_AttributeDataType).filter(\n SqlAlchemy.CV_AttributeDataType.Name == row[6].value\n ).first().Name\n except Exception as e:\n print e\n raise Exception('Error in sheet {}\\ncould not find {} in AttributeDataTypeCV\\n\\n'\n .format(sheet_names[3], row[6].value))\n if row[7].value:\n try:\n attrib.AttributeCategoryID = self.__session.query(SqlAlchemy.AttributeCategories).filter(\n SqlAlchemy.AttributeCategories.AttributeCategoryName == row[7].value\n ).first().AttributeCategoryID\n except Exception as e:\n print e\n raise Exception('Error in sheet {}\\ncould not find {} in AttributeCategory\\n\\n'\n .format(sheet_names[3], row[7].value))\n\n attrib.AttributeName_Abstract = row[2].value\n attrib.ModelInputOrOutput = row[8].value\n attrib.AttributeDescription = row[9].value\n if row[10].value=='':\n attrib.AttributeScale =1 # scale default value=1\n else:\n attrib.AttributeScale = row[10].value\n\n self.setup.push_data(attrib)\n break"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Use ``git ls-remote`` to list branches and tags without cloning the repository.
|
def lsremote(self, include_tags=True, include_branches=True):
if not include_tags and not include_branches:
return [], []
extra_args = []
if include_tags:
extra_args.append("--tags")
if include_branches:
extra_args.append("--heads")
cmd = ["git", "ls-remote", *extra_args, self.repo_url]
self.check_working_dir()
_, stdout, _ = self.run(*cmd, demux=True, record=False)
branches = []
# Git has two types of tags: lightweight and annotated.
# Lightweight tags are the "normal" ones.
all_tags = {}
light_tags = {}
for line in stdout.splitlines():
try:
commit, ref = line.split(maxsplit=1)
except ValueError:
# Skip this line if we have a problem splitting the line
continue
if ref.startswith("refs/heads/"):
branch = ref.replace("refs/heads/", "", 1)
branches.append(VCSVersion(self, branch, branch))
if ref.startswith("refs/tags/"):
tag = ref.replace("refs/tags/", "", 1)
# If the tag is annotated, then the real commit
# will be on the ref ending with ^{}.
if tag.endswith('^{}'):
light_tags[tag[:-3]] = commit
else:
all_tags[tag] = commit
# Merge both tags, lightweight tags will have
# priority over annotated tags.
all_tags.update(light_tags)
tags = [VCSVersion(self, commit, tag) for tag, commit in all_tags.items()]
return branches, tags
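A short usage sketch for the method above, assuming a hypothetical `backend` instance of the surrounding VCS backend class (the instance name and the print statement are illustrative, not part of the source):

# Illustrative only: `backend` stands for an instance of the backend defined above.
branches, tags = backend.lsremote()                      # both branches and tags
branches_only, _ = backend.lsremote(include_tags=False)  # omits the --tags flag
_, tags_only = backend.lsremote(include_branches=False)  # omits the --heads flag
print(len(branches), "branches and", len(tags), "tags found without cloning")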
|
[
"def ls_remote(remote, config=None, **kwargs):\n if config is None:\n config = StackedConfig.default()\n client, host_path = get_transport_and_path(remote, config=config, **kwargs)\n return client.get_refs(host_path)",
"def git_ls_remote_tags(url):\n return [os.path.basename(line.split(\"\\t\")[1])\n for line in execute(\"git ls-remote --tags --refs {}\".format(url),\n capture=True).decode(\"utf-8\").split(\"\\n\") if line]",
"def gitlist():\n local('git ls-files -i --exclude-standard')",
"def list_tracked_repos():\n cmd_str = GIT + ' remote'\n return run_cmd(cmd_str, splitter='__ALL_WHITE_SPACES__')",
"def remote_refs(remote, heads=False, tags=False):\n args = ['git', 'ls-remote', remote]\n if heads:\n args.insert(2, '--heads')\n if tags:\n args.insert(2, '--tags')\n cmd = subprocess.Popen(args, stdout=subprocess.PIPE)\n s = lambda line: line.rstrip().split(\"\\t\")[1]\n return set(map(s, cmd.stdout))",
"def gitlist2():\n local('git ls-files -i -X .gitignore')",
"def get_branches_on_remote(self, remote):\n output = self.run_git_cmd(['branch', '--remote', '--no-color'])\n return self._get_branches_from_branch_remote_output(output, remote)",
"def branches(self, match=None):\n args = ['--list', '--remote']\n if match:\n args.append(match)\n result = self.git('branch', *args)\n branches = [branch.strip() for branch in result.stdout.readlines()]\n return [branch.decode() for branch in branches]",
"def repo_remote_names(conf, repo):\n\n rnames = set()\n\n out, _, _ = execute(\n [\"git\", \"remote\", \"-v\"],\n cwd=repo_path(conf, repo), pipe=True\n )\n for line in out.decode(\"utf-8\").splitlines():\n rnames.add(line.split()[0])\n\n return list(rnames)",
"def GitRemoteRepoList(directory, include_fetch=True, include_push=True,\n logger=None):\n remote_repos = log_tools.CheckOutput(GitCmd() + ['remote', '-v'],\n logger=logger, cwd=directory)\n\n repo_set = set()\n for remote_repo_line in remote_repos.splitlines():\n repo_name, repo_url, repo_type = remote_repo_line.split()\n if include_fetch and repo_type == '(fetch)':\n repo_set.add((repo_name, repo_url))\n elif include_push and repo_type == '(push)':\n repo_set.add((repo_name, repo_url))\n\n return sorted(repo_set)",
"def do_ls(self, args):\n\n if self.__is_open():\n try:\n files = list(self.fe.ls(add_details=True))\n files.sort(key=self.__sort_files)\n\n if self.fe.pwd() != \"/\":\n files = [(\"..\", \"D\")] + files\n\n print(\"\\nRemote files in '%s':\\n\" % self.fe.pwd())\n\n for elem, type in files:\n if type == 'D':\n print(\" <dir> %s\" % elem)\n else:\n print(\" <file/empty_dir> %s\" % elem)\n\n print(\"\")\n\n except IOError as e:\n self.__error(str(e))\n except Exception as e:\n print(e)",
"def test_ls_remote_without_local_clone(orchestra: OrchestraShim):\n orchestra(\"update\")\n\n component = orchestra.configuration.components[\"component_A\"]\n remote_repo_path = orchestra.default_remote_base_url / \"component_A\"\n\n current_commit = git.rev_parse(remote_repo_path)\n current_branch_name = git.run(remote_repo_path, \"name-rev\", \"--name-only\", \"HEAD\").strip()\n\n assert component.branch() == current_branch_name\n assert component.commit() == current_commit",
"def NETRemoteList(self):\n Rclone.list_remotes_in_vim_buffer()",
"def fetch_tags(self, **kwargs):\n kwargs.setdefault('name', 'git fetch tags')\n self('fetch', 'origin', '--tags', **kwargs)",
"def _GetRemotes(self):\n if not self._remotes:\n exit_code, output, _ = self.RunCommand('git remote -v')\n if exit_code == 0:\n self._remotes = list(filter(None, output.split('\\n')))\n\n return self._remotes",
"def _fetchRemotes(remotes):\n def _get_name(ref):\n \"\"\"Return the local name of a remote or tag reference.\"\"\"\n return ref.remote_head if isinstance(ref, git.RemoteReference) else ref.name\n\n info = [(\"NEW_HEAD\", \"new branch\", \"new branches\"),\n (\"NEW_TAG\", \"new tag\", \"new tags\"),\n (\"FAST_FORWARD\", \"branch update\", \"branch updates\")]\n up_to_date = BLUE + \"Up to date\" + RESET\n\n for remote in remotes:\n print(GREEN + \"Fetching \" + remote.name)\n\n if not remote.config_reader.has_option(\"fetch\"):\n print(YELLOW + \"Skipped:\" + RESET + \" No configured refspec.\")\n continue\n\n try:\n results = remote.fetch()\n except git.exc.GitCommandError as err:\n msg = err.command[0].replace(\"Error when fetching: \", \"\")\n if not msg.endswith(\".\"):\n msg += \".\"\n print(ERROR + msg)\n return\n except AssertionError: # Seems to be the result of a bug in GitPython\n # This happens when git initiates an auto-gc during fetch:\n print(ERROR + \"Something went wrong in GitPython but the fetch might have been successful.\")\n return\n rlist = []\n for attr, singular, plural in info:\n names = [_get_name(res.ref)\n for res in results if res.flags & getattr(res, attr)]\n if names:\n desc = singular if len(names) == 1 else plural\n colored = GREEN + desc + RESET\n rlist.append(\"{0} ({1})\".format(colored, \", \".join(names)))\n print((\", \".join(rlist) if rlist else up_to_date) + \".\")",
"def list_repos(self, conn):\n\t\trepos = self.get_repos()\n\t\tpacket = self.set_packet(6, repos)\n\t\tconn.sendall(packet)",
"def pull(self, remote = 'origin'):",
"def pull(env, opts, args):\n remotes = remote.get_remotes(env)\n if args:\n filt = lambda rmt: rmt.name in args\n else:\n filt = lambda rmt: True\n for entry in filter(filt, remotes):\n external.rsync(entry.url, env.directory, backup_dir=env.backup)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Return an iterable of submodule paths in this repository. In order to get the submodules paths without initializing them, we parse the .gitmodules file. For this we make use of the ``git config getregexp`` command. Keys and values from the config can contain spaces. In order to parse the output unambiguously, we use the ``null`` option to separate each result with a null character, and each key and value with a newline character.
|
def submodules(self) -> Iterable[str]:
exit_code, stdout, _ = self.run(
"git",
"config",
"--null",
"--file",
".gitmodules",
"--get-regexp",
# Get only the path key of each submodule.
r"^submodule\..*\.path$",
record=False,
)
if exit_code != 0:
# The command fails if the project doesn't have submodules (the .gitmodules file doesn't exist).
return []
keys_and_values = stdout.split("\0")
for key_and_value in keys_and_values:
try:
key, value = key_and_value.split("\n", maxsplit=1)
except ValueError:
# This should never happen, but we log a warning just in case
# Git doesn't return the expected format.
log.warning("Wrong key and value format.", key_and_value=key_and_value)
continue
if key.endswith(".path"):
yield value
else:
# This should never happen, but we log a warning just in case the regex is wrong.
log.warning("Unexpected key extracted fom .gitmodules.", key=key)
|
[
"def parse_gitmodule(path):\n rel_path_subm = []\n regex = r\"^path = \"\n with open(os.path.join(path, \".gitmodules\")) as f:\n for line in f:\n line = line.strip()\n match = re.search(regex, line)\n if match:\n rel_path_subm.append(re.sub(regex, '', line))\n rel_path_subm = [os.path.join(path, elem) for elem in rel_path_subm]\n return rel_path_subm",
"def get_submodules_config() -> Dict[str,Dict[str,str]]:\n gitmodules_fn = os.path.join(get_gitwdir(), '.gitmodules')\n gitmodules_data = run_cmd(['git', 'config', '--list', '--file', gitmodules_fn])\n prefix = 'submodule.'\n config: Dict[str, Dict[str,str]] = {}\n for line in gitmodules_data.splitlines():\n if not line.startswith(prefix):\n continue\n splitted = line.split('=', maxsplit=1)\n if len(splitted) != 2:\n continue\n section, val = splitted\n # remove \"submodule.\" prefix\n section = section[len(prefix):]\n # split section into module name and variable\n splitted = section.rsplit('.', maxsplit=1)\n if len(splitted) != 2:\n continue\n module_name, var = splitted\n if module_name not in config:\n config[module_name] = {}\n config[module_name][var] = val\n\n return config",
"def all_paths():\n repo_root = os.path.abspath(os.path.join(INFRABOTS_DIR, os.pardir, os.pardir))\n output = subprocess.check_output(['git', 'ls-files'], cwd=repo_root).rstrip()\n return output.splitlines()",
"def list_submodules():\n submodule_names = []\n stages = sp.check_output(['git', 'ls-files', '--stage'], encoding='utf8').strip()\n submodules_list = re.findall(r\"^160000\", stages, flags=re.MULTILINE)\n logging.debug(\"Found submodules: \" + '\\n'.join(submodules_list))\n for submodule in submodules_list:\n # this assumes no spaces in submodule paths\n submodule_names.append(re.split(r\"[ ]+\", submodule.strip())[-1])\n return submodule_names",
"def parse_git_submodules(gitmodules_data):\n gitmodules_data = gitmodules_data.decode(\"utf-8\")\n result = {}\n # NOTE: configparser.ConfigParser() doesn't seem to like the file\n # (i.e. read_string() always returns None), so do the parsing\n # manually here.\n section_name = None\n in_submodule_section = False\n submodule_name = None\n submodule_prefix = 'submodule \"'\n urls = {}\n branches = {}\n for line in gitmodules_data.splitlines():\n if line.startswith('['):\n section_name = line[1:-1]\n is_submodule_section = section_name.startswith(submodule_prefix)\n if is_submodule_section:\n submodule_name = section_name[len(submodule_prefix):-1]\n elif is_submodule_section:\n key, _, value = line.strip().partition('=')\n if not value:\n continue\n key = key.strip()\n value = value.strip()\n if key == 'url':\n urls[submodule_name] = value\n elif key == 'branch':\n branches[submodule_name] = value\n\n result = {}\n for submodule, url in urls.items():\n branch = branches.get(submodule)\n if not branch:\n branch = get_git_remote_ref(url, 'heads/master')\n result[submodule] = '%s@%s' % (url, branch)\n return result",
"def get_paths(self, name):\n info = self.get_module_info(name)\n if info:\n return info.get(constants.MODULE_PATH, [])\n return []",
"def parse_gitmodules(raw):\n\n result = {}\n locals_ = {}\n\n def reset():\n locals_.clear()\n\n def add_result():\n if locals_.get('added'):\n return\n\n path = locals_.get('path')\n url = locals_.get('url')\n\n if (path is None or url is None):\n return\n result[path] = url\n locals_['added'] = True\n\n for line in raw.splitlines():\n if not line.strip():\n continue \n\n if line.startswith('[submodule '):\n reset()\n continue\n\n try:\n name, value = line.split('=', 1)\n except:\n # too few values?\n continue\n locals_[name.strip()] = value.strip()\n add_result()\n\n return result",
"def _get_all_git_path(root_path):\n for dir_path, dir_names, _ in os.walk(root_path):\n if _GIT_FOLDER_NAME in dir_names:\n yield dir_path",
"def get_all_modpaths(self, tree):\n result = []\n for key in list(tree.keys()):\n if key is None:\n continue\n elif not isinstance(tree[key], dict):\n result.append(key)\n else:\n result.append((key, self.get_all_modpaths(tree[key])))\n return result",
"def _getRepositoryListPaths():\r\n _repositoryListPaths = []\r\n _repositoryListPaths.append(os.path.join(home,\".subuser\",\"repositories.json\"))\r\n _repositoryListPaths.append(\"/etc/subuser/repositories.json\") # TODO how does this work on windows?\r\n _repositoryListPaths.append(os.path.join(_getSubuserDir(),\"repositories.json\"))\r\n repositoryListPaths = []\r\n for path in _repositoryListPaths:\r\n if os.path.exists(path):\r\n repositoryListPaths.append(path)\r\n return repositoryListPaths",
"def dirpaths(self):\n parts = self.split()\n result = [DotPath(parts[0] or \"/\")]\n for name in parts[1:]:\n result.append(result[-1] / name)\n return result",
"def get_submodule_names():\n return _SUBMODULE_NAMES",
"def get_tree_modpaths(self, path):\n tree = self.file_tree\n root_modlist = sub_modlist = []\n while len(path) > 1:\n next_sub_modlist = []\n sub_modlist.append((path[0], next_sub_modlist))\n tree = tree[path[0]]\n path = path[1:]\n sub_modlist = next_sub_modlist\n if isinstance(tree[path[0]], dict):\n sub_modlist.append((path[0], self.get_all_modpaths(tree[path[0]])))\n else:\n sub_modlist.append(path[0])\n return root_modlist[0]",
"def path_list(ctx, module_name, version):\n module_tree = ctx.obj.check_module_tree()\n loader = ctx.obj.check_module(module_tree, module_name, version)\n print(\n \"\\n\".join(\n f\"{str(p)} -> {p.resolve(loader.module_path())}\"\n for p in loader.module.paths\n )\n )",
"def submodules_generator():\n sub_path = \"./subs\"\n do_not_use = [\"solver\"]\n for item in os.listdir(sub_path):\n path = os.path.join(sub_path, item)\n if item not in do_not_use:\n for sub in os.listdir(path):\n if sub == f\"{item}.py\" and sub not in do_not_use:\n yield f\"subs.{item}.{sub[:-3]}\"",
"def get_module_paths(self) -> Set[str]:\n paths = []\n for module in self.module_names:\n module_found = False\n for search_path in self.search_paths:\n path = Path(search_path).absolute().joinpath(module)\n is_package = self.is_package(path)\n is_file = path.is_file()\n if (is_package or is_file) and not module_found:\n paths.append(str(path))\n module_found = True\n pass\n pass\n if not module_found:\n paths.append(\"external:{}\".format(module))\n pass\n return set(paths)",
"def get_repo_list(self):\r\n return self._repo.keys()",
"def repolist(self):\n return self._repolist",
"def list_repositories(namespace):\n return [x for x in namespace.iterdir() if rcfiles.git.is_repo(x)]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check out all repository submodules. If ``submodules`` is empty, all submodules will be updated.
|
def checkout_submodules(self, submodules: list[str], recursive: bool):
self.run('git', 'submodule', 'sync')
cmd = [
'git',
'submodule',
'update',
'--init',
'--force',
]
if recursive:
cmd.append("--recursive")
cmd.append("--")
cmd += submodules
self.run(*cmd)
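For reference, a hedged sketch of what the helper above ends up running (the `backend` name and the submodule path are illustrative; the expanded commands simply mirror the argument list built in the method):

# Illustrative only: `backend` is an instance of the class defining checkout_submodules.
backend.checkout_submodules(submodules=["docs/theme"], recursive=True)
# roughly: git submodule sync
#          git submodule update --init --force --recursive -- docs/theme
backend.checkout_submodules(submodules=[], recursive=False)
# roughly: git submodule sync
#          git submodule update --init --force --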
|
[
"def git_update_all(root_path=os.path.expanduser('~')):\n\n start_time_sec = time.time()\n git_util.git_logger.info('git_update_all() : start')\n updater = GitRepositoryUpdater(root_path, 'config')\n updater.recursively_find_in()\n git_util.git_logger.info('git_update_all() : end')\n git_util.git_logger.info('git_update_all() : elapsed time = %g (sec)' % (time.time() - start_time_sec))",
"def initGitModules():\n tools.progress(\"Updating the RV-Monitor submodules...\")\n tools.runNoError([\"git\", \"submodule\", \"update\", \"--init\", \"--recursive\"])",
"def test_update_submodules():",
"def check_submodules():\n ok = True\n\n for submodule in submodules.status().values():\n if submodule['status'] is None:\n cli.log.error('Submodule %s has not yet been cloned!', submodule['name'])\n ok = False\n elif not submodule['status']:\n cli.log.error('Submodule %s is not up to date!', submodule['name'])\n ok = False\n\n return ok",
"def recursive_pull(URL, sha, workdir, depth):\n # some helpful locations\n final_location = workdir + 'final/'\n tarfile_path = workdir + 'current.tar.gz'\n # From the URL, determine which instance it is on\n if 'github.com' in URL:\n gh = github_external\n else:\n gh = github_internal\n owner, repo = parse_owner(URL)\n # This is going to be the most problematic section, you could get problems\n # because:\n # -The github instance you were talking to is unreachable/offline\n # -Mis spellings of the owner or repository\n # -The owner or repository do not exist\n # -They do exist, but the specified SHA does not\n try:\n repository = gh.repository(owner=owner, repository=repo)\n tarball = repository.archive(format=\"tarball\",\n path=tarfile_path, ref=sha)\n tar = tarfile.open(mode='r|gz', name=tarfile_path)\n tar.extractall(workdir)\n tar.close()\n except:\n raise NameError(\"Error pulling \" + URL + \" \" + sha + \"\\n\" +\n \"Ensure that the URL is properly formated, and that \" +\n \"the specified SHA exists.\")\n\n # move it into the correct place\n internal_name = workdir + owner + '-' + repo + '-' + sha[:7] + '/'\n os.rename(internal_name, final_location + depth)\n # Now get and parse the .submodules file\n modulefile = final_location + depth + '.gitmodules'\n if not os.path.exists(modulefile):\n # Then there is no submodules here\n return\n moduleparser = configparser.ConfigParser()\n moduleparser.read(modulefile)\n for submodule in moduleparser.sections():\n # determine the SHA for this submodule\n content = repository.contents(\n ref=sha,\n path=moduleparser[submodule]['path']\n )\n submodulesha = content.to_json()['sha']\n recursive_pull(\n moduleparser[submodule]['url'],\n submodulesha,\n workdir,\n depth + moduleparser[submodule]['path'] + '/'\n )",
"def update_update_strategy(path):\n with utils.cd(path):\n base_cmd = '/usr/bin/git -C %s config ' % path\n base_cmd += 'submodule.$name.update rebase'\n cmd = \"/usr/bin/git submodule foreach --recursive '%s'\" % base_cmd\n subprocess.call(cmd, shell=True)",
"def checkout_source_tree(\n ctx,\n target_remote,\n target_ref,\n target_commit,\n clean,\n ignore_initial_submodule_checkout_failure,\n):\n\n workspace = ctx.obj.workspace\n # Check out specified repository\n click.echo(\n checkout_tree(\n workspace,\n target_remote,\n target_ref,\n commit=target_commit,\n clean=clean,\n allow_submodule_checkout_failure=ignore_initial_submodule_checkout_failure,\n )\n )\n\n try:\n ctx.obj.config = read_config(determine_config_file_name(ctx), ctx.obj.volume_vars)\n if clean:\n with git.Repo(workspace) as repo:\n clean_repo(repo, ctx.obj.config['clean'])\n git_cfg = ctx.obj.config['scm']['git']\n except (click.BadParameter, KeyError, TypeError, OSError, IOError, YAMLError):\n return\n\n if 'worktrees' in git_cfg:\n with git.Repo(workspace) as repo:\n\n worktrees = git_cfg['worktrees'].items()\n fetch_result = repo.remotes.origin.fetch([ref for subdir, ref in worktrees])\n\n worktrees = dict((subdir, fetchinfo.ref) for (subdir, refname), fetchinfo in zip(worktrees, fetch_result))\n log.debug(\"Worktree config: %s\", worktrees)\n\n for subdir, ref in worktrees.items():\n try:\n os.remove(workspace / subdir / '.git')\n except (OSError, IOError):\n pass\n clean_output = repo.git.clean('-xd', subdir, force=True)\n if clean_output:\n log.info('%s', clean_output)\n\n repo.git.worktree('prune')\n\n for subdir, ref in worktrees.items():\n repo.git.worktree('add', subdir, ref.commit)\n\n if 'remote' not in git_cfg and 'ref' not in git_cfg:\n return\n\n code_dir_re = re.compile(r'^code(?:-\\d+)$')\n code_dirs = sorted(Path(dir) for dir in os.listdir(workspace) if code_dir_re.match(dir))\n for dir in code_dirs:\n try:\n with git.Repo(workspace / dir):\n pass\n except (git.InvalidGitRepositoryError, git.NoSuchPathError):\n pass\n else:\n code_dir = dir\n break\n else:\n seq = 0\n while True:\n dir = Path('code' if seq == 0 else f\"code-{seq:03}\")\n seq += 1\n if dir not in code_dirs:\n code_dir = dir\n break\n\n # Check out configured repository and mark it as the code directory of this one\n ctx.obj.code_dir = workspace / code_dir\n with git.Repo(workspace) as repo, repo.config_writer() as cfg:\n cfg.remove_section('hopic.code')\n cfg.set_value('hopic.code', 'dir', str(code_dir))\n cfg.set_value('hopic.code', 'cfg-remote', target_remote)\n cfg.set_value('hopic.code', 'cfg-ref', target_ref)\n cfg.set_value('hopic.code', 'cfg-clean', str(clean))\n\n checkout_tree(\n ctx.obj.code_dir,\n git_cfg.get(\"remote\", target_remote),\n git_cfg.get(\"ref\", target_ref),\n clean=clean,\n clean_config=ctx.obj.config[\"clean\"],\n )",
"def update():\n import os\n os.chdir(path.scriptdir)\n tools.run(\"git\", \"pull\")",
"def gitext_foreach(recursive, subcommand):\n from git_externals import externals_sanity_check, get_repo_name, foreach_externals_dir, root_path\n\n externals_sanity_check()\n\n def run_command(rel_url, ext_path, targets):\n try:\n info(\"External {}\".format(get_repo_name(rel_url)))\n output = decode_utf8(command(*subcommand))\n info(\"Ok: CWD: {}, cmd: {}\".format(os.getcwd(), subcommand))\n echo(output)\n except CommandError as err:\n info(\"Command error {} CWD: {}, cmd: {}\".format(err, os.getcwd(), subcommand))\n error(str(err), exitcode=err.errcode)\n\n foreach_externals_dir(root_path(), run_command, recursive=recursive)",
"def update_code(ctx, tag):\r\n with ctx.lcd(settings.SRC_DIR):\r\n ctx.local('git fetch')\r\n ctx.local('git checkout -f %s' % tag)\r\n ctx.local('git submodule sync')\r\n ctx.local('git submodule update --init --recursive')",
"def reload_all_modules ():\n\n debug (\"In reload_all_modules\")\n msg = \"Warning: This reloads all the currently loaded modules. \"\\\n \"This is a feature useful only for developers. You _might_ \"\\\n \"see funny behaviour for already instantiated objects.\\n\\n\"\\\n \"Are you sure you want to do this?\"\n if not tkMessageBox.askyesno (\"Warning\", msg):\n return\n\n my_dir = os.path.dirname (os.path.abspath (__file__)) \n\n dont_load = list (sys.builtin_module_names)\n\n Common.state.busy ()\n for key in sys.modules.keys ():\n if key not in dont_load:\n mod = sys.modules[key]\n if mod and hasattr (mod, '__file__'):\n p = os.path.abspath (mod.__file__)\n if os.path.commonprefix ([p, my_dir]) == my_dir:\n debug (\"Reloading %s\"%key)\n reload (mod)\n Common.state.idle ()",
"def test_sha() -> None:\n submodules = get_submodules_config()\n\n for name, variables in submodules.items():\n sbom_hash = variables.get('sbom-hash')\n if not sbom_hash:\n continue\n module_path = variables.get('path')\n if not module_path:\n continue\n output = run_cmd(['git', 'ls-tree', 'HEAD', module_path])\n if not output:\n continue\n module_hash = output.split()[2]\n msg = (f'Submodule \\\"{name}\\\" SHA \\\"{module_hash}\\\" in git '\n f'tree does not match SHA \\\"{sbom_hash}\\\" recorded in .gitmodules. '\n f'Please update \\\"sbom-hash\\\" in .gitmodules for \\\"{name}\\\" '\n f'and also please do not forget to update version and other submodule '\n f'information if necessary. It is important to keep this information '\n f'up-to-date for SBOM generation.')\n assert module_hash == sbom_hash, msg",
"def update(ctx, subworkflow, dir, force, prompt, sha, all, preview, save_diff, update_deps):\n from nf_core.subworkflows import SubworkflowUpdate\n\n try:\n subworkflow_install = SubworkflowUpdate(\n dir,\n force,\n prompt,\n sha,\n all,\n preview,\n save_diff,\n update_deps,\n ctx.obj[\"modules_repo_url\"],\n ctx.obj[\"modules_repo_branch\"],\n ctx.obj[\"modules_repo_no_pull\"],\n )\n exit_status = subworkflow_install.update(subworkflow)\n if not exit_status and all:\n sys.exit(1)\n except (UserWarning, LookupError) as e:\n log.error(e)\n sys.exit(1)",
"def test_new_submodules(testing_new_submodules):\n new_submodules = compute_build_graph.git_changed_recipes()\n assert 'conda-env-feedstock' in new_submodules\n assert 'docker-images' not in new_submodules",
"def _update_list_of_modules(cls):\n import pkgutil\n cls.list_of_modules = []\n for item in pkgutil.iter_modules():\n cls.list_of_modules.append(item[1])",
"def enablemoddependsrecursive(self):\n pass",
"def _update_gitrepo(self, session, env, repodict):\n # Calculate md5 of merge base diff\n merge_base_diff = repodict['merge_base']['diff']\n if not merge_base_diff:\n merge_base_diff_md5 = None\n else:\n m = hashlib.md5()\n m.update(''.join(merge_base_diff).encode('utf-8'))\n merge_base_diff_md5 = m.hexdigest()\n\n # Calculate md5 of working_tree_diff\n working_tree_diff = repodict['working_tree']['diff']\n if not working_tree_diff:\n working_tree_diff_md5 = None\n else:\n m = hashlib.md5()\n m.update(''.join(working_tree_diff).encode('utf-8'))\n working_tree_diff_md5 = m.hexdigest()\n\n # Make instance of the git repo\n gitrepo = GitRepoEntity(\n environment=env.identity,\n active_branch_name=repodict.get('active_branch_name'),\n head_sha=repodict.get('head_sha'),\n is_detached=repodict.get('is_detached'),\n merge_base_name=repodict['merge_base']['name'],\n merge_base_diff_md5=merge_base_diff_md5,\n path=repodict.get('path'),\n working_tree_dirty=repodict['working_tree']['is_dirty'],\n working_tree_diff_md5=working_tree_diff_md5\n )\n gitrepo.update(session, self.time_in_ms)\n\n # Update all remotes.\n remotes = []\n for name, urls in repodict.get('remotes', {}).items():\n remotes.append(self._update_remote(session, gitrepo, name, urls))\n gitrepo.remotes.update(session, remotes, self.time_in_ms)\n\n # Update untracked files\n untracked = []\n for path in repodict['working_tree'].get('untracked_files', []):\n untracked.append(self._update_untracked_file(session, path))\n\n gitrepo.untrackedfiles.update(session, untracked, self.time_in_ms)\n return gitrepo",
"def quick_update_tree(self):\n self.treeTests.model().update_all()",
"def update(ctx, tool, dir, force, prompt, sha, all, preview, save_diff, update_deps):\n from nf_core.modules import ModuleUpdate\n\n try:\n module_install = ModuleUpdate(\n dir,\n force,\n prompt,\n sha,\n all,\n preview,\n save_diff,\n update_deps,\n ctx.obj[\"modules_repo_url\"],\n ctx.obj[\"modules_repo_branch\"],\n ctx.obj[\"modules_repo_no_pull\"],\n )\n exit_status = module_install.update(tool)\n if not exit_status and all:\n sys.exit(1)\n except (UserWarning, LookupError) as e:\n log.error(e)\n sys.exit(1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return whether the light supports brightness.
|
def supports_brightness(self):
return self.dimmer.initialized
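A minimal caller-side sketch, assuming a `light` instance of the class above; whether the check is exposed as a plain method or wrapped in a property is not visible here, and `set_brightness` is a hypothetical setter used only for illustration:

# Illustrative only: guard dimming behind the capability check above.
if light.supports_brightness():   # drop the parentheses if it is exposed as a @property
    light.set_brightness(128)     # hypothetical setter, not shown in the source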
|
[
"def supports_dimmer(self) -> bool:\n return bool(self.supported_features & SUPPORT_BRIGHTNESS)",
"def brightness(self) -> int:\n light_brightness = self._device.light_brightness * 16\n if light_brightness == 256:\n light_brightness = 255\n return int(light_brightness)",
"def light_detection(self) -> bool:\n return self.details['light_detection_switch']",
"def getLightBrightness(self):\r\n return self.lights.sensors[0].getBrightPct()",
"def brightness(self) -> float:\n raise NotImplementedError()",
"def brightness(self):\n return float(self.visa_ask(':WBR?') / 100.0)",
"async def get_brightness(self) -> int:\n brightness = await self.__send_request(EP_BRIGHTNESS)\n return int(brightness[\"value\"]) if brightness[\"mode\"] == \"enabled\" else 100",
"def test_supports_dimm_yes(self):\n xknx = XKNX()\n light = Light(\n xknx,\n \"Diningroom.Light_1\",\n group_address_switch=\"1/6/4\",\n group_address_brightness=\"1/6/6\",\n )\n self.assertTrue(light.supports_brightness)",
"def is_on(self) -> bool:\n return self._device.light_on",
"def light_detection_state(self) -> bool:\n return self.details['environment_light_state']",
"def read_brightness(self):\n return self._send(READ_BRIGHTNESS)[0]",
"async def test_light_set_brightness(hass: HomeAssistant, init_integration) -> None:\n init_integration\n registry = er.async_get(hass)\n\n entity_id = \"light.lightbulb\"\n state = hass.states.get(entity_id)\n assert state\n assert state.state == STATE_ON\n assert state.attributes.get(\"friendly_name\") == \"lightbulb\"\n\n entry = registry.async_get(entity_id)\n assert entry\n assert (\n entry.unique_id\n == \"3WRRJR6RCZQZSND8VP0YTO3YXCSOFPKBMW8T51TU-LQ*JHJZIZ9ORJNHB7DZNBNAOSEDECVTTZ48SABTCA3WA3M\"\n )\n\n await hass.services.async_call(\n LIGHT_DOMAIN,\n SERVICE_TURN_ON,\n {ATTR_ENTITY_ID: [entity_id], ATTR_BRIGHTNESS: 255},\n blocking=True,\n )\n\n state = hass.states.get(entity_id)\n assert state\n assert state.state == STATE_ON\n assert int(state.attributes[ATTR_BRIGHTNESS]) == 0",
"def is_on(self):\n return self._light.on",
"def light(self):\n return self._light",
"def is_light():\n # load in data directory to avoid redownloading\n loader = Loader('~/skyfield_data')\n ts = loader.timescale()\n e = loader('de421.bsp')\n\n # set current location (melbourne does not appear in the default list)\n melbourne = api.Topos('37.951910 S', '145.152080 E')\n # get current time in UTC format\n now = datetime.datetime.utcnow()\n now = now.replace(tzinfo=utc)\n # set the interval for now and 24 hours from now\n t0 = ts.utc(now)\n t1 = ts.utc(now + timedelta(hours=24))\n\n # find the times and types of event (sunrise/sunset)\n t, y = almanac.find_discrete(t0, t1, almanac.sunrise_sunset(e, melbourne))\n\n #y[0] = True for sunrise (which means it is currently dark)\n\n light = not y[0]\n\n return light",
"def get_brightness(self):\n return int(requests.get(url+'/groups/{}'.format(self.group_number), verify=False).json()['action']['bri'])",
"def red_brightness(self):\n return self._bcr",
"def enable_white_mode(self) -> bool:\n return self.set_status(brightness=100)",
"def get_bat_color():\n try:\n v = os.read_battery()\n if v > 3.8:\n return battery_color_good\n if v > 3.6:\n return battery_color_ok\n return battery_color_bad\n except AttributeError:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check answer for errors.

VALIDATION RULES
proper columns
contiguous intragroup order, starting from 1
all intragroup ordered groups must be fully specified
groups P and L must be at first and last(s) positions respectively
P group must have only 1 member, no intragroup order
L group can never have intragroup order
only 1 GroupOrder maximum per group
columns should be labelled Code, Grouping, IntraGroupOrder, GroupOrder
no duplicate codes

ASSUMPTIONS
No principal procedure
0.5 marks for partial intragroup ordering
|
def errorCheckMaster( self, answer):
self.errorCheckSubmission( answer )
for colName in ["Grouping", "IntraGroupOrder", "GroupOrder"]:
assert colName in answer.columns, "We need a %s column in the master spreadsheet" % colName
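A minimal sketch of what the column check above expects, using a toy pandas DataFrame; `marker` stands for a hypothetical instance of the class that defines these checks, and the cell values are illustrative since only the column names matter here:

import pandas as pd

# The master spreadsheet must carry all of these columns to pass both checks.
answer = pd.DataFrame({
    "Code":            ["I10", "E11.9"],
    "Convention":      ["", ""],
    "Grouping":        ["P", "A"],
    "IntraGroupOrder": [None, 1],
    "GroupOrder":      [None, "1"],
})
marker.errorCheckMaster(answer)   # passes; dropping any required column raises AssertionError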
|
[
"def _verify_groups_syntax(groups):\n num_errors = 0\n num_warnings = 0\n \n for key, value in groups.items():\n if \"instances\" in value: # Templates\n if type(value[\"instances\"]) != int:\n logging.error(\"Instances must be an Integer for group %s\", key)\n num_errors += 1\n if \"ad-group\" in value:\n if type(value[\"ad-group\"]) != str:\n logging.error(\"AD group must be a string\")\n num_errors += 1\n elif \"filename\" in value:\n e, w = _check_group_file(value[\"filename\"])\n num_errors += e\n num_warnings += w\n else:\n logging.error(\"Invalid user specification method for template group %s\", key)\n num_errors += 1\n else: # Non-templates\n if \"ad-group\" in value:\n if type(value[\"ad-group\"]) != str:\n logging.error(\"AD group must be a string\")\n num_errors += 1\n elif \"filename\" in value:\n e, w = _check_group_file(value[\"filename\"])\n num_errors += e\n num_warnings += w\n elif \"user-list\" in value:\n if type(value[\"user-list\"]) is not list:\n logging.error(\"Username specification must be a list for group %s\", key)\n num_errors += 1\n else:\n logging.error(\"Invalid user specification method for group %s\", key)\n num_errors += 1\n return num_errors, num_warnings",
"def errorCheckSubmission( self, answer):\n \n for colName in [\"Code\", \"Convention\", \"GroupOrder\"]:\n assert colName in answer.columns, \"We need a %s column in the master spreadsheet\" % colName",
"def validate_ncs_phil_groups(\n self,\n pdb_h,\n ncs_phil_groups,\n asc,\n validate_user_supplied_groups=True):\n def show_particular_ncs_group(ncs_gr):\n p_obj = ncs_group_master_phil.extract()\n p_obj.ncs_group[0].reference = ncs_gr.reference\n p_obj.ncs_group[0].selection = ncs_gr.selection\n to_show = ncs_group_master_phil.format(python_object=p_obj)\n to_show.show(out=self.log)\n\n def show_empty_selection_error_message(ng, where=\"reference\"):\n print(\" Missing or corrupted %s field:\" % where, file=self.log)\n print(\" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\", file=self.log)\n print(\" _ALL_ user-supplied groups will be ignored\", file=self.log)\n print(\" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\", file=self.log)\n show_particular_ncs_group(ng)\n\n # Massage NCS groups\n # return ncs_phil_groups\n validated_ncs_groups = []\n if ncs_phil_groups is None:\n return None\n if(ncs_phil_groups is not None and len(ncs_phil_groups)==0):\n # print \"exiting here\"\n ncs_phil_groups=None\n return None\n if (ncs_phil_groups is not None and\n len(ncs_phil_groups)==1 and\n ncs_phil_groups[0].reference is None and\n len(ncs_phil_groups[0].selection) == 1 and\n ncs_phil_groups[0].selection[0] is None):\n # This is empty ncs_group definition somehow creeped into here.\n # Not a big deal.\n return None\n if(ncs_phil_groups is not None):\n print(\"Validating user-supplied NCS groups...\", file=self.log)\n empty_cntr = 0\n for ng in ncs_phil_groups:\n if ng.reference is None or len(ng.reference.strip())==0:\n show_empty_selection_error_message(ng, where=\"reference\")\n empty_cntr += 1\n for s in ng.selection:\n if s is None or len(s.strip())==0:\n show_empty_selection_error_message(ng, where=\"selection\")\n empty_cntr += 1\n if(empty_cntr>0):\n print(\" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\", file=self.log)\n print(\" _ALL_ user-supplied groups are ignored.\", file=self.log)\n print(\" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\", file=self.log)\n ncs_phil_groups=None\n return None\n # Verify NCS selections\n msg=\"Empty selection in NCS group definition: %s\"\n if not validate_user_supplied_groups:\n for ncs_group in ncs_phil_groups:\n print(\" Copying user-supplied groups without validation:\", file=self.log)\n show_particular_ncs_group(ncs_group)\n m_isel = asc.iselection(ncs_group.reference)\n ng = NCS_restraint_group(\n master_iselection = m_isel,\n str_selection = ncs_group.reference)\n for s_string in ncs_group.selection:\n c_isel = asc.iselection(s_string)\n c = NCS_copy(\n copy_iselection=c_isel,\n rot = None,\n tran = None,\n str_selection=s_string,\n rmsd=999)\n ng.append_copy(c)\n self.ncs_restraints_group_list.append(ng)\n validated_ncs_groups.append(ng)\n master_sel = flex.bool(pdb_h.atoms_size(), False)\n for gr in self.ncs_restraints_group_list:\n master_sel.set_selected(gr.master_iselection, True)\n # asu_sites = pdb_h.atoms().extract_xyz().select(master_sel)\n # self.ncs_restraints_group_list._show(hierarchy=pdb_h,brief=False)\n # STOP()\n self.ncs_restraints_group_list.recalculate_ncs_transforms(asu_site_cart=pdb_h.atoms().extract_xyz())\n # validated_ncs_groups.recalculate_ncs_transforms(asu_site_cart=pdb_h.atoms().extract_xyz())\n\n else:\n for ncs_group in ncs_phil_groups:\n print(\" Validating:\", file=self.log)\n show_particular_ncs_group(ncs_group)\n selection_list = []\n # first, check for selections producing 0 atoms\n user_original_reference_iselection = None\n user_original_copies_iselections = []\n n_atoms_in_user_ncs = 0\n s_string = 
ncs_group.reference\n if s_string is not None:\n sel = asc.iselection(s_string)\n selection_list.append(s_string)\n n_atoms_in_user_ncs = sel.size()\n if(n_atoms_in_user_ncs==0):\n raise Sorry(msg%s_string)\n user_original_reference_iselection = sel\n for s_string in ncs_group.selection:\n if(s_string is not None):\n sel = asc.iselection(s_string)\n selection_list.append(s_string)\n n_copy = sel.size()\n if(n_copy==0):\n raise Sorry(msg%s_string)\n user_original_copies_iselections.append(sel)\n #\n # The idea for user's groups is to pick them one by one,\n # select only reference and selections from the model,\n # If there are multiple chains in ref or selection -\n # combine them in one chain,\n # save atom original i_seq in atom.tmp\n # run searching procedure for the resulting hierarchy\n # if the user's selections were more or less OK - there should be\n # one group, get atom.tmp values for the selected atoms and using\n # original hierarchy convert them into string selections when needed.\n # If multiple groups produced - use them, most likely the user\n # provided something really wrong.\n # Need to pay some attention to what came out as master and what order\n # of references.\n #\n combined_h = iotbx.pdb.hierarchy.root()\n combined_h.append_model(iotbx.pdb.hierarchy.model())\n all_c_ids = all_chain_ids()\n cur_ch_id_n = 0\n master_chain = self.pdb_h_into_chain(pdb_h.select(\n user_original_reference_iselection),ch_id=all_c_ids[cur_ch_id_n])\n # print \"tmp in master chain:\", list(master_chain.atoms().extract_tmp_as_size_t())\n cur_ch_id_n += 1\n combined_h.only_model().append_chain(master_chain)\n\n # combined_h = iotbx.pdb.hierarchy.new_hierarchy_from_chain(master_chain)\n # print \"tmp combined_h1:\", list(combined_h.atoms().extract_tmp_as_size_t())\n for uocis in user_original_copies_iselections:\n # print \"adding selection to combined:\", s_string\n sel_chain = self.pdb_h_into_chain(pdb_h.select(\n uocis),ch_id=all_c_ids[cur_ch_id_n])\n combined_h.only_model().append_chain(sel_chain)\n cur_ch_id_n += 1\n\n combined_h.reset_atom_i_seqs()\n # combined_h.write_pdb_file(\"combined_in_validation.pdb\")\n # print \"tmp:\", list(combined_h.atoms().extract_tmp_as_size_t())\n\n\n # XXX Here we will regenerate phil selections using the mechanism\n # for finding NCS in this module. 
Afterwards we should have perfectly\n # good phil selections, and later the object will be created from\n # them.\n # Most likely this is not the best way to validate user selections.\n\n # selection_list\n nrgl_fake_iseqs = ncs_search.find_ncs_in_hierarchy(\n ph=combined_h,\n chains_info=None,\n chain_max_rmsd=max(self.params.chain_max_rmsd, 10.0),\n log=None,\n chain_similarity_threshold=min(self.params.chain_similarity_threshold, 0.5),\n residue_match_radius=max(self.params.residue_match_radius, 1000.0))\n # hopefully, we will get only 1 ncs group\n # ncs_group.selection = []\n if nrgl_fake_iseqs.get_n_groups() == 0:\n # this means that user's selection doesn't match\n # print \"ZERO NCS groups found\"\n rejected_msg = \" REJECTED because copies don't match good enough.\\n\" + \\\n \"Try to revise selections or adjust chain_similarity_threshold or \\n\" + \\\n \"chain_max_rmsd parameters.\"\n print(rejected_msg, file=self.log)\n continue\n # User triggered the fail of this assert!\n selections_were_modified = False\n #\n for ncs_gr in nrgl_fake_iseqs:\n new_gr = ncs_gr.deep_copy()\n new_ncs_group = ncs_group_master_phil.extract().ncs_group[0]\n for i, isel in enumerate(ncs_gr.get_iselections_list()):\n m_all_isel = isel.deep_copy()\n original_m_all_isel = combined_h.atoms().\\\n select(m_all_isel).extract_tmp_as_size_t()\n if n_atoms_in_user_ncs > original_m_all_isel.size():\n selections_were_modified = True\n # print \"new isels\", list(m_all_isel)\n # print \"old isels\", list(original_m_all_isel)\n all_m_select_str = selection_string_from_selection(\n pdb_h=pdb_h,\n selection=original_m_all_isel,\n chains_info=self.chains_info,\n atom_selection_cache=asc)\n # print \"all_m_select_str\", all_m_select_str\n if i == 0:\n new_gr.master_iselection = original_m_all_isel\n new_gr.master_str_selection = all_m_select_str\n new_ncs_group.reference=all_m_select_str\n else:\n new_gr.copies[i-1].iselection = original_m_all_isel\n new_gr.copies[i-1].str_selection = all_m_select_str\n new_ncs_group.selection.append(all_m_select_str)\n self.ncs_restraints_group_list.append(new_gr)\n new_ncs_group.selection = new_ncs_group.selection[1:]\n validated_ncs_groups.append(new_ncs_group)\n # Finally, we may check the number of atoms in selections that will\n # go further.\n # XXX Deleted, because this is taken care of previously\n ok_msg = \" OK. All atoms were included in\" +\\\n \" validated selection.\\n\"\n modified_msg = \" MODIFIED. Some of the atoms were excluded from\" + \\\n \" your selection.\\n The most common reasons are:\\n\" + \\\n \" 1. Missing residues in one or several copies in NCS group.\\n\" + \\\n \" 2. Presence of alternative conformations (they are excluded).\\n\" + \\\n \" 3. Residue mismatch in requested copies.\\n\" + \\\n \" Please check the validated selection further down.\\n\"\n if selections_were_modified:\n print(modified_msg, file=self.log)\n self.phil_groups_modified = True\n else:\n print(ok_msg, file=self.log)\n # print \"len(validated_ncs_groups)\", len(validated_ncs_groups)\n # for ncs_gr in validated_ncs_groups:\n # print \" reference:\", ncs_gr.reference\n # print \" selection:\", ncs_gr.selection\n self.finalize_nrgl()\n return validated_ncs_groups",
"def check_word_group_validity(tokens_word_group_inside):\n casegen_count = tokens_word_group_inside.count(CASE_GEN_SYM)\n if casegen_count > 1:\n raise SyntaxError(\"There can be only one case generation modifier \"+\n \"in a word group.\")\n if casegen_count == 1 and tokens_word_group_inside.index(CASE_GEN_SYM) != 0:\n raise SyntaxError(\"Case generation modifiers have to be at the start \"+\n \"of a word group.\")\n\n variation_count = tokens_word_group_inside.count(VARIATION_SYM)\n if variation_count > 0:\n raise SyntaxError(\"Word groups cannot take variation modifiers.\")\n\n argument_count = tokens_word_group_inside.count(ARG_SYM)\n if argument_count > 0:\n raise SyntaxError(\"Word groups cannot take arguments.\")\n\n randgen_count = tokens_word_group_inside.count(RAND_GEN_SYM)\n if randgen_count > 1:\n raise SyntaxError(\"There can be only one random generation modifier \"+\n \"per word group.\")\n percentgen_count = tokens_word_group_inside.count(PERCENT_GEN_SYM)\n if percentgen_count > 1:\n raise SyntaxError(\"There can be only one percentage for generation \"+\n \"modifier per word group.\")\n if percentgen_count == 1 and randgen_count == 0:\n raise SyntaxError(\"There cannot be a percentage for generation \"+\n \"modifier if there is no random generation modifier \"+\n \"(did you mean to escape '\"+PERCENT_GEN_SYM+\"'?)\")\n if percentgen_count == 1:\n index_randgen = tokens_word_group_inside.index(RAND_GEN_SYM)\n index_percentgen = tokens_word_group_inside.index(PERCENT_GEN_SYM)\n if index_randgen > index_percentgen:\n raise SyntaxError(\"A percentage for generation modifier must \"+\n \"always be right after the random generation \"+\n \"modifier.\")\n if index_percentgen == len(tokens_word_group_inside)-1:\n raise SyntaxError(\"No percentage found after the special symbol \"+\n \"for percentage modifier.\")\n try:\n percentgen = int(tokens_word_group_inside[index_percentgen+1])\n except ValueError:\n raise SyntaxError(\"Percentage for generation modifiers need to be \"+\n \"an integer.\")\n if percentgen < 0 or percentgen > 100:\n raise SyntaxError(\"Percentage for generation modifiers need to be \"+\n \"between 0 and 100.\")",
"def markUnorderedGroups(self,submission):\n\n maGroups= self.ma[ eAnd( isblank(self.ma.IntraGroupOrder), notblank(self.ma.Grouping)) ].Grouping.unique()\n\n # P and L groups are taken care of by absoluteOrdering routine. Different marks too\n #maGroups = set(maGroups).difference( set(\"P\", \"L\"))\n label='UnorderedGroups'\n submission = self.addColumn(submission, label )\n submission.loc[:,label]=None\n for group in maGroups:\n # take the group slice\n magSet = set( self.ma[ self.ma.Grouping==group].Code)\n subSlice = submission[ submission.Grouping==group].Code\n subSet = set( subSlice )\n nCorrect=len( magSet & subSet )\n submission.loc[ submission.Code.isin( magSet ), label] = group\n if group==\"P\":\n if nCorrect == len(magSet ) : # all correct, principal\n self.addNote( \"Correct principal diagnosis, 1 mark\" )\n self.addMark(\"Principal Diagnosis\", 1)\n self.addError( {\n 'AOI': 'PrincipalCode', \n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n 'Code': \"\", \n 'IsCorrect': \"True\" \n })\n else:\n self.addNote( \"Incorrect principal diagnosis, answer is %s, you had %s \" % ( pprintSlice(magSet), pprintSlice(subSet)) )\n self.addError( {\n 'AOI': 'PrincipalCode', \n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n 'Code': \"\", \n 'IsCorrect': \"False\" \n })\n next\n\n if group==\"L\" : # Last Codes \n if len(subSlice) > 0 and max( subSlice.index ) == max(submission.index ):\n self.addNote( \"Correct final codes, 0.5 marks\" )\n self.addMark( \"Final Code(s) Group\", 0.5 )\n self.addError( {\n 'AOI': 'LastCode', \n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n 'Code': \"\", \n 'IsCorrect': \"True\" \n })\n else:\n self.addNote( \"Incorrect final code(s), should be %s\" % ( pprintSlice(magSet)) )\n self.addError( { 'AOI': 'LastCode', \n 'Code': \"\", \n 'IsCorrect': \"False\" ,\n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n })\n\n # we don't need to process the group if the master says it is only one code long\n if len( magSet ) == 1:\n next\n\n\n\n if nCorrect == len(magSet ) : # all correct\n self.addNote( \"Unordered Group %s, %s entirely correct, 0.5 marks\" % (group, pprintSlice(magSet)) )\n self.addMark(\"Unordered Group %s\" % group, 0.5)\n self.addError( { 'AOI': 'UnorderedGroup', \n 'Code': \"\", \n 'IsCorrect': \"True\" ,\n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n })\n elif (nCorrect > 0 ) :\n self.addNote( \"Unordered Group %s partially correct, answer is %s, you had %s, 0.5 marks \" \n % (group, pprintSlice(magSet), pprintSlice(subSet)) )\n self.addMark(\"Unordered Group %s\" % group, 0.5)\n self.addError( { 'AOI': 'UnorderedGroup', \n 'Code': \"\", \n 'IsCorrect': \"False\" ,\n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n })\n else:\n self.addNote( \"Unordered Group %s, %s entirely missing\" % (group, pprintSlice(magSet)) )\n self.addError( { 'AOI': 'UnorderedGroup', \n 'Code': \"\", \n 'IsCorrect': \"False\" ,\n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': \"\",\n })\n\n return submission",
"def validation_fields_check(self, sheet):\n # Below value indicates column I in excel\n column_no = 8\n error = ''\n\n if self.year == '2019':\n # each element in below indicates range of cell addresses i.e. 154, 164 indicates all cells from 155 to 165.\n # Cell addresses in this module start at 0. hence we are referring to I155, I156 etc. here\n row_list = [(180, 180, 'Too Many DAG Members Validation Check'),\n (182, 182, 'DB Copy Count Validation Check'),\n (184, 184, 'Calculated Max DB Size Not Zero Check'),\n (185, 185, 'Mailbox Size Limit Not Zero Check'),\n (187, 187, 'Disk Count Validation Check'),\n (188, 188, 'Invalid Active/Active DAG'),\n (193, 193, 'Calculator Validation Check')]\n else:\n row_list = [(155, 157, 'Too Many DAG Members Validation Check & DB Copy Count Validation Check & '\n 'Mailbox Size Limit Not Zero Check'),\n (160, 163, 'Calculator Validation Check & Calculated Max DB Size Not Zero Check & '\n 'Disk Count Validation Check & Invalid Active/Active DAG'),\n (181, 188, 'JBOD Validation checks section')]\n\n for item in row_list:\n\n for i in range(item[0], item[1]+1):\n\n if sheet.cell(i, column_no).value:\n error += \"\\\"%s\\\" in \\\"%s\\\" sheet has failed.\\n\" % (sheet.cell(i, column_no-1).value, sheet.name)\n\n self.errors = error",
"def check_input_validation(df):\n print(\"\\nChecking the input data validation.............................\")\n invalid_fields = []\n\n isnull_sum = df.isnull().sum()\n for index, val in isnull_sum.iteritems():\n if val > 0:\n invalid_fields.append(index)\n if len(invalid_fields) > 0:\n raise ValueError(\"The NaN missing values still exist in fields: \" + str(invalid_fields))\n\n # TODO: Why not working properly??\n isreal_sum = df.applymap(np.isreal).sum()\n for index, val in isreal_sum.iteritems():\n if val < len(df):\n invalid_fields.append(index)\n # if len(invalid_fields) > 0:\n # raise ValueError(\"The non-numerical values still exist in fields: \" + str(invalid_fields))\n\n return True",
"def markGroupOrder(self,submission):\n\n \"\"\" make sure that there exist groupOrders in the answer\"\"\"\n groupOrder = self.ma.ix[ notblank(self.ma.GroupOrder),(\"Grouping\",\"GroupOrder\")]\n if len( groupOrder ) == 0:\n return submission\n\n \"\"\" find out where these groups live in the submission:\n create data frame with rows Grouping, GroupOrder, and mindex, maxdex \n 1) find all the rows that relate to the answer grouping, and their minimum and maximum index (mindex)\n \"\"\"\n submissionGroupPos = submission[ submission.Grouping.isin(groupOrder.Grouping)]\n submissionGroupPos.loc[:,\"index\"]=submissionGroupPos.index\n submissionGroupPosMin = pd.DataFrame(submissionGroupPos.groupby(\"Grouping\")[\"index\"].agg(np.min))\n submissionGroupPosMin[\"mindex\"] = submissionGroupPosMin[\"index\"]\n submissionGroupPosMax = pd.DataFrame(submissionGroupPos.groupby(\"Grouping\")[\"index\"].agg(np.max))\n submissionGroupPosMax[\"maxdex\"] = submissionGroupPosMax[\"index\"]\n\n # error check to make sure we have got Min and Max Grouping columns\n if not 'Grouping' in submissionGroupPosMin.columns:\n submissionGroupPosMin['Grouping'] = submissionGroupPosMin.index\n if not 'Grouping' in submissionGroupPosMax.columns:\n submissionGroupPosMax['Grouping']=submissionGroupPosMax.index\n groupOrder=groupOrder.merge(submissionGroupPosMin, how='left', on=\"Grouping\")\n groupOrder=groupOrder.merge(submissionGroupPosMax, how='left', on=\"Grouping\").sort(columns=\"mindex\")\n\n \n groupOrder.loc[ : , \"Consecutive\"] = False\n i=0\n for go in groupOrder.GroupOrder:\n if str(go).endswith(\"N\"):\n groupOrder.loc[ i, \"Consecutive\"] = True\n groupOrder.loc[ i, \"GroupOrder\"] = groupOrder.loc[ i, \"GroupOrder\"][0:-1] \n i = i + 1\n\n\n \"\"\" go through each group in mindex order, make sure that \n - all the groups exist\n - the groups are consecutive (when the first group ends in an N, and \n - the GroupOrder ascends\n\n \"\"\"\n if ( \n all( not np.isnan( i ) for i in groupOrder.ix[:, \"mindex\"] ) # pylint: disable=E1101 \n and all( not groupOrder.ix[i,\"Consecutive\"] \n or groupOrder.ix[i, \"maxdex\"]+1 == groupOrder.ix[i+1, \"mindex\"]\n for i in range( len(groupOrder) -1 )\n )\n\n and all( groupOrder.ix[i, \"GroupOrder\"] <= groupOrder.ix[i+1, \"GroupOrder\"] \n for i in range( len(groupOrder) -1 )\n )\n ):\n self.addNote( \"Correct ALL group ordering, 0.5 marks\" )\n self.addMark(\"All Groups Ordering\", 0.5)\n self.addError( { 'AOI': 'AllGroupsOrdering', \n 'Code': \"\", \n 'IsCorrect': \"True\" ,\n 'Value': \"\", \n 'ValueSubmitted': \"\",\n })\n\n else:\n self.addNote( \"Incorrect ALL group ordering\" )\n self.addError( { 'AOI': 'AllGroupsOrdering', \n 'Code': \"\", \n 'IsCorrect': \"False\" ,\n 'Value': \"\", \n 'ValueSubmitted': \"\",\n })\n\n\n return submission",
"def test_check_ncs_group_list(self):\n phil_groups = ncs_group_master_phil.fetch(\n iotbx.phil.parse(phil_str)).extract()\n pdb_inp = iotbx.pdb.input(source_info=None, lines=test_pdb_str_2)\n ncs_obj_phil = ncs.input(\n hierarchy=pdb_inp.construct_hierarchy(),\n ncs_phil_groups=phil_groups.ncs_group)\n nrgl = ncs_obj_phil.get_ncs_restraints_group_list()\n pdb_inp = iotbx.pdb.input(lines=test_pdb_str_2,source_info=None)\n ph = pdb_inp.construct_hierarchy()\n # passing test\n self.assertTrue(nu.check_ncs_group_list(nrgl,ph,chain_max_rmsd=1))\n # make sure test fails when it suppose to\n nrgl[0].copies[1].t = matrix.col([100, -89.7668, 5.8996])\n self.assertFalse(nu.check_ncs_group_list(nrgl,ph,chain_max_rmsd=1))",
"def test_pandas_multi_index(self):\n multi_frame = pd.MultiIndex.from_tuples([\n ('I', 'a'),\n ('II', 'b'),\n ('III', 'c'),\n ('IV', 'd'),\n ])\n requirement = [('I', 'a'), ('II', 'b'), ('III', 'c'), ('IV', 'd')]\n logging.info(\"validating Pandas multiframe logic, expected and actual values{} {}\".format(requirement, multi_frame))\n multi_frame.validate(requirement)",
"def validate(self):\n # every symbol is length one str\n invalid_symbols = list(filter(\n lambda x: (type(x) != str and type(x) != unicode) or len(x) != 1,\n chain(self.N, self.T))) \n # intersection must be empty\n invalid_symbols.extend(self.N.intersection(self.T))\n # every rule must match specified format\n invalid_rules = list()\n for rule in self.P:\n L, D = rule.replace(\" \", \"\").split('->')\n D = D.replace(\"|\", \"\")\n if list(filter(lambda x: x not in self.N.union(self.T), D)) \\\n or L not in self.N:\n print L not in self.N\n print list(filter(lambda x: x not in self.N.union(self.T), D))\n invalid_rules.append(rule) \n\n if invalid_symbols or invalid_rules:\n raise ValueError(\"Invalid symbols: \" + str(invalid_symbols) + \" \" +\n \"Invalid rules: \" + str(invalid_rules))",
"def validate_all():\n tau[3], y_data[3] = extract_2d_data('edge_03_ar4_s0.csv')\n tau[5], y_data[5] = extract_2d_data('edge_05_ar4_s0.csv')\n tau[7], y_data[7] = extract_2d_data('edge_07_ar4_s0.csv')\n validate(62, 160, M=3)\n validate(62, 160, M=5)\n validate(49, 160, M=7)\n validate(15, 160, M=3, closeup=True)\n validate(15, 160, M=5, closeup=True)\n validate(15, 160, M=7, closeup=True)",
"def check_regex_error(self, m, query, group_numbers=1):\n if m and group_numbers == 1: # If m has groups\n return False\n if m and group_numbers >= 1: # If m has more than one group\n if len(m.groups()) == group_numbers: # Then check the group numbers\n return False\n else:\n self.is_error = True\n LOG.error(\"Parse query error! Query string is %s\" % query)\n return True\n else:\n self.is_error = True\n LOG.error(\"Parse query error! Query string is %s\" % query)\n return True",
"def _validate_inputs(self,col_list):\n if not set(col_list).difference(self.raw_data.columns):\n print 'Columns is ok,Begin to Run....'\n else:\n raise ValueError('''The columns not in data's columns ''')",
"def group_error_rates(labels, predictions, groups):\r\n errors = []\r\n for jj in range(groups.shape[1]):\r\n if groups[:, jj].sum() == 0: # Group is empty?\r\n errors.append(0.0)\r\n else:\r\n signed_labels_jj = 2 * labels[groups[:, jj] == 1] - 1\r\n predictions_jj = predictions[groups[:, jj] == 1]\r\n errors.append(torch.mean((signed_labels_jj * predictions_jj <= 0).float()))\r\n return errors",
"def test_bad_grid_parameter_with_too_few_unique_values(self):\n DF = pd.DataFrame({\"p1\": [4, 4, 4, 4], \"p2\": [1, 2, 3, 4],\n \"l2\": [5, 6, 7, 8]})\n self.assertRaisesRE(ValueError, \"3 unique values are required\",\n NB_Model, DF, [\"p1\", \"p2\"])",
"def validate(ddtable):\n margin_upp = ddtable.sum(axis=1).transpose()\n count_upp = count_vec(margin_upp)\n remainder_upp = np.remainder(margin_upp, count_upp)\n\n margin_low = ddtable.sum(axis=0)\n count_low = count_vec(margin_low)\n remainder_low = np.remainder(margin_low, count_low)\n\n if not ((remainder_low == 0).all() and (remainder_upp == 0).all()):\n return False\n\n # e_ij <= d^u_i * d^l_j\n div_upp = np.divide(margin_upp, count_upp)\n div_low = np.divide(margin_low, count_low)\n for i in xrange(0,div_upp.size):\n for j in xrange(0,div_low.size):\n if ddtable[i,j] > div_upp.A1[i] * div_low.A1[j]: # is this the right way to access this?\n print (i, j, ddtable[i,j], div_upp.A1[i] * div_low.A1[j])\n return False\n return True",
"def query_check(tables, query):\n (valid_from, from_error) = from_check(tables, query) \n (valid_select, select_error) = check_select(tables, query)\n (valid_group, group_error) = check_group_by(tables, query)",
"def test_set_grouping_error(self):\n H = qml.Hamiltonian([1.0, 2.0, 3.0], [qml.PauliX(0), qml.PauliX(1), qml.PauliZ(0)])\n\n with pytest.raises(ValueError, match=\"The grouped index value\"):\n H.grouping_indices = [[3, 1], [2]]\n\n with pytest.raises(ValueError, match=\"The grouped index value\"):\n H.grouping_indices = \"a\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check answer for errors.

VALIDATION RULES
proper columns
|
def errorCheckSubmission( self, answer):
for colName in ["Code", "Convention", "GroupOrder"]:
assert colName in answer.columns, "We need a %s column in the master spreadsheet" % colName
|
[
"def _validate_inputs(self,col_list):\n if not set(col_list).difference(self.raw_data.columns):\n print 'Columns is ok,Begin to Run....'\n else:\n raise ValueError('''The columns not in data's columns ''')",
"def errorCheckMaster( self, answer):\n self.errorCheckSubmission( answer ) \n for colName in [\"Grouping\", \"IntraGroupOrder\", \"GroupOrder\"]:\n assert colName in answer.columns, \"We need a %s column in the master spreadsheet\" % colName",
"def check_input_validation(df):\n print(\"\\nChecking the input data validation.............................\")\n invalid_fields = []\n\n isnull_sum = df.isnull().sum()\n for index, val in isnull_sum.iteritems():\n if val > 0:\n invalid_fields.append(index)\n if len(invalid_fields) > 0:\n raise ValueError(\"The NaN missing values still exist in fields: \" + str(invalid_fields))\n\n # TODO: Why not working properly??\n isreal_sum = df.applymap(np.isreal).sum()\n for index, val in isreal_sum.iteritems():\n if val < len(df):\n invalid_fields.append(index)\n # if len(invalid_fields) > 0:\n # raise ValueError(\"The non-numerical values still exist in fields: \" + str(invalid_fields))\n\n return True",
"def check_valid_column(observation):\n \n valid_columns = {\n \"observation_id\",\n \"Type\",\n \"Date\",\n \"Part of a policing operation\",\n \"Latitude\",\n \"Longitude\",\n \"Gender\",\n \"Age range\",\n \"Officer-defined ethnicity\",\n \"Legislation\",\n \"Object of search\",\n \"station\"\n }\n \n keys = set(observation.keys())\n \n if len(valid_columns - keys) > 0: \n missing = valid_columns - keys\n error = \"Missing columns: {}\".format(missing)\n return False, error\n \n if len(keys - valid_columns) > 0: \n extra = keys - valid_columns\n error = \"Unrecognized columns provided: {}\".format(extra)\n return False, error \n\n return True, \"\"",
"def _validate(self):\n warnings = []\n # check that no columns have null values\n null_cols = self.view[self._activeCols].isnull().any()\n for i in null_cols.iteritems():\n col, hasna = i\n if hasna:\n warnings.append(\"{} has null values.\".format(col))\n # cross check values with allowed values in self.schema\n if self.schema is not None:\n malformed_values = schemaModule.validateView(self.view, self.schema)\n if malformed_values:\n for k in malformed_values:\n warnings.append(\"{} contains the following values which are \"\n \"not specified in the schema: {}\".format(\n k, \", \".join(map(str, malformed_values[k]))) +\n \"\\n\\tPossible values are {}\".format(\n \", \".join(self.schema.loc[k].value.values)))\n return warnings",
"def test_check_multiple_columns():\n\n class Schema(pa.SchemaModel):\n a: Series[int]\n b: Series[int]\n\n @pa.check(\"a\", \"b\")\n @classmethod\n def int_column_lt_100(cls, series: pd.Series) -> Iterable[bool]:\n return series < 100\n\n df = pd.DataFrame({\"a\": [101], \"b\": [200]})\n with pytest.raises(\n pa.errors.SchemaErrors, match=\"2 schema errors were found\"\n ):\n Schema.validate(df, lazy=True)",
"def _check_column_valid(self, column):\n if (isinstance(column, (int, long) )):\n if (column<0 and column>=self.get_number_of_cols()):\n raise ValueError(\"ERROR! column number (\" + str(column) + \") not valid!\")\n \n if (isinstance(column, str )):\n if (column not in self._col_names):\n raise ValueError(\"ERROR! column name (\" + column + \") not valid!\")",
"def _validators_valid(self, dataframe: pd.DataFrame):\n results, msgs = [], []\n for dd_col in self.list_columns():\n df_col = dataframe.get(dd_col.name, None)\n if df_col is not None and len(dd_col.validator) > 0:\n for validator in dd_col.validator:\n try:\n validator(inst=self, attr=dd_col, value=df_col)\n results.append(True)\n except:\n results.append(False)\n msgs.append(\n f\"The column [{dd_col.name}] failed {str(validator)[1:-1]}.\"\n )\n\n return results, msgs",
"def test_column_name_validation_fail(self):\n\n schema = {\n 'decimal_1': float\n }\n df = pd.DataFrame(data=(1, 2, 3), columns=['err_col'])\n\n try:\n val = Validator().validate_column_names(df, schema)\n except Exception as e:\n assert \"decimal_1\" in str(e).lower()\n assert e.__class__ == AssertionError",
"def validate_request(user_columns, data_columns):\n\n # Isolate our user- and data-columns into sets.\n data_columns_set = set(data_columns)\n user_columns_set = set(user_columns)\n\n # If the user denotes :all keyword, analyze all columns.\n if ':all' in user_columns_set:\n return data_columns\n\n # Valid columns are in the intersection between the two,\n # invalid columns are in the difference from user to data columns.\n valid, invalid = (\n user_columns_set.intersection(data_columns_set),\n user_columns_set.difference(data_columns_set)\n )\n\n # For all invalid columns, inform the user of their invalidity.\n for column in invalid:\n print(\"`{}` is not a valid column --- skipping.\".format(column))\n\n # Proceed with the analysis using only valid columns.\n return valid",
"def validation_fields_check(self, sheet):\n # Below value indicates column I in excel\n column_no = 8\n error = ''\n\n if self.year == '2019':\n # each element in below indicates range of cell addresses i.e. 154, 164 indicates all cells from 155 to 165.\n # Cell addresses in this module start at 0. hence we are referring to I155, I156 etc. here\n row_list = [(180, 180, 'Too Many DAG Members Validation Check'),\n (182, 182, 'DB Copy Count Validation Check'),\n (184, 184, 'Calculated Max DB Size Not Zero Check'),\n (185, 185, 'Mailbox Size Limit Not Zero Check'),\n (187, 187, 'Disk Count Validation Check'),\n (188, 188, 'Invalid Active/Active DAG'),\n (193, 193, 'Calculator Validation Check')]\n else:\n row_list = [(155, 157, 'Too Many DAG Members Validation Check & DB Copy Count Validation Check & '\n 'Mailbox Size Limit Not Zero Check'),\n (160, 163, 'Calculator Validation Check & Calculated Max DB Size Not Zero Check & '\n 'Disk Count Validation Check & Invalid Active/Active DAG'),\n (181, 188, 'JBOD Validation checks section')]\n\n for item in row_list:\n\n for i in range(item[0], item[1]+1):\n\n if sheet.cell(i, column_no).value:\n error += \"\\\"%s\\\" in \\\"%s\\\" sheet has failed.\\n\" % (sheet.cell(i, column_no-1).value, sheet.name)\n\n self.errors = error",
"def is_column_valid(column: list) -> bool:\n return is_row_valid(column)",
"def checkColumns(self, row, columns, log):\n rescols = set(row.keys())\n cols = set(columns.values())\n if not rescols >= cols:\n log.error(\n \"result missing columns: '%s'\",\n \",\".join(cols.difference(rescols)),\n )\n return False\n return True",
"def validate_fields(row):\n try:\n row.validate_fields()\n except AssertionError:\n raise",
"def test_check_single_column():\n\n class Schema(pa.SchemaModel):\n a: Series[int]\n\n @pa.check(\"a\")\n def int_column_lt_100(cls, series: pd.Series) -> Iterable[bool]:\n # pylint:disable=no-self-argument\n assert cls is Schema\n return series < 100\n\n df = pd.DataFrame({\"a\": [101]})\n schema = Schema.to_schema()\n err_msg = r\"Column\\s*a\\s*int_column_lt_100\\s*\\[101\\]\\s*1\"\n with pytest.raises(pa.errors.SchemaErrors, match=err_msg):\n schema.validate(df, lazy=True)",
"def validate(self) -> None:\n val_results = [val.val_column_exist(self.field_found())]\n if self.field_found():\n val_results.append(val.val_column_sort(self._correct_position))\n val_results += self._validate_this_field()\n print_validation_report(self.field_name, val_results)",
"def _preprocessing_failed(self, column_name_1, column_name_2, sdtype_col_1, sdtype_col_2):\n error = None\n if column_name_1 in self._columns_datetime_conversion_failed.keys():\n error = self._columns_datetime_conversion_failed[column_name_1]\n\n elif column_name_2 in self._columns_datetime_conversion_failed.keys():\n error = self._columns_datetime_conversion_failed[column_name_2]\n\n elif self._sdtype_to_shape[sdtype_col_1] != self._sdtype_to_shape[sdtype_col_2]:\n if column_name_1 in self._columns_discretization_failed.keys():\n error = self._columns_discretization_failed[column_name_1]\n elif column_name_2 in self._columns_discretization_failed.keys():\n error = self._columns_discretization_failed[column_name_2]\n\n return error",
"def validate_expression(self, expression):\n\t\t#return self.evaluate(expression, 0, 2)\n\t\tvars = set(self.get_column_names(True, True)) | set(self.variables.keys())\n\t\tfuncs = set(expression_namespace.keys())\n\t\treturn vaex.expresso.validate_expression(expression, vars, funcs)",
"def data_checks():\n for func in [read_adult, read_bank, read_compas, read_german, read_sqf,\n read_synthetic]:\n xtr, xte, ytr, yte, ztr, zte = func()\n\n if np.any(xtr[:, 0] != 1.) or np.any(xte[:, 0] != 1.):\n print(\"WARNING: intercept issue in {}\".format(func.__name__))\n if np.any((ytr != 1) & (ytr != 0)) or np.any((yte != 1) & (yte != 0)):\n print(\"WARNING: label issue in {}\".format(func.__name__))\n if np.any(np.std(xtr[:, 1:], 0) == 0) or np.any(np.std(xte[:, 1:], 0) == 0):\n print(\"WARNING: constant column in X {}\".format(func.__name__))\n if np.any(np.std(ztr, 0) == 0) or np.any(np.std(zte, 0) == 0):\n print(\"WARNING: constant column in Z {}\".format(func.__name__))\n if np.std(ytr) == 0 or np.std(yte) == 0:\n print(\"WARNING: constant column in y {}\".format(func.__name__))\n\n print(\"Done running checks.\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
return a mark, and a marked-up submission; the latter ready to write back to the excel file. algorithm: find groups and create groupStart array, groupOrder, groupMarking columns??? check intragroup order; check group order
|
def mark(self, submission):
    """Mark one student submission against the master answer.

    Returns a tuple of (marked-up submission, total marks, error frame).
    """
    # did the student not submit anything with this name?
    if submission is None or len(submission) == 0:
        submission = pd.DataFrame(columns=self.ma.columns)
        #return (pd.DataFrame(), 0, pd.DataFrame())
    submission = self.dataClean(submission)
    self.initalizeSubmissionDetails()
    submission = self.findGroups(submission)
    submission = self.markUnspecifiedPositions(submission)
    # group-based marking only applies when the master answer defines groups
    if notblank(self.ma.Grouping) != []:
        submission = self.markUnorderedGroups(submission)
        submission = self.markIntragroupOrder(submission)
        submission = self.markGroupOrder(submission)
    submission = self.markPrefix(submission)
    submission = self.markConvention(submission)
    # write the accumulated mark categories, amounts and notes back as columns
    label = "Marks: Category"
    submission = self.addColumn(submission, label)
    for idx, mc in enumerate(self.markCategory):
        submission.loc[idx, label] = mc
    totMarks = 0
    label = "Marks: Amount"
    submission = self.addColumn(submission, label)
    for idx, mark in enumerate(self.marks):
        submission.loc[idx, label] = mark
        totMarks = totMarks + mark
    label = "Marking Notes"
    submission = self.addColumn(submission, label)
    for idx, note in enumerate(self.notes):
        submission.loc[idx, label] = note
    # append a separator row and a total row below the per-mark rows
    submission.loc[len(self.marks) + 1, "Marks: Category"] = ""
    submission.loc[len(self.marks) + 1, "Marks: Amount"] = "------------"
    submission.loc[len(self.marks) + 2, "Marks: Category"] = "Total"
    submission.loc[len(self.marks) + 2, "Marks: Amount"] = totMarks
    return (submission, totMarks, self.errorFrame)
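mark() depends heavily on class state (self.ma, the mark/note accumulators, addColumn and the mark* helpers), so a full runnable example is not attempted here. The standalone sketch below only reproduces the summary block appended at the tail of the method, with invented marks and notes, to show the shape of the frame it returns; addColumn is approximated by a plain column assignment.

import pandas as pd

# Standalone illustration of the summary columns mark() appends.
# The categories, marks and notes below are invented; only the column
# labels come from the method above.
markCategory = ["Principal Diagnosis", "2 Correct prefixes"]
marks = [1.0, 1.0]
notes = ["Correct principal diagnosis, 1 mark",
         "You had 2 correct prefixes, gaining 1.0 marks"]

submission = pd.DataFrame({"Code": ["A01", "B02", "C03"]})
for label in ["Marks: Category", "Marks: Amount", "Marking Notes"]:
    submission[label] = None  # stands in for self.addColumn

for idx, (mc, mark, note) in enumerate(zip(markCategory, marks, notes)):
    submission.loc[idx, "Marks: Category"] = mc
    submission.loc[idx, "Marks: Amount"] = mark
    submission.loc[idx, "Marking Notes"] = note

totMarks = sum(marks)
submission.loc[len(marks) + 1, "Marks: Amount"] = "------------"   # separator row
submission.loc[len(marks) + 2, "Marks: Category"] = "Total"
submission.loc[len(marks) + 2, "Marks: Amount"] = totMarks
print(submission)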
|
[
"def markGroupOrder(self,submission):\n\n \"\"\" make sure that there exist groupOrders in the answer\"\"\"\n groupOrder = self.ma.ix[ notblank(self.ma.GroupOrder),(\"Grouping\",\"GroupOrder\")]\n if len( groupOrder ) == 0:\n return submission\n\n \"\"\" find out where these groups live in the submission:\n create data frame with rows Grouping, GroupOrder, and mindex, maxdex \n 1) find all the rows that relate to the answer grouping, and their minimum and maximum index (mindex)\n \"\"\"\n submissionGroupPos = submission[ submission.Grouping.isin(groupOrder.Grouping)]\n submissionGroupPos.loc[:,\"index\"]=submissionGroupPos.index\n submissionGroupPosMin = pd.DataFrame(submissionGroupPos.groupby(\"Grouping\")[\"index\"].agg(np.min))\n submissionGroupPosMin[\"mindex\"] = submissionGroupPosMin[\"index\"]\n submissionGroupPosMax = pd.DataFrame(submissionGroupPos.groupby(\"Grouping\")[\"index\"].agg(np.max))\n submissionGroupPosMax[\"maxdex\"] = submissionGroupPosMax[\"index\"]\n\n # error check to make sure we have got Min and Max Grouping columns\n if not 'Grouping' in submissionGroupPosMin.columns:\n submissionGroupPosMin['Grouping'] = submissionGroupPosMin.index\n if not 'Grouping' in submissionGroupPosMax.columns:\n submissionGroupPosMax['Grouping']=submissionGroupPosMax.index\n groupOrder=groupOrder.merge(submissionGroupPosMin, how='left', on=\"Grouping\")\n groupOrder=groupOrder.merge(submissionGroupPosMax, how='left', on=\"Grouping\").sort(columns=\"mindex\")\n\n \n groupOrder.loc[ : , \"Consecutive\"] = False\n i=0\n for go in groupOrder.GroupOrder:\n if str(go).endswith(\"N\"):\n groupOrder.loc[ i, \"Consecutive\"] = True\n groupOrder.loc[ i, \"GroupOrder\"] = groupOrder.loc[ i, \"GroupOrder\"][0:-1] \n i = i + 1\n\n\n \"\"\" go through each group in mindex order, make sure that \n - all the groups exist\n - the groups are consecutive (when the first group ends in an N, and \n - the GroupOrder ascends\n\n \"\"\"\n if ( \n all( not np.isnan( i ) for i in groupOrder.ix[:, \"mindex\"] ) # pylint: disable=E1101 \n and all( not groupOrder.ix[i,\"Consecutive\"] \n or groupOrder.ix[i, \"maxdex\"]+1 == groupOrder.ix[i+1, \"mindex\"]\n for i in range( len(groupOrder) -1 )\n )\n\n and all( groupOrder.ix[i, \"GroupOrder\"] <= groupOrder.ix[i+1, \"GroupOrder\"] \n for i in range( len(groupOrder) -1 )\n )\n ):\n self.addNote( \"Correct ALL group ordering, 0.5 marks\" )\n self.addMark(\"All Groups Ordering\", 0.5)\n self.addError( { 'AOI': 'AllGroupsOrdering', \n 'Code': \"\", \n 'IsCorrect': \"True\" ,\n 'Value': \"\", \n 'ValueSubmitted': \"\",\n })\n\n else:\n self.addNote( \"Incorrect ALL group ordering\" )\n self.addError( { 'AOI': 'AllGroupsOrdering', \n 'Code': \"\", \n 'IsCorrect': \"False\" ,\n 'Value': \"\", \n 'ValueSubmitted': \"\",\n })\n\n\n return submission",
"def markUnorderedGroups(self,submission):\n\n maGroups= self.ma[ eAnd( isblank(self.ma.IntraGroupOrder), notblank(self.ma.Grouping)) ].Grouping.unique()\n\n # P and L groups are taken care of by absoluteOrdering routine. Different marks too\n #maGroups = set(maGroups).difference( set(\"P\", \"L\"))\n label='UnorderedGroups'\n submission = self.addColumn(submission, label )\n submission.loc[:,label]=None\n for group in maGroups:\n # take the group slice\n magSet = set( self.ma[ self.ma.Grouping==group].Code)\n subSlice = submission[ submission.Grouping==group].Code\n subSet = set( subSlice )\n nCorrect=len( magSet & subSet )\n submission.loc[ submission.Code.isin( magSet ), label] = group\n if group==\"P\":\n if nCorrect == len(magSet ) : # all correct, principal\n self.addNote( \"Correct principal diagnosis, 1 mark\" )\n self.addMark(\"Principal Diagnosis\", 1)\n self.addError( {\n 'AOI': 'PrincipalCode', \n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n 'Code': \"\", \n 'IsCorrect': \"True\" \n })\n else:\n self.addNote( \"Incorrect principal diagnosis, answer is %s, you had %s \" % ( pprintSlice(magSet), pprintSlice(subSet)) )\n self.addError( {\n 'AOI': 'PrincipalCode', \n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n 'Code': \"\", \n 'IsCorrect': \"False\" \n })\n next\n\n if group==\"L\" : # Last Codes \n if len(subSlice) > 0 and max( subSlice.index ) == max(submission.index ):\n self.addNote( \"Correct final codes, 0.5 marks\" )\n self.addMark( \"Final Code(s) Group\", 0.5 )\n self.addError( {\n 'AOI': 'LastCode', \n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n 'Code': \"\", \n 'IsCorrect': \"True\" \n })\n else:\n self.addNote( \"Incorrect final code(s), should be %s\" % ( pprintSlice(magSet)) )\n self.addError( { 'AOI': 'LastCode', \n 'Code': \"\", \n 'IsCorrect': \"False\" ,\n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n })\n\n # we don't need to process the group if the master says it is only one code long\n if len( magSet ) == 1:\n next\n\n\n\n if nCorrect == len(magSet ) : # all correct\n self.addNote( \"Unordered Group %s, %s entirely correct, 0.5 marks\" % (group, pprintSlice(magSet)) )\n self.addMark(\"Unordered Group %s\" % group, 0.5)\n self.addError( { 'AOI': 'UnorderedGroup', \n 'Code': \"\", \n 'IsCorrect': \"True\" ,\n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n })\n elif (nCorrect > 0 ) :\n self.addNote( \"Unordered Group %s partially correct, answer is %s, you had %s, 0.5 marks \" \n % (group, pprintSlice(magSet), pprintSlice(subSet)) )\n self.addMark(\"Unordered Group %s\" % group, 0.5)\n self.addError( { 'AOI': 'UnorderedGroup', \n 'Code': \"\", \n 'IsCorrect': \"False\" ,\n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n })\n else:\n self.addNote( \"Unordered Group %s, %s entirely missing\" % (group, pprintSlice(magSet)) )\n self.addError( { 'AOI': 'UnorderedGroup', \n 'Code': \"\", \n 'IsCorrect': \"False\" ,\n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': \"\",\n })\n\n return submission",
"def mark_group(self, name, mark):\n self._mark_group(name.encode(), mark)",
"def prepare_marks_data() -> DataFrame:\n data = read_excel(\n DATA_MARKS_PATH,\n header=2,\n usecols=[0, 2, 15, 16, 19],\n parse_dates=['Дата создания заказа'],\n dayfirst=True\n )\n data = data.rename(columns={\n 'ID заказа': 'id_order',\n 'Дата создания заказа': 'date_creating_order',\n 'Марка применена': 'mark_implement',\n 'Конструктор - создатель марки': 'name_constructor',\n '№ позиции': 'pos_number'\n })\n data['date_month'] = data['date_creating_order'].map(beginning_month)\n\n return data",
"def mark(self):\n materials = (\n MaterialGroup(tag=24, entities=[self.s_domain]),\n )\n for material in materials:\n material.create_physical_group()\n\n vertex_groups = (\n VertexGroup(name=\"boundary\", tag=1, dim=1, entities=[self.l_xneg, self.l_yneg, self.l_xpos, self.l_ypos]),\n )\n for group in vertex_groups:\n group.create_physical_group()",
"def buildmarkpointlist(eachrationumlist,blockcount):\r\n \r\n markpointlistdict={}\r\n \r\n markpointlist=[]\r\n rationumaccumulationlist=[]\r\n rationumaccumulationlist.append(0) \r\n for i in range(1,globalconfig.RATIO_NUM): #计算放缩率数量累加列表\r\n rationumaccumulationlist.append(rationumaccumulationlist[i-1]+eachrationumlist[i-1])\r\n \r\n block_x_count=blockcount%globalconfig.BLOCK_X_NUM\r\n block_y_count=blockcount//globalconfig.BLOCK_X_NUM\r\n \r\n block_x_offset=globalconfig.block_x_accumulationlist[block_x_count]*globalconfig.X_LENGTH/globalconfig.X_OUTLINE_RATIO\r\n block_y_offset=globalconfig.block_y_accumulationlist[block_y_count]*globalconfig.Y_LENGTH/globalconfig.Y_OUTLINE_RATIO\r\n \r\n \r\n \r\n \r\n for i in range(len(eachrationumlist)): \r\n markpointlist=[] \r\n for row in range(0,eachrationumlist[i]):\r\n markpointlist.append([globalconfig.X_BLANK+globalconfig.CUTLINE_X_OFFSET+(globalconfig.X_LENGTH/globalconfig.X_OUTLINE_RATIO)*(rationumaccumulationlist[i]+row)+globalconfig.MARK_X_OFFSET+block_x_offset,globalconfig.Y_BLANK+globalconfig.CUTLINE_Y_OFFSET+globalconfig.MARK_Y_OFFSET+block_y_offset])\r\n \r\n if globalconfig.BLOCK_X_NUM==1: \r\n mark=globalconfig.blockmark_y_list[block_y_count]+globalconfig.markratiolist[i] \r\n elif globalconfig.BLOCK_Y_NUM==1:\r\n mark=globalconfig.blockmark_x_list[block_x_count]+globalconfig.markratiolist[i] \r\n elif globalconfig.RATIO_NUM==1:\r\n mark=globalconfig.blockmark_x_list[block_x_count]+globalconfig.blockmark_y_list[block_y_count]\r\n else:\r\n mark=globalconfig.blockmark_x_list[block_x_count]+globalconfig.blockmark_y_list[block_y_count]+globalconfig.markratiolist[i] \r\n markpointlistdict[mark]=markpointlist\r\n return markpointlistdict",
"def strand_grouper(counted_fname, fraction_fname, class_key_fname,\n tstrand_by_class_outfname, fmsdstrand_by_class_outfname):\n\n fraction_file = open(fraction_fname)\n counted_file = open(counted_fname)\n class_key_file = open(class_key_fname)\n tstrand_by_class_outfile = open(tstrand_by_class_outfname, 'w')\n fmsdstrand_by_class_outfile = open(fmsdstrand_by_class_outfname, 'w')\n\n tab = '\\t'\n\n sub_totals = {'G>T':0, 'G>C':0, 'G>A':0, 'A>T':0, 'A>G':0, 'A>C':0}\n\n sub_totals_list = sorted(sub_totals)\n\n tstrand_by_class_outfile.write (\"Class\")\n fmsdstrand_by_class_outfile.write (\"Class\")\n\n # Make a header\n for x in sub_totals_list:\n tstrand_by_class_outfile.write (tab + x)\n\n tstrand_by_class_outfile.write ('\\n')\n\n classes = []\n cell_classes = {}\n bio_classes = {}\n\n # Populate the cell_classes dict,\n # which has cells as keys and\n # classes as values\n # Use this to look up the class for\n # every cell.\n\n for line in class_key_file:\n if \"Class\" not in line:\n sline = line.split('\\t')\n cell = sline[0]\n bio_class = sline[1].strip('\\n').strip('\\r')\n\n if bio_class + \"+\" not in classes:\n classes.append(bio_class + \"+\")\n classes.append(bio_class + \"-\")\n\n if bio_class + \"+\" not in bio_classes:\n bio_classes[bio_class + \"+\"] = [cell]\n bio_classes[bio_class + \"-\"] = [cell]\n\n else:\n bio_classes[bio_class + \"+\"].append(cell)\n bio_classes[bio_class + \"-\"].append(cell)\n\n cell_classes[cell + \"+\"] = bio_class + \"+\"\n cell_classes[cell + \"-\"] = bio_class + \"-\"\n\n cells_per_bio_class = {}\n\n for h in bio_classes:\n cells_per_bio_class[h] = str(len(bio_classes[h]))\n\n # Prepare the class_counter dictionary\n # which will have classes as keys and\n # a list of ints reflecting the count\n # of each SNV type as values.\n # Also prep the class_fraction dictionary\n # which will have classes as keys and\n\n class_counter = {}\n class_fractions = {}\n\n for c in classes:\n class_counter[c] = [0,0,0,0,0,0]\n class_fractions[c] = {'A>C':[], 'A>G':[], 'A>T':[], 'G>A':[], 'G>C':[], 'G>T':[]}\n\n\n for line in counted_file:\n if \"C>T\" not in line:\n sline = line.split('\\t')\n cell_strand_data = sline[0]\n\n if cell_strand_data in cell_classes:\n SNV_class = cell_classes[cell_strand_data]\n\n for m in range(len(class_counter[SNV_class])):\n class_counter[SNV_class][m] += int(sline[m+1].strip('\\n'))\n\n SNS_types = ['A>C', 'A>G', 'A>T', 'G>A', 'G>C', 'G>T']\n\n for f in sorted(class_counter):\n tstrand_by_class_outfile.write(f)\n\n for n in class_counter[f]:\n tstrand_by_class_outfile.write('\\t')\n tstrand_by_class_outfile.write(str(n))\n\n\n tstrand_by_class_outfile.write('\\n')\n\n\n for line in fraction_file:\n if \"C>A\" not in line:\n sline = line.split('\\t')\n cell_strand_data = sline[0]\n\n if cell_strand_data in cell_classes:\n SNV_class = cell_classes[cell_strand_data]\n class_fractions[SNV_class][\"A>C\"].append(sline[1])\n class_fractions[SNV_class][\"A>G\"].append(sline[2])\n class_fractions[SNV_class][\"A>T\"].append(sline[3])\n class_fractions[SNV_class][\"G>A\"].append(sline[4])\n class_fractions[SNV_class][\"G>C\"].append(sline[5])\n class_fractions[SNV_class][\"G>T\"].append(sline[6].strip('\\n'))\n\n class_fraction_means = {}\n class_fraction_SDs = {}\n\n for a in class_fractions:\n class_fraction_means[a] = {}\n class_fraction_SDs[a] = {}\n\n for snv in class_fractions[a]:\n all_snv_fractions = []\n for g in class_fractions[a][snv]:\n all_snv_fractions.append(float(g))\n\n class_snv_fractions = 
np.array(all_snv_fractions)\n class_snv_mean = np.mean(class_snv_fractions)\n class_snv_sd = np.std(class_snv_fractions)\n class_fraction_means[a][snv] = class_snv_mean\n class_fraction_SDs[a][snv] = class_snv_sd\n\n sorted_classes = sorted(class_fraction_means)\n\n # header should have SNS types in column 1\n # col 2, 3, and 4 should be (for example) AT+ mean,\n # AT+ SD, and AT+ N, and columns 5, 6, and 7 should be\n # (for example) AT- mean, AT- SD, and AT- N.\n # use the sorted_classes list to write this line\n # then, on the next line, fill in the data for each sample\n\n fmsdstrand_by_class_outfile.write (\"SNS type\")\n\n for o in sorted_classes:\n fmsdstrand_by_class_outfile.write (tab + o + \" mean\")\n fmsdstrand_by_class_outfile.write (tab + o + \" SD\")\n fmsdstrand_by_class_outfile.write (tab + \"N\")\n\n fmsdstrand_by_class_outfile.write (\"\\n\")\n\n for d in sub_totals_list:\n fmsdstrand_by_class_outfile.write(d)\n for l in sorted_classes:\n fmsdstrand_by_class_outfile.write(tab + str(class_fraction_means[l][d]))\n fmsdstrand_by_class_outfile.write(tab + str(class_fraction_SDs[l][d]))\n fmsdstrand_by_class_outfile.write(tab + cells_per_bio_class[l])\n\n fmsdstrand_by_class_outfile.write (\"\\n\")\n\n\n\n fraction_file.close()\n counted_file.close()\n class_key_file.close()\n tstrand_by_class_outfile.close()\n fmsdstrand_by_class_outfile.close()",
"def write_group(pf, tag,mdef):\n tbl=pf.get_tbl(tag)\n filename=tag+\".csv\"\n fh=open(filename,\"w+\")\n fh.write('\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\n' % (\"Key\",\"Type\",\"Mutable\",\"Concept\"))\n for k in tbl:\n t=mdef.type(k)\n tstr=\"undefined\"\n if(t==MDtype.Int64):\n tstr=\"int\"\n elif(t==MDtype.Double):\n tstr=\"double\"\n elif(t==MDtype.String):\n tstr=\"string\"\n elif(t==MDtype.Boolean):\n tstr=\"boolean\"\n writeable=mdef.writeable(k)\n wstr=\"undefined\"\n if(writeable):\n wstr=\"Yes\"\n else:\n wstr=\"No\"\n fh.write('\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\n' % (k,tstr,wstr,mdef.concept(k)))\n fh.close()",
"def markPrefix(self,submission):\n label='Prefix?'\n submission = self.addColumn( submission, label )\n submission.loc[:,label]=\"Not Correct\"\n if not 'Prefix' in submission.columns:\n return submission\n prefixes = submission.ix[:,(\"Code\",\"Prefix\")]\n prefixes.columns = [ \"Code\",\"submissionPrefix\"]\n if len( prefixes ) == 0:\n return submission\n prefixes = prefixes.merge(self.ma.loc[:, (\"Code\",\"Prefix\")], how=\"left\", on=\"Code\")\n isCorrect = list(not pd.isnull( c ) and c==s for s,c in zip(prefixes.submissionPrefix, prefixes.Prefix))\n submission.ix[ isCorrect, label ] = \"Correct\"\n nCorrect = sum( isCorrect )\n \n \"\"\" \n prepare errorframe from a 'what is correct' perspective\n 1) create error dataframe from master, columns Code and prefix\n 1a) rename prefix to Value\n 2) fill submission prefix, matching by code\n 3) fill IsCorrect\n \"\"\"\n errors = self.ma.ix[:,(\"Code\",\"Prefix\")]\n errors.columns = [ \"Code\", \"Value\" ]\n errors = errors.merge(submission.loc[:, (\"Code\",\"Prefix\")], how=\"left\", on=\"Code\")\n errors.columns = [ \"Code\", \"Value\", \"ValueSubmitted\" ]\n errors = self.addColumn( errors, \"AOI\" )\n errors.loc[:,\"AOI\"]=\"Prefix\"\n label = \"IsCorrect\"\n errors = self.addColumn( errors, label )\n errors.loc[:, label ]=\"False\"\n isCorrect = list(not pd.isnull( c ) and c==s \n for s,c in zip(errors.Value, errors.ValueSubmitted))\n errors.ix[ isCorrect, label ] = \"True\"\n self.addError( errors )\n\n self.addNote(\"You had %d correct prefixes, gaining %2.1f marks\" %(nCorrect, nCorrect * 0.5))\n self.addMark(\"%d Correct prefixes\" % nCorrect, nCorrect * 0.5)\n\n return submission",
"def write_groups(out_file, groupname):\r\n print(\"To create a single group please just enter the main group name i.e. Group Name\")\r\n print('To create a subgroup to an exisitng group, please enter /Group Name/Subgroup Name/etc/etc/')\r\n print() \r\n attributes = {}\r\n print(\"Enter attributes for\", groupname)\r\n meta = input(\"Is there a metadata file? (Y/N): \")\r\n if meta == \"Y\" or meta == \"y\":\r\n metapath = input(\"Enter metadata file path: \")\r\n with open(metapath, 'r') as metafile:\r\n for line in metafile:\r\n line = line.split('\\t')\r\n item = line[0].strip('\\n')\r\n value = line[-1].strip('\\n')\r\n if item in attributes.keys():\r\n attributes[item].append(value)\r\n else:\r\n attributes[item] = [value]\r\n else:\r\n input_attributes = input(\"Enter an attribute followed by a value. i.e. Project Name: iknowit, Date: 04-11-2019: \")\r\n for attribute in input_attributes.split(','):\r\n attribute = attribute.split(':')\r\n attributes[attribute[0].strip(' ')] = attribute[1].strip(' ')\r\n data_file = h5py.File(out_file, 'a')\r\n dset = data_file.create_group(groupname)\r\n for k, v in attributes.items():\r\n dset.attrs[k] = v",
"def process_completion_mcg():\n warnings = ['\\nProcessing User completions mark Report data Warnings:\\n']\n warnings_to_process = False\n print('\\nProcessing User completions mark Report data.')\n # Confirm the required files are in place\n required_files = ['User completions mark Report']\n ad.confirm_files('User completions mark Report', required_files)\n # Get name for 'User completions mark Report' Report data file and then load\n report_data, to_add, warnings_to_add = load_data('User_Completions_Mark_'\n 'Report_')\n # print('Check loaded data:')\n # ad.debug_list(report_data)\n if to_add:\n warnings_to_process = True\n for line in warnings_to_add:\n warnings.append(line)\n # Create a dataframe with the data\n headings = ['Course', 'Tutor group', 'Student ID', 'Student', 'Tutor',\n 'Head Tutor', 'Manager']\n comp = pd.DataFrame(data = report_data, columns = headings)\n # Change value in Course column to 'Skip' if not a student course\n comp['Course'] = comp['Course'].apply(list_non_st)\n # Remove courses that are not Part-time ('Skip' in 'Course')\n comp = comp.drop(comp.index[comp['Course'] == 'Skip']) \n # Save Master file\n f_name = 'User_Completions_Mark_All_{}.xls'.format(\n ft.generate_time_string())\n comp.to_excel(f_name, index=False)\n print('\\nUser_Completions_Mark_All_ has been saved to {}'.format(f_name))\n ft.process_warning_log(warnings, warnings_to_process)",
"def getMarkPosition(self, i: int) -> int:\n ...",
"def markConvention(self,submission):\n label='Convention?'\n submission = self.addColumn( submission, label )\n submission.loc[:,label]=\"Not Correct\"\n if not 'Convention' in submission.columns:\n return submission\n conventions = submission.ix[:,(\"Code\",\"Convention\")] \n conventions.columns = [\"Code\",\"submissionConvention\"]\n if len( conventions ) == 0:\n return submission\n conventions = conventions.merge(self.ma.loc[:, (\"Code\",\"Convention\")], how=\"left\", on=\"Code\")\n\n \"\"\" it exists, and conventions match\"\"\"\n isCorrect = list(not pd.isnull( c ) and \n bool(re.match( c,s )) for c,s in zip(conventions.Convention, conventions.submissionConvention))\n submission.loc[ isCorrect, label ] = \"Correct\"\n nCorrect = sum( isCorrect )\n \n \"\"\" \n prepare errorframe \n \"\"\"\n errors = self.ma.ix[:,(\"Code\",\"Convention\")]\n errors.columns = [ \"Code\", \"Value\" ]\n errors = errors.merge(submission.loc[:, (\"Code\",\"Convention\")], how=\"left\", on=\"Code\")\n errors.columns = [ \"Code\", \"Value\", \"ValueSubmitted\" ]\n errors = self.addColumn( errors, \"AOI\" )\n errors.loc[:,\"AOI\"]=\"Convention\"\n label = \"IsCorrect\"\n errors = self.addColumn( errors, label )\n errors.loc[:, label ]=\"False\"\n isCorrect = list(not pd.isnull( c ) and bool(re.match( c,s ))\n for s,c in zip(errors.Value, errors.ValueSubmitted))\n errors.ix[ isCorrect, label ] = \"True\"\n self.addError( errors )\n\n self.addNote(\"You had %d correct conventions, gaining %2.1f marks\" %(nCorrect, nCorrect * 1))\n self.addMark(\"%d Correct conventions\" % nCorrect, nCorrect * 1)\n\n\n return submission",
"def mark_all_groups(self, mark):\n self._mark_all_groups(mark)",
"def save_grouped_Brain_Data_archive_from_raw(Brain_Data_filepath):\n print(\"loading pkl...\")\n Brain_Data_allsubs = pickle.load(open(Brain_Data_filepath,'rb'))\n print(\"pkl loaded.\")\n \n \n Brain_Data_allsubs_nn = Brain_Data_allsubs[Brain_Data_allsubs.X.response.isnull()==False]\n \n print(\"filtered by response.\")\n behavioral_design = Brain_Data_allsubs_nn.X.copy()\n grouping_var_list=['response']\n run_data_list = ['type','run','wave','subject']\n #for each beta, get the key designating which group it belongs to\n behavioral_design_group_key = behavioral_design[run_data_list + grouping_var_list]\n #now just get a list of the groups from that\n grouped_subj_behavioral_design = behavioral_design_group_key.drop_duplicates().reset_index(drop=True)\n \n print(\"iterating through group\")\n bd_list = []\n #go through each group\n group_len = grouped_subj_behavioral_design.shape[0]\n for row_i in range(group_len):\n #pull the rows of the original design that are within the group\n print(str(row_i) + \" of \" + str(group_len) + \", \",end='')\n beta_group = grouped_subj_behavioral_design.iloc[row_i,:]\n betas_in_group = (behavioral_design_group_key==beta_group).all(axis=1)\n\n #filter on that\n if(betas_in_group.sum()>1):\n group_beta = Brain_Data_allsubs_nn[betas_in_group].mean()\n else:\n group_beta = Brain_Data_allsubs_nn[betas_in_group]\n bd_list = bd_list + [group_beta]\n\n print('concatenating...')\n\n\n #img_list = Brain_Data_allsubs_nn[]\n bd_grouped = nlt.utils.concatenate(bd_list)\n bd_grouped.X=grouped_subj_behavioral_design\n \n \n \n filepath_out = re.sub('\\.pkl$','_grouped.pkl',Brain_Data_filepath)\n print('saving ' + filepath_out)\n \n with open(filepath_out, 'wb') as pkl_file:\n pickle.dump(bd_grouped,pkl_file)",
"def mark_empty_groups(self, mark):\n self._mark_empty_groups(mark)",
"def marked(df):\n mark_df = df[df.event_type == 'mark']\n\n # we dont need the event_type anymore (to save memory)\n mark_df = mark_df.drop(columns=['event_type'])\n\n sorted_mark_df = mark_df.sort_values('ts')\n\n depuplicated_mark_df = sorted_mark_df.drop_duplicates(['user_id'])\n\n return depuplicated_mark_df",
"def WriteData2(df, sheet, row_start_point, col_start_point, GroupNums1, GroupNums2):\n for row in range(GroupNums1):\n tmp = df.iloc[:, 0]\n try:\n sheet.write(row_start_point + row, col_start_point, tmp[row].astype(float), style2)\n except:\n sheet.write(row_start_point + row, col_start_point, tmp[row], style2)\n\n for row in range(GroupNums2):\n tmp = df.iloc[:, 1]\n try:\n sheet.write(row_start_point + row, col_start_point + 1, tmp[row].astype(float), style2)\n except:\n sheet.write(row_start_point + row, col_start_point + 1, tmp[row], style2)",
"def createNewGroupRow(currGroup, nextGroup, pointsToGather, endPoint):\n newRow = {}\n newRow[\"Region No\"] = currGroup[\"Region No\"]\n currPoint = 1\n #Sort out the Group Points\n for currPointIndex in range(len(pointsToGather[0])):\n currGroupPoint = pointsToGather[0][currPointIndex]\n newRow[f\"Point {currPoint + currPointIndex} Orig Rt\"] = currGroup[f\"Point {currGroupPoint} Orig Rt\"]\n newRow[f\"Point {currPoint + currPointIndex} Agent Difference\"] = currGroup[f\"Point {currGroupPoint} Agent Difference\"]\n newRow[f\"Point {currPoint + currPointIndex} Action to Next Point\"] = currGroup[f\"Point {currGroupPoint} Action to Next Point\"]\n \n currPoint += len(pointsToGather[0])\n\n for currPointIndex in range(len(pointsToGather[1])):\n currGroupPoint = pointsToGather[1][currPointIndex]\n newRow[f\"Point {currPoint + currPointIndex} Orig Rt\"] = nextGroup[f\"Point {currGroupPoint} Orig Rt\"]\n newRow[f\"Point {currPoint + currPointIndex} Agent Difference\"] = nextGroup[f\"Point {currGroupPoint} Agent Difference\"]\n newRow[f\"Point {currPoint + currPointIndex} Action to Next Point\"] = nextGroup[f\"Point {currGroupPoint} Action to Next Point\"]\n \n #Sort out the Goal Point and Next Point\n goalPoint = pointsToGather[1][currPointIndex]+1\n\n newRow[f\"Goal Point Orig Rt\"] = nextGroup[f\"Point {goalPoint} Orig Rt\"]\n newRow[f\"Goal Point Agent Difference\"] = nextGroup[f\"Point {goalPoint} Agent Difference\"]\n\n if endPoint:\n newRow[f\"Next Point Rt Orig\"] = nextGroup[\"Goal Point Orig Rt\"]\n newRow[f\"Next Point Agent Difference\"] = nextGroup[\"Goal Point Agent Difference\"]\n newRow[f\"Agent Action to Next Point\"] = nextGroup[f\"Point {goalPoint} Action to Next Point\"]\n else:\n goalPoint += 1\n newRow[f\"Next Point Rt Orig\"] = nextGroup[f\"Point {goalPoint} Orig Rt\"]\n newRow[f\"Next Point Agent Difference\"] = nextGroup[f\"Point {goalPoint} Agent Difference\"]\n newRow[f\"Agent Action to Next Point\"] = nextGroup[f\"Point {goalPoint} Action to Next Point\"]\n\n return newRow"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
subcodes: all the student submission codes. search: the model answer group slice we are searching for in subcodes. find the maximum length set of codes in search which match somewhere in the student subCodes. try to find some group in subCodes, of length len(codes)..2. the most important thing is to match the longest length of search, so we first look for an exact match of search, and then look for an exact match of search with 1 extra code interspersed. start looking for a chunk one longer than searchSetLength, because there can be 1 mistake. traverse student subCodes one by one, see if enough codes exist, stop when found. find the maximum length winner from subCodes: either an exact winner, or a winner with a single inner wrong element. be satisfied with increasingly smaller sets of the model answer group
|
def findSlice(self, subCodes, search):
    """Find the longest run of subCodes matching the model-answer group `search`.

    Prefers an exact contiguous match; failing that, accepts a run with a
    single extra (wrong) code in the interior, returning only the codes
    that belong to the model answer. Returns [] when nothing matches.
    """
    searchSet = set(search)
    for searchSliceLen in range(len(search), 0, -1):
        # go through the student answer, from start to end
        for startPos in range(0, len(subCodes) - searchSliceLen + 1):
            # first, look for a contiguous match:
            # see if the current slice is an exact winner, that is, a set of
            # codes in subCodes that has searchSliceLen of the codes in search
            # e.g. subCodes = abcdef, search = abc, searchSliceLen = 3
            # every code in this chunk of the student's submission
            # has a match in the model answer (search)
            # and there is no bigger match
            # (.ix label slicing is end-inclusive, so this window holds searchSliceLen codes)
            subSlice = subCodes.ix[startPos:startPos + searchSliceLen - 1]
            #print "exact", searchSliceLen, startPos, len(subCodes), len(subSlice)
            if len(searchSet & set(subSlice)) == searchSliceLen:
                return subSlice
            # Now, if we are not already at the end,
            # search for the single mistakes:
            # the first and last codes of the student's window must match
            # and there is 1 mistake in the middle somewhere
            if startPos + searchSliceLen - 1 == len(subCodes):
                continue
            subSlice = subCodes.ix[startPos:startPos + searchSliceLen]
            #print "inexact", searchSliceLen, startPos, len(subCodes), len(subSlice)
            if (subSlice.iloc[0] in searchSet and
                subSlice.iloc[len(subSlice) - 1] in searchSet and
                len(searchSet & set(subSlice)) == searchSliceLen
                ):
                #print "off by one"
                # off-by-one winner
                # Assert: there should be one incorrect code, in the middle of the group somewhere
                #assert(len(temp)==len(foundSlice)-1, "should have one error code at this stage, "+temp+foundSlice)
                return subSlice[subSlice.isin(searchSet)]
    return []
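To make the matching idea concrete, here is a simplified, standalone sketch of the same search over plain Python lists: find the longest window of submission codes drawn from the model-answer group, falling back to a window with one extra code in the interior. It assumes list inputs rather than the pandas Series the method above slices, so it is an illustration of the technique, not the class method itself.

# Simplified list-based sketch of the matching strategy above (illustrative).
def find_slice_simplified(sub_codes, search):
    search_set = set(search)
    for run_len in range(len(search), 0, -1):
        for start in range(0, len(sub_codes) - run_len + 1):
            # exact window: every code here belongs to the model-answer group
            window = sub_codes[start:start + run_len]
            if len(search_set & set(window)) == run_len:
                return window
            # off-by-one window: one extra code allowed strictly inside the run
            if start + run_len < len(sub_codes):
                window = sub_codes[start:start + run_len + 1]
                if (window[0] in search_set and window[-1] in search_set
                        and len(search_set & set(window)) == run_len):
                    return [c for c in window if c in search_set]
    return []

# "X" is the single interior mistake and is dropped from the returned match.
print(find_slice_simplified(["A", "B", "X", "C", "D"], ["B", "C", "D"]))   # ['B', 'C', 'D']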
|
[
"def is_subsequence2(s, w, y, debug=True):\n n, m = len(s), len(w)\n i, j = 0, 0\n e = 0\n sufficient_len = lambda i, j: (n - i) - (m - j) >= 0\n while i < n and j < m and sufficient_len(i, j):\n\n if debug: print(\"j={}, w[j] = {}\".format(j, w[j]))\n\n # l is the list of index in s for character at w[j]\n\n l = y[w[j]] if w[j] in y else None\n if debug: print(\"l={}, w[j]={}, j={}\".format(l, w[j], j))\n\n # Find index t of w[j] in s such that min(t >= i in l)\n q = [k for k in l if k >= i] if l else None\n t = min(q) if q else None\n #t = min(k for k in l if k >= i) if l else None\n\n # Use t is not None to compare since possible t values are 0.\n if t is not None:\n # The letter of a word w in a dictionary D is found at index t of string S\n i = t+1\n j += 1\n if debug: print(\"k = {}, i = {}, j = {}, q = {}, t = {}, s = {}, w = {}\"\n .format(e, i, j, q, t, s[i:], w[j:]))\n else:\n # The letter of a word w is not found in string S\n i += 1\n if debug: print(\"k = {}, i = {}, j = {}, s = {}, w = {}\".format(k , i, j, s[i:], w[j:]))\n if debug: print()\n\n # For debug purpose to count number of loops\n if debug: e += 1\n\n # Returns True if all letters of word w is found in string S. Otherwise, False\n return j == m",
"def findMinSubsequence(characters,stringArray):\n minLength = len(characters) #The minimum possible subsequence is the no. of charatcers in characters list, which will be best case.\n maxLength = len(stringArray) #The max possible subsequence is the no. of characters in stringArray, which will be worst case scenario.\n\n minSubsequence = []\n\n flags = {} #This is a dictionary to keep track of the characters found in the subsequence.\n for i in characters: #Here we are setting all the characters' flags to zero, since no character is yet found.\n flags[i] = 0\n \n\n for k in range(minLength,maxLength+1): #k here represents the current length of subsequence being processed. \n for j in range(0,maxLength-k+1): #j here reprsents the current start index of subsequence with k characters. \n minSubsequence.clear() \n for i in range(j,k+j): #i iterates over each character in a subsequence with k characters with start index j.\n minSubsequence.append(stringArray[i])\n if stringArray[i] in flags.keys():\n flags[stringArray[i]] = 1\n if 0 not in flags.values():\n return minSubsequence\n flags.clear()\n for i in characters:\n flags[i] = 0",
"def nt_search(seq, subseq): # -> list[str]:\n ...",
"def make_matched_subset_fuzzy(d_in,print_output=False) :\n d = d_in.copy() \n d = data_query(d,q40a=[4], q40b=[4]) \n d_pre = data_query(d,PrePost='Pre')\n d_post = data_query(d,PrePost='Post')\n #for each pre-entry create a matching score between pre and post\n partial_match_1of3 = 0 \n partial_match_2of3 = 0\n SID_match = 0 \n \n match_unique_ID = p.Series(np.zeros(len(d),dtype=np.int),name='SID_unique',index=d.index)\n match_unique_ID = match_unique_ID.apply(str)\n \n #loop over all the possible matches and test for 2 of 3 matches\n for ind_pre in d_pre.index :\n pre_row = d_pre.ix[ind_pre]\n \n for ind_post in d_post.index :\n post_row = d_post.ix[ind_post]\n score = 0\n #make sure to clean up the strings before matching\n if clean_str(str(pre_row['First_Name'])) == clean_str(str(post_row['First_Name'])) :\n score += 1\n if clean_str(str(pre_row['Last_Name'])) == clean_str(str(post_row['Last_Name'])) :\n score += 2\n if clean_str(str(pre_row['SID'])) == clean_str(str(post_row['SID'])) :\n score += 2\n SID_match +=1\n if score >= 1:\n partial_match_1of3 += 1\n if score >= 3: #Must match 2 of 3 \n unique_ID = str(pre_row['SID'])+'_'+str(post_row['SID']) \n match_unique_ID[ind_pre] = unique_ID\n match_unique_ID[ind_post] = unique_ID\n partial_match_2of3 += 1 \n if print_output == True: \n print(str(score) + ' | ' + \\\n str(pre_row['First_Name']) + '?' + str(post_row['First_Name']) + ' | ' + \\\n str(pre_row['Last_Name']) + '?' + str(post_row['Last_Name']) + ' | ' + \\\n str(pre_row['SID']) + '?' + str(post_row['SID']))\n match_unique_ID_df = p.DataFrame(match_unique_ID, columns=['SID_unique'])\n d = p.concat([d,match_unique_ID_df], axis=1) \n d = d[d['SID_unique'] != '0'] #only return the items which have a match\n \n #Goal: Remove the duplicate entries that have identical 'SID_unique'\n match_counts = d['SID_unique'].value_counts()\n duplicates = match_counts[(match_counts > 2) + (match_counts == 1)] #A series with SID_uniques that occur more than 2 times\n duplicates_SID_unique_list = duplicates.index.tolist() #the list of SID_uniques\n duplicates_index = data_query(d,SID_unique=duplicates_SID_unique_list).index #the corresponding indices\n d = d.drop(duplicates_index) #drop the duplicate indices\n \n #Remove one \n \n #print summary\n if print_output == True : \n print(\"SID matches = \" + str(SID_match))\n print(\"partial matches 2 of 3 = \" + str(partial_match_2of3))\n print(\"matches after duplicates removed = \" + str(len(d)) + \"/2 =\" + str(len(d)/2))\n return d",
"def _slice_size_search(self, batch_size: int, sub_batch_size: int, supports_sub_batching: bool) -> int:\n raise NotImplementedError",
"def fasta_within_seq_big_withError(myfasta, error_rate = 0.02,kmerlen = 6):\n # add dict of seqlen\n dc_seqlen = {n:len(k.seq) for n,k in enumerate(myfasta)}\n seqlen_min = min(dc_seqlen.values())\n if seqlen_min < kmerlen:\n if seqlen_min >= 6:\n print('minimum protein length is', seqlen_min, 'change kmerlen to', seqlen_min)\n kmerlen = seqlen_min\n else:\n print('minimum protein length is', seqlen_min, 'change kmerlen to 6')\n kmerlen = 6\n\n time1 = time.time()\n dickmernum = getDicKmernum(myfasta, kmerlen = kmerlen)\n # remove keys with single value to speed up\n dickmernum = {k:v for k,v in dickmernum.items() if len(v) > 1}\n print(time.time()-time1) \n\n toremove = set()\n if tqdm_exist:\n to_iter = tqdm.tqdm(range(len(myfasta)))\n else:\n to_iter = range(len(myfasta))\n for num1 in to_iter:\n seq1 = str(myfasta[num1].seq)\n seq1len = dc_seqlen[num1]\n seq1kmers = [] # all kmernum, here is kmer5 in seq1\n for i in range(len(seq1)+1-kmerlen):\n seq1kmers.append(seq1[i:i+kmerlen])\n seq1kmers = set(seq1kmers)\n if error_rate == 0:\n if any([i not in dickmernum for i in seq1kmers]):\n continue\n # print(time.time()-time1)\n seq1targets = []\n for kmernum in seq1kmers:\n if kmernum in dickmernum:\n seq1targets += list(dickmernum[kmernum])\n seq1targets = Counter(seq1targets) # count the number of common kmers for each targets\n seq1targets = seq1targets.most_common() # sort the targets based on the number of commn kmers\n # print(time.time()-time1)\n errors = int(len(seq1)*error_rate)\n for seq2id, seq2_counts in seq1targets:\n if seq2id != num1:\n if seq1len <= dc_seqlen[seq2id]:\n if seq2id not in toremove:\n if seq2_counts >= len(seq1kmers) - errors * kmerlen:\n seq2 = str(myfasta[seq2id].seq)\n if errorMatch(seq1,seq2,errors):\n toremove.add(num1)\n break\n \n print(time.time()-time1)\n print('further removed sequence number is')\n print(len(toremove))\n nonredunfasta =[]\n for i in range(len(myfasta)):\n if i not in toremove:\n nonredunfasta.append(myfasta[i])\n return nonredunfasta",
"def main():\n\n samp_size = [500, 1000, 10000]\n tests = {'Sequential': 0,\n 'Ordered': 0,\n 'Bin Iterative': 0,\n 'Bin Recursive': 0}\n\n for smpl in samp_size:\n counter = 0\n while counter < 100:\n test_list = list_gen(smpl)\n tests['Sequential'] += sequential_search(test_list, -1)[0]\n tests['Ordered'] += ordered_sequential_search(test_list, -1)[0]\n tests['Bin Iterative'] += binary_search_iterative(test_list, -1)[0]\n tests['Bin Recursive'] += binary_search_recursive(test_list, -1)[0]\n counter += 1\n\n print 'For sample size %s:' % (smpl)\n\n for tst in tests:\n print ('%s Search took %10.7f seconds to run, '\n 'on average.') % (tst, tests[tst] / counter)",
"def find_sublist(mainlist, sublist):\n match_index = -1\n for start in range( len(mainlist)-len(sublist)+1 ):\n local_match = True\n for i in range(len(sublist)):\n if (mainlist[start+i]!=sublist[i]):\n local_match = False\n break\n if local_match:\n match_index = start\n break\n return match_index",
"def test_search_big(self):\n seq = \"GCCTGGAAAGGC\"\n filler = \"A\"*50\n big_seq = filler + seq + filler\n motif = [(54, 'CTGGAAAG')]\n self.assertEqual(stem.search(motif, big_seq), [seq])",
"def max_scu_list(seg_ng):\n\tscus_list, cos_list, nglen_list = [], [], []\n\tfor each_ng in seg_ng:\n\t\tif each_ng.strip() in ng_dict:\n\t\t\tscus_list.append(ng_dict[each_ng.strip()][0])\n\t\t\tcos_list.append(ng_dict[each_ng.strip()][1])\n\t\t\tnglen_rep = list(itertools.repeat(len(each_ng.strip().split(\" \")), len(ng_dict[each_ng.strip()][0])))\n\t\t\tnglen_list.append(nglen_rep)\n\t\n\tscus = list(itertools.product(*scus_list))\n\tcos_sim = list(itertools.product(*cos_list))\n\tng_len = list(itertools.product(*nglen_list))\n\tscu_cs = {}\n\tfor i in xrange(len(scus)): # {scu_id_combo : [(cos_sim_combo), (ng_len_combo)]} \n\t\tscu_cs[scus[i]] = [cos_sim[i], ng_len[i]]\n\n\tscu_uel = [list(set(each_el)) for each_el in scus]\n\tuq_list = [(list(x), compute_score(list(x))) for x in set(tuple(x) for x in scu_uel)]\n\t# print \"MAX ------- \", max(uq_list, key = (lambda x : x[1]))\n\n\tmax_value = max(uq_list, key = (lambda x : x[1]))\n\tmax_key = max_value[0] # This is a 'set' of unique scu id's\n\t# Will look thru scu_cs, look for dictionary keys having same (&only these el)\n\tnew_d = {}\n\tfor each_key in scu_cs:\n\t\tif ((len(set(each_key)) == len(set(max_key)) == len(set(each_key).intersection(set(max_key))))):\n\t\t\t# print \"Matching : \",each_key, max_key\n\t\t\tfor i in xrange(len(each_key)): # {scu_id_combo : cos_sim_combo, len_ng_combo}\n\t\t\t\tif each_key[i] not in new_d:\n\t\t\t\t\t# {scu_id_combo \t: \tcos_sim_combo, len_ng_combo}\n\t\t\t\t\tnew_d[each_key[i]] = [[scu_cs[each_key][0][i]], [scu_cs[each_key][1][i]]]\n\t\t\t\telif ((each_key[i] in new_d) and (scu_cs[each_key][0][i] not in new_d[each_key[i]][0])):\n\t\t\t\t\tnew_d[each_key[i]][0].append(scu_cs[each_key][0][i])\n\t\t\t\t\tnew_d[each_key[i]][1].append(scu_cs[each_key][1][i])\n\n\tprint \"new_d\", new_d\t\t\t\t\n\treturn [max(uq_list, key = (lambda x : x[1])), new_d]",
"def find_substrs12_endchars(sidestr,mainstr,substr1,substr2,delay1=0,delay2=0):\n ## don't use regular expressions re module, which finds only non-overlapping matches\n ## we want to find overlapping matches too.\n substr2len = len(substr2)\n substr1len = len(substr1)\n abs_idx1 = 0 ## mainstr is getting chopped, but we maintain abs index on sidestr\n while True:\n idx2 = mainstr.find(substr2)\n ## find returns -1 if substr2 not found\n if idx2 != -1:\n endcharidx2 = idx2+substr2len+delay2\n ### NOTE: abs_startidx1 is one earlier than definition!!! I think necessary for causality.\n ## put +1 below to switch to definition in Quinn et al 2010\n abs_startidx1 = abs_idx1 + endcharidx2 - substr1len-delay1\n if endcharidx2<len(mainstr): # mainstr Y has characters left?\n if abs_startidx1 >= 0: # sidestr X has sufficient chars before?\n ## sidestr has substr1 before the char to be returned? and mainstr is not over\n ## IMP: below if's first term is the only place directed info enters.\n ## Remove first term below and you get just the entropy of mainstr Y: VERIFIED.\n #print sidestr[abs_startidx1:abs_startidx1+substr1len], substr1, abs_startidx1\n if sidestr[abs_startidx1:abs_startidx1+substr1len]==substr1:\n yield mainstr[endcharidx2]\n else: # reached end of string\n break\n ## chop the mainstr just after the start of substr2,\n ## not after the end, as we want overlapping strings also\n mainstr = mainstr[idx2+1:]\n ## don't chop sidestr as substr1len may be greater than substr2len\n ## in the next iteration, idx2 will be relative, but for sidestr we maintain abs_idx1\n abs_idx1 += idx2+1\n else: # substr2 not found\n break",
"def make_comparison_set(start_set, end_set,average,l_b, u_b, set_size):\n\n # assert len(end_set) == set_size, \"The size of the end set is too small. Must contain 7 elements\"\n better_peers = end_set[end_set > average]\n worse_peers = end_set[end_set < average]\n worse_eq_peers = end_set[end_set <= average]\n\n # the solution set must have 20-40% of its members worse than \"average\" and 30-50% worse-or-equal than average.\n proportion_of_worse = random.uniform(0.2,0.40)\n number_of_worse = set_size*proportion_of_worse\n number_of_worse_eq = set_size*(proportion_of_worse+0.1)\n\n if average+l_b <= round(np.mean(end_set), 2) <= average+u_b and len(end_set) == set_size and \\\n len(worse_peers)>=number_of_worse and len(worse_eq_peers)>=number_of_worse_eq:\n return end_set\n\n # assert len(start_set) > 0, \"Could not find a subset from the set given\"\n if len(start_set) == 0:\n return np.zeros(set_size)\n\n end_set = np.append(end_set, start_set[0])\n start_set = start_set[1:]\n\n if round(np.mean(end_set), 2) > u_b:\n max_indices = end_set.argsort()[-3:]\n max_index = np.argmax(end_set)\n end_set = np.delete(end_set, max_index)\n elif round(np.mean(end_set), 2) < l_b:\n min_indices = end_set.argsort()[3:]\n min_index = np.argmin(end_set)\n end_set = np.delete(end_set, min_index)\n return make_comparison_set(start_set, end_set, average, l_b, u_b, set_size)",
"def _identify_substring(self, sentence_slice, fsa_list):\n fsaCounter = -1\n for fsa in fsa_list:\n logger.debug(\"Applying FSA %s\" % fsa.fsaname)\n fsaCounter += 1\n # We first used acceptsShortestSubstringOf(), now we use the longest\n # match. The latter gave a marginally better result, but this was\n # only apparent on one Slink in the Slink regression test so more\n # tests may be needed.\n lenSubstring = fsa.acceptsSubstringOf(sentence_slice)\n if lenSubstring:\n logger.debug(\"FSA %s matched\" % fsa.fsaname)\n return (lenSubstring, fsaCounter)\n return (0, fsaCounter)",
"def subsets(self, nums):\n return self.recursive_approach(nums)",
"def split(probs):\n if len(probs) == 1:\n return\n prob_sum = sum(v for k,v in probs)\n diff_min = 1.0\n index_min = -1\n for i in range(1,len(probs)):\n first_sum = sum(v for k,v in probs[:i])\n second_sum = prob_sum - first_sum\n if abs(first_sum - second_sum) < diff_min:\n diff_min = abs(first_sum - second_sum)\n index_min = i\n #adding 0 or 1 for two sets \n for i in range(index_min):\n codewords[probs[i][0]] += '0'\n for i in range(index_min, len(probs)):\n codewords[probs[i][0]] += '1'\n #recursively call split() for two subsets\n split(probs[:index_min])\n split(probs[index_min:])",
"def substring(s):\n\n\ts=s.lower()\n\tn=len(s)\n\tss=[] #substring set\n\tlongest=1\n\tlongesti=0\n\tlongestj=0\n\n\ti=0\n\twhile(i<n):\n\t\t# if the remainder of the string to be evaluated is \n\t\t# less than the longest found so far, we are done.\n\t\tif n-i<longest:\n\t\t\tbreak\t\t\n\n\t\tss=[]\n\n\t\tfor j in range(i,n):\n\t\t\tif s[j] not in ss:\n\t\t\t\tss.append(s[j])\n\t\t\telse:\n\t\t\t\tif len(ss)>longest:\n\t\t\t\t\tlongest=len(ss)\n\t\t\t\t\tlongesti=i\n\t\t\t\t\tlongestj=j\n\t\t\t\t# shortcut to reduce substrings we need to test\t\n\t\t\t\ti += ss.index(s[j])\n\t\t\t\tbreak\n\t\ti += 1\n\tprint(s[longesti:longestj])\n\treturn longest",
"def get_set(average, goal_grade, av_goal_df, set_size):\n # solution = []\n\n if average == 0:\n return np.zeros(set_size), 0, False, 'no grade'\n\n for i in range(10, 50):\n peers, w = get_same_goal_set(av_goal_df, i, goal_grade)\n if len(peers) == 0:\n has_solution = False\n edge_case = 'error'\n return np.zeros(set_size), w, has_solution, edge_case\n for j in range(0, 100):\n random.shuffle(peers)\n start_set = peers[set_size:]\n end_set = peers[:set_size]\n solution = make_comparison_set(start_set, end_set, average, 0.5, 1.0, set_size)\n\n if np.sum(solution) > 0:\n has_solution = True\n edge_case = 'no'\n return solution, w, has_solution, edge_case\n\n # edge cases\n if np.sum(solution) == 0:\n if dist_from_top(av_goal_df, average)<= set_size:\n isTop = True\n isOther = False\n edge_case = \"top\"\n elif dist_from_bottom(av_goal_df, average) <= set_size:\n isTop = False\n isOther = False\n edge_case = 'bottom'\n else:\n isTop = False\n isOther = True\n edge_case = 'other'\n solution = get_special_set(av_goal_df, average, isTop, isOther, set_size)\n has_solution = True\n return solution, 0, has_solution, edge_case\n has_solution = False\n edge_case = 'error'\n return np.zeros(set_size), w, has_solution, edge_case",
"def get_matches(strobes, idx, k, dont_merge_matches, ref_id_to_accession, acc, selfalign):\n if dont_merge_matches:\n matches = []\n for q_p1, q_p2, h in strobes:\n # print()\n # print(\"Q\", q_p1)\n if h in idx:\n for r_id, r_p1, r_p2 in grouper(idx[h], 3):\n # print(\"R\", r_id, r_p1)\n matches.append( (r_id, r_p1, q_p1, r_p2 - r_p1 + k) )\n return sorted(matches, key = lambda x: (x[0], x[2], x[1]) )\n else:\n cpm = {} # current potential merges\n merged_matches = []\n for q_p1, q_p2, h in strobes: # iterate over query in ascending order\n if h in idx:\n # print()\n # print(\"----------------\", q_p1)\n # print(cpm)\n # print(\"All pos:\", idx[h])\n # prev_r_id, prev_hit_r_p1,prev_hit_r_p2 = 0,0,0 # these only keep track of identical consecutive kmers/strobes\n for r_id, r_p1, r_p2 in grouper(idx[h], 3): # iterate over references, all in ascending order\n # if prev_r_id == r_id and r_p1 == prev_hit_r_p1 + 1 and r_p2 == prev_hit_r_p2+1:\n # prev_r_id = r_id\n # prev_hit_r_p1 = r_p1\n # prev_hit_r_p2 = r_p2\n # update_relevant_pos()\n\n # continue\n\n # print(q_p1, q_p2+ k , r_p1, r_p2+k)\n # remove self matches with below if statement, for now commented out to find eventual bugs\n if not selfalign and ref_id_to_accession[r_id] == acc:\n continue\n if r_id in cpm:\n is_added_to_an_interval_query = False\n # print(q_p1, list(cpm[r_id].keys()))\n for end_q in list(cpm[r_id].keys()):\n # print()\n # print(\"r_id\",r_id, \"end_q\", end_q)\n if q_p1 <= end_q: # overlap on query\n is_added_to_an_interval_query = True \n is_added_to_an_interval_reference = False \n # print(list(cpm[r_id][end_q].keys())) \n for end_r in list(cpm[r_id][end_q].keys()):\n # print(\"Case1 end_r\", end_r)\n # print(q_p1, )\n prev_q_p1, prev_q_p2, prev_ref_p1, prev_ref_p2 = cpm[r_id][end_q][end_r]\n # print(r_id,q_p1, \"CRUCIAL:\",prev_q_p1, prev_q_p2, prev_ref_p1, prev_ref_p2)\n # print(r_id, q_p1, cpm[r_id][end_q][end_r])\n # check all refs\n new_q_p2 = max(prev_q_p2, q_p2 + k)\n if prev_ref_p1 <= r_p1 <= end_r: # Overlap on reference\n is_added_to_an_interval_reference = True \n # print(\"OK\", prev_ref_p1, r_p1, end_r)\n # print(\"lol\", prev_ref_p1, r_p1, end_r)\n new_r_p2 = max(end_r, r_p2 + k)\n del cpm[r_id][end_q][end_r]\n if not cpm[r_id][end_q]:\n del cpm[r_id][end_q]\n if new_q_p2 not in cpm[r_id]:\n cpm[r_id][ new_q_p2 ] = {}\n cpm[r_id][ new_q_p2 ][new_r_p2] = ( prev_q_p1, new_q_p2, prev_ref_p1, new_r_p2)\n # print(\"new:\", ( prev_q_p1, new_q_p2, prev_ref_p1, new_r_p2) )\n elif new_r_p2 not in cpm[r_id][ new_q_p2 ]:\n cpm[r_id][ new_q_p2 ][new_r_p2] = ( prev_q_p1, new_q_p2, prev_ref_p1, new_r_p2)\n # print(\"appended:\", ( prev_q_p1, new_q_p2, prev_ref_p1, new_r_p2) )\n else:\n # print(\"Was already present:\", cpm[r_id][ new_q_p2 ][new_r_p2], \"attempted new:\", ( prev_q_p1, new_q_p2, prev_ref_p1, new_r_p2) )\n ( old_q_p1, new_q_p2, old_ref_p1, new_r_p2) = cpm[r_id][ new_q_p2 ][new_r_p2]\n cpm[r_id][ new_q_p2 ][new_r_p2] = ( min(old_q_p1, prev_q_p1), new_q_p2, min(old_ref_p1, prev_ref_p1), new_r_p2)\n\n # cpm[r_id][ new_q_p2 ][new_r_p2] = [ prev_q_p1, new_q_p2, prev_ref_p1, new_r_p2]\n \n if not is_added_to_an_interval_reference:\n if new_q_p2 not in cpm[r_id]:\n cpm[r_id][ new_q_p2 ] = {} \n cpm[r_id][ new_q_p2 ][r_p2 + k] = (q_p1, new_q_p2, r_p1, r_p2 + k)\n # print(\"new added1:\", (q_p1, new_q_p2, r_p1, r_p2 + k) )\n\n elif r_p2 + k not in cpm[r_id][new_q_p2]:\n cpm[r_id][ new_q_p2 ][r_p2 + k] = (q_p1, new_q_p2, r_p1, r_p2 + k )\n # print(\"new added2:\", (q_p1, new_q_p2, r_p1, r_p2 + k ) )\n 
else:\n # print(\"Was already present:\", cpm[r_id][ new_q_p2 ][r_p2 + k], \"attempted new:\", (q_p1, new_q_p2, r_p1, r_p2 + k ) )\n ( old_q_p1, new_q_p2, old_ref_p1, new_r_p2) = cpm[r_id][ new_q_p2 ][r_p2 + k]\n cpm[r_id][ new_q_p2 ][new_r_p2] = ( min(old_q_p1, q_p1), new_q_p2, min(old_ref_p1, r_p1), new_r_p2)\n\n else:\n # print(\"Case2 end_r\", end_r)\n # revove the intervals that we have passed on the query here to not make the cpm dict too large...\n # add to merged_matches dict\n for r_end in cpm[r_id][end_q]:\n (q_pos_start, q_pos_stop, r_pos, r_pos_stop) = cpm[r_id][end_q][r_end]\n merged_matches.append( (r_id, r_pos, q_pos_start, r_pos_stop - r_pos) )\n del cpm[r_id][end_q]\n\n\n # # print(end_q, cpm[r_id][end_q][1])\n # # assert end_q == cpm[r_id][end_q][1]\n # # there is overlap in both reference and query to previous hit\n # # `q_1 <= q_2 <= q'_1 +k` and `r_1 <= r_2 <= r'_2+k`\n # if cpm[r_id][end_q][0] < q_p1 and q_p1 < cpm[r_id][end_q][1] and cpm[r_id][end_q][2] <= r_p1 <= cpm[r_id][end_q][3]:\n # prev_q_p1, prev_q_p2, prev_ref_p1, prev_ref_p2 = cpm[r_id][end_q]\n # new_q_p2 = max(cpm[r_id][end_q][1], q_p2 + k)\n # new_r_p2 = max(cpm[r_id][end_q][3], r_p2 + k)\n # del cpm[r_id][end_q]\n # cpm[r_id][ new_q_p2 ] = [ prev_q_p1, new_q_p2, prev_ref_p1, new_r_p2]\n # is_added_to_an_interval_query = True\n # # break\n # else:\n # prev_q_p1, prev_q_p2, prev_ref_p1, prev_ref_p2 = cpm[r_id][end_q]\n # new_q_p2 = max(cpm[r_id][end_q][1], q_p2 + k)\n # new_r_p2 = max(cpm[r_id][end_q][3], r_p2 + k)\n # del cpm[r_id][end_q]\n # cpm[r_id][ new_q_p2 ] = [ prev_q_p1, new_q_p2, prev_ref_p1, new_r_p2]\n # is_added_to_an_interval_query = True\n\n # # cpm[r_id][end_q][1] = max(cpm[r_id][end_q][1], q_p2 + k)\n # # cpm[r_id][end_q][3] = max(cpm[r_id][end_q][3], r_p2 + k)\n # # print(cpm[r_id][0], q_p2 + k)\n # # print(cpm[r_id][2], r_p2 + k)\n # # if cpm[r_id][1] > q_p2 + k:\n # # print(\"LOOL\")\n # # if cpm[r_id][3] > r_p2 + k:\n # # print(\"LOOL222\") \n if not is_added_to_an_interval_query: # no overlap with prev query sequences\n # prev_q_p1, prev_q_p2, prev_ref_p1, prev_ref_p2 = cpm[r_id][end_q]\n # assert prev_q_p2 - prev_q_p1 == prev_ref_p2 - prev_ref_p1\n # print(prev_q_p1,prev_q_p2, prev_q_p2 - prev_q_p1)\n # print(prev_ref_p1,prev_ref_p2, prev_ref_p2 - prev_ref_p1)\n # merged_matches.append( (r_id, prev_ref_p1, prev_q_p1, prev_ref_p2 - prev_ref_p1) )\n # cpm[r_id] = [q_p1, q_p2 + k, r_p1, r_p2 + k ]\n # print(\"HERE\")\n cpm[r_id][q_p2 + k] = {}\n cpm[r_id][q_p2 + k][r_p2 + k] = (q_p1, q_p2 + k, r_p1, r_p2 + k)\n else:\n cpm[r_id] = { q_p2 + k : {r_p2 + k : (q_p1, q_p2 + k, r_p1, r_p2 + k) }}\n # cpm[r_id] = [q_p1, q_p2 + k, r_p1, r_p2 + k ]\n\n # close all open merge intervals\n for r_id in cpm.keys():\n for q_stop in cpm[r_id]:\n for r_stop in cpm[r_id][q_stop]:\n (q_p1, q_pos_stop, r_pos, r_pos_stop) = cpm[r_id][q_stop][r_stop]\n merged_matches.append( (r_id, r_pos, q_p1, r_pos_stop - r_pos) )\n # print(merged_matches)\n if not merged_matches:\n return []\n # print(acc, merged_matches)\n # return sorted(merged_matches, key = lambda x: x[2]) \n\n # # If there are repetitive matches across e.g. 
a chromosome\n # # the merging will be broken up at the repetitive kmer.\n # # here we post merge such spuriously broken up overlapping matches\n\n # # sort first by reference id then by sum of reference and query position to resolve perfect repeats!\n # new_sort = sorted(merged_matches, key = lambda x: (x[0], x[1]+x[2], x[1] ) )\n # merged_matches = sort_merge(new_sort)\n # # print(merged_matches)\n\n # # sort first by reference id then by reference position\n # new_sort = sorted(merged_matches, key = lambda x: (x[0], x[1] ) )\n # merged_matches = sort_merge(new_sort)\n # # print(merged_matches)\n\n # # sort first by reference id then by query position\n # new_sort = sorted(merged_matches, key = lambda x: (x[0], x[2] ) )\n # merged_matches = sort_merge(new_sort)\n # # print(merged_matches)\n\n return sorted(set(merged_matches), key = lambda x: (x[0], x[2], x[1]) )",
"def test_identical_sequence_subset(sequences, test_sequence, print_result=False):\n fastMethod = True\n globalAlign = True\n matchscore = 4\n mismatchscore = -4\n gapscore = -5\n\n alignments = get_spoa_alignment_no_ref(sequences=sequences)\n\n test_alignments = get_alignments_by_sequence(alignments=alignments, sequence=test_sequence)\n\n test_alignment_strings = [test_alignment[1] for test_alignment in test_alignments]\n\n test_alignment_string_set = set(test_alignment_strings)\n\n if print_result:\n print_identical_subset_result(sequences=sequences,\n alignments=alignments,\n alignment_string_set=test_alignment_string_set)\n\n assert len(test_alignment_string_set) == 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
For each code in the submission, mark whether it has the correct prefix. Assume all submissions are incorrect to start with, then mark those that are correct.
|
def markPrefix(self,submission):
    label='Prefix?'
    submission = self.addColumn( submission, label )
    submission.loc[:,label]="Not Correct"
    if not 'Prefix' in submission.columns:
        return submission
    prefixes = submission.ix[:,("Code","Prefix")]
    prefixes.columns = [ "Code","submissionPrefix"]
    if len( prefixes ) == 0:
        return submission
    prefixes = prefixes.merge(self.ma.loc[:, ("Code","Prefix")], how="left", on="Code")
    isCorrect = list(not pd.isnull( c ) and c==s for s,c in zip(prefixes.submissionPrefix, prefixes.Prefix))
    submission.ix[ isCorrect, label ] = "Correct"
    nCorrect = sum( isCorrect )
    """
    prepare errorframe from a 'what is correct' perspective
    1) create error dataframe from master, columns Code and prefix
    1a) rename prefix to Value
    2) fill submission prefix, matching by code
    3) fill IsCorrect
    """
    errors = self.ma.ix[:,("Code","Prefix")]
    errors.columns = [ "Code", "Value" ]
    errors = errors.merge(submission.loc[:, ("Code","Prefix")], how="left", on="Code")
    errors.columns = [ "Code", "Value", "ValueSubmitted" ]
    errors = self.addColumn( errors, "AOI" )
    errors.loc[:,"AOI"]="Prefix"
    label = "IsCorrect"
    errors = self.addColumn( errors, label )
    errors.loc[:, label ]="False"
    isCorrect = list(not pd.isnull( c ) and c==s
                     for s,c in zip(errors.Value, errors.ValueSubmitted))
    errors.ix[ isCorrect, label ] = "True"
    self.addError( errors )
    self.addNote("You had %d correct prefixes, gaining %2.1f marks" %(nCorrect, nCorrect * 0.5))
    self.addMark("%d Correct prefixes" % nCorrect, nCorrect * 0.5)
    return submission
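
A minimal standalone sketch of the merge-and-compare step that markPrefix performs, assuming toy data. The `master` and `submission` frames, their column names, and the printed output are illustrative assumptions; the class helpers used above (`self.ma`, `addColumn`, `addError`, `addNote`, `addMark`) are omitted, and plain `.merge`/boolean lists stand in for the deprecated `.ix` indexer.

# Sketch only: compare each submitted prefix against the master prefix for the same code.
import pandas as pd

master = pd.DataFrame({"Code": ["A01", "B02"], "Prefix": ["P", "S"]})
submission = pd.DataFrame({"Code": ["A01", "B02", "C03"], "Prefix": ["P", "X", "P"]})

# Left-join the master prefix onto the submission by Code.
merged = submission.merge(master, on="Code", how="left", suffixes=("Submitted", "Master"))

# A prefix is correct only when the code exists in the master and the prefixes match.
isCorrect = [not pd.isnull(m) and m == s
             for s, m in zip(merged.PrefixSubmitted, merged.PrefixMaster)]
merged["Prefix?"] = ["Correct" if ok else "Not Correct" for ok in isCorrect]

print(merged)          # C03 has no master entry, so it stays "Not Correct"
print(sum(isCorrect))  # 1 correct prefix, worth 0.5 marks at 0.5 per prefix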
|
[
"def markUnorderedGroups(self,submission):\n\n maGroups= self.ma[ eAnd( isblank(self.ma.IntraGroupOrder), notblank(self.ma.Grouping)) ].Grouping.unique()\n\n # P and L groups are taken care of by absoluteOrdering routine. Different marks too\n #maGroups = set(maGroups).difference( set(\"P\", \"L\"))\n label='UnorderedGroups'\n submission = self.addColumn(submission, label )\n submission.loc[:,label]=None\n for group in maGroups:\n # take the group slice\n magSet = set( self.ma[ self.ma.Grouping==group].Code)\n subSlice = submission[ submission.Grouping==group].Code\n subSet = set( subSlice )\n nCorrect=len( magSet & subSet )\n submission.loc[ submission.Code.isin( magSet ), label] = group\n if group==\"P\":\n if nCorrect == len(magSet ) : # all correct, principal\n self.addNote( \"Correct principal diagnosis, 1 mark\" )\n self.addMark(\"Principal Diagnosis\", 1)\n self.addError( {\n 'AOI': 'PrincipalCode', \n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n 'Code': \"\", \n 'IsCorrect': \"True\" \n })\n else:\n self.addNote( \"Incorrect principal diagnosis, answer is %s, you had %s \" % ( pprintSlice(magSet), pprintSlice(subSet)) )\n self.addError( {\n 'AOI': 'PrincipalCode', \n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n 'Code': \"\", \n 'IsCorrect': \"False\" \n })\n next\n\n if group==\"L\" : # Last Codes \n if len(subSlice) > 0 and max( subSlice.index ) == max(submission.index ):\n self.addNote( \"Correct final codes, 0.5 marks\" )\n self.addMark( \"Final Code(s) Group\", 0.5 )\n self.addError( {\n 'AOI': 'LastCode', \n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n 'Code': \"\", \n 'IsCorrect': \"True\" \n })\n else:\n self.addNote( \"Incorrect final code(s), should be %s\" % ( pprintSlice(magSet)) )\n self.addError( { 'AOI': 'LastCode', \n 'Code': \"\", \n 'IsCorrect': \"False\" ,\n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n })\n\n # we don't need to process the group if the master says it is only one code long\n if len( magSet ) == 1:\n next\n\n\n\n if nCorrect == len(magSet ) : # all correct\n self.addNote( \"Unordered Group %s, %s entirely correct, 0.5 marks\" % (group, pprintSlice(magSet)) )\n self.addMark(\"Unordered Group %s\" % group, 0.5)\n self.addError( { 'AOI': 'UnorderedGroup', \n 'Code': \"\", \n 'IsCorrect': \"True\" ,\n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n })\n elif (nCorrect > 0 ) :\n self.addNote( \"Unordered Group %s partially correct, answer is %s, you had %s, 0.5 marks \" \n % (group, pprintSlice(magSet), pprintSlice(subSet)) )\n self.addMark(\"Unordered Group %s\" % group, 0.5)\n self.addError( { 'AOI': 'UnorderedGroup', \n 'Code': \"\", \n 'IsCorrect': \"False\" ,\n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n })\n else:\n self.addNote( \"Unordered Group %s, %s entirely missing\" % (group, pprintSlice(magSet)) )\n self.addError( { 'AOI': 'UnorderedGroup', \n 'Code': \"\", \n 'IsCorrect': \"False\" ,\n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': \"\",\n })\n\n return submission",
"def check_seq(self):\n nuc_list = ['A', 'T', 'C', 'G']\n global check, error_details\n for row_index, row in self.primer_df.iterrows():\n for letter in row['Primer_seq'].strip():\n if letter not in nuc_list:\n check += 1\n error = \"Invalid DNA primer sequence, see row %s in file\" % (row_index + 4)\n error_details.append(error)",
"def markConvention(self,submission):\n label='Convention?'\n submission = self.addColumn( submission, label )\n submission.loc[:,label]=\"Not Correct\"\n if not 'Convention' in submission.columns:\n return submission\n conventions = submission.ix[:,(\"Code\",\"Convention\")] \n conventions.columns = [\"Code\",\"submissionConvention\"]\n if len( conventions ) == 0:\n return submission\n conventions = conventions.merge(self.ma.loc[:, (\"Code\",\"Convention\")], how=\"left\", on=\"Code\")\n\n \"\"\" it exists, and conventions match\"\"\"\n isCorrect = list(not pd.isnull( c ) and \n bool(re.match( c,s )) for c,s in zip(conventions.Convention, conventions.submissionConvention))\n submission.loc[ isCorrect, label ] = \"Correct\"\n nCorrect = sum( isCorrect )\n \n \"\"\" \n prepare errorframe \n \"\"\"\n errors = self.ma.ix[:,(\"Code\",\"Convention\")]\n errors.columns = [ \"Code\", \"Value\" ]\n errors = errors.merge(submission.loc[:, (\"Code\",\"Convention\")], how=\"left\", on=\"Code\")\n errors.columns = [ \"Code\", \"Value\", \"ValueSubmitted\" ]\n errors = self.addColumn( errors, \"AOI\" )\n errors.loc[:,\"AOI\"]=\"Convention\"\n label = \"IsCorrect\"\n errors = self.addColumn( errors, label )\n errors.loc[:, label ]=\"False\"\n isCorrect = list(not pd.isnull( c ) and bool(re.match( c,s ))\n for s,c in zip(errors.Value, errors.ValueSubmitted))\n errors.ix[ isCorrect, label ] = \"True\"\n self.addError( errors )\n\n self.addNote(\"You had %d correct conventions, gaining %2.1f marks\" %(nCorrect, nCorrect * 1))\n self.addMark(\"%d Correct conventions\" % nCorrect, nCorrect * 1)\n\n\n return submission",
"def test_correct_digits_and_wrong_positions_human(self):\n\n game = mastermind.HumanPlayer()\n game.set_code([1, 2, 3, 4])\n black, white = game.check([2, 1, 3, 4], game.get_code())\n self.assertEqual(black, 2)\n self.assertEqual(white, 2)\n\n black, white = game.check([4, 2, 3, 1], game.get_code())\n self.assertEqual(black, 2)\n self.assertEqual(white, 2)\n\n black, white = game.check([4, 4, 4, 4], game.get_code())\n self.assertEqual(black, 1)\n self.assertEqual(white, 0)",
"def validateCDSmRNAPairs(gene, cds, mrna, strand):\n\n def _order(lst):\n return lst == sorted(lst) or lst == sorted(lst)[::-1]\n\n # load first mRNA exon into InterLap\n combined = []\n num = len(cds)\n warning = False\n for i in range(0, num):\n if strand == \"+\":\n sortCDS = sorted(cds[i], key=lambda tup: tup[0])\n else:\n sortCDS = sorted(cds[i], key=lambda tup: tup[0], reverse=True)\n compatible = []\n for x in range(0, num):\n if strand == \"+\":\n sortExon = sorted(mrna[x], key=lambda tup: tup[0])\n else:\n sortExon = sorted(mrna[x], key=lambda tup: tup[0], reverse=True)\n # simple first, if more cds than exons it is not compatible\n if len(sortCDS) > len(sortExon):\n compatible.append(False)\n continue\n result = True\n inter = InterLap(mrna[x])\n for i, coord in enumerate(sortCDS):\n if coord in inter:\n hit = list(inter.find(coord))[0]\n diff = np.subtract(coord, hit)\n # then it cannot contain the cds so has to be wrong\n if diff[0] < 0 or diff[1] > 0:\n result = False\n if len(sortCDS) > 1:\n # if an internal CDS, then must match perfectly or its wrong\n if i != 0 or (i + 1) != len(sortCDS):\n if diff[0] != 0 and diff[1] != 0:\n result = False\n elif i == 0:\n if strand == \"+\":\n if diff[1] != 0:\n result = False\n else:\n if diff[0] != 0:\n result = False\n elif (i + 1) == len(sortCDS):\n if strand == \"+\":\n if diff[0] != 0:\n return False\n else:\n if diff[1] != 0:\n return False\n compatible.append(result)\n combined.append(compatible)\n valid_orders = []\n for test in list(\n itertools.permutations(list(range(0, len(combined))), len(combined))\n ):\n # test is a tuple, slice list to see if all True\n tester = []\n for num, x in enumerate(test):\n tester.append(combined[num][x])\n if all(tester):\n valid_orders.append(list(test))\n mRNA_order = valid_orders[0]\n if not _order(mRNA_order):\n lib.log.debug(\n \"%s CDS/mRNA features out of phase, trying to fix. %s\" % (gene, mRNA_order)\n )\n if len(valid_orders) > 1:\n lib.log.debug(\n \"%s had %i possible solutions for CDS/mRNA, expect errors...\"\n % (gene, len(valid_orders))\n )\n warning = True\n mRNAout = []\n for y in mRNA_order:\n mRNAout.append(mrna[y])\n return cds, mRNAout, warning",
"def is_prefix(self, current_prefix, original, debug=DEBUG):\n if (current_prefix == original): #exit conditions\n return \"*\";\n else:\n #go backwards\n # 3 conditions for possible suffix\n split = (len(original)-len(current_prefix)) #the position at which the word is split 12 - 11 = 11 or -1\n first_part = original[0:split] #STILL Bb\n second_part = original[split:];\n second_part_cut = second_part[1:]; \n second_part_uncut = original[split-1:len(original)];\n if ((second_part in self.words_check) ): #and (not (second_part == original))\n second_condition = self.backward_trie.probability( reverse(second_part), reverse(second_part_cut), DEBUG) #could be switch cut and normal way round?\n if ((second_condition > 1 - threshold) and (second_condition < 1 + threshold)): #close to 1 (#TODO: Test closer values)\n third_condition = self.backward_trie.probability( reverse(second_part), reverse(second_part_uncut), DEBUG)\n if (third_condition < 1):\n if (first_part in self.word_score_prefix):\n self.word_score_prefix[first_part] = self.word_score_prefix.get(first_part, 0) + (reward) + 1 #20 instead of 19 because they'll be -1'd anyway. It avoids a few elses #morphemes might not in the original wordlist \n self.word_score_prefix[first_part] = self.word_score_prefix.get(first_part, 0) + punish;#self.word_score_prefix[first_part] -= 1; #if second part is not in words we don't care\n prefix_length = len(current_prefix)\n self.is_prefix(current_prefix + original[prefix_length :prefix_length+1], original, DEBUG) #recursively add on a new letter",
"def _trials_to_code(raw_exp, coded_dir):\n\n raw_trial_nums = tuple(sorted([t.trial_id for t in raw_exp.trials]))\n\n\n if not os.path.isfile(dataio.trial_index_filename(coded_dir)):\n #-- There is no index file - coding has not started yet\n print(\"no index file\")\n return True\n\n coded_trials = dataio.load_trials_index(coded_dir)\n coded_trial_nums = tuple(sorted(set([t['trial_id'] for t in coded_trials])))\n try:\n max_coded = max(coded_trial_nums)\n except:\n messagebox.showerror('Session was already coded', 'Please delete all files from the encoded-data folder and re-run WEncoder')\n return False\n\n #-- All trials were already coded\n if raw_trial_nums == coded_trial_nums:\n messagebox.showerror('Session was already coded',\n 'The encoded-data folder seems to contains the coding of all trials. ' +\n 'To encode the session again, delete all files from the encoded-data folder and re-run WEncoder)')\n return False\n\n\n #-- Coding has reached the last trial, but some trials are missing along the way\n elif max_coded == max(raw_trial_nums):\n messagebox.showerror('Session was already coded',\n 'The encoded-data folder contains the session''s coding, but the coding has skipped some trials. ')\n return False\n\n #-- More coded than raw trials\n elif max_coded > max(raw_trial_nums):\n messagebox.showerror('Session was already coded',\n 'The encoded-data folder contains the coding of MORE trials than exist in the session. ' +\n 'It could be that you have selected mismatching directories. ' +\n 'Please verify and re-run WEncoder')\n return False\n\n #-- All trials up to trial #N were coded, but some trials were not coded yet\n elif raw_trial_nums[:len(coded_trial_nums)] == coded_trial_nums:\n ans = messagebox.askquestion('Session was already coded',\n 'The encoded-data folder already contains coding for trials 1-{:}. '.format(max_coded) +\n 'Do you want to continue coding from trial #{:}?'.format(max_coded + 1))\n if ans:\n return max_coded + 1\n else:\n return False\n\n #-- Coding was done up to trial #N, but some trials were skipped and not coded\n else:\n ans = messagebox.askquestion('Session was already coded',\n 'The encoded-data folder contains the coding of trials up to {:}, '.format(max_coded) +\n 'but the coding has skipped some trials. ' +\n 'Do you want to continue coding from trial #{:}?'.format(max_coded + 1))\n if ans:\n return max_coded + 1\n else:\n return False",
"def test_postcode_found(self):\n _, _, one_E, one_N, _ = self.valid_entry_one_letter.split(',', 4)\n _, _, two_E, two_N, _ = self.valid_entry_two_letters.split(',', 4)\n\n for postcode, result in (\n ('n99zw', (one_E, one_N)), # one leading letter, no spaces\n ('n9 9zw', (one_E, one_N)), # one leading letter, one space\n ('n9 9zw', (one_E, one_N)), # one leading letter, two spaces\n ('n 9 9 z w', (one_E, one_N)), # one leading letter, a few spaces\n ('se99de', (two_E, two_N)), # two leading letters, no spaces\n ('se9 9de', (two_E, two_N)), # two leading letters, one space\n ('s e99d e', (two_E, two_N)), # two leading letters, a few spaces\n ):\n yield self.check_postcode_found, postcode, result",
"def test_invalid_postcode(self):\n for bad_postcode in (\n 'abc12', # three leading letters\n '12', # no leading letters\n '', # empty postcode\n '__21ks', # non-alphabetic character\n 'Q22 1AB', # Q cannot be in first place\n 'AJ89 1AB', # J cannot be in second place\n 'P6V 9AB', # V cannot be in third place\n 'UW9C 2XX', # C cannot be in fourth place\n 'R7H 5IB', # I cannot be in inward code\n 'R7H 5BC', # C cannot be in inward code\n 'Z223 1AB', # cannot have 3 numbers in outward code\n 'YZ2N AB', # cannot have 0 numbers in outward code\n 'YZ2N 12AB', # cannot have 2 numbers in outward code\n 'N1 1AAX', # correct postcode, but wrong trailing character\n 'XNN1 1AA', # correct postcode, but wrong leading character\n ):\n yield self.check_invalid_postcode, bad_postcode",
"def checks_pass(self):\n if self.error_code_adjust not in self.error_code_pass:\n return False\n else:\n return True",
"def test_code(self):\n from mirtop.mirna.realign import make_id, read_id\n\n def _convert(s, test, reverse=False):\n code = read_id(s) if reverse else make_id(s)\n if code != test:\n raise ValueError(\"%s didn't result on %s but in %s\" %\n (s, test, code))\n\n _convert(\"AAACCCTTTGGG\", \"iso-12-B1NY4\")\n _convert(\"AAACCCTTTGGGA\", \"iso-13-B1NYDX\")\n _convert(\"AAACCCTTTGGGAT\", \"iso-14-B1NYI7\")\n _convert(\"iso-12-B1NY4\", \"AAACCCTTTGGG\", True)\n _convert(\"iso-13-B1NYDX\", \"AAACCCTTTGGGA\", True)\n _convert(\"iso-14-B1NYI7\", \"AAACCCTTTGGGAT\", True)\n\n # if make_id(\"AGTFCVS\"):\n # raise ValueError(\"This should be False. Not valid sequence.\")\n # if read_id(\"asD(-\"):\n # raise ValueError(\"This should be False, Not valid code.\")",
"def test_valid():\n for block_type, codes in _VALID_CODES.items():\n for code in codes:\n block_type(code=code)",
"def duplication_processing(self, rule):\n was_modified = False\n need_stop = False\n right_term_marked0 = []\n for x in self.marked[rule.getRightTerms()[0]]:\n right_term_marked1 = []\n for y in self.marked[rule.getRightTerms()[1]]:\n temp = x.union(y)\n # Check if it was marked before\n if temp not in self.marked[rule.getLeftTerm()]:\n was_modified = True\n if rule.getLeftTerm() == rule.getRightTerms()[0]:\n right_term_marked0.append(temp)\n elif rule.getLeftTerm() == rule.getRightTerms()[1]:\n right_term_marked1.append(temp)\n else:\n self.marked[rule.getLeftTerm()].add(temp)\n # Stop condition, no need to continuer\n if rule.getLeftTerm() == \"S\" and len(temp) == 0:\n need_stop = True\n for temp in right_term_marked1:\n self.marked[rule.getRightTerms()[1]].add(temp)\n for temp in right_term_marked0:\n self.marked[rule.getRightTerms()[0]].add(temp)\n\n return (was_modified, need_stop)",
"def expand_expected_codes(codes):\n retval = set()\n codes = re.split(', *', codes)\n for code in codes:\n if not code:\n continue\n if '-' in code:\n low, hi = code.split('-')[:2]\n retval.update(\n str(i) for i in range(int(low), int(hi) + 1))\n else:\n retval.add(code)\n return retval",
"def _check_same_file_prefix(self) -> None:\n group_paths = [group.paths for group in self.groups]\n prefixes = []\n for paths in group_paths:\n prefixes.append([prefix(path) for path in paths])\n if not all_equal(prefixes):\n raise PrefixMatchError(self.groups)",
"def test_correct_digits_and_wrong_positions_computer(self):\n\n game = mastermind.HumanPlayer()\n game.set_code([6, 4, 2, 5])\n black, white = game.check([2, 4, 3, 4], game.get_code())\n self.assertEqual(black, 1)\n self.assertEqual(white, 1)\n\n black, white = game.check([2, 4, 6, 5], game.get_code())\n self.assertEqual(black, 2)\n self.assertEqual(white, 2)\n\n black, white = game.check([5, 4, 2, 5], game.get_code())\n self.assertEqual(black, 3)\n self.assertEqual(white, 0)",
"def test_grade_incorrect_code(self, preprocessors, gradebook, resources):\n cell = create_grade_cell(\"hello\", \"code\", \"foo\", 1)\n cell.metadata.nbgrader['checksum'] = compute_checksum(cell)\n cell.outputs = [new_output('error', ename=\"NotImplementedError\", evalue=\"\", traceback=[\"error\"])]\n nb = new_notebook()\n nb.cells.append(cell)\n preprocessors[0].preprocess(nb, resources)\n gradebook.add_submission(\"ps0\", \"bar\")\n preprocessors[1].preprocess(nb, resources)\n\n grade_cell = gradebook.find_grade(\"foo\", \"test\", \"ps0\", \"bar\")\n assert grade_cell.score == 0\n assert grade_cell.max_score == 1\n assert grade_cell.auto_score == 0\n assert grade_cell.manual_score == None\n assert not grade_cell.needs_manual_grade",
"def check_all_errors(student_resp_list, expected_resp_list):\n all_errors = [] # list that will hold all the feedback\n for student_resp, expected_resp in zip(student_resp_list, expected_resp_list):\n if student_resp == \"\" or student_resp is None:\n return \"Nothing entered\"\n if \"\\n\" in student_resp:\n student_resp = student_resp.replace('\\n', ' ')\n error = False\n text = []\n # check for gibberish (used currently to ensure english is written and not other langauges)\n if (avg_transition_prob(student_resp, model_mat) < threshold):\n text.append(\"The sentence is not fully in English.\")\n error = True\n # check for puntuation\n # 1. ensure that the response is not empty\n # 2. only check puntation for responses longer than 1 word\n # 3. check the end of the response to see if it has a puntation\n if len(student_resp) > 0 and len(student_resp.split()) > 1 and student_resp[-1] not in string.punctuation:\n text.append(\"The sentence is not properly punctuated.\")\n error = True\n matches = tool.check(student_resp)\n if len(matches) > 0:\n for m in matches:\n msg = bytes(m.msg, 'utf-8').decode('utf-8', 'ignore')\n # if the stundet's sentence does not start with an uppercase\n # letter but the expected response does not start with an\n # uppercase letter, DO NOT flag it\n if msg == \"This sentence does not start with an uppercase letter\" and expected_resp[0].islower():\n continue\n text.append(msg)\n error = True\n if not error:\n text.append(\"NO ERRORS!!\")\n all_errors.append({\"feedback\": text})\n return all_errors",
"def test_invalid_zip_code(self):\n\n invalid_zip_codes_to_test = [\"48066! \", \"Michigan\", \"4806689\", \"!48066%#$\", \"Roseville\", \"480366412\", \"41124112\", \"!@48021\"]\n \n for zip_code in invalid_zip_codes_to_test:\n self.database.zip_code = zip_code\n self.assertFalse(self.database.validate_zipCode())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
For each code in the submission, mark whether it has the correct convention.
|
def markConvention(self,submission):
    label='Convention?'
    submission = self.addColumn( submission, label )
    submission.loc[:,label]="Not Correct"
    if not 'Convention' in submission.columns:
        return submission
    conventions = submission.ix[:,("Code","Convention")]
    conventions.columns = ["Code","submissionConvention"]
    if len( conventions ) == 0:
        return submission
    conventions = conventions.merge(self.ma.loc[:, ("Code","Convention")], how="left", on="Code")
    """ it exists, and conventions match"""
    isCorrect = list(not pd.isnull( c ) and
                     bool(re.match( c,s )) for c,s in zip(conventions.Convention, conventions.submissionConvention))
    submission.loc[ isCorrect, label ] = "Correct"
    nCorrect = sum( isCorrect )
    """
    prepare errorframe
    """
    errors = self.ma.ix[:,("Code","Convention")]
    errors.columns = [ "Code", "Value" ]
    errors = errors.merge(submission.loc[:, ("Code","Convention")], how="left", on="Code")
    errors.columns = [ "Code", "Value", "ValueSubmitted" ]
    errors = self.addColumn( errors, "AOI" )
    errors.loc[:,"AOI"]="Convention"
    label = "IsCorrect"
    errors = self.addColumn( errors, label )
    errors.loc[:, label ]="False"
    isCorrect = list(not pd.isnull( c ) and bool(re.match( c,s ))
                     for s,c in zip(errors.Value, errors.ValueSubmitted))
    errors.ix[ isCorrect, label ] = "True"
    self.addError( errors )
    self.addNote("You had %d correct conventions, gaining %2.1f marks" %(nCorrect, nCorrect * 1))
    self.addMark("%d Correct conventions" % nCorrect, nCorrect * 1)
    return submission
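
A minimal standalone sketch of the regex-based convention check above, assuming toy data. The master stores a pattern per code and a submitted convention counts as correct when `re.match` of that pattern succeeds. The frames, column names, and patterns here are illustrative assumptions; the class helpers used above are omitted.

# Sketch only: convention is correct when the master pattern matches the submitted value.
import re
import pandas as pd

master = pd.DataFrame({"Code": ["A01", "B02"], "Convention": ["DS", "C[12]"]})
submission = pd.DataFrame({"Code": ["A01", "B02"], "Convention": ["DS", "C3"]})

merged = submission.merge(master, on="Code", how="left", suffixes=("Submitted", "Master"))
isCorrect = [not pd.isnull(c) and bool(re.match(c, s))
             for c, s in zip(merged.ConventionMaster, merged.ConventionSubmitted)]

print(isCorrect)       # [True, False]: "C3" does not match the pattern "C[12]"
print(sum(isCorrect))  # 1 correct convention, worth 1 mark at 1 per convention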
|
[
"def markUnorderedGroups(self,submission):\n\n maGroups= self.ma[ eAnd( isblank(self.ma.IntraGroupOrder), notblank(self.ma.Grouping)) ].Grouping.unique()\n\n # P and L groups are taken care of by absoluteOrdering routine. Different marks too\n #maGroups = set(maGroups).difference( set(\"P\", \"L\"))\n label='UnorderedGroups'\n submission = self.addColumn(submission, label )\n submission.loc[:,label]=None\n for group in maGroups:\n # take the group slice\n magSet = set( self.ma[ self.ma.Grouping==group].Code)\n subSlice = submission[ submission.Grouping==group].Code\n subSet = set( subSlice )\n nCorrect=len( magSet & subSet )\n submission.loc[ submission.Code.isin( magSet ), label] = group\n if group==\"P\":\n if nCorrect == len(magSet ) : # all correct, principal\n self.addNote( \"Correct principal diagnosis, 1 mark\" )\n self.addMark(\"Principal Diagnosis\", 1)\n self.addError( {\n 'AOI': 'PrincipalCode', \n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n 'Code': \"\", \n 'IsCorrect': \"True\" \n })\n else:\n self.addNote( \"Incorrect principal diagnosis, answer is %s, you had %s \" % ( pprintSlice(magSet), pprintSlice(subSet)) )\n self.addError( {\n 'AOI': 'PrincipalCode', \n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n 'Code': \"\", \n 'IsCorrect': \"False\" \n })\n next\n\n if group==\"L\" : # Last Codes \n if len(subSlice) > 0 and max( subSlice.index ) == max(submission.index ):\n self.addNote( \"Correct final codes, 0.5 marks\" )\n self.addMark( \"Final Code(s) Group\", 0.5 )\n self.addError( {\n 'AOI': 'LastCode', \n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n 'Code': \"\", \n 'IsCorrect': \"True\" \n })\n else:\n self.addNote( \"Incorrect final code(s), should be %s\" % ( pprintSlice(magSet)) )\n self.addError( { 'AOI': 'LastCode', \n 'Code': \"\", \n 'IsCorrect': \"False\" ,\n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n })\n\n # we don't need to process the group if the master says it is only one code long\n if len( magSet ) == 1:\n next\n\n\n\n if nCorrect == len(magSet ) : # all correct\n self.addNote( \"Unordered Group %s, %s entirely correct, 0.5 marks\" % (group, pprintSlice(magSet)) )\n self.addMark(\"Unordered Group %s\" % group, 0.5)\n self.addError( { 'AOI': 'UnorderedGroup', \n 'Code': \"\", \n 'IsCorrect': \"True\" ,\n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n })\n elif (nCorrect > 0 ) :\n self.addNote( \"Unordered Group %s partially correct, answer is %s, you had %s, 0.5 marks \" \n % (group, pprintSlice(magSet), pprintSlice(subSet)) )\n self.addMark(\"Unordered Group %s\" % group, 0.5)\n self.addError( { 'AOI': 'UnorderedGroup', \n 'Code': \"\", \n 'IsCorrect': \"False\" ,\n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': pprintSlice( subSet ),\n })\n else:\n self.addNote( \"Unordered Group %s, %s entirely missing\" % (group, pprintSlice(magSet)) )\n self.addError( { 'AOI': 'UnorderedGroup', \n 'Code': \"\", \n 'IsCorrect': \"False\" ,\n 'Value': pprintSlice(magSet), \n 'ValueSubmitted': \"\",\n })\n\n return submission",
"def test_code(self):\n from mirtop.mirna.realign import make_id, read_id\n\n def _convert(s, test, reverse=False):\n code = read_id(s) if reverse else make_id(s)\n if code != test:\n raise ValueError(\"%s didn't result on %s but in %s\" %\n (s, test, code))\n\n _convert(\"AAACCCTTTGGG\", \"iso-12-B1NY4\")\n _convert(\"AAACCCTTTGGGA\", \"iso-13-B1NYDX\")\n _convert(\"AAACCCTTTGGGAT\", \"iso-14-B1NYI7\")\n _convert(\"iso-12-B1NY4\", \"AAACCCTTTGGG\", True)\n _convert(\"iso-13-B1NYDX\", \"AAACCCTTTGGGA\", True)\n _convert(\"iso-14-B1NYI7\", \"AAACCCTTTGGGAT\", True)\n\n # if make_id(\"AGTFCVS\"):\n # raise ValueError(\"This should be False. Not valid sequence.\")\n # if read_id(\"asD(-\"):\n # raise ValueError(\"This should be False, Not valid code.\")",
"def storeCodes(self, issue, errmessage):\n if 'textRange' in issue:\n textRange = self.makeTextRange(issue)\n for entry in textRange:\n startLine = entry['textRange']['startLine']\n endLine = entry['textRange']['endLine']\n\n entry['code'] = []\n entry['code'].extend(self.storeSingleCodeReq(startLine, endLine, issue))\n errmessage['code'].append(entry)",
"def markPrefix(self,submission):\n label='Prefix?'\n submission = self.addColumn( submission, label )\n submission.loc[:,label]=\"Not Correct\"\n if not 'Prefix' in submission.columns:\n return submission\n prefixes = submission.ix[:,(\"Code\",\"Prefix\")]\n prefixes.columns = [ \"Code\",\"submissionPrefix\"]\n if len( prefixes ) == 0:\n return submission\n prefixes = prefixes.merge(self.ma.loc[:, (\"Code\",\"Prefix\")], how=\"left\", on=\"Code\")\n isCorrect = list(not pd.isnull( c ) and c==s for s,c in zip(prefixes.submissionPrefix, prefixes.Prefix))\n submission.ix[ isCorrect, label ] = \"Correct\"\n nCorrect = sum( isCorrect )\n \n \"\"\" \n prepare errorframe from a 'what is correct' perspective\n 1) create error dataframe from master, columns Code and prefix\n 1a) rename prefix to Value\n 2) fill submission prefix, matching by code\n 3) fill IsCorrect\n \"\"\"\n errors = self.ma.ix[:,(\"Code\",\"Prefix\")]\n errors.columns = [ \"Code\", \"Value\" ]\n errors = errors.merge(submission.loc[:, (\"Code\",\"Prefix\")], how=\"left\", on=\"Code\")\n errors.columns = [ \"Code\", \"Value\", \"ValueSubmitted\" ]\n errors = self.addColumn( errors, \"AOI\" )\n errors.loc[:,\"AOI\"]=\"Prefix\"\n label = \"IsCorrect\"\n errors = self.addColumn( errors, label )\n errors.loc[:, label ]=\"False\"\n isCorrect = list(not pd.isnull( c ) and c==s \n for s,c in zip(errors.Value, errors.ValueSubmitted))\n errors.ix[ isCorrect, label ] = \"True\"\n self.addError( errors )\n\n self.addNote(\"You had %d correct prefixes, gaining %2.1f marks\" %(nCorrect, nCorrect * 0.5))\n self.addMark(\"%d Correct prefixes\" % nCorrect, nCorrect * 0.5)\n\n return submission",
"def ensure_workspace_matter_code(sender, instance, **kwargs):\n if instance.matter_code in [None, '']:\n\n # the current number of matters this lawyer has\n count = instance.lawyer.workspace_set.all().count() if instance.lawyer is not None else 1\n workspace_slug = instance.slug\n\n final_matter_code = \"{0:05d}-{1}\".format(count, workspace_slug)[:100]\n\n while _model_slug_exists(model=instance.__class__.objects.model, matter_code=final_matter_code):\n logger.info('Workspace %s exists, trying to create another' % final_matter_code)\n count = count + 1\n final_matter_code = \"{0:05d}-{1}\".format(count, workspace_slug)\n\n instance.matter_code = final_matter_code",
"def test_valid():\n for block_type, codes in _VALID_CODES.items():\n for code in codes:\n block_type(code=code)",
"def test_post_journal_codes(self):\n pass",
"def test_code(self):\n codes = {\"A\": 1, \"B\": 2}\n text = \"COD A B C\"\n values = ((\"A\", 1), (\"B\", 2), (None, None), (\"C\", \"C\"))\n for i, code in enumerate(gfs._code(codes)(text, size=2)):\n self.assert_code(code, *values[i])",
"def test_checkCodeFormatAndQuality(self):\n style_guide = flake8.get_style_guide(max_line_length=2048)\n report = style_guide.check_files(self.list_of_files)\n self.assertEqual(report.total_errors, 0)",
"def test_formality_difference():\r\n markers = Formality.load_formality_markers()\r\n cs_object_name = '<pickle object with map: author to cs texts>'\r\n non_cs_object_name = '<pickle object with map: author to monolingual english texts>'\r\n cs_texts = Serialization.load_obj(cs_object_name)\r\n non_cs_texts = Serialization.load_obj(non_cs_object_name)\r\n print('loaded', len(cs_texts), 'and', len(non_cs_texts), 'cs and monolingual english by authors')\r\n for author in cs_texts: cs_texts[author] = ' '.join(cs_texts[author])\r\n for author in non_cs_texts: non_cs_texts[author] = ' '.join(non_cs_texts[author])\r\n\r\n cs_markers_by_authors, non_cs_markers_by_authors = Formality.extract_markers(cs_texts, non_cs_texts, markers)\r\n #print(cs_markers_by_authors, non_cs_markers_by_authors)\r\n\r\n print('mean markers frequency in cs:', np.mean(cs_markers_by_authors),\r\n 'in non-cs:', np.mean(non_cs_markers_by_authors))\r\n\r\n Serialization.save_obj(cs_markers_by_authors, 'formality.markers.cs')\r\n Serialization.save_obj(non_cs_markers_by_authors, 'formality.markers.non-cs')\r\n stat, pval = wilcoxon(cs_markers_by_authors, non_cs_markers_by_authors)\r\n print('paired ttest sig test pval:', pval, stat)\r\n\r\n mean1 = np.mean(cs_markers_by_authors); mean2 = np.mean(non_cs_markers_by_authors)\r\n std1 = np.std(cs_markers_by_authors); std2 = np.std(non_cs_markers_by_authors)\r\n r1, _ = spearmanr(cs_markers_by_authors, non_cs_markers_by_authors)\r\n r2, _ = pearsonr(cs_markers_by_authors, non_cs_markers_by_authors)\r\n print(mean1, mean2, std1, std2, r1, r2)",
"def check_spacing_around_code(md_text, notebook):\n for line in md_text:\n m = re.search(r'```', line)\n if m is not None:\n if not line.startswith('```'):\n print('[ILL-STYLED] ``` appears not at BOL.', notebook, line, sep=' | ', end='')\n continue\n\n starter = [' ', ' ', '。', '、', ')', ')', '(', '(', '・', ':', '「', '#']\n closer = [' ', '\\n', '。', '、', '(', ')', ')', ',', '・', '」', ']']\n terms = set()\n with contextlib.suppress(StopIteration):\n for code_pat in (r'\\*\\*`(.*?)`\\*\\*', r'<strong>`(.*?)`</strong>', r'`(.*?)`'):\n for m in re.finditer(code_pat, line):\n if m[1] in terms:\n continue\n terms.add(m[1])\n if not any(line.startswith(m[0] + suf) for suf in closer) and \\\n not any(pre + m[0] + suf in line for pre in starter for suf in closer):\n print('[ILL-STYLED] Spacing around code is inappropriate.', notebook, line, sep=' | ', end='')\n raise StopIteration",
"def test_pycodestyle(self):\n output = sh.pycodestyle(\"review_analysis.py\")\n assert(output.exit_code == 0)",
"def permissive_corrected_original_name(self) -> bool:\n return self in {\n NomenclatureStatus.not_published_with_a_generic_name,\n NomenclatureStatus.informal,\n NomenclatureStatus.not_intended_as_a_scientific_name,\n NomenclatureStatus.not_nominative_singular,\n NomenclatureStatus.not_based_on_a_generic_name,\n }",
"def count_codes(majors_file):\n majors = [ ]\n\n for line in majors_file:\n majors.append(line.strip())\n\n majors = sorted(majors)\n\n if len(majors)==0:\n print('File is empty')\n return\n \n count = 0\n major_pre = majors[0]\n majors.append('This is not a valid major code')\n \n for major in majors:\n if major == major_pre:\n count = count + 1\n else:\n print(major_pre, count)\n major_pre = major\n count = 1\n\n return",
"def errorCheckSubmission( self, answer):\n \n for colName in [\"Code\", \"Convention\", \"GroupOrder\"]:\n assert colName in answer.columns, \"We need a %s column in the master spreadsheet\" % colName",
"def analyze_random_codeword_codebook(self):\n stats_given_cw = {}\n for i, cw in enumerate(self._codewords):\n cw_idx = i + 1\n stats = {'good': 0, 'bad': 0}\n if self._codebook[seqtools.dna2num(cw)] == cw_idx:\n stats['self'] = 'good'\n else:\n stats['self'] = 'bad'\n for seq in FreeDivSphere.FreeDivSphere(cw, self.max_err_decode):\n seq_idx = seqtools.dna2num(seq)\n if self._codebook[seq_idx] == cw_idx: \n stats['good'] += 1\n else:\n stats['bad'] += 1\n stats['total'] = stats['good'] + stats['bad']\n stats_given_cw[cw] = stats\n return stats_given_cw",
"def _precheck(self, year_set, jobtype, data_type=None):\n for job in year_set.jobs:\n if job.type == jobtype:\n if job.type != 'regrid':\n return False\n else: # regrid is the only job type that can have multiple instances in a year_set\n if job.data_type == data_type: # but only one instance per data type\n return False\n return True",
"def pdb_in_results(pdb_code, results):\n return pdb_code.upper() in set([r.pdb_code.upper() for r in results])",
"def iscode(self, res, codes):\n\n for code in codes:\n if (res == code or res[::-1] == code):\n return True\n\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A single interruption is OK, and any group larger than size 1 is OK. Algorithm: for each unordered group (i.e., one with no intra-group order), take each slice S of size length(uog); if uog \ S is the empty set, we have a winner, so mark the group correct.
|
def markUnorderedGroups(self,submission):
    maGroups= self.ma[ eAnd( isblank(self.ma.IntraGroupOrder), notblank(self.ma.Grouping)) ].Grouping.unique()
    # P and L groups are taken care of by absoluteOrdering routine. Different marks too
    #maGroups = set(maGroups).difference( set("P", "L"))
    label='UnorderedGroups'
    submission = self.addColumn(submission, label )
    submission.loc[:,label]=None
    for group in maGroups:
        # take the group slice
        magSet = set( self.ma[ self.ma.Grouping==group].Code)
        subSlice = submission[ submission.Grouping==group].Code
        subSet = set( subSlice )
        nCorrect=len( magSet & subSet )
        submission.loc[ submission.Code.isin( magSet ), label] = group
        if group=="P":
            if nCorrect == len(magSet ) : # all correct, principal
                self.addNote( "Correct principal diagnosis, 1 mark" )
                self.addMark("Principal Diagnosis", 1)
                self.addError( {
                    'AOI': 'PrincipalCode',
                    'Value': pprintSlice(magSet),
                    'ValueSubmitted': pprintSlice( subSet ),
                    'Code': "",
                    'IsCorrect': "True"
                })
            else:
                self.addNote( "Incorrect principal diagnosis, answer is %s, you had %s " % ( pprintSlice(magSet), pprintSlice(subSet)) )
                self.addError( {
                    'AOI': 'PrincipalCode',
                    'Value': pprintSlice(magSet),
                    'ValueSubmitted': pprintSlice( subSet ),
                    'Code': "",
                    'IsCorrect': "False"
                })
            next  # bare 'next' is a no-op here, so execution falls through to the checks below

        if group=="L" : # Last Codes
            if len(subSlice) > 0 and max( subSlice.index ) == max(submission.index ):
                self.addNote( "Correct final codes, 0.5 marks" )
                self.addMark( "Final Code(s) Group", 0.5 )
                self.addError( {
                    'AOI': 'LastCode',
                    'Value': pprintSlice(magSet),
                    'ValueSubmitted': pprintSlice( subSet ),
                    'Code': "",
                    'IsCorrect': "True"
                })
            else:
                self.addNote( "Incorrect final code(s), should be %s" % ( pprintSlice(magSet)) )
                self.addError( { 'AOI': 'LastCode',
                    'Code': "",
                    'IsCorrect': "False" ,
                    'Value': pprintSlice(magSet),
                    'ValueSubmitted': pprintSlice( subSet ),
                })

        # we don't need to process the group if the master says it is only one code long
        if len( magSet ) == 1:
            next  # bare 'next' is a no-op, so the general checks below still run

        if nCorrect == len(magSet ) : # all correct
            self.addNote( "Unordered Group %s, %s entirely correct, 0.5 marks" % (group, pprintSlice(magSet)) )
            self.addMark("Unordered Group %s" % group, 0.5)
            self.addError( { 'AOI': 'UnorderedGroup',
                'Code': "",
                'IsCorrect': "True" ,
                'Value': pprintSlice(magSet),
                'ValueSubmitted': pprintSlice( subSet ),
            })
        elif (nCorrect > 0 ) :
            self.addNote( "Unordered Group %s partially correct, answer is %s, you had %s, 0.5 marks "
                % (group, pprintSlice(magSet), pprintSlice(subSet)) )
            self.addMark("Unordered Group %s" % group, 0.5)
            self.addError( { 'AOI': 'UnorderedGroup',
                'Code': "",
                'IsCorrect': "False" ,
                'Value': pprintSlice(magSet),
                'ValueSubmitted': pprintSlice( subSet ),
            })
        else:
            self.addNote( "Unordered Group %s, %s entirely missing" % (group, pprintSlice(magSet)) )
            self.addError( { 'AOI': 'UnorderedGroup',
                'Code': "",
                'IsCorrect': "False" ,
                'Value': pprintSlice(magSet),
                'ValueSubmitted': "",
            })

    return submission
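
A minimal sketch of the set-based check behind the unordered-group marking above, assuming a toy master group and submission. An unordered group is entirely correct when every master code for that group appears somewhere in the submission, regardless of order; the mark values, notes, and error records are class-specific and omitted.

# Sketch only: score one unordered group by set intersection with the submitted codes.
master_group = {"K35", "K36", "K37"}          # codes the master assigns to one group (assumed)
submitted    = ["K37", "E11", "K35", "K36"]   # submitted codes, in any order (assumed)

nCorrect = len(master_group & set(submitted))
if nCorrect == len(master_group):
    print("Unordered group entirely correct, 0.5 marks")
elif nCorrect > 0:
    print("Unordered group partially correct, 0.5 marks")
else:
    print("Unordered group entirely missing")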
|
[
"def final_penguins_num2(game, ice, my_arrival_turn=-1, groups=[]):\n if ice in game.get_my_icebergs():\n status = \"mine\"\n elif ice in game.get_neutral_icebergs():\n status = \"neutral\"\n else:\n status = \"enemy\"\n my_penguin_amount = ice.penguin_amount\n if status == \"enemy\":\n my_penguin_amount *= -1\n last_group_turns_till_arrival = 0\n groups_toward_ice = [g for g in game.get_all_penguin_groups() if g.destination.equals(ice)]\n groups_toward_ice.sort(key=lambda g: some(g, groups))\n \n temp = groups_toward_ice[:]\n for g in temp:\n if g not in groups:\n total_d = calc_real_dis(g.source, ice)\n else:\n total_d = calc_illuse_dis(g.source, ice)\n kizuz = [grp for grp in game.get_all_penguin_groups() if grp.source.equals(ice) and grp.destination.equals(g.source)]\n for k in kizuz:\n if g not in groups:\n g_turn_till_arrival = real_turn_teal_arrival(g)\n else:\n g_turn_till_arrival = illusion_turn_teal_arrival(g)\n if real_turn_teal_arrival(k) + g_turn_till_arrival >= total_d: \n kiz = g.penguin_amount - k.penguin_amount\n if kiz < 0:\n kiz = 0\n g.penguin_amount = kiz\n groups_toward_ice[groups_toward_ice.index(g)].penguin_amount = kiz\n\n for g in groups_toward_ice:\n if g in game.get_my_decoy_penguin_groups():\n continue\n if g not in groups:\n g_turn_till_arrival = real_turn_teal_arrival(g)\n else:\n g_turn_till_arrival = illusion_turn_teal_arrival(g)\n \n if status == \"mine\":\n my_penguin_amount += (g_turn_till_arrival - last_group_turns_till_arrival) * ice.penguins_per_turn\n elif status == \"enemy\": # or status==\"neutral\":\n my_penguin_amount -= (g_turn_till_arrival - last_group_turns_till_arrival) * ice.penguins_per_turn\n \n if g in game.get_enemy_penguin_groups():\n my_penguin_amount -= g.penguin_amount\n else:\n my_penguin_amount += g.penguin_amount\n \n if my_penguin_amount > 0:\n status = \"mine\"\n elif my_penguin_amount == 0:\n status = \"neutral\"\n else:\n status = \"enemy\"\n last_group_turns_till_arrival = g_turn_till_arrival\n \n return my_penguin_amount, last_group_turns_till_arrival, status",
"def test_strategy(self):\n # Become grudged if the opponent defects twice in a row\n self.responses_test([], [], [C], attrs={\"grudged\": False})\n self.responses_test([C], [C], [C], attrs={\"grudged\": False})\n self.responses_test([C, C], [C, C], [C], attrs={\"grudged\": False})\n self.responses_test([C, C, C], [C, C, D], [C], attrs={\"grudged\": False})\n self.responses_test([C, C, C, C], [C, C, D, D], [D],\n attrs={\"grudged\": True})\n\n mem_length = self.player().mem_length\n for i in range(mem_length - 1):\n self.responses_test([C, C, C, C] + [D] * i, [C, C, D, D] + [D] * i,\n [D], attrs={\"grudged\": True,\n \"grudge_memory\": i})\n i = mem_length + 1\n self.responses_test([C, C, C, C] + [D] * i, [C, C, D, D] + [C] * i,\n [C], attrs={\"grudged\": False,\n \"grudge_memory\": 0})",
"def discardBad(wellNamesList,matrix,thresh):\n #keys are wells, values are a list of their group members\n wells = {}\n for name in wellNamesList:\n #the groups in which the wells are placed are the indexes of the wells in the input list\n wells[name]=[name]\n for i in range(len(matrix)):\n #loop over all the matrix except the diagonal (always 0)\n for j in [x for x in range(len(matrix[i])) if x!=i]:\n if matrix[i][j] > thresh:\n #remove edges that are greater than the threshold\n matrix[i][j] = -1\n else:\n #add the two wells within threshold to each others groupmembers' groups (including own)\n for wlj in wells[wellNamesList[j]]:\n if wellNamesList[i] not in wells[wlj]:\n wells[wlj].append(wellNamesList[i])\n for wli in wells[wellNamesList[i]]:\n if wellNamesList[j] not in wells[wli]:\n wells[wli].append(wellNamesList[j])\n #finding the beet group\n maxi=-1\n choose=\"\"\n for key in wells.keys():\n if len(wells[key]) > maxi:\n maxi = len(wells[key])\n maxkey = key #used as previous equal best if there is an equal max group\n choose = key #used as the key to the group to be chosen\n elif len(wells[key]) == maxi:\n #if the length is the same because key and maxkey are in the same group, simply skip to next loop iteration\n if key in wells[maxkey]:\n continue\n #equal largest group, choose the one with the smallest 'largest difference' amoong the pairs\n largestValKey = -1\n largestValMaxkey = -1\n for pair in combinations(wells[key],2):\n #finds largest pair comparison value in current group\n if matrix[wellNamesList.index(pair[0])][wellNamesList.index(pair[1])] > largestValKey:\n largestValKey = matrix[wellNamesList.index(pair[0])][wellNamesList.index(pair[1])]\n choose = key\n for pair2 in combinations(wells[maxkey],2):\n #find largest pair comparison value in previous equal size group\n if matrix[wellNamesList.index(pair2[0])][wellNamesList.index(pair2[1])] > largestValMaxkey:\n largestValMaxkey = matrix[wellNamesList.index(pair2[0])][wellNamesList.index(pair2[1])]\n choose = maxkey\n #choose group with the smaller largest value\n if largestValKey > largestValMaxkey:\n choose = key\n elif largestValKey < largestValMaxkey:\n choose = maxkey\n else:\n #two values are equal, both groups are discarded\n #either both groups are length 1, or there's no way to choose the better one\n choose=\"\"\n #appends the group members of the chosen group to be returned\n keep=[]\n if choose != \"\":\n for well in wells[choose]:\n keep.append(well)\n return keep\n else:\n return []",
"def overlaps(self, group):\n if not group:\n return None\n faces = [self(obj) for obj in group]\n return sum(len(o) for o in faces) != len(set.union(*faces))",
"def test_with_remainder(self):\n data = range(21)\n grouped = util.make_even_groups(data, 5)\n self.assertEqual(len(grouped), 4)\n for group in grouped:\n self.assertEqual(len(group), 5)\n full = sorted(flatten(grouped))\n self.assertEqual(full, data[:-1])",
"def build_square(data, sticks, i, n, remaining_len):\n\n for j in range(i, n):\n if data[sticks[j]] and remaining_len >= sticks[j]:\n data[sticks[j]] -= 1\n remaining_len -= sticks[j]\n if remaining_len == 0:\n return True\n if build_square(data, sticks, i, n, remaining_len):\n return True\n else:\n data[sticks[j]] += 1\n remaining_len += sticks[j]\n return False",
"def markGroupOrder(self,submission):\n\n \"\"\" make sure that there exist groupOrders in the answer\"\"\"\n groupOrder = self.ma.ix[ notblank(self.ma.GroupOrder),(\"Grouping\",\"GroupOrder\")]\n if len( groupOrder ) == 0:\n return submission\n\n \"\"\" find out where these groups live in the submission:\n create data frame with rows Grouping, GroupOrder, and mindex, maxdex \n 1) find all the rows that relate to the answer grouping, and their minimum and maximum index (mindex)\n \"\"\"\n submissionGroupPos = submission[ submission.Grouping.isin(groupOrder.Grouping)]\n submissionGroupPos.loc[:,\"index\"]=submissionGroupPos.index\n submissionGroupPosMin = pd.DataFrame(submissionGroupPos.groupby(\"Grouping\")[\"index\"].agg(np.min))\n submissionGroupPosMin[\"mindex\"] = submissionGroupPosMin[\"index\"]\n submissionGroupPosMax = pd.DataFrame(submissionGroupPos.groupby(\"Grouping\")[\"index\"].agg(np.max))\n submissionGroupPosMax[\"maxdex\"] = submissionGroupPosMax[\"index\"]\n\n # error check to make sure we have got Min and Max Grouping columns\n if not 'Grouping' in submissionGroupPosMin.columns:\n submissionGroupPosMin['Grouping'] = submissionGroupPosMin.index\n if not 'Grouping' in submissionGroupPosMax.columns:\n submissionGroupPosMax['Grouping']=submissionGroupPosMax.index\n groupOrder=groupOrder.merge(submissionGroupPosMin, how='left', on=\"Grouping\")\n groupOrder=groupOrder.merge(submissionGroupPosMax, how='left', on=\"Grouping\").sort(columns=\"mindex\")\n\n \n groupOrder.loc[ : , \"Consecutive\"] = False\n i=0\n for go in groupOrder.GroupOrder:\n if str(go).endswith(\"N\"):\n groupOrder.loc[ i, \"Consecutive\"] = True\n groupOrder.loc[ i, \"GroupOrder\"] = groupOrder.loc[ i, \"GroupOrder\"][0:-1] \n i = i + 1\n\n\n \"\"\" go through each group in mindex order, make sure that \n - all the groups exist\n - the groups are consecutive (when the first group ends in an N, and \n - the GroupOrder ascends\n\n \"\"\"\n if ( \n all( not np.isnan( i ) for i in groupOrder.ix[:, \"mindex\"] ) # pylint: disable=E1101 \n and all( not groupOrder.ix[i,\"Consecutive\"] \n or groupOrder.ix[i, \"maxdex\"]+1 == groupOrder.ix[i+1, \"mindex\"]\n for i in range( len(groupOrder) -1 )\n )\n\n and all( groupOrder.ix[i, \"GroupOrder\"] <= groupOrder.ix[i+1, \"GroupOrder\"] \n for i in range( len(groupOrder) -1 )\n )\n ):\n self.addNote( \"Correct ALL group ordering, 0.5 marks\" )\n self.addMark(\"All Groups Ordering\", 0.5)\n self.addError( { 'AOI': 'AllGroupsOrdering', \n 'Code': \"\", \n 'IsCorrect': \"True\" ,\n 'Value': \"\", \n 'ValueSubmitted': \"\",\n })\n\n else:\n self.addNote( \"Incorrect ALL group ordering\" )\n self.addError( { 'AOI': 'AllGroupsOrdering', \n 'Code': \"\", \n 'IsCorrect': \"False\" ,\n 'Value': \"\", \n 'ValueSubmitted': \"\",\n })\n\n\n return submission",
"def make_comparison_set(start_set, end_set,average,l_b, u_b, set_size):\n\n # assert len(end_set) == set_size, \"The size of the end set is too small. Must contain 7 elements\"\n better_peers = end_set[end_set > average]\n worse_peers = end_set[end_set < average]\n worse_eq_peers = end_set[end_set <= average]\n\n # the solution set must have 20-40% of its members worse than \"average\" and 30-50% worse-or-equal than average.\n proportion_of_worse = random.uniform(0.2,0.40)\n number_of_worse = set_size*proportion_of_worse\n number_of_worse_eq = set_size*(proportion_of_worse+0.1)\n\n if average+l_b <= round(np.mean(end_set), 2) <= average+u_b and len(end_set) == set_size and \\\n len(worse_peers)>=number_of_worse and len(worse_eq_peers)>=number_of_worse_eq:\n return end_set\n\n # assert len(start_set) > 0, \"Could not find a subset from the set given\"\n if len(start_set) == 0:\n return np.zeros(set_size)\n\n end_set = np.append(end_set, start_set[0])\n start_set = start_set[1:]\n\n if round(np.mean(end_set), 2) > u_b:\n max_indices = end_set.argsort()[-3:]\n max_index = np.argmax(end_set)\n end_set = np.delete(end_set, max_index)\n elif round(np.mean(end_set), 2) < l_b:\n min_indices = end_set.argsort()[3:]\n min_index = np.argmin(end_set)\n end_set = np.delete(end_set, min_index)\n return make_comparison_set(start_set, end_set, average, l_b, u_b, set_size)",
"def number_of_yes_in_group(group: Sequence[str]) -> int:\r\n group_size = len(group)\r\n if group_size == 1:\r\n return len(group[0])\r\n\r\n yeses = 0\r\n\r\n chars = []\r\n for string in group:\r\n for char in string:\r\n chars.append(char)\r\n\r\n while len(chars) > 0:\r\n c, y = restart_search(chars, group_size, yeses)\r\n chars = c\r\n yeses = y\r\n\r\n return yeses",
"def test_lecture_group_also_removed(self):\n self.assertTrue(Record.enqueue_student(self.bolek, self.exercise_1))\n self.assertTrue(\n Record.objects.filter(student=self.bolek,\n group=self.exercise_1,\n status=RecordStatus.ENROLLED).exists())\n self.assertTrue(\n Record.objects.filter(student=self.bolek,\n group=self.lecture,\n status=RecordStatus.ENROLLED).exists())\n\n # One cannot leave the auto-enrollment group.\n self.assertFalse(Record.remove_from_group(self.bolek, self.lecture))\n # One cannot leave the group he is not in.\n self.assertFalse(Record.remove_from_group(self.bolek, self.exercise_2))\n self.assertTrue(Record.remove_from_group(self.bolek, self.exercise_1))\n self.assertFalse(\n Record.objects.filter(student=self.bolek,\n group=self.exercise_1,\n status=RecordStatus.ENROLLED).exists())\n self.assertFalse(\n Record.objects.filter(student=self.bolek,\n group=self.lecture,\n status=RecordStatus.ENROLLED).exists())",
"def test_composite_group_occult():\n assert light_character.occulting([3, 1], 'G', 10000) == [\n ('Off', 500), ('G', 1000),\n ('Off', 500), ('G', 1000),\n ('Off', 500), ('G', 1000),\n ('G', 2000),\n ('Off', 500), ('G', 1000),\n ('G', 2000)\n ]",
"def test_stitch_counterfactual_and_dopplegangers(self):\n self.assert_uedge_set_equal(\n set(),\n stitch_counterfactual_and_dopplegangers(\n graph=NxMixedGraph.from_edges(directed=[(X, Y)]), worlds=set([frozenset([-x])])\n ),\n )\n self.assert_uedge_set_equal(\n set(),\n stitch_counterfactual_and_dopplegangers(\n graph=NxMixedGraph.from_edges(directed=[(X, Y)]), worlds=set([frozenset([+x])])\n ),\n )\n self.assert_uedge_set_equal(\n set(),\n stitch_counterfactual_and_dopplegangers(\n graph=NxMixedGraph.from_edges(directed=[(X, Y), (Y, Z)]),\n worlds=set([frozenset([+x])]),\n ),\n )\n self.assert_uedge_set_equal(\n set(),\n stitch_counterfactual_and_dopplegangers(\n graph=NxMixedGraph.from_edges(directed=[(X, Y), (Y, Z)], undirected=[(X, Z)]),\n worlds=set([frozenset([+x])]),\n ),\n )\n self.assert_uedge_set_equal(\n {frozenset({Z @ -d, Z @ +x}), frozenset({Y @ +x, Y @ -d})},\n set(\n stitch_counterfactual_and_dopplegangers(\n graph=NxMixedGraph.from_edges(\n directed=[(X, Y), (Y, Z), (D, X)], undirected=[(X, Z)]\n ),\n worlds=set([frozenset([+x]), frozenset([-d])]),\n )\n ),\n )\n self.assert_uedge_set_equal(\n {frozenset({Y @ -d, Y @ +x}), frozenset({W @ +x, W @ -d}), frozenset({Z @ +x, Z @ -d})},\n stitch_counterfactual_and_dopplegangers(\n graph=figure_9a.graph, worlds=set([frozenset([+x]), frozenset([-d])])\n ),\n )",
"def signals_per_group(number_of_good, ngroup):\n nint, ny, nx = number_of_good.shape\n good_per_group = np.zeros((ngroup, ny, nx)).astype(np.int)\n bad_per_group = np.zeros((ngroup, ny, nx)).astype(np.int)\n\n for group in range(ngroup):\n grp_map = np.sum(group < number_of_good, axis=0)\n good_per_group[group, :, :] = grp_map\n bad_per_group[group, :, :] = nint - grp_map\n return good_per_group, bad_per_group",
"def testNGroupSplit(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupSplitter(2)\n hs_reversed = NGroupSplitter(2, reverse=True)\n\n for isreversed, splitter in enumerate((hs, hs_reversed)):\n splits = list(splitter(self.data))\n self.failUnless(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.failUnless( len(p) == 2 )\n self.failUnless( p[0].nsamples == 50 )\n self.failUnless( p[1].nsamples == 50 )\n\n self.failUnless((splits[0][1-isreversed].uniquechunks == [0, 1, 2, 3, 4]).all())\n self.failUnless((splits[0][isreversed].uniquechunks == [5, 6, 7, 8, 9]).all())\n self.failUnless((splits[1][1-isreversed].uniquechunks == [5, 6, 7, 8, 9]).all())\n self.failUnless((splits[1][isreversed].uniquechunks == [0, 1, 2, 3, 4]).all())\n\n # check if it works on pure odd and even chunk ids\n moresplits = list(hs(splits[0][0]))\n\n for split in moresplits:\n self.failUnless(split[0] != None)\n self.failUnless(split[1] != None)\n\n # now test more groups\n s5 = NGroupSplitter(5)\n s5_reversed = NGroupSplitter(5, reverse=True)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5_reversed)):\n splits = list(s5splitter(self.data))\n\n # must have 10 splits\n self.failUnless(len(splits) == 5)\n\n # check split content\n self.failUnless((splits[0][1-isreversed].uniquechunks == [0, 1]).all())\n self.failUnless((splits[0][isreversed].uniquechunks == [2, 3, 4, 5, 6, 7, 8, 9]).all())\n self.failUnless((splits[1][1-isreversed].uniquechunks == [2, 3]).all())\n self.failUnless((splits[1][isreversed].uniquechunks == [0, 1, 4, 5, 6, 7, 8, 9]).all())\n # ...\n self.failUnless((splits[4][1-isreversed].uniquechunks == [8, 9]).all())\n self.failUnless((splits[4][isreversed].uniquechunks == [0, 1, 2, 3, 4, 5, 6, 7]).all())\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return [ (train, test) for (train, test) in spl(dat) ]\n s20 = NGroupSplitter(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)",
"def doCorrectSetSubsumption(self):\r\n subsumer = None\r\n for ref in self.correctSet:\r\n cl = self.popSet[ref]\r\n if cl.isSubsumer():\r\n if subsumer == None or cl.isMoreGeneral(subsumer):\r\n subsumer = cl\r\n\r\n if subsumer != None: #If a subsumer was found, subsume all more specific classifiers in the correct set\r\n i=0\r\n while i < len(self.correctSet):\r\n ref = self.correctSet[i]\r\n if subsumer.isMoreGeneral(self.popSet[ref]):\r\n subsumer.updateNumerosity(self.popSet[ref].numerosity)\r\n# if subsumer.epochComplete:\r\n# if not self.popSet[ref].epochComplete:\r\n# self.ECPopSize += 1\r\n# self.ENCPopSize -= 1\r\n# else:\r\n# if self.popSet[ref].epochComplete:\r\n# self.ECPopSize -= 1\r\n# self.ENCPopSize += 1\r\n\r\n self.removeMacroClassifier(ref)\r\n self.deleteFromMatchSet(ref)\r\n self.deleteFromCorrectSet(ref)\r\n i = i - 1\r\n i = i + 1",
"def test_ranked_deep_bury():\n v = spatial.Voters(seed=0)\n v.add(voter_pref)\n c = spatial.Candidates(v)\n c.add(candidate_pref)\n \n # run honest election\n e1 = spatial.Election(voters=v, candidates=c)\n result1 = e1.run('ranked_pairs')\n \n # Make sure condorcet winner was found\n assert 1 in e1.result.winners\n \n #run strategic election\n strategy = {'tactics' : 'deep_bury',\n 'ratio' : 1,\n 'underdog' : None,\n 'subset' : '',\n 'frontrunnertype' : 'eliminate'}\n s2 = spatial.Strategies(v).add(**strategy)\n e2 = spatial.Election(voters=v, candidates=c, strategies=s2)\n e2.run('ranked_pairs', result=result1) \n \n # Make sure the correct front runners were \n tballots = e2.ballotgen.tacticalballots\n tgroup = list(tballots.root._tactical_groups.values())[0]\n front_runners = tgroup.front_runners\n assert 1 in front_runners\n assert 2 in front_runners\n \n # Check that #0 is the winner\n assert 0 in e2.result.winners\n \n ballots = e2.result.ballots\n ballots = votesim.votemethods.tools.rcv_reorder(ballots)\n \n # Check the new tactical rankings\n right = [\n [1, 2, 0],\n [1, 2, 0],\n [1, 2, 0],\n [1, 2, 0],\n [1, 2, 0],\n [1, 2, 0],\n [1, 2, 0],\n [3, 1, 0],\n [3, 1, 0],\n [3, 1, 0],\n [3, 0, 1],\n [3, 0, 1],\n [3, 0, 1],\n [3, 0, 1],\n [3, 0, 1]]\n right = votesim.votemethods.tools.rcv_reorder(right)\n \n assert np.all(right == ballots)\n return",
"def reduce_puzzle(grid):\n stuck = False\n while not stuck:\n\n begin = len([i for i in boxes if len(grid[i]) == 1])\n\n grid = eliminate(grid)\n grid = only_choice(grid)\n grid = naked_twins(grid)\n #grid = naked_triple(grid) - something for the future\n end = len([i for i in boxes if len(grid[i]) == 1])\n stuck = begin == end\n\n sanity = len([i for i in boxes if len(grid[i]) == 0])\n if sanity > 0:\n return\n\n return grid",
"def get_set(average, goal_grade, av_goal_df, set_size):\n # solution = []\n\n if average == 0:\n return np.zeros(set_size), 0, False, 'no grade'\n\n for i in range(10, 50):\n peers, w = get_same_goal_set(av_goal_df, i, goal_grade)\n if len(peers) == 0:\n has_solution = False\n edge_case = 'error'\n return np.zeros(set_size), w, has_solution, edge_case\n for j in range(0, 100):\n random.shuffle(peers)\n start_set = peers[set_size:]\n end_set = peers[:set_size]\n solution = make_comparison_set(start_set, end_set, average, 0.5, 1.0, set_size)\n\n if np.sum(solution) > 0:\n has_solution = True\n edge_case = 'no'\n return solution, w, has_solution, edge_case\n\n # edge cases\n if np.sum(solution) == 0:\n if dist_from_top(av_goal_df, average)<= set_size:\n isTop = True\n isOther = False\n edge_case = \"top\"\n elif dist_from_bottom(av_goal_df, average) <= set_size:\n isTop = False\n isOther = False\n edge_case = 'bottom'\n else:\n isTop = False\n isOther = True\n edge_case = 'other'\n solution = get_special_set(av_goal_df, average, isTop, isOther, set_size)\n has_solution = True\n return solution, 0, has_solution, edge_case\n has_solution = False\n edge_case = 'error'\n return np.zeros(set_size), w, has_solution, edge_case",
"def test_bad_grouping_algorithm(sourcextractor):\n run = sourcextractor(\n grouping_algorihtm='UNKNOWN'\n )\n assert run.exit_code > 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
for each non-null GroupOrder, find the corresponding group; for each group, make sure the subSlice for that group is consecutive
|
def markGroupOrder(self,submission):
""" make sure that there exist groupOrders in the answer"""
groupOrder = self.ma.ix[ notblank(self.ma.GroupOrder),("Grouping","GroupOrder")]
if len( groupOrder ) == 0:
return submission
""" find out where these groups live in the submission:
create a data frame with columns Grouping, GroupOrder, mindex, and maxdex
1) find all the rows that relate to the answer grouping, and their minimum (mindex) and maximum (maxdex) index
"""
submissionGroupPos = submission[ submission.Grouping.isin(groupOrder.Grouping)]
submissionGroupPos.loc[:,"index"]=submissionGroupPos.index
submissionGroupPosMin = pd.DataFrame(submissionGroupPos.groupby("Grouping")["index"].agg(np.min))
submissionGroupPosMin["mindex"] = submissionGroupPosMin["index"]
submissionGroupPosMax = pd.DataFrame(submissionGroupPos.groupby("Grouping")["index"].agg(np.max))
submissionGroupPosMax["maxdex"] = submissionGroupPosMax["index"]
# error check to make sure we have got Min and Max Grouping columns
if not 'Grouping' in submissionGroupPosMin.columns:
submissionGroupPosMin['Grouping'] = submissionGroupPosMin.index
if not 'Grouping' in submissionGroupPosMax.columns:
submissionGroupPosMax['Grouping']=submissionGroupPosMax.index
groupOrder=groupOrder.merge(submissionGroupPosMin, how='left', on="Grouping")
groupOrder=groupOrder.merge(submissionGroupPosMax, how='left', on="Grouping").sort(columns="mindex")
groupOrder.loc[ : , "Consecutive"] = False
i=0
for go in groupOrder.GroupOrder:
if str(go).endswith("N"):
groupOrder.loc[ i, "Consecutive"] = True
groupOrder.loc[ i, "GroupOrder"] = groupOrder.loc[ i, "GroupOrder"][0:-1]
i = i + 1
""" go through each group in mindex order, make sure that
- all the groups exist
- the groups are consecutive (when the group's GroupOrder ends in an N), and
- the GroupOrder ascends
"""
if (
all( not np.isnan( i ) for i in groupOrder.ix[:, "mindex"] ) # pylint: disable=E1101
and all( not groupOrder.ix[i,"Consecutive"]
or groupOrder.ix[i, "maxdex"]+1 == groupOrder.ix[i+1, "mindex"]
for i in range( len(groupOrder) -1 )
)
and all( groupOrder.ix[i, "GroupOrder"] <= groupOrder.ix[i+1, "GroupOrder"]
for i in range( len(groupOrder) -1 )
)
):
self.addNote( "Correct ALL group ordering, 0.5 marks" )
self.addMark("All Groups Ordering", 0.5)
self.addError( { 'AOI': 'AllGroupsOrdering',
'Code': "",
'IsCorrect': "True" ,
'Value': "",
'ValueSubmitted': "",
})
else:
self.addNote( "Incorrect ALL group ordering" )
self.addError( { 'AOI': 'AllGroupsOrdering',
'Code': "",
'IsCorrect': "False" ,
'Value': "",
'ValueSubmitted': "",
})
return submission
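
A minimal, self-contained sketch of the consecutiveness check described by the query above, assuming a plain pandas DataFrame with a Grouping column; the helper name, column name, and sample groups are illustrative, not part of the original marking code.

import pandas as pd

def groups_are_consecutive(submission, ordered_groups):
    """Return True if every named group occupies a consecutive block of rows
    and the blocks appear in the requested order."""
    pos = submission.reset_index(drop=True)
    subset = pos[pos["Grouping"].isin(ordered_groups)]
    # min/max positional index of each group of interest
    spans = subset.groupby("Grouping").apply(lambda g: (g.index.min(), g.index.max()))
    # every group must actually appear in the submission
    if not all(g in spans.index for g in ordered_groups):
        return False
    ordered_spans = [spans[g] for g in ordered_groups]
    # each block must end immediately before the next block starts
    return all(ordered_spans[i][1] + 1 == ordered_spans[i + 1][0]
               for i in range(len(ordered_spans) - 1))

# illustrative usage
df = pd.DataFrame({"Grouping": ["A", "A", "B", "B", "C"]})
print(groups_are_consecutive(df, ["A", "B", "C"]))  # True
print(groups_are_consecutive(df, ["B", "A", "C"]))  # False: blocks not adjacent in that order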
|
[
"def testNGroupSplit(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupSplitter(2)\n hs_reversed = NGroupSplitter(2, reverse=True)\n\n for isreversed, splitter in enumerate((hs, hs_reversed)):\n splits = list(splitter(self.data))\n self.failUnless(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.failUnless( len(p) == 2 )\n self.failUnless( p[0].nsamples == 50 )\n self.failUnless( p[1].nsamples == 50 )\n\n self.failUnless((splits[0][1-isreversed].uniquechunks == [0, 1, 2, 3, 4]).all())\n self.failUnless((splits[0][isreversed].uniquechunks == [5, 6, 7, 8, 9]).all())\n self.failUnless((splits[1][1-isreversed].uniquechunks == [5, 6, 7, 8, 9]).all())\n self.failUnless((splits[1][isreversed].uniquechunks == [0, 1, 2, 3, 4]).all())\n\n # check if it works on pure odd and even chunk ids\n moresplits = list(hs(splits[0][0]))\n\n for split in moresplits:\n self.failUnless(split[0] != None)\n self.failUnless(split[1] != None)\n\n # now test more groups\n s5 = NGroupSplitter(5)\n s5_reversed = NGroupSplitter(5, reverse=True)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5_reversed)):\n splits = list(s5splitter(self.data))\n\n # must have 10 splits\n self.failUnless(len(splits) == 5)\n\n # check split content\n self.failUnless((splits[0][1-isreversed].uniquechunks == [0, 1]).all())\n self.failUnless((splits[0][isreversed].uniquechunks == [2, 3, 4, 5, 6, 7, 8, 9]).all())\n self.failUnless((splits[1][1-isreversed].uniquechunks == [2, 3]).all())\n self.failUnless((splits[1][isreversed].uniquechunks == [0, 1, 4, 5, 6, 7, 8, 9]).all())\n # ...\n self.failUnless((splits[4][1-isreversed].uniquechunks == [8, 9]).all())\n self.failUnless((splits[4][isreversed].uniquechunks == [0, 1, 2, 3, 4, 5, 6, 7]).all())\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return [ (train, test) for (train, test) in spl(dat) ]\n s20 = NGroupSplitter(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)",
"def test_task_group_sorted(self):\n from airflow.operators.empty import EmptyOperator\n from airflow.serialization.serialized_objects import TaskGroupSerialization\n\n \"\"\"\n start\n ╱ ╲\n ╱ ╲\n task_group_up1 task_group_up2\n (task_up1) (task_up2)\n ╲ ╱\n task_group_middle\n (task_middle)\n ╱ ╲\n task_group_down1 task_group_down2\n (task_down1) (task_down2)\n ╲ ╱\n ╲ ╱\n end\n \"\"\"\n execution_date = datetime(2020, 1, 1)\n with DAG(dag_id=\"test_task_group_sorted\", start_date=execution_date) as dag:\n start = EmptyOperator(task_id=\"start\")\n\n with TaskGroup(\"task_group_up1\") as task_group_up1:\n _ = EmptyOperator(task_id=\"task_up1\")\n\n with TaskGroup(\"task_group_up2\") as task_group_up2:\n _ = EmptyOperator(task_id=\"task_up2\")\n\n with TaskGroup(\"task_group_middle\") as task_group_middle:\n _ = EmptyOperator(task_id=\"task_middle\")\n\n with TaskGroup(\"task_group_down1\") as task_group_down1:\n _ = EmptyOperator(task_id=\"task_down1\")\n\n with TaskGroup(\"task_group_down2\") as task_group_down2:\n _ = EmptyOperator(task_id=\"task_down2\")\n\n end = EmptyOperator(task_id=\"end\")\n\n start >> task_group_up1\n start >> task_group_up2\n task_group_up1 >> task_group_middle\n task_group_up2 >> task_group_middle\n task_group_middle >> task_group_down1\n task_group_middle >> task_group_down2\n task_group_down1 >> end\n task_group_down2 >> end\n\n task_group_middle_dict = TaskGroupSerialization.serialize_task_group(\n dag.task_group.children[\"task_group_middle\"]\n )\n upstream_group_ids = task_group_middle_dict[\"upstream_group_ids\"]\n assert upstream_group_ids == [\"task_group_up1\", \"task_group_up2\"]\n\n upstream_task_ids = task_group_middle_dict[\"upstream_task_ids\"]\n assert upstream_task_ids == [\"task_group_up1.task_up1\", \"task_group_up2.task_up2\"]\n\n downstream_group_ids = task_group_middle_dict[\"downstream_group_ids\"]\n assert downstream_group_ids == [\"task_group_down1\", \"task_group_down2\"]\n\n task_group_down1_dict = TaskGroupSerialization.serialize_task_group(\n dag.task_group.children[\"task_group_down1\"]\n )\n downstream_task_ids = task_group_down1_dict[\"downstream_task_ids\"]\n assert downstream_task_ids == [\"end\"]",
"def test_with_remainder(self):\n data = range(21)\n grouped = util.make_even_groups(data, 5)\n self.assertEqual(len(grouped), 4)\n for group in grouped:\n self.assertEqual(len(group), 5)\n full = sorted(flatten(grouped))\n self.assertEqual(full, data[:-1])",
"def test_subgroups(clean_raw_data):\n subgroup_names = subgroups(clean_raw_data)\n assert subgroup_names == ['spectrum1', 'spectrum2', 'spectrum3']",
"def is_group_ref_group(group, key):\n # FIXME this is hardcoded for PIAEL order\n north_index = key[0].find(group[0])\n east_column = ''.join([key[i][25] for i in range(26)])\n east_index = east_column.find(group[1])\n\n if key[east_index][north_index] != group[2] or key[25][north_index] != group[3] \\\n or key[east_index][0] != group[4]:\n return False\n\n return True",
"def group(seq, groupSize, noneFill=True):\n ret = []\n L = []\n i = groupSize\n for elt in seq:\n if i > 0:\n L.append(elt)\n else:\n ret.append(L)\n i = groupSize\n L = []\n L.append(elt)\n i -= 1\n if L:\n if noneFill:\n while len(L) < groupSize:\n L.append(None)\n ret.append(L)\n return ret",
"def _force_group_ops(self, force_group):\n for regex in force_group:\n force_group_ops = []\n for op, op_slices in self._op_slice_dict.items():\n if op_handler_util.group_match(regex, op_slices):\n force_group_ops.append(op)\n\n # If no ops match, continue to the next force-group.\n if not force_group_ops:\n raise ValueError('Regex \\'%s\\' did not match any ops.')\n\n # Assert all ops to force-group have only 1 OpSlice.\n if ([len(self._op_slice_dict[op]) for op in force_group_ops] !=\n [1] * len(force_group_ops)):\n multiple_slice_ops = []\n for op in force_group_ops:\n if len(self._op_slice_dict[op]) != 1:\n multiple_slice_ops.append(op.name)\n raise ValueError('Cannot force-group ops with more than 1 OpSlice: %s' %\n multiple_slice_ops)\n\n # Assert all ops to force-group have the same size.\n target_op_size = self._op_slice_dict[force_group_ops[0]][0].slice.size\n if ([self._op_slice_dict[op][0].slice.size for op in force_group_ops] !=\n [target_op_size] * len(force_group_ops)):\n op_names = [op.name for op in force_group_ops]\n raise ValueError(\n 'Cannot force-group ops with different sizes: %s' % op_names)\n\n # Group the ops.\n self.group_op_slices(\n [self._op_slice_dict[op][0] for op in force_group_ops])",
"def getNextLevelGroups(group, boundaryBeats, pitchBeats):\n nextLevel = [] # (List<Group>)\n # get all the sub-sub groups of the group and add them to the nextLevel beats\n # groups which have already seen boundary beats are not added. \n for g in createMockHighLevelGroup(group.getSubGroups(), pitchBeats):\n if identifyBoundaryBeat(g, pitchBeats) not in boundaryBeats:\n nextLevel.add(group2)\n return nextLevel",
"def IsGroup(self) -> bool:",
"def test_ordered_grouping_results(self):\n from furious.marker_tree.identity_utils import leaf_persistence_id_from_group_id\n from furious.marker_tree.result_sorter import first_iv_markers\n from furious.marker_tree.result_sorter import group_into_internal_vertex_results\n from furious.marker_tree.marker import Marker\n\n from furious.tests.marker_tree import dummy_leaf_combiner\n\n root_marker = Marker(id=\"little_job\")\n for x in xrange(3):\n root_marker.children.append(Marker(\n id=str(x),\n group_id=root_marker.id,\n result=[2, 2, 2],\n children=[Marker(id=\n leaf_persistence_id_from_group_id(str(x), i),\n result=2)\n for i in xrange(3)]\n ))\n\n for x in xrange(2):\n root_marker.children.append(Marker(\n id=leaf_persistence_id_from_group_id(root_marker.id, x + 3),\n result=1\n ))\n\n markers = first_iv_markers(root_marker.children)\n\n self.assertEqual(len(markers), 3)\n\n iv_results = group_into_internal_vertex_results(root_marker.children,\n dummy_leaf_combiner)\n\n self.assertEqual(len(iv_results), 4)\n self.assertEqual(iv_results, [[2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 1]])\n\n #shuffle children a few times\n chlin = root_marker.children\n children = [chlin[3], chlin[4], chlin[0], chlin[1], chlin[2]]\n iv_results = group_into_internal_vertex_results(children,\n dummy_leaf_combiner)\n self.assertEqual(iv_results, [[1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2]])\n children = [chlin[3], chlin[0], chlin[4], chlin[1], chlin[2]]\n iv_results = group_into_internal_vertex_results(children,\n dummy_leaf_combiner)\n self.assertEqual(iv_results, [[1], [2, 2, 2], [1], [2, 2, 2], [2, 2, 2]])\n iv_results = group_into_internal_vertex_results(children,\n None)\n self.assertEqual(iv_results, [[2, 2, 2], [2, 2, 2], [2, 2, 2]])",
"def test_group_odd_length():\n assert group(\"example\", 2) == ['ex', 'am', 'pl', 'e']",
"def findAllGroups(changes):\n groupToSplit = copy.deepcopy(changes)\n contigGroups = []\n for pixel in groupToSplit.pixels:\n if pixel in groupToSplit.pixels:\n newGroup = findThisGroup(pixel, groupToSplit)\n contigGroups.append(newGroup)\n #groupToSplit.removePixel(newGroup.pixels)\n return contigGroups\n # groupToSplit.remove(those pixels)",
"def connect_groups(self):\n \"\"\"\n end_group = [self.groups[0]]\n\n if len(self.groups) == 1:\n return end_group\n\n for group in self.groups[1:]:\n \n if len(group) < 5: \n continue\n \n print(len(group))\n\n closest_coord = group[0]\n closest_dist = self.width\n for coord in group[1:]:\n for point in end_group:\n if coord.distance(point) < closest_dist:\n closest_coord = point\n closest_dist = coord.distance(point)\n\n print(closest_coord, closest_dist)\n \"\"\"\n\n return False",
"def getNoteGroups(midi):\n boundaryStrengths = lbdm(midi)\n track = midi.tracks[0]\n avg_strength = sum(boundaryStrengths) / len(boundaryStrengths)\n groups = []\n current_group = []\n noteList = [noteEvent for noteEvent in track.eventList if noteEvent.type == \"note\"]\n# print len(boundaryStrengths), len(noteList)\n for i in range(len(noteList)):\n note = noteList[i]\n if current_group == []:\n current_group.append(note)\n elif len(current_group) < 4:\n current_group.append(note)\n elif i == len(boundaryStrengths):\n current_group.append(note)\n elif boundaryStrengths[i] > avg_strength:\n# current_group.append(note)\n groups.append(copy(current_group))\n current_group = [note]\n else:\n current_group.append(note)\n if current_group != []:\n groups.append(current_group)\n \n detailedGroups = []\n firstNoteOfGroup = 0\n for group in groups:\n first = []\n last = []\n firstStrength = 0.\n turningPointIndex = boundaryStrengths.index(max(boundaryStrengths[firstNoteOfGroup:firstNoteOfGroup + len(group) - 1]), firstNoteOfGroup)\n turningPoint = noteList[turningPointIndex]\n# foundTurningPoint = False\n for i in range(len(group)):\n if firstNoteOfGroup + i < turningPointIndex:\n first.append(note)\n elif firstNoteOfGroup + i > turningPointIndex:\n last.append(note)\n \n firstNoteOfGroup += len(group) - 1\n detailedGroups.append([first, turningPoint, last])\n \n \n \n \n \n return detailedGroups",
"def test_group_true(self):\n actual = self.view004(group=True)['rows']\n expected = [{'key': x, 'value': 2} for x in range(50)]\n self.assertEqual(len(actual), 50)\n self.assertEqual(len(expected), 50)\n self.assertEqual(actual, expected)",
"def _detect_cycle(group: Set[Operator]) -> bool:\n parents = [o for op1 in group for i in op1._attrs[\"inputs\"] for o in i.src_ops()]\n for op1 in group:\n for op2 in set(parents) - group:\n if transform_utils.is_ancestor(op1, op2):\n return True\n return False",
"def get_subgroups(data):\n dicom_fields = [data[('0028', '0101')].value, #Bits Stored\n data[('0028', '0103')].value] #Pixel Representation\n dicom_values = [get_first_of_dicom_field_as_int(x) for x in dicom_fields]\n if dicom_values[0] == 16:\n return 1\n elif dicom_values[0] == 12 and dicom_values[1] == 0:\n return 2\n elif dicom_values[0] == 12 and dicom_values[1] == 1:\n return 3\n else:\n return -1",
"def find_groups(a: List[Any]) -> List[Any]:\n groups = []\n cur_val, last_idx = 0, 0\n for idx, val in enumerate(a):\n if idx == 0:\n cur_val = val\n continue\n if val != cur_val or idx == len(a) - 1:\n if idx == len(a) - 1:\n groups.append((last_idx, idx + 1, cur_val))\n else:\n groups.append((last_idx, idx, cur_val))\n last_idx = idx\n cur_val = val\n else:\n cur_val = val\n return groups",
"def test_dials_symmetry_decide_pointgroup(\n reflection_spacegroup,\n experiments_spacegroup,\n expected_lattices,\n required_spacegroup_order,\n other_spacegroups,\n helper_directory,\n):\n helper, tmpdir = helper_directory\n refl_path = (tmpdir / \"test.refl\").strpath\n exp_path = (tmpdir / \"test.expt\").strpath\n generated_exp(space_group=experiments_spacegroup).as_file(exp_path)\n generate_reflections_in_sg(reflection_spacegroup).as_file(refl_path)\n\n symmetry_analyser = helper.dials_symmetry_decide_pointgroup([exp_path], [refl_path])\n\n # Note : instabilities have been observed in the order of the end of the\n # spacegroup list - this is likely due to the use of unseeded random number\n # generation in dials.symmetry symmetry element scoring, but this only seems\n # to affect the order of groups with a score near zero. Hence only assert the\n # order of the spacegroups that must be in order, near the start of the list.\n assert symmetry_analyser.get_possible_lattices() == expected_lattices\n spacegroups = symmetry_analyser.get_likely_spacegroups()\n assert spacegroups[: len(required_spacegroup_order)] == required_spacegroup_order\n assert set(spacegroups[len(required_spacegroup_order) :]) == set(other_spacegroups)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes the number of cells that lie within a certain distance of a given query cell. Biologically, this is implemented by feedback signals (e.g. biomolecules that are secreted and later absorbed).
|
def number_cells_within_range(query_cell, cells, signaling_range):
if len(cells) == 0:
return 0
else:
query_cell_position = np.array([query_cell.position])
cell_positions = np.array([cell.position for cell in cells])
query_cell_distances = cdist(query_cell_position, cell_positions).ravel()
return len(query_cell_distances[query_cell_distances < signaling_range])
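
A hedged usage sketch for the function above, assuming a minimal Cell object with a position attribute (the simulation's real cell class is not shown here); the function body is repeated verbatim so the snippet runs on its own.

import numpy as np
from scipy.spatial.distance import cdist

class Cell:
    """Minimal stand-in for the simulation's cell object."""
    def __init__(self, position):
        self.position = position  # tuple of coordinates, e.g. (x, y)

def number_cells_within_range(query_cell, cells, signaling_range):
    if len(cells) == 0:
        return 0
    else:
        query_cell_position = np.array([query_cell.position])
        cell_positions = np.array([cell.position for cell in cells])
        query_cell_distances = cdist(query_cell_position, cell_positions).ravel()
        return len(query_cell_distances[query_cell_distances < signaling_range])

# two of the three neighbours lie within a signaling range of 1.5 of the origin
neighbours = [Cell((1.0, 0.0)), Cell((0.0, 1.0)), Cell((5.0, 5.0))]
print(number_cells_within_range(Cell((0.0, 0.0)), neighbours, 1.5))  # 2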
|
[
"def get_number_of_rooms_at_least_distance_away(self, distance: int) -> int:\n return len([_ for d, _ in self.graph.get_node_distances(self.origin) if d >= distance])",
"def compute_distance(ix: int, c: int) -> float:\n if c == cell.FREE:\n nearest_occupied: Optional[\n Tuple[kdtree.Node, float]\n ] = occupied_tree.search_nn(to_pos(ix), dist=points_dist)\n\n # Contingency for a map with no occupied cells.\n if nearest_occupied is None:\n return DIST_UNKNOWN\n\n (_, distance) = nearest_occupied\n\n return distance\n\n return DIST_OCCUPIED if c == cell.OCCUPIED else DIST_UNKNOWN",
"def alive_neighbors(self, row, col):\n\n count = 0 # Keeps track of how many alive cells are in the neighborhood\n\n if row > 0: # Checks if there is an upper row\n count += self._cells[row - 1][col].get_value() # Gets the value of the cell immediately above\n\n if col > 0: # Checks if there is a column to the left\n count += self._cells[row - 1][col - 1].get_value() # Gets the value of the cell at its top left\n\n if col < self._cols - 1: # Checks if there is a column to the right\n count += self._cells[row - 1][col + 1].get_value() # Gets the value of the cell at its top right\n\n if row < self._rows - 1: # Cheks if there is a lower row\n count += self._cells[row + 1][col].get_value() # Gets the value of the cell immediately below\n\n if col > 0: # Checks if there is a column to the left\n count += self._cells[row + 1][col - 1].get_value() # Gets the value of the cell at its bottom left\n\n if col < self._cols - 1: # Checks if there is a column to the right\n count += self._cells[row + 1][col + 1].get_value() # Gets the value of the cell at its bottom right\n\n if col > 0: # Checks if there is a column to the left\n count += self._cells[row][col - 1].get_value() # Gets the value of the cell at its left\n\n if col < self._cols - 1: # Checks if there is a column to the right\n count += self._cells[row][col + 1].get_value() # Gets the value of the cell at its right\n\n return count",
"def robot_to_cell_distance_grid(self):\n return scipy.linalg.norm(self.grid_to_pose, axis=0)",
"def cellWidthVsLatLon():\n # authors: Steven Brus, Phillip J. Wolfram\n km = 1000.0\n\n params = ct.default_params\n\n print(\"****QU120 background mesh and 10km refinement from NC to NH****\")\n params[\"mesh_type\"] = \"QU\"\n params[\"dx_max_global\"] = 120.0 * km\n params[\"region_box\"] = ct.Delaware_Bay\n params[\"plot_box\"] = ct.Western_Atlantic\n params[\"dx_min_coastal\"] = 10.0 * km\n params[\"trans_width\"] = 600.0 * km\n params[\"trans_start\"] = 400.0 * km\n\n cell_width, lon, lat = ct.coastal_refined_mesh(params)\n\n print(\"****5km refinement along coast from VA to NY****\")\n params[\"region_box\"] = ct.Delaware_Region\n params[\"plot_box\"] = ct.Delaware\n params[\"dx_min_coastal\"] = 5.0 * km\n params[\"trans_width\"] = 175.0 * km\n params[\"trans_start\"] = 75.0 * km\n\n cell_width, lon, lat = ct.coastal_refined_mesh(\n params, cell_width, lon, lat)\n\n print(\"****2km refinement inside Delaware Bay****\")\n params[\"region_box\"] = ct.Delaware_Bay\n params[\"plot_box\"] = ct.Delaware\n params[\"restrict_box\"] = ct.Delaware_restrict\n params[\"dx_min_coastal\"] = 2.0 * km\n params[\"trans_width\"] = 100.0 * km\n params[\"trans_start\"] = 17.0 * km\n\n cell_width, lon, lat = ct.coastal_refined_mesh(\n params, cell_width, lon, lat)\n\n return cell_width / 1000, lon, lat",
"def get_dist_counts(data, distance):\n #print \"in dist_counts\"\n distances = spatial.distance.squareform(spatial.distance.pdist(data, distance))\n closest_5 = np.argpartition(distances, kth=5, axis=1)[:,:5]\n # no argpartition in np 1.7 on hulk\n counts = np.zeros(len(distances))\n #print closest_5\n \n for i in range(len(closest_5)):\n for j in range(5):\n if j != i:\n counts[closest_5[i][j]] += 1\n \n\n \n return counts",
"def _countPaths(self, cells, n, m, x, y):\n \n if (self.dp):\n if ((x, y) in self.dpMap): return self.dpMap[(x, y)]\n if (cells[x][y] == True): return 0\n if (x == n - 1 and y == m - 1): return 1\n \n \n k = 0\n if (x + 1 < n): k += self._countPaths(cells, n, m, x + 1, y)\n if (y + 1 < m): k += self._countPaths(cells, n, m, x, y + 1)\n\n if (self.dp):\n self.dpMap[(x, y)] = k # Memorization for dynamic programming\n return k",
"def evaluate_euclidean_cell_utilities(self):\n for row in self.grid:\n for cell in row:\n cell.distance_utility = get_euclidean_distance(cell, self.target)",
"def test_get_potential_cells_6(self):\n p_arrs = generate_sample_dataset_2()\n cm = CellManager(arrays_to_bin=p_arrs, min_cell_size=1.,\n max_cell_size=2.)\n \n # the cells would have been setup, we start issuing queries.\n cell_list = []\n # query for search particle 1.\n pnt = Point()\n pnt.x = 0.5\n pnt.y = 0.5\n pnt.z = 0.0\n\n cm.py_get_potential_cells(pnt, 3.0, cell_list)\n \n # we should get all the cells\n self.assertEqual(len(cell_list), len(cm.cells_dict))",
"def _filter_cell_clumps(data, cells, wildcards, distance_threshold=10):\n if np.all(cells==0):\n return np.zeros((1480,1480))\n\n df = (Snake._extract_features(cells, cells, wildcards))\n # add column for [x,y] positions\n df['ij'] = df[['i','j']].values.tolist()\n ij = df['ij'].values.tolist()\n\n # calculate matrix of Euclidean distance between all cells in FOV\n distance = scipy.spatial.distance.cdist(ij, ij, 'euclidean')\n min_dist = np.where(distance>0, distance,distance.max()).min(1)\n # cells (labels) that pass distance threshold from nearest neighbor\n try:\n min_idx = np.hstack(np.argwhere(min_dist > distance_threshold))\n label = df.iloc[min_idx]\n mask = np.isin(cells, np.array(label['label'].values.tolist()))\n filtered_cells = np.multiply(mask.astype(int),cells)\n except:\n filtered_cells = np.zeros((1480,1480))\n\n return filtered_cells",
"def total_number_of_cell(self) ->float:\n return self.parameters.cell_per_well * self.nb_puits",
"def _cell_num_point(self, cell):\n obs, reqs = self.tiling.cell_basis()[cell]\n ob_lens = sorted(map(len, obs))\n assert ob_lens[0] == 2, \"Unexpected obstruction\"\n assert len(reqs) <= 1, \"Unexpected number of requirement\"\n if len(obs) == 1:\n maxlen = None\n elif len(obs) == 2:\n maxlen = ob_lens[1] - 1\n else:\n raise RuntimeError(\"Unexpected number of obstructions\")\n if not reqs:\n minlen = 0\n elif len(reqs) == 1:\n minlen = len(reqs[0])\n else:\n raise RuntimeError(\"Unexpected number of requirements\")\n return minlen, maxlen",
"def n_cells(self):\n return int(np.prod(self._n))",
"def get_cell_count(grid):\n\n return len(grid.cell_sprite)",
"def number_of_atoms_in_cell(position):\n num_atoms = np.shape(position)[0]\n return num_atoms",
"def evaluate_dijikstra_cell_utilities(self):\n self.target.set_distance_utility(0)\n unvisited_queue = [(self.target.get_utility(), self.target)]\n\n while len(unvisited_queue):\n unvisited = heapq.heappop(unvisited_queue)\n current_cell = unvisited[1]\n current_cell.set_visited()\n for next_cell in current_cell.get_adjacent_minus_obstacles():\n if next_cell.visited:\n continue\n new_dist = current_cell.get_utility() + get_euclidean_distance(current_cell, next_cell)\n if new_dist < next_cell.get_utility():\n next_cell.set_distance_utility(new_dist)\n heapq.heappush(unvisited_queue, (next_cell.get_utility(), next_cell))",
"def mesh_cells_within_radius(cell_xyz, radius):\n # Get distance matrix\n distmat = distance_matrix(cell_xyz, cell_xyz)\n # For each row, find indices where distance <= radius\n result_list = [np.nonzero(row <= radius)[0] for row in distmat]\n return result_list",
"def getCellSizeFn(points):\n # Coordinates of target\n target = (5.0e+3, -10.0e+3, -10.0e+3)\n\n # Compute distance from target\n dist = ((points[:, 0] - target[0])**2 +\n (points[:, 1] - target[1])**2 +\n (points[:, 2] - target[2])**2)**0.5\n bias_factor = 1.05 # Geometric rate\n dxStart = 1.0e+3 # Discretization size at target\n npts = numpy.ceil(numpy.log(1 - dist / dxStart * (1 - bias_factor)) / numpy.log(bias_factor))\n cellSize = dxStart * bias_factor**npts\n return cellSize",
"def _manhattan_distance_to_closest_ghost(self, state, row, col):\n\n \treturn self.distances[row][col]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Visualize how the self-renewal probability of a given (cycling) cell depends upon the number of cycling and quiescent cells in its vicinity
|
def plot_self_renewal_probability():
def self_renewal_probability(x, y):
from read import read_into_dict
parameterValues = read_into_dict('parameterValues.in')
self_renewal_probability_max = parameterValues['self_renewal_probability_max']
return self_renewal_probability_max * W_positive_base(0, 0, x, y, parameterValues) * W_negative_base(0, x+y, parameterValues)
def print_self_renewal_probabilities(x, y):
print('number of wild-type quiescent cells = ' + str(x))
print('number of mutant quiescent cells = ' + str(y))
print('self-renewal probability = ' + str(self_renewal_probability(x, y)))
# how much does the self-renewal probability change if we replace one wild-type Q-cell with a mutant Q-cell?
print_self_renewal_probabilities(2, 0)
print_self_renewal_probabilities(1, 1)
print()
print_self_renewal_probabilities(1, 0)
print_self_renewal_probabilities(0, 1)
# noinspection PyUnresolvedReferences
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import pyplot as plt
fig = plt.figure(facecolor='white')
ax = fig.gca(projection='3d', zlim=(0, 1))
xx = np.linspace(0, 4)
yy = np.linspace(0, 4)
XX, YY = np.meshgrid(xx, yy)
# noinspection PyUnresolvedReferences
surf = ax.plot_surface(XX, YY, self_renewal_probability(XX, YY), rstride=2, cstride=2, cmap=cm.RdPu, linewidth=1, antialiased=True)
ax.zaxis.set_major_locator(LinearLocator(6))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.set_title('self renewal probability\n' + '(assumes equal +ve and -ve feedback range)')
fig.colorbar(surf, shrink=0.5, aspect=5)
ax.set_xlabel('number of local quiescent WT cells')
ax.set_ylabel('number of local quiescent MUTANT cells')
ax.view_init(elev=30, azim=-60)
ax.dist = 10
plt.show()
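
The plot above depends on W_positive_base, W_negative_base, and a parameterValues.in file that are not part of this snippet. Below is a hedged, self-contained sketch that substitutes simple saturating/inhibitory feedback terms so the surface can be rendered standalone; the functional forms and constants are illustrative assumptions, not the model's actual feedback functions.

import numpy as np
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (needed on older matplotlib)
from matplotlib import cm
from matplotlib import pyplot as plt

def self_renewal_probability_sketch(n_quiescent_wt, n_quiescent_mut,
                                    p_max=0.7, k_pos=2.0, k_neg=4.0):
    """Stand-in positive feedback (saturating) and negative feedback (inhibitory)."""
    n_total = n_quiescent_wt + n_quiescent_mut
    positive = n_total / (k_pos + n_total)   # placeholder for W_positive_base
    negative = k_neg / (k_neg + n_total)     # placeholder for W_negative_base
    return p_max * positive * negative

xx = np.linspace(0, 4)
yy = np.linspace(0, 4)
XX, YY = np.meshgrid(xx, yy)

fig = plt.figure(facecolor='white')
ax = fig.add_subplot(projection='3d')
ax.set_zlim(0, 1)
surf = ax.plot_surface(XX, YY, self_renewal_probability_sketch(XX, YY),
                       cmap=cm.RdPu, linewidth=1, antialiased=True)
fig.colorbar(surf, shrink=0.5, aspect=5)
ax.set_xlabel('number of local quiescent WT cells')
ax.set_ylabel('number of local quiescent MUTANT cells')
ax.set_title('self-renewal probability (illustrative feedback terms)')
plt.show()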
|
[
"def PlotIdealCellGrowth():\n\n # Make vectors:\n def MakeVectors(cells_start, doubling_time):\n movie_time = 80\n cell_list = [cells_start]\n time_list = [0]\n\n while time_list[-1] < movie_time:\n cell_list.append(cell_list[-1] * 2)\n time_list.append(time_list[-1] + doubling_time)\n\n return (cell_list, time_list)\n\n # Initiate the figure:\n fig = plt.figure()\n plt.xlabel(\"Time [hours]\")\n plt.ylabel(\"Cell count\")\n plt.title(\"Ideal Cell Growth per movie\")\n\n # Plot the dependencies:\n cells_start = [120, 140, 160, 180, 200]\n doubling_time = [16, 18, 20, 22, 24]\n\n for i in cells_start:\n for j in doubling_time:\n cell_list, time_list = MakeVectors(cells_start=i, doubling_time=j)\n plt.plot(time_list, cell_list, \"-o\", label=\"{} cells; {} hrs\".format(i, j))\n plt.legend(loc=\"upper left\")\n plt.show()\n plt.close()",
"def division_probability(cell):\n return 1",
"def __draw_solution_cells(self) -> None:\n\n for step in self.__solutionSteps:\n # Add an offset to place the circle in the center of the cell\n x = step[0] + Constants.CELL_SIZE / 2\n y = step[1] + Constants.CELL_SIZE / 2\n\n pygame.draw.circle(self.__screen, Colour.RED.value, (x, y), 3)\n pygame.display.update()\n\n sleep_if_animation(.1)",
"def curve_convergence(self):\n fig, ax = plt.subplots(1, 1, figsize=(20,15)) \n\n title = r'%d iterations ' % max(self.adjoint.iterations)\n title += 'at learning rate $\\gamma = %.1f$' % self.adjoint.lr\n self.subplot_solution_descent(ax, title)\n ax.legend(loc='upper center', ncol=2)\n\n plt.show()\n plt.close()",
"def current_dist_reported(self):\n\n if len(self.rounds) == 1:\n round_draw = self.rounds[0]\n else:\n round_draw = self.rounds[-1] - self.rounds[-2]\n\n if self.replacement:\n distribution_round_draw = binom.pmf(range(0, round_draw + 1), round_draw, self.contest.winner_prop)\n if len(self.rounds) == 1:\n self.distribution_reported_tally = distribution_round_draw\n else:\n self.distribution_reported_tally = fftconvolve(self.distribution_reported_tally, distribution_round_draw)\n else:\n reported_winner_ballots = int(self.contest.winner_prop * self.contest.contest_ballots)\n if len(self.rounds) == 1:\n # Simply compute hypergeometric for 1st round distribution\n self.distribution_reported_tally = hypergeom.pmf(np.arange(round_draw + 1), self.contest.contest_ballots,\n reported_winner_ballots, round_draw)\n else:\n distribution_round_draw = [0 for i in range(self.rounds[-1] + 1)]\n # Get relevant interval of previous round distribution\n interval = self.__get_interval(self.distribution_reported_tally)\n # For every possible number of winner ballots in previous rounds\n # and every possibility in the current round\n # compute probability of their simultaneity\n for prev_round_possibility in range(interval[0], interval[1] + 1):\n unsampled_contest_ballots = self.contest.contest_ballots - self.rounds[-2]\n unsampled_winner_ballots = reported_winner_ballots - prev_round_possibility\n\n curr_round_draw = hypergeom.pmf(np.arange(round_draw + 1), unsampled_contest_ballots, unsampled_winner_ballots,\n round_draw)\n for curr_round_possibility in range(round_draw + 1):\n component_prob = self.distribution_reported_tally[prev_round_possibility] * curr_round_draw[curr_round_possibility]\n distribution_round_draw[prev_round_possibility + curr_round_possibility] += component_prob\n self.distribution_reported_tally = distribution_round_draw",
"def prob6():\n domain = np.linspace(-1,1,100)\n T_p = elementwise_grad(cheb_poly)\n plt.ion()\n \n plt.plot(domain, T_p(domain, 0), label=\"n = 0\")\n plt.plot(domain, T_p(domain, 1), label=\"n = 1\")\n plt.plot(domain, T_p(domain, 2), label=\"n = 2\")\n plt.plot(domain, T_p(domain, 3), label=\"n = 3\")\n plt.plot(domain, T_p(domain, 4), label=\"n = 4\")\n plt.legend()\n plt.show()",
"def display_probability_map(self,p):\r\n\t self.screen.fill((0,0,0))\r\n\t for j in range(0,10):\r\n\t\t for i in range(0,10):\r\n\t\t\t pygame.draw.rect(self.screen,(50+205*p[i][j],0,50+205*p[i][j]),(i*64,j*48,64,48))\r\n\t pygame.draw.circle(self.screen,(255,255,255),(self.y*64+32,self.x*48+24),24)\r\n\t pygame.display.update()\r\n\t pygame.time.wait(50)",
"def draw_priors(number):\n #m1 = np.random.normal(m_tot,m_tot_err,number)\n #dist = np.random.normal(d_star,d_star_err,number)\n m1 = m_tot\n dist = d_star\n # Fixing and initial semi-major axis:\n a_au=100.0\n a_au=np.linspace(a_au,a_au,number)\n T = np.sqrt((np.absolute(a_au)**3)/np.absolute(m1))\n a = a_au/dist #semimajor axis in arcsec\n\n # Fixing an initial Longitude of ascending node in radians:\n O = np.radians(0.0) \n O=[O]*number\n\n # Randomly generated parameters:\n #to = Time of periastron passage in years:\n const = np.random.uniform(0.0,1.0,number)\n #^ Constant that represents the ratio between (reference epoch minus to) over period. Because we are scaling\n #semi-major axis, period will also scale, and epoch of periastron passage will change as a result. This ratio\n #will remain constant however, so we can use it scale both T and to appropriately.\n to = d-(const*T)\n\n # Eccentricity:\n e = np.random.uniform(0.0,1.0,number)\n # Inclination in radians:\n cosi = np.random.uniform(-1.0,1.0,number) #Draws sin(i) from a uniform distribution. Inclination\n # is computed as the arccos of cos(i):\n i = np.arccos(cosi)\n # Argument of periastron in degrees:\n w = np.random.uniform(0.0,360.0,number)\n w = np.radians(w) #convert to radians for calculations\n return a,T,const,to,e,i,w,O,m1,dist",
"def plotCostVsIterations(JVals):\n plt.figure()\n # plt.xkcd()\n plt.plot(JVals)\n plt.xlabel('iterations')\n plt.ylabel('cost')\n plt.title('gradient descent performance')\n plt.show()",
"def test(particle_object):\n\teps = particle_object.eps\n\ttwo_m_hbar_square = prefactor(particle_object)\n\teff_mass = effective_mass(particle_object)\n\n\tr_min = 0.5\n\tr_max = 5.5\n\tr_steps = 1000\n\trs, dr = np.linspace(r_min, r_max, r_steps, retstep=True)\n\n\tEs = np.linspace(0.4, 5.4, 200)\n\tdelta_l = np.zeros_like(Es)\n\ttotal_delta_l = np.zeros_like(Es)\n\tls = np.arange(0, 11, dtype=np.int8)\n\n\tfig = plt.figure(figsize=(12, 9)) # plot the calculated values\n\tfig.add_subplot(1, 1, 1)\n\n\tfor l in ls:\n\t\tfor i in range(np.size(Es)):\n\t\t\tk = two_m_hbar_square * Es[i]\n\t\t\tpsi = radial_schrodinger_equation(rs, dr, Es[i], l, particle_object)\n\t\t\tdelta_l[i] = 4.0 * np.pi * (2 * l + 1) * (np.sin(phase_shift(rs, dr, psi, Es[i], l, two_m_hbar_square)))**2 / k\n\t\t\ttotal_delta_l[i] += delta_l[i]\n\t\t# plt.plot(np.sqrt(2 * Es * const.e * 1e-3 / eff_mass), delta_l, label=r\" l={0}\".format(l))\n\t\tplt.plot(Es, delta_l, label=r\" l={0}\".format(l))\n\tplt.title(r'Total Cross Section(Hydrogen-{0})'.format(particle_object.name))\n\tplt.xlabel(r\"Energy[meV]\", fontsize=12)\n\tplt.ylabel(r\"Total cross section[$\\rho^2$]\", fontsize=12)\n\tplt.xlim(left=0.0)\n\tplt.fill_between(Es, total_delta_l, color='silver', label=r\"Total $\\delta$\")\n\tfor peak in particle_object.peak_e_info:\n\t\tplt.axvspan(peak[0] - peak[1], peak[0] + peak[1], ymin=0.0)\n\tplt.plot(Es, total_delta_l, \"--\")\n\tplt.legend()\n\tplt.tight_layout()\n\tfig.subplots_adjust(top=0.88)\n\t# plt.show()\n\tfig.savefig(\"{0}TotalCrossSection_Final.pdf\".format(particle_object.name), format=\"pdf\")",
"def test_disp100(nq, ne):\n\n uc = UnitCell( )\n at1=Atom(symbol='Fe', mass=57) ; pos1=(0.0,0.0,0.0)\n at2=Atom(symbol='Al') ; pos2=(0.5,0.5,0.5)\n site1 = Site(pos1, at1)\n site2 = Site(pos2, at2)\n uc.addAtom( at1, pos1, \"Fe1\" )\n uc.addAtom( at2, pos2, \"Al1\" )\n print uc\n\n kptlist = uc.getMonkhorstPackGrid((20,20,20)).reshape(8000,3)\n sqecalc = AbInitio.kernelGenerator.SqeCalculator.SqeCalculator(uc, kpoints=kptlist)\n\n sqecalc.readIDFeigenvectors(filename='pols_FeAl222.idf')\n sqecalc.readEigenvaluesFromIDFomega2(filename='omega2_FeAl222.idf')\n\n sqecalc._DebyeWallerCalculator._energies = sqecalc._energies\n sqecalc._DebyeWallerCalculator._polvecs = sqecalc._polvecs\n\n estart = 0.0\n deltae = 50.0 / ne\n sqecalc._etransferTol = deltae\n\n deltaqx = 3.0 / nq\n sqecalc._qtransferTolRadius = deltaqx\n qstart = numpy.array([0.0, 0.0, 0.0])\n deltaq = numpy.array([deltaqx, 0.0, 0.0])\n\n sqe = numpy.zeros((nq,ne), dtype='float')\n\n for iq in range(nq):\n for ie in range(ne):\n qtransfer = qstart + iq * deltaq\n etransfer = estart + ie * deltae\n sqe[iq,ie] = sqecalc.calcSqeCohCreateAllmodes(qtransfer, etransfer)\n print iq, ie, sqe[iq,ie]\n\n pylab.imshow(sqe)\n pylab.show()\n end = raw_input()\n return",
"def test_MCMC(p):\n bound_split = [0,15,20,21,22,23,24,25] #Thresholds\n\n simulation = MCMC_simulation(bound_split, p=p)\n estimators = np.cumsum(simulation, axis = 0)/(np.arange(1,len(simulation[:,0])+1).reshape((-1,1)))\n\n intermediate_probas = estimators[-1,:] # Array of conditional probabilities\n proba = np.prod(intermediate_probas)\n\n for i in range(len(estimators[0,:])): #Plotting the estimators of conditional probabilities\n plt.figure(1)\n plt.plot(np.arange(1,len(simulation[:,0])+1), estimators[:,i], label='Estimator '+str(i+1))\n\n\n plt.legend(loc='best')\n plt.ylabel(\"Estimators of the conditional probabilities \")\n plt.xlabel(\"Simulations\")\n plt.title('Convergence of the estimators of the conditional probabilities with cin = 90 and cout = 85')\n\n print('The thresholds are ', bound_split)\n for i in range(len(estimators[0,:])):\n print(\"Probability of having \", bound_split[i+1] ,\" badly clustered vertices knowing that we have \", bound_split[i], \" of them is \", intermediate_probas[i])\n\n print(\"The final estimated probability is \", proba)\n\n plt.show()\n\n return(intermediate_probas)",
"def supercontinuumgeneration():\n\n betas = [0,0,-11.830e-3*1e-24, 8.1038e-5*1e-36, -9.5205e-8*1e-48, 2.0737e-10*1e-60,\n -5.3943e-13*1e-72, 1.3486e-15*1e-84, -2.5495e-18*1e-96, 3.0524e-21*1e-108,\n -1.7140e-24*1e-120];\n gamma = 0.1\n flength = 0.15\n simparams = prepare_sim_params(0.0, \n betas ,\n 835e-9,\n gamma,\n flength,\n 13, # Npoints\n 1.0, #tempspread\n zpoints=200, \n integratortype='dop853', \n reltol=1e-3, \n abstol=1e-6 ,\n shock=True,\n raman = True,\n ramantype = 'blowwood',#'hollenbeck', #or 'blowwood', 'linagrawal'\n fr=0.18 )\n t0 = 28.4e-15\n p = 10e3\n inifield = np.sqrt(p) * 1./np.cosh(simparams['tvec']/t0) \n tf,ff,zv = perform_simulation( simparams, inifield)\n saveoutput('scg.demo', tf, ff, zv, simparams)\n #\n # output plot\n #\n d = loadoutput('scg.demo')\n inoutplot(d,zparams={\"fignr\":3, \"clim\":(-360,-220),'fylim':(-360,-220)})\n plt.show()",
"def GenerateBackgrounds(l1_air, l1_water, l_cell, l2_water, l2_air, n):\n \n '''\n -----------------------------------------\n ----- with reweighted probabilities -----\n -----------------------------------------\n '''\n \n AirLine_1_Bkg = np.array([]) #fotones que han interctuado en la linea de aire 1 y serán bkg\n WaterLine_1_Bkg = np.array([]) #fotones que han interctuado en la linea de agua 1 y serán bkg\n CellPhotonsArray = np.array([]) #los fotones que han interactuado en la célula que pasarán al MC\n WaterLine_2_Bkg = np.array([]) #fotones que han interctuado en la linea de agua 2 y serán bkg\n AirLine_2_Bkg = np.array([]) #fotones que han interctuado en la linea de aire 2 y serán bkg\n \n #Mean free paths:\n Lambda_air = 4124.77 #cm updated by nist xcom\n Lambda_water = 5.66671 #cm updated by nist xcom\n Lambda_cell = 3.3698418318647483 #cm evaluated by Dani for DNA\n #Lambda_cell = 4.11492449 #cm evaluated by Dani for Biomolecule\n #Lambda_cell = 4.684764 #cm evaluated by Dani for PMMA\n #Lambda_cell = 5.66671 #cm when the cell is water\n \n #Probabilities:\n P_air_1_old = ProbabilityInteract(l1_air, Lambda_air)\n P_water_1_old = ProbabilityInteract(l1_water, Lambda_water)\n P_cell_old = ProbabilityInteract(l_cell, Lambda_cell)\n P_air_2_old = ProbabilityInteract(l2_air, Lambda_air)\n P_water_2_old = ProbabilityInteract(l2_water, Lambda_water)\n \n #Weighted the probabilties\n Ptotal = P_air_1_old + P_water_1_old + P_cell_old + P_air_2_old + P_water_2_old\n \n P_air_1_new = P_air_1_old/Ptotal\n P_water_1_new = P_water_1_old/Ptotal\n P_cell_new = P_cell_old/Ptotal\n P_air_2_new = P_air_2_old/Ptotal\n P_water_2_new = P_water_2_old/Ptotal\n \n con = 1\n if (con == 1):\n print('aire1:', P_air_1_new)\n print('aire2:', P_air_2_new)\n print('agua1:', P_water_1_new)\n print('agua2:', P_water_2_new)\n print('cell:', P_cell_new)\n print('ptotal:', Ptotal)\n con = 2\n \n \n #-------------------------------------------------------------------------------------------\n #El beam pipe entra en aire lo primero:\n \n Pinteract_l1_air = np.array([]) #probability of interact in the line 1 air\n print('\\n')\n print('Interaction with the air line 1, calculating backgrounds...')\n for i in range(0, n):\n Pinteract_l1_air = np.append(Pinteract_l1_air, P_air_1_new)\n \n NoInteract_l1_air = np.array([]) #fotones supervivientes que continuarán al agua\n for i in range(0,len(Pinteract_l1_air)):\n rdn_air = rd.uniform(0,1)\n if (Pinteract_l1_air[i] < rdn_air):\n NoInteract_l1_air = np.append(NoInteract_l1_air, 0.0)\n else:\n AirLine_1_Bkg = np.append(AirLine_1_Bkg, 0.0)\n \n #ahora sobre los fotones que interaccionaron son bkg y estimamos los ángulos theta con el que salen que es un random\n for i in range(0, len(AirLine_1_Bkg)):\n rdn_air_angle = rd.uniform(0,1)*np.pi*2\n AirLine_1_Bkg[i] = rdn_air_angle\n\n #-------------------------------------------------------------------------------------------\n #Ahora los supervivientes del aire pasan al agua:\n \n Pinteract_l1_water = np.array([]) #probability of interact in the line 1 water\n print('\\n')\n print('Interaction with the water line 1, calculating backgrounds...')\n for i in range(0,len(NoInteract_l1_air)):\n Pinteract_l1_water = np.append(Pinteract_l1_water, P_water_1_new)\n \n NoInteract_l1_water = np.array([]) #fotones supervivientes que continuarán a la celula\n for i in range(0,len(Pinteract_l1_water)):\n rdn_water = rd.uniform(0,1)\n if (Pinteract_l1_water[i] < rdn_water):\n NoInteract_l1_water = np.append(NoInteract_l1_water, 
NoInteract_l1_air[i])\n else:\n WaterLine_1_Bkg = np.append(WaterLine_1_Bkg, NoInteract_l1_air[i])\n \n #ahora sobre los fotones que interaccionaron son bkg y estimamos los ángulos theta con el que salen que es un random\n for i in range(0, len(WaterLine_1_Bkg)):\n rdn_air_angle = rd.uniform(0,1)*np.pi*2\n WaterLine_1_Bkg[i] = rdn_air_angle \n\n #-------------------------------------------------------------------------------------------\n #Ahora los supervivientes del agua pasan a la célula:\n \n\n Pinteract_l_cell = np.array([]) #probability of interact in the cell\n print('\\n')\n print('Interaction with the cell, calculating backgrounds...')\n for i in range(0,len(NoInteract_l1_water)):\n Pinteract_l_cell = np.append(Pinteract_l_cell, P_cell_new)\n \n NoInteract_l_cell = np.array([]) #fotones supervivientes que continuarán al agua siguiente\n for i in range(0,len(Pinteract_l_cell)):\n rdn_cell = rd.uniform(0,1)\n if (Pinteract_l_cell[i] < rdn_cell):\n NoInteract_l_cell = np.append(NoInteract_l_cell, NoInteract_l1_water[i])\n else:\n CellPhotonsArray = np.append(CellPhotonsArray, NoInteract_l1_water[i])\n \n #-------------------------------------------------------------------------------------------\n #Los cell photon array son los fotones que pasarán al MC simulation y son nuestra señal\n #-------------------------------------------------------------------------------------------\n \n #-------------------------------------------------------------------------------------------\n #Ahora los supervivientes de la célula pasan al agua \n \n Lambda_water = 5.66671 #cm updated by nist xcom\n Pinteract_l2_water = np.array([]) #probability of interact in the line 2 water\n print('\\n')\n print('Interaction with the water line 2, calculating backgrounds...')\n for i in range(0,len(NoInteract_l_cell)):\n Pinteract_l2_water = np.append(Pinteract_l2_water, P_water_2_new)\n\n NoInteract_l2_water = np.array([]) #fotones supervivientes de la célula que continuarán al aire siguiente\n for i in range(0,len(Pinteract_l2_water)):\n rdn_water = rd.uniform(0,1)\n if (Pinteract_l2_water[i] < rdn_water):\n NoInteract_l2_water = np.append(NoInteract_l2_water, NoInteract_l_cell[i])\n else:\n WaterLine_2_Bkg = np.append(WaterLine_2_Bkg, NoInteract_l_cell[i])\n\n #ahora sobre los fotones que interaccionaron son bkg y estimamos los ángulos theta con el que salen que es un random\n for i in range(0, len(WaterLine_2_Bkg)):\n rdn_water_angle = rd.uniform(0,1)*np.pi*2\n WaterLine_2_Bkg[i] = rdn_water_angle\n\n #-------------------------------------------------------------------------------------------\n #Ahora los supervivientes del agua pasan al aire\n \n Lambda_air = 4124.77 #cm updated by nist xcom\n Pinteract_l2_air = np.array([]) #probability of interact in the line 2 air\n print('\\n')\n print('Interaction with the air line 2, calculating backgrounds...')\n \n for i in range(0,len(NoInteract_l2_water)):\n Pinteract_l2_air = np.append(Pinteract_l2_air, P_air_2_new)\n\n NoInteract_l2_air = np.array([]) #fotones supervivientes de la célula que continuarán al aire siguiente\n for i in range(0,len(Pinteract_l2_air)):\n rdn_water = rd.uniform(0,1)\n if (Pinteract_l2_air[i] < rdn_water):\n NoInteract_l2_air = np.append(NoInteract_l2_air, NoInteract_l2_water[i])\n else:\n AirLine_2_Bkg = np.append(AirLine_2_Bkg, NoInteract_l2_water[i])\n\n #ahora sobre los fotones que interaccionaron son bkg y estimamos los ángulos theta con el que salen que es un random\n for i in range(0, len(AirLine_2_Bkg)):\n rdn_air_angle = 
rd.uniform(0,1)*np.pi*2\n AirLine_2_Bkg[i] = rdn_air_angle\n\n return [AirLine_1_Bkg, WaterLine_1_Bkg, CellPhotonsArray, WaterLine_2_Bkg, AirLine_2_Bkg]",
"def visualize_prime_thm(N, step_size):\n\n primes = sieve(N)\n steps = np.linspace(2, N, step_size)\n result = [len([y for y in primes if y <= x]) / (x / np.log(x)) for x in steps]\n plt.plot(steps, result)\n plt.axhline(1, color='black', ls = '--')\n plt.show()",
"def disp_mic_npt(pos1, pos2, cell1, cell2):\n disp = pos2/cell2 - pos1/cell1\n for i in range(3):\n disp[i] -= round(disp[i]/cell2[i])\n return disp",
"def show_lightcone(eca, cell):\n # matplotlib\n pass",
"def plot_recap_vitro_ephy(title_dict, reM, phy_dict, cluster_ids, df_stim, cell_db_ids=None,\n checkerboard=None, fullfield_fl=None, fl_bars=None, chirp_am=None,\n chirp_fm=None, moving_gratings=None, export_path=\"./recap_plot.pdf\"):\n print(\"Generating the recap plot\")\n configure_pyplot_recap()\n\n cond = title_dict[\"condition\"]\n date = title_dict[\"date\"]\n record_name = title_dict[\"record_name\"]\n record_id = title_dict[\"record_id\"]\n\n if cell_db_ids is None:\n cell_db_ids = [-1]*len(cluster_ids)\n\n with PdfPages(export_path) as pp:\n\n #Plotting Cover\n fig = plt.figure(figsize=(8.267717*2,11.69291*2)) #A4 values in inches *2\n gs = gridspec.GridSpec(28, 20, left=0.05, right=.95, top=.92, bottom=.05, wspace=0.00, hspace=0.00)\n ax_rem = fig.add_subplot(gs[:10,2:-1])\n reM.plot(ax_rem)\n\n ax_stim_recap = fig.add_subplot(gs[11:16,:])\n plot_stim_recap_table(ax_stim_recap, df_stim)\n suptitle = \" - \".join([cond, date, record_name+\" n°\"+str(record_id)])\n plt.suptitle(suptitle)\n\n pp.savefig()\n plt.close()\n\n for cluster, cell_id in zip(cluster_ids, cell_db_ids):\n reM_cell_idx = reM[\"S_matrix\"][0].attrs[\"cell_map\"][cluster]#np.where(cluster==cluster_ids)[0][0]\n\n fig = plt.figure(figsize=(8.267717*2,11.69291*2)) #A4 values in inches *2\n suptitle = \" - \".join([cond, date, record_name+\" n°\"+str(record_id),\n \"Cluster n°\"+str(cluster), \"Cell id n°\"+str(cell_id)])\n plt.suptitle(suptitle)\n\n mask_cluster = phy_dict[\"spike_clusters\"]==cluster\n cluster_composition = np.unique(phy_dict[\"spike_templates\"][mask_cluster])\n\n gs = gridspec.GridSpec(28, 20, left=0.05, right=.95, top=.92, bottom=.05, wspace=0.00, hspace=0.00)\n\n #Template on electrodes\n cell_loc_ax = fig.add_subplot(gs[0:4,0:4])\n plot_spike_template_MEA(cell_loc_ax, cluster_composition, phy_dict[\"templates\"], phy_dict[\"channel_positions\"])\n\n #Autocorrelogram\n autocorr_ax = fig.add_subplot(gs[0:4,5:9])\n plot_autocorrelogram(autocorr_ax, cluster, phy_dict[\"spike_times\"], phy_dict[\"spike_clusters\"],\n bin_ms=.001, sampling_rate=30000, tails=30)\n\n #Spike amplitude across time\n sp_amp_ax = fig.add_subplot(gs[0:4,10:])\n plot_spike_amplitudes(sp_amp_ax, cluster, phy_dict[\"spike_templates\"], phy_dict[\"spike_clusters\"],\n phy_dict[\"spike_times\"], phy_dict[\"amplitudes\"])\n plot_stim_epochs_to_spikes(sp_amp_ax, reM, y_pos=0.6)\n\n #Checkerboard STA\n if checkerboard is not None:\n pval_checker = checkerboard[1][reM_cell_idx]\n pval_checker = np.min(pval_checker[pval_checker!=0])\n inner_grid = gridspec.GridSpecFromSubplotSpec(4, 4,\n subplot_spec=gs[5:12,0:12], wspace=.09, hspace=.13)\n plot_2d_sta(checkerboard[0][reM_cell_idx], pval=pval_checker, grid=inner_grid)\n\n #Fullfield flickering STA\n if fullfield_fl is not None:\n pval_fffl = fullfield_fl[1][reM_cell_idx]\n pval_fffl = np.min(pval_fffl[pval_fffl!=0])\n sp_amp_ax = fig.add_subplot(gs[5:12,13:])\n plot_t_sta(sp_amp_ax, fullfield_fl[0][reM_cell_idx], pval=pval_fffl)\n\n #Chirp_FM\n if chirp_fm is not None:\n chirpfm_ax = fig.add_subplot(gs[13:16,:])\n plot_chirp(chirpfm_ax, chirp_fm[0], chirp_fm[1][:,reM_cell_idx], smooth=False)\n chirpfm_ax.set_title(\"Chirp FM\")\n\n #Chirp_AM\n if chirp_am is not None:\n chirpam_ax = fig.add_subplot(gs[17:20,:])\n plot_chirp(chirpam_ax, chirp_am[0], chirp_am[1][:,reM_cell_idx], smooth=False)\n chirpam_ax.set_title(\"Chirp AM\")\n\n #Flickering bars\n if fl_bars is not None:\n pval_bars = fl_bars[1][reM_cell_idx]\n pval_bars = np.min(pval_bars[pval_bars!=0])\n fl_bars_ax = 
fig.add_subplot(gs[21:,:12])\n plot_fl_bars(fl_bars_ax, fl_bars[0][reM_cell_idx], pval=pval_bars)\n\n #Moving gratings\n if moving_gratings is not None:\n ds_ax = fig.add_subplot(gs[21:,13:], projection=\"polar\")\n plot_ds_wheel(ds_ax, moving_gratings, cell_idx=reM_cell_idx)\n\n pp.savefig()\n plt.close()\n\n print(\"Cell cluster n°\",cluster,\"done\")\n\n sns.set()\n plt.rcdefaults()\n print()",
"def test_min_neigh():\n recall = []\n precision = []\n range_parameter = []\n success = []\n for i in range(1, 20):\n r, p, s = get_metrics(minNeigh=i)\n recall.append(r); precision.append(p); range_parameter.append(i); success.append(s)\n \n plot_metrics(range_parameter, \"min neighbours\", recall, precision, None, \"min_neigh_comparison.png\")\n\n plt.close()\n plt.plot(range_parameter, success)\n plt.savefig(\"output/success_minNeigh.png\")\n plt.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a file in the appropriate format, returns the triple (feature_vectors, patient_diagnoses, D): feature_vectors is a dictionary that maps integer patient identification numbers to D-vectors, where D is the set of feature labels, and patient_diagnoses is a dictionary mapping patient identification numbers to {+1, -1}, where +1 indicates malignant and -1 indicates benign.
|
def read_training_data(fname, features=None):
file = open(fname)
params = ["radius", "texture", "perimeter","area","smoothness","compactness","concavity","concave points","symmetry","fractal dimension"];
stats = ["(mean)", "(stderr)", "(worst)"]
feature_labels = set([y+x for x in stats for y in params])
feature_map = {params[i]+stats[j]:j*len(params)+i for i in range(len(params)) for j in range(len(stats))}
if features is None: features = feature_labels
feature_vectors = {}
patient_diagnoses = {}
for line in file:
row = line.split(",")
patient_ID = int(row[0])
patient_diagnoses[patient_ID] = -1 if row[1]=='B' else +1
feature_vectors[patient_ID] = Vec(features, {f:float(row[feature_map[f]+2]) for f in features})
return rowdict2mat(feature_vectors), Vec(set(patient_diagnoses.keys()), patient_diagnoses)
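
The function above relies on Vec and rowdict2mat from the accompanying linear-algebra library. Below is a hedged, plain-dictionary sketch of the same parsing layout (the +2 column offset past the ID and diagnosis columns, and the 'B' -> -1 / otherwise +1 encoding), assuming the standard WDBC-style CSV; the helper name is illustrative.

def read_training_data_plain(fname):
    """Parse a WDBC-style CSV into ({patient_id: {feature: value}}, {patient_id: +1/-1})."""
    params = ["radius", "texture", "perimeter", "area", "smoothness", "compactness",
              "concavity", "concave points", "symmetry", "fractal dimension"]
    stats = ["(mean)", "(stderr)", "(worst)"]
    # feature label -> CSV column index, skipping the two leading columns (ID, diagnosis)
    feature_map = {p + s: j * len(params) + i + 2
                   for i, p in enumerate(params) for j, s in enumerate(stats)}
    feature_vectors, patient_diagnoses = {}, {}
    with open(fname) as f:
        for line in f:
            row = line.strip().split(",")
            patient_id = int(row[0])
            patient_diagnoses[patient_id] = -1 if row[1] == 'B' else +1
            feature_vectors[patient_id] = {lab: float(row[col]) for lab, col in feature_map.items()}
    return feature_vectors, patient_diagnoses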
|
[
"def read_training_data(fname, D=None):\n file = open(fname)\n params = [\"radius\", \"texture\", \"perimeter\",\"area\",\"smoothness\",\"compactness\",\"concavity\",\"concave points\",\"symmetry\",\"fractal dimension\"];\n stats = [\"(mean)\", \"(stderr)\", \"(worst)\"]\n feature_labels = set([y+x for x in stats for y in params])\n feature_map = {params[i]+stats[j]:j*len(params)+i for i in range(len(params)) for j in range(len(stats))}\n if D is None: D = feature_labels\n feature_vectors = {}\n patient_diagnoses = {}\n for line in file:\n row = line.split(\",\")\n patient_ID = int(row[0])\n patient_diagnoses[patient_ID] = -1 if row[1]=='B' else +1\n feature_vectors[patient_ID] = Vec(D, {f:float(row[feature_map[f]+2]) for f in D})\n return rowdict2mat(feature_vectors), Vec(set(patient_diagnoses.keys()), patient_diagnoses)",
"def read_patient_traces(filename):\r\n trace_data = []\r\n patient_ids = []\r\n first_line = True\r\n with open(filename) as f:\r\n for line in f:\r\n if first_line:\r\n patient_ids = line.split()\r\n patient_ids = map(int, patient_ids)\r\n first_line = False\r\n elif len(line) > 5:\r\n trace_data.append(line.rstrip('\\n'))\r\n return compute_pairwise_epi_distances(trace_data, patient_ids)",
"def read_data(filename):\n # open feature file, read each line, and fill in the sparse matrix\n i, j, j_max = 0, 0, 0\n data = []\n row_ind = []\n col_ind = []\n with open(filename) as file:\n for line in file:\n for c in line.split():\n j, feature = c.split(':')\n j = int(j)\n data.append(int(feature))\n row_ind.append(i)\n col_ind.append(j - 1)\n if j_max < j:\n j_max = j\n i += 1\n\n n = i # number of examples\n k = j_max # number of features\n\n feature_matrix = csc_matrix((data, (row_ind, col_ind)), shape=(n, k))\n return feature_matrix",
"def read_features_from_file(filename, desc_dim=132):\n\n print filename\n f = np.loadtxt(filename)\n\n if f.shape[0] == 0:\n f = np.zeros((1, desc_dim))\n print filename\n return f[:, :4], f[:, 4:] # feature locations, descriptors",
"def read_from_file() -> dict:\n\n adj_vertices_dict = {}\n matrix = []\n vertex = 1\n mf = open(\"matrix.txt\", \"r\")\n for line in mf:\n line = line.strip()\n line_list = line.split(\",\")\n line_list = [int(elm) for elm in line_list]\n matrix.append(line_list)\n for vertice in range(1, len(line_list) + 1):\n if line_list[vertice - 1] == 1:\n if vertex in adj_vertices_dict.keys():\n adj_vertices_dict[vertex][0].append(vertice)\n else:\n adj_vertices_dict[vertex] = [[vertice]]\n vertex += 1\n mf.close()\n for key, value in adj_vertices_dict.items():\n adj_vertices_dict[key].append(0)\n adj_vertices_dict[key].append(\n [0 for zero in range(0, len(adj_vertices_dict[key][0]))])\n if matrix_symetry_check(matrix):\n return adj_vertices_dict\n else:\n return None",
"def get_sentence_data(data_file):\n # Read sentences from file\n sents = []\n with open(data_file) as file:\n for line in file:\n # add them as arrays to make expansion easier\n sents.append(line.strip().split())\n \n # Get binary feature vects (d) and labels (l) from sents\n d = []\n l = []\n for line in sents:\n vect = numpy.zeros(feature_size)\n for i in line[1:]:\n i = i.split(\":\")\n word = i[0]\n value = i[1]\n #print word, value\n try:\n vect[features_index[word]] = float(value)\n except:\n pass\n l.append(line[0])\n d.append(vect)\n \n return d, l",
"def readDistances(fileName):\n infile = open(fileName, \"r\")\n distances = {}\n\n for line in infile:\n line = [i.strip() for i in line.split()]\n if not distances.has_key(line[0]):\n distances[line[0]] = {}\n distances[line[0]][line[1]] = float(line[2])\n\n if not distances.has_key(line[1]):\n distances[line[1]] = {}\n distances[line[1]][line[0]] = float(line[2])\n\n #Set diagonal to 0\n for key in distances.keys():\n distances[key][key] = 0\n\n infile.close()\n return distances",
"def read_feat(file):\n df = pd.read_csv(file, sep=\" \", names=[\"node_id\"] + list(range(0, 1364)))\n return df",
"def readSVCsig(filename):\n\n output = {}\n output['header']={}\n\n # read the header\n output['header']['commonHeader'], output['header']['referenceHeader'], \\\n output['header']['targetHeader'], linect = readSVCheader(filename)\n\n # read the data\n output['wavelength'], output['referenceDC'], output['targetDC'], \\\n output['reflectance'] = readSVCdata(filename, linect)\n\n return output",
"def VCF_to_sparseMat(vcf_file, tags=[\"AD\", \"DP\"], out_dir=None):\n\n # out samples, out_var, tag_files\n var_info = []\n tag_mat_list = []\n for _tag in tags:\n _dict = {\"data\": [], \"row\": [], \"col\": []}\n tag_mat_list.append(_dict)\n\n if vcf_file[-3:] == \".gz\" or vcf_file[-4:] == \".bgz\":\n infile = gzip.open(vcf_file, \"rb\")\n is_gzip = True\n else:\n infile = open(vcf_file, \"r\")\n is_gzip = False\n \n var_idx, obs_idx = 0, 0\n for line in infile:\n if is_gzip:\n line = line.decode('utf-8')\n if line.startswith(\"#\"):\n if line.startswith(\"#CHROM\"):\n samples = line.rstrip().split(\"\\t\")[9:]\n continue\n \n ## variants line\n var_idx += 1\n list_val = line.rstrip().split(\"\\t\")\n var_info.append(list_val[:8])\n FORMAT = list_val[8].split(\":\")\n \n tag_idx = []\n for _tag in tags:\n if _tag in FORMAT:\n tag_idx.append(FORMAT.index(_tag))\n else:\n tag_idx.append(None)\n\n for obs_idx in range(len(list_val[9:])):\n _samp_dat = list_val[9 + obs_idx]\n if _samp_dat == \".\":\n continue\n _samp_val = _samp_dat.split(\":\")\n for ii in range(len(tags)):\n if tag_idx[ii] is None:\n continue\n tag_dat = _samp_val[tag_idx[ii]]\n if (tag_dat != \".\" and tag_dat != \"0\" and \n tag_dat.count(\".,\") == 0):\n tag_mat_list[ii][\"data\"].append(tag_dat)\n tag_mat_list[ii][\"row\"].append(var_idx)\n tag_mat_list[ii][\"col\"].append(obs_idx + 1)\n infile.close()\n\n if out_dir is not None:\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n fid_obs = open(out_dir + \"/cellSNP.samples.tsv\", \"w\")\n fid_obs.writelines(\"\\n\".join(samples) + \"\\n\")\n fid_obs.close()\n\n fid_var = open(out_dir + \"/cellSNP.base.vcf\", \"w\")\n fid_var.writelines(\"##fileformat=VCFv4.2\\n\")\n fid_var.writelines(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n\")\n for _var_info in var_info:\n fid_var.writelines(\"\\t\".join(_var_info) + \"\\n\")\n fid_var.close()\n \n try:\n import shutil\n if shutil.which(\"bgzip\") is not None:\n bashCommand = \"bgzip -f %s\" %(out_dir + \"/cellSNP.base.vcf\")\n else:\n bashCommand = \"gzip -f %s\" %(out_dir + \"/cellSNP.base.vcf\")\n pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)\n pro.communicate()[0]\n except:\n print(\"sparse matrix: VCF uncmpressed.\")\n\n for ii in range(len(tags)):\n _mat = tag_mat_list[ii]\n _dat = _mat[\"data\"]\n _row = _mat[\"row\"]\n _col = _mat[\"col\"]\n fid = open(out_dir + \"/cellSNP.tag.%s.mtx\" %(tags[ii]), \"w\")\n fid.writelines(\"%\" + \n \"%MatrixMarket matrix coordinate integer general\\n\")\n fid.writelines(\"%\\n\")\n fid.writelines(\"%d\\t%d\\t%d\\n\" %(len(var_info), len(samples), \n len(_dat)))\n for jj in range(len(_dat)):\n fid.writelines(\"%d\\t%d\\t%s\\n\" %(_row[jj], _col[jj], _dat[jj]))\n fid.close()\n\n return var_info, samples, tag_mat_list",
"def parse_sigtyp_format(file_path, given_header=None):\n with open(file_path, 'r') as file:\n lines = file.readlines() \n lines = [i.rstrip() for i in lines]\n header = parse_header(lines[0])\n \n result = []\n if given_header is None: \n all_features = get_all_possible_features(lines[1:])\n else:\n all_features = given_header[7:] # if the header is already given, take only the features\n \n \n int_to_feature, features_to_int = feature_int_map(all_features)\n\n # iterate over all non header lines\n for line in lines[1:]: \n result.append(convert_line(line, features_to_int))\n\n # creates header\n for feature in int_to_feature:\n header.append(feature)\n\n return result, header",
"def process_detectors(filename):\n\n f=open(filename)\n det_col={}\n guide_lines = []\n ic = 0\n for line in f:\n cont = line.split(';')\n if ic==0:\n # parse header\n for key in cont:\n kr = key.strip()\n det_col[kr]=[]\n guide_lines.append(kr)\n ic+=1\n else:\n if line.find('yes')!=-1:\n continue\n for ind,keyl in enumerate(guide_lines):\n det_col[keyl].append(cont[ind])\n f.close()\n return det_col",
"def load(self, filename):\n\n # Separate first line from the rest\n with open(filename) as f:\n dimline, *datalines = f.readlines()\n\n mat = [list(map(int, line.split())) for line in datalines]\n dim = tuple(map(int, dimline.split()))\n\n return mat, dim",
"def load_dataset (fileName):\n # open(fileName).readline(): '1.000000 0.067732 3.176513'\n # numFeat = 2\n # numFeat = len(open(fileName).readline().split('\\t')) - 1\n fr = open(fileName)\n xArr, yArr = [], []\n\n for line in fr.readlines():\n lineArr = []\n # eg: ['1.000000', '0.067732', '3.176513']\n currentLine = line.strip().split('\\t') \n # X = [ [x0, x1], [x0, x1], .... ] (str -> float)\n for i in range(len(currentLine) - 1):\n lineArr.append(float(currentLine[i]))\n \n xArr.append(lineArr)\n # Y = [y1, y2, ... ]\n yArr.append(float(currentLine[-1]))\n\n return xArr, yArr",
"def parse_cmm_data(file_path):\n\toutput = {'specimen_code':'',\n\t\t\t 'datum_flatness':0,\n\t\t\t 'col_1_dist':0,\n\t\t\t 'col_1_flatness':0,\n\t\t\t 'col_1_parallelism':0,\n\t\t\t 'col_2_dist':0,\n\t\t\t 'col_2_flatness':0,\n\t\t\t 'col_2_parallelism':0,\n\t\t\t 'col_3_dist':0,\n\t\t\t 'col_3_flatness':0,\n\t\t\t 'col_3_parallelism':0,\n\t\t\t 'col_4_dist':0,\n\t\t\t 'col_4_flatness':0,\n\t\t\t 'col_4_parallelism':0}\n\n\tif not type(file_path) == str:\n\t\traise TypeError('Expecting argument of type string.')\n\n\twith open(file_path) as fs:\n\t\toutput['specimen_code'] = re.search('(\\d-\\d{6}-\\d).txt',fs.name).group(1)\n\t\tfile_contents = fs.read()\n\t \n\tpat_1 = r'==> Plane \\(1\\)\\n\\.\\.: Flatness\\nFlatness\\s+(\\d\\.\\d+)'\n\tpat_3 = r'==> Plane \\(3\\)\\n\\.\\.: Plane\\nZ\\s+(\\d\\.\\d+)\\nFlatness\\s+(\\d\\.\\d+)\\n\\n\\.\\.: Parallelism\\n.*\\nParallelism\\s+(\\d\\.\\d+)'\n\tpat_4 = r'==> Plane \\(4\\)\\n\\.\\.: Plane\\nZ\\s+(\\d\\.\\d+)\\nFlatness\\s+(\\d\\.\\d+)\\n\\n\\.\\.: Parallelism\\n.*\\nParallelism\\s+(\\d\\.\\d+)'\n\tpat_5 = r'==> Plane \\(5\\)\\n\\.\\.: Plane\\nZ\\s+(\\d\\.\\d+)\\nFlatness\\s+(\\d\\.\\d+)\\n\\n\\.\\.: Parallelism\\n.*\\nParallelism\\s+(\\d\\.\\d+)'\n\tpat_6 = r'==> Plane \\(6\\)\\n\\.\\.: Plane\\nZ\\s+(\\d\\.\\d+)\\nFlatness\\s+(\\d\\.\\d+)\\n\\n\\.\\.: Parallelism\\n.*\\nParallelism\\s+(\\d\\.\\d+)'\n\n\tre_1 = re.compile(pat_1)\n\tre_3 = re.compile(pat_3)\n\tre_4 = re.compile(pat_4)\n\tre_5 = re.compile(pat_5)\n\tre_6 = re.compile(pat_6)\n\t\n\tmatch_obj = re.search(re_1,file_contents)\n\toutput['datum_flatness'] = match_obj.group(1)\n\n\tmatch_obj = re.search(re_3,file_contents)\n\toutput['col_1_dist'] = match_obj.group(1)\n\toutput['col_1_flatness'] = match_obj.group(2)\n\toutput['col_1_parallelism'] = match_obj.group(3)\n\n\tmatch_obj = re.search(re_4,file_contents)\n\toutput['col_2_dist'] = match_obj.group(1)\n\toutput['col_2_flatness'] = match_obj.group(2)\n\toutput['col_2_parallelism'] =match_obj.group(3)\n\n\tmatch_obj = re.search(re_5,file_contents)\n\toutput['col_3_dist'] = match_obj.group(1)\n\toutput['col_3_flatness'] = match_obj.group(2)\n\toutput['col_3_parallelism'] = match_obj.group(3)\n\n\tmatch_obj = re.search(re_6,file_contents)\n\toutput['col_4_dist'] = match_obj.group(1)\n\toutput['col_4_flatness'] = match_obj.group(2)\n\toutput['col_4_parallelism'] = match_obj.group(3)\n\n\treturn output",
"def ParseFile(input_file):\n\n row_labels = []\n data_dict = {}\n\n # Open input file.\n with open(input_file) as f:\n header = f.readline().strip().split()\n samples = [x.split(\"_\")[0] for x in header[3:]] # Get the samples from the header.\n\n # Initialize entry in dict for each column.\n for samp in samples:\n data_dict[samp] = []\n\n for line in f:\n line = line.strip().split()\n label = MakeLabel(line[0:3]) # Make label from the first three elements of the line.\n\n i = 0 # Used as a counter to keep index for adding data to appropriate dicts.\n\n # Add data to appropriate dict.\n for item in line[3:]: # Ignore lines with no data.\n if item == \".\":\n break\n elif i == 0:\n row_labels.append(label) # Append the label to the list holding them.\n samp = samples[i] # Grab the corresponding sample.\n values = data_dict[samp] # Get the list of values for the sample.\n values.append(float(item)) # Add the new data point.\n data_dict[samp] = values # Reassign updated values list to the sample in the dict.\n i += 1 # Increment counter by one.\n\n return row_labels, data_dict",
"def Create_dic_from_file(file, vocab_size, seperator = ' '):\r\n stream = open(file, 'r')\r\n count = {}\r\n for line in stream:\r\n for element in line.replace(\"\\n\",\"\").split(seperator):\r\n if element in count:\r\n count[element] += 1\r\n else:\r\n count[element] = 1\r\n count = sorted(count.items(), key=lambda kv: kv[1],reverse=True)\r\n unk_count=0\r\n for c in count[vocab_size:]:\r\n unk_count += c[1]\r\n count = [('UNK', unk_count)] + count\r\n count = count[:vocab_size]\r\n dictionary = dict()\r\n for element, c in count:\r\n dictionary[element] = len(dictionary)\r\n count[0] = list(count[0])\r\n count[0][1] = unk_count\r\n count[0] = tuple(count[0])\r\n reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\r\n return count, dictionary, reversed_dictionary",
"def read_file(filename):\n adjacency_list = {}\n with open(filename, \"r\") as file:\n lines = file.read().strip().split(\"\\n\")\n\n for line in lines:\n line_data = list(map(int, line.strip().split(\"\\t\")))\n adjacency_list[line_data[0]] = line_data[1:]\n\n return adjacency_list",
"def read_embeddings(filename, verbose=0):\n embedding_index = {}\n embedding_file = open(filename, 'r', encoding=\"utf-8\")\n # header = list(map(int, embedding_file.readline().strip().split(' ')))\n for line in embedding_file:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embedding_index[word] = coefs\n embedding_file.close()\n return embedding_index"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Adds a dictionary entry to serializer_format_dict, with the given format as the key and the creator object as the value.
|
def register_format(self, format, creator):
self.serializer_format_dict[format] = creator
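
A brief sketch of how this registration might be used; SerializerFactory (the class holding serializer_format_dict) and JsonSerializer are assumptions, not names taken from the source.

# Hypothetical usage; SerializerFactory and JsonSerializer are assumptions.
import json

class JsonSerializer:
    def serialize(self, obj):
        return json.dumps(obj)

factory = SerializerFactory()
factory.register_format('JSON', JsonSerializer)   # key 'JSON', creator class as value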
|
[
"def add_format(self, key, value):\n # type: (str, str) -> None\n self.format_fields[key] = value",
"def register_format(self, serializer):\n self._serializers[serializer.format] = serializer",
"def add_to_format(existing_format, dict_of_properties, workbook):\n new_dict = {}\n for key, value in existing_format.__dict__.iteritems():\n if (value != 0) and (value != {}) and (value is not None):\n new_dict[key] = value\n del new_dict['escapes']\n return(workbook.add_format(dict(new_dict.items() +\n dict_of_properties.items())))",
"def add_to_format(existing_format, dict_of_properties, workbook):\n new_dict = {}\n for key, value in existing_format.__dict__.items():\n if value:\n new_dict[key] = value\n del new_dict['escapes']\n new_dict.update(dict_of_properties)\n return workbook.add_format(new_dict)",
"def special_format_field(self, obj, format_spec):\n raise NotImplementedError()",
"def format(self, record):\n\n if isinstance(record.msg, dict):\n message_dict = record.msg\n record.message = None\n else:\n message_dict = {}\n record.message = record.getMessage()\n\n # only format time if needed\n if \"asctime\" in self.requiredFields:\n record.asctime = self.formatTime(record, self.datefmt)\n\n log_record = OrderedDict()\n\n # Record the required fields\n for field in self.requiredFields:\n log_record[field] = record.__dict__.get(field)\n\n # Update log_record using dictionary sent in the message\n log_record.update(message_dict)\n\n # Reading keys and values sending in the extra argument\n for key, value in record.__dict__.items():\n # this allows to have numeric keys\n if (key not in self.skipFields\n and not (hasattr(key, \"startswith\") and key.startswith('_'))):\n log_record[key] = value\n\n return json.dumps(log_record)",
"def setFormat(format):",
"def num_format(self, name, format):\n self._formats[name] = super().add_format({'num_format': format})",
"def db_format(obj):\n return {\n 'integer': obj['integer'],\n 'small_integer': obj['small_integer'],\n 'ip': obj['ip'],\n 'enum': obj['protocol'],\n 'emoji_text': obj['emoji_text'],\n 'creation_date': obj.get('creation_date', datetime.utcnow()),\n 'modified_date': obj.get('modified_date', None)\n }",
"def _set_format(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"format\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"format must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"format\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__format = t\n if hasattr(self, '_set'):\n self._set()",
"def detect_format(_d):\n # TODO: detect the format of the dict\n return 3",
"def _update_data(entry, data, input_format_number):\n if input_format_number == 1:\n entry = {\n 'lastname': entry[0],\n 'firstname': entry[1],\n 'phonenumber': _normalize_phone_number(entry[2]),\n 'color': entry[3],\n 'zipcode': entry[4]\n }\n elif input_format_number == 2:\n entry = {\n 'firstname': entry[0].split()[0],\n 'lastname': entry[0].split()[1],\n 'color': entry[1],\n 'zipcode': entry[2],\n 'phonenumber': _normalize_phone_number(entry[3])\n }\n elif input_format_number == 3:\n entry = {\n 'firstname': entry[0],\n 'lastname': entry[1],\n 'phonenumber': _normalize_phone_number(entry[3]),\n 'zipcode': entry[2],\n 'color': entry[4]\n }\n\n sorted_entry = collections.OrderedDict(sorted(entry.items())) # sort each dictionary/entry by key (alphabetically)\n data['entries'].append(sorted_entry)",
"def elasticsearch_format(self, entry):\n date_obj = self.parse_date(entry[\"reg_date\"])\n entry[\"reg_date\"] = datetime.strftime(date_obj, \"%Y-%m-%dT%H:%M:%S.000Z\")\n # all bulk data need meta data describing the data\n meta_dict = {\n \"index\": {\n \"_index\": self.es_index,\n \"_type\": self.es_doc,\n \"_id\": entry[\"id\"]\n }\n }\n return meta_dict, entry",
"def _set_format(self, format):\n if isinstance(format, logging.Formatter):\n self.format = format\n else:\n self.format = logging.Formatter(format)",
"def add_info_formatter(self, formatter):\n self.info_formatters.append(formatter)",
"def setFormatter(self, fmt):\r\n pass",
"def format_create_config(request_data: dict):\n formatted_data = {\n \"id\": str(uuid.uuid4()),\n \"organization\": request_data.get('organization'),\n \"repo\": request_data.get('repository'),\n \"pipeline_steps\": request_data.get('config'),\n \"status\": Status.pending_status.value,\n \"created_by\": request_data.get('user'),\n \"created_date_time\": datetime.now().strftime(\n StatConf.date_format.value\n ),\n \"updated_by\": request_data.get('user'),\n \"updated_date_time\": datetime.now().strftime(\n StatConf.date_format.value\n ),\n \"verified\": Status.verified_status_no.value,\n \"outdated\": Status.outdated_status_no.value\n }\n return formatted_data",
"def messageFormat(cls, messageFormatId):\n if messageFormatId in cls.formats:\n return cls.formats[messageFormatId]\n else:\n newFormat = None\n #123 = \"{\"\n if messageFormatId == 123:\n newFormat = JsonMessageFormat()\n cls.formats[messageFormatId] = newFormat\n elif (messageFormatId & 0b11100000) == 0b10100000:\n newFormat = CborMessageFormat()\n cls.formats[messageFormatId] = newFormat\n return newFormat",
"def make_json_serializable(doc: Dict):\n for k, v in doc.items():\n if isinstance(v, datetime.date):\n doc[k] = v.strftime(\"%Y-%m-%d\")\n elif isinstance(v, datetime.datetime):\n doc[k] = v.isoformat()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieves the registered creator for the given serializer format (supplied as a string) and returns a newly created instance.
|
def get_serializer(self, format):
creator = self.serializer_format_dict.get(format.upper())
if not creator:
raise ValueError(format)
return creator()
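
A sketch pairing this lookup with the register_format method shown earlier; the factory and serializer class names are assumptions. Because the lookup upper-cases the requested format, keys registered in upper case are found regardless of the caller's casing.

# Hypothetical usage; SerializerFactory and JsonSerializer are assumptions.
factory = SerializerFactory()
factory.register_format('JSON', JsonSerializer)

serializer = factory.get_serializer('json')   # returns a JsonSerializer instance
try:
    factory.get_serializer('xml')             # unregistered -> raises ValueError('xml')
except ValueError as err:
    print('unsupported format:', err)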
|
[
"def get_serializer(self, format):\n serializer = self._serializers.get(format)\n if not serializer:\n raise ValueError(format)\n return serializer()",
"def make_primitive_serializer(format):\n structure = struct.Struct(format)\n def loads(b):\n return structure.unpack(b)[0]\n\n return Serializer(structure.pack, loads)",
"def _get_serializer(self, model, serializer):\n app_lbl = getattr(model, \"_meta\").app_label\n package = apps.get_app_config(app_lbl).module\n\n if \".\" in serializer: # pragma: no cover\n module, serializer = serializer.split(\".\", 1)\n\n else:\n module = \"serializers\"\n\n module = import_module(\".\".join((package.__name__, module)))\n return getattr(module, serializer)",
"def convertFormat(self, obj, outFormat='model'):\n if outFormat=='model' :\n return obj\n if outFormat=='json' :\n return json.dumps(obj, cls=self.getJSONEncoder(), indent=4)\n if outFormat=='python' :\n return json.loads(json.dumps(obj, cls=self.getJSONEncoder(), indent=4))",
"def _serialize(instance):\n return serializers.serialize(\"python\", [instance])[0]",
"def serializer_factory(model, serializer_class=serializers.ModelSerializer, attrs=None, meta=None):\n attrs = attrs or {}\n meta = meta or {}\n meta.setdefault(\"model\", model)\n attrs.setdefault(\"Meta\", type(str(\"Meta\"), (object,), meta))\n return type(str(\"%sSerializer\" % model.__name__), (serializer_class,), attrs)",
"def get_serde(\n file_format: str,\n) -> SerDe:\n available_format = AVAILABLE_FORMATS.get(file_format)\n if available_format:\n return available_format.ser_de()\n raise CoreException(f'Unsupported file format {file_format}.')",
"def from_str(cls, as_str):",
"def __init__(self, obj):\n self.obj = obj\n if isinstance(obj, VersionData):\n self.id = obj.id\n elif isinstance(obj, str):\n self.id = self.from_string(obj)\n elif isinstance(obj, Version):\n self.id = self.from_packaging_version(version=obj)\n elif isinstance(obj, SpecifierSet):\n self.id = self.from_specifier_set(version=obj)\n else:\n self.id = str(obj)",
"def load_model_from_string(s: bytes, format: Optional[Any] = None) -> ModelProto:\n return _deserialize(s, ModelProto())",
"def instance_from_json(self, data: str) -> Resource:\n return self.resource_class(self, json.loads(data))",
"def register_format(self, format, creator):\n\n self.serializer_format_dict[format] = creator",
"def deserialize(self, str):",
"def create_resource_from_json(resource_type, parent, json_string):\n if resource_type not in _RESOURCE_TYPE_MAP:\n return None\n resource_type = _RESOURCE_TYPE_MAP[resource_type]\n if not resource_type.get('can_create_resource'):\n return None\n\n return resource_type.get('class').from_json(parent, json_string)",
"def register_format(self, serializer):\n self._serializers[serializer.format] = serializer",
"def get_serializer_class(model_name, *args, **kwargs):\n if 'file' == model_name:\n return FileSerializer(*args, **kwargs)\n if 'image' == model_name:\n return ImageSerializer(*args, **kwargs)\n if 'video' == model_name:\n return VideoSerializer(*args, **kwargs)\n\n return TextSerializer(*args, **kwargs)",
"def create_deserializer():\n return TMXMLDeserializer()",
"def get_obj(self, obj_path):\n obj_path, obj_data, _ = self.http_req(obj_path)\n return RFObject(self, obj_path, obj_data)",
"def _from_api_repr(cls, resource):\n job_id = resource.get(\"jobId\")\n project = resource.get(\"projectId\")\n location = resource.get(\"location\")\n job_ref = cls(job_id, project, location)\n return job_ref"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Write list of times to output file 'image_upload_times.csv'
|
def WriteUploadTimes(self, ui_times):
with open('image_upload_times.csv', 'w', newline='') as file1:
ui_writer = csv.writer(file1)
ui_writer.writerow(['Camera Upload Times'])
for i in ui_times:
print(i)
ui_writer.writerow(i)
file1.close()
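
A hedged usage sketch; the owning class name and the shape of each entry (one iterable per CSV row) are assumptions.

# Hypothetical usage; TimingLogger and the row layout are assumptions.
logger = TimingLogger()
upload_times = [['cam01', 0.84], ['cam02', 1.07]]   # each inner list becomes one CSV row
logger.WriteUploadTimes(upload_times)               # writes image_upload_times.csv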
|
[
"def WriteVideoQueryTimes(self, vq_times):\n with open('image_upload_times.csv', 'w', newline='') as file1:\n ui_writer = csv.writer(file1)\n ui_writer.writerow(['Camera Upload Times'])\n for i in vq_times:\n ui_writer.writerow(i)\n \n file2.close()",
"def date_report():\n csv_out = open(make_path(\"heatmap.tsv\"), 'w')\n csv_out.write(\"day\\thour\\tvalue\\n\")\n for date, hours_list in enumerate(date_list):\n for hour, count in hours_list.items():\n to_write = \"{}\\t{}\\t{}\\n\".format(date+1, hour, count)\n csv_out.write(to_write)\n csv_out.flush()\n csv_out.close()",
"def write_output_csv(file, data):\r\n with open(file, \"w\") as f:\r\n f.write(\"Datum;Sunny Hours\\n\")\r\n for key, value in data.items():\r\n f.write(key + \";\" + \"{:f}\".format(value) + \"\\n\")",
"def write_to_csv(array):\n print 'Writing To CSV'\n today = str(datetime.now().date())\n out_file= today + '.out'\n with open(out_file, 'w') as file:\n file.write('timestamp,response_code,count\\n') # Write header\n for key, value in array.iteritems():\n response_code = key.split(':')[3]\n time_stamp = key[0:-4]\n count = value\n file.write('{0},{1},{2}\\n'.format(time_stamp,response_code,count)) # Not using csv as this is a lighter-weight solution\n file.close()\n print('CSV Output Complete @ {0}'.format(out_file))",
"def write_output(time, HRinst, HRavg, btc, filename=\"assignment02_output.csv\"):\n\n file = open(filename, \"w+\")\n header = \"Time (s), Instantaneous Heart Rate, Average Heart Rate,\" \\\n \" Brady/Tachycardia Occurrence\\n\"\n file.write(header)\n for i, hr in enumerate(HRavg):\n row = str(time[i]) + \",\" + str(HRinst[i]) + \",\" + str(HRavg[i]) + \",\"\\\n + btc[i] + \"\\n\"\n file.write(row)\n file.close()",
"def create_csv(csv_name, img_list):\r\n data = [[]]\r\n for img in img_list:\r\n #creates photo_class, runs identify_region, appends the list to the data list\r\n data.append(wing_photo(img).identify_region())\r\n\r\n #writes the data to a csv\r\n myFile = open(csv_name + '.csv', 'w')\r\n with myFile:\r\n writer = csv.writer(myFile)\r\n writer.writerows(data)",
"def timestamp_writer( logFile ):\n ts = standard_timestamp()\n with open( logFile, 'a' ) as csvfile:\n writer = csv.writer( csvfile )\n writer.writerow( [ ts ] )",
"def write(self, path):\n\n df_to_write = self.data.reset_index()[['tag_number', 'time', 'tag']]\n df_to_write.time = df_to_write.time.dt.strftime(\"%Y/%-m/%-d(%a)\\u3000%H:%M:%S\").str.lower()\n df_to_write.to_csv(path, header=None, index=None, line_terminator='\\n')",
"def write_list(path_out, image_list):\n filename = os.path.join(args.root, path_out)\n print('filename=', filename)\n with open(filename, 'w') as fout:\n for i, item in enumerate(image_list):\n line = '%s\\t' % item[1]\n line += '%f\\n' % item[2]\n fout.write(line)",
"def writeSpeedsFile(speedSeqs):\r\n csvfile = open(args.vals, 'w', newline='')\r\n writer = csv.writer(csvfile, delimiter=',')\r\n writer.writerow([\"seq\", 'position', 'speed_value'])\r\n from tqdm import tqdm\r\n for item in tqdm(speedSeqs):\r\n for row in item:\r\n writer.writerow(row)\r\n csvfile.close()",
"def exportar_csv(GTFS_DIR):\n print(Fore.GREEN + \"AVISO:\" + Fore.RESET +\n \"Exportando en la tabla 'stop_times' a \" + GTFS_DIR + \"stop_times.txt\")\n os.system('sqlite3 -header -csv /var/tmp/gtfs.sqlite \"SELECT * FROM stop_times_orden;\" > ' +\n GTFS_DIR + 'stop_times.txt')\n # Exporta de nuevo trips.txt para generar un csv sin la columna horario\n # que sobre (en SQLITE no existe DROP COLUMN para hacerlo)\n print(Fore.GREEN + \"AVISO:\" + Fore.RESET +\n \"Exportando en la tabla 'trips' a \" + GTFS_DIR + \"trips.txt\")\n sql = (\"SELECT route_id, service_id, trip_id, trip_headsign, direction_id, block_id, shape_id, wheelchair_accessible \"\n \"FROM trips;\")\n os.system('sqlite3 -header -csv /var/tmp/gtfs.sqlite \"' +\n sql + '\" > ' + GTFS_DIR + 'trips.txt')\n return",
"def write_the_file(filtered_stars: list):\n with open(f\"{datetime.now()}.csv\", 'w') as f:\n header = \"ID, RA, DEC, Magnitude, Dis_from_gv_point\\n\"\n f.write(header)\n for star in filtered_stars:\n row_data = f'{star.star_id}' + ',' + \\\n f'{star.ra},' + \\\n f'{star.dec},' + \\\n f'{star.mag},' + \\\n f'{star.euclidean_distance} \\n'\n f.write(row_data)",
"def write_15min_csv(symbols):\n for symbol in symbols:\n latest_data = getData(symb=symbol, interval='15min')\n print(f'Writing {symbol} data to csv...')\n latest_data.to_csv(f'Christian Stock Data/{symbol}_data.csv', header=True, index=True)\n print('Done. Waiting to avoid API limit...')\n sleep(20)",
"def save_into_csv_files(departure_list):\n try:\n with open(\"departures.csv\", \"w\") as f:\n f.write(\"Name;Start Date;Finish Date;Category;\\n\")\n\n for dep in departure_list:\n print(f\"{dep['name']};{dep['start_date']};{dep['finish_date']};{dep['category']};\\n\")\n f.write(f\"{dep['name']};{dep['start_date']};{dep['finish_date']};{dep['category']};\\n\")\n \n except Exception as ex:\n raise ex",
"def store_to_files(self):\n core_stats = \"{}/core_stats.txt\".format(self.final_path)\n with open(core_stats, 'w') as f:\n for time, cores in izip(self.stats_time, self.num_cores):\n f.write(str(time) + ',' + str(cores) + '\\n')\n\n delay_stats = \"{}/delay_stats.txt\".format(self.final_path)\n with open(delay_stats, 'w') as f:\n for key, value in izip(self.keys, self.values):\n f.write(str(key) + ',' + str(value) + '\\n')",
"def write_thermal_frames(frames, times, output_dir):\r\n num_frames = len(frames)\r\n for i in range(len(frames)):\r\n img = frames[i]\r\n t = times[i]\r\n output_filename = \"t-{}.png\".format(t)\r\n output_path = os.path.join(output_dir, output_filename)\r\n cv2.imwrite(output_path, img)",
"def write_csv(data, filepath):\n pass #TODO implement",
"def get_file_times(times_file):\n\n # -- get the file list\n dpath = os.path.join(os.environ[\"AUDUBON_DATA\"])\n flist = []\n for root, dirs, files in os.walk(dpath):\n if \".\" not in root:\n print(\"\\rcrawling {0}\".format(root)),\n sys.stdout.flush()\n if root.endswith(\"night\"):\n for tfile in files:\n if tfile.endswith(\".raw\"):\n flist.append(os.path.join(dpath, root, tfile))\n nfile = len(flist)\n print(\"got full list of {0} files\".format(nfile))\n\n\n # -- get the times and write to file\n fopen = open(times_file, \"w\")\n fopen.write(\"filename,time,year,month,day,hour,minutes,seconds\\n\")\n for ii, tfile in enumerate(flist):\n if (ii + 1) % 10000 == 0:\n print(\"\\rgetting times for file {0} of {1}\".format(ii + 1, nfile)),\n sys.stdout.flush()\n mtime = os.path.getmtime(tfile)\n dt = datetime.datetime.fromtimestamp(mtime)\n fopen.write(\"{0},{1},{2},{3},{4},{5},{6},{7}\\n\" \\\n .format(tfile, mtime, dt.year, dt.month, dt.day,\n dt.hour, dt.minute, dt.second))\n fopen.close()\n print(\"wrote filenames and times to file {0}\".format(times_file))\n\n return",
"def _write_events(events):\n df = pd.concat(events).sort_values(by=[\"timestamp\"])\n output_file = os.path.join(OUTPUT_DIR, \"events.csv\")\n df.to_csv(output_file, index=False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Write list of times to output file 'video_query_times.csv'
|
def WriteVideoQueryTimes(self, vq_times):
    with open('video_query_times.csv', 'w', newline='') as file1:
        vq_writer = csv.writer(file1)
        vq_writer.writerow(['Video Query Times'])
        for i in vq_times:
            vq_writer.writerow(i)

    file1.close()
|
[
"def WriteUploadTimes(self, ui_times):\n with open('image_upload_times.csv', 'w', newline='') as file1:\n ui_writer = csv.writer(file1)\n ui_writer.writerow(['Camera Upload Times'])\n for i in ui_times:\n print(i)\n ui_writer.writerow(i)\n \n file1.close()",
"def exportar_csv(GTFS_DIR):\n print(Fore.GREEN + \"AVISO:\" + Fore.RESET +\n \"Exportando en la tabla 'stop_times' a \" + GTFS_DIR + \"stop_times.txt\")\n os.system('sqlite3 -header -csv /var/tmp/gtfs.sqlite \"SELECT * FROM stop_times_orden;\" > ' +\n GTFS_DIR + 'stop_times.txt')\n # Exporta de nuevo trips.txt para generar un csv sin la columna horario\n # que sobre (en SQLITE no existe DROP COLUMN para hacerlo)\n print(Fore.GREEN + \"AVISO:\" + Fore.RESET +\n \"Exportando en la tabla 'trips' a \" + GTFS_DIR + \"trips.txt\")\n sql = (\"SELECT route_id, service_id, trip_id, trip_headsign, direction_id, block_id, shape_id, wheelchair_accessible \"\n \"FROM trips;\")\n os.system('sqlite3 -header -csv /var/tmp/gtfs.sqlite \"' +\n sql + '\" > ' + GTFS_DIR + 'trips.txt')\n return",
"def date_report():\n csv_out = open(make_path(\"heatmap.tsv\"), 'w')\n csv_out.write(\"day\\thour\\tvalue\\n\")\n for date, hours_list in enumerate(date_list):\n for hour, count in hours_list.items():\n to_write = \"{}\\t{}\\t{}\\n\".format(date+1, hour, count)\n csv_out.write(to_write)\n csv_out.flush()\n csv_out.close()",
"def write_output(time, HRinst, HRavg, btc, filename=\"assignment02_output.csv\"):\n\n file = open(filename, \"w+\")\n header = \"Time (s), Instantaneous Heart Rate, Average Heart Rate,\" \\\n \" Brady/Tachycardia Occurrence\\n\"\n file.write(header)\n for i, hr in enumerate(HRavg):\n row = str(time[i]) + \",\" + str(HRinst[i]) + \",\" + str(HRavg[i]) + \",\"\\\n + btc[i] + \"\\n\"\n file.write(row)\n file.close()",
"def write_output_csv(file, data):\r\n with open(file, \"w\") as f:\r\n f.write(\"Datum;Sunny Hours\\n\")\r\n for key, value in data.items():\r\n f.write(key + \";\" + \"{:f}\".format(value) + \"\\n\")",
"def writeSpeedsFile(speedSeqs):\r\n csvfile = open(args.vals, 'w', newline='')\r\n writer = csv.writer(csvfile, delimiter=',')\r\n writer.writerow([\"seq\", 'position', 'speed_value'])\r\n from tqdm import tqdm\r\n for item in tqdm(speedSeqs):\r\n for row in item:\r\n writer.writerow(row)\r\n csvfile.close()",
"def _write_events(events):\n df = pd.concat(events).sort_values(by=[\"timestamp\"])\n output_file = os.path.join(OUTPUT_DIR, \"events.csv\")\n df.to_csv(output_file, index=False)",
"def to_csv(self, outfile, units = ''):\n #alternative\n #np.saveas('frametime_out.csv', self.data, delimiter = ',')\n #alternative #2: use pandas.DataFrame.to_csv\n if self.data == None or self.units == None:\n raise DataError('Cannot export; no data!')\n if units == '':\n units = self.units\n filename = timestamp(outfile)\n with open(filename, 'wb') as out_file:\n writer = csv.writer(out_file, delimiter = ',')\n writer.writerow(['frame', 'start time', 'duration', 'stop time'])\n data = self.get_data(units)\n print data\n for frame in data:\n writer.writerow(frame)\n return filename",
"def write_to_csv(array):\n print 'Writing To CSV'\n today = str(datetime.now().date())\n out_file= today + '.out'\n with open(out_file, 'w') as file:\n file.write('timestamp,response_code,count\\n') # Write header\n for key, value in array.iteritems():\n response_code = key.split(':')[3]\n time_stamp = key[0:-4]\n count = value\n file.write('{0},{1},{2}\\n'.format(time_stamp,response_code,count)) # Not using csv as this is a lighter-weight solution\n file.close()\n print('CSV Output Complete @ {0}'.format(out_file))",
"def timestamp_writer( logFile ):\n ts = standard_timestamp()\n with open( logFile, 'a' ) as csvfile:\n writer = csv.writer( csvfile )\n writer.writerow( [ ts ] )",
"def write_15min_csv(symbols):\n for symbol in symbols:\n latest_data = getData(symb=symbol, interval='15min')\n print(f'Writing {symbol} data to csv...')\n latest_data.to_csv(f'Christian Stock Data/{symbol}_data.csv', header=True, index=True)\n print('Done. Waiting to avoid API limit...')\n sleep(20)",
"def durations_csv(begin, end):\n # Get the minimum and maximum datetimes from the database. These will be the\n # \"infinity\" values used in the name of the file (if one or both of\n # `begin`/`end` is `None`). This is helpful, so that the names of the\n # files always indicate the truth about what they contain.\n least, most = storage.select_maximal_time_range()\n\n # If there are no rows, then `least == most == None`, and we can return a\n # special no-rows CSV.\n if least is None:\n assert most is None\n return 'csv/empty.csv'\n else:\n if begin is None:\n begin = least\n if end is None:\n end = most\n csv_basename = f'{begin.isoformat()} to {end.isoformat()}.csv'\n\n rows = storage.select_durations(begin, end)\n if len(rows) == 0:\n return 'csv/empty.csv'\n\n path = 'csv/' + csv_basename\n header = ['begin', 'end', 'activity', 'milliseconds']\n write_csv(path, header, rows)\n return path",
"def write_to_submit_CSV(players,arrayOfResults):\n\twith open('../../datayasp/results.csv', 'w') as csvfile:\n\t\twriter = csv.DictWriter(csvfile, fieldresults)\n\t\twriter.writeheader()\n\t\tfor i in range(len(arrayOfResults)):\n\t\t\twriter.writerow({\"row ID\": players[i], \"battleneturl\":arrayOfResults[i]})",
"def write(self, path):\n\n df_to_write = self.data.reset_index()[['tag_number', 'time', 'tag']]\n df_to_write.time = df_to_write.time.dt.strftime(\"%Y/%-m/%-d(%a)\\u3000%H:%M:%S\").str.lower()\n df_to_write.to_csv(path, header=None, index=None, line_terminator='\\n')",
"def write_the_file(filtered_stars: list):\n with open(f\"{datetime.now()}.csv\", 'w') as f:\n header = \"ID, RA, DEC, Magnitude, Dis_from_gv_point\\n\"\n f.write(header)\n for star in filtered_stars:\n row_data = f'{star.star_id}' + ',' + \\\n f'{star.ra},' + \\\n f'{star.dec},' + \\\n f'{star.mag},' + \\\n f'{star.euclidean_distance} \\n'\n f.write(row_data)",
"def writePlayerCSV(self) -> None:\r\n with open(self.csv_location, \"w\", encoding=\"utf-16\") as file:\r\n for extracted_player in self._extracted_players:\r\n player_name = extracted_player\r\n print(self._extracted_players[extracted_player])\r\n assert len(self._extracted_players[extracted_player]) == 4 #ensures length is 5 to confirm the values can be unpacked\r\n player_long_name, player_position, player_rating, player_club = self._extracted_players[extracted_player]\r\n csv_format = re.compile(\r\n player_name + \",\" + player_long_name + \",\" + player_position + \",\" + player_rating + \",\" + player_club + \",\" + self._season + \"\\n\")\r\n file.write(csv_format.pattern) #Writes the compiled RegEx pattern with the values inserted\r",
"def export_results(file, results):\n\n with open(file, 'w+') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(('Time [ns]', 'Best path', 'Best distance',\n 'Current path', 'Current distance'))\n for r in results:\n best_distance = r.best.distance if r.best else -1\n current_distance = r.current.distance if r.current else -1\n writer.writerow((r.time,\n r.best.path if r.best else '',\n best_distance,\n r.current.path if r.current else '',\n current_distance))",
"def csv_output(self):\n\n # determine the file name\n csv_filename = \"subscription-%s-%siter-%s-%s.csv\" % (self.subscriptiontype,\n self.iterations,\n self.chart_type.lower(),\n self.testdatetime)\n\n # initialize the csv file\n csvfile_stream = open(csv_filename, \"w\")\n csvfile_writer = csv.writer(csvfile_stream, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n\n # iterate over the SIBs\n for sib in self.results.keys(): \n \n row = [sib]\n \n # add all the times\n for value in self.results[sib]:\n row.append(value)\n\n # add the mean, min, max and variance value of the times to the row\n row.append(round(nmean(self.results[sib]),3)) \n row.append(round(nmin(self.results[sib]),3)) \n row.append(round(nmax(self.results[sib]),3)) \n row.append(round(nvar(self.results[sib]),3)) \n\n # write the row\n csvfile_writer.writerow(row)\n \n # close the csv file\n csvfile_stream.close()",
"def writecsv_sync(output):\n\t\n\tglobal sync_csv_index\n\n\tfor key in output:\n\t\tcsv_file = open(OUTPUT_SYNC_DIR+\"/\"+key+\"__\"+str(sync_csv_index)+\"__\"+\"csv.csv\", \"w\")\n\t\twrite_header_filtrage(csv_file)\n\t\tfor dict_e in output[key]:\n\t\t\tcsv_file.write(str(dict_e[\"qu_Date\"]) + SEPARATOR + str(dict_e[\"qu_MeaNorm\"])+ \"\\n\")\n\t\t\t\n\t\tcsv_file.close()\n\t\n\tsync_csv_index += 1\n\n\n\treturn"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
request: the HTTP request; modelname: the table's model class; tableheaderdict: the table header names in dict format {}; filterquery: the query filter in dict format {}; returns the requested page of table rows.
|
def model_table(request, modelname, tableheaderdict, filterquery = {}, paginations = 10):
modeltable = modelname.objects.filter(delete_field='no', **filterquery)
paginator = Paginator(modeltable, paginations, 1)
page = request.GET.get('page')
try:
list_table = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
list_table = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        list_table = paginator.page(paginator.num_pages)
return list_table
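
A sketch of calling model_table from a Django view; the Product model, its fields, and the template name are assumptions, and whatever model is passed in must define the delete_field column used by the filter above.

# Hypothetical usage; Product, its fields, and the template are assumptions.
from django.shortcuts import render

def product_list(request):
    headers = {'name': 'Name', 'price': 'Price'}
    rows = model_table(request, Product, headers,
                       filterquery={'category': 'books'}, paginations=25)
    return render(request, 'product_list.html', {'headers': headers, 'rows': rows})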
|
[
"def model_query(model: db.Model) -> List[dict]:\n result = []\n fields = ['spin_mode', 'basis_set', 'method', 'method_family', 'program', 'version', 'solvation', 'solvent',\n 'embedding', 'periodic_boundaries', 'external_field', 'temperature', 'electronic_temperature']\n for field in fields:\n value = getattr(model, field)\n if value.lower() != \"any\":\n result.append({f\"model.{field}\": value})\n return result",
"def _tables_line(args):\n filter_ = args['filter'] if args['filter'] else '*'\n if args['dataset']:\n if args['project'] is None:\n datasets = [datalab.bigquery.Dataset(args['dataset'])]\n else:\n datasets = [datalab.bigquery.Dataset((args['project'], args['dataset']))]\n else:\n datasets = datalab.bigquery.Datasets(args['project'])\n\n tables = []\n for dataset in datasets:\n tables.extend([str(table) for table in dataset if fnmatch.fnmatch(str(table), filter_)])\n\n return _render_list(tables)",
"def filter_query(filters, request):\n for filter_name, db_filter in filters.items():\n if db_filter == \"all\":\n continue\n\n if filter_name == \"status\":\n # doesn't do += because query is a parameter of the function\n Agent.filter_agents_by_status(db_filter, request)\n elif filter_name == \"older_than\":\n # doesn't do += because query is a parameter of the function\n Agent.filter_agents_by_timeframe(db_filter, request)\n else:\n filter_con = {}\n filter_con[filter_name] = {}\n if isinstance(db_filter, list):\n filter_con[filter_name] = {\n }\n filter_con[filter_name][\"$in\"] = [re.compile(name.lower(), re.IGNORECASE) if filter_name != \"version\"\n else re.compile(re.sub( r'([a-zA-Z])([v])', r'\\1 \\2', name), re.IGNORECASE)\n for name in db_filter]\n else: # str\n filter_con[filter_name] = re.compile(name.lower(), re.IGNORECASE) if filter_name != \"version\" \\\n else re.compile(re.sub( r'([a-zA-Z])([v])', r'\\1 \\2', name), re.IGNORECASE)\n if filter_con:\n request[\"$and\"].append(filter_con)",
"def get_records(self, **kwargs):\n sql = 'SELECT * FROM %s WHERE' % (self.table)\n for key in kwargs:\n if not kwargs[key]:\n continue\n sql += ' %s=\"%s\" AND' % (key, kwargs[key])\n sql = sql[:-4]\n print(sql)\n return self.curs.execute(sql).fetchall()",
"def _get_table_list(self, con, objects):\n # basic sql\n tables_sql = (\"SELECT table_schema as s, table_name as t \"\n \"FROM tables WHERE is_system_table=false AND is_temp_table=false\")\n # extra where clause to find only specific tables\n where = []\n if len(objects) == 0:\n # Means all. We are happy with the default sql\n pass\n else:\n for o in objects:\n (schema, dot, table) = o.partition('.')\n if table == '':\n # we have a schema only\n where.append(\"table_schema='{s}'\".format(s=schema))\n else:\n # we have a table\n where.append(\n \"table_schema='{s}' AND table_name='{t}'\".format(\n t=table, s=schema))\n\n if len(where) > 0:\n tables_sql += ' AND ((' + ') OR ('.join(where) + '))'\n\n tret = con.execute(tables_sql).fetchall()\n return tret",
"def dataTable_request_to_sql(rqv, search_only=False):\n qs = \"\"\n args = list()\n \n # Ordering\n if 'order[0][column]' in rqv:\n col = rqv['order[0][column]']\n col_name = rqv['columns[{}][name]'.format(col)]\n dir = rqv['order[0][dir]']\n\n # search filter? \n if ('search[value]' in rqv) and rqv['search[value]'].strip():\n qs += \" where {} like ?\".format(col_name)\n args.append(rqv['search[value]'] + '%')\n \n # Just a basic search.\n if search_only:\n return qs, args\n \n # Ordering\n if 'order[0][column]' in rqv:\n qs += ' order by {}'.format(rqv['columns[{}][name]'.format(col)])\n if dir in ['dsc', 'des', 'desc']:\n qs += ' desc'\n \n # Limit? \n if 'length' in rqv:\n qs += ' LIMIT {}'.format(rqv['length'])\n if 'start' in rqv:\n qs += ' OFFSET {}'.format(rqv['start'])\n \n return qs, args",
"def get_all(self, table, **query):\n if query:\n name, query = query.popitem()\n self._cursor.execute(f\"\"\"\nSELECT * FROM {table} WHERE {name}=={query!r}\n\"\"\")\n else:\n self._cursor.execute(f\"\"\"\nSELECT * FROM {table}\n\"\"\")\n return self._cursor.fetchall()",
"def query_table(self, table_name, columns_queried = [],predicate= '' ):\r\n columns = '\"' + '\",\"'.join(columns_queried) +'\"'\r\n if predicate == '':\r\n query = \"SELECT %s from %s ;\" % (columns, table_name,)\r\n result = self.engine.execute(query)\r\n else:\r\n query = \"SELECT %s from %s where %s;\" % (columns, table_name, predicate)\r\n result = self.engine.execute(query)\r\n\r\n result = [[row[ci] for ci in columns_queried] for row in result]\r\n\r\n\r\n return result",
"def query():\n if request.method == 'GET':\n print(\"**************************query**************************\")\n args = request.args\n forms = request.form\n #print(\"forms: \" + str(forms.to_dict()))\n #print(\"getlist('HousingType')\" + str(args.getlist('HousingType')))\n for k, v in args.items():\n #print(f\"{k}: {v} (if there is a list, you will only see first item)\")\n print(f\"{k}: \" + str(args.getlist(k)))\n args['search_string']\n # access as a dictionary and key = 'search_string'\n # but now because there can be multiple checkboxes, need to use args.getlist('param_name')\n\n # package data to send for backend seaarch and\n # also persistent search and filters in saved_options\n saved_options = {}\n search = query_helper(args, saved_options, 'search_string', '', take_first_element=True)\n housingType = query_helper(args, saved_options, 'HousingType', [])\n sellOrRent = query_helper(args, saved_options, 'sellOrRent', [])\n petsAllowed = query_helper(args, saved_options, 'petsAllowed', [])\n\n #if they type nothing and choose no filters, show all listings\n if not (search or housingType or sellOrRent or petsAllowed):\n all_listings = db.session.query(Listings)\n all_listings = all_listings.filter(Listings.adminAppr == 1)\n results_list_of_dicts = postMaker(all_listings)\n else:\n results = backendSearch(search_string=search, housingType=housingType, sellOrRent=sellOrRent, petsAllowed=petsAllowed)\n # returns a list of dictionaries matching images with associated posts\n results_list_of_dicts = postMaker(results)\n #pretty_print_results_dictionaries_list(results_list_of_dicts)\n\n return results_list_of_dicts, saved_options",
"def test_filters_equals_table_headers(self): # Ignore PyDocStyleBear\n self.open_filters()\n self.check_filters_and_table_headers_equality()",
"def get_columns(filters):\r\n\r\n\tcolumns = [\r\n {\r\n \"fieldname\":\"item\",\r\n \"fieldtype\":\"Link\",\r\n \"label\":\"Item\",\r\n\t \"options\":\"Item\",\r\n\t \"width\":250\r\n },\r\n {\r\n \"fieldname\":\"warehouse\",\r\n \"fieldtype\":\"Link\",\r\n \"label\":\"Warehouse\",\r\n\t \"options\":\"Warehouse\",\r\n\t \"width\":150\r\n },\r\n {\r\n \"fieldname\":\"item_group\",\r\n \"fieldtype\":\"Link\",\r\n \"label\":\"Item Group\",\r\n\t \"options\":\"Item Group\",\r\n\t \"width\":150\r\n },\r\n\t{\r\n\t \"fieldname\":\"open_qty\",\r\n\t \"fieldtype\":\"Float\",\r\n\t \"label\":\"Opening Qty\",\r\n\t \"width\":150\r\n\t},\r\n {\r\n \"fieldname\":\"in_qty\",\r\n \"fieldtype\":\"Float\",\r\n \"label\":\"In Qty\",\r\n\t \"width\":150\r\n },\r\n {\r\n \"fieldname\":\"out_qty\",\r\n \"fieldtype\":\"Float\",\r\n \"label\":\"Out Qty\",\r\n\t \"width\":150\r\n },\r\n {\r\n \"fieldname\":\"balance_qty\",\r\n \"fieldtype\":\"Float\",\r\n \"label\":\"Balance Qty\",\r\n\t \"width\":150\r\n }\r\n\t]\r\n\r\n\treturn columns",
"def create_table_for(self, model):",
"def get_queryset(self):\n queryset = Trait.objects.all().order_by('-number_of_source_records')\n status = self.request.query_params.get('status', None)\n name = self.request.query_params.get('name', None)\n if status is not None:\n queryset = queryset.filter(status=status)\n if name is not None:\n queryset = queryset.filter(name__contains=name)\n return queryset",
"def read(self, table: str, sql_filter: str) -> list:\n t = sqlalchemy.text('SELECT * FROM {} WHERE {}'.format(table, sql_filter))\n rs = self.conn.execute(t)\n list_of_rows = list()\n for row in rs:\n row_as_dict = dict(row)\n list_of_rows.append(row_as_dict)\n\n return list_of_rows",
"def get_relevant_tables(tables, query):\n table_names = query.get_From()\n tables_to_keep = [t in tables if tables.get_name() in query]\n return tables_to_keep",
"def gettabledict(self, tablename):\n urlpath = '/' + tablename\n return self.getdict(urlpath)",
"def _parse_datatables_parameters(self) -> dict:\n # These are what this function is populating\n filter_params = {}\n include_fields = []\n order_by = None\n text_search = None\n\n # These are internal helpers\n query_columns = []\n hint_helper = []\n\n # Start by pulling out the query parameters\n columns_arg = self.get_query_arguments(\"columns\")\n order_arg = self.get_query_argument(\"order\", default=\"{}\")\n search_arg = self.get_query_argument(\"search\", default=\"{}\")\n child_arg = self.get_query_argument(\"include_children\", default=\"false\")\n hidden_arg = self.get_query_argument(\"include_hidden\", default=\"false\")\n\n # And parse them into usable forms\n columns = [json.loads(c) for c in columns_arg]\n order = json.loads(order_arg)\n search = json.loads(search_arg)\n include_children = bool(child_arg.lower() == \"true\")\n include_hidden = bool(hidden_arg.lower() == \"true\")\n\n # Cool, now we can do stuff\n if search and search[\"value\"]:\n text_search = '\"' + search[\"value\"] + '\"'\n\n if not include_children:\n filter_params[\"has_parent\"] = False\n\n if not include_hidden:\n filter_params[\"hidden__ne\"] = True\n\n for column in columns:\n query_columns.append(column)\n\n if column[\"data\"]:\n include_fields.append(column[\"data\"])\n\n if (\n \"searchable\" in column\n and column[\"searchable\"]\n and column[\"search\"][\"value\"]\n ):\n if column[\"data\"] in [\"created_at\", \"updated_at\"]:\n search_dates = column[\"search\"][\"value\"].split(\"~\")\n\n if search_dates[0]:\n filter_params[column[\"data\"] + \"__gte\"] = search_dates[0]\n if search_dates[1]:\n filter_params[column[\"data\"] + \"__lte\"] = search_dates[1]\n\n elif column[\"data\"] == \"status\":\n filter_params[column[\"data\"] + \"__exact\"] = column[\"search\"][\n \"value\"\n ]\n\n elif column[\"data\"] == \"comment\":\n filter_params[column[\"data\"] + \"__contains\"] = column[\"search\"][\n \"value\"\n ]\n\n else:\n filter_params[column[\"data\"] + \"__startswith\"] = column[\"search\"][\n \"value\"\n ]\n\n hint_helper.append(column[\"data\"])\n\n if order:\n order_by = query_columns[order.get(\"column\")][\"data\"]\n\n hint_helper.append(order_by)\n\n if order.get(\"dir\") == \"desc\":\n order_by = \"-\" + order_by\n\n return {\n \"filter_params\": filter_params,\n \"include_fields\": include_fields,\n \"text_search\": text_search,\n \"order_by\": order_by,\n \"hint\": self._determine_hint(hint_helper, include_children, include_hidden),\n }",
"def _prep_ausgabe(self):\n result = []\n if self._has_header:\n result = [self._header]\n return result + self._table",
"def __call__(self, modelObjects):\r\n for filter in self.filters:\r\n modelObjects = filter(modelObjects)\r\n return modelObjects"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function runs inference on the image at the given path.
|
def run_inference(self, path):
self.vgg_model.eval()
if use_gpu:
self.vgg_model = self.vgg_model.cuda()
img = Image.open(path).convert('RGB').copy()
# img = img.resize((900, 1200))
img = np.asarray(img)
shape = img.shape
img = img[:, :, ::-1] # switch to BGR
img = np.transpose(img, (2, 0, 1)) / 255.
img[0] -= means[0] # reduce B's mean
img[1] -= means[1] # reduce G's mean
img[2] -= means[2] # reduce R's mean
img = np.expand_dims(img, axis=0)
print("VGG Feature Extraction Image Dimension=", shape)
import time
start_time = time.time()
try:
if use_gpu:
inputs = torch.autograd.Variable(torch.from_numpy(img).cuda().float())
else:
inputs = torch.autograd.Variable(torch.from_numpy(img).float())
d_hist = self.vgg_model(inputs)[pick_layer]
d_hist = np.sum(d_hist.data.cpu().numpy(), axis=0)
d_hist /= np.sum(d_hist) # normalize
print("Time taken = ", time.time() - start_time)
return d_hist
except:
print(img.shape)
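
A hedged usage sketch; the wrapper class name is an assumption, and the module-level globals the method reads (use_gpu, means, pick_layer) are assumed to be configured elsewhere.

# Hypothetical usage; VGGNetFeat and the image path are assumptions.
extractor = VGGNetFeat()                      # any object exposing self.vgg_model
hist = extractor.run_inference('query.jpg')
if hist is not None:                          # the bare except above returns None
    print(hist.shape, hist.sum())             # normalized histogram, sums to ~1.0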
|
[
"def inference(path, model_inf):\n inference_dataset = ImageDetectionDataset()\n inference_dataset.load_inference_classes()\n class_names = inference_dataset.get_class_names()\n\n define_path(path, model_inf, class_names)",
"def run_on_path(model, image_path):\n\n with Image.open(image_path) as img:\n predictions = preprocess_and_run(model, img)\n\n return predictions",
"def run_inference(interpreter, input_image):\n # Get input and output tensors.\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n # Test model on random input data.\n interpreter.set_tensor(input_details[0][\"index\"], input_image)\n interpreter.invoke()\n output_data = interpreter.get_tensor(output_details[0][\"index\"])\n\n return output_data",
"def inference(self, mode, reference_image, image_size=250):\n reference_image = load_image(reference_image, image_size=image_size)\n reference_image = normalize_m11(reference_image)\n reals = self.create_real_pyramid(reference_image, num_scales=len(self.model))\n\n dir = create_dir(os.path.join(self.result_dir, mode))\n if mode == 'random_sample':\n z_fixed = tf.random.normal(reals[0].shape)\n for n in range(self.num_samples):\n fake = self.SinGAN_generate(reals, z_fixed, inject_scale=self.inject_scale)\n imsave(fake, dir + f'/random_sample_{n}.jpg') \n\n elif (mode == 'harmonization') or (mode == 'editing') or (mode == 'paint2image'):\n fake = self.SinGAN_inject(reals, inject_scale=self.inject_scale)\n imsave(fake, dir + f'/inject_at_{self.inject_scale}.jpg') \n\n else:\n print('Inference mode must be: random_sample, harmonization, paint2image, editing')",
"def run_classification_inference(agent, filename):\n # Check if the model for image classification is available\n # The application always uses the latest version of the model in the list of loaded models\n model_name_img_clf = config['mappings']['image-classification-app']\n model_is_loaded = any([m['name']==model_name_img_clf for m in models_loaded])\n if not model_is_loaded:\n logging.info('Model for image classification not loaded, waiting for deployment...')\n return None, None\n\n # Get the identifier of the currently loaded model\n model_dict_img_clf = next((x for x in models_loaded if x['name'] == model_name_img_clf), None)\n if not model_dict_img_clf:\n logging.info('Model for image classification not loaded, waiting for deployment...')\n return None, None\n model_id_img_clf = model_dict_img_clf['identifier']\n\n logging.info('\\nClassification inference with %s' % filename)\n image = PIL.Image.open(filename)\n image = image.convert(mode='RGB')\n\n # Preprocessing\n x_batchified = preprocess_image(image, IMG_WIDTH, IMG_HEIGHT)\n\n # Run inference with agent and time taken\n t_start = timer()\n y = agent.predict(model_id_img_clf, x_batchified)\n t_stop = timer()\n t_ms = np.round((t_stop - t_start) * 1000, decimals=0)\n\n agent.capture_data(model_id_img_clf, x_batchified, y)\n y = y.ravel()\n logging.info(y)\n\n img_clf_class_labels = ['normal', 'anomalous']\n\n for indx, l in enumerate(img_clf_class_labels):\n logging.info('Class probability label \"%s\": %f' % (img_clf_class_labels[indx], y[indx]))\n return y, t_ms",
"def infer_on_imgs(self, img_files, que, img_size=(300, 300)):\n if self.modelgraph_def is None:\n raise AttributeError('Model graph def not loaded.')\n else:\n with tf.Graph().as_default():\n\n # configure input data\n parse_img = partial(_tf_parse_img, img_size=img_size)\n imgs = _tf_get_iter_from_files(img_files, parse_img)\n img, filename = imgs.get_next(name='img')\n img = tf.expand_dims(img, 0)\n\n # connect input with model._detect\n detections = self._detect(img)\n\n with tf.Session() as sess:\n try:\n while True:\n que.put(sess.run([detections, filename]))\n except tf.errors.OutOfRangeError:\n print(\"Finish Inference.\")",
"def infinite_infer_run(): \n try:\n model_directory = \"/opt/awscam/artifacts/\"\n # model_name = \"mnist-8\" # onnx-model\n model_name = \"fingerModel.onnx\" # onnx-model\n\n # Create a local display instance that will dump the image bytes to a FIFO\n # file that the image can be rendered locally.\n local_display = LocalDisplay('480p')\n local_display.start()\n\n # When the ONNX model is imported via DeepLens console, the model is copied\n # to the AWS DeepLens device, which is located in the \"/opt/awscam/artifacts/\".\n model_file_path = os.path.join(model_directory, model_name)\n sess = rt.InferenceSession(model_file_path)\n \n while True:\n # Get a frame from the video stream\n ret, frame = awscam.getLastFrame()\n if not ret:\n raise Exception('Failed to get frame from the stream')\n \n # Preprocess the frame to crop it into a square and\n # resize it to make it the same size as the model's input size.\n input_img = preprocess(frame)\n\n # Inference.\n inferences = makeInferences(sess, input_img)\n inference = np.argmax(inferences)\n\n # TODO: Add the label of predicted digit to the frame used by local display.\n # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html\n # for more information about the cv2.putText method.\n # Method signature: image, text, origin, font face, font scale, color, and thickness \n # cv2.putText()\n cv2.putText(frame, str(inference), (20,120), cv2.FONT_HERSHEY_COMPLEX, 5, (243, 252, 61), 4)\n \n # 255, 0, 0\n # 61, 252, 243\n\n # Set the next frame in the local display stream.\n local_display.set_frame_data(frame)\n \n # Outputting the result logs as \"MQTT messages\" to AWS IoT.\n cloud_output = {}\n cloud_output[\"scores\"] = inferences.tolist()\n print(inference, cloud_output)\n print(input_img.shape, inferences.shape)\n\n except Exception as ex:\n # Outputting error logs as \"MQTT messages\" to AWS IoT.\n print('Error in lambda {}'.format(ex))\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(\"error details:\" + str(exc_type) + str(fname) + str(exc_tb.tb_lineno))",
"def infer_image(args):\n if args.image is None:\n error('Must specify image file with --image')\n if args.out is None:\n error('Must specify output image file with --out')\n if args.network_snapshot is None:\n error('Must specify trained network filename with --network-snapshot')\n # Note: there's no dnnlib.submission.submit_run here. This is for quick interactive\n # testing, not for long-running training or validation runs.\n validation.infer_image(tf_config, args.network_snapshot, args.image, args.out)",
"def infer_on_stream(args, client):\n # Initialize the Inference Engine\n infer_network = Network()\n\n # Set Probability threshold for detections\n prob_threshold = args.prob_threshold\n\n # Load the model through `infer_network`\n infer_network.load_model(args.model, args.device, CPU_EXTENSION, num_requests=0)\n\n # Get a Input blob shape\n _, _, in_h, in_w = infer_network.get_input_shape()\n\n # Get a output blob name\n _ = infer_network.get_output_name()\n \n # Handle the input stream\n try:\n cap = cv2.VideoCapture(args.input)\n except FileNotFoundError:\n print(\"Cannot locate video file: \"+ args.input)\n except Exception as e:\n print(\"Something else went wrong with the video file: \", e)\n \n cap.open(args.input)\n _, frame = cap.read()\n\n people_total_count = 0\n people_in_a_frame = 0\n\n g_elapsed = 0\n entre_ROI_xmin = 400\n entre_ROI_ymin = 450\n exit_ROI_xmin = 550\n exit_ROI_ymin = 410\n\n fps = FPS().start()\n\n # Process frames until the video ends, or process is exited\n while cap.isOpened():\n # Read the next frame\n flag, frame = cap.read()\n if not flag:\n break\n \n fh = frame.shape[0]\n fw = frame.shape[1]\n key_pressed = cv2.waitKey(50)\n \n image_resize = cv2.resize(frame, (in_w, in_h), interpolation = cv2.INTER_AREA)\n image = np.moveaxis(image_resize, -1, 0)\n\n # Perform inference on the frame\n infer_network.exec_net(image, request_id=0)\n \n # Get the output of inference\n if infer_network.wait(request_id=0) == 0:\n result = infer_network.get_output(request_id=0)\n for box in result[0][0]: # Output shape is 1x1x100x7\n conf = box[2]\n if conf >= prob_threshold:\n xmin = int(box[3] * fw)\n ymin = int(box[4] * fh)\n xmax = int(box[5] * fw)\n ymax = int(box[6] * fh)\n cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 0, 255), 3)\n\n if xmin < entre_ROI_xmin and ymax < entre_ROI_ymin: \n if fsm.current == \"empty\":\n # Count a people\n people_in_a_frame += 1\n people_total_count += 1\n # Start the timer\n start_time = time.perf_counter()\n # Person entered a room - fsm state change\n fsm.enter()\n print(xmax, ymax)\n if args.output == \"WEB\":\n # Publish people_count messages to the MQTT server\n client.publish(\"person\", json.dumps({\"count\": people_in_a_frame}))\n log.info(\"#########################\")\n log.info(\"Person entered into frame\")\n log.info(\"#########################\")\n\n if xmin > exit_ROI_xmin and ymax < exit_ROI_ymin:\n if fsm.current == \"standing\":\n # Change the state to exit - fsm state change\n fsm.exit()\n stop_time = time.perf_counter()\n elapsed = stop_time - start_time\n \n # Update average time\n log.info(\"elapsed time = {:.12f} seconds\".format(elapsed))\n g_elapsed = (g_elapsed + elapsed) / people_total_count\n log.info(\"g_elapsed time = {:.12f} seconds\".format(g_elapsed))\n \n people_in_a_frame = 0\n\n if args.output == \"WEB\":\n # Publish duration messages to the MQTT server\n client.publish(\"person/duration\", json.dumps({\"duration\": g_elapsed}))\n client.publish(\"person\", json.dumps({\"count\": people_in_a_frame}))\n log.info(\"#########################\")\n log.info(\"Person exited from frame\")\n log.info(\"#########################\")\n\n log.info(\"xmin:{} xmax:{} ymin:{} ymax:{}\".format(xmin, xmax, ymin, ymax))\n \n if args.output != \"WEB\": \n # Update info on frame\n info = [\n (\"people_ccount\", people_total_count),\n ]\n \n # loop over the info tuples and draw them on our frame\n for (i, (k, v)) in enumerate(info):\n text = \"{}: {}\".format(k, v)\n cv2.putText(frame, text, (10, fh - ((i * 20) + 
20)),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)\n \n if args.output == \"WEB\":\n # Push to FFmpeg server\n sys.stdout.buffer.write(frame)\n\n sys.stdout.flush()\n else:\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n #Break if escape key pressed\n if key_pressed == 27:\n break\n \n fps.update()\n \n # Release the out writer, capture, and destroy any OpenCV windows\n cap.release()\n\n if args.output == \"WEB\":\n client.disconnect()\n else:\n cv2.destroyAllWindows()\n \n fps.stop()\n\n print(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))",
"def inference():\r\n interpreter = MNN.Interpreter(\"../model/yolofastest.mnn\")\r\n interpreter.setCacheFile('.tempcache')\r\n config = {}\r\n # config['precision'] = 'low'\r\n\r\n # # create session\r\n # runtimeinfo, exists = MNN.Interpreter.createRuntime((config,))\r\n # print(runtimeinfo, exists)\r\n # session = interpreter.createSession(config, runtimeinfo)\r\n session = interpreter.createSession(config)\r\n\r\n # show session info\r\n # print('memory_info: %fMB' % interpreter.getSessionInfo(session, 0))\r\n # print('flops_info: %fM' % interpreter.getSessionInfo(session, 1))\r\n # print('backend_info: %d' % interpreter.getSessionInfo(session, 2))\r\n\r\n input_tensor = interpreter.getSessionInput(session)\r\n image = cv2.imread(\"../1.jpg\")\r\n # cv2 read as bgr format\r\n # image = image[..., ::-1]\r\n # change to rgb format\r\n image = cv2.resize(image, (INPUT_SIZE, INPUT_SIZE))\r\n # #resize to mobile_net tensor size\r\n # image = image - (103.94, 116.78, 123.68)\r\n # image = image * (0.017, 0.017, 0.017)\r\n # #preprocess it\r\n # image = image.transpose((2, 0, 1))\r\n # #change numpy data type as np.float32 to match tensor's format\r\n # image = image.astype(np.float32)\r\n # cv2 read shape is NHWC, Tensor's need is NCHW,transpose it\r\n tmp_input = MNN.Tensor((INPUT_SIZE, INPUT_SIZE, 3), MNN.Halide_Type_Float, \\\r\n image, MNN.Tensor_DimensionType_Tensorflow)\r\n input_tensor.copyFrom(tmp_input)\r\n interpreter.runSession(session)\r\n\r\n scores = \"layer125-conv\"\r\n scores2 = \"layer115-conv\"\r\n\r\n output_tensor0 = interpreter.getSessionOutput(session, scores)\r\n output_tensor1 = interpreter.getSessionOutput(session, scores2)\r\n\r\n # constuct a tmp tensor and copy/convert in case output_tensor is nc4hw4\r\n # tmp_output = MNN.Tensor((1, 1001), MNN.Halide_Type_Float, np.ones([1, 1001]).astype(np.float32), MNN.Tensor_DimensionType_Tensorflow)\r\n tmp_output0 = MNN.Tensor((1, 75, 20, 20), MNN.Halide_Type_Float, \\\r\n np.ones([1, 75, 20, 20]).astype(np.float32), MNN.Tensor_DimensionType_Tensorflow)\r\n tmp_output1 = MNN.Tensor((1, 75, 10, 10), MNN.Halide_Type_Float, \\\r\n np.ones([1, 75, 10, 10]).astype(np.float32), MNN.Tensor_DimensionType_Tensorflow)\r\n\r\n output_tensor0.copyToHostTensor(tmp_output0)\r\n output_tensor1.copyToHostTensor(tmp_output1)\r\n print(\"expect 983\")\r\n print(\"output belong to class: {}\".format(np.argmax(tmp_output0.getData())))\r\n print(\"output belong to class: {}\".format(np.argmax(tmp_output1.getData())))",
"def run(args):\n filepaths = [os.path.join(args.input_folder, f) for f in os.listdir(args.input_folder) if\n os.path.isfile(os.path.join(args.input_folder, f)) and f.endswith('.jpg')]\n for filepath in filepaths:\n # If it is not an image continue\n if not filepath.endswith(\".jpg\"):\n continue\n image_name = os.path.basename(filepath).split('.')[0]\n img = cv2.imread(filepath)\n parse_image(args, img, image_name)",
"def run_inference(self, data):\n model = self.model\n log.info(\"running inference\")\n\n model.inference_begin(data)\n\n while True:\n inputs = model.inference_preprocess()\n results = model(inputs, training=False)\n if model.inference_end(results):\n break\n\n metric = SemSegMetric()\n metric.update(\n tf.convert_to_tensor(model.inference_result['predict_scores']),\n tf.convert_to_tensor(data['label']))\n log.info(f\"Accuracy : {metric.acc()}\")\n log.info(f\"IoU : {metric.iou()}\")\n\n return model.inference_result",
"def inference(self, inputs, sess, mode):\n fetches = {}\n if mode == 'depth':\n fetches['depth'] = self.est_depth\n inputs_ph = self.inputs_depth\n if mode == 'egomotion':\n fetches['egomotion'] = self.est_egomotion\n inputs_ph = self.inputs_egomotion\n results = sess.run(fetches, feed_dict={inputs_ph: inputs})\n return results",
"def main(\n source: str,\n destination: str,\n checkpoint: str = \"pretrained/checkpoints/raft-sintel.ckpt\",\n ext: Optional[str] = None,\n overwrite: bool = False,\n iters: int = 24,\n visualize: bool = True,\n):\n destination = Path(destination)\n destination.mkdir(parents=True, exist_ok=overwrite)\n\n dataset = FlowInferenceDataset(source, ext=ext)\n dataloader = DataLoader(dataset, batch_size=1, num_workers=4)\n device = (\n torch.device(\"cuda\", 0) if torch.cuda.is_available() else torch.device(\"cpu\")\n )\n\n model = RAFT.load_from_checkpoint(checkpoint)\n model.to(device)\n\n for i, (img0, img1) in tqdm(enumerate(dataloader), total=len(dataset)):\n img0, img1 = img0.to(device), img1.to(device)\n\n padder = InputPadder(img0.shape)\n padded0, padded1 = padder.pad(img0, img1)\n _, flow = model(padded0, padded1, iters=iters, test_mode=True)\n\n assert flow.shape[0] == 1\n flow = padder.unpad(flow)[0]\n \n flow_raw_file = destination / f\"{i:06d}.flo\"\n optical_flow.write(flow_raw_file, flow)\n\n if visualize:\n img0 = img0[0] / 255.0\n img1 = img1[0] / 255.0\n flow_rgb = optical_flow.flow2rgb(flow)\n flow_rgb_file = flow_raw_file.with_suffix(\".png\")\n torchvision.utils.save_image([img0, img1, flow_rgb], flow_rgb_file)",
"def single_worker_inference(infer_model,\n ckpt,\n inference_input_file,\n inference_output_file,\n hparams):\n output_infer = inference_output_file\n\n # Read data\n infer_data = load_data(inference_input_file, hparams)\n\n with tf.Session(config=utils.get_config_proto(), graph=infer_model.graph) as sess:\n loaded_infer_model = model_helper.load_model(infer_model.model, ckpt, sess, \"infer\")\n sess.run(infer_model.iterator.initializer,\n feed_dict={\n infer_model.src_placeholder: infer_data,\n infer_model.batch_size_placeholder: hparams.infer_batch_size\n })\n # Decode\n utils.print_out(\"# Start decoding\")\n _decode_and_evaluate(\"infer\",\n loaded_infer_model,\n sess,\n output_infer,\n ref_file=None,\n subword_option=None,\n beam_width=hparams.beam_width,\n tgt_eos=hparams.eos,\n num_translations_per_input=hparams.num_translations_per_input)",
"def run_detection(self):\n self.rows = self.result_image.shape[0]\n self.cols = self.result_image.shape[1]\n self.cvNet.setInput(cv2.dnn.blobFromImage(self.input_image, size=self.rsize,\n swapRB=True, crop=False))\n self.cvOut = self.cvNet.forward()\n print(\"[INFO] Inference completed successfully.\")",
"def send_request(self, img_path):\n\n addr = \"http://\" + self.Helpers.confs[\"server\"][\"ip\"] + \\\n ':'+str(self.Helpers.confs[\"server\"][\"port\"]) + '/Inference'\n headers = {'content-type': 'image/jpeg'}\n\n self.Helpers.logger.info(\"Sending request for: \" + img_path)\n\n _, img_encoded = cv2.imencode('.png', cv2.imread(img_path))\n response = requests.post(\n addr, data=img_encoded.tostring(), headers=headers)\n response = json.loads(response.text)\n\n return response",
"def run_inference(retrain_path, model_types=[], all_lambdas=[], feature_group=False, sequential=False):\n for config in os.listdir(retrain_path):\n config_dir = os.path.join(retrain_path, config)\n if not os.path.isdir(config_dir):\n continue\n if 'bottleneck' in config:\n model_type = 'bottleneck'\n elif 'end2end' in config:\n model_type = 'end2end'\n elif 'use_attr' in config and 'onlyAttr' not in config:\n model_type = 'multitask'\n elif 'onlyAttr' not in config:\n model_type = 'simple_finetune'\n else:\n model_type = 'onlyAttr'\n if model_types and model_type not in model_types:\n continue\n all_val_acc = find_best_perf(os.path.join(config_dir, 'log.txt'))\n epoch = all_val_acc.index(max(all_val_acc))\n #epoch = round(epoch, -1) - 20\n if epoch < 0:\n print(config_dir, ' has not started training')\n print(epoch, '\\t', config)\n model_path = os.path.join(config_dir, '%d_model.pth' % epoch)\n if 'attr_loss_weight' in model_path:\n lambda_val = float(re.findall(r\"attr_loss_weight_\\d*\\.\\d+\", config_dir)[0].split('_')[-1])\n else:\n lambda_val = 1\n if any([t in model_types for t in ['multitask', 'end2end']]) and (all_lambdas and lambda_val not in all_lambdas):\n continue\n if 'NEW_SIGMOID_MODEL' in retrain_path or 'NEW_MODEL' in retrain_path:\n command = 'python inference_sigmoid.py -model_dir %s -eval_data test' % model_path\n else:\n command = 'python inference.py -model_dir %s -eval_data test' % model_path\n if feature_group:\n command += ' -feature_group_results' \n if 'use_attr' in model_path:\n command += ' -use_attr -n_attributes 112 -data_dir class_attr_data_10'\n if 'onlyAttr' in model_path:\n continue\n if 'bottleneck' in model_path:\n def find_onlyAttr_dir(retrain_path, model_path):\n if 'few_shots' in retrain_path:\n n_shots = re.findall(r\"\\d+_shot\", model_path)[0]\n if sequential:\n dir_name = [c for c in os.listdir(retrain_path) if 'onlyAttr_Ahat' in c and n_shots in c][0]\n else:\n dir_name = [c for c in os.listdir(retrain_path) if 'onlyAttr' in c and 'onlyAttr_Ahat' not in c and n_shots in c][0] \n else: \n if sequential:\n dir_name = [c for c in os.listdir(retrain_path) if 'onlyAttr_Ahat' in c][0]\n else:\n dir_name = [c for c in os.listdir(retrain_path) if 'onlyAttr' in c and 'onlyAttr_Ahat' not in c][0]\n return os.path.join(retrain_path, dir_name)\n\n onlyAttr_dir = find_onlyAttr_dir(retrain_path, model_path)\n val_acc = find_best_perf(os.path.join(onlyAttr_dir, 'log.txt'))\n model2_path = os.path.join(onlyAttr_dir, '%d_model.pth' % (val_acc.index(max(val_acc))))\n config_dir = os.path.join(retrain_path, config)\n command += (' -model_dir2 %s -bottleneck' % model2_path)\n if 'onlyAttr_Ahat' not in model2_path:\n command += ' -use_sigmoid'\n if 'adversarial' in retrain_path:\n command += ' -image_dir CUB_adversarial/CUB_fixed/test/'\n subprocess.run([command])\n #TODO: write test inference results to a separate folder",
"def __infer_eval(self, img):\n if not isinstance(img, Image):\n img = Image(img)\n\n # Bring image into the appropriate format for the implementation\n img = img.convert(format='channels_last', channel_order='bgr')\n\n img_mean = self.img_mean # Defaults to (128, 128, 128)\n img_scale = self.img_scale # Defaults to 1 / 256\n pad_value = self.pad_value # Defaults to (0, 0, 0)\n base_height = self.base_height # Defaults to 256\n scales = self.scales # Defaults to [1]\n stride = self.stride # Defaults to 8\n\n normed_img = normalize(img, img_mean, img_scale)\n height, width, _ = normed_img.shape\n scales_ratios = [scale * base_height / float(height) for scale in scales]\n avg_heatmaps = np.zeros((height, width, 19), dtype=np.float32)\n avg_pafs = np.zeros((height, width, 38), dtype=np.float32)\n\n pad = None\n for ratio in scales_ratios:\n scaled_img = cv2.resize(normed_img, (0, 0), fx=ratio, fy=ratio, interpolation=cv2.INTER_CUBIC)\n min_dims = [base_height, max(scaled_img.shape[1], base_height)]\n padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims)\n\n tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float()\n if \"cuda\" in self.device:\n tensor_img = tensor_img.to(self.device)\n if self.half:\n tensor_img = tensor_img.half()\n stages_output = self.model(tensor_img)\n\n stage2_heatmaps = stages_output[-2]\n heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0))\n if self.half:\n heatmaps = np.float32(heatmaps)\n heatmaps = cv2.resize(heatmaps, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)\n heatmaps = heatmaps[pad[0]:heatmaps.shape[0] - pad[2], pad[1]:heatmaps.shape[1] - pad[3]:, :]\n heatmaps = cv2.resize(heatmaps, (width, height), interpolation=cv2.INTER_CUBIC)\n avg_heatmaps = avg_heatmaps + heatmaps / len(scales_ratios)\n\n stage2_pafs = stages_output[-1]\n pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))\n if self.half:\n pafs = np.float32(pafs)\n pafs = cv2.resize(pafs, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)\n pafs = pafs[pad[0]:pafs.shape[0] - pad[2], pad[1]:pafs.shape[1] - pad[3], :]\n pafs = cv2.resize(pafs, (width, height), interpolation=cv2.INTER_CUBIC)\n avg_pafs = avg_pafs + pafs / len(scales_ratios)\n\n return avg_heatmaps, avg_pafs, scales_ratios, pad"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Low Pass Filter With Gain
|
import numpy as np


def lowPassFilterWithGain(inputSignal, freqCutoff, freqSampling, gain):
    # First-order IIR low-pass filter; `gain` is added as a constant offset to
    # every output sample.
    samplingPeriod = 1 / freqSampling
    filteredSignal = np.zeros_like(inputSignal)
    # Smoothing factor derived from the cutoff frequency and the sampling period.
    alpha = (2 * np.pi * freqCutoff * samplingPeriod) / (2 * np.pi * freqCutoff * samplingPeriod + 1)
    filteredSignal[0] = alpha * inputSignal[0] + gain
    for i in range(1, inputSignal.shape[0]):
        filteredSignal[i] = alpha * inputSignal[i] + (1 - alpha) * filteredSignal[i - 1] + gain
    return filteredSignal
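A minimal usage sketch (the signal, cutoff, sampling rate, and gain values below are illustrative assumptions, not part of the original record):

t = np.linspace(0.0, 1.0, 500)
noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(500)
# Keep the 5 Hz component of a signal sampled at 500 Hz, with no extra offset.
smoothed = lowPassFilterWithGain(noisy, freqCutoff=10.0, freqSampling=500.0, gain=0.0)
print(smoothed[:5])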
|
[
"def gyroLowPassFilter( bandwidth=None ):\n if bandwidth and bandwidth in [0,1,2,3,4,5,6,7]:\n i2c.writeto_mem(0x68, 0x1A, pack('b',\n (i2c.readfrom_mem(0x68, 0x1A, 1)[0] & ~7 ) | bandwidth\n ))\n return i2c.readfrom_mem(0x68, 0x1A, 1)[0] & 7",
"def low_pass_filter(self, low_pass_filter):\n\n self._low_pass_filter = low_pass_filter",
"def applyLowPass(x, fs, fc=30, N=4):\n wc = fc / (fs / 2)\n b, a = scipy.signal.butter(N, wc)\n return scipy.signal.filtfilt(b, a, x, method='gust')",
"def lowpass(a, cutoff, order, config):\n B, A = signal.butter(order, cutoff / (config[\"sample_rate\"] / 2), btype=\"lowpass\")\n return signal.lfilter(B, A, a, axis=0)",
"def applyHighPass(x, fs, fc=1.6, N=4):\n wc = fc / (fs / 2)\n b, a = scipy.signal.butter(N, wc, btype='highpass')\n return scipy.signal.filtfilt(b, a, x, method='gust')",
"def highpass(a, cutoff, order, config):\n B, A = signal.butter(order, cutoff / (config[\"sample_rate\"] / 2), btype=\"highpass\")\n return signal.lfilter(B, A, a, axis=0)",
"def filter(self, signal):\n return self._butter_lowpass_filter(signal, self.fc, self.fs)",
"def low_flux():\n caput('13IDA:m6.VAL', 0.10)\n caput('13IDA:m8.VAL', 0.20)\n caput('13IDA:m70.VAL', 0.03)\n set_i0amp_gain(2, 'nA/V')\n sleep(10)\n set_mono_tilt()",
"def eeg_filter(raw, lowpass=1, highpass=40, notch=True, method=\"fir\"):\n if notch == True:\n raw.notch_filter(np.arange(50, 451, 50),\n method=method)\n\n if lowpass is not None and highpass is not None:\n raw.filter(lowpass,\n highpass,\n method=method)\n return(raw)",
"def filter_acc_signal(sig, samp_freq=100):\n # Create the lowpass filter\n N = 4 # Filter order\n cutoff = 20 # cut-off frequency (Hz)\n fnyq = samp_freq / 2 # Nyquist frequency\n Wn = cutoff / fnyq # Filter parameter\n b, a = signal.butter(N, Wn, btype=\"low\")\n\n # Process signal\n sig = signal.filtfilt(b, a, sig)\n\n return sig",
"def SetPassLowFrequencyThreshold(self, _arg: 'bool const') -> \"void\":\n return _itkFrequencyBandImageFilterPython.itkFrequencyBandImageFilterICF2_SetPassLowFrequencyThreshold(self, _arg)",
"def process_gain(self):\n return 1",
"def update(self, pTargetAngle):\n return _almathinternal.LowPassFilter_update(self, pTargetAngle)",
"def gain(self, g):\n return self.normalize(0, 1, scale=g)",
"def SetPassLowFrequencyThreshold(self, _arg: 'bool const') -> \"void\":\n return _itkFrequencyBandImageFilterPython.itkFrequencyBandImageFilterICF3_SetPassLowFrequencyThreshold(self, _arg)",
"def apply_gain(infile, gain):\n fs1, x = monoWavRead(filename=infile)\n\n x = np.copy(x)\n x = x * (10 ** (gain / 20.0))\n x = np.minimum(np.maximum(-1.0, x), 1.0)\n #Change the output file name to suit your requirements here\n outfile_name = os.path.basename(infile).split(\".\")[0] + (\"_gain%s.wav\" % str(gain))\n outfile = os.path.join(outfile_path, outfile_name)\n write(filename = outfile, rate = fs1, data = x)\n if (FILE_DELETION):\n extractFeaturesAndDelete(outfile)",
"def lowpass(Wn, Q=1/sqrt(2), analog=False, output='ba'):\n # H(s) = 1 / (s**2 + s/Q + 1)\n b = np.array([1])\n a = np.array([1, 1/Q, 1])\n\n return _transform(b, a, Wn, analog, output)",
"def set_gain(self):\n DescStr = 'Setting Gain for AHF_Camera '\n if (self.AHFgainMode & 2):\n DescStr += 'from current illumination'\n else:\n DescStr += \"from ISO \" + str(self.iso)\n if (self.AHFgainMode & 1):\n DescStr += ' with white balancing'\n else:\n DescStr += \" with No white balancing\"\n print (DescStr)\n if (self.AHFgainMode & 1):\n self.awb_mode = 'auto'\n else:\n self.awb_mode = 'off'\n self.awb_gains = (1, 1)\n # if (self.AHFgainMode & 2):\n self.exposure_mode = 'auto'\n # else:\n # self.exposure_mode = 'off'\n super().start_preview(fullscreen=False, window=self.AHFpreview)\n sleep(2.0) # let gains settle, then fix values\n if (self.AHFgainMode & 1):\n savedGain = self.awb_gains\n self.awb_gains = savedGain\n self.awb_mode = \"off\"\n # if (self.AHFgainMode & 2):\n self.exposure_mode = 'off'\n super().stop_preview()\n print (\"Red Gain for white balance =\" + str(float(self.awb_gains[0])))\n print (\"Blue Gain for white balance =\" + str(float(self.awb_gains[1])))\n print (\"Analog Gain = \" + str(float(self.analog_gain)))\n print (\"Digital Gain = \" + str(float(self.digital_gain)))\n return",
"def low_pass_filter(adata: np.ndarray, bandlimit: int = 5000) -> np.ndarray:\n\n # TODO: compute Fourier transform of input data\n adata = fft(adata)\n # TODO: set high frequencies above bandlimit to zero, make sure the almost symmetry of the transform is respected\n adata[bandlimit+1:adata.size-bandlimit] = 0\n # TODO: compute inverse transform and extract real component\n var = np.conjugate(fft(np.conjugate(adata)))\n adata_filtered = np.real(1/adata.size * var)\n \n return adata_filtered",
"def GetPassLowFrequencyThreshold(self) -> \"bool const &\":\n return _itkFrequencyBandImageFilterPython.itkFrequencyBandImageFilterICF2_GetPassLowFrequencyThreshold(self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes the tangential distance from the next point at the current angle, divides it by dist so it has more effect when closer, and combines it with the difference in angle to get the error. measured is of the form (x1, y1, theta); set_v is of the form (x2, y2).
|
import math


def error_finder(measured, set_v):
    x1, y1, theta1 = measured
    x2, y2 = set_v
    dist = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
    # Angle from the current position to the set point, and its offset from the
    # current heading.
    alpha = math.asin((y2 - y1) / dist)
    beta = theta1 - alpha
    # Tangential (cross-track) distance to the set point.
    tangent = math.sin(beta) * dist
    dC = -5.0
    # Dividing by dist gives the correction more effect when the point is closer.
    # An additional heading term (e.g. tC1 * (theta2 - theta1)) was tried but is
    # left disabled here.
    return dC * tangent / dist
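A quick worked check with assumed values (not from the original record): a vehicle at the origin with heading 0 rad and next point (3, 4) gives dist = 5, alpha = asin(4/5), tangent = sin(-alpha) * 5 = -4, so the returned error is about -5.0 * (-4) / 5 = 4.0.

measured = (0.0, 0.0, 0.0)  # x1, y1, theta
set_v = (3.0, 4.0)          # x2, y2
print(error_finder(measured, set_v))  # ~ 4.0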
|
[
"def calculate_error(distance):\n\n u1 = pose2.x - pose1.x\n u2 = pose2.y - pose1.y\n v_error=np.sqrt((u1)**2+(u2)**2)-distance\n theta_goal = np.arctan2(u2,u1)\n u3 = theta_goal - pose1.theta\n theta_error = np.arctan2(np.sin(u3),np.cos(u3))\n return v_error,theta_error",
"def turning_point_set_distance(tps1, tps2):\n # Assume both sets alternate T/P and start with T, P, ...\n if len(tps1) < 2 or len(tps2) < 2:\n return float('inf')\n return (set_to_set_distance(tps1[0::2], tps2[0::2]) + \n set_to_set_distance(tps1[1::2], tps2[1::2]))",
"def calc_error(self, angles):\n errors = [abs(x-y) for x,y in zip(angles, self.target_angles)]\n error = sum(errors)\n error += self.penalty\n self.error = torch.tensor(error)",
"def diffAngle(self, point):\n a = self.getAngle(point)\n # pour connaitre le sens le plus proche, il suffit de regarder dans les 2 sens et on garde le plus petit\n # Les operateurs ternaires sont la uniquement pour eviter l utilisation d un operateur % qui serait plus lent\n right = a - self.angle if self.angle <= a else 360.0 - self.angle + a\n left = self.angle - a if self.angle >= a else self.angle + 360.0 - a\n if right < left:\n return right\n else:\n # on donne un angle negatif s il faut tourner a gauche\n return -left",
"def calculate_delta_t(initial, final, speed):\n delta_x = final[0] - initial[0]\n delta_y = final[1] - initial[1]\n distance = math.sqrt(delta_x * delta_x + delta_y * delta_y)\n return distance / speed",
"def rel_angle(vec_set1, vec_set2):\n return (\n vec_angle(vec_set2[0], vec_set2[1])\n / vec_angle(vec_set1[0], vec_set1[1])\n - 1\n )",
"def angular_diameter_distance_ZKDR(z1, z2, eta, method, cosmo, maxStep=0.01): \n Dset0=[0, cosmo.H0/(cosmo.H(z1)*(1+z1))]\n if method==0: #select Kayser method \n sol=scipy.integrate.solve_ivp(DyerRoeder, [z1,z2], Dset0, args=[eta,cosmo], max_step=maxStep)\n elif method==1: #select Linder method\n sol=scipy.integrate.solve_ivp(GeneralisedDyerRoeder, [z1,z2], Dset0, args=[eta,cosmo], max_step=maxStep)\n else:\n print('For the method parameter select either 0 : Kayser, or 1 : Linder')\n return (sol.y[0,len(sol.t)-1]*const.c/cosmo.H0).decompose()/(const.pc*1e6)*u.Mpc",
"def error(self, F):\n return abs((F(self.b) - F(self.a)) - self.approx)",
"def distance(self, other: \"Point\") -> float:\n return math.sqrt((self.x - other.x)**2 + (self.y - other.y)**2)",
"def det(v0, v1):\n return v0[0] * v1[1] - v1[0] * v0[1]",
"def _compute_dist_and_rot_dir(self, new_value, old_value):\r\n distance = self.unit_dist * abs(old_value - new_value)\r\n rotation_direction = Motor.CLOCKWISE if new_value > old_value else Motor.ANTICLOCKWISE\r\n return distance, rotation_direction",
"def grad(cls,point1,point2):\n return (point2[1]-point1[1])/(point2[0]-point1[0])",
"def config_dihedral(self):\n # first, set pd->po->pr right-handed\n yc = self.v[1] * (self.pr[0]-self.po[0])\n yr = self.v[0] * (self.pr[1]-self.po[1])\n if yc > yr:\n # update vector po->pd to pd->po\n self.po, self.pd = self.pd, self.po\n self.v = [-i for i in self.v]\n\n # second, calculate norm vector for plane pd->po->pr\n vr = [self.pr[i]-self.po[i] for i in range(3)]\n x = self.v[1]*vr[2] - self.v[2]*vr[1]\n y = self.v[2]*vr[0] - self.v[0]*vr[2]\n z = self.v[0]*vr[1] - self.v[1]*vr[0]\n nr = [x, y, z]\n rr = sum([i*i for i in nr])\n\n # third, calculate norm vector for plane pd->po->pt, be aware!\n # only in this way, the dihedral will be equal to vector angle\n vt = [self.pt[i]-self.po[i] for i in range(3)]\n x = self.v[1]*vt[2] - self.v[2]*vt[1]\n y = self.v[2]*vt[0] - self.v[0]*vt[2]\n z = self.v[0]*vt[1] - self.v[1]*vt[0]\n nt = [x, y, z]\n tt = sum([i*i for i in nt])\n\n # fourth, calculate dihedral\n tr = sum([nt[i]*nr[i] for i in range(3)])\n tmp = max(pow(tt*rr,0.5), 0.000000000001) # to avoid zero division\n self.value = np.arccos(tr/tmp) * 180.0 / np.pi\n\n # fifth, make pt->v->pr right-handed, where v is vector po->pd or pd->po,\n # which means, for vector v, pt is right-handed for pr, considering\n # dihedral between plane po,pd,pr and plane po,pd,pt is less than 180.0\n zc = nr[2] * nt[1]\n zr = nr[1] * nt[2]\n if zc > zr:\n self.po, self.pd = self.pd, self.po\n self.v = [-i for i in self.v]\n\n if self.want == 'more':\n self.info = 'dihedral rotation, righthandness, more/anticlockwise'\n self.lower = self.value\n if self.threshold is None:\n tmp = 1.5 if self.ratio is None else self.ratio\n self.higher = self.value * tmp\n else:\n self.higher = self.threshold\n else:\n self.info = 'dihedral rotation, righthandness, less/clockwise'\n self.po, self.pd = self.pd, self.po\n self.v = [-i for i in self.v]\n self.higher = self.value\n if self.threshold is None:\n tmp = 0.1 if self.ratio is None else self.ratio\n if self.flip is True: tmp = -tmp\n self.lower = self.value * tmp\n else:\n if self.flip is True: self.threshold = -self.threshold\n self.lower = self.threshold\n\n self._calc_num()",
"def distPointToLine(point, line):\n\n [xp, yp] = point\n [a, c] = line\n b = -1\n\n return abs((a*xp + b*yp + c) / np.linalg.norm([a, b]))",
"def distance(point, line):\n \n return point.y - line(point.x)",
"def calc_theta_and_ef(self, vehicle_state, waypoints, goal_heading, goal_velocity):\n\n ############# Calculate closest point to the front axle based on minimum distance calculation ################\n # Calculate Position of the front axle of the vehicle based on current position\n fx = vehicle_state[0] + self.wheelbase * math.cos(vehicle_state[2])\n fy = vehicle_state[1] + self.wheelbase * math.sin(vehicle_state[2])\n position_front_axle = np.array([fx, fy])\n\n # Find target index for the correct waypoint by finding the index with the lowest distance value/hypothenuses\n #wpts = np.vstack((self.waypoints[:, self.conf.wpt_xind], self.waypoints[:, self.conf.wpt_yind])).T\n nearest_point_front, nearest_dist, t, target_index = nearest_point_on_trajectory(position_front_axle, waypoints)\n\n # Calculate the Distances from the front axle to all the waypoints\n distance_nearest_point_x = fx - nearest_point_front[0]\n distance_nearest_point_y = fy - nearest_point_front[1]\n vec_dist_nearest_point = np.array([distance_nearest_point_x, distance_nearest_point_y])\n\n ################### Calculate the current Cross-Track Error ef in [m] ################\n # Project crosstrack error onto front axle vector\n front_axle_vec_rot_90 = np.array([[math.cos(vehicle_state[2] - math.pi / 2.0)],\n [math.sin(vehicle_state[2] - math.pi / 2.0)]])\n\n # vec_target_2_front = np.array([dx[target_index], dy[target_index]])\n\n # Caculate the cross-track error ef by\n ef = np.dot(vec_dist_nearest_point.T, front_axle_vec_rot_90)\n\n ############# Calculate the heading error theta_e normalized to an angle to [-pi, pi] ##########\n # Extract heading on the raceline\n # BE CAREFUL: If your raceline is based on a different coordinate system you need to -+ pi/2 = 90 degrees\n theta_raceline = goal_heading[target_index] + np.pi/2\n\n # Calculate the heading error by taking the difference between current and goal + Normalize the angles\n theta_e = pi_2_pi(theta_raceline - vehicle_state[2])\n\n # Calculate the target Veloctiy for the desired state\n planned_veloctiy = goal_velocity[target_index]\n\n return theta_e, ef, target_index, planned_veloctiy",
"def get_sweep_line_properties(self):\n # if self.pt3 is not None:\n # try:\n # self.d = find_circle(\n # x1=0,\n # y1=0,\n # x2=self.pt2.x,\n # y2=self.pt2.z,\n # x3=self.pt3.x,\n # y3=self.pt3.z,\n # ) # [[h,v] , r]\n #\n # except ZeroDivisionError:\n # return Exception(\n # \"Zero div error. Point 3 not valid to construct curve line\"\n # )\n # # procedure\n # # get tangent at origin\n # self.zeta = 0\n # # get tangent at end of curve line (intersect with second construction line)\n #\n # else:\n # construct straight line sweep path instead\n\n # procedure to identify straight line segment pinpointing length of grillage\n points = [(self.pt1.x, self.pt1.z), (self.pt2.x, self.pt2.z)]\n x_coords, y_coords = zip(*points)\n A = np.vstack([x_coords, np.ones(len(x_coords))]).T\n m, c = np.linalg.lstsq(A, y_coords, rcond=None)[0]\n self.m = round(m, self.decimal_lim)\n # self.c = 0 # default 0 to avoid arithmetic error\n zeta = np.arctan(\n m\n ) # initial angle of inclination of sweep line about mesh origin\n self.zeta = zeta / np.pi * 180 # rad to degrees\n\n return self.zeta, self.m, self.c",
"def distance(p1: sdl2.SDL_Point, p2: sdl2.SDL_Point) -> float:\n\n distances = xy_distances(p1, p2)\n return math.sqrt(distances.x**2 + distances.y**2)",
"def perp_distance(point, line):\n \n return (point.y - line(point.x)) / math.sqrt(1.0 + line.slope ** 2)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Use getattr(obj, self._name) as the default getter if no getter is decorated or provided at init.
|
def _default_getter(self, obj):
try:
return getattr(obj, self._name)
except TypeError:
raise
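The record above comes from a larger descriptor class that is not shown here; a minimal self-contained sketch of how such a default getter can plug into a descriptor might look like the following (the Keeper and Point names and their attributes are assumptions made only for illustration):

class Keeper:
    """Hypothetical stripped-down descriptor with a getattr-based default getter."""

    def __init__(self, name, getter=None):
        self._name = "_" + name
        # Fall back to the default when no getter is decorated or passed at init.
        self._getter = getter if getter is not None else self._default_getter

    def _default_getter(self, obj):
        return getattr(obj, self._name)

    def __get__(self, obj, owner=None):
        if obj is None:
            return self
        return self._getter(obj)


class Point:
    x = Keeper("x")

    def __init__(self, x):
        self._x = x


print(Point(3).x)  # prints 3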
|
[
"def __get__(self, obj, cls=None):\n return self._getter(obj)",
"def _lazyprop(self):\r\n if not hasattr(self, attr_name):\r\n setattr(self, attr_name, func(self))\r\n return getattr(self, attr_name)",
"def __getattr__(self, key):\n if key.startswith(\"_\"):\n return object.__getattribute__(self, key)\n\n if key in self._kwargs:\n return self._kwargs[key]\n else:\n if key not in self._defaults:\n raise AttributeError(key)\n\n return self._defaults[key]",
"def getattr(object, name, default=None): # known special case of getattr\n pass",
"def _attr_or_key(obj, name, _isinstance=isinstance, _dict=dict, getter=getattr):\n if _isinstance(obj, _dict):\n return obj.get(name)\n else:\n return getter(obj, name, None)",
"def getter(self, func):\n mode, ignored = self.getattr_mode\n self.set_getattr_mode(mode, func)\n return func",
"def getter_factory(self, attr):\n def getter(obj):\n current_locale = self.cast_locale(obj, self.current_locale)\n try:\n return getattr(obj, attr.key)[current_locale]\n except (TypeError, KeyError):\n default_locale = self.cast_locale(\n obj, self.default_locale\n )\n try:\n return getattr(obj, attr.key)[default_locale]\n except (TypeError, KeyError):\n return None\n return getter",
"def __getattr__(self, attr):\n if attr.startswith('_'):\n return DispatchBaseClass.__getattr__(self, attr) \n \n try:\n extendedPropMap = self._prop_map_get_ex_\n except AttributeError:\n extendedPropMap = {}\n \n if attr in extendedPropMap:\n return extendedPropMap[attr](self)\n \n value = DispatchBaseClass.__getattr__(self, attr)\n if attr.endswith('s') and hasattr(self.api, attr):\n try:\n value = getattr(self.api, attr)(value)\n except:\n pass\n return value",
"def get(self, obj):\n return getattr(obj, self.attr)",
"def __getattribute__(self, name):\n # shortcut for things we don't want to go into the prototype chain\n if name in (\n '__class__',\n '__dict__',\n '__metachao_bind__',\n '__metachao_prototype__',\n ):\n return object.__getattribute__(self, name)\n\n # no binding, if served from instance dictionary\n selfdict = self.__dict__\n if name in selfdict:\n return selfdict[name]\n\n # check class' members for properties\n attr = dict((k, v) for k, v in getmembers(self.__class__)\n if k == name and isinstance(v, property)\n ).get(name, ())\n\n # enter prototype chain\n if attr is ():\n prototype = self.__metachao_prototype__\n attr = getattr(prototype, name)\n\n # get to real function in case of methods\n attr = getattr(attr, 'im_func', attr)\n\n # bind descriptors to instance or whatever they are supposed to\n # bind to\n if hasattr(attr, '__get__'):\n bindto = self.__metachao_bind__.get(name)\n if bindto is None:\n bindto = self\n attr = attr.__get__(bindto, bindto.__class__)\n\n return attr",
"def __getattribute__(self, name):\n if name in type(self).visible_attributes:\n return object.__getattribute__(self, name)\n #\n return dict.setdefault(self, name, self.default__value__)",
"def get_value_for(self, instance):\n if callable(self.prop_getter):\n return self.prop_getter(instance)\n return getattr(instance, self.prop_getter)",
"def __getattr__(self, attr):\n\n if attr == '__field_names__':\n return object.__getattribute__(self, attr)\n\n if hasattr(self, '__field_names__') and attr in self.__field_names__ and attr not in self.data:\n self.refresh()\n\n return object.__getattribute__(self.data, attr)",
"def _default_setter(self, obj, value):\n try:\n setattr(obj, self._name, value)\n except TypeError:\n raise",
"def __getattr__(self, key):\n if key.startswith('_'):\n return object.__getattribute__(self, key)\n\n if key in self.__dict__:\n return object.__getattribute__(self, key)\n else:\n properties = object.__getattribute__(self, 'properties')\n return getattr(properties, key)",
"def getter_of(self, name):\n return f\"{self._getter}({name!r})\"",
"def __getattr__(self, name):\n proxy_func = lambda *args, **kwargs: self.call(\n self.prefix + name,\n args,\n kwargs,\n one_way=self.one_way\n )\n return proxy_func",
"def __getattr__(self, attrname):\r\n \r\n # If the value was set, when asked give this value,\r\n # not the original value\r\n if self._updates.has_key(attrname):\r\n return self._updates[attrname]\r\n \r\n if not self._fields.has_key(attrname):\r\n raise AttributeError(\"%s not available\" % attrname)\r\n # Look up the value in the _fields\r\n data = self._fields.get(attrname,None)\r\n \r\n if data is None:\r\n return data\r\n else:\r\n \r\n # if we are dealing with a sub resource and we have not \r\n # already made the call to inflate it - do so\r\n if self.sub_resources.has_key(attrname) and isinstance(data, dict):\r\n \r\n _con = SubResourceAccessor(self.sub_resources[attrname].get(\"klass\", ResourceObject), \r\n data, self._connection, \r\n self)\r\n \r\n # If the subresource is a list of objects\r\n if not self.sub_resources[attrname].get(\"single\", False):\r\n _list = []\r\n for sub_res in _con.enumerate():\r\n _list.append(sub_res)\r\n self._fields[attrname] = _list\r\n \r\n # if the subresource is a single object \r\n else:\r\n self._fields[attrname] = _con.get(\"\")\r\n \r\n # Cast all dicts to Mappings - for . access\r\n elif isinstance(data, dict):\r\n val = Mapping(data)\r\n self._fields[attrname] = val\r\n \r\n return self._fields[attrname]\r\n \r\n raise AttributeError",
"def safer_getattr(object, name, default=None, getattr=getattr):\n if name in ('format', 'format_map') and (\n isinstance(object, str) or\n (isinstance(object, type) and issubclass(object, str))):\n raise NotImplementedError(\n 'Using the format*() methods of `str` is not safe')\n if name.startswith('_'):\n raise AttributeError(\n '\"{name}\" is an invalid attribute name because it '\n 'starts with \"_\"'.format(name=name)\n )\n return getattr(object, name, default)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Use setattr(obj, self._name, value) as the default setter if no setter is decorated or provided at init.
|
def _default_setter(self, obj, value):
try:
setattr(obj, self._name, value)
except TypeError:
raise
|
[
"def _set_attr_or_key(obj, name, value, _isinstance=isinstance, _dict=dict, setter=setattr):\n if _isinstance(obj, _dict):\n obj[name] = value\n else:\n setter(obj, name, value)",
"def __setattr__(self, name, value):\n if name in self.INTERNAL_ATTRS:\n super(Settings, self).__setattr__(name, value)\n else:\n self.set_attr(name, value)",
"def __setattr__(self, name, value):\n attr = object.__getattribute__(self, name)\n if name in self._fields:\n attr.value = value\n else:\n object.__setattr__(self, name, value)",
"def __setattr__(self, key, val):\n try:\n current = object.__getattribute__(self, key)\n object.__setattr__(self, key, val)\n except AttributeError:\n properties = object.__getattribute__(self, 'properties')\n if hasattr(properties, key):\n object.__setattr__(properties, key, val)\n else:\n object.__setattr__(self, key, val)",
"def __set__(self, instance, value):\n instance.__dict__[self.name] = value",
"def __setattr__(self, name, value):\n # -- To be here means the base class does not have it declared, so\n # -- attempt to check the components of the composite class, and\n # -- set the first we find\n if '_components' in self.__dict__:\n for component in self.__dict__['_components']:\n if hasattr(component, name):\n setattr(component, name, value)\n return\n\n # -- To get here means non of the components implement\n # -- the attribute, so we just apply the attribute to\n # -- ourselves.\n self.__dict__[name] = value",
"def __setattr__(self, attr, value, at=None, label=\"assignment\"):\n if attr in self._fields:\n if self._fields[attr].deprecated is not None:\n fullname = _joinNamePath(self._name, self._fields[attr].name)\n warnings.warn(f\"Config field {fullname} is deprecated: {self._fields[attr].deprecated}\",\n FutureWarning, stacklevel=2)\n if at is None:\n at = getCallStack()\n # This allows Field descriptors to work.\n self._fields[attr].__set__(self, value, at=at, label=label)\n elif hasattr(getattr(self.__class__, attr, None), '__set__'):\n # This allows properties and other non-Field descriptors to work.\n return object.__setattr__(self, attr, value)\n elif attr in self.__dict__ or attr in (\"_name\", \"_history\", \"_storage\", \"_frozen\", \"_imports\"):\n # This allows specific private attributes to work.\n self.__dict__[attr] = value\n else:\n # We throw everything else.\n raise AttributeError(\"%s has no attribute %s\" % (_typeStr(self), attr))",
"def setter(self, setter):\n if setter is NoDefault:\n\n def paramSetter(p_self, value):\n self.assigned = SINCE_ANYTHING\n p_self.assigned = SINCE_ANYTHING\n setattr(p_self, self.fieldName, value)\n\n elif setter is None:\n\n def paramSetter(p_self, value):\n raise ParameterError(\n \"Cannot set value for parameter `{}` on {} to `{}`, it has a restricted setter.\".format(\n self.name, p_self, value\n )\n )\n\n elif callable(setter):\n\n def paramSetter(p_self, value):\n self.assigned = SINCE_ANYTHING\n p_self.assigned = SINCE_ANYTHING\n setter(p_self, value)\n\n else:\n raise ParameterDefinitionError(\n \"The setter for parameter `{}` must be callable. Setter attribute: {}\".format(\n self.name, setter\n )\n )\n\n self._setter = paramSetter\n\n return self",
"def setter(self, func):\n if self.cached:\n raise ValueError(\n \"Cached property are read-only, but a setter was \" \"specified.\"\n )\n self.set_setattr_mode(SetAttr.Property, func)\n return func",
"def getSetter(self, obj, indexName):\n\n # DefaultDublinCoreImpl:\n setterName = \"set\" + indexName\n if getattr(aq_base(obj), setterName, None) is not None:\n return getattr(obj, setterName)\n\n # other\n fieldName = self.fieldNameForIndex(indexName)\n field = None\n\n # Dexterity\n if IDexterityContent.providedBy(obj):\n if fieldName.startswith(\"get\"):\n fieldName = fieldName.lstrip(\"get_\")\n # heuristics\n fieldName = fieldName[0].lower() + fieldName[1:]\n return lambda value: setattr(aq_base(obj), fieldName, value)\n\n # AT and discussions left\n if IComment.providedBy(obj):\n # Discussion\n field = getattr(obj, \"getField\", None)\n else:\n # Archetype\n field = getattr(aq_base(obj), \"getField\", None)\n # Archetypes:\n if field:\n fieldObj = field(fieldName) or field(fieldName.lower())\n if not fieldObj and fieldName.startswith(\"get\"):\n fieldName = fieldName.lstrip(\"get_\")\n fieldName = fieldName[0].lower() + fieldName[1:]\n fieldObj = obj.getField(fieldName)\n if fieldObj is not None:\n return fieldObj.getMutator(obj)\n return None\n\n return None",
"def basic_set_object(obj, value):\n obj.value = value",
"def write_attr(self, fieldname, value):\n meth = self.cls._read_from_class(\"__setattr__\")\n return meth(self, fieldname, value)",
"def _apply_value(self, value):\n\n setattr(self._obj, self._attr, value)",
"def __setattr__(self, key, val):\n if key.startswith(\"_\"):\n object.__setattr__(self, key, val)\n else:\n self._kwargs[key] = val",
"def update_attribute(self, instance, name, field, value):\n field_setter = getattr(self, f\"set_{name}\", None)\n if field_setter:\n field_setter(instance, name, field, value)\n else:\n setattr(instance, name, value)",
"def _attribute_inverter(obj, name, value):\n setattr(obj, name, value)\n return True",
"def __setattr__(self, name, value):\r\n if self.is_parameter(value):\r\n self.register_parameter(name, value)\r\n elif isinstance(value, Module):\r\n self.register_module(name, value)\r\n\r\n object.__setattr__(self, name, value)",
"def setter(self, _setter):\n self._most_recent_linker = self._linked_setter\n if _setter is None:\n self._setter = self._default_setter\n if self._chain:\n self._chain_setter = self._setter\n self._setter = self.chain_setter\n self.linker(self)\n else:\n self._setter = _setter\n if self._chain:\n self._chain_setter = self._setter\n self._setter = self.chain_setter\n return self",
"def setBasicAttribute(self, value, name):\n getattr(self, \"_\" + name + \"_value_\").setValue(value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Use delattr(obj, self._name) as the default deleter if no deleter is decorated or provided at init.
|
def _default_deleter(self, obj):
try:
delattr(obj, self._name)
except AttributeError:
pass
except TypeError:
raise
|
[
"def deleter(self, func):\n self.set_delattr_mode(DelAttr.Property, func)\n return func",
"def _linked_deleter(self, obj):\n self._hidden_deleter(obj)\n self._update_linked(obj)",
"def deleter(self, _deleter):\n self._most_recent_linker = self._linked_deleter\n if _deleter is None:\n self._deleter = self._default_deleter\n if self._chain:\n self._chain_deleter = self._deleter\n self._deleter = self.chain_deleter\n self.linker(self)\n else:\n self._deleter = _deleter\n if self._chain:\n self._chain_deleter = self._deleter\n self._deleter = self.chain_deleter\n return self",
"def _del(self, _del):\n\n self.__del = _del",
"def __delattr__(self, name):\n # First check if is a valid DICOM name and if we have that data element\n tag = tag_for_name(name)\n if tag and tag in self:\n del self[tag]\n # If not a DICOM name (or we don't have it), check for regular instance name\n # can't do delete directly, that will call __delattr__ again!\n elif name in self.__dict__:\n del self.__dict__[name]\n # Not found, raise an error in same style as python does\n else:\n raise AttributeError, name",
"def _del_original_func(obj):\n _original_funcs.pop(obj.__name__, None)\n if torch.cuda.is_available(): # clean up the cached function\n torch.cuda.synchronize()\n torch.cuda.empty_cache()",
"def __del__(self):\n self.clear()",
"def __del__(self):\n self.cleanup()",
"def onDeinit(self):",
"def remove_method(self, obj, met_name):\n # XXX FIXME, we should also search into base classes\n\n try:\n self[obj].remove(met_name)\n except KeyError:\n # Key Error <=> obj is not in self, so let's try obj.__class__\n try:\n self[obj.__class__].remove(met_name)\n except (AttributeError, KeyError):\n raise",
"def deinit(self) -> None:\n self._is_deinited()\n self._pad.deinit()\n self._cursor.deinit()\n self._cursor = None\n self._event = None",
"def __delattr__( self, name ):\n\n # Figure out the algorithm's index:\n algIndex = -1\n index = 0\n for alg in self:\n if alg.name() == name:\n algIndex = index\n break\n index += 1\n pass\n\n # Check if we were successful:\n if algIndex == -1:\n raise AttributeError( 'Algorithm/sequence with name \"%s\" was not ' \\\n 'found' % name )\n \n # Remove the element from the base class:\n super( AnaAlgSequence, self ).__delattr__( name )\n\n # Now remove the elements from the member lists of this class:\n del self._algorithmMeta[ algIndex ]\n pass",
"def popattr(obj, name, default=klass.sentinel):\n try:\n return obj.__dict__.pop(name)\n except KeyError:\n if default is not klass.sentinel:\n return default\n # force AttributeError to be raised\n getattr(obj, name)",
"def free(self):\n attr = self.getAttributes()\n for a in attr:\n delattr(self, a)",
"def _make_finalizer(self):\n overloads = self.overloads\n targetctx = self.targetctx\n # Early-bind utils.shutting_down() into the function's local namespace\n # (see issue #689)\n def finalizer(shutting_down=utils.shutting_down):\n # The finalizer may crash at shutdown, skip it (resources\n # will be cleared by the process exiting, anyway).\n if shutting_down():\n return\n # This function must *not* hold any reference to self:\n # we take care to bind the necessary objects in the closure.\n for func in overloads.values():\n try:\n targetctx.remove_user_function(func)\n targetctx.remove_native_function(func)\n except KeyError:\n # Not a native function (object mode presumably)\n pass\n\n return finalizer",
"def test_obj_del(self):\n \n # This test is known to fail on win32.\n # See ticket https://bugs.launchpad.net/bugs/366334\n src = (\"class A(object):\\n\"\n \" def __del__(self):\\n\"\n \" print 'object A deleted'\\n\"\n \"a = A()\\n\")\n self.mktmp(src)\n tt.ipexec_validate(self.fname, 'object A deleted')",
"def test_delattr(self):\n\n st_struct = struct.WritableObjectProxy()\n st_struct.hi = True\n st_struct.bye = False\n\n assert st_struct.hi is True\n assert st_struct.bye is False\n\n del st_struct.bye\n\n assert 'bye' not in st_struct\n with self.assertRaises(AttributeError):\n st_struct.bye\n\n # try deleting an invalid attr\n with self.assertRaises(AttributeError):\n del st_struct.i_was_never_here_lol",
"def __del__(self):\n self.trace_stop()\n self.clientobj = None\n while self.clients:\n self.clients.pop()\n # Call base class destructor\n super(NFSUtil, self).__del__()",
"def __del__(self):\n self.__class__.reference_count -= 1\n if self.__class__.reference_count == 0:\n print \"Number of reference_count is 0, Deleting cached objec ...\"\n del self.__class__.cached_object\n print 'Deleted object count of object = ', self.__class__.reference_count"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the keeper and derive _name / doc from the init arguments or from the decorated function.
|
def keeper(self, _keeper):
self._most_recent_linker = self._linked_keeper
self._attribute_name_of_class_instance = _keeper if _keeper is None else name_to_snake_case(
_keeper.__qualname__.split(".")[-2]
)
if _keeper:
self._name = "_" + _keeper.__name__ if self._name is None else self._name # First pref is init name arg
self.__doc__ = _keeper.__doc__ if self.doc is None else self.doc # First pref is init doc arg
else:
if self.doc:
self.__doc__ = self.doc
if self._chain:
self._chain_keeper = _keeper
self._keeper = self.chain_keeper
else:
self._keeper = _keeper
return self
|
[
"def __init__(self,name, parent=None, **meta):\n self.__innerset__(\n name=name,\n meta=meta,\n parent=parent,\n childs={},\n hooks=[],\n )",
"def __init__(self, root, **kw):\n self.root = root\n for k, v in kw.iteritems():\n setattr(self, k, v)",
"def __init__(self):\n self.set_sections()\n for name, section in self.iteritems():\n section.basename = name # set section basename\n section.page = self",
"def __init__(self, name, *args, **kwargs):\n self.name = name\n self.path = os.path.join(KIM_SCHEMAS_DIR,name+'.json')\n super(Schema,self).__init__(self.path,flag='r',*args,**kwargs)",
"def __init__(self):\n self.basename = self.basename or self.__class__.__name__.lower()\n self.set_fields()",
"def __init__(self):\n\n self.name = self.get_param(\"name\", \"rosweld\")\n rospy.init_node(self.name)\n\n self.publishers = {}",
"def __init__(self, obj, holdername=\"metadata_\"):\n if isinstance(obj, basestring):\n obj = pm.PyNode(obj)\n self.obj = self.node = obj\n self.namespace = holdername\n for attr in self.obj.listAttr(ud=True):\n attr_name = str(attr.name().split(\".\")[-1])\n if self.namespace not in attr_name:\n continue\n if self._validate_key(attr_name):\n # pass skip = True, otherwise parameters would override with\n # raw values. Internal use only!\n safe_name = attr_name.replace(self.namespace, \"\")\n self.__setattr__(safe_name, decode(attr.get(), attr), skip=True)",
"def __init__(self, type):\n\t\tself.setter = Committer.get_ospray_setter(type)\n\t\tself.name = None",
"def __init__(self, corpus_name=CORPUS_NAME_DEFAULT):\n self.corpus_name = corpus_name\n self._dictionary = None",
"def __init__(self):\n self.name = self.__class__.__name__.lower()",
"def __init__(self, tree):\n self.tree = tree\n ControlRecord = self._record(\"Control\", Record, tree.records.control)\n DataRecord = self._record(\"Data\", Record, tree.records.data)\n DataRecords = defaultdict(lambda: DataRecord)\n for branch in subtree_walk(tree.segments.idoc.segments):\n name = branch.name\n DataRecords[name] = self._record(name, DataRecord, branch)\n ns = {\n \"__slots__\": [],\n \"ControlRecord\": ControlRecord,\n \"DataRecord\": DataRecord,\n \"DataRecords\": DataRecords,\n }\n self.doc = type(self.tree.segments.idoc.name, (IDoc,), ns)",
"def __init__(self, nested: Any, key: str, *args: set, **kwargs: dict) -> None:\n super().__init__(nested, many=True, *args, **kwargs)\n self.key = key",
"def __init__(self):\n\n self._styleDict = {}",
"def _configure_using_fluent_definition(self):\r\n definition = Parser.parse(self.signature)\r\n\r\n self._config.set_name(definition[\"name\"])\r\n\r\n for name, flags, description, default in definition[\"arguments\"]:\r\n self._config.add_argument(name, flags, description, default)\r\n\r\n for long_name, short_name, flags, description, default in definition[\"options\"]:\r\n self._config.add_option(long_name, short_name, flags, description, default)",
"def __init__(self):\n this = _coin.new_SoAppearanceKit()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def docroot(self, docroot):\n\n self._docroot = docroot",
"def __init__(self):\n object.__init__(self)\n self._parent = None",
"def __init__(self):\n self.__parser = SpaCyParser()\n self.__word_substitutor = WordSubstitutor()",
"def __init__(self, print_root):\n self.print_root = print_root\n self.disable = 0\n self.emittedNoHandlerWarning = 0\n self.printerDict = {}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Called in place of the plain getter when the getter is the linker. (Unlike the default, it updates the linked dependents on every access.)
|
def _linked_getter(self, instance):
try:
self._hidden_getter(instance)
except AttributeError:
raise
else:
self._update_linked(instance)
|
[
"def go_for_prop_loading(self, go_for_prop_loading):\n\n\n self._go_for_prop_loading = go_for_prop_loading",
"def _after_import(self):\n return",
"def post_load(self):\n # get all model methods after loading\n self._set_model(self.model)",
"def __clear_dependencies__(self):\n try:\n local = self.__get_local__()\n except:\n raise Exception('Clearing dependencies failed. Please reset any'\n ' patches before clearing dependencies')\n dependencies = dict()\n local.dependencies = dependencies\n self.__dict__['settings'] = ApplicationSettingsStorage()",
"def _init_required_calculators(self):\n self._required_calculators = None\n pass",
"def cacheProperty(getter, attr_name, fdel='None', doc='None'):\n\n pass",
"def _lazyprop(self):\r\n if not hasattr(self, attr_name):\r\n setattr(self, attr_name, func(self))\r\n return getattr(self, attr_name)",
"def uninit():\n sys.meta_path.remove(_importer)",
"def patchLazy():\n Lazy._solr_original__add__ = Lazy.__add__\n Lazy.__add__ = lazyAdd\n if HAS_EXPCAT:\n lazy.Lazy._solr_original__add__ = lazy.Lazy.__add__\n lazy.Lazy.__add__ = lazyExpCatAdd",
"def no_automatic_dependency_tracking_scope(obj):\n previous_value = getattr(obj, '_setattr_tracking', True)\n obj._setattr_tracking = False # pylint: disable=protected-access\n try:\n yield\n finally:\n obj._setattr_tracking = previous_value # pylint: disable=protected-access",
"def set_lazy(self, key, value_callable):\n if key in self._dic:\n del self._dic[key]\n self._lazyload[key] = value_callable",
"def lock_dependencies():\n global g_deps_locked\n g_deps_locked = True",
"def deleter(self, _deleter):\n self._most_recent_linker = self._linked_deleter\n if _deleter is None:\n self._deleter = self._default_deleter\n if self._chain:\n self._chain_deleter = self._deleter\n self._deleter = self.chain_deleter\n self.linker(self)\n else:\n self._deleter = _deleter\n if self._chain:\n self._chain_deleter = self._deleter\n self._deleter = self.chain_deleter\n return self",
"def test_external_getter_no_client(external_getter, code):\n external_getter.Client = None\n\n assert external_getter.event_get() is None\n assert external_getter.station_get(code) is None\n assert external_getter.obs_waveform_get(code) is None",
"def post_creation(self):\n self.deped_org = True",
"def test_related_default_unexpanded(self):\n\n self.mock_related_field.should_full_dehydrate.return_value = False\n self.mock_related_field.instance_name = \"test_resource\"\n\n self.mock_bundle.request.GET = {}\n\n dehydrate_related(self.mock_related_field, self.mock_bundle, self.mock_related_resource)\n\n self.assertEqual(self.mock_related_resource.build_bundle.call_count, 0)\n self.assertEqual(self.mock_related_resource.full_dehydrate.call_count, 0)\n self.mock_related_resource.get_resource_uri.assert_called_once_with(self.mock_bundle)\n self.assertEqual(self.mock_bundle.request.GET, {})",
"def import_dependencies(cls):\n pass",
"def remove_other_references(self):\n# subclasses must call their parent class's remove_other_references\n# method, after performing their own duties\n pass",
"def extract_lazy_object(lo):\n if lo._wrapped is empty:\n lo._setup()\n return lo._wrapped"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the setter if provided, else the default setter (with linked-deletion calls if no linkers were given at init).
|
def setter(self, _setter):
self._most_recent_linker = self._linked_setter
if _setter is None:
self._setter = self._default_setter
if self._chain:
self._chain_setter = self._setter
self._setter = self.chain_setter
self.linker(self)
else:
self._setter = _setter
if self._chain:
self._chain_setter = self._setter
self._setter = self.chain_setter
return self
|
[
"def setter(self, setter):\n if setter is NoDefault:\n\n def paramSetter(p_self, value):\n self.assigned = SINCE_ANYTHING\n p_self.assigned = SINCE_ANYTHING\n setattr(p_self, self.fieldName, value)\n\n elif setter is None:\n\n def paramSetter(p_self, value):\n raise ParameterError(\n \"Cannot set value for parameter `{}` on {} to `{}`, it has a restricted setter.\".format(\n self.name, p_self, value\n )\n )\n\n elif callable(setter):\n\n def paramSetter(p_self, value):\n self.assigned = SINCE_ANYTHING\n p_self.assigned = SINCE_ANYTHING\n setter(p_self, value)\n\n else:\n raise ParameterDefinitionError(\n \"The setter for parameter `{}` must be callable. Setter attribute: {}\".format(\n self.name, setter\n )\n )\n\n self._setter = paramSetter\n\n return self",
"def _default_setter(self, obj, value):\n try:\n setattr(obj, self._name, value)\n except TypeError:\n raise",
"def setter(self, func):\n if self.cached:\n raise ValueError(\n \"Cached property are read-only, but a setter was \" \"specified.\"\n )\n self.set_setattr_mode(SetAttr.Property, func)\n return func",
"def solve_set_value(self, set_value: Any) -> None:\n self.apply_node_changes()\n\n if self.path_and_name is None:\n raise ValueError(\"path_and_none should not be None\")\n\n setattr(self.parent.object_ref, self.path_and_name.rsplit(\".\")[-1], set_value)",
"def simplesetter(prop, del_on_exceptions=()): # pragma: no cover\n\n def simplesetter_decorator(fn):\n propname = fn.__name__\n attrname = \"_{}\".format(propname)\n try:\n inspect.getfullargspec(fn)[0]\n inspect.getfullargspec(fn)[1]\n except KeyError: # pragma: no cover\n raise ValueError(\n \"`simplegetter` decorator can only be used for methods that \"\n \"take the object reference as first argument and the new \"\n \"property value as second argument\"\n )\n\n def setter(self, newval):\n try:\n converted = fn(self, newval)\n setattr(self, attrname, converted)\n except del_on_exceptions:\n try:\n delattr(self, attrname)\n except AttributeError:\n pass\n\n setter.__doc__ = fn.__doc__\n setter = prop.setter(setter)\n return setter\n\n return simplesetter_decorator",
"def _set_attr_or_key(obj, name, value, _isinstance=isinstance, _dict=dict, setter=setattr):\n if _isinstance(obj, _dict):\n obj[name] = value\n else:\n setter(obj, name, value)",
"def setter(self, fn):\n self.cb_set = fn",
"def getSetter(self, obj, indexName):\n\n # DefaultDublinCoreImpl:\n setterName = \"set\" + indexName\n if getattr(aq_base(obj), setterName, None) is not None:\n return getattr(obj, setterName)\n\n # other\n fieldName = self.fieldNameForIndex(indexName)\n field = None\n\n # Dexterity\n if IDexterityContent.providedBy(obj):\n if fieldName.startswith(\"get\"):\n fieldName = fieldName.lstrip(\"get_\")\n # heuristics\n fieldName = fieldName[0].lower() + fieldName[1:]\n return lambda value: setattr(aq_base(obj), fieldName, value)\n\n # AT and discussions left\n if IComment.providedBy(obj):\n # Discussion\n field = getattr(obj, \"getField\", None)\n else:\n # Archetype\n field = getattr(aq_base(obj), \"getField\", None)\n # Archetypes:\n if field:\n fieldObj = field(fieldName) or field(fieldName.lower())\n if not fieldObj and fieldName.startswith(\"get\"):\n fieldName = fieldName.lstrip(\"get_\")\n fieldName = fieldName[0].lower() + fieldName[1:]\n fieldObj = obj.getField(fieldName)\n if fieldObj is not None:\n return fieldObj.getMutator(obj)\n return None\n\n return None",
"def is_property_setter(node: astroid.FunctionDef) -> bool:\n return _is_property_kind(node, \"setter\")",
"def _set_mutable(self, mutable):\n # pylint: disable=protected-access\n object.__setattr__(self, \"_mutable\", mutable)\n self.autotune._set_mutable(mutable)\n self.experimental_distribute._set_mutable(mutable)\n self.experimental_optimization._set_mutable(mutable)\n self.threading._set_mutable(mutable)",
"def __set__(self, instance, value):\n instance.__dict__[self.name] = value",
"def _set_explicit_linkers(self, linkers, old_linker):\n if isinstance(linkers, str):\n self._linker(linkers)\n else:\n for linker in linkers:\n self._linker(linker)\n self.linker = old_linker",
"def _create_getter_setter(prop):\n\n def _getter(self):\n return getattr(self._container, prop)\n\n def _setter(self, value):\n return setattr(self._container, prop, value)\n\n return _getter, _setter",
"def set(self, attr, val):\n if not hasattr(self, attr):\n logger.error('model: set: The attribute \"{0}\" is undefined'.format(attr))\n sys.exit(1)\n setattr(self, attr, val)",
"def set(self, instance, value):\n self.descriptor.__set__(instance, value)",
"def setobj(self, obj, key, val='', test=0, force=False):\n san = SanContainer.getInstance().get_san()\n if getattr(obj,'private',False) and san.runmode:\n return (1,'cannot change parameter for private %s' % obj.name)\n if obj.state != ObjState.created and (key in obj.newonly_fields):\n if san.runmode:\n return (1,'parameter %s can only be set on newly created objects, try again with proper settings' % (key))\n elif key not in obj.loadmode_fields:\n return (0,'set for %s was ignored, new only field' % key)\n\n objcls=obj.__class__.__name__\n if test==0:\n if key not in obj._updatedattr:\n obj._updatedattr+=[key]\n if hasattr(obj,'force_update') and force:\n obj.force_update = True\n if hasattr(obj,'set_'+key):\n return getattr(obj,'set_'+key)(san,key,val,test)\n ptype=getattr(obj,key).__class__.__name__\n if ptype=='int' : return getattr(self,'set_int')(obj,key,val,test)\n if ptype=='str' : return getattr(self,'set_str')(obj,key,val,test)\n if ptype=='bool' : return getattr(self,'set_bool')(obj,key,val,test)\n if ptype=='IP' : return getattr(self,'set_IP')(obj,key,val,test)\n if ptype=='list' : return getattr(self,'set_strlist')(obj,key,val,test)\n if ptype=='dict' : return getattr(self,'set_strdict')(obj,key,val,test)\n if ptype=='EnumValue' : return getattr(self,'set_enum')(obj,key,val,test)\n if ptype=='instancemethod' : return getattr(self,'set_method')(obj,key,val,test)\n if ptype=='VSACollection' : return (1,'Cannot set value for collection, specify %s instance' % key)\n er='unknown set type %s in set %s.%s' % (ptype,obj.__class__.__name__,key)\n logger.eventlog.warning(er)\n return (1,er)",
"def __setattr__(self, name, value):\n if name in self.INTERNAL_ATTRS:\n super(Settings, self).__setattr__(name, value)\n else:\n self.set_attr(name, value)",
"def setLinked(self, linked):\n pass",
"def onSetAttr(self, attr, vals, opts):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the deleter if provided, else use the access-safe default deleter (with linked-deletion calls if no linkers were given at init).
|
def deleter(self, _deleter):
self._most_recent_linker = self._linked_deleter
if _deleter is None:
self._deleter = self._default_deleter
if self._chain:
self._chain_deleter = self._deleter
self._deleter = self.chain_deleter
self.linker(self)
else:
self._deleter = _deleter
if self._chain:
self._chain_deleter = self._deleter
self._deleter = self.chain_deleter
return self
|
[
"def _linked_deleter(self, obj):\n self._hidden_deleter(obj)\n self._update_linked(obj)",
"def _default_deleter(self, obj):\n try:\n delattr(obj, self._name)\n except AttributeError:\n pass\n except TypeError:\n raise",
"def deleter(self, func):\n self.set_delattr_mode(DelAttr.Property, func)\n return func",
"def _del(self, _del):\n\n self.__del = _del",
"def is_property_deleter(node: astroid.FunctionDef) -> bool:\n return _is_property_kind(node, \"deleter\")",
"def setDeleteAfterUse(*args, **kwargs):\n \n pass",
"def user32_DdeInitialize(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"pidInst\", \"pfnCallback\", \"afCmd\", \"ulRes\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def registerPreDelete(preDeleteFn):",
"def free(self, ptr): # pylint:disable=unused-argument\n raise NotImplementedError(f\"{self.free.__func__.__name__} not implemented for {self.__class__.__name__}\")",
"def deletion_policy(self):\n return function.resolve(self._deletion_policy) or self.DELETE",
"def remove_freezer_override():\n global freezer_override\n freezer_override = False",
"def user32_DdeFreeDataHandle(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hData\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def _textureDeleter(textureID):\r\n\r\n def cleanup(ref):\r\n if glDeleteTextures:\r\n glDeleteTextures([textureID])\r\n\r\n return cleanup",
"def is_property_setter_or_deleter(node: astroid.FunctionDef) -> bool:\n return _is_property_kind(node, \"setter\", \"deleter\")",
"def __reversed__(self) -> 'ListDeleter':\n if self.direction == DIR_BACKWARD:\n return ListDeleter(self.list_to_process)\n else:\n return ListDeleter(self.list_to_process, DIR_BACKWARD)",
"def release_fd(self):\n if isinstance(self._fd, WeakRef): return\n if self.fd is None: return\n self.get_handle() # because fd might be released unexpectedly, we must keep handle\n self._fd = WeakRef(self._fd)",
"def setDeleteCallback(self, function: 'SoSensorCB *', data: 'void *'=None) -> \"void\":\n return _coin.SoDataSensor_setDeleteCallback(self, function, data)",
"def deleteAfterUse(*args, **kwargs):\n \n pass",
"def attempt_delete(self):\n if self.data_lock.w_acquire_non_blocking():\n if self.data_adaptor:\n try:\n self.data_adaptor.cleanup()\n self.data_adaptor = None\n except Exception:\n # catch all exceptions to ensure the lock is released\n pass\n\n self.data_lock.w_release()\n return True\n else:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Called as the deleter when the deleter is linked. (True if no linkers were given at init and the default deleter is used.)
|
def _linked_deleter(self, obj):
self._hidden_deleter(obj)
self._update_linked(obj)
|
[
"def deleter(self, _deleter):\n self._most_recent_linker = self._linked_deleter\n if _deleter is None:\n self._deleter = self._default_deleter\n if self._chain:\n self._chain_deleter = self._deleter\n self._deleter = self.chain_deleter\n self.linker(self)\n else:\n self._deleter = _deleter\n if self._chain:\n self._chain_deleter = self._deleter\n self._deleter = self.chain_deleter\n return self",
"def _is_deinited(self) -> None:\n if self._cursor is None:\n raise ValueError(\n \"CursorManager object has been deinitialized and can no longer \"\n \"be used. Create a new CursorManager object.\"\n )",
"def _default_deleter(self, obj):\n try:\n delattr(obj, self._name)\n except AttributeError:\n pass\n except TypeError:\n raise",
"def remove_freezer_override():\n global freezer_override\n freezer_override = False",
"def post_creation(self):\n self.deped_org = True",
"def _append_linker(self, linker):\n linkerPm = kml.Placemark(NS)\n linkerPm.geometry = linker.shape\n if not self.linkers_wkt.get(linkerPm.geometry.wkt):\n self.linkers_wkt[linkerPm.geometry.wkt] = True\n self.linkers.append(linkerPm)",
"def onDeinit(self):",
"def registerPreDelete(preDeleteFn):",
"def uninit():\n sys.meta_path.remove(_importer)",
"def is_property_deleter(node: astroid.FunctionDef) -> bool:\n return _is_property_kind(node, \"deleter\")",
"def user32_DdeInitialize(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"pidInst\", \"pfnCallback\", \"afCmd\", \"ulRes\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def basetype_posthook_setup(self):\r\n pass",
"def _set_explicit_linkers(self, linkers, old_linker):\n if isinstance(linkers, str):\n self._linker(linkers)\n else:\n for linker in linkers:\n self._linker(linker)\n self.linker = old_linker",
"def is_linker(self):\n print(\"[Part is_linker] make sure is_linker() matches the same fxn in Variant\")\n return (self.role == 'Linker')",
"def _init_required_calculators(self):\n self._required_calculators = None\n pass",
"def uninitialize():\n\n pass",
"def test_demanglers(self):\n\n merge_libraries.FLAGS.streaming_demanglers = False\n demangler_list = merge_libraries.init_demanglers()\n for demangler in demangler_list:\n logging.info(\"Testing %s\", os.path.basename(demangler.cmdline[0]))\n self.assertEqual(\n demangler.demangle(\"regular_c_symbol_nonstreaming\"),\n \"regular_c_symbol_nonstreaming\")\n self.assertEqual(\n demangler.demangle(\"_ZN12my_namespace19MyClassNonStreamingC2Ev\"),\n \"my_namespace::MyClassNonStreaming::MyClassNonStreaming()\")\n merge_libraries.shutdown_demanglers()\n\n merge_libraries.FLAGS.streaming_demanglers = True\n demangler_list = merge_libraries.init_demanglers()\n for demangler in demangler_list:\n logging.info(\"Testing %s streaming\", os.path.basename(demangler.cmdline[0]))\n self.assertEqual(\n demangler.demangle(\"regular_c_symbol_streaming\"),\n \"regular_c_symbol_streaming\")\n self.assertEqual(\n demangler.demangle(\"_ZN12my_namespace16MyClassStreamingC2Ev\"),\n \"my_namespace::MyClassStreaming::MyClassStreaming()\")\n merge_libraries.shutdown_demanglers()",
"def link_delete_handler(self, ev):\n self.algorithm.init_algorithm(ev.switches, ev.links)\n self.send_event_to_observers(fault_recovery_event.EventFaultRecoveryLinkDelete(\n ev.src_port, ev.dst_port, ev.timestamp))",
"def autonomousInit(self) -> None:\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set explicit linkers at end of init and restore linker decorator.
|
def _set_explicit_linkers(self, linkers, old_linker):
if isinstance(linkers, str):
self._linker(linkers)
else:
for linker in linkers:
self._linker(linker)
self.linker = old_linker
|
[
"def _append_linker(self, linker):\n linkerPm = kml.Placemark(NS)\n linkerPm.geometry = linker.shape\n if not self.linkers_wkt.get(linkerPm.geometry.wkt):\n self.linkers_wkt[linkerPm.geometry.wkt] = True\n self.linkers.append(linkerPm)",
"def refiner_reset(self):\n self._refiner_reset = True",
"def uninit():\n sys.meta_path.remove(_importer)",
"def deleter(self, _deleter):\n self._most_recent_linker = self._linked_deleter\n if _deleter is None:\n self._deleter = self._default_deleter\n if self._chain:\n self._chain_deleter = self._deleter\n self._deleter = self.chain_deleter\n self.linker(self)\n else:\n self._deleter = _deleter\n if self._chain:\n self._chain_deleter = self._deleter\n self._deleter = self.chain_deleter\n return self",
"def hook_lr_init(self, x):\n self.lr_init = x",
"def set_default_decay_lib(self):\n\t\tsystem = self.system\n\t\tself._decay_lib_set = 'yes'\n\t\t#system.set_default_decay_for_all_no_add()\n\t\tself._decay_lib_path = 'default'\n\t\tsystem.set_default_decay_for_all()",
"def teardown(self):\n try:\n if self.orig_import:\n builtins.__import__ = self.orig_import\n except (TypeError, ValueError, Exception):\n pass\n self.orig_import = None",
"def setter(self, _setter):\n self._most_recent_linker = self._linked_setter\n if _setter is None:\n self._setter = self._default_setter\n if self._chain:\n self._chain_setter = self._setter\n self._setter = self.chain_setter\n self.linker(self)\n else:\n self._setter = _setter\n if self._chain:\n self._chain_setter = self._setter\n self._setter = self.chain_setter\n return self",
"def reset(self):\n self.aliases = {}",
"async def async_add_default_links(self):\n self._run_on_wake(self.async_add_default_links_on_wake)",
"def init(command_relevants):\n \n # Location of dotlink directory\n path = to_specific_path(command_relevants[\"<path>\"] or \".\")\n \n # Location of dotlinks.json\n json_path = os.path.join(path, \"dotlinks.json\")\n \n # Location of .dotlinkrc\n dotlinkrc = os.path.join(os.environ[\"HOME\"], \".dotlinkrc\")\n\n # If directory exists, nothing happens to it\n os.makedirs(path, exist_ok=True)\n\n # Don't want to overwrite file if it already has links\n if not os.path.exists(json_path):\n with open(json_path, \"w\") as f:\n json.dump({}, f)\n \n # Identify location of dotlink dir\n # Will have to change once more can be added to dotlinkrc\n with open(dotlinkrc, \"w\") as f:\n f.write(\"dotlink_dir = \" + to_generic_home_path(path))",
"def set_libraries (self, libnames):\r\n self.libraries = copy (libnames)",
"def reset(cls) -> None:\n cls._registered_plugins = {}\n cls._dynamic_modules = set()",
"def finalize(self):\n # we could not fill out links while parsing (referenced sections where not known),\n # so try to set them now, where the document is complete\n for sec in self.itersections(recursive=True):\n if sec._link is not None:\n sec.link = sec._link\n if sec._include is not None:\n sec.include = sec._include",
"def autonomousInit(self) -> None:\n ...",
"def remove_freezer_override():\n global freezer_override\n freezer_override = False",
"def _init_required_calculators(self):\n self._required_calculators = None\n pass",
"def initialize(self, trainer):\n pass",
"def set_default_fy_lib(self):\n\t\tsystem = self.system\n\t\tself._fy_lib_set = 'yes'\n\t\tself._fy_lib_path = 'default'\n\t\t#system.set_default_fy_for_all_no_add()\n\t\tsystem.set_default_fy_for_all()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Waits for a baremetal node attribute to reach given status. The client should have a show_node(node_uuid) method to get the node.
|
def wait_for_bm_node_status(client, node_id, attr, status):
_, node = client.show_node(node_id)
start = int(time.time())
while node[attr] != status:
time.sleep(client.build_interval)
_, node = client.show_node(node_id)
status_curr = node[attr]
if status_curr == status:
return
if int(time.time()) - start >= client.build_timeout:
message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
'within the required time (%(timeout)s s).' %
{'node_id': node_id,
'attr': attr,
'status': status,
'timeout': client.build_timeout})
message += ' Current state of %s: %s.' % (attr, status_curr)
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
raise lib_exc.TimeoutException(message)
|
[
"def _multi_check_node(self, status, node_attrs):\r\n error = None\r\n try:\r\n response = self.connection.request(\r\n status['node_response']['selfLink']).object\r\n except GoogleBaseError:\r\n e = self._catch_error(ignore_errors=node_attrs['ignore_errors'])\r\n error = e.value\r\n code = e.code\r\n response = {'status': 'DONE'}\r\n if response['status'] == 'DONE':\r\n status['node_response'] = None\r\n if error:\r\n status['node'] = GCEFailedNode(status['name'],\r\n error, code)\r\n else:\r\n status['node'] = self.ex_get_node(status['name'],\r\n node_attrs['location'])",
"def get_status(self) -> NodeManagerStatus:",
"def wait_for_status(self, cluster, status='Available', failures=None,\n interval=15, wait=1500):\n failures = ['Error'] if failures is None else failures\n return resource.wait_for_status(\n self, cluster, status, failures, interval, wait, attribute=\"status\")",
"def wait_until_node_ready(self, *args, **kwargs):\n self._load_kwargs(kwargs)\n banner(\"PCC.Wait Until Node Ready\")\n conn = BuiltIn().get_variable_value(\"${PCC_CONN}\")\n return easy.wait_until_node_ready(conn, self.Name)",
"def check_ota_status(self, node_obj, service_name, service_read_params):\n ota_status = \"\"\n ota_status_empty_str = \"(empty)\"\n log.debug(\"Received service read params: \" + json.dumps(service_read_params))\n ota_status_key = service_read_params[OTA_PARAMS['status']]\n ota_info_key = service_read_params[OTA_PARAMS['info']]\n log.debug(\"OTA Status Key : \" + str(ota_status_key))\n log.debug(\"OTA Info Key : \" + str(ota_info_key))\n while True:\n curr_status = None\n curr_info = None\n time.sleep(8)\n log.info(\"Getting node params for OTA status\")\n new_node_params = node_obj.get_node_params()\n if service_name not in new_node_params and (curr_status not in [None, ota_status_empty_str]):\n log.info(\"OTA may have completed, check the node to confirm.\")\n print(\"OTA may have completed, check the node to confirm.\")\n ota_status = None\n break\n node_service_params = new_node_params[service_name]\n for k,v in node_service_params.items():\n if ota_status_key and k in ota_status_key and not v:\n if curr_status and k.lower() in ota_status_key and not v == curr_status:\n log.info(\"OTA may have completed, check the node to confirm.\")\n print(\"OTA may have completed, check the node to confirm.\")\n ota_status = None\n break\n if ota_status_key and k in ota_status_key:\n curr_status = v\n elif ota_info_key and k in ota_info_key:\n curr_info = v\n\n log.debug(\"Current OTA status: \" + str(curr_status))\n curr_time = time.time()\n if not curr_status:\n if not ota_status_key:\n print(\"Node param of type: \" + OTA_PARAMS['status'] + \" not found... Exiting...\")\n log.debug(\"Node param of type: \" + OTA_PARAMS['status'] + \" not found...Exiting...\")\n ota_status = \"\"\n break\n curr_status = ota_status_empty_str\n if not curr_info:\n if not ota_info_key:\n print(\"Node param of type: \" + OTA_PARAMS['info'] + \" not found... Exiting...\")\n log.debug(\"Node param of type: \" + OTA_PARAMS['info'] + \" not found...Exiting...\")\n ota_status = \"\"\n break\n curr_info = ota_status_empty_str\n timestamp = datetime.datetime.fromtimestamp(curr_time).strftime('%H:%M:%S')\n log.debug(\"[{:<6}] {:<3} : {:<3}\".format(timestamp, curr_status,curr_info))\n print(\"[{:<8}] {:<3} : {:<3}\".format(timestamp, curr_status,curr_info))\n\n if curr_status in [\"failed\"]:\n ota_status = False\n break\n elif curr_status in [\"success\"]:\n ota_status = True\n break\n\n end_time = time.time()\n log.debug(\"End time set to: \" + str(end_time))\n if end_time - start_time > 120:\n if curr_status:\n print(\"OTA taking too long...Exiting...\")\n log.info(\"OTA taking too long...Exiting...\")\n else:\n print(\"No change in OTA status, check the node to confirm...Exiting...\")\n log.info(\"No change in OTA status, check the node to confirm...Exiting...\")\n break\n return ota_status",
"def update_node_status(self, address, status):\n\n query = f\"\"\"UPDATE public.nodes SET\n status = {status},\n verified_at = CURRENT_TIMESTAMP \n where address = '{address}';\"\"\"\n self.cursor.execute(query)\n self.chunks_db_connection.commit()",
"def set_status(status_name, nodes):\n\n data = {'status_name': status_name,\n 'exact_get': True,\n }\n status = api_submit('/api/statuses', data, method='get_params')\n\n data = {'status_id': status['results'][0]['status_id']}\n\n for n in nodes:\n log.info('Setting status node={0},status={1}'.format(n['node_name'], status['results'][0]['status_name']))\n api_submit('/api/nodes/{0}'.format(n['node_id']), data, method='put')",
"def ping_until_ok(node,wait_str='5s',extra='-c 3'):\n\n device = LOCAL['node'][node]['device']\n ip = GLOBAL['device'][device]['ip']\n result = os.system(\"ping %s %s\" % (extra,ip))\n\n wait = DateTime.convert_time(wait_str)\n time.sleep(wait)\n\n BuiltIn().log(\"Pinged to host `%s(%s)` with result = %d\" % (node,ip,result))\n\n return result",
"def updateNodeStatus(self, nodeName, state=None, bootFrom=None):\n\n value = 'None' if bootFrom is None else \\\n '1 (disk)' if int(bootFrom) == 1 else '0 (network)'\n\n self.getLogger().debug(\n 'updateNodeStatus(): node=[%s], state=[%s], bootFrom=[%s]' % (\n nodeName, state, value))\n\n session = DbManager().openSession()\n\n try:\n dbNode = NodesDbHandler().getNode(session, nodeName)\n\n # Bitfield representing node changes (0 = state change, 1 = bootFrom\n # change)\n changed = 0\n\n if state is not None and state != dbNode.state:\n # 'state' changed\n changed |= 1\n\n if bootFrom is not None and bootFrom != dbNode.bootFrom:\n # 'bootFrom' changed\n changed |= 2\n\n if changed:\n # Create custom log message\n msg = 'Node [%s] state change:' % (dbNode.name)\n\n if changed & 1:\n msg += ' state: [%s] -> [%s]' % (dbNode.state, state)\n\n dbNode.state = state\n\n if changed & 2:\n msg += ' bootFrom: [%d] -> [%d]' % (\n dbNode.bootFrom, bootFrom)\n\n dbNode.bootFrom = bootFrom\n\n self.getLogger().info(msg)\n else:\n self.getLogger().info(\n 'Updated timestamp for node [%s]' % (dbNode.name))\n\n dbNode.lastUpdate = time.strftime(\n '%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n\n result = bool(changed)\n\n # Only change local boot configuration if the hardware profile is\n # not marked as 'remote' and we're not acting on the installer node.\n if dbNode.softwareprofile and \\\n dbNode.softwareprofile.type != 'installer' and \\\n dbNode.hardwareprofile.location != 'remote':\n # update local boot configuration for on-premise nodes\n self._bhm.writePXEFile(dbNode, localboot=bootFrom)\n\n session.commit()\n\n return result\n finally:\n DbManager().closeSession()",
"def wait_for_baremetal_node_lock(self, node, timeout=30):\n warnings.warn(\n \"The wait_for_baremetal_node_lock call is deprecated \"\n \"in favor of wait_for_node_reservation on the baremetal \"\n \"proxy\",\n os_warnings.OpenStackDeprecationWarning,\n )\n self.baremetal.wait_for_node_reservation(node, timeout)",
"def _wait_for_lun_status(self, lun_id, expected_status):\n while True:\n status = self._get_lun_status(lun_id)\n if status in expected_status:\n break\n elif status == 'Fault':\n err_msg = (_('_wait_for_lun_status: LUN %(lun_id)s '\n 'status is %(status)s.')\n % {'lun_id': lun_id,\n 'status': status})\n LOG.error(err_msg)\n raise exception.VolumeBackendAPIException(data=err_msg)\n LOG.info(_LI('LUN %s is not ready, waiting 2s...'), lun_id)\n time.sleep(2)",
"def check_node_health(self, *, node=None):\n if node is None:\n endpoint = self.build_url(\"/healthchecks/node\")\n else:\n endpoint = self.build_url(\"/healthchecks/node/{node}\", node=node)\n return self.request('get', endpoint)",
"def wait_for_node(node, leave=False):\n\n tries = DEFAULT_TRIES\n while tries > 0:\n utils.puts(\n 'Waiting for node {} to {}'.format(\n node, 'leave' if leave else 'come back',\n )\n )\n dn_status = get_dn_status(node)\n try:\n rs_status = get_rs_status(node)\n except ValueError:\n rs_status = not leave\n try:\n rm_status = get_rm_status(node)\n except ValueError:\n rm_status = not leave\n if leave:\n if not (dn_status or rs_status or rm_status):\n return\n else:\n if dn_status and rs_status and rm_status:\n return\n tries -= 1\n time.sleep(DEFAULT_INTERVAL)\n console.confirm(\n 'Node {} never {}! Press Enter to continue, '\n 'CTRL+C to abort'.\n format(\n node,\n 'left' if leave else 'came back',\n NN_URL.format(env.namenodes[0]),\n )\n )",
"def wait_until_vm_state_running(self, instance_id: str) -> None:\n start_time = datetime.now()\n end_time = start_time + timedelta(seconds=1200)\n\n node = None\n while end_time > datetime.now():\n sleep(5)\n resp = self.service.get_instance(instance_id)\n node = resp.get_result()\n\n if node[\"status\"] == \"running\":\n end_time = datetime.now()\n duration = (end_time - start_time).total_seconds()\n LOG.info(\n f\"{node['name']} moved to running state in {duration} seconds.\",\n )\n return\n\n if node[\"status\"] == \"error\":\n raise NodeError(f\"{node['name']} has moved to error state.\")\n\n raise NodeError(f\"{node['name']} is in {node['status']} state.\")",
"def wait_for_status(session, resource, status, failures, interval, wait):\n if resource.status == status:\n return resource\n\n total_sleep = 0\n if failures is None:\n failures = []\n\n while total_sleep < wait:\n resource.get(session)\n if resource.status == status:\n return resource\n if resource.status in failures:\n msg = (\"Resource %s transitioned to failure state %s\" %\n (resource.id, resource.status))\n raise exceptions.ResourceFailure(msg)\n time.sleep(interval)\n total_sleep += interval\n msg = \"Timeout waiting for %s to transition to %s\" % (resource.id, status)\n raise exceptions.ResourceTimeout(msg)",
"def is_lcd_reachable():\n\n response = requests.get(NODE_INFO_ENDPOINT)\n return True if response.status_code == 200 else False",
"def wait_for_ip():\n nodes = list_nodes_full()\n if \"primaryIpAddress\" in nodes[hostname]:\n return nodes[hostname][\"primaryIpAddress\"]\n time.sleep(1)\n return False",
"def status(self, timeout: Union[int, float] = consts.DEFAULT_WAIT_FOR_CRD_STATUS_TIMEOUT) -> dict:\n\n def _attempt_to_get_status() -> dict:\n return self.get()[\"status\"]\n\n return waiting.wait(\n _attempt_to_get_status,\n sleep_seconds=0.5,\n timeout_seconds=timeout,\n waiting_for=f\"nmstate config {self.ref} status\",\n expected_exceptions=KeyError,\n )",
"def ex_run_node(self, node):\r\n # Refresh node state\r\n e_vm = self.connection.request(node.extra['uri_id']).object\r\n state = e_vm.findtext('state')\r\n\r\n if state != 'NOT_ALLOCATED':\r\n raise LibcloudError('Invalid Node state', self)\r\n\r\n # --------------------------------------------------------\r\n # Deploy the Node\r\n # --------------------------------------------------------\r\n self._deploy_remote(e_vm)\r\n\r\n # --------------------------------------------------------\r\n # Retrieve it again, to get some schedule-defined\r\n # values.\r\n # --------------------------------------------------------\r\n edit_vm = get_href(e_vm, 'edit')\r\n headers = {'Accept': self.NODE_MIME_TYPE}\r\n e_vm = self.connection.request(edit_vm, headers=headers).object\r\n return self._to_node(e_vm, self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
exec_config must be of type ConfigNode because we depend on safe_get(self, key) to correctly handle optional exec provider config parameters.
|
def __init__(self, exec_config, cwd):
for key in ['command', 'apiVersion']:
if key not in exec_config:
raise ConfigException(
'exec: malformed request. missing key \'%s\'' % key)
self.api_version = exec_config['apiVersion']
self.args = [exec_config['command']]
if exec_config.safe_get('args'):
self.args.extend(exec_config['args'])
self.env = os.environ.copy()
if exec_config.safe_get('env'):
additional_vars = {}
for item in exec_config['env']:
name = item['name']
value = item['value']
additional_vars[name] = value
self.env.update(additional_vars)
self.cwd = cwd or None
|
[
"def executor_config(self):\n return self._executor_config",
"def executor_config(self, executor_config):\n\n self._executor_config = executor_config",
"def config(self, param: str, /) -> Any:",
"def fetchConfigParam(self):\r\n pass",
"def _process_config_request(self, command_dict: dict):\n request = ConfigRequest(command_dict['kwargs'], command_dict['class_name'])\n value = self.message_processor.process_config_request(request)\n\n return self.serialize_response(value)",
"def get_config(self) -> NodeManagerConfig:",
"def get_config(self):\n return ExecutionConfig(self._j_execution_environment.getConfig())",
"def execution_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TargetExecutionConfigArgs']]]]:\n return pulumi.get(self, \"execution_configs\")",
"def test_get_kv_config(self):\n pass",
"def test_get_kernel_config(self):\n try:\n kernel_config = KernelConfig(self.user)\n value = kernel_config.get(self.config_key)\n assert value == \"y\"\n except (Warning, FileNotFoundError):\n assert True",
"def config(self, config_data, preserve=True):\n if isinstance(config_data, dict):\n if preserve:\n # on env server core doesn't send all required values on cli. inputs that\n # come in via secureParams needs to be updated, but not all of them (e.g. log_path).\n # this code will only update new inputs that are not provided via sys argv.\n for key in list(config_data):\n if f'--{key}' in sys.argv:\n del config_data[key]\n\n # update the arg Namespace via dict\n self._default_args.__dict__.update(config_data)\n\n # register token as soon as possible\n self.register_token()",
"def default_pg_config(execname = 'pg_config', envkey = 'PGINSTALLATION'):\n\tpg_config_path = os.environ.get(envkey)\n\tif pg_config_path:\n\t\t# Trust PGINSTALLATION.\n\t\treturn platform_exe(pg_config_path)\n\treturn find_executable(execname)",
"def get_config(self, key: str):\n query_configs = self.request['url']['query']\n body_configs = self.request['body'].get('nboost', {})\n request_configs = {**query_configs, **body_configs}\n cli_config = self.cli_configs[key]\n config = request_configs.get(key, cli_config)\n return type(cli_config)(config)",
"def task_script(\n config: Optional[Type[TaskConfig]] = None,\n default_config_file: Optional[str] = None,\n config_path: str = \"hydra_configs\", # Override if using a different dir\n) -> Callable[[TaskFunction], Any]:\n if config is not None:\n used_config = config\n else:\n assert default_config_file is not None, \"Must provide one of config or default_config_file\"\n used_config = build_default_task_config(default_config_file)\n register_script_config(name=\"taskconfig\", module=used_config)\n\n def task_script_wrapper(script_func: TaskFunction) -> TaskFunction:\n @functools.wraps(script_func)\n def process_config_and_run_main(cfg: \"DictConfig\"):\n operator, cfg = process_config_and_get_operator(cfg)\n try:\n ret_val = script_func(operator, cfg)\n except Exception as e:\n raise e\n finally:\n if not operator.is_shutdown:\n operator.shutdown()\n return ret_val\n\n absolute_config_path = os.path.abspath(os.path.join(get_run_file_dir(), config_path))\n hydra_wrapper = hydra.main(\n config_path=absolute_config_path,\n config_name=\"taskconfig\",\n version_base=\"1.1\",\n )\n return cast(TaskFunction, hydra_wrapper(process_config_and_run_main))\n\n return task_script_wrapper",
"def apply_autokernel_config(args, kconfig, config):\n log.info(\"Applying autokernel configuration\")\n\n # Build cmdline on demand\n kernel_cmdline = []\n # Reset symbol_changes\n autokernel.symbol_tracking.symbol_changes.clear()\n\n # Asserts that the symbol has the given value\n def get_sym(stmt):\n # Get the kconfig symbol, and change the value\n try:\n return kconfig.syms[stmt.sym_name]\n except KeyError:\n log.die_print_error_at(stmt.at, \"symbol '{}' does not exist\".format(stmt.sym_name))\n\n # Asserts that the symbol has the given value\n def assert_symbol(stmt):\n if not stmt.assert_condition.evaluate(kconfig):\n if stmt.message:\n log.die_print_error_at(stmt.at, \"assertion failed: {}\".format(stmt.message))\n else:\n log.die_print_error_at(stmt.at, \"assertion failed\")\n\n # Sets a symbols value if and asserts that there are no conflicting double assignments\n def set_symbol(stmt):\n # Get the kconfig symbol, and change the value\n sym = get_sym(stmt)\n value = stmt.value\n\n if not autokernel.kconfig.symbol_can_be_user_assigned(sym):\n log.die_print_error_at(stmt.at, \"symbol {} can't be user-assigned\".format(sym.name))\n\n # Skip assignment if value is already pinned and the statement is in try mode.\n if stmt.has_try and sym in autokernel.symbol_tracking.symbol_changes:\n log.verbose(\"skipping {} {}\".format(autokernel.kconfig.value_to_str(value), sym.name))\n return\n\n if util.is_env_var(value):\n value = util.resolve_env_variable(stmt.at, value)\n\n if not set_value_detect_conflicts(sym, value, stmt.at):\n log.die_print_error_at(stmt.at, \"invalid value {} for symbol {}\".format(autokernel.kconfig.value_to_str(value), sym.name))\n\n if sym.str_value != value:\n if not stmt.has_try:\n # Only throw an error if it wasn't a try\n log.die_print_error_at(stmt.at, \"symbol assignment failed: {} from {} → {}\".format(\n sym.name,\n autokernel.kconfig.value_to_str(sym.str_value),\n autokernel.kconfig.value_to_str(value)))\n else:\n log.verbose(\"failed try set {} {} (symbol is currently not assignable to the chosen value)\".format(autokernel.kconfig.value_to_str(stmt.value), sym.name))\n\n # Visit all module nodes and apply configuration changes\n visited = set()\n def visit(module):\n # Ensure we visit only once\n if module.name in visited:\n return\n visited.add(module.name)\n\n def stmt_use(stmt):\n visit(stmt.module)\n\n def stmt_merge(stmt):\n filename = replace_common_vars(args, stmt.filename)\n log.verbose(\"Merging external kconf '{}'\".format(filename))\n kconfig.load_config(os.path.realpath(filename), replace=False)\n\n # Assert that there are no conflicts\n for sym in autokernel.symbol_tracking.symbol_changes:\n sc = autokernel.symbol_tracking.symbol_changes[sym]\n if sym.str_value != sc.value:\n autokernel.symbol_tracking.die_print_conflict(stmt.at, 'merge', sym, sym.str_value, sc)\n\n def stmt_assert(stmt):\n assert_symbol(stmt)\n\n def stmt_set(stmt):\n set_symbol(stmt)\n\n def stmt_add_cmdline(stmt):\n kernel_cmdline.append(stmt.param)\n\n dispatch_stmt = {\n autokernel.config.ConfigModule.StmtUse: stmt_use,\n autokernel.config.ConfigModule.StmtMerge: stmt_merge,\n autokernel.config.ConfigModule.StmtAssert: stmt_assert,\n autokernel.config.ConfigModule.StmtSet: stmt_set,\n autokernel.config.ConfigModule.StmtAddCmdline: stmt_add_cmdline,\n }\n\n def conditions_met(stmt):\n for condition in stmt.conditions:\n if not condition.evaluate(kconfig):\n return False\n return True\n\n for stmt in module.all_statements_in_order:\n # Ensure all attached conditions are met 
for the statement.\n if conditions_met(stmt):\n dispatch_stmt[stmt.__class__](stmt)\n\n # Visit the root node and apply all symbol changes\n visit(config.kernel.module)\n log.verbose(\" Changed {} symbols\".format(len(autokernel.symbol_tracking.symbol_changes)))\n\n # Lastly, invalidate all non-assigned symbols to process new default value conditions\n for sym in kconfig.unique_defined_syms:\n if sym.user_value is None:\n sym._invalidate() # pylint: disable=protected-access\n\n return kernel_cmdline",
"async def secret_config(self, ctx: commands.Context, key, value):\n pass",
"def handle_app_config(self, app_config, **options):\n raise NotImplementedError(\n \"Subclasses of AppCommand must provide a handle_app_config() method.\"\n )",
"def has_exec(self, ):\n\t\tpass",
"def getBuildConfTaskParamHooks():\n\n def handleParam(bconf, param):\n if param is None:\n return None\n if not isinstance(param, maptype):\n param = { 'cmd' : param }\n param['startdir'] = relpath(bconf.startdir, bconf.rootdir)\n return param\n\n return [('run', handleParam)]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function finds the power ratio between the external vehicle and the MLG gear engines. For this, the maximum allowable force on the nose landing gear strut is used.
|
def opt_force_ratio(F_nlg_allow, a_lst):
m_plane = 97400 # [kg] MRW
m_car = 22000 # [kg] Mass of external vehicle
m_tot = m_plane + m_car # [kg] Total mass
Roll_fric = 0.02 # [-] Rolling friction coefficient
F_tot = m_tot*max(a_lst) + Roll_fric*m_tot*9.81 # [N] Total force req to move plane at max acceleration
print("\nMax force: {F} N - POWER RATIO EXTERNAL-INTERNAL: {rat}\n".format(F=F_tot, rat=F_nlg_allow/F_tot))
if F_nlg_allow/F_tot > 1:
return 1
return F_nlg_allow/F_tot
|
[
"def max_power_in_existing_storage_rule(_m, g):\r\n\r\n return self.data.existing_storage_units_dict[g]['REG_CAP']",
"def max_power_output_solar_rule(_m, g, t):\r\n\r\n # Existing solar generators\r\n if g in m.G_E_SOLAR:\r\n return m.p_total[g, t] <= m.Q_SOLAR[g, t] * m.P_MAX[g]\r\n\r\n # Candidate wind generators\r\n if g in m.G_C_SOLAR:\r\n return m.p_total[g, t] <= m.Q_SOLAR[g, t] * m.b[g]",
"def get_solar_generator_power(self):\n return self._get_content_of_own_consumption()[5]",
"def car_power(a, v, ratio, pow_wheel=4):\n # cte\n w_rad_car_1 = 0.537 # [m] wheel radius front tires external truck\n w_rad_car_2 = 0.537 # [m] wheel radius rear tires external truck 0.496\n m_plane = 97400 # [kg] MRW\n m_car = 22000 # [kg] Weight of external vehicle\n m_tot = m_plane + m_car # [kg] Total mass of the system\n weight_ratio = 0.952 # [-] Weight distribution ratio\n Roll_fric = 0.02 # [-] Rolling friction coefficient for MLG gears\n Roll_fric_car = 0.0065 # [-] Rolling friction coefficient for car wheels\n n_hydrostat = 1 # [-] Efficiency hydrostatic motor\n\n # Necessary force and Torque calculations\n N_mlg = m_plane*weight_ratio*9.81 # [N] Total normal force on the MLG\n N_nlg = (m_car + m_plane*(1-weight_ratio))*9.81 # [N] Total normal force on the car\n N_nlg_w = N_nlg/4 # [N] Normal force per MLG wheel\n\n F_tot = m_tot*a + Roll_fric*N_mlg + Roll_fric_car*N_nlg # [N] Total force req to move plane at acceleration\n F_nlg = ratio*F_tot # [N] Force needed from internal\n F_nlg_w = F_nlg/pow_wheel # [N] Force needed from internal per wheel\n\n T_nlg_w_1 = F_nlg_w*w_rad_car_1 # [Nm] Torque per front wheel\n T_nlg_w_2 = F_nlg_w*w_rad_car_2 # [Nm] Torque per rear wheel\n\n # Rotational speed of wheels\n w_1 = v/w_rad_car_1 # [rad/s] rotational speed wheel\n w_2 = v/w_rad_car_2 # [rad/s] rotational speed wheel\n\n # Check if static friction is not exceeded\n if not stat_traction(T_nlg_w_1, N_nlg_w, w_rad_car_1):\n print(\"LOG: Acceleration: {a} \\tTorque: {t} \\tWheel Radius: {r}\".format(a=a, t=T_nlg_w_1, r=w_rad_car_1))\n raise ValueError(\"Exceeds Static friction\")\n elif not stat_traction(T_nlg_w_2, N_nlg_w, w_rad_car_2):\n print(\"LOG: Acceleration: {a} \\tTorque: {t} \\tWheel Radius: {r}\".format(a=a, t=T_nlg_w_2, r=w_rad_car_2))\n raise ValueError(\"Exceeds Static friction\")\n else:\n print(\"\\tStatic friction check rear wheels: [{t}]. Static friction checked front wheels: [{t}].\"\n .format(t=True), end=\"\\r\")\n\n return 1/n_hydrostat*T_nlg_w_1*w_1, 1/n_hydrostat*T_nlg_w_2*w_2",
"def _get_power_at_freq(self) -> float:\n\t\toriginal_span = self.span()\n\t\toriginal_rbw = self.rbw()\n\t\tneeds_reset = False\n\t\tif not (original_span == 0.25e6 and original_rbw == 1e3):\n\t\t\tneeds_reset = True\n\t\t\tself.span(0.25e6)\n\t\t\tself.rbw(1e3)\n\t\tif not self._parameters_synced:\n\t\t\t# call configure to update both\n\t\t\t# the parameters on the device and the\n\t\t\t# setpoints and units\n\t\t\tself.configure()\n\t\tdata = self._get_sweep_data()\n\t\tmax_power = np.max(data)\n\t\tif needs_reset:\n\t\t\tself.span(original_span)\n\t\t\tself.rbw(original_rbw)\n\t\t\tself.configure()\n\t\tsleep(2*self.sleep_time.get())\n\t\treturn max_power",
"def percentage_max_power(self) -> float:\n return self._percentage_max_power",
"def gear_ratio(self):\n return self._gear_ratio",
"def minimum_power_output_rule(_m, g):\r\n\r\n return float(0)",
"def powerflow_max_rule(_m, l):\r\n\r\n if l in m.L_I:\r\n return float(self.data.powerflow_limits[l]['forward'])\r\n else:\r\n # Set arbitrarily loose bound if not an interconnector\r\n return float(1e5)",
"def _accel_limit_multiplier(CS, lead):\n accel_by_speed = OrderedDict([\n # (speed m/s, decel)\n (0., 0.95), # 0 kmh\n (10., 0.95), # 35 kmh\n (20., 0.925), # 72 kmh\n (30., 0.875)]) # 107 kmh\n if CS.teslaModel in [\"SP\",\"SPD\"]:\n accel_by_speed = OrderedDict([\n # (speed m/s, decel)\n (0., 0.95), # 0 kmh\n (10., 0.95), # 35 kmh\n (20., 0.925), # 72 kmh\n (30., 0.875)]) # 107 kmh\n accel_mult = _interp_map(CS.v_ego, accel_by_speed)\n if _is_present(lead):\n safe_dist_m = _safe_distance_m(CS.v_ego,CS)\n accel_multipliers = OrderedDict([\n # (distance in m, acceleration fraction)\n (0.6 * safe_dist_m, 0.15),\n (1.0 * safe_dist_m, 0.2),\n (3.0 * safe_dist_m, 0.4)])\n vrel_multipliers = OrderedDict([\n # vrel m/s, accel mult\n (0. , 1.),\n (10., 1.5)])\n\n return min(accel_mult * _interp_map(lead.vRel, vrel_multipliers) * _interp_map(lead.dRel, accel_multipliers),1.0)\n else:\n return min(accel_mult * 0.4, 1.0)",
"def min_power_output_rule(_m, g):\r\n\r\n return float(self.data.existing_units.loc[g, ('PARAMETERS', 'MIN_GEN')])",
"def calc_jump_power(block_count, total_mass):\n ideal = math.ceil(total_mass * 0.5)\n a = 50.0 - 100.0 * block_count * total_mass\n return (-0.24 * total_mass)*a*a + 4600.0*a + 230000.0 + 1200.0 * ideal",
"def get_max_heating_power(self, max_electric_power = None):\n \n if max_electric_power is None:\n self.max_heating = self.nominal_power*self.cop_cooling[self.time_step]\n else:\n self.max_heating = min(max_electric_power, self.nominal_power)*self.cop_cooling[self.time_step]\n return self.max_heating",
"def max_power_output_wind_rule(_m, g, t):\r\n\r\n # Existing wind generators\r\n if g in m.G_E_WIND:\r\n return m.p_total[g, t] <= m.Q_WIND[g, t] * m.P_MAX[g]\r\n\r\n # Candidate wind generators\r\n if g in m.G_C_WIND:\r\n return m.p_total[g, t] <= m.Q_WIND[g, t] * m.b[g]",
"def fuel_required(mass):\n return mass // 3 - 2",
"def energyMultiplier(self) -> float:\n return self._getMultiplier('energy')",
"def _rew_power_penalty(self) -> float:\n reward = 0.0\n # Get cardiac output and mean arterial pressure from states\n card_out = self.states[-1]\n martp = self.states[7]\n # Compute power\n power = martp * card_out * 0.0022\n\n # Get params\n params = self.params_reward[\"power_penalty\"]\n\n # Compute reward continuous or discrete\n if self.type_rew == \"continuous\":\n l_param = params[\"continuous\"][\"l\"]\n m_param = params[\"continuous\"][\"m\"]\n off_param = params[\"continuous\"][\"off\"]\n # ReLu continuous parametrized function\n reward = m_param * (\n -1 / l_param * np.log(1 + np.exp(l_param * (power - off_param)))\n )\n\n if self.type_rew == \"discrete\":\n if power < params[\"discrete\"][\"low_threshold\"]:\n reward = params[\"discrete\"][\"low_rew\"]\n elif (\n params[\"discrete\"][\"low_threshold\"]\n <= power\n < params[\"discrete\"][\"high_threshold\"]\n ):\n reward = params[\"discrete\"][\"intermed_rew\"]\n else:\n reward = params[\"discrete\"][\"high_rew\"]\n return reward",
"def f_molGas_dyn(self):\n# print self.M_gas, self.M_dyn\n return self.M_gas / self.M_dyn",
"def wavelength_rel(self) -> float:\n wavelength_rel = (\n sc.h\n / np.sqrt(\n 2 * sc.m_e * sc.e * 1000 * self.voltage * (1 + (sc.e * 1000 * self.voltage) / (2 * sc.m_e * sc.c**2))\n )\n * (10**10)\n )\n return wavelength_rel"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Function that checks that the static friction / tractive force is not exceeded for a given torque and static friction coefficient. If the torque is higher than the limit, the wheel will slip.
|
def stat_traction(torque, N, wheelrad, fric=1):
if torque > (fric*N)*wheelrad:
#print("Too much torque, will slip")
return False
return True
|
[
"def calc_blade_friction_force():\r\n # return c_a * d * w\r\n return 0",
"def constrain(w):\n VEL_LIMIT = 1000 # rad/s\n w = VEL_LIMIT if w >= VEL_LIMIT else w\n w = -VEL_LIMIT if w <= -VEL_LIMIT else w\n return w",
"def friction(self, magnitude) -> None:\n # Stops the player\n if abs(self.vector.x) < magnitude:\n self.vector.x = 0\n self.vector.y = 0\n #slows down the player\n else:\n force = - (copysign(magnitude, self.vector.x))\n self.changeXVector(force)",
"def opt_force_ratio(F_nlg_allow, a_lst):\n m_plane = 97400 # [kg] MRW\n m_car = 22000 # [kg] Mass of external vehicle\n m_tot = m_plane + m_car # [kg] Total mass\n Roll_fric = 0.02 # [-] Rolling friction coefficient\n\n F_tot = m_tot*max(a_lst) + Roll_fric*m_tot*9.81 # [N] Total force req to move plane at max acceleration\n print(\"\\nMax force: {F} N - POWER RATIO EXTERNAL-INTERNAL: {rat}\\n\".format(F=F_tot, rat=F_nlg_allow/F_tot))\n if F_nlg_allow/F_tot > 1:\n return 1\n return F_nlg_allow/F_tot",
"def check_backtrack(self):\n differential = self.character.stats[4] - self.dungeonlevel\n if differential < 0:\n cutoff = float(3 - differential) / float(6 - 6 * differential)\n else:\n cutoff = float(3 + 5 * differential) / float(6 + 6 * differential)\n return random.random() < cutoff",
"def limit_check(self, theta, x):\r\n if 0 <= x < self.x_threshold or - self.x_threshold < x <= 0:\r\n if -self.theta_lim_bonus <= theta < 0 or \\\r\n 0 < theta <= self.theta_lim_bonus or \\\r\n 2 * math.pi - self.theta_lim_bonus <= theta < 2 * math.pi or \\\r\n 2 * math.pi < theta <= 2 * math.pi + self.theta_lim_bonus:\r\n self.bonus_theta = 3\r\n else:\r\n self.bonus_theta = 1\r\n\r\n if 0 <= x < self.x_threshold*2/4 or - self.x_threshold*2/4 < x <= 0:\r\n \"\"\"Gives bonus points if inside this x range\"\"\"\r\n self.bonus_x = 3\r\n else:\r\n self.bonus_x = 1\r\n\r\n\r\n \"\"\"Checks if the cartpole is inside the tracks limits\"\"\"\r\n if ((2*math.pi - self.theta_threshold_radians) <= theta <= 2*math.pi) or (2*math.pi <= theta <= 2*math.pi + self.theta_threshold_radians):\r\n \"\"\"Checks if the pendulum is above ground (swingup clockwise), if so, set self.above = True\"\"\"\r\n self.above = True\r\n elif (- self.theta_threshold_radians <= theta <= 0) or (0 <= theta <= self.theta_threshold_radians):\r\n \"\"\"Checks if the pendulum is above ground (swingup anti-clockwise), if so, set self.above = True\"\"\"\r\n self.above = True\r\n\r\n if self.above is True:\r\n if self.theta_threshold_radians == math.pi/2:\r\n if (3/2*math.pi >= theta >= math.pi/2) or (theta > 5/2*math.pi) or (theta < - math.pi/2):\r\n # pendeln har passerat marken\r\n # avbryt!\r\n return True\r\n if ((2 * math.pi + self.theta_threshold_radians) <= theta) \\\r\n or (3/2*math.pi <= theta <= 2 * math.pi - self.theta_threshold_radians) \\\r\n or (self.theta_threshold_radians <= theta <= math.pi/2) \\\r\n or (theta <= - self.theta_threshold_radians):\r\n return True\r\n else:\r\n return False\r\n else:\r\n return True",
"def check_joint_limit(self, curve, info):\n low_mask = (curve < self.joint_lower_limit - 5e-3).any()\n high_mask = curve > self.joint_upper_limit + 5e-3\n over_joint_limit = (low_mask * high_mask).any() #\n info[\"violate_limit\"] = over_joint_limit\n info[\"terminate\"] = info[\"terminate\"] and (not over_joint_limit)",
"def check_scf_criteria(self):\n tols = ['toldfe', 'tolwfr', 'toldff', 'tolrff', 'tolvrs']\n nonzeros = 0\n for i in tols:\n if i in self.params.keys() and self.params[i] is not None:\n if self.params[i] != 0.0:\n nonzeros += 1\n if nonzeros == 1:\n return True\n else:\n print(\"========================================\\n\")\n print(\" WARNING !!!\\n\")\n print(\"========================================\\n\")\n print(\"you must set one and only one of variables\\n\")\n print(\"below to differ from zero.\\n\")\n print(\"[toldfe, tolwfr, toldff, tolrff, tolvrs]\\n\")\n #print(nonzeros)\n sys.exit(1)",
"def check_scf_criteria(self):\n tols = ['toldfe', 'tolwfr', 'toldff', 'tolrff', 'tolvrs']\n nonzeros = 0\n for i in tols:\n if i in self.params.keys() and self.params[i].as_val() is not None:\n if self.params[i].as_val(t=float, dim=0) != 0.0:\n nonzeros += 1\n if nonzeros == 1:\n return True\n else:\n print(\"========================================\\n\")\n print(\" WARNING !!!\\n\")\n print(\"========================================\\n\")\n print(\"you must set one and only one of variables\\n\")\n print(\"below to differ from zero.\\n\")\n print(\"[toldfe, tolwfr, toldff, tolrff, tolvrs]\\n\")\n #print(nonzeros)\n sys.exit(1)",
"def validate_safety_restrictions(intersection: Intersection, fixed_time_schedule: FixedTimeSchedule,\n tolerance: float = 10**(-2)) -> None:\n validate_bounds(intersection=intersection, fts=fixed_time_schedule, tolerance=tolerance)\n validate_conflicts(intersection=intersection, fts=fixed_time_schedule, tolerance=tolerance)\n validate_other_sg_relations(intersection=intersection, fts=fixed_time_schedule, tolerance=tolerance)\n validate_completeness(intersection=intersection, fts=fixed_time_schedule)\n validate_fixed_orders(intersection=intersection, fts=fixed_time_schedule)",
"def check_force_norm(forces: NDArray, threshold: float = MAX_FORCE_NORM):\n per_atom_force_norms = np.linalg.norm(forces, axis=-1)\n\n if (per_atom_force_norms > threshold).any():\n bad_inds = np.where(per_atom_force_norms > threshold)[0]\n max_atom_force_norm = np.max(per_atom_force_norms)\n message = f\"\"\"\n Minimization failed to reduce large forces below threshold:\n max |frc| = {max_atom_force_norm} > {threshold}\n {len(bad_inds)} / {len(forces)} atoms exceed threshold\n \"\"\"\n raise MinimizationError(message)",
"def test_Cutoff(self):\n\n for top in (self.psf_c, self.psf_x, self.psf_v):\n for method in [CutoffNonPeriodic]:\n system = top.createSystem(self.params, nonbondedMethod=method,\n nonbondedCutoff=2*nanometer,\n constraints=HBonds)\n cutoff_distance = 0.0*nanometer\n cutoff_check = 2.0*nanometer\n for force in system.getForces():\n if isinstance(force, NonbondedForce):\n cutoff_distance = force.getCutoffDistance()\n self.assertEqual(cutoff_distance, cutoff_check)",
"def accident_detected(vel: float, accel: float) -> bool:\n if vel >= 15 and accel >= 4:\n return True\n return False",
"def algorithm_should_terminate(self, config, check_cycling):\n if self.should_terminate:\n # self.primal_bound_progress[0] can only be inf or -inf.\n # If the current primal bound equals inf or -inf, we can infer there is no solution.\n if self.primal_bound == self.primal_bound_progress[0]:\n self.results.solver.termination_condition = tc.noSolution\n else:\n self.results.solver.termination_condition = tc.feasible\n return True\n return (\n self.bounds_converged()\n or self.reached_iteration_limit()\n or self.reached_time_limit()\n or self.reached_stalling_limit()\n or (check_cycling and self.iteration_cycling())\n )",
"def physics():\n\n def problem(*stuff, info='', giveup=0.0):\n while 1:\n print(X.format(*stuff))\n try:\n loop = eval(Input(info))\n except KeyboardInterrupt:\n return -1\n if loop != giveup:\n print(\"{} is Wrong\".format(loop))\n else:\n break\n print(\"{} is Correct\\n=====NewProblem=====\".format(loop))\n\n X = '\\n{} {}\\n{} {}\\n{} {}'\n A, VI, T, D, VF = 'Acceleration:', 'Initial Speed:', 'Time:', 'Distance:', 'Final Speed:'\n while 1:\n a, vi, t, d, c = randint(1, 6), randrange(10), randint(1, 8), 5 * randrange(13), randrange(5)\n vf = randint(vi, 10)\n if c == 0:\n c = problem(VI, vi, T, t, A, a, info=D + ' ', giveup=vi * t + .5 * a * t ** 2)\n elif c == 1:\n c = problem(VF, vf, VI, vi, A, a, info=D + ' ', giveup=(vf * vf - vi * vi) / (2 * a))\n elif c == 2:\n c = problem(VI, vi, A, a, D, d, info=VF + ' ', giveup=(vi * vi + 2 * a * d) ** .5)\n elif c == 3:\n c = problem(VI, vi, A, a, T, t, info=VF + ' ', giveup=vi + a * t)\n else:\n c = problem(VF, vf, VI, vi, A, a, info=T + ' ', giveup=(vf - vi) / a)\n if c == -1: return",
"def friction_factor(self):\n return frifac(\n radius=self.particle_radius,\n dynamic_viscosity=self.dynamic_viscosity(),\n scf=self.slip_correction_factor(),\n )",
"def _constraint(self, action):\n return float(self.vehicle.crashed) + float(self.vehicle.lane_index[2] == 0)/15",
"def calculate_potential_field(self, pos_drones, pos_obstacles):\n alpha = beta = 0.005\n # --- Repulsion drones\n for position in pos_drones:\n distance = (self.location - position).magnitude()\n if 0 < distance < OBSERVABLE_RADIUS:\n # Proporcional to the distance. The closer the stronger needs to be\n f_repulsion = (position - self.location).normalize() / distance \n #f_repulsion = derivativeBivariate(alpha, beta, position, self.location) / SAMPLE_TIME\n #f_repulsion = limit(f_repulsion, SEEK_FORCE)\n self.applyForce(-f_repulsion)\n\n # --- Repulsion obstacles \n for position in pos_obstacles:\n distance = (self.location - position).magnitude()\n if 0 < distance < OBSERVABLE_RADIUS:\n # Proporcional to the distance. The closer the stronger needs to be\n f_repulsion = 2*(position - self.location).normalize() / sqrt(distance)\n #f_repulsion = derivativeBivariate(alpha, beta, position, self.location) / SAMPLE_TIME\n #f_repulsion = limit(f_repulsion, SEEK_FORCE)\n self.applyForce(-f_repulsion)\n\n # --- Repulsion walls\n # Distance to Bottom\n distance = UPPER_Y - self.location[1] \n # Proporcional to the distance. The closer the stronger needs to be\n if distance > 0:\n f_repulsion = pygame.math.Vector2(0,2) / sqrt(distance)\n else:\n f_repulsion = pygame.math.Vector2(0,2) * SEEK_FORCE\n self.applyForce(-f_repulsion)\n \n # Distance to Top\n distance = self.location[1] - LOWER_Y \n # Proporcional to the distance. The closer the stronger needs to be\n if distance > 0:\n f_repulsion = pygame.math.Vector2(0,-2) / sqrt(distance)\n else:\n f_repulsion = pygame.math.Vector2(0,-2) * SEEK_FORCE\n self.applyForce(-f_repulsion)",
"def above_freezing(celcius: float) -> bool:\n return celcius > 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|