from neuron import *
h('''
begintemplate A
public x, s, o, xa, oa, f, p
strdef s
objref o, oa[2]
double xa[3]
proc init() { \
x = $1 \
}
func f() { return $1*xa[$2] }
proc p() { x += 1 }
endtemplate A
''')
class A1(hclass(h.A)):
    def __init__(self, arg):  # note, arg used by h.A
        # self.bp = hoc.HocObject.baseattr(self, 'p')
        self.bp = self.baseattr('p')
    def p(self):
        self.bp()
        return self.x
|
import time
def prefixes(word):
"A list of the initial sequences of a word, not including the complete word."
return [word[:i] for i in range(len(word))]
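# Example: prefixes('WORD') == ['', 'W', 'WO', 'WOR'] -- the empty prefix is included,
# the complete word is not.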
def readwordlist(filename):
    with open(filename) as file:
        text = file.read().upper()
    wordset = set(text.splitlines())
prefixset = set(p for word in wordset for p in prefixes(word))
return wordset, prefixset
WORDS, PREFIXES = readwordlist(r"C:\Users\Yueleng\OneDrive\CS212\lesson6\words4k.txt")
def find_words(letters, pre='', results=None):
if results is None: results = set()
if pre in WORDS: results.add(pre)
if pre in PREFIXES:
for L in letters:
find_words(letters.replace(L, '', 1), pre+L, results)
return results
def word_plays(hand, board_letters):
"Find all word plays from hand that can be made to abut with a letter on board."
# Find prefix + L + suffix; L from board_letters, rest from hand
results = set()
for pre in find_prefixes(hand, '', set()):
for L in board_letters:
add_suffixes(removed(hand, pre), pre+L, results)
return results
def find_prefixes(hand, pre='', results=None):
"Find all prefixes (of words) that can be made from letters in hand."
if results is None: results = set()
if pre in PREFIXES:
results.add(pre)
for L in hand:
find_prefixes(hand.replace(L, '', 1), pre+L, results)
return results
# find_prefixes improved
prev_hand, prev_results = '', set() # cache for find_prefixes
def find_prefixes_cache(hand, pre='', results=None):
## Cache the most recent full hand (don't cache intermediate results)
global prev_hand, prev_results
if hand == prev_hand: return prev_results
if results is None: results = set()
if pre == '': prev_hand, prev_results = hand, results
# Now do the computation
if pre in WORDS or pre in PREFIXES: results.add(pre)
if pre in PREFIXES:
for L in hand:
find_prefixes_cache(hand.replace(L, '', 1), pre+L, results)
return results
def add_suffixes(hand, pre, results):
"""Return the set of words that can be formed by extending pre with letters in hand."""
if pre in WORDS: results.add(pre)
if pre in PREFIXES:
for L in hand:
add_suffixes(hand.replace(L, '', 1), pre+L, results)
return results
def removed(letters, remove):
"Return a str of letters, but with each letter in remove removed once."
for L in remove:
letters = letters.replace(L, '', 1)
return letters
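# Example: removed('LETTERS', 'ET') == 'LTERS' ('E' and 'T' are each removed once).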
def timedcall(fn, *args):
"Call function with args; return the time in seconds and result."
t0 = time.time()
result = fn(*args)
t1 = time.time()
return t1-t0, result
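# Rough timing sketch for the cache above (illustrative; requires WORDS/PREFIXES to be
# loaded). The second cached call with the same hand should return almost immediately:
#   print(timedcall(find_prefixes, 'ADEINRST'))        # full recursive search
#   print(timedcall(find_prefixes_cache, 'ADEINRST'))  # searches once, then caches
#   print(timedcall(find_prefixes_cache, 'ADEINRST'))  # answered from the cache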
hands = { ## Regression test
'ABECEDR': set(['BE', 'CARE', 'BAR', 'BA', 'ACE', 'READ', 'CAR', 'DE', 'BED', 'BEE',
'ERE', 'BAD', 'ERA', 'REC', 'DEAR', 'CAB', 'DEB', 'DEE', 'RED', 'CAD',
'CEE', 'DAB', 'REE', 'RE', 'RACE', 'EAR', 'AB', 'AE', 'AD', 'ED', 'RAD',
'BEAR', 'AR', 'REB', 'ER', 'ARB', 'ARC', 'ARE', 'BRA']),
'AEINRST': set(['SIR', 'NAE', 'TIS', 'TIN', 'ANTSIER', 'TIE', 'SIN', 'TAR', 'TAS',
'RAN', 'SIT', 'SAE', 'RIN', 'TAE', 'RAT', 'RAS', 'TAN', 'RIA', 'RISE',
'ANESTRI', 'RATINES', 'NEAR', 'REI', 'NIT', 'NASTIER', 'SEAT', 'RATE',
'RETAINS', 'STAINER', 'TRAIN', 'STIR', 'EN', 'STAIR', 'ENS', 'RAIN', 'ET',
'STAIN', 'ES', 'ER', 'ANE', 'ANI', 'INS', 'ANT', 'SENT', 'TEA', 'ATE',
'RAISE', 'RES', 'RET', 'ETA', 'NET', 'ARTS', 'SET', 'SER', 'TEN', 'RE',
'NA', 'NE', 'SEA', 'SEN', 'EAST', 'SEI', 'SRI', 'RETSINA', 'EARN', 'SI',
'SAT', 'ITS', 'ERS', 'AIT', 'AIS', 'AIR', 'AIN', 'ERA', 'ERN', 'STEARIN',
'TEAR', 'RETINAS', 'TI', 'EAR', 'EAT', 'TA', 'AE', 'AI', 'IS', 'IT',
'REST', 'AN', 'AS', 'AR', 'AT', 'IN', 'IRE', 'ARS', 'ART', 'ARE']),
'DRAMITC': set(['DIM', 'AIT', 'MID', 'AIR', 'AIM', 'CAM', 'ACT', 'DIT', 'AID', 'MIR',
'TIC', 'AMI', 'RAD', 'TAR', 'DAM', 'RAM', 'TAD', 'RAT', 'RIM', 'TI',
'TAM', 'RID', 'CAD', 'RIA', 'AD', 'AI', 'AM', 'IT', 'AR', 'AT', 'ART',
'CAT', 'ID', 'MAR', 'MA', 'MAT', 'MI', 'CAR', 'MAC', 'ARC', 'MAD', 'TA',
'ARM']),
'ADEINRST': set(['SIR', 'NAE', 'TIS', 'TIN', 'ANTSIER', 'DEAR', 'TIE', 'SIN', 'RAD',
'TAR', 'TAS', 'RAN', 'SIT', 'SAE', 'SAD', 'TAD', 'RE', 'RAT', 'RAS', 'RID',
'RIA', 'ENDS', 'RISE', 'IDEA', 'ANESTRI', 'IRE', 'RATINES', 'SEND',
'NEAR', 'REI', 'DETRAIN', 'DINE', 'ASIDE', 'SEAT', 'RATE', 'STAND',
'DEN', 'TRIED', 'RETAINS', 'RIDE', 'STAINER', 'TRAIN', 'STIR', 'EN',
'END', 'STAIR', 'ED', 'ENS', 'RAIN', 'ET', 'STAIN', 'ES', 'ER', 'AND',
'ANE', 'SAID', 'ANI', 'INS', 'ANT', 'IDEAS', 'NIT', 'TEA', 'ATE', 'RAISE',
'READ', 'RES', 'IDS', 'RET', 'ETA', 'INSTEAD', 'NET', 'RED', 'RIN',
'ARTS', 'SET', 'SER', 'TEN', 'TAE', 'NA', 'TED', 'NE', 'TRADE', 'SEA',
'AIT', 'SEN', 'EAST', 'SEI', 'RAISED', 'SENT', 'ADS', 'SRI', 'NASTIER',
'RETSINA', 'TAN', 'EARN', 'SI', 'SAT', 'ITS', 'DIN', 'ERS', 'DIE', 'DE',
'AIS', 'AIR', 'DATE', 'AIN', 'ERA', 'SIDE', 'DIT', 'AID', 'ERN',
'STEARIN', 'DIS', 'TEAR', 'RETINAS', 'TI', 'EAR', 'EAT', 'TA', 'AE',
'AD', 'AI', 'IS', 'IT', 'REST', 'AN', 'AS', 'AR', 'AT', 'IN', 'ID', 'ARS',
'ART', 'ANTIRED', 'ARE', 'TRAINED', 'RANDIEST', 'STRAINED', 'DETRAINS']),
'ETAOIN': set(['ATE', 'NAE', 'AIT', 'EON', 'TIN', 'OAT', 'TON', 'TIE', 'NET', 'TOE',
'ANT', 'TEN', 'TAE', 'TEA', 'AIN', 'NE', 'ONE', 'TO', 'TI', 'TAN',
'TAO', 'EAT', 'TA', 'EN', 'AE', 'ANE', 'AI', 'INTO', 'IT', 'AN', 'AT',
'IN', 'ET', 'ON', 'OE', 'NO', 'ANI', 'NOTE', 'ETA', 'ION', 'NA', 'NOT',
'NIT']),
'SHRDLU': set(['URD', 'SH', 'UH', 'US']),
'SHROUDT': set(['DO', 'SHORT', 'TOR', 'HO', 'DOR', 'DOS', 'SOUTH', 'HOURS', 'SOD',
'HOUR', 'SORT', 'ODS', 'ROD', 'OUD', 'HUT', 'TO', 'SOU', 'SOT', 'OUR',
'ROT', 'OHS', 'URD', 'HOD', 'SHOT', 'DUO', 'THUS', 'THO', 'UTS', 'HOT',
'TOD', 'DUST', 'DOT', 'OH', 'UT', 'ORT', 'OD', 'ORS', 'US', 'OR',
'SHOUT', 'SH', 'SO', 'UH', 'RHO', 'OUT', 'OS', 'UDO', 'RUT']),
'TOXENSI': set(['TO', 'STONE', 'ONES', 'SIT', 'SIX', 'EON', 'TIS', 'TIN', 'XI', 'TON',
'ONE', 'TIE', 'NET', 'NEXT', 'SIN', 'TOE', 'SOX', 'SET', 'TEN', 'NO',
'NE', 'SEX', 'ION', 'NOSE', 'TI', 'ONS', 'OSE', 'INTO', 'SEI', 'SOT',
'EN', 'NIT', 'NIX', 'IS', 'IT', 'ENS', 'EX', 'IN', 'ET', 'ES', 'ON',
'OES', 'OS', 'OE', 'INS', 'NOTE', 'EXIST', 'SI', 'XIS', 'SO', 'SON',
'OX', 'NOT', 'SEN', 'ITS', 'SENT', 'NOS'])}
def test_word_play():
assert word_plays('ADEQUAT', set('IRE')) == {'RATE', 'AI', 'DATE', 'TEE', 'IT', 'EAU', 'TI', 'DUE', 'QUIT', 'RE', 'RET', 'QAID', 'RAD', 'ART', 'AE', 'AIT', 'AID', 'ER', 'ARE', 'ETA', 'QUIET', 'EAR', 'ED', 'TEAR', 'AQUAE', 'TAR', 'DE', 'ID', 'AREA', 'RED', 'DUI', 'QI', 'TIE', 'TRUE', 'URD', 'ET', 'QUITE', 'IDEA', 'ATE', 'TAE', 'DIT', 'TEA', 'TRADE', 'TED', 'QUID', 'EQUID', 'DEAR', 'DEE', 'TUI', 'AR', 'RAT', 'EAT', 'RUE', 'ADEQUATE', 'ERA', 'RUT', 'READ', 'DIE', 'QUADRATE'}
return 'test passed'
print(test_word_play())
# longest words play
def longest_words(hand, board_letters):
    words_list = list(word_plays(hand, board_letters))
    words_list.sort(key=len, reverse=True)
    return words_list
def longest_words_u(hand, board_letters):
words = word_plays(hand, board_letters)
    # sorted() accepts the set and returns a sorted list
return sorted(words, reverse=True, key=len)
print(longest_words_u('ADEQUAT',set('IRE')))
# Word Score
POINTS = dict(A=1, B=3, C=3, D=2, E=1, F=4, G=2, H=4, I=1, J=8, K=5, L=1, M=3, N=1, O=1, P=3, Q=10, R=1, S=1, T=1, U=1, V=4, W=4, X=8, Y=4, Z=10, _=0)
def word_score(word):
return sum(POINTS[L] for L in word)
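# Worked example: word_score('BENCH') == 3 + 1 + 1 + 3 + 4 == 12
# (B=3, E=1, N=1, C=3, H=4 in POINTS above).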
# Top N Hands
def topn(hand, board_letters, n=10):
"Return a list of the top n words that hand can play, sorted by word score."
words = word_plays(hand, board_letters)
return sorted(words, reverse=True, key=word_score)[:n]
print(topn('ADEQUAT', set('IRE')))
# Row Plays
class anchor(set):
"An anchor is where a new word can be placed; has a set of allowable letters."
LETTERS = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
ANY = anchor(LETTERS) # The anchor that can be any letter
# |A.....BE.C...D.|
mnx, moab = anchor('MNX'), anchor('MOAB')
a_row = ['|', 'A', mnx, moab, '.', '.', ANY, 'B', 'E', ANY, 'C', ANY, '.', ANY,
'D', ANY, '|']
a_hand = 'ABCEHKN'
def row_plays(hand, row):
"Return a set of legal plays in row. A row play is an (start, 'WORD') pair"
results = set()
## To each allowable prefix, add all suffixes, keeping words
for (i, sq) in enumerate(row[1:-1], 1):
if isinstance(sq, anchor):
pre, maxsize = legal_prefix(i, row)
            if pre: ## Add to the letters already on the board
start = i - len(pre)
add_suffixes(hand, pre, start, row, results, anchored=False)
else: ## Empty to left: go through the set of all possible prefixes
# for pre in find_prefixes(hand):
for pre in find_prefixes_cache(hand):
if len(pre) <= maxsize:
start = i - len(pre)
add_suffixes(removed(hand, pre), pre, start, row, results, anchored=False)
return results
def legal_prefix(i, row):
"""A legal prefix of an anchor at row[i] is either a string or letters
already on the board, or new letters that fit into empty space.
Return the tuple (prefix_on_board, maxisize) to indicate this.
E.g. legal_prefix(9, a_row) == ('BE', 2) and for 6, ('', 2)"""
s = i # starting index
while is_letter(row[s-1]): s -= 1
if s < i: ## There is a prefix
return ''.join(row[s:i]), i-s
while is_empty(row[s-1]) and not isinstance(row[s-1], anchor): s -= 1
return ('', i-s)
def is_empty(sq):
return sq == '.' or sq == '*' or isinstance(sq, anchor)
def is_letter(sq):
return isinstance(sq, str) and sq in LETTERS
def add_suffixes(hand, pre, start, row, results, anchored=True):
"All all possible suffixes, and accumelate (start, word) pairs in results"
i = start + len(pre)
if pre in WORDS and anchored and not is_letter(row[i]):
results.add((start, pre))
if pre in PREFIXES:
sq = row[i]
if is_letter(sq):
add_suffixes(hand, pre+sq, start, row, results)
elif is_empty(sq):
possibilities = sq if isinstance(sq, anchor) else ANY
for L in hand:
if L in possibilities:
add_suffixes(hand.replace(L, '', 1), pre+L, start, row, results)
return results
def test_row():
assert legal_prefix(2, a_row) == ('A', 1)
assert legal_prefix(3, a_row) == ('', 0)
assert legal_prefix(6, a_row) == ('', 2)
assert legal_prefix(9, a_row) == ('BE', 2)
assert legal_prefix(11, a_row) == ('C', 1)
assert legal_prefix(13, a_row) == ('', 1)
assert is_empty('.') and is_empty(ANY) and is_empty(anchor('ABC'))
assert not is_empty('L') and not is_empty('|')
assert row_plays(a_hand, a_row) == {(3, 'AB'), (3, 'BAN'), (14, 'DE'), (12, 'BED'), (7, 'BENCH'), (12, 'HAD'), (3, 'BAH'), (3, 'AE'), (13, 'EDH'), (3, 'AH'), (12, 'BAD'), (3, 'BA'), (10, 'CAN'), (12, 'AND'), (1, 'ANA'), (3, 'AN'), (12, 'END'), (13, 'ED'), (13, 'AD'), (3, 'BACKBENCH'), (3, 'BEN'), (3, 'BE'), (3, 'ANE'), (1, 'AN'), (10, 'CAB'), (3, 'ACE'), (12, 'CAD')}
return 'test passed'
print(test_row())
print(find_prefixes_cache('ABCDE'))
print(find_prefixes('ABCDE'))
# Show the Board
def a_board():
return list(map(list, ['|||||||||||||||||',
'|J............I.|',
'|A.....BE.C...D.|',
'|GUY....F.H...L.|',
'|||||||||||||||||']))
def show(board):
"Print the board."
result = ''
for row in board:
result += ''.join(row) + '\n'
print(result)
# def show_u(board):
# "Print the board."
# for row in board:
# for sq in row:
# print sq,
# print
show(a_board())
# Horizontal Plays
def horizontal_plays_a(hand, board):
"Find all horizonal plays -- ((i,j), word) pairs -- across all rows."
results = set()
for (j, row) in enumerate(board[1: -1], 1):
set_anchors(row, j, board)
        results |= set(((i, j), word) for (i, word) in row_plays(hand, row))
return results
def horizontal_plays(hand, board):
"Find all horizonal plays -- (score, (i,j), word) pairs -- across all rows."
results = set()
for (j, row) in enumerate(board[1: -1], 1):
set_anchors(row, j, board)
for (i, word) in row_plays(hand, row):
score = calculate_score(board, (i, j), ACROSS, hand, word)
results.add( ( score, (i, j), word) )
return results
def all_plays_a(hand, board):
"""
All plays in both directions. A play is a (score, pos, dir, word) tuple,
where pos is an (i, j) pair, and dir is ACROSS or DOWN.
"""
hplays = horizontal_plays(hand, board)
vplays = horizontal_plays(hand, transpose(board))
results = set()
for (score, (i, j), word) in hplays:
results.add((score, (i,j), ACROSS, word))
for (score, (j, i), word) in vplays:
results.add((score, (i,j), DOWN, word))
return results
def all_plays(hand, board):
"""
    All plays in both directions. A play is a (score, pos, dir, word) tuple,
where pos is an (i, j) pair, and dir is ACROSS or DOWN.
"""
hplays = horizontal_plays(hand, board)
vplays = horizontal_plays(hand, transpose(board))
return (set((score, (i,j), ACROSS, w) for (score, (i,j), w) in hplays) |
set((score, (i,j), DOWN, w) for (score, (j,i), w) in vplays) )
# Incrementing 1 in the i direction, (ACROSS)
# Incrementing 1 in the j direction, (DOWN)
ACROSS, DOWN = (1, 0), (0, 1)
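# So a play at pos=(i, j) going ACROSS covers board[j][i], board[j][i+1], ...:
# i indexes the column within a row, j indexes the row within the board.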
# Set Anchors
# Mutates row in place so that it can then be fed to row_plays(hand, row)
def set_anchors(row, j, board):
"""
Anchors are empty squares with a neighboring letter. Some are restricted
    by cross-words to be only a subset of letters.
"""
for (i, sq) in enumerate(row[1:-1], 1):
neighborlist = (N, S, E, W) = neighbors(board, i, j)
        # Anchors are squares adjacent to a letter, plus the '*' square.
if sq == '*' or (is_empty(sq) and any(map(is_letter, neighborlist))):
if is_letter(N) or is_letter(S):
# Find letters that fit with the cross (vertical) word
(j2, w) = find_cross_word(board, i, j)
row[i] = anchor(L for L in LETTERS if w.replace('.', L) in WORDS)
else: # Unrestricted empty square -- any letter will fit.
row[i] = ANY
# find_cross_word : (board, i, j) => (j2, w) where j2 is the starting row and w is the (possibly dotted) cross word
# find_cross_word(a_board(), 2, 2) returns (2, '.U')
# find_cross_word(a_board(), 1, 2) returns (1, 'JAG')
w = '.U'
print(anchor(L for L in LETTERS if w.replace('.', L) in WORDS))
def find_cross_word(board, i, j):
"""
Find the vertical word that crosses board[j][i]. Return (j2, w),
where j2 is the starting row, and w is the word
"""
sq = board[j][i]
w = sq if is_letter(sq) else '.'
for j2 in range(j, 0, -1):
sq2 = board[j2-1][i]
if is_letter(sq2): w = sq2 + w
else: break
for j3 in range(j+1, len(board)):
sq3 = board[j3][i]
if is_letter(sq3): w = w + sq3
else: break
return (j2, w)
def neighbors(board, i, j):
"""
Return a list of the contents of the four neighboring squares
in the order N, S, E, W.
"""
return [board[j-1][i], board[j+1][i],
board[j][i+1], board[j][i-1]]
def transpose(matrix):
"""
Transpose e.g. [[1,2,3], [4,5,6]] to [[1,4], [2,5], [3,6]]
"""
return list(map(list, zip(*matrix)))
def transpose_u(matrix):
"""
Transpose e.g. [[1,2,3], [4,5,6]] to [[1,4], [2,5], [3,6]]
"""
return map(list, zip(*matrix))
# Final bird: scoring
def calculate_score(board, pos, direction, hand, word):
"Return the total score for this play"
total, crosstotal, word_mult = 0, 0, 1
starti, startj = pos
di, dj = direction
other_direction = DOWN if direction == ACROSS else ACROSS
for (n, L) in enumerate(word):
i, j = starti + n*di, startj + n*dj
sq = board[j][i]
b = BONUS[j][i]
word_mult *= (1 if is_letter(sq) else 3 if b == TW else 2 if b in (DW, '*') else 1)
letter_mult = (1 if is_letter(sq) else 3 if b == TL else 2 if b == DL else 1)
total += POINTS[L] * letter_mult
if isinstance(sq, anchor) and sq is not ANY and direction is not DOWN: # MAIN FUNC ONLY CALLED IN horizontal_plays
crosstotal += cross_word_score(board, L, (i, j), other_direction)
return crosstotal + word_mult * total
def cross_word_score(board, L, pos, direction):
"""
Return the score of a word made in the other direction from the main word
"""
i, j = pos
(j2, word) = find_cross_word(board, i, j)
return calculate_score(board, (i, j2), DOWN, L, word.replace('.', L))
def bonus_template(quadrant):
"""
Make a board from the upper-left quadrant.
"""
return mirror(list(map(mirror, quadrant.split())))
def bonus_template_u(quadrant):
"""
Make a board from the upper-left quadrant.
"""
return mirror(map(mirror, quadrant.split()))
def mirror(sequence): return sequence + sequence[-2::-1]
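# Example: mirror('ABCD') == 'ABCDCBA'. Each 9-character quadrant row below therefore
# expands to a 17-square row, and the 9 quadrant rows mirror into 17 board rows.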
SCRABBLE = bonus_template("""
|||||||||
|3..:...3
|.2...;..
|..2...:.
|:..2...:
|....2...
|.;...;..
|..:...:.
|3..:...*
""")
WWF = bonus_template("""
|||||||||
|...3..;.
|..:..2..
|.:..:...
|3..;...2
|..:...:.
|.2...3..
|;...:...
|...:...*
""")
BONUS = WWF
DW, TW, DL, TL = '23:;'
# Tests:
# Pending code
def show_all(board):
"Print the board"
for j, row in enumerate(board):
row1 = ''
for i, sq in enumerate(row):
row1 += sq if (is_letter(sq) or sq == '|') else BONUS[j][i]
print(row1+'\n')
def make_play(play, board):
"Put the word down on the board."
(score, (i, j), (di, dj), word) = play
for (n, L) in enumerate(word):
board[j + n*dj][i + n*di] = L
return board
NOPLAY = None
def best_play_a(hand, board):
# Return the highest-scoring play. Or None
all_possible_plays = all_plays(hand, board)
best_score = 0
best_play = (0, (0,0), (0,1), '')
for play in all_possible_plays:
score, _, _, _ = play
if score > best_score:
best_score = score
best_play = play
if all_possible_plays:
return best_play
return NOPLAY
def best_play(hand, board):
"Return the highest-scoring play. Or None"
plays = all_plays(hand,board)
return sorted(plays)[-1] if plays else NOPLAY
def show_best(hand, board):
print('Current board:')
show_all(board)
play = best_play(hand, board)
if play:
print('\nNew word: %r scores %d' % (play[-1], play[0]))
show_all(make_play(play,board))
else:
print('Sorry, no legal plays')
show_best(a_hand, a_board())
|
import os
import numpy as np
from deeptam_tracker.evaluation.rgbd_sequence import RGBDSequence
def position_diff(pose1, pose2):
"""Computes the position difference between two poses
pose1: Pose
pose2: Pose
"""
return (pose1.R.transpose() * pose1.t - pose2.R.transpose() * pose2.t).norm()
def angle_diff(pose1, pose2):
"""Computes the angular difference [in degrees] between two poses
pose1: Pose
pose2: Pose
"""
dot = pose1.R.row(2).dot(pose2.R.row(2))
return np.rad2deg(np.arccos(np.clip(dot, 0, 1)))
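# Note: position_diff compares the camera centres implied by each pose (R^T t differs
# from the usual centre -R^T t only by sign, which the norm ignores), and angle_diff
# appears to measure the angle between the two optical (z) axes; clipping the dot
# product to [0, 1] before arccos caps the reported angle at 90 degrees.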
def rgbd_rpe(gt_poses, pr_poses, timestamps, cmdline_options=None):
"""Runs the rgbd command line tool for the RPE error
gt_poses: list of Pose
pr_poses: list of Pose
timestamps: list of float
cmdline_options: str
Options passed to the evaluation tool
Default is '--fixed_delta'
"""
import tempfile
import shlex
from .rgbd_benchmark.evaluate_rpe import evaluate_rpe
assert len(pr_poses) == len(gt_poses)
assert len(pr_poses) == len(timestamps)
f, gt_txt = tempfile.mkstemp()
os.close(f)
RGBDSequence.write_rgbd_pose_format(gt_txt, gt_poses, timestamps)
f, pr_txt = tempfile.mkstemp()
os.close(f)
RGBDSequence.write_rgbd_pose_format(pr_txt, pr_poses, timestamps)
if cmdline_options is None:
cmdline_options = '--fixed_delta'
cmdline = '{0} {1} {2}'.format(cmdline_options, gt_txt, pr_txt)
result = evaluate_rpe(shlex.split(cmdline))
os.remove(gt_txt)
os.remove(pr_txt)
return result
def rgbd_ate(gt_poses, pr_poses, timestamps, cmdline_options=None):
"""Runs the rgbd command line tool for the ATE error
gt_poses: list of Pose
pr_poses: list of Pose
timestamps: list of float
cmdline_options: str
Options passed to the evaluation tool
Default is ''
"""
import tempfile
import shlex
from .rgbd_benchmark.evaluate_ate import evaluate_ate
assert len(pr_poses) == len(gt_poses)
assert len(pr_poses) == len(timestamps)
f, gt_txt = tempfile.mkstemp()
os.close(f)
RGBDSequence.write_rgbd_pose_format(gt_txt, gt_poses, timestamps)
f, pr_txt = tempfile.mkstemp()
os.close(f)
RGBDSequence.write_rgbd_pose_format(pr_txt, pr_poses, timestamps)
if cmdline_options is None:
cmdline_options = ''
cmdline = '{0} {1} {2}'.format(cmdline_options, gt_txt, pr_txt)
result = evaluate_ate(shlex.split(cmdline))
os.remove(gt_txt)
os.remove(pr_txt)
return result
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pinball loss."""
import pytest
import numpy as np
import tensorflow as tf
from tensorflow_addons.losses import quantiles
def test_config():
pin_obj = quantiles.PinballLoss(
reduction=tf.keras.losses.Reduction.SUM, name="pin_1"
)
assert pin_obj.name == "pin_1"
assert pin_obj.reduction == tf.keras.losses.Reduction.SUM
def test_all_correct_unweighted():
pin_obj = quantiles.PinballLoss()
y_true = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
loss = pin_obj(y_true, y_true)
assert loss == 0
def test_unweighted():
pin_obj = quantiles.PinballLoss()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.dtypes.float32)
loss = pin_obj(y_true, y_pred)
np.testing.assert_almost_equal(loss, 2.75, 3)
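# Hand check of the 2.75 above (tau defaults to 0.5): the per-element error
# y_true - y_pred is [-3, 1, -10, -13, -3, 3], and the pinball loss is
# mean(tau*max(e, 0) + (1-tau)*max(-e, 0)) = 0.5 * (3+1+10+13+3+3) / 6 = 2.75.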
def test_scalar_weighted():
pin_obj = quantiles.PinballLoss()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.dtypes.float32)
loss = pin_obj(y_true, y_pred, sample_weight=2.3)
np.testing.assert_almost_equal(loss, 6.325, 3)
def test_sample_weighted():
pin_obj = quantiles.PinballLoss()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.dtypes.float32)
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
loss = pin_obj(y_true, y_pred, sample_weight=sample_weight)
np.testing.assert_almost_equal(loss, 40.7 / 6, 3)
def test_timestep_weighted():
pin_obj = quantiles.PinballLoss()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1), dtype=tf.dtypes.float32)
sample_weight = tf.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = pin_obj(y_true, y_pred, sample_weight=sample_weight)
np.testing.assert_almost_equal(loss, 41.5 / 6, 3)
def test_zero_weighted():
pin_obj = quantiles.PinballLoss()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.dtypes.float32)
loss = pin_obj(y_true, y_pred, sample_weight=0)
np.testing.assert_almost_equal(loss, 0.0, 3)
def test_invalid_sample_weight():
pin_obj = quantiles.PinballLoss()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
sample_weight = tf.constant([3, 6, 5, 0], shape=(2, 2))
with pytest.raises(tf.errors.InvalidArgumentError, match="Incompatible shapes"):
pin_obj(y_true, y_pred, sample_weight=sample_weight)
def test_unweighted_quantile_0pc():
pin_obj = quantiles.PinballLoss(tau=0.0)
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.dtypes.float32)
loss = pin_obj(y_true, y_pred)
np.testing.assert_almost_equal(loss, 4.8333, 3)
def test_unweighted_quantile_10pc():
pin_obj = quantiles.PinballLoss(tau=0.1)
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.dtypes.float32)
loss = pin_obj(y_true, y_pred)
np.testing.assert_almost_equal(loss, 4.4166, 3)
def test_unweighted_quantile_90pc():
pin_obj = quantiles.PinballLoss(tau=0.9)
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.dtypes.float32)
loss = pin_obj(y_true, y_pred)
np.testing.assert_almost_equal(loss, 1.0833, 3)
def test_unweighted_quantile_100pc():
pin_obj = quantiles.PinballLoss(tau=1.0)
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.dtypes.float32)
loss = pin_obj(y_true, y_pred)
np.testing.assert_almost_equal(loss, 0.6666, 3)
def test_no_reduction():
pin_obj = quantiles.PinballLoss(reduction=tf.keras.losses.Reduction.NONE)
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.dtypes.float32)
loss = pin_obj(y_true, y_pred, sample_weight=2.3)
    np.testing.assert_almost_equal(loss, [5.3666, 7.28333], 3)
def test_sum_reduction():
pin_obj = quantiles.PinballLoss(reduction=tf.keras.losses.Reduction.SUM)
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.dtypes.float32)
loss = pin_obj(y_true, y_pred, sample_weight=2.3)
np.testing.assert_almost_equal(loss, 12.65, 3)
|
import os
from http.server import HTTPServer, SimpleHTTPRequestHandler
PORT=8000
def main():
os.chdir("public")
server_address = ('', PORT)
httpd = HTTPServer(server_address, SimpleHTTPRequestHandler)
print(f"Server: http://localhost:{PORT}")
    httpd.serve_forever()

if __name__ == "__main__":
    main()
|
from .registry import register, make, create, lookup
from . import normalize, other, periodic
from . import relu, sigmoid, tanh
|
"""
-------------------------------------------------
Project Name: LearnFlask
File Name: __init__.py
Author: cjiang
Date: 2020/5/22 5:40 PM
-------------------------------------------------
"""
|
#/*
#* SPDX-License-Identifier: Apache-2.0
#* Copyright 2019 Western Digital Corporation or its affiliates.
#*
#* Licensed under the Apache License, Version 2.0 (the "License");
#* you may not use this file except in compliance with the License.
#* You may obtain a copy of the License at
#*
#* http://www.apache.org/licenses/LICENSE-2.0
#*
#* Unless required by applicable law or agreed to in writing, software
#* distributed under the License is distributed on an "AS IS" BASIS,
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#* See the License for the specific language governing permissions and
#* limitations under the License.
#*/
class demo(object):
def __init__(self):
self.strDemoName = "freertos"
self.rtos_core = "freertos"
self.toolchain = ""
self.toolchainPath = ""
self.strGrpFile = ""
self.strComrvCacheSize = "0"
self.public_defs = [
'D_USE_RTOSAL',
'D_TICK_TIME_MS=4',
'D_ISR_STACK_SIZE=400',
'D_USE_FREERTOS'
]
self.listSconscripts = [
'freertos',
'rtosal',
'demo_rtosal'
]
self.listDemoSpecificCFlags = [
]
self.listDemoSpecificLinkerFlags = [
'-Wl,--defsym=__comrv_cache_size=' + self.strComrvCacheSize
]
self.listDemoSpecificTargets = [
'eh1', 'el2', 'hifive1', 'hifive-un'
]
|
#
# Copyright (C) 2019 DENSO WAVE INCORPORATED
#
# -*- coding: utf-8 -*-
#
# usage: python ./packing_pose.py
#
#!/usr/bin/env python
import os
import sys
import rospy
import actionlib
import math
import moveit_commander
import rosservice
import geometry_msgs.msg
from denso_cobotta_gripper.msg import GripperMoveAction, GripperMoveGoal
from denso_cobotta_driver.srv import GetMotorState
#
# Poses
#
joints_packing_old = [30, 10, 54, 1, 118, -107]
joints_packing_new = [90, -30, 120, -170, -94, 0]
joints_home = [0, 30, 100, 0, 50, 0]
#
# Parallel gripper
#
gripper_parallel_open = 0.015
gripper_parallel_close = 0.0
gripper_parallel_speed = 10.0
gripper_parallel_effort = 10.0
def arm_move(move_group, joint_goal):
pose_radian = [x / 180.0 * math.pi for x in joint_goal]
move_group.go(pose_radian, wait=True)
move_group.stop()
def gripper_move(gripper_client, width, speed, effort):
goal = GripperMoveGoal()
goal.target_position = width
goal.speed = speed
goal.effort = effort
gripper_client.send_goal(goal)
def is_motor_running():
rospy.wait_for_service('/cobotta/get_motor_state', 3.0)
try:
get_motor_state = rospy.ServiceProxy('/cobotta/get_motor_state',
GetMotorState)
res = get_motor_state()
return res.state
except rospy.ServiceException, e:
print >> sys.stderr, " Service call failed: %s" % e
def is_simulation():
service_list = rosservice.get_service_list()
if '/cobotta/get_motor_state' in service_list:
return False
return True
if __name__ == '__main__':
rospy.init_node("packing_pose")
moveit_commander.roscpp_initialize(sys.argv)
robot = moveit_commander.RobotCommander()
move_group = moveit_commander.MoveGroupCommander("arm")
gripper_client = actionlib.SimpleActionClient('/cobotta/gripper_move',
GripperMoveAction)
print(os.path.basename(__file__) + " sets pose goal and moves COBOTTA.")
print("0: Old packing pose, 1: New packing pose, 2: Home pose, Others: Exit")
while True:
input = raw_input(" Select the value: ")
if input.isdigit():
input = int(input)
joints = []
gripper_width = 0.0
if input == 0:
joints = joints_packing_old
gripper_width = gripper_parallel_open
elif input == 1:
joints = joints_packing_new
gripper_width = gripper_parallel_open
elif input == 2:
joints = joints_home
gripper_width = gripper_parallel_close
else:
break
if not is_simulation() and is_motor_running() is not True:
print >> sys.stderr, " Please motor on."
continue
gripper_move(gripper_client, gripper_width,
gripper_parallel_speed, gripper_parallel_effort)
arm_move(move_group, joints)
print("Bye...")
|
"""
Tic Tac Toe Player
"""
from exceptions import CustomException
import math
import copy
import sys
X = "X"
O = "O"
EMPTY = None
def initial_state():
"""
Returns starting state of the board.
"""
return [[EMPTY, EMPTY, EMPTY],
[EMPTY, EMPTY, EMPTY],
[EMPTY, EMPTY, EMPTY]]
def player(board):
"""
Returns player who has the next turn on a board.
"""
x_count = 0
o_count = 0
for i in range(0, len(board)):
for j in range(0, len(board)):
if board[i][j] == X:
x_count += 1
elif board[i][j] == O:
o_count += 1
if x_count == 0 and o_count == 0:
return X
elif x_count > o_count:
return O
return X
def actions(board):
"""
Returns set of all possible actions (i, j) available on the board.
"""
actions = set()
for i in range(0, len(board)):
for j in range(0, len(board)):
if board[i][j] == EMPTY:
actions.add((i,j))
return actions
def result(board, action):
"""
Returns the board that results from making move (i, j) on the board.
"""
new_board = copy.deepcopy(board)
    try:
        if new_board[action[0]][action[1]] == EMPTY:
            new_board[action[0]][action[1]] = player(new_board)
            return new_board
        raise CustomException.InvalidTTTMoveException("This action is invalid")
    except IndexError:
        raise CustomException.InvalidTTTMoveException("This action is invalid")
def winner(board):
"""
Returns the winner of the game, if there is one.
"""
tmp_board = copy.deepcopy(board)
tmp_board = [j for sub in tmp_board for j in sub]
for i in range(0, len(board) * 3, 3):
if checkline(i, i + 1, i + 2, tmp_board, X):
return X
elif checkline(i, i + 1, i + 2, tmp_board, O):
return O
for i in range(0, len(board)):
if checkline(i, i + 3, i + 6, tmp_board, X):
return X
elif checkline(i, i + 3, i + 6, tmp_board, O):
return O
if checkline(0, 4, 8, tmp_board, X):
return X
if checkline(0, 4, 8, tmp_board, O):
return O
if checkline(2, 4, 6, tmp_board, X):
return X
if checkline(2, 4, 6, tmp_board, O):
return O
return None
def checkline(a, b , c, tmp_board, player):
return tmp_board[a] == tmp_board[b] and \
tmp_board[b] == tmp_board[c] and \
tmp_board[a] == player
def terminal(board):
"""
Returns True if game is over, False otherwise.
"""
if winner(board):
return True
for row in board:
for val in row:
if not val:
return False
return True
def utility(board):
"""
Returns 1 if X has won the game, -1 if O has won, 0 otherwise.
"""
if winner(board) == X:
return 1
elif winner(board) == O:
return -1
return 0
def minimax(board):
"""
Returns the optimal action for the current player on the board.
"""
if terminal(board):
return None
pruning_var = 0
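    # pruning_var seeds an early-exit threshold at the draw score (0): the child
    # searches below return as soon as their running value crosses it, a rough
    # stand-in for alpha-beta pruning rather than the exact algorithm.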
best_action = None
if player(board) == X:
score = -math.inf
for action in actions(board):
v = minvalue(result(board, action), pruning_var)
if v > score:
score = v
best_action = action
return best_action
elif player(board) == O:
score = math.inf
for action in actions(board):
v = maxvalue(result(board, action), pruning_var)
if v < score:
score = v
best_action = action
return best_action
def maxvalue(board, pruning_var):
if terminal(board):
return utility(board)
v = -math.inf
for action in actions(board):
v = max(v, minvalue(result(board, action), pruning_var))
if v > pruning_var:
return v
pruning_var = v
return v
def minvalue(board, pruning_var):
if terminal(board):
return utility(board)
v = math.inf
for action in actions(board):
v = min(v, maxvalue(result(board, action), pruning_var))
if v < pruning_var:
return v
pruning_var = v
    return v
|
import unittest
import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.sraix import TemplateSRAIXNode
from programy.parser.exceptions import ParserException
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphSraixTests(TemplateGraphTestClient):
def test_sraix_template_params_as_attribs(self):
template = ET.fromstring("""
<template>
<sraix host="hostname" botid="testbot" hint="test query" apikey="1234567890" service="ask">
Ask this question
</sraix>
</template>
""")
ast = self._graph.parse_template_expression(template)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertIsNotNone(ast.children[0])
self.assertIsInstance(ast.children[0], TemplateSRAIXNode)
self.assertEqual("ask", ast.children[0]._service)
def test_sraix_template_params_as_children(self):
template = ET.fromstring("""
<template>
<sraix>
<host>hostname</host>
<botid>testbot</botid>
<hint>test query</hint>
<apikey>1234567890</apikey>
<service>ask</service>
Ask this question
</sraix>
</template>
""")
ast = self._graph.parse_template_expression(template)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertIsNotNone(ast.children[0])
self.assertIsInstance(ast.children[0], TemplateSRAIXNode)
self.assertEqual("ask", ast.children[0]._service)
def test_sraix_template_no_service(self):
template = ET.fromstring("""
<template>
<sraix>
Ask this question
</sraix>
</template>
""")
with self.assertRaises(ParserException):
ast = self._graph.parse_template_expression(template)
def test_sraix_template_with_children(self):
template = ET.fromstring("""
<template>
<sraix>
<service>ask</service>
Ask this question <get name="somevar" />
</sraix>
</template>
""")
ast = self._graph.parse_template_expression(template)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertIsNotNone(ast.children[0])
self.assertIsInstance(ast.children[0], TemplateSRAIXNode)
self.assertEqual("ask", ast.children[0]._service)
|
from .imports import lazy_import
from .version import version as __version__ # noqa
__all__ = [ # noqa
"AbortHandshake",
"basic_auth_protocol_factory",
"BasicAuthWebSocketServerProtocol",
"ClientConnection",
"connect",
"ConnectionClosed",
"ConnectionClosedError",
"ConnectionClosedOK",
"Data",
"DuplicateParameter",
"ExtensionHeader",
"ExtensionParameter",
"InvalidHandshake",
"InvalidHeader",
"InvalidHeaderFormat",
"InvalidHeaderValue",
"InvalidMessage",
"InvalidOrigin",
"InvalidParameterName",
"InvalidParameterValue",
"InvalidState",
"InvalidStatusCode",
"InvalidUpgrade",
"InvalidURI",
"NegotiationError",
"Origin",
"parse_uri",
"PayloadTooBig",
"ProtocolError",
"RedirectHandshake",
"SecurityError",
"serve",
"ServerConnection",
"Subprotocol",
"unix_connect",
"unix_serve",
"WebSocketClientProtocol",
"WebSocketCommonProtocol",
"WebSocketException",
"WebSocketProtocolError",
"WebSocketServer",
"WebSocketServerProtocol",
"WebSocketURI",
]
lazy_import(
globals(),
aliases={
"auth": ".legacy",
"basic_auth_protocol_factory": ".legacy.auth",
"BasicAuthWebSocketServerProtocol": ".legacy.auth",
"ClientConnection": ".client",
"connect": ".legacy.client",
"unix_connect": ".legacy.client",
"WebSocketClientProtocol": ".legacy.client",
"Headers": ".datastructures",
"MultipleValuesError": ".datastructures",
"WebSocketException": ".exceptions",
"ConnectionClosed": ".exceptions",
"ConnectionClosedError": ".exceptions",
"ConnectionClosedOK": ".exceptions",
"InvalidHandshake": ".exceptions",
"SecurityError": ".exceptions",
"InvalidMessage": ".exceptions",
"InvalidHeader": ".exceptions",
"InvalidHeaderFormat": ".exceptions",
"InvalidHeaderValue": ".exceptions",
"InvalidOrigin": ".exceptions",
"InvalidUpgrade": ".exceptions",
"InvalidStatusCode": ".exceptions",
"NegotiationError": ".exceptions",
"DuplicateParameter": ".exceptions",
"InvalidParameterName": ".exceptions",
"InvalidParameterValue": ".exceptions",
"AbortHandshake": ".exceptions",
"RedirectHandshake": ".exceptions",
"InvalidState": ".exceptions",
"InvalidURI": ".exceptions",
"PayloadTooBig": ".exceptions",
"ProtocolError": ".exceptions",
"WebSocketProtocolError": ".exceptions",
"protocol": ".legacy",
"WebSocketCommonProtocol": ".legacy.protocol",
"ServerConnection": ".server",
"serve": ".legacy.server",
"unix_serve": ".legacy.server",
"WebSocketServerProtocol": ".legacy.server",
"WebSocketServer": ".legacy.server",
"Data": ".typing",
"Origin": ".typing",
"ExtensionHeader": ".typing",
"ExtensionParameter": ".typing",
"Subprotocol": ".typing",
"parse_uri": ".uri",
"WebSocketURI": ".uri",
},
deprecated_aliases={
"framing": ".legacy",
"handshake": ".legacy",
},
)
|
from .addition_problem import AdditionProblem
from .copy_memory import CopyMemory
from .mnist import MNIST
from .cifar10 import CIFAR10
from .stl10 import STL10
from .cifar100 import CIFAR100
from .speech_commands import (
SpeechCommands,
normalise_data,
split_data,
load_data,
save_data,
)
from .char_trajectories import CharTrajectories
from .imagenet_small import ImagenetDownsampled
|
# Color Filtering
# using HSV (hue, saturation, value)
# create blurred versions of the filtered image
import cv2
import numpy as np
cap = cv2.VideoCapture(0) # select the first camera in the system
while True:
_ , frame = cap.read()
hsv = cv2.cvtColor(frame , cv2.COLOR_BGR2HSV)
    # Now filter the video: remove noise and detect a single colour range
    # in the streaming frames.
lower_red = np.array([0,153,0])
upper_red = np.array([153,200,255])
# create many blurs based on Noisy
mask = cv2.inRange(hsv , lower_red , upper_red) # select a unique color # noisy
result = cv2.bitwise_and(frame , frame , mask =mask )
    kernel = np.ones((15,15), np.float32) / 225  # 15x15 averaging kernel (each weight 1/225)
    smoothed = cv2.filter2D(result, -1, kernel)
    # median blur -- usually the least noisy of these filters
    median = cv2.medianBlur(result, 15)
    # bilateral filter -- edge-preserving, but less useful here and still fairly noisy
    bilateral = cv2.bilateralFilter(result, 15, 75, 75)
    # Gaussian blur
    blur = cv2.GaussianBlur(result, (15,15), 0)
    cv2.imshow('frame', frame)
cv2.imshow('mask', mask)
cv2.imshow('result',result)
cv2.imshow('Blury',smoothed)
cv2.imshow('median',median)
cv2.imshow('bilateral',bilateral)
    cv2.imshow('Gauss', blur)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
cap.release()
|
from setuptools import setup, find_packages
requirements = [
'django>=3.1.2',
'django-registration>=3.1.1',
'django-crispy-forms>=1.9.2',
'gunicorn>=20.0.4',
'dj-database-url>=0.5.0',
'psycopg2-binary>=2.8.6',
'django-pwa>=1.0.10',
'whitenoise>=5.2.0',
]
setup(
name='feelya',
author='Stefan Schneider',
    version='1.0',
description="FeelYa: The app that gets you! Keep track of what you eat and do and improve how you feel.",
url='https://feelya-app.herokuapp.com/',
    packages=find_packages(),
    python_requires=">=3.8",
install_requires=requirements,
zip_safe=False
)
|
from django.contrib.auth.models import User
from .user import Friend, UserProfile, FriendRequest
from .comment import Comment
from .post import Post
from .server import Server
|
import pycep_correios , sys , requests
from bs4 import BeautifulSoup
"""
Agradecimento : Michell Stuttgart , @evertonmatos
E ao Script de https://github.com/th3ch4os/rastreio/blob/master/rastreio , serviu de base para implementar e entender
Requests usando o método post
Alterações Básicas : TH3 CH4OS
"""
# This function is used to check whether the command-line arguments actually exist
def list_check_value(value,lista):
"""[Verifica se um Value na List existe ou não]
Args:
value ([int]): [Index da lista]
lista ([list]): [Lista Desejada]
Returns:
[bool]: [True ou False]
"""
try:
if (lista[int(value)] in lista):
return True
except IndexError:
return False
if (list_check_value(1,sys.argv) == False):
print('Digite Algo')
quit()
def cep(x):
print("========== CEP=============")
    # The CEP lookup is returned as a dictionary by get_address_from_cep()
    inf_cep = pycep_correios.get_address_from_cep(x)  # sys.argv[1] takes the user's input
print("CEP => {}".format(inf_cep["cep"]))
print("Cidade => {}".format(inf_cep["cidade"]))
print("Estado => {}".format(inf_cep["uf"]))
print("Bairro => {}".format(inf_cep["bairro"]))
print("Endereço => {}".format(inf_cep["logradouro"]))
print("Complemento => {}".format(inf_cep["complemento"]))
def rastreio(codigo):
print("==== Código de Rastreio ====")
s = requests.Session()
payload = { 'objetos':codigo , 'btnPesq':'+Buscar'}
s.headers.update({
'Host': 'www2.correios.com.br',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:56.0) Gecko/20100101 Firefox/56.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'pt-BR,pt;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'http://www2.correios.com.br/sistemas/rastreamento/default.cfm',
'Content-Type': 'application/x-www-form-urlencoded',
'Content-Length': '37',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1'
})
req = s.post('https://www2.correios.com.br/sistemas/rastreamento/resultado.cfm?', data=payload, allow_redirects=True)
req.encoding = 'ISO-8859-1'
soup = BeautifulSoup(req.content,"html.parser")
conteudo = soup.find_all('td')
for i in conteudo:
print(i.text)
if ( sys.argv[1] != '') and ( list_check_value(2,sys.argv)==False):
cep(str(sys.argv[1]))
try:
if ( sys.argv[2]== "--c"):
rastreio(str(sys.argv[1]))
except IndexError:
pass
|
import pickle
import os
class Oneshot:
service_file = '/var/run/stard/running.services'
@classmethod
def mark_running_services(cls, services, trunc=False):
os.makedirs(os.path.dirname(cls.service_file), exist_ok=True)
with open(cls.service_file, 'wb' if trunc else 'ab') as f:
for service in services:
pickle.dump(service, f)
@classmethod
def mark_running(cls, service):
cls.mark_running_services({service})
@classmethod
def unmark_running_services(cls, services):
running_services = set()
with open(cls.service_file, 'rb') as f:
while True:
try:
running_services.add(pickle.load(f))
except (EOFError, pickle.UnpicklingError):
break
cls.mark_running_services(running_services - services, trunc=True)
@classmethod
def unmark_running(cls, service):
cls.unmark_running_services({service})
@classmethod
def is_running(cls, service):
try:
with open(cls.service_file, 'rb') as f:
while True:
if service == pickle.load(f):
return True
except (FileNotFoundError, EOFError, pickle.UnpicklingError):
return False
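# Minimal usage sketch (illustrative only: writing under /var/run normally needs root,
# and 'sshd' is just a placeholder for whatever service objects the caller pickles):
#
#   Oneshot.mark_running('sshd')
#   assert Oneshot.is_running('sshd')
#   Oneshot.unmark_running('sshd')
#   assert not Oneshot.is_running('sshd')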
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import setuptools
from setuptools.dist import Distribution
DEPENDENCIES = [
'packaging'
]
CLASSIFIERS = [
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence"
]
with open("README.md", "r") as fh:
long_description = fh.read()
class BinaryDistribution(Distribution):
"""Distribution which always forces a binary package with platform name"""
def has_ext_modules(foo):
return True
setuptools.setup(
name="pyis-onnx",
version="0.1.dev2",
author="Lu Ye, Ze Tao, Hao Jin",
author_email="luye@microsoft.com, zetao@microsoft.com, haoji@microsoft.com",
description="Python Inference Script for Authoring Cross-Platform End-to-end Models",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/microsoft/python-inference-script",
packages=setuptools.find_namespace_packages(where = '.', include = ['pyis.*']),
python_requires="~=3.6", # 3.6 or later, but not version 4.0 or later
# distclass=BinaryDistribution,
classifiers=CLASSIFIERS,
install_requires=DEPENDENCIES,
include_package_data=True
)
|
from collections import defaultdict
from aoc import get_input_as, submit
inp = get_input_as(int, sep=',')
lanternfish = defaultdict(int)
triggers = defaultdict(list)
for i in inp:
lanternfish[i + 1] += 1
counter = len(inp)
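# lanternfish[d] counts the fish that reproduce on day d: each spawner reproduces again
# 7 days later and its newborn first reproduces 9 days later, so the 256-day simulation
# is pure bucket bookkeeping (counter tracks the total population) instead of per-fish lists.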
for i in range(1, 256 + 1):
if i in lanternfish:
counter += lanternfish[i]
lanternfish[i + 7] += lanternfish[i]
lanternfish[i + 9] += lanternfish[i]
del lanternfish[i]
submit(counter)
|
'''
user - package supports user management for xtilities products
=========================================================================================
'''
# pypi
from flask import Flask, g
from flask_security import Security, SQLAlchemyUserDatastore, LoginForm, ForgotPasswordForm
# homegrown
from loutilities.configparser import getitems
from loutilities.user.model import User, Role
# hold application here
app = None
user_datastore = None
# TODO: should these messages be localized? See https://flask-security-too.readthedocs.io/en/stable/customizing.html#localization
user_messages = {
'ACCOUNT_NOT_PERMITTED' : 'Account not permitted for this application'
}
# login_form for application management
class UserLoginForm(LoginForm):
def validate(self):
# if some error was detected from standard validate(), we're done
if not super().validate():
return False
# if all ok otherwise, check roles to verify user allowed for this application
## collect applications
apps = set()
for thisrole in self.user.roles:
apps |= set(thisrole.applications)
## disallow login if this app isn't in one of user's roles
if g.loutility not in apps:
self.email.errors.append(user_messages['ACCOUNT_NOT_PERMITTED'])
return False
return True
# forgot_password for application management
class UserForgotPasswordForm(ForgotPasswordForm):
def validate(self):
# if some error was detected from standard validate(), we're done
if not super().validate():
return False
# if all ok otherwise, check roles to verify user allowed for this application
## collect applications
apps = set()
for thisrole in self.user.roles:
apps |= set(thisrole.applications)
## disallow login if this app isn't in one of user's roles
if g.loutility not in apps:
self.email.errors.append(user_messages['ACCOUNT_NOT_PERMITTED'])
return False
return True
# extend flask_security.Security to support application verification
class UserSecurity(Security):
def __init__(self, app=None, datastore=None, register_blueprint=True, **kwargs):
'''
replaces flask_security.Security
add login_form=UserLoginForm if caller hasn't already supplied
:param kwargs:
'''
if not 'login_form' in kwargs:
kwargs['login_form'] = UserLoginForm
if not 'forgot_password_form' in kwargs:
kwargs['forgot_password_form'] = UserForgotPasswordForm
return super().__init__(app, datastore, register_blueprint, **kwargs)
# used only for database initialization
# TODO: future use for loutilities.com landing page
def create_app(config_obj, configfiles=None):
'''
apply configuration object, then configuration files
'''
global app
app = Flask('loutilities')
app.config.from_object(config_obj)
if configfiles:
# backwards compatibility
if type(configfiles) == str:
configfiles = [configfiles]
for configfile in configfiles:
appconfig = getitems(configfile, 'app')
app.config.update(appconfig)
from .model import db
db.init_app(app)
global user_datastore
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)
# need to force app context else get
# RuntimeError: Working outside of application context.
# RuntimeError: Attempted to generate a URL without the application context being pushed.
# see http://kronosapiens.github.io/blog/2014/08/14/understanding-contexts-in-flask.html
with app.app_context():
# set up scoped session
from sqlalchemy.orm import scoped_session, sessionmaker
# the following code causes binds not to work, because the session is artificially
# set to the base database engine via bind parameter
# db.session = scoped_session(sessionmaker(autocommit=False,
# autoflush=False,
# bind=db.engine))
# db.query = db.session.query_property()
    return app
|
"""
Script to perform batch scoring.
"""
import os
import pickle
import pandas as pd
from utils.constants import FEATURE_COLS
FEATURES_DATA = os.path.join(
os.getenv("TEMP_DATA_BUCKET"), os.getenv("FEATURES_DATA"))
OUTPUT_MODEL_NAME = os.getenv("OUTPUT_MODEL_NAME")
BIGQUERY_PROJECT = os.getenv("BIGQUERY_PROJECT")
BIGQUERY_DATASET = os.getenv("BIGQUERY_DATASET")
DEST_SUBSCRIBER_SCORE_TABLE = os.getenv("DEST_SUBSCRIBER_SCORE_TABLE")
def main():
"""Batch scoring pipeline"""
print("\tLoading active subscribers")
subscriber_df = pd.read_csv(FEATURES_DATA)
subscriber_pd_df = (
subscriber_df
.query("Churn==0")
.drop(columns=["Churn"])
)
print(f"\tNumber of active subscribers = {len(subscriber_pd_df)}")
print("\tLoading model")
with open("/artefact/" + OUTPUT_MODEL_NAME, "rb") as model_file:
clf = pickle.load(model_file)
print("\tScoring")
subscriber_pd_df["Prob"] = (
clf.predict_proba(subscriber_pd_df[FEATURE_COLS])[:, 1]
)
print("\tSaving scores to BigQuery")
subscriber_pd_df[["User_id", "Prob"]].to_gbq(
f"{BIGQUERY_DATASET}.{DEST_SUBSCRIBER_SCORE_TABLE}",
project_id=BIGQUERY_PROJECT,
if_exists="replace",
)
if __name__ == "__main__":
main()
|
# encoding=utf8
import logging
from numpy import apply_along_axis, argmin, argmax, sum, sqrt, round, argsort, fabs, asarray, where
from NiaPy.algorithms.algorithm import Algorithm
from NiaPy.util import fullArray
logging.basicConfig()
logger = logging.getLogger('NiaPy.algorithms.basic')
logger.setLevel('INFO')
__all__ = ['FireworksAlgorithm', 'EnhancedFireworksAlgorithm', 'DynamicFireworksAlgorithm', 'DynamicFireworksAlgorithmGauss', 'BareBonesFireworksAlgorithm']
class BareBonesFireworksAlgorithm(Algorithm):
r"""Implementation of Bare Bones Fireworks Algorithm.
Algorithm:
Bare Bones Fireworks Algorithm
Date:
2018
Authors:
Klemen Berkovič
License:
MIT
Reference URL:
https://www.sciencedirect.com/science/article/pii/S1568494617306609
Reference paper:
Junzhi Li, Ying Tan, The bare bones fireworks algorithm: A minimalist global optimizer, Applied Soft Computing, Volume 62, 2018, Pages 454-462, ISSN 1568-4946, https://doi.org/10.1016/j.asoc.2017.10.046.
Attributes:
        Name (List[str]): List of strings representing algorithm names
        n (int): Number of sparks
C_a (float): amplification coefficient
C_r (float): reduction coefficient
"""
Name = ['BareBonesFireworksAlgorithm', 'BBFWA']
@staticmethod
def algorithmInfo():
r"""Get default information of algorithm.
Returns:
str: Basic information.
See Also:
* :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
"""
return r"""Junzhi Li, Ying Tan, The bare bones fireworks algorithm: A minimalist global optimizer, Applied Soft Computing, Volume 62, 2018, Pages 454-462, ISSN 1568-4946, https://doi.org/10.1016/j.asoc.2017.10.046."""
@staticmethod
def typeParameters(): return {
'n': lambda x: isinstance(x, int) and x > 0,
'C_a': lambda x: isinstance(x, (float, int)) and x > 1,
'C_r': lambda x: isinstance(x, (float, int)) and 0 < x < 1
}
def setParameters(self, n=10, C_a=1.5, C_r=0.5, **ukwargs):
r"""Set the arguments of an algorithm.
Arguments:
n (int): Number of sparks :math:`\in [1, \infty)`.
C_a (float): Amplification coefficient :math:`\in [1, \infty)`.
C_r (float): Reduction coefficient :math:`\in (0, 1)`.
"""
ukwargs.pop('NP', None)
Algorithm.setParameters(self, NP=1, **ukwargs)
self.n, self.C_a, self.C_r = n, C_a, C_r
def initPopulation(self, task):
r"""Initialize starting population.
Args:
task (Task): Optimization task.
Returns:
Tuple[numpy.ndarray, float, Dict[str, Any]]:
1. Initial solution.
2. Initial solution function/fitness value.
3. Additional arguments:
                * A (numpy.ndarray): Starting amplitude or search range.
"""
x, x_fit, d = Algorithm.initPopulation(self, task)
d.update({'A': task.bRange})
return x, x_fit, d
def runIteration(self, task, x, x_fit, xb, fxb, A, **dparams):
r"""Core function of Bare Bones Fireworks Algorithm.
Args:
task (Task): Optimization task.
x (numpy.ndarray): Current solution.
x_fit (float): Current solution fitness/function value.
xb (numpy.ndarray): Current best solution.
fxb (float): Current best solution fitness/function value.
            A (numpy.ndarray): Search range.
dparams (Dict[str, Any]): Additional parameters.
Returns:
Tuple[numpy.ndarray, float, numpy.ndarray, float, Dict[str, Any]]:
1. New solution.
2. New solution fitness/function value.
3. New global best solution.
4. New global best solutions fitness/objective value.
5. Additional arguments:
                * A (numpy.ndarray): Search range.
"""
S = apply_along_axis(task.repair, 1, self.uniform(x - A, x + A, [self.n, task.D]), self.Rand)
S_fit = apply_along_axis(task.eval, 1, S)
iS = argmin(S_fit)
if S_fit[iS] < x_fit: x, x_fit, A = S[iS], S_fit[iS], self.C_a * A
else: A = self.C_r * A
return x, x_fit, x.copy(), x_fit, {'A': A}
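# Rough usage sketch (assumes a NiaPy 2.x-era layout where StoppingTask and the Sphere
# benchmark are importable from NiaPy.task and NiaPy.benchmarks; adjust for your version):
#
#   from NiaPy.task import StoppingTask
#   from NiaPy.benchmarks import Sphere
#   task = StoppingTask(D=10, nFES=10000, benchmark=Sphere())
#   algo = BareBonesFireworksAlgorithm(n=20, C_a=1.5, C_r=0.5)
#   best_x, best_fitness = algo.run(task)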
class FireworksAlgorithm(Algorithm):
r"""Implementation of fireworks algorithm.
Algorithm:
Fireworks Algorithm
Date:
2018
Authors:
Klemen Berkovič
License:
MIT
Reference URL:
https://www.springer.com/gp/book/9783662463529
Reference paper:
Tan, Ying. "Fireworks algorithm." Heidelberg, Germany: Springer 10 (2015): 978-3
Attributes:
        Name (List[str]): List of strings representing algorithm names.
"""
Name = ['FireworksAlgorithm', 'FWA']
@staticmethod
def algorithmInfo():
r"""Get default information of algorithm.
Returns:
str: Basic information.
See Also:
* :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
"""
return r"""Tan, Ying. "Fireworks algorithm." Heidelberg, Germany: Springer 10 (2015): 978-3."""
@staticmethod
def typeParameters(): return {
'N': lambda x: isinstance(x, int) and x > 0,
'm': lambda x: isinstance(x, int) and x > 0,
'a': lambda x: isinstance(x, (int, float)) and x > 0,
'b': lambda x: isinstance(x, (int, float)) and x > 0,
'epsilon': lambda x: isinstance(x, float) and 0 < x < 1
}
def setParameters(self, N=40, m=40, a=1, b=2, A=40, epsilon=1e-31, **ukwargs):
r"""Set the arguments of an algorithm.
Arguments:
N (int): Number of Fireworks
m (int): Number of sparks
a (int): Limitation of sparks
b (int): Limitation of sparks
A (float): --
            epsilon (float): Small number to avoid division by zero
"""
Algorithm.setParameters(self, NP=N, **ukwargs)
self.m, self.a, self.b, self.A, self.epsilon = m, a, b, A, epsilon
def initAmplitude(self, task):
r"""Initialize amplitudes for dimensions.
Args:
task (Task): Optimization task.
Returns:
numpy.ndarray[float]: Starting amplitudes.
"""
return fullArray(self.A, task.D)
def SparsksNo(self, x_f, xw_f, Ss):
r"""Calculate number of sparks based on function value of individual.
Args:
x_f (float): Individuals function/fitness value.
xw_f (float): Worst individual function/fitness value.
Ss (): TODO
Returns:
int: Number of sparks that individual will create.
"""
s = self.m * (xw_f - x_f + self.epsilon) / (Ss + self.epsilon)
return round(self.b * self.m) if s > self.b * self.m and self.a < self.b < 1 else round(self.a * self.m)
def ExplosionAmplitude(self, x_f, xb_f, A, As):
r"""Calculate explosion amplitude.
Args:
x_f (float): Individuals function/fitness value.
xb_f (float): Best individuals function/fitness value.
A (numpy.ndarray): Amplitudes.
As ():
Returns:
numpy.ndarray: TODO.
"""
return A * (x_f - xb_f - self.epsilon) / (As + self.epsilon)
def ExplodeSpark(self, x, A, task):
r"""Explode a spark.
Args:
x (numpy.ndarray): Individuals creating spark.
A (numpy.ndarray): Amplitude of spark.
task (Task): Optimization task.
Returns:
numpy.ndarray: Sparks exploded in with specified amplitude.
"""
return self.Mapping(x + self.rand(task.D) * self.uniform(-A, A, task.D), task)
def GaussianSpark(self, x, task):
r"""Create gaussian spark.
Args:
x (numpy.ndarray): Individual creating a spark.
task (Task): Optimization task.
Returns:
numpy.ndarray: Spark exploded based on gaussian amplitude.
"""
return self.Mapping(x + self.rand(task.D) * self.normal(1, 1, task.D), task)
def Mapping(self, x, task):
r"""Fix value to bounds..
Args:
x (numpy.ndarray): Individual to fix.
task (Task): Optimization task.
Returns:
numpy.ndarray: Individual in search range.
"""
ir = where(x > task.Upper)
x[ir] = task.Lower[ir] + x[ir] % task.bRange[ir]
ir = where(x < task.Lower)
x[ir] = task.Lower[ir] + x[ir] % task.bRange[ir]
return x
def R(self, x, FW):
r"""Calculate ranges.
Args:
x (numpy.ndarray): Individual in population.
FW (numpy.ndarray): Current population.
Returns:
            numpy.ndarray[float]: Range values.
"""
return sqrt(sum(fabs(x - FW)))
def p(self, r, Rs):
r"""Calculate p.
Args:
r (float): Range of individual.
Rs (float): Sum of ranges.
Returns:
float: p value.
"""
return r / Rs
def NextGeneration(self, FW, FW_f, FWn, task):
r"""Generate new generation of individuals.
Args:
FW (numpy.ndarray): Current population.
FW_f (numpy.ndarray[float]): Currents population fitness/function values.
FWn (numpy.ndarray): New population.
task (Task): Optimization task.
Returns:
Tuple[numpy.ndarray, numpy.ndarray[float]]:
1. New population.
2. New populations fitness/function values.
"""
FWn_f = apply_along_axis(task.eval, 1, FWn)
ib = argmin(FWn_f)
if FWn_f[ib] < FW_f[0]: FW[0], FW_f[0] = FWn[ib], FWn_f[ib]
R = asarray([self.R(FWn[i], FWn) for i in range(len(FWn))])
Rs = sum(R)
P = asarray([self.p(R[i], Rs) for i in range(len(FWn))])
isort = argsort(P)[-(self.NP - 1):]
FW[1:], FW_f[1:] = asarray(FWn)[isort], FWn_f[isort]
return FW, FW_f
def initPopulation(self, task):
r"""Initialize starting population.
Args:
task (Task): Optimization task.
Returns:
Tuple[numpy.ndarray, numpy.ndarray[float], Dict[str, Any]]:
1. Initialized population.
2. Initialized populations function/fitness values.
3. Additional arguments:
* Ah (numpy.ndarray): Initialized amplitudes.
See Also:
* :func:`NiaPy.algorithms.algorithm.Algorithm.initPopulation`
"""
FW, FW_f, d = Algorithm.initPopulation(self, task)
Ah = self.initAmplitude(task)
d.update({'Ah': Ah})
return FW, FW_f, d
def runIteration(self, task, FW, FW_f, xb, fxb, Ah, **dparams):
r"""Core function of Fireworks algorithm.
Args:
task (Task): Optimization task.
FW (numpy.ndarray): Current population.
FW_f (numpy.ndarray[float]): Current populations function/fitness values.
xb (numpy.ndarray): Global best individual.
fxb (float): Global best individuals fitness/function value.
Ah (numpy.ndarray): Current amplitudes.
            **dparams (Dict[str, Any]): Additional arguments.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, Dict[str, Any]]:
                1. New population.
                2. New population's function/fitness values.
3. New global best solution.
4. New global best solutions fitness/objective value.
5. Additional arguments:
* Ah (numpy.ndarray): Initialized amplitudes.
See Also:
* :func:`FireworksAlgorithm.SparsksNo`.
* :func:`FireworksAlgorithm.ExplosionAmplitude`
* :func:`FireworksAlgorithm.ExplodeSpark`
* :func:`FireworksAlgorithm.GaussianSpark`
* :func:`FireworksAlgorithm.NextGeneration`
"""
iw, ib = argmax(FW_f), 0
Ss, As = sum(FW_f[iw] - FW_f), sum(FW_f - FW_f[ib])
S = [self.SparsksNo(FW_f[i], FW_f[iw], Ss) for i in range(self.NP)]
A = [self.ExplosionAmplitude(FW_f[i], FW_f[ib], Ah, As) for i in range(self.NP)]
FWn = [self.ExplodeSpark(FW[i], A[i], task) for i in range(self.NP) for _ in range(S[i])]
for i in range(self.m): FWn.append(self.GaussianSpark(self.randint(self.NP), task))
FW, FW_f = self.NextGeneration(FW, FW_f, FWn, task)
xb, fxb = self.getBest(FW, FW_f, xb, fxb)
return FW, FW_f, xb, fxb, {'Ah': Ah}
class EnhancedFireworksAlgorithm(FireworksAlgorithm):
r"""Implementation of enganced fireworks algorithm.
Algorithm:
Enhanced Fireworks Algorithm
Date:
2018
Authors:
Klemen Berkovič
License:
MIT
Reference URL:
https://ieeexplore.ieee.org/document/6557813/
Reference paper:
S. Zheng, A. Janecek and Y. Tan, "Enhanced Fireworks Algorithm," 2013 IEEE Congress on Evolutionary Computation, Cancun, 2013, pp. 2069-2077. doi: 10.1109/CEC.2013.6557813
Attributes:
Name (List[str]): List of strings representing algorithm names.
Ainit (float): Initial amplitude of sparks.
        Afinal (float): Final amplitude of sparks.
"""
Name = ['EnhancedFireworksAlgorithm', 'EFWA']
@staticmethod
def algorithmInfo():
r"""Get default information of algorithm.
Returns:
str: Basic information.
See Also:
* :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
"""
return r"""S. Zheng, A. Janecek and Y. Tan, "Enhanced Fireworks Algorithm," 2013 IEEE Congress on Evolutionary Computation, Cancun, 2013, pp. 2069-2077. doi: 10.1109/CEC.2013.6557813"""
@staticmethod
def typeParameters():
r"""Get dictionary with functions for checking values of parameters.
Returns:
Dict[str, Callable]:
                * Ainit (Callable[[Union[int, float]], bool]): Checks that the initial amplitude is a positive number.
                * Afinal (Callable[[Union[int, float]], bool]): Checks that the final amplitude is a positive number.
See Also:
* :func:`FireworksAlgorithm.typeParameters`
"""
d = FireworksAlgorithm.typeParameters()
d['Ainit'] = lambda x: isinstance(x, (float, int)) and x > 0
d['Afinal'] = lambda x: isinstance(x, (float, int)) and x > 0
return d
def setParameters(self, Ainit=20, Afinal=5, **ukwargs):
r"""Set EnhancedFireworksAlgorithm algorithms core parameters.
Args:
            Ainit (float): Initial amplitude of sparks.
            Afinal (float): Final amplitude of sparks.
**ukwargs (Dict[str, Any]): Additional arguments.
See Also:
* :func:`FireworksAlgorithm.setParameters`
"""
FireworksAlgorithm.setParameters(self, **ukwargs)
self.Ainit, self.Afinal = Ainit, Afinal
def initRanges(self, task):
r"""Initialize ranges.
Args:
task (Task): Optimization task.
Returns:
Tuple[numpy.ndarray[float], numpy.ndarray[float], numpy.ndarray[float]]:
1. Initial amplitude values over dimensions.
2. Final amplitude values over dimensions.
3. uAmin.
"""
Ainit, Afinal = fullArray(self.Ainit, task.D), fullArray(self.Afinal, task.D)
return Ainit, Afinal, self.uAmin(Ainit, Afinal, task)
def uAmin(self, Ainit, Afinal, task):
r"""Calculate the value of `uAmin`.
Args:
Ainit (numpy.ndarray[float]): Initial amplitude values over dimensions.
Afinal (numpy.ndarray[float]): Final amplitude values over dimensions.
task (Task): Optimization task.
Returns:
numpy.ndarray[float]: uAmin.
"""
return Ainit - sqrt(task.Evals * (2 * task.nFES - task.Evals)) * (Ainit - Afinal) / task.nFES
def ExplosionAmplitude(self, x_f, xb_f, Ah, As, A_min=None):
r"""Calculate explosion amplitude.
Args:
x_f (float): Individuals function/fitness value.
xb_f (float): Best individual function/fitness value.
            Ah (numpy.ndarray): Current amplitudes.
            As (float): Sum of differences between all fitness values and the best fitness value.
            A_min (Optional[numpy.ndarray]): Minimal amplitude values.
Returns:
numpy.ndarray: New amplitude.
"""
A = FireworksAlgorithm.ExplosionAmplitude(self, x_f, xb_f, Ah, As)
ifix = where(A < A_min)
A[ifix] = A_min[ifix]
return A
def GaussianSpark(self, x, xb, task):
r"""Create new individual.
Args:
x (numpy.ndarray):
xb (numpy.ndarray):
task (Task): Optimization task.
Returns:
numpy.ndarray: New individual generated by gaussian noise.
"""
return self.Mapping(x + self.rand(task.D) * (xb - x) * self.normal(1, 1, task.D), task)
def NextGeneration(self, FW, FW_f, FWn, task):
r"""Generate new population.
Args:
FW (numpy.ndarray): Current population.
FW_f (numpy.ndarray[float]): Current populations fitness/function values.
FWn (numpy.ndarray): New population.
task (Task): Optimization task.
Returns:
Tuple[numpy.ndarray, numpy.ndarray[float]]:
1. New population.
2. New populations fitness/function values.
"""
FWn_f = apply_along_axis(task.eval, 1, FWn)
ib = argmin(FWn_f)
if FWn_f[ib] < FW_f[0]: FW[0], FW_f[0] = FWn[ib], FWn_f[ib]
for i in range(1, self.NP):
r = self.randint(len(FWn))
if FWn_f[r] < FW_f[i]: FW[i], FW_f[i] = FWn[r], FWn_f[r]
return FW, FW_f
def initPopulation(self, task):
r"""Initialize population.
Args:
task (Task): Optimization task.
Returns:
Tuple[numpy.ndarray, numpy.ndarray[float], Dict[str, Any]]:
1. Initial population.
2. Initial populations fitness/function values.
3. Additional arguments:
* Ainit (numpy.ndarray): Initial amplitude values.
* Afinal (numpy.ndarray): Final amplitude values.
* A_min (numpy.ndarray): Minimal amplitude values.
See Also:
* :func:`FireworksAlgorithm.initPopulation`
"""
FW, FW_f, d = FireworksAlgorithm.initPopulation(self, task)
Ainit, Afinal, A_min = self.initRanges(task)
d.update({'Ainit': Ainit, 'Afinal': Afinal, 'A_min': A_min})
return FW, FW_f, d
def runIteration(self, task, FW, FW_f, xb, fxb, Ah, Ainit, Afinal, A_min, **dparams):
r"""Core function of EnhancedFireworksAlgorithm algorithm.
Args:
task (Task): Optimization task.
FW (numpy.ndarray): Current population.
FW_f (numpy.ndarray[float]): Current populations fitness/function values.
xb (numpy.ndarray): Global best individual.
fxb (float): Global best individuals function/fitness value.
Ah (numpy.ndarray[float]): Current amplitude.
Ainit (numpy.ndarray[float]): Initial amplitude.
Afinal (numpy.ndarray[float]): Final amplitude values.
            A_min (numpy.ndarray[float]): Minimal amplitude values.
**dparams (Dict[str, Any]): Additional arguments.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, Dict[str, Any]]:
                1. New population.
                2. New population's fitness/function values.
3. New global best solution.
4. New global best solutions fitness/objective value.
5. Additional arguments:
* Ainit (numpy.ndarray): Initial amplitude values.
* Afinal (numpy.ndarray): Final amplitude values.
* A_min (numpy.ndarray): Minimal amplitude values.
"""
iw, ib = argmax(FW_f), 0
Ss, As = sum(FW_f[iw] - FW_f), sum(FW_f - FW_f[ib])
S = [self.SparsksNo(FW_f[i], FW_f[iw], Ss) for i in range(self.NP)]
A = [self.ExplosionAmplitude(FW_f[i], FW_f[ib], Ah, As, A_min) for i in range(self.NP)]
A_min = self.uAmin(Ainit, Afinal, task)
FWn = [self.ExplodeSpark(FW[i], A[i], task) for i in range(self.NP) for _ in range(S[i])]
for i in range(self.m): FWn.append(self.GaussianSpark(self.randint(self.NP), FW[ib], task))
FW, FW_f = self.NextGeneration(FW, FW_f, FWn, task)
xb, fxb = self.getBest(FW, FW_f, xb, fxb)
return FW, FW_f, xb, fxb, {'Ah': Ah, 'Ainit': Ainit, 'Afinal': Afinal, 'A_min': A_min}
class DynamicFireworksAlgorithmGauss(EnhancedFireworksAlgorithm):
r"""Implementation of dynamic fireworks algorithm.
Algorithm:
Dynamic Fireworks Algorithm
Date:
2018
Authors:
Klemen Berkovič
License:
MIT
Reference URL:
http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6900485&isnumber=6900223
Reference paper:
S. Zheng, A. Janecek, J. Li and Y. Tan, "Dynamic search in fireworks algorithm," 2014 IEEE Congress on Evolutionary Computation (CEC), Beijing, 2014, pp. 3222-3229. doi: 10.1109/CEC.2014.6900485
Attributes:
Name (List[str]): List of strings representing algorithm names.
        A_cf (Union[float, int]): Amplitude of the core firework.
C_a (Union[float, int]): Amplification factor.
C_r (Union[float, int]): Reduction factor.
epsilon (Union[float, int]): Small value.
"""
Name = ['DynamicFireworksAlgorithmGauss', 'dynFWAG']
@staticmethod
def algorithmInfo():
r"""Get default information of algorithm.
Returns:
str: Basic information.
See Also:
* :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
"""
return r"""S. Zheng, A. Janecek, J. Li and Y. Tan, "Dynamic search in fireworks algorithm," 2014 IEEE Congress on Evolutionary Computation (CEC), Beijing, 2014, pp. 3222-3229. doi: 10.1109/CEC.2014.6900485"""
@staticmethod
def typeParameters():
r"""Get dictionary with functions for checking values of parameters.
Returns:
Dict[str, Callable]:
                * A_cf (Callable[[Union[float, int]], bool]): Checks that the core firework amplitude is a positive number.
                * C_a (Callable[[Union[float, int]], bool]): Checks that the amplification factor is greater than 1.
                * C_r (Callable[[Union[float, int]], bool]): Checks that the reduction factor is between 0 and 1.
                * epsilon (Callable[[Union[float, int]], bool]): Checks that epsilon is between 0 and 1.
See Also:
* :func:`FireworksAlgorithm.typeParameters`
"""
d = FireworksAlgorithm.typeParameters()
d['A_cf'] = lambda x: isinstance(x, (float, int)) and x > 0
d['C_a'] = lambda x: isinstance(x, (float, int)) and x > 1
d['C_r'] = lambda x: isinstance(x, (float, int)) and 0 < x < 1
d['epsilon'] = lambda x: isinstance(x, (float, int)) and 0 < x < 1
return d
def setParameters(self, A_cf=20, C_a=1.2, C_r=0.9, epsilon=1e-8, **ukwargs):
r"""Set core arguments of DynamicFireworksAlgorithmGauss.
Args:
            A_cf (Union[int, float]): Amplitude of the core firework.
            C_a (Union[int, float]): Amplification factor.
            C_r (Union[int, float]): Reduction factor.
            epsilon (Union[int, float]): Small constant used to avoid division by zero.
**ukwargs (Dict[str, Any]): Additional arguments.
See Also:
* :func:`FireworksAlgorithm.setParameters`
"""
FireworksAlgorithm.setParameters(self, **ukwargs)
self.A_cf, self.C_a, self.C_r, self.epsilon = A_cf, C_a, C_r, epsilon
def initAmplitude(self, task):
r"""Initialize amplitude.
Args:
task (Task): Optimization task.
Returns:
Tuple[numpy.ndarray, numpy.ndarray]:
1. Initial amplitudes.
2. Amplitude for best spark.
"""
return FireworksAlgorithm.initAmplitude(self, task), task.bRange
def Mapping(self, x, task):
r"""Fix out of bound solution/individual.
Args:
x (numpy.ndarray): Individual.
task (Task): Optimization task.
Returns:
numpy.ndarray: Fixed individual.
"""
ir = where(x > task.Upper)
x[ir] = self.uniform(task.Lower[ir], task.Upper[ir])
ir = where(x < task.Lower)
x[ir] = self.uniform(task.Lower[ir], task.Upper[ir])
return x
def repair(self, x, d, epsilon):
r"""Repair solution.
Args:
x (numpy.ndarray): Individual.
d (numpy.ndarray): Default value.
epsilon (float): Limiting value.
Returns:
numpy.ndarray: Fixed solution.
"""
ir = where(x <= epsilon)
x[ir] = d[ir]
return x
def NextGeneration(self, FW, FW_f, FWn, task):
r"""TODO.
Args:
FW (numpy.ndarray): Current population.
FW_f (numpy.ndarray[float]): Current populations function/fitness values.
FWn (numpy.ndarray): New population.
task (Task): Optimization task.
Returns:
Tuple[numpy.ndarray, numpy.ndarray[float]]:
1. New population.
2. New populations function/fitness values.
"""
FWn_f = apply_along_axis(task.eval, 1, FWn)
ib = argmin(FWn_f)
for i, f in enumerate(FW_f):
r = self.randint(len(FWn))
if FWn_f[r] < f: FW[i], FW_f[i] = FWn[r], FWn_f[r]
FW[0], FW_f[0] = FWn[ib], FWn_f[ib]
return FW, FW_f
def uCF(self, xnb, xcb, xcb_f, xb, xb_f, Acf, task):
r"""TODO.
Args:
xnb:
xcb:
xcb_f:
xb:
xb_f:
Acf:
task (Task): Optimization task.
Returns:
Tuple[numpy.ndarray, float, numpy.ndarray]:
1. TODO
"""
xnb_f = apply_along_axis(task.eval, 1, xnb)
ib_f = argmin(xnb_f)
if xnb_f[ib_f] <= xb_f: xb, xb_f = xnb[ib_f], xnb_f[ib_f]
Acf = self.repair(Acf, task.bRange, self.epsilon)
if xb_f >= xcb_f: xb, xb_f, Acf = xcb, xcb_f, Acf * self.C_a
else: Acf = Acf * self.C_r
return xb, xb_f, Acf
def ExplosionAmplitude(self, x_f, xb_f, Ah, As, A_min=None):
return FireworksAlgorithm.ExplosionAmplitude(self, x_f, xb_f, Ah, As)
def initPopulation(self, task):
r"""Initialize population.
Args:
task (Task): Optimization task.
Returns:
Tuple[numpy.ndarray, numpy.ndarray[float], Dict[str, Any]]:
1. Initialized population.
2. Initialized population function/fitness values.
3. Additional arguments:
                    * Ah (numpy.ndarray): Initial explosion amplitudes.
                    * Ab (numpy.ndarray): Initial amplitude of the core firework.
"""
FW, FW_f, _ = Algorithm.initPopulation(self, task)
Ah, Ab = self.initAmplitude(task)
return FW, FW_f, {'Ah': Ah, 'Ab': Ab}
def runIteration(self, task, FW, FW_f, xb, fxb, Ah, Ab, **dparams):
r"""Core function of DynamicFireworksAlgorithmGauss algorithm.
Args:
task (Task): Optimization task.
FW (numpy.ndarray): Current population.
FW_f (numpy.ndarray): Current populations function/fitness values.
xb (numpy.ndarray): Global best individual.
fxb (float): Global best fitness/function value.
            Ah (Union[numpy.ndarray, float]): Current explosion amplitudes.
            Ab (Union[numpy.ndarray, float]): Current amplitude of the core firework.
**dparams (Dict[str, Any]): Additional arguments.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, Dict[str, Any]]:
1. New population.
2. New populations fitness/function values.
3. New global best solution.
4. New global best solutions fitness/objective value.
5. Additional arguments:
                    * Ah (Union[numpy.ndarray, float]): Current explosion amplitudes.
                    * Ab (Union[numpy.ndarray, float]): Current amplitude of the core firework.
"""
iw, ib = argmax(FW_f), argmin(FW_f)
Ss, As = sum(FW_f[iw] - FW_f), sum(FW_f - FW_f[ib])
S, sb = [self.SparsksNo(FW_f[i], FW_f[iw], Ss) for i in range(len(FW))], self.SparsksNo(fxb, FW_f[iw], Ss)
A = [self.ExplosionAmplitude(FW_f[i], FW_f[ib], Ah, As) for i in range(len(FW))]
FWn, xnb = [self.ExplodeSpark(FW[i], A[i], task) for i in range(self.NP) for _ in range(S[i])], [self.ExplodeSpark(xb, Ab, task) for _ in range(sb)]
for i in range(self.m): FWn.append(self.GaussianSpark(self.randint(self.NP), FW[ib], task))
FW, FW_f = self.NextGeneration(FW, FW_f, FWn, task)
iw, ib = argmax(FW_f), 0
xb, fxb, Ab = self.uCF(xnb, FW[ib], FW_f[ib], xb, fxb, Ab, task)
return FW, FW_f, xb, fxb, {'Ah': Ah, 'Ab': Ab}
class DynamicFireworksAlgorithm(DynamicFireworksAlgorithmGauss):
r"""Implementation of dynamic fireworks algorithm.
Algorithm:
Dynamic Fireworks Algorithm
Date:
2018
Authors:
Klemen Berkovič
License:
MIT
Reference URL:
http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6900485&isnumber=6900223
Reference paper:
S. Zheng, A. Janecek, J. Li and Y. Tan, "Dynamic search in fireworks algorithm," 2014 IEEE Congress on Evolutionary Computation (CEC), Beijing, 2014, pp. 3222-3229. doi: 10.1109/CEC.2014.6900485
Attributes:
Name (List[str]): List of strings representing algorithm name.
See Also:
* :class:`NiaPy.algorithms.basic.DynamicFireworksAlgorithmGauss`
"""
Name = ['DynamicFireworksAlgorithm', 'dynFWA']
@staticmethod
def algorithmInfo():
r"""Get default information of algorithm.
Returns:
str: Basic information.
See Also:
* :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
"""
return r"""S. Zheng, A. Janecek, J. Li and Y. Tan, "Dynamic search in fireworks algorithm," 2014 IEEE Congress on Evolutionary Computation (CEC), Beijing, 2014, pp. 3222-3229. doi: 10.1109/CEC.2014.6900485"""
def runIteration(self, task, FW, FW_f, xb, fxb, Ah, Ab, **dparams):
r"""Core function of Dynamic Fireworks Algorithm.
Args:
task (Task): Optimization task
FW (numpy.ndarray): Current population
FW_f (numpy.ndarray[float]): Current population fitness/function values
xb (numpy.ndarray): Current best solution
fxb (float): Current best solution's fitness/function value
            Ah (Union[numpy.ndarray, float]): Current explosion amplitudes.
            Ab (Union[numpy.ndarray, float]): Current amplitude of the core firework.
            **dparams (Dict[str, Any]): Additional arguments.
Returns:
            Tuple[numpy.ndarray, numpy.ndarray[float], numpy.ndarray, float, Dict[str, Any]]:
                1. New population.
                2. New population's function/fitness values.
                3. New global best solution.
                4. New global best solution's fitness/objective value.
                5. Additional arguments:
                    * Ah (Union[numpy.ndarray, float]): Current explosion amplitudes.
                    * Ab (Union[numpy.ndarray, float]): Current amplitude of the core firework.
"""
iw, ib = argmax(FW_f), argmin(FW_f)
Ss, As = sum(FW_f[iw] - FW_f), sum(FW_f - FW_f[ib])
S, sb = [self.SparsksNo(FW_f[i], FW_f[iw], Ss) for i in range(len(FW))], self.SparsksNo(fxb, FW_f[iw], Ss)
A = [self.ExplosionAmplitude(FW_f[i], FW_f[ib], Ah, As) for i in range(len(FW))]
FWn, xnb = [self.ExplodeSpark(FW[i], A[i], task) for i in range(self.NP) for _ in range(S[i])], [self.ExplodeSpark(xb, Ab, task) for _ in range(sb)]
FW, FW_f = self.NextGeneration(FW, FW_f, FWn, task)
iw, ib = argmax(FW_f), 0
xb, fxb, Ab = self.uCF(xnb, FW[ib], FW_f[ib], xb, fxb, Ab, task)
return FW, FW_f, xb, fxb, {'Ah': Ah, 'Ab': Ab}
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
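# --- Illustrative sketch (not part of NiaPy) ---------------------------------
# A minimal, self-contained example of the two core FWA formulas that SparsksNo
# and ExplosionAmplitude above implement: fitter fireworks get more sparks and a
# smaller explosion amplitude. All names and values below are illustrative only,
# and the standard "+ epsilon" form of the amplitude formula is used here.
import numpy as np

def fwa_sparks_and_amplitudes(fitness, m=40, A=40.0, epsilon=1e-31):
    """Return per-firework spark counts and explosion amplitudes (minimization)."""
    fitness = np.asarray(fitness, dtype=float)
    worst, best = fitness.max(), fitness.min()
    # More sparks for individuals close to the best fitness value.
    sparks = m * (worst - fitness + epsilon) / (np.sum(worst - fitness) + epsilon)
    # Smaller amplitude (finer local search) for individuals close to the best.
    amplitudes = A * (fitness - best + epsilon) / (np.sum(fitness - best) + epsilon)
    return np.round(sparks).astype(int), amplitudes

if __name__ == '__main__':
    s, a = fwa_sparks_and_amplitudes([1.0, 2.0, 5.0, 10.0])
    print(s)  # the best firework (fitness 1.0) gets the most sparks
    print(a)  # the best firework gets the smallest amplitude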
|
import uuid
from datetime import datetime
import pytz
from sqlalchemy.dialects.postgresql import JSONB, UUID, TEXT, TIMESTAMP, BOOLEAN
from sqlalchemy import ForeignKey
from lockheed_141310 import db
class CMMeta(db.Model):
__tablename__ = 'cm_meta'
    uuid = db.Column(UUID(as_uuid=True), default=uuid.uuid4, primary_key=True)
name = db.Column(TEXT)
def __init__(self, name: str):
self.uuid = uuid.uuid4()
self.name = name
@classmethod
def create(cls, name: str) -> None:
new_meta = cls(name)
db.session.add(new_meta)
db.session.commit()
class CMLog(db.Model):
__tablename__ = 'cm_log'
__table_args__ = (
db.ForeignKeyConstraint(('cm_uuid', 'log_type'), ('cm_log_types.cm_uuid', 'cm_log_types.log_type')),
)
id = db.Column(db.Integer, primary_key=True)
    cm_uuid = db.Column(UUID(as_uuid=True), default=uuid.uuid4)
timestamp = db.Column(TIMESTAMP)
log_type = db.Column(TEXT)
data = db.Column(JSONB)
def __init__(self, cm_uuid: UUID, log_type: str, data: dict):
self.cm_uuid = cm_uuid
self.log_type = log_type
        self.timestamp = datetime.now(pytz.utc)
self.data = data
@classmethod
def create(cls, cm_uuid: UUID, log_type: str, data: dict) -> dict:
new_log = cls(cm_uuid, log_type, data)
db.session.add(new_log)
db.session.commit()
return new_log.to_dict()
def to_dict(self) -> dict:
return {
"id": self.id,
"cm_uuid": self.cm_uuid,
"timestamp": str(self.timestamp),
"log_type": self.log_type,
"data": self.data
}
class Users(db.Model):
__tablename__ = 'users'
username = db.Column(TEXT, unique=True)
password = db.Column(TEXT)
    id = db.Column(UUID(as_uuid=True), default=uuid.uuid4, primary_key=True)
is_owner = db.Column(BOOLEAN)
active = db.Column(BOOLEAN)
email = db.Column(TEXT, unique=True)
def __init__(self, username: str, password: str, email: str, active: bool):
self.username = username
self.password = password
self.id = uuid.uuid4()
self.is_owner = False
self.active = active
self.email = email
@classmethod
def create(cls, username: str, hashed_password: str, email: str, active: bool = False) -> dict:
new_user = cls(username, hashed_password, email, active)
db.session.add(new_user)
db.session.commit()
return new_user.to_dict()
# TODO: differentiate role name vs role id, and change in add role route
def has_role_name(self, search_role: str) -> bool:
"""
Determines if user is member of role, given role name
"""
roles = {role.id: role.name for role in RoleDefinitions.query.all()}
owned_role_ids = [role.role_id for role in Roles.query.filter_by(user_id=self.id).all()]
for owned_role_id in owned_role_ids:
if roles[owned_role_id] == search_role:
return True
return False
def has_role_id(self, role_id: int) -> bool:
"""
Determines if user is member of role, given role id
"""
owned_role_ids = [role.role_id for role in Roles.query.filter_by(user_id=self.id).all()]
for owned_role_id in owned_role_ids:
if owned_role_id == role_id:
print("Match!")
return True
return False
def roles(self):
roles_query = Roles.query.filter_by(user_id=self.id).all()
roles = []
for role in roles_query:
role_definition = RoleDefinitions.query.filter_by(id=role.role_id).first()
role_data = {
"role_id": role_definition.id,
"name": role_definition.name,
}
roles.append(role_data)
return roles
def to_dict(self):
return {
"username": self.username,
"id": self.id,
"is_owner": self.is_owner,
"roles": self.roles(),
"active": self.active
}
class RoleDefinitions(db.Model):
__tablename__ = 'role_descriptions'
name = db.Column(TEXT)
id = db.Column(db.Integer, primary_key=True)
create_user = db.Column(BOOLEAN)
get_log = db.Column(BOOLEAN)
post_log = db.Column(BOOLEAN)
def __init__(self, name: str, **kwargs):
self.name = name
self.create_user = kwargs.get("create_user", False)
self.get_log = kwargs.get("get_log", False)
self.post_log = kwargs.get("post_log", False)
@classmethod
def create(cls, name: str, **kwargs):
        new_role = cls(name, **kwargs)
db.session.add(new_role)
db.session.commit()
def to_json(self) -> dict:
return {
"name": self.name,
"is_admin": self.is_admin,
"get_log": self.get_log,
"post_log": self.post_log
}
def has_permission(self, permission: str) -> bool:
"""
Determines if user has permission
"""
if hasattr(self, permission):
return getattr(self, permission)
return False
class Roles(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(UUID(as_uuid=True), ForeignKey('users.id'), default=uuid.uuid4)
role_id = db.Column(db.Integer, ForeignKey('role_descriptions.id'))
def __init__(self, user_id: UUID, role_id: int):
self.user_id = user_id
self.role_id = role_id
@classmethod
def create(cls, user_id: UUID, role_id: int) -> None:
new_role = cls(user_id, role_id)
db.session.add(new_role)
db.session.commit()
def get_name(self) -> str:
return RoleDefinitions.query.filter_by(id=self.role_id).first().name
def has_permission(self, permission: str) -> bool:
"""
Determines if role has the specified permission
"""
        definition = RoleDefinitions.query.filter_by(id=self.role_id).first()
if hasattr(definition, permission):
return getattr(definition, permission)
return False
class CMLogTypes(db.Model):
__tablename__ = 'cm_log_types'
__table_args__ = (
db.UniqueConstraint('cm_uuid', 'log_type', name='log_type'),
)
id = db.Column(db.Integer, primary_key=True)
cm_uuid = db.Column(UUID, ForeignKey("cm_meta.uuid"))
log_type = db.Column(TEXT)
description = db.Column(TEXT)
def __init__(self, cm_uuid: str, log_type: str, description: str):
self.cm_uuid = cm_uuid
self.log_type = log_type
self.description = description
@classmethod
def create(cls, cm_uuid: str, log_type: str, description: str = None) -> dict:
new_cm_log_type = cls(cm_uuid, log_type, description)
db.session.add(new_cm_log_type)
db.session.commit()
return new_cm_log_type.to_dict()
def to_dict(self):
return {
"id": self.id,
"cm_uuid": self.cm_uuid,
"log_type": self.log_type,
"description": self.description
}
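# Illustrative sketch (separate from the models above) of why column defaults
# should be passed as callables: default=uuid.uuid4 generates a fresh UUID for
# every INSERT, while default=uuid.uuid4() bakes in one value at import time.
# The throwaway table and in-memory SQLite engine below are examples only.
import uuid
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import Session, declarative_base

ExampleBase = declarative_base()

class ExampleWidget(ExampleBase):
    __tablename__ = 'example_widgets'
    id = Column(Integer, primary_key=True)
    # Callable default: evaluated once per row at insert time.
    token = Column(String, default=lambda: str(uuid.uuid4()))

if __name__ == '__main__':
    engine = create_engine('sqlite://')
    ExampleBase.metadata.create_all(engine)
    with Session(engine) as session:
        session.add_all([ExampleWidget(), ExampleWidget()])
        session.commit()
        tokens = [w.token for w in session.query(ExampleWidget)]
        assert tokens[0] != tokens[1]  # each row received its own UUID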
|
import os
import sys
import Utils.PrimaryKeyInfo
from DbSmellDetector.SmellDetector import SmellDetector
from Model.MetaModel import MetaModel
import shutil
import Aggregator.Aggregator
def get_folder_paths():
if len(sys.argv) <= 1:
print ("Please provide path of the root folder containing SQL statements files to analyze.")
sys.exit(1)
repoStoreRoot = sys.argv[1]
repoResultRoot = os.path.join(repoStoreRoot, "dbSmellsData")
if os.path.exists(repoResultRoot):
shutil.rmtree(repoResultRoot)
os.makedirs(repoResultRoot)
aggregated_result_file = os.path.join(repoResultRoot, "aggregated_results.csv")
return repoStoreRoot, repoResultRoot, aggregated_result_file
def detect_db_smells():
print("Detecting database schema smells...")
counter = 1
logFile = os.path.join(resultRoot, "dbdeo_log.txt")
primary_key_file = os.path.join(resultRoot, "primaryKeyInfo.csv")
for file in os.listdir(repoStoreRoot):
if file.endswith(".sql"):
print(str(counter) + " : Analyzing " + str(file))
counter += 1
cur_out_file = file.strip(".sql") + ".txt"
if os.path.exists(os.path.join(resultRoot, cur_out_file)):
continue
metaModel = MetaModel()
metaModel.prepareMetaModel(os.path.join(repoStoreRoot, file), logFile)
# just to extract primary key information
Utils.PrimaryKeyInfo.printPrimaryKeyInfo(metaModel, primary_key_file)
smellDetector = SmellDetector(metaModel, resultRoot, file)
smellDetector.detectAllDbSmells()
print("Detecting database schema smells...Done.")
def aggregate_results():
print("Aggregating generated results...")
Aggregator.Aggregator.aggregate_results(resultRoot, aggregated_result_file)
print("Aggregating generated results...Done.")
repoStoreRoot, resultRoot, aggregated_result_file = get_folder_paths()
detect_db_smells()
aggregate_results()
print("Analysis complete. Thank you.")
|
from .base import Logger
from .tensorboard import TensorBoardLogger
from .wandb import WandBLogger
|
import mitmproxy
from mitmproxy.net import tcp
from mitmproxy import ctx
class CheckALPN:
def __init__(self):
self.failed = False
def configure(self, options, updated):
self.failed = mitmproxy.ctx.master.options.http2 and not tcp.HAS_ALPN
if self.failed:
ctx.log.warn(
"HTTP/2 is disabled because ALPN support missing!\n"
"OpenSSL 1.0.2+ required to support HTTP/2 connections.\n"
"Use --no-http2 to silence this warning."
)
|
"""
Demonstrate the use of Starlette middleware with Asgineer handlers.
Because Asgineer produces an ASGI-compatible application class, we can
wrap it with ASGI middleware, e.g. from Starlette. Hooray for standards!
"""
import asgineer
from starlette.middleware.gzip import GZipMiddleware
@asgineer.to_asgi
async def main(req):
return "hello world " * 1000
# All requests that have a body over 1 KiB will be zipped
main = GZipMiddleware(main, minimum_size=1024)
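# Because the wrapped handler is itself an ASGI app, middleware can be stacked.
# As a sketch (not enabled here), Starlette's CORSMiddleware could be layered on
# top of the gzip wrapper; the origin list is illustrative only:
#
#   from starlette.middleware.cors import CORSMiddleware
#   main = CORSMiddleware(main, allow_origins=["*"])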
if __name__ == "__main__":
asgineer.run("__main__:main", "uvicorn")
|
from .._model import scim_exceptions
class ScimResponse(dict):
def __init__(self, data, core_schema_definitions, extension_schema_definitions):
super().__init__()
for key in data.keys():
self[key] = data[key]
(
self._core_meta_schemas,
self._extension_schema_definitions,
) = self._get_meta_schemas(
core_schema_definitions, extension_schema_definitions
)
if len(self._core_meta_schemas) != 1:
raise AssertionError(
"Response must specify exactly one core schema - {}".format(
", ".join([s.id for s in self._core_meta_schemas])
)
)
def _get_meta_schemas(self, core_schema_definitions, extension_schema_definitions):
schema_names = self.get("schemas")
if schema_names is None or len(schema_names) == 0:
raise AssertionError("Response has no specified schema")
core_schema_names = list(core_schema_definitions.keys())
core_meta_schemas = [
core_schema_definitions[schema_name]
for schema_name in schema_names
if schema_name in core_schema_names
]
extension_meta_schemas = [
extension_schema_definitions[schema_name]
for schema_name in schema_names
if schema_name not in core_schema_names
]
return core_meta_schemas, extension_meta_schemas
def validate(self):
data = self.copy()
exceptions = []
for extension_schema_model in self._extension_schema_definitions:
tmp_data = data.pop(extension_schema_model.id, {})
try:
extension_schema_model.validate(tmp_data)
except AssertionError as ae:
exceptions.append(ae)
for core_schema_model in self._core_meta_schemas:
try:
core_schema_model.validate(data)
except AssertionError as ae:
exceptions.append(ae)
if len(exceptions) > 0:
raise scim_exceptions.AggregatedScimMultValueAttributeValidationExceptions(
location="Scim response", exceptions=exceptions
)
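# Minimal usage sketch with a stand-in schema object. The real schema definitions
# come from the package's schema loader; _ExampleSchema below is illustrative only.
class _ExampleSchema:
    def __init__(self, schema_id):
        self.id = schema_id

    def validate(self, data):
        # A real schema model performs full attribute validation here.
        assert "id" in data, "missing id attribute"

if __name__ == "__main__":
    user_urn = "urn:ietf:params:scim:schemas:core:2.0:User"
    response = ScimResponse(
        {"schemas": [user_urn], "id": "2819c223-7f76-453a-919d-413861904646"},
        core_schema_definitions={user_urn: _ExampleSchema(user_urn)},
        extension_schema_definitions={},
    )
    response.validate()  # raises an aggregated exception if any schema check fails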
|
import pandas as pd #to form the dataframe
import folium #for rendering the map
import tkinter as tk #for GUI
win = tk.Tk()
win.title("Covid Hotspot Detector")
win.geometry("512x550")
win.iconbitmap(r'C:\Users\deepa\Downloads\Icons8-Windows-8-Healthcare-Clinic.ico')
def maha(): #Maharashtra
cm_df = pd.DataFrame({'city': ['Mumbai', 'Pune', 'Aurangabad', 'Sangli', 'Nagpur','Nashik','Thane','Buldhana','Ahmednagar','Yavatmal'],#hotspot cities
'latitude': [19.075983, 18.520430, 19.876165, 16.852398, 21.145800,19.997453,19.197817,20.523737,19.085455,20.389623],#latitudes
'longitude': [72.877655, 73.856743, 75.343315, 74.581474, 79.088158,73.789802,72.967810,76.188482,74.735486,78.127160] #longitudes
})
print(cm_df)
map_cm_multiple = folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],popup=i.city).add_to(map_cm_multiple) #for on map markers
def delhi(): #Delhi
cm_df = pd.DataFrame({'city': [' Shahdara', 'New Delhi'],
'latitude': [28.671679, 28.626963],
'longitude': [77.294197, 77.215396]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def UP(): #UttarPradesh
cm_df = pd.DataFrame({'city': ['Noida', 'Meerut','Gautam Budhh Nagar','Agra','Lucknow','Firozabad','Shamli','Moradabad','Saharanpur','Ghaziabad'],
'latitude': [28.5726442, 28.916667,27.42397,27.1752554,26.8381,27.1773663,29.5008816,28.8638424,29.9880774,28.711241],
'longitude': [77.3547609, 77.683333,77.09922,78.0098161,78.389912,77.3483826,78.8057783,77.5081299,77.4445372,77.4445372]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def Kerala(): #Kerala
cm_df = pd.DataFrame({'city': ['Kasaragod', 'Kannur','Mallapuram','Ernakulam','Pathanamthitta','Thiruvananthapuram'],
'latitude': [12.421713,11.8762254,14.6094453,10.0383947,9.2834044,8.5058909],
'longitude': [75.1904498,75.3738043,79.3024381,76.5074145,76.9606628,76.9570481]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def Tamil(): #Tamil Nadu
cm_df = pd.DataFrame({'city': ['Chennai', 'Coimbature','Dindigul','Erode','Vellore','Tuticorin','Madurai','Tiruchirapalli','Thiruvarur','Kanyakumari'],
'latitude': [13.0801721,10.9961974,10.3303299,11.3692044,12.7948109,8.7235577,9.9223354,10.7620859,10.7361861,8.079252],
'longitude': [80.2838331,77.0055029,78.0673979,77.67662691,79.0006411,78.025154,78.1493658,78.712771,79.6331866,77.5499338]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def Telang(): #Telangana
cm_df = pd.DataFrame({'city': ['Hyderabad','Warangal','Nizamabad','Jogulamba Gadwal','Karimnagar','Nirmal'],
'latitude': [17.3887859,17.9806094,26.0553179,16.0999202,18.4346438,19.0915209],
'longitude': [78.4610647,79.5982115,82.9931387,77.7341584,79.1322648,78.396609]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def raj(): #Rajasthan
cm_df = pd.DataFrame({'city': ['Bhilwara','Jaipur','Jodhpur','Jaisalmer','Kota','Bikaner'],
'latitude': [25.48877352,26.916194,26.2967719,27.0280161,25.1968256,28.0159286],
'longitude': [74.6996128,75.820349,73.0351433,70.7785056,76.0008933,73.3171367]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def guj(): #Gujarat
cm_df = pd.DataFrame({'city': ['Ahmedabad','Surat','Vadodara','Rajkot','Bhavnagar'],
'latitude': [23.0216238,21.1864607,22.2973142,22.3051991,21.7718836],
'longitude': [72.5797068,72.8081281,73.1942567,70.8028335,72.1416449]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def mp(): #Madhya Pradesh
cm_df = pd.DataFrame({'city': ['Bhopal','Indore','Ujjain','Khargone','Hoshangabad'],
'latitude': [23.2530923,22.7203616,23.174597,21.8187743,22.6001502],
'longitude': [77.3962718,75.8681996,75.7851423,75.6064577,77.9266452]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def kar(): #Karnataka
cm_df = pd.DataFrame({'city': ['Bengaluru','Mysore','Belagavi'],
'latitude': [12.9791198,12.3051828,15.8572666],
'longitude': [77.5912997,76.6553609,74.5069343]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def ap(): #Andhra Pradesh
cm_df = pd.DataFrame({'city': ['Kurnool','Nellore','Vishakhapatnam','Chittoor','Anantapur'],
'latitude': [15.8309251,14.4493717,17.68009,13.1601048,14.6546235],
'longitude': [78.0425373,79.9873763,83.20161,79.1555506,77.5562598]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def wb(): #West Bengal
cm_df = pd.DataFrame({'city': ['Kolkata','Howrah','Medinipur East'],
'latitude': [22.5677459,22.5882216,22.4207025],
'longitude': [88.3476023,88.323139,87.3269963]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def bihar(): #Bihar
cm_df = pd.DataFrame({'city': ['Siwan'],
'latitude': [26.1310043],
'longitude': [84.3912566]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def Chandigarh(): #Chandigarh
cm_df = pd.DataFrame({'city': ['Chandigarh'],
'latitude': [30.7194022],
'longitude': [76.7646552]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def Chhattisgarh (): #Chhattisgarh
cm_df = pd.DataFrame({'city': ['Korba'],
'latitude': [22.5197695],
'longitude': [82.6295146]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def hr(): #Haryana
cm_df = pd.DataFrame({'city': ['Gurugram','Palwal','Faridabad'],
'latitude': [28.4646148,28.1250257,28.402837],
'longitude': [77.0299194,77.358313,77.3085626]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def jk(): #Jammu Kashmir
cm_df = pd.DataFrame({'city': ['Jammu','Udhampur','Srinagar','Bandipora','Baramulla'],
'latitude': [32.7185614,33,34.0747444,34.4563234,34.2050056],
'longitude': [74.8580917,75.166667,74.8204443,75.1834458,74.3622108]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def od(): #Odisha
cm_df = pd.DataFrame({'city': ['Khordha'],
'latitude': [20.2256472],
'longitude': [85.5605947]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def pb(): #Punjab
cm_df = pd.DataFrame({'city': ['Jalandhar','Pathankot','Shaheed Bhagat Singh Nagar'],
'latitude': [31.2920107,32.3017104,31.1268889],
'longitude': [75.5680577,75.6586425,76.1576848]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
def uk(): #Uttarakhand
cm_df = pd.DataFrame({'city': ['Dehradun'],
'latitude': [30.3255646],
'longitude': [78.0436813]
})
print(cm_df)
map_cm_multiple1= folium.Map(location=[18, 80], zoom_start=4)
for i in cm_df.itertuples():
folium.Marker(location=[i.latitude, i.longitude],
popup=i.city).add_to( map_cm_multiple1)
button = tk.Button(win,text = "Maharashtra",command = maha,bg = 'black',fg = 'green') #buttons for respective states
button.pack()
button1 = tk.Button(win,text = "Delhi",command = delhi,bg = 'black',fg = 'green')
button1.pack()
button2 = tk.Button(win,text = "Uttar Pradesh",command = UP,bg = 'black',fg = 'green')
button2.pack()
button3 = tk.Button(win,text = "Kerala",command = Kerala,bg = 'black',fg = 'green')
button3.pack()
button4 = tk.Button(win,text = "Tamil Nadu",command = Tamil,bg = 'black',fg = 'green')
button4.pack()
button5 = tk.Button(win,text = "Telangana",command = Telang,bg = 'black',fg = 'green')
button5.pack()
button6 = tk.Button(win,text = "Rajasthan",command = raj,bg = 'black',fg = 'green')
button6.pack()
button7 = tk.Button(win,text = "Gujarat",command = guj,bg = 'black',fg = 'green')
button7.pack()
button8 = tk.Button(win,text = "Madhya Pradesh",command = mp,bg = 'black',fg = 'green')
button8.pack()
button9 = tk.Button(win,text = "Karnatka",command = kar,bg = 'black',fg = 'green')
button9.pack()
button10= tk.Button(win,text = "Andhra Pradesh",command = ap,bg = 'black',fg = 'green')
button10.pack()
button11= tk.Button(win,text = "West Bengal",command = wb,bg = 'black',fg = 'green')
button11.pack()
button12= tk.Button(win,text = "Bihar",command = bihar,bg = 'black',fg = 'green')
button12.pack()
button13= tk.Button(win,text = "Chandigarh",command =Chandigarh,bg = 'black',fg = 'green')
button13.pack()
button14= tk.Button(win,text = "Chhattisgarh",command =Chhattisgarh,bg = 'black',fg = 'green')
button14.pack()
button15= tk.Button(win,text = "Haryana",command =hr,bg = 'black',fg = 'green')
button15.pack()
button16= tk.Button(win,text = "Jammu and Kashmir",command =jk,bg = 'black',fg = 'green')
button16.pack()
button17= tk.Button(win,text = "Odisha",command =od,bg = 'black',fg = 'green')
button17.pack()
button18= tk.Button(win,text = "Punjab",command =pb,bg = 'black',fg = 'green')
button18.pack()
button19= tk.Button(win,text = "Uttarakhand",command =uk,bg = 'black',fg = 'green')
button19.pack()
button20 = tk.Button(text = "Click and Quit", command = win.destroy,bg = 'red',fg = 'black') #button to close the window
button20.pack()
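# Note: each handler above builds a folium map in memory but never writes it out,
# so clicking a button only prints the DataFrame. A minimal fix (sketch only; the
# file name and webbrowser call are illustrative) would be to end each handler with:
#
#   import webbrowser
#   map_cm_multiple.save("hotspots.html")
#   webbrowser.open("hotspots.html")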
win.mainloop() |
##############################################################################
#
# Copyright (c) 2007 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import unittest
import doctest
from Testing.ZopeTestCase import FunctionalDocTestSuite
from five.grok.testing import FunctionalLayer
options = doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE
def test_suite():
suite = unittest.TestSuite()
for name in ['adapters', 'annotation', 'multiadapter', 'utilities',
'subscribers']:
test = FunctionalDocTestSuite(
module='five.grok.tests.%s' % name,
optionflags=options)
test.layer = FunctionalLayer
suite.addTest(test)
return suite
|
# Reads 3 values from the user (numbers or strings) and then returns the largest
# value among them.
# Takes 3 values and returns the largest one.
def maximumValue ( x, y, z ) :
    largest = x
    if y > x :
        largest = y
    if z > largest :
        largest = z
    return largest
# Testing the maximumValue function with integer values.
a = int( raw_input( "Entre com o 1o inteiro: " ) )
b = int( raw_input( "Entre com o 2o inteiro: " ) )
c = int( raw_input( "Entre com o 3o inteiro: " ) )
# Testing the function with floating-point values.
d = float( raw_input( "Entre com o 1o real: " ) )
e = float( raw_input( "Entre com o 2o real: " ) )
f = float( raw_input( "Entre com o 3o real: " ) )
# Testing with strings.
g = raw_input( "Entre com a 1a string: " )
h = raw_input( "Entre com a 2a string: " )
i = raw_input( "Entre com a 3a string: " )
# Printing the results.
print "O maior valor entre %d, %d e %d e' %d. " % \
( a, b, c, maximumValue( a, b, c ) )
print "O maior valor entre %.2f, %.2f e %.2f e' %.2f. " % \
( d, e, f, maximumValue( d, e, f ) )
print "O maior valor entre %s, %s e %s e' %s. " % \
( g, h, i, maximumValue( g, h, i ) ) |
# coding: utf-8
# In[1]:
# things we need for NLP
import nltk
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
# things we need for Tensorflow
import numpy as np
import tflearn
import tensorflow as tf
import random
# In[2]:
# restore all of our data structures
import pickle
data = pickle.load( open( "training_data", "rb" ) )
words = data['words']
classes = data['classes']
train_x = data['train_x']
train_y = data['train_y']
# import our chat-bot intents file
import json
with open('intents.json') as json_data:
intents = json.load(json_data)
# In[3]:
# Build neural network
net = tflearn.input_data(shape=[None, len(train_x[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
net = tflearn.regression(net)
# Define model and setup tensorboard
model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')
# In[4]:
def clean_up_sentence(sentence):
# tokenize the pattern
sentence_words = nltk.word_tokenize(sentence)
# stem each word
sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
return sentence_words
# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence
def bow(sentence, words, show_details=False):
# tokenize the pattern
sentence_words = clean_up_sentence(sentence)
# bag of words
bag = [0]*len(words)
for s in sentence_words:
for i,w in enumerate(words):
if w == s:
bag[i] = 1
if show_details:
print ("found in bag: %s" % w)
return(np.array(bag))
# In[5]:
p = bow("is your shop open today?", words)
print (p)
print (classes)
# In[6]:
# load our saved model
model.load('./model.tflearn')
# In[7]:
# create a data structure to hold user context
context = {}
ERROR_THRESHOLD = 0.25
def classify(sentence):
# generate probabilities from the model
results = model.predict([bow(sentence, words)])[0]
# filter out predictions below a threshold
results = [[i,r] for i,r in enumerate(results) if r>ERROR_THRESHOLD]
# sort by strength of probability
results.sort(key=lambda x: x[1], reverse=True)
return_list = []
for r in results:
return_list.append((classes[r[0]], r[1]))
# return tuple of intent and probability
return return_list
def response(sentence, userID='123', show_details=False):
results = classify(sentence)
# if we have a classification then find the matching intent tag
if results:
# loop as long as there are matches to process
while results:
for i in intents['intents']:
# find a tag matching the first result
if i['tag'] == results[0][0]:
# set context for this intent if necessary
if 'context_set' in i:
if show_details: print ('context:', i['context_set'])
context[userID] = i['context_set']
# check if this intent is contextual and applies to this user's conversation
if not 'context_filter' in i or (userID in context and 'context_filter' in i and i['context_filter'] == context[userID]):
if show_details: print ('tag:', i['tag'])
# a random response from the intent
some_random_response = random.choice(i['responses'])
                        print(some_random_response)
return random.choice(i['responses'])
results.pop(0)
# In[8]:
classify('is your shop open today?')
# In[9]:
response('is your shop open today?')
# In[10]:
response('do you take cash?')
# In[11]:
response('what kind of mopeds do you rent?')
# In[12]:
response('Goodbye, see you later')
# In[13]:
context
# In[14]:
response('we want to rent a moped')
# In[15]:
# show context
context
# In[16]:
response('today')
# In[17]:
classify('today')
# In[18]:
# clear context
response("Hi there!", show_details=True)
response('Sleepy fella?')
# In[19]:
response('today')
classify('today')
# In[20]:
response("thanks, your great")
# clear context
response("Hi there!", show_details=True)
#classify('Shark i presume?')
#response('Shark i presume?', show_details=True)
#classify('Dog i presume?')
#response('Dog i presume?', show_details=True)
#response('Shark i presume?', show_details=True)
response('Sleepy?',show_details=True)
|
from datetime import datetime
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.utils import timezone
class FieldsTestMixin():
"""
Provides tests of required and optional fields.
Subclasses are responsible for defining class attributes.
Attributes:
model: The subclass of ``django.db.models.Model`` being tested.
factory: A subclass of ``factory.django.DjangoModelFactory`` for
``model``, with defaults for required and optional fields.
required_fields: A list of required field names.
"""
model = None
factory = None
required_fields = None
def test_errors_on_required_fields(self):
if not self.required_fields: # pragma: no cover
return
with self.assertRaises(ValidationError) as cm:
self.model().full_clean()
blank_fields = cm.exception.message_dict.keys()
        self.assertEqual(set(self.required_fields), set(blank_fields))
def test_save_with_all_fields(self):
try:
m = self.factory.create()
m.full_clean()
except (TypeError, ValidationError): # pragma: no cover
raise AssertionError
class PublishTestMixin():
"""
Provides tests for subclasses of ``PublishedModel``.
Subclasses must also inherit ``FieldsTestMixin``.
"""
def test_can_publish(self):
now = timezone.now()
p = self.factory.create(publish=True)
self.assertTrue(p.publish)
self.assertEqual(p.publish_on.date(), now.date())
p = self.model.objects.first()
self.assertTrue(p.publish)
self.assertEqual(p.publish_on.date(), now.date())
def test_draft_by_default(self):
p = self.factory.create()
self.assertFalse(p.publish)
self.assertIsNone(p.publish_on)
def test_can_set_date(self):
y2k = datetime(2000, 1, 1, tzinfo=timezone.utc)
p = self.factory.create(publish_on=y2k)
p = self.model.objects.first()
self.assertEqual(p.publish_on, y2k)
def test_published_filter(self):
p = self.factory.create(publish=True)
d = self.factory.create()
objects = list(self.model.objects.all())
self.assertIn(p, objects)
self.assertIn(d, objects)
published = list(self.model.objects.published())
self.assertIn(p, published)
self.assertNotIn(d, published)
class TitleTestMixin():
"""
Provides tests for subclasses of ``TitledModel``.
Subclasses must also inherit ``FieldsTestMixin``.
"""
def test_slug_must_be_unique(self):
self.factory.create(slug='test')
with self.assertRaises(IntegrityError):
self.factory.create(slug='test')
def test_str_is_title(self):
p = self.factory.build(title='Test Title')
self.assertEqual(str(p), 'Test Title')
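# PublishTestMixin assumes the model's default manager exposes a published()
# queryset. A matching manager might look roughly like the sketch below (the
# real PublishedModel lives elsewhere in the project; this is an assumption):
#
#   from django.db import models
#
#   class PublishedQuerySet(models.QuerySet):
#       def published(self):
#           return self.filter(publish=True, publish_on__lte=timezone.now())
#
#   class PublishedModel(models.Model):
#       publish = models.BooleanField(default=False)
#       publish_on = models.DateTimeField(null=True, blank=True)
#       objects = PublishedQuerySet.as_manager()
#
#       class Meta:
#           abstract = True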
|
import math
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Tuple, Union
import h5py
import numpy as np
# Default parameters for each heatmap layer
# https://github.com/napari/napari/blob/d5a28122129e6eae0f5ada77cb62c4bd5a714b60/napari/layers/base/base.py#L26
RENDERING_DEFAULTS = {
"visible": True,
"colormap": "turbo",
"opacity": 0.5,
}
@dataclass
class Patches:
coords: np.ndarray
scores: np.ndarray
counts: int
patch_size: Tuple[int, int]
labels: List[str]
@classmethod
def from_h5(cls, path: Union[Path, str]):
with h5py.File(path) as f:
coords = f["coords"][:]
scores = f["A_coattn"][:]
counts = f["counts"][:][0]
# TODO: derive these from the blockmap source
patch_size = (512, 512)
labels = [
"Tumor Suppressor Genes",
"Oncogenes",
"Protein Kinases",
"Cell Differentiation Markers",
"Transcription Factors",
"Cytokines and Growth Factors",
]
return cls(coords, scores, counts, patch_size, labels)
def as_layer(
self, normalize=True, meta=RENDERING_DEFAULTS
) -> Tuple[np.ndarray, Dict, str]:
# Compute the size of the given raster
size_x, size_y = self.patch_size
x_min, y_min = np.amin(self.coords, axis=0)
x_max, y_max = np.amax(self.coords, axis=0) + (size_x, size_y)
x_len = math.ceil((x_max - x_min) / size_x)
y_len = math.ceil((y_max - y_min) / size_y)
# Scale and translate each pixel resolution heatmap to reference size
meta = {
**meta,
**{"translate": (0, y_min, x_min), "scale": (1, size_y, size_x)},
}
# Create dense pixel heatmap
data = np.zeros((len(self.labels), y_len, x_len), dtype="f4")
for i, label in enumerate(self.labels):
# Extract attention scores for current heatmap
scores = self.scores[:, 0, i]
if normalize:
# normalize scores for current heatmap between 0-1
min_score = np.amin(scores)
scores = (scores - min_score) / (np.amax(scores) - min_score)
# Fill dense array with corresponding attention scores
for coord_idx, (left, top) in enumerate(self.coords):
idx = (
i,
math.ceil((top - y_min) / size_y),
math.ceil((left - x_min) / size_x),
)
data[idx] = scores[coord_idx]
return (data, {**meta, **{"name": "heatmap"}}, "image")
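# Quick self-contained smoke test (illustrative only): build a tiny Patches
# object from synthetic coordinates and attention scores, then rasterize it.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    example_coords = np.array([[0, 0], [512, 0], [0, 512], [512, 512]])
    example_labels = ["Example pathway A", "Example pathway B"]
    example_scores = rng.random((len(example_coords), 1, len(example_labels)))
    patches = Patches(
        coords=example_coords,
        scores=example_scores,
        counts=len(example_coords),
        patch_size=(512, 512),
        labels=example_labels,
    )
    layer_data, layer_meta, layer_type = patches.as_layer()
    print(layer_data.shape, layer_type)  # (2, 2, 2) image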
|
from cluster_manager import ClusterManager
from cluster import Cluster
def main():
physics_cluster = Cluster("Physics", stopwords_threshold=50, champion_list_size=10)
math_cluster = Cluster("Mathematics", stopwords_threshold=50, champion_list_size=10)
health_cluster = Cluster("Health", stopwords_threshold=50, champion_list_size=10)
history_cluster = Cluster("History", stopwords_threshold=50, champion_list_size=10)
tech_cluster = Cluster("Technology", stopwords_threshold=50, champion_list_size=10)
cluster_manager = ClusterManager()
cluster_manager.add_cluster(physics_cluster)
cluster_manager.add_cluster(math_cluster)
cluster_manager.add_cluster(health_cluster)
cluster_manager.add_cluster(history_cluster)
cluster_manager.add_cluster(tech_cluster)
running = True
while running:
query = input("Enter your query (Or type 'terminate'): ")
if query == 'terminate':
running = False
else:
results = cluster_manager.search(query, top=5)
print()
if len(results) == 0:
print("No Results :(")
for i in range(len(results)):
print(f"{i + 1}. Doc Title : {results[i][0]} \t\t -- \t\t Cosine Score : {results[i][1]}")
print()
if __name__ == '__main__':
main()
|
"""
# -*- coding: utf-8 -*-
-------------------------------------------------
# @Project :meiduo_mall
# @File :view_extend.py
# @Date :2021/11/5 16:09
# @Author :turbo
# @Email :2647387166
# @Software :PyCharm
-------------------------------------------------
"""
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import JsonResponse
class LoginrequiredJsonMixin(LoginRequiredMixin):
    '''Deny access to anonymous users (return JSON instead of redirecting).'''
def handle_no_permission(self):
return JsonResponse({'code':400,'errmsg':'匿名用户,请登录'}) |
from django import forms
from .models import JobOffer
class JobOfferForm(forms.ModelForm):
# Form model for a JobOffer object
class Meta:
model = JobOffer
fields = ('candidate_first_name', 'candidate_last_name', 'candidate_email', 'job_title', 'offer_amount')
# Set form labels
labels = {
'candidate_first_name': 'Candidate First Name',
'candidate_last_name': 'Candidate Last Name',
'candidate_email': 'Candidate Email Address',
'job_title': 'Job Title',
'offer_amount': 'Offer Amount'
}
def __init__(self, *args, **kwargs):
super(JobOfferForm, self).__init__(*args, **kwargs)
self.fields['job_title'].empty_label = 'Select'
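# Illustrative usage (all field values are made up): the form validates like any
# ModelForm, and empty_label only changes how the job_title choice is rendered.
#
#   form = JobOfferForm(data={
#       'candidate_first_name': 'Ada',
#       'candidate_last_name': 'Lovelace',
#       'candidate_email': 'ada@example.com',
#       'job_title': 1,            # pk of an existing related object
#       'offer_amount': 90000,
#   })
#   if form.is_valid():
#       offer = form.save()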
|
from glob import glob
import os.path
from setuptools import find_packages, setup
from typing import List
def package_files(directory: str) -> List[str]:
paths = []
for (path, directories, filenames) in os.walk(directory):
for filename in filenames:
paths.append(os.path.join('..', path, filename))
return paths
script_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(script_dir, 'README.md'), 'r') as f:
README = f.read()
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Games/Entertainment',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Version Control :: Git',
'Topic :: Utilities'
]
setup(
name='shulkr',
version='0.4.2',
description='Decompile multiple versions of Minecraft with a single command (for research)',
long_description=README,
long_description_content_type='text/markdown',
url='https://github.com/clabe45/shulkr',
author='Caleb Sacks',
license='GPLv3',
classifiers=classifiers,
keywords=['minecraft', 'git', 'decompile', 'game'],
packages=find_packages(exclude=['tests']),
py_modules=['shulkr'],
# include_package_data=True,
# package_data={'': package_files('shulkr/DecompilerMC')},
install_requires=['gitpython', 'javalang', 'unidiff', 'requests'],
entry_points={
'console_scripts': ['shulkr=shulkr.__main__:main']
}
) |
__path__ = __import__("pkgutil").extend_path(__path__, __name__)
__license__ = "Apache License 2.0"
|
import unittest
from rnapipe.samples import *
class TestSamples(unittest.TestCase):
'''Unit tests for Samples'''
def test_sample(self):
f1 = SeqFile("sample.fastq.gz")
samp = Sample(name="sample", condition="c1", covariates=[], files=[f1])
self.assertEqual(samp.name, "sample")
self.assertEqual(samp.condition, "c1")
self.assertEqual(len(samp.files), 1)
self.assertEqual(samp.technical_replicates, False)
def test_PE_sample(self):
f1_R1 = SeqFile("sample_ID_x_L001_R1.fastq.gz")
f1_R2 = SeqFile("sample_ID_x_L001_R2.fastq.gz")
samp = Sample(name="sample", condition="c1", covariates=[], files=[f1_R1, f1_R2])
self.assertEqual(samp.name, "sample")
self.assertEqual(samp.condition, "c1")
self.assertEqual(len(samp.files), 2)
self.assertEqual(samp.technical_replicates, False)
def test_replicate_samples(self):
f1 = SeqFile("sample_L001_R1.fastq.gz")
f2 = SeqFile("sample_L002_R1.fastq.gz")
samp = Sample(name="sample", condition="c1", covariates=[], files=[f1, f2])
self.assertEqual(samp.name, "sample")
self.assertEqual(samp.condition, "c1")
self.assertEqual(len(samp.files), 2)
self.assertEqual(samp.technical_replicates, True)
if __name__ == '__main__':
unittest.main()
|
import pytest
from pynormalizenumexp.expression.base import NotationType
from pynormalizenumexp.expression.abstime import AbstimePattern
from pynormalizenumexp.utility.dict_loader import DictLoader
from pynormalizenumexp.utility.digit_utility import DigitUtility
@pytest.fixture(scope="class")
def digit_utility():
return DigitUtility(DictLoader("ja"))
class TestDigitUtility:
def test_init_kansuji(self, digit_utility: DigitUtility):
digit_utility.init_kansuji()
        # Check just one entry for each place value
assert digit_utility.kansuji_09_to_value["〇"] == 0
assert digit_utility.kansuji_kurai_to_power_val["十"] == 1
assert digit_utility.kansuji_kurai_to_power_val[" "] == 0
        # Check just one entry for each notation_type
assert digit_utility.str_to_notation_type["〇"] == NotationType.KANSUJI_09
assert digit_utility.str_to_notation_type["十"] == NotationType.KANSUJI_KURAI_SEN
assert digit_utility.str_to_notation_type["万"] == NotationType.KANSUJI_KURAI_MAN
def test_is_hankakusuji(self, digit_utility: DigitUtility):
assert digit_utility.is_hankakusuji("1") == True
assert digit_utility.is_hankakusuji("1") == False
assert digit_utility.is_hankakusuji("一") == False
assert digit_utility.is_hankakusuji("あ") == False
def test_is_zenkakusuji(self, digit_utility: DigitUtility):
assert digit_utility.is_zenkakusuji("1") == False
assert digit_utility.is_zenkakusuji("1") == True
assert digit_utility.is_zenkakusuji("一") == False
assert digit_utility.is_zenkakusuji("あ") == False
def test_is_arabic(self, digit_utility: DigitUtility):
assert digit_utility.is_arabic("1") == True
assert digit_utility.is_arabic("1") == True
assert digit_utility.is_arabic("一") == False
assert digit_utility.is_arabic("あ") == False
def test_is_notation_type(self, digit_utility: DigitUtility):
digit_utility.init_kansuji()
assert digit_utility.is_notation_type("〇", NotationType.KANSUJI_09) == True
assert digit_utility.is_notation_type("〇", NotationType.KANSUJI_KURAI_MAN) == False
assert digit_utility.is_notation_type("nothing", NotationType.KANSUJI_09) == False
assert digit_utility.is_notation_type(None, NotationType.KANSUJI_09) == False
def test_is_kansuji09(self, digit_utility: DigitUtility):
digit_utility.init_kansuji()
assert digit_utility.is_kansuji09("1") == False
assert digit_utility.is_kansuji09("1") == False
assert digit_utility.is_kansuji09("一") == True
assert digit_utility.is_kansuji09("十") == False
assert digit_utility.is_kansuji09("万") == False
assert digit_utility.is_kansuji09("あ") == False
def test_is_kansuji_kurai_sen(self, digit_utility: DigitUtility):
digit_utility.init_kansuji()
assert digit_utility.is_kansuji_kurai_sen("1") == False
assert digit_utility.is_kansuji_kurai_sen("1") == False
assert digit_utility.is_kansuji_kurai_sen("一") == False
assert digit_utility.is_kansuji_kurai_sen("十") == True
assert digit_utility.is_kansuji_kurai_sen("百") == True
assert digit_utility.is_kansuji_kurai_sen("千") == True
assert digit_utility.is_kansuji_kurai_sen("万") == False
assert digit_utility.is_kansuji_kurai_sen("あ") == False
def test_is_kansuji_kurai_man(self, digit_utility: DigitUtility):
digit_utility.init_kansuji()
assert digit_utility.is_kansuji_kurai_man("1") == False
assert digit_utility.is_kansuji_kurai_man("1") == False
assert digit_utility.is_kansuji_kurai_man("一") == False
assert digit_utility.is_kansuji_kurai_man("十") == False
assert digit_utility.is_kansuji_kurai_man("万") == True
assert digit_utility.is_kansuji_kurai_man("億") == True
assert digit_utility.is_kansuji_kurai_man("兆") == True
assert digit_utility.is_kansuji_kurai_man("あ") == False
def test_is_kansuji_kurai(self, digit_utility: DigitUtility):
digit_utility.init_kansuji()
assert digit_utility.is_kansuji_kurai("1") == False
assert digit_utility.is_kansuji_kurai("1") == False
assert digit_utility.is_kansuji_kurai("一") == False
assert digit_utility.is_kansuji_kurai("十") == True
assert digit_utility.is_kansuji_kurai("万") == True
assert digit_utility.is_kansuji_kurai("あ") == False
def test_is_kansuji(self, digit_utility: DigitUtility):
digit_utility.init_kansuji()
assert digit_utility.is_kansuji("1") == False
assert digit_utility.is_kansuji("1") == False
assert digit_utility.is_kansuji("一") == True
assert digit_utility.is_kansuji("十") == True
assert digit_utility.is_kansuji("万") == True
assert digit_utility.is_kansuji("あ") == False
def test_is_number(self, digit_utility: DigitUtility):
digit_utility.init_kansuji()
assert digit_utility.is_number("1") == True
assert digit_utility.is_number("1") == True
assert digit_utility.is_number("一") == True
assert digit_utility.is_number("十") == True
assert digit_utility.is_number("万") == True
assert digit_utility.is_number("あ") == False
def test_is_comma(self, digit_utility: DigitUtility):
assert digit_utility.is_comma("、") == True
assert digit_utility.is_comma(",") == True
assert digit_utility.is_comma(",") == True
assert digit_utility.is_comma("。") == False
def test_is_decimal_point(self, digit_utility: DigitUtility):
assert digit_utility.is_decimal_point(".") == True
assert digit_utility.is_decimal_point(".") == True
assert digit_utility.is_decimal_point("・") == True
assert digit_utility.is_decimal_point("、") == False
def test_is_range_expression(self, digit_utility: DigitUtility):
assert digit_utility.is_range_expression("~") == True
assert digit_utility.is_range_expression("~") == True
assert digit_utility.is_range_expression("〜") == True
assert digit_utility.is_range_expression("-") == True
assert digit_utility.is_range_expression("−") == True
assert digit_utility.is_range_expression("ー") == True
assert digit_utility.is_range_expression("―") == True
assert digit_utility.is_range_expression("から") == True
assert digit_utility.is_range_expression("あ") == False
def test_kansuji_kurai2power_value(self, digit_utility: DigitUtility):
digit_utility.init_kansuji()
assert digit_utility.kansuji_kurai2power_value("十") == 1
with pytest.raises(ValueError):
digit_utility.kansuji_kurai2power_value("一")
def test_chars2notation_type(self, digit_utility: DigitUtility):
digit_utility.init_kansuji()
assert digit_utility.chars2notation_type("1") == NotationType.HANKAKU
assert digit_utility.chars2notation_type("1") == NotationType.ZENKAKU
assert digit_utility.chars2notation_type("一") == NotationType.KANSUJI
assert digit_utility.chars2notation_type("あ") == NotationType.NOT_NUMBER
def test_chars2full_notation_type(self, digit_utility: DigitUtility):
digit_utility.init_kansuji()
assert digit_utility.chars2full_notation_type("1") == NotationType.HANKAKU
assert digit_utility.chars2full_notation_type("1") == NotationType.ZENKAKU
assert digit_utility.chars2full_notation_type("一") == NotationType.KANSUJI_09
assert digit_utility.chars2full_notation_type("十") == NotationType.KANSUJI_KURAI_SEN
assert digit_utility.chars2full_notation_type("万") == NotationType.KANSUJI_KURAI_MAN
assert digit_utility.chars2full_notation_type("あ") == NotationType.NOT_NUMBER
|
# TestSwiftTypeAliasFormatters.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test that Swift typealiases get formatted properly
"""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
class TestSwiftTypeAliasFormatters(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
@swiftTest
def test_swift_type_alias_formatters(self):
"""Test that Swift typealiases get formatted properly"""
self.build()
target, process, thread, a_breakpoint = \
lldbutil.run_to_source_breakpoint(
self, 'break here', lldb.SBFileSpec('main.swift'))
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd(
"settings set target.max-children-count 256",
check=False)
self.addTearDownHook(cleanup)
self.expect("frame variable f", substrs=['Foo) f = (value = 12)'])
self.expect("frame variable b", substrs=['Bar) b = (value = 24)'])
self.runCmd('type summary add a.Foo -v -s "hello"')
self.expect("frame variable f", substrs=['Foo) f = hello'])
self.expect("frame variable b", substrs=['Bar) b = hello'])
self.runCmd('type summary add a.Bar -v -s "hi"')
self.expect("frame variable f", substrs=['Foo) f = hello'])
self.expect("frame variable b", substrs=['Bar) b = hi'])
self.runCmd("type summary delete a.Foo")
self.expect("frame variable f", substrs=['Foo) f = (value = 12)'])
self.expect("frame variable b", substrs=['Bar) b = hi'])
self.runCmd("type summary delete a.Bar")
self.runCmd("type summary add -C no -v a.Foo -s hello")
self.expect("frame variable f", substrs=['Foo) f = hello'])
self.expect("frame variable b", substrs=['Bar) b = (value = 24)'])
if __name__ == '__main__':
    import atexit
    import unittest2
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
import os
from db_eplusout_reader.db_esofile import DBEsoFile, DBEsoFileCollection
from db_eplusout_reader.sql_reader import get_results_from_sql
def get_results(
file_or_path, variables, frequency, alike=False, start_date=None, end_date=None
):
r"""
Extract results from given file.
    Use a single 'Variable' named tuple or a list of them to specify requested outputs.
v = Variable(
key="PEOPLE BLOCK1:ZONE2",
type="Zone Thermal Comfort Fanger Model",
units=None
)
    When one (or more) 'Variable' fields is set to None, filtering on that
    field is not applied.
    Variable(None, None, None) returns all outputs.
    Variable(None, None, "J") returns all 'energy' outputs.
    Frequency defines the output interval - it can be one of "timestep", "hourly",
    "daily", "monthly", "annual" and "runperiod". The constants module includes
    the helpers TS, H, D, M, A and RP.
    The function needs to be called once per interval, so call it multiple times
    to get results from several intervals.
    The optional 'alike' argument defines whether the variable search matches the
    full string or only a substring (the search is always case insensitive).
    The optional start and end date arguments slice the resulting arrays based on
    timestamp data.
Examples
--------
from datetime import datetime
    from db_eplusout_reader import Variable, get_results
    from db_eplusout_reader.constants import D
variables = [
Variable("", "Electricity:Facility", "J"), # standard meter
Variable("Cumulative", "Electricity:Facility", "J"), # cumulative meter
Variable(None, None, None), # get all outputs
Variable("PEOPLE BLOCK1:ZONE2", "Zone Thermal Comfort Fanger Model PMV", ""),
Variable("PEOPLE BLOCK", "Zone Thermal Comfort Fanger Model PMV", "")
]
# get results for variables fully matching output variables
# the last variable above won't be found as variable 'key' does not fully match
# variables will be extracted from 'daily' interval results
# start and end date slicing is not applied
results = get_results(
r"C:\some\path\eplusout.sql",
variables=variables,
frequency=D,
alike=False
)
# 'alike' argument is set to True so even substring match is enough to match variable
# the last variable will be found ("PEOPLE BLOCK" matches "PEOPLE BLOCK1:ZONE2")
# start and end dates are specified so only 'May' data will be included
results = get_results(
r"C:\some\path\eplusout.sql",
variables=variables,
frequency=D,
alike=True,
start_date=datetime(2002, 5, 1, 0),
end_date=datetime(2002, 5, 31, 23, 59)
)
Parameters
----------
file_or_path : DBEsoFile, DBEsoFileCollection or PathLike
A processed EnergyPlus .eso file, path to unprocessed .eso file
or path to unprocessed .sql file.
variables : Variable or List of Variable
Requested output variables.
frequency : str
An output interval, this can be one of {TS, H, D, M, A, RP} constants.
    alike : bool, default False
        Specify whether the full string or only a part of the variable attribute
        needs to match; filtering is case insensitive in both cases.
    start_date : datetime.datetime, default None
        Lower datetime interval boundary, inclusive.
    end_date : datetime.datetime, default None
        Upper datetime interval boundary, inclusive.
Returns
-------
ResultsDictionary : Dict of {Variable, list of float}
A dictionary like class with some properties to easily extract output values.
"""
if isinstance(file_or_path, str):
_, ext = os.path.splitext(file_or_path)
if ext == ".sql":
results = get_results_from_sql(
file_or_path,
variables,
frequency,
alike=alike,
start_date=start_date,
end_date=end_date,
)
elif ext == ".eso":
raise NotImplementedError("Sorry, this has not been implemented yet.")
else:
raise TypeError("Unsupported file type '{}' provided!".format(ext))
else:
if isinstance(file_or_path, (DBEsoFile, DBEsoFileCollection)):
raise NotImplementedError("Sorry, this has not been implemented yet.")
else:
raise TypeError(
"Unsupported class '{}' provided!".format(type(file_or_path).__name__)
)
return results
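# --- usage sketch (illustration only, not part of the original module) ---
# A minimal example of requesting daily results from a .sql file and reading the
# returned dictionary-like object. The 'Variable' and 'D' import paths follow the
# docstring above and are assumed here; the file path is just a placeholder.
if __name__ == "__main__":
    from db_eplusout_reader import Variable
    from db_eplusout_reader.constants import D
    requested = [Variable(key=None, type="Electricity:Facility", units="J")]
    results = get_results(r"C:\some\path\eplusout.sql", requested, frequency=D)
    for variable, values in results.items():
        print(variable, values[:3])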
|
#!/usr/bin/python3
import logging
import asyncio
from hbmqtt.client import MQTTClient, ClientException
from hbmqtt.mqtt.constants import QOS_0, QOS_1, QOS_2
from hbmqtt.session import ApplicationMessage
from mqtt_config import CONFIG_CLIENT as CONFIG
from params import params
import pickle
logger = logging.getLogger()
class mqtt_client(MQTTClient):
def __init__(self, client_id=None, config=CONFIG, loop=None):
MQTTClient.__init__(self, client_id, config, loop)
def connect(self, username=None, password=None):
if username and password:
uri = CONFIG['broker']['uri']
header = uri.split(':')[0]
addr = uri.split('@')[-1]
uri = header + '://' + str(username) + ':' + str(password) + '@' + addr
else:
uri = self.config['broker']['uri']
self.logger.debug("MQTT client connect to %s" % uri)
# yield from MQTTClient.connect(self, uri=uri)
self._loop.run_until_complete(MQTTClient.connect(self, uri=uri))
def publish(self, message, topic=None, qos=None, retain=None):
if not topic:
topic = 'devices/' + self.session.username
        if isinstance(message, str):
            message = bytes(message, encoding='utf-8')
        elif isinstance(message, (bytes, bytearray)):
            pass
        else:
            # pickle arbitrary Python objects so they can be sent as bytes
            message = pickle.dumps(message)
# yield from MQTTClient.publish(self, topic, message, qos=qos, retain=retain)
self._loop.run_until_complete(MQTTClient.publish(self, topic, message, qos=qos, retain=retain))
def disconnect(self):
self._loop.run_until_complete(MQTTClient.disconnect(self))
@asyncio.coroutine
def test_coro():
C = mqtt_client(client_id='test', config=CONFIG)
yield from C.connect()
tasks = [
asyncio.ensure_future(C.publish(b'TEST MESSAGE WITH QOS_0', qos=QOS_0)),
asyncio.ensure_future(C.publish(b'TEST MESSAGE WITH QOS_1', qos=QOS_1)),
asyncio.ensure_future(C.publish(b'TEST MESSAGE WITH QOS_2', qos=QOS_2)),
asyncio.ensure_future(C.publish(b'TEST MESSAGE WITH QOS_2', topic='devices/a', qos=QOS_2)),
]
yield from asyncio.wait(tasks)
logger.info("messages published")
yield from C.disconnect()
def test():
C = mqtt_client(client_id='test', config=CONFIG)
C.connect('test', 'test')
C.publish('TEST MESSAGE WITH QOS_0', topic='devices/test', qos=QOS_0)
C.publish('TEST MESSAGE WITH QOS_1', topic='devices/test', qos=QOS_1)
C.publish('TEST MESSAGE WITH QOS_2', topic='devices/test', qos=QOS_2)
C.disconnect()
if __name__ == '__main__':
formatter = "[%(asctime)s] :: %(levelname)s :: %(name)s :: %(message)s"
logging.basicConfig(level=logging.DEBUG, format=formatter)
# asyncio.get_event_loop().run_until_complete(test_coro())
# asyncio.get_event_loop().run_forever()
test()
|
import bpy
from bpy.props import *
from .. events import propertyChanged
from .. data_structures import BezierSpline, PolySpline
from .. base_types import AnimationNodeSocket, PythonListSocket
from .. data_structures.splines.from_blender import (createSplinesFromBlenderObject,
createSplineFromBlenderSpline)
class SplineSocket(bpy.types.NodeSocket, AnimationNodeSocket):
bl_idname = "an_SplineSocket"
bl_label = "Spline Socket"
dataType = "Spline"
drawColor = (0.8, 0.4, 1.0, 1.0)
storable = True
comparable = False
object: PointerProperty(type = bpy.types.Object,
description = "Use the first spline from this object",
update = propertyChanged)
useWorldSpace: BoolProperty(default = True,
description = "Convert points to world space",
update = propertyChanged)
def drawProperty(self, layout, text, node):
row = layout.row(align = True)
row.prop(self, "object", text = text)
self.invokeFunction(row, node, "handleEyedropperButton", icon = "EYEDROPPER", passEvent = True,
description = "Assign active object to this socket (hold CTRL to open a rename object dialog)")
if self.object:
row.prop(self, "useWorldSpace", text = "", icon = "WORLD")
def getValue(self):
if getattr(self.object, "type", "") != "CURVE":
return BezierSpline()
bSplines = self.object.data.splines
if len(bSplines) > 0:
spline = createSplineFromBlenderSpline(bSplines[0])
# is None when the spline type is not supported
if spline is not None:
if self.useWorldSpace:
spline.transform(self.object.matrix_world)
return spline
return BezierSpline()
def setProperty(self, data):
self.object, self.useWorldSpace = data
def getProperty(self):
return self.object, self.useWorldSpace
def handleEyedropperButton(self, event):
if event.ctrl:
bpy.ops.an.rename_datablock_popup("INVOKE_DEFAULT",
oldName = self.object.name,
path = "bpy.data.objects",
icon = "OUTLINER_OB_CURVE")
else:
object = bpy.context.active_object
if getattr(object, "type", "") == "CURVE":
self.object = object
@classmethod
def getDefaultValue(cls):
return BezierSpline()
@classmethod
def getCopyExpression(cls):
return "value.copy()"
@classmethod
def correctValue(cls, value):
if isinstance(value, (BezierSpline, PolySpline)):
return value, 0
return cls.getDefaultValue(), 2
class SplineListSocket(bpy.types.NodeSocket, PythonListSocket):
bl_idname = "an_SplineListSocket"
bl_label = "Spline List Socket"
dataType = "Spline List"
baseType = SplineSocket
drawColor = (0.8, 0.4, 1.0, 0.7)
storable = True
comparable = False
object: PointerProperty(type = bpy.types.Object,
description = "Use the splines from this object",
update = propertyChanged)
useWorldSpace: BoolProperty(default = True,
description = "Convert points to world space",
update = propertyChanged)
def drawProperty(self, layout, text, node):
row = layout.row(align = True)
row.prop(self, "object", text = text)
self.invokeFunction(row, node, "assignActiveObject", icon = "EYEDROPPER")
if self.object:
row.prop(self, "useWorldSpace", text = "", icon = "WORLD")
def getValue(self):
splines = createSplinesFromBlenderObject(self.object)
if self.useWorldSpace:
for spline in splines:
spline.transform(self.object.matrix_world)
return splines
def setProperty(self, data):
self.object, self.useWorldSpace = data
def getProperty(self):
return self.object, self.useWorldSpace
def assignActiveObject(self):
object = bpy.context.active_object
if getattr(object, "type", "") == "CURVE":
self.object = object
@classmethod
def getCopyExpression(cls):
return "[element.copy() for element in value]"
@classmethod
def correctValue(cls, value):
if isinstance(value, list):
if all(isinstance(element, (BezierSpline, PolySpline)) for element in value):
return value, 0
return cls.getDefaultValue(), 2
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import numpy as np
import time
import os
import math
import random
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
from paddle.fluid.executor import Executor
import data
from args import *
import lm_model
import logging
logging.basicConfig()
logger = logging.getLogger("lm")
import pickle
def prepare_batch_input(batch, args):
x = batch['token_ids']
x_r = batch['token_ids_reverse']
y = batch['next_token_id']
y_r = batch['next_token_id_reverse']
inst = []
for i in range(len(x)):
if args.use_custom_samples:
custom_samples_array = np.zeros(
(args.num_steps, args.n_negative_samples_batch + 1),
dtype='int64')
custom_samples_array_r = np.zeros(
(args.num_steps, args.n_negative_samples_batch + 1),
dtype='int64')
custom_probabilities_array = np.zeros(
(args.num_steps, args.n_negative_samples_batch + 1),
dtype='float32')
for j in range(args.num_steps):
for k in range(args.n_negative_samples_batch + 1):
custom_samples_array[j][k] = k
custom_samples_array_r[j][k] = k
custom_probabilities_array[j][k] = 1.0
custom_samples_array[j][0] = y[i][j]
custom_samples_array_r[j][0] = y_r[i][j]
inst.append([
x[i], y[i], x_r[i], y_r[i], custom_samples_array,
custom_samples_array_r, custom_probabilities_array
])
else:
inst.append([x[i], y[i], x_r[i], y_r[i]])
return inst
def batch_reader(batch_list, args):
res = []
for batch in batch_list:
res.append(prepare_batch_input(batch, args))
return res
def read_multiple(reader, batch_size, count, clip_last=True):
"""
Stack data from reader for multi-devices.
"""
def __impl__():
# one time read batch_size * count data for rnn
for data in reader():
inst_num_per_part = batch_size
split_data = {}
len_check = True
for k in data.keys():
if data[k] is not None:
if len(data[k]) != batch_size * count:
len_check = False
print("data check error!!, data=" + data[k] + ", k=" +
k)
break
if len_check:
res = []
for i in range(count):
split_data = {}
for k in data.keys():
if data[k] is not None:
split_data[k] = data[k][inst_num_per_part * i:
inst_num_per_part * (i + 1)]
res.append(split_data)
yield res
return __impl__
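# --- usage sketch (illustration only, never called by the training code) ---
# read_multiple() expects every batch from `reader` to contain batch_size * count
# items per key and re-splits it into one sub-dict per device. The toy reader and
# the helper below are hypothetical and only demonstrate the output shape.
def _read_multiple_example():
    def toy_reader():
        yield {'token_ids': list(range(8)), 'next_token_id': list(range(8))}
    stacked = read_multiple(toy_reader, batch_size=4, count=2)
    for parts in stacked():
        assert len(parts) == 2
        assert parts[0]['token_ids'] == [0, 1, 2, 3]
        assert parts[1]['token_ids'] == [4, 5, 6, 7]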
def LodTensor_Array(lod_tensor):
lod = lod_tensor.lod()
array = np.array(lod_tensor)
new_array = []
for i in range(len(lod[0]) - 1):
new_array.append(array[lod[0][i]:lod[0][i + 1]])
return new_array
def get_current_model_para(train_prog, train_exe):
param_list = train_prog.block(0).all_parameters()
param_name_list = [p.name for p in param_list]
vals = {}
for p_name in param_name_list:
p_array = np.array(fluid.global_scope().find_var(p_name).get_tensor())
vals[p_name] = p_array
return vals
def save_para_npz(train_prog, train_exe):
logger.info("begin to save model to model_base")
param_list = train_prog.block(0).all_parameters()
param_name_list = [p.name for p in param_list]
vals = {}
for p_name in param_name_list:
p_array = np.array(fluid.global_scope().find_var(p_name).get_tensor())
vals[p_name] = p_array
emb = vals["embedding_para"]
logger.info("begin to save model to model_base")
np.savez("mode_base", **vals)
def prepare_input(batch, epoch_id=0, with_lr=True):
x, y = batch
inst = []
for i in range(len(x)):
inst.append([x[i], y[i]])
return inst
def eval(vocab, infer_progs, dev_count, logger, args):
infer_prog, infer_startup_prog, infer_model = infer_progs
feed_order = infer_model.feed_order
loss = infer_model.loss
# prepare device
place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
exe = Executor(place)
if not args.use_gpu:
place = fluid.CPUPlace()
import multiprocessing
dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
else:
place = fluid.CUDAPlace(0)
dev_count = fluid.core.get_cuda_device_count()
total_loss = 0.0
total_cnt = 0
n_batch_cnt = 0
n_batch_loss = 0.0
val_feed_list = [
infer_prog.global_block().var(var_name) for var_name in feed_order
]
val_feeder = fluid.DataFeeder(val_feed_list, place)
dev_data = data.BidirectionalLMDataset(
args.test_path, vocab, test=True, shuffle_on_load=False)
dev_data_iter = lambda: dev_data.iter_batches(args.batch_size * dev_count, args.num_steps)
dev_reader = read_multiple(dev_data_iter, args.batch_size, dev_count)
last_hidden_values = np.zeros(
(dev_count, args.num_layers * 2 * args.batch_size * args.embed_size),
dtype='float32')
last_cell_values = np.zeros(
(dev_count, args.num_layers * 2 * args.batch_size * args.hidden_size),
dtype='float32')
for batch_id, batch_list in enumerate(dev_reader(), 1):
feed_data = batch_reader(batch_list, args)
feed = list(val_feeder.feed_parallel(feed_data, dev_count))
for i in range(dev_count):
init_hidden_tensor = fluid.core.LoDTensor()
if args.use_gpu:
placex = fluid.CUDAPlace(i)
else:
placex = fluid.CPUPlace()
init_hidden_tensor.set(last_hidden_values[i], placex)
init_cell_tensor = fluid.core.LoDTensor()
init_cell_tensor.set(last_cell_values[i], placex)
feed[i]['init_hiddens'] = init_hidden_tensor
feed[i]['init_cells'] = init_cell_tensor
last_hidden_values = []
last_cell_values = []
for i in range(dev_count):
val_fetch_outs = exe.run(program=infer_prog,
feed=feed[i],
fetch_list=[
infer_model.loss.name,
infer_model.last_hidden.name,
infer_model.last_cell.name
],
return_numpy=False)
last_hidden_values.append(np.array(val_fetch_outs[1]))
last_cell_values.append(np.array(val_fetch_outs[2]))
total_loss += np.array(val_fetch_outs[0]).sum()
n_batch_cnt += len(np.array(val_fetch_outs[0]))
total_cnt += len(np.array(val_fetch_outs[0]))
n_batch_loss += np.array(val_fetch_outs[0]).sum()
last_hidden_values = np.array(last_hidden_values).reshape((
dev_count, args.num_layers * 2 * args.batch_size * args.embed_size))
last_cell_values = np.array(last_cell_values).reshape(
(dev_count,
args.num_layers * 2 * args.batch_size * args.hidden_size))
log_every_n_batch = args.log_interval
if log_every_n_batch > 0 and batch_id % log_every_n_batch == 0:
logger.info('Average dev loss from batch {} to {} is {}'.format(
batch_id - log_every_n_batch + 1, batch_id, "%.10f" % (
n_batch_loss / n_batch_cnt)))
n_batch_loss = 0.0
n_batch_cnt = 0
batch_offset = 0
ppl = np.exp(total_loss / total_cnt)
return ppl
def train():
args = parse_args()
if args.random_seed == 0:
args.random_seed = None
print("random seed is None")
if args.enable_ce:
random.seed(args.random_seed)
np.random.seed(args.random_seed)
logger = logging.getLogger("lm")
logger.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)
logger.info('Running with args : {}'.format(args))
logger.info('Running paddle : {}'.format(paddle.version.commit))
hidden_size = args.hidden_size
batch_size = args.batch_size
data_path = args.data_path
logger.info("begin to load vocab")
vocab = data.Vocabulary(args.vocab_path, validate_file=True)
vocab_size = vocab.size
logger.info("finished load vocab")
logger.info('build the model...')
# build model
train_prog = fluid.Program()
train_startup_prog = fluid.Program()
if args.enable_ce:
train_prog.random_seed = args.random_seed
train_startup_prog.random_seed = args.random_seed
# build infer model
infer_prog = fluid.Program()
infer_startup_prog = fluid.Program()
with fluid.program_guard(infer_prog, infer_startup_prog):
with fluid.unique_name.guard():
# Infer process
infer_model = lm_model.LanguageModel(
args, vocab_size, test_mode=True)
infer_model.build()
infer_progs = infer_prog, infer_startup_prog, infer_model
with fluid.program_guard(train_prog, train_startup_prog):
with fluid.unique_name.guard():
# Training process
train_model = lm_model.LanguageModel(
args, vocab_size, test_mode=False)
train_model.build()
fluid.clip.set_gradient_clip(
clip=fluid.clip.GradientClipByGlobalNorm(
clip_norm=args.max_grad_norm))
# build optimizer
if args.optim == 'adagrad':
optimizer = fluid.optimizer.Adagrad(
learning_rate=args.learning_rate,
epsilon=0.0,
initial_accumulator_value=1.0)
elif args.optim == 'sgd':
optimizer = fluid.optimizer.SGD(
learning_rate=args.learning_rate)
elif args.optim == 'adam':
optimizer = fluid.optimizer.Adam(
learning_rate=args.learning_rate)
elif args.optim == 'rprop':
optimizer = fluid.optimizer.RMSPropOptimizer(
learning_rate=args.learning_rate)
else:
logger.error('Unsupported optimizer: {}'.format(args.optim))
exit(-1)
optimizer.minimize(train_model.loss * args.num_steps)
# initialize parameters
place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
exe = Executor(place)
train_progs = train_prog, train_startup_prog, train_model
if args.local:
logger.info("local start_up:")
train_loop(args, logger, vocab, train_progs, infer_progs, optimizer)
else:
if args.update_method == "nccl2":
trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
if args.test_nccl:
worker_endpoints_env = os.getenv("PADDLE_WORK_ENDPOINTS")
worker_endpoints = worker_endpoints_env.split(',')
trainers_num = len(worker_endpoints)
current_endpoint = worker_endpoints[trainer_id]
else:
port = os.getenv("PADDLE_PORT")
worker_ips = os.getenv("PADDLE_TRAINERS")
worker_endpoints = []
for ip in worker_ips.split(","):
worker_endpoints.append(':'.join([ip, port]))
worker_endpoints_env = ','.join(worker_endpoints)
trainers_num = len(worker_endpoints)
current_endpoint = os.getenv("POD_IP") + ":" + port
if trainer_id == 0:
logger.info("train_id == 0, sleep 60s")
time.sleep(60)
logger.info("trainers_num:{}".format(trainers_num))
logger.info("worker_endpoints:{}".format(worker_endpoints))
logger.info("current_endpoint:{}".format(current_endpoint))
config = fluid.DistributeTranspilerConfig()
config.mode = "nccl2"
t = fluid.DistributeTranspiler(config=config)
t.transpile(
trainer_id,
trainers=worker_endpoints_env,
current_endpoint=current_endpoint,
program=train_prog,
startup_program=train_startup_prog)
train_progs = train_prog, train_startup_prog, train_model
train_loop(args, logger, vocab, train_progs, infer_progs, optimizer,
trainers_num, trainer_id, worker_endpoints)
else:
port = os.getenv("PADDLE_PORT", "6174")
pserver_ips = os.getenv("PADDLE_PSERVERS")
eplist = []
for ip in pserver_ips.split(","):
eplist.append(':'.join([ip, port]))
pserver_endpoints = ",".join(eplist)
trainers = int(os.getenv("PADDLE_TRAINERS_NUM", "0"))
current_endpoint = os.getenv("POD_IP") + ":" + port
trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
logger.info("pserver_endpoints:{}".format(pserver_endpoints))
logger.info("current_endpoint:{}".format(current_endpoint))
logger.info("trainer_id:{}".format(trainer_id))
logger.info("pserver_ips:{}".format(pserver_ips))
logger.info("port:{}".format(port))
            t = fluid.DistributeTranspiler()
            t.transpile(
                trainer_id,
                pservers=pserver_endpoints,
                trainers=trainers,
                program=train_prog,
                startup_program=train_startup_prog)
            training_role = os.getenv("TRAINING_ROLE", "TRAINER")
            if training_role == "PSERVER":
logger.info("distributed: pserver started")
current_endpoint = os.getenv("POD_IP") + ":" + os.getenv(
"PADDLE_PORT")
if not current_endpoint:
logger.critical("need env SERVER_ENDPOINT")
exit(1)
pserver_prog = t.get_pserver_program(current_endpoint)
pserver_startup = t.get_startup_program(current_endpoint,
pserver_prog)
exe.run(pserver_startup)
exe.run(pserver_prog)
elif training_role == "TRAINER":
logger.info("distributed: trainer started")
trainer_prog = t.get_trainer_program()
train_loop(args, logger, vocab, train_progs, infer_progs,
optimizer)
else:
                logger.critical(
                    "environment var TRAINING_ROLE should be TRAINER or PSERVER")
exit(1)
def train_loop(args,
logger,
vocab,
train_progs,
infer_progs,
optimizer,
nccl2_num_trainers=1,
nccl2_trainer_id=0,
worker_endpoints=None):
train_prog, train_startup_prog, train_model = train_progs
infer_prog, infer_startup_prog, infer_model = infer_progs
# prepare device
place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
exe = Executor(place)
if not args.use_gpu:
place = fluid.CPUPlace()
import multiprocessing
dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
else:
place = fluid.CUDAPlace(0)
dev_count = fluid.core.get_cuda_device_count()
if args.load_dir:
logger.info('load pretrained checkpoints from {}'.format(args.load_dir))
fluid.io.load_persistables(exe, args.load_dir, main_program=train_prog)
elif args.load_pretraining_params:
logger.info('load pretrained params from {}'.format(
args.load_pretraining_params))
exe.run(train_startup_prog)
init_pretraining_params(
exe, args.load_pretraining_params, main_program=train_prog)
else:
exe.run(train_startup_prog)
# prepare data
feed_list = [
train_prog.global_block().var(var_name)
for var_name in train_model.feed_order
]
feeder = fluid.DataFeeder(feed_list, place)
logger.info('Training the model...')
exe_strategy = fluid.parallel_executor.ExecutionStrategy()
parallel_executor = fluid.ParallelExecutor(
loss_name=train_model.loss.name,
main_program=train_prog,
use_cuda=bool(args.use_gpu),
exec_strategy=exe_strategy,
num_trainers=nccl2_num_trainers,
trainer_id=nccl2_trainer_id)
logger.info("begin to load data")
train_data = data.BidirectionalLMDataset(
args.train_path,
vocab,
test=(not args.shuffle),
shuffle_on_load=args.shuffle)
logger.info("finished load vocab")
# get train epoch size
log_interval = args.log_interval
total_time = 0.0
batch_size = args.batch_size
hidden_size = args.hidden_size
custom_samples_array = np.zeros(
(batch_size, args.num_steps, args.n_negative_samples_batch + 1),
dtype='int64')
custom_probabilities_array = np.zeros(
(batch_size, args.num_steps, args.n_negative_samples_batch + 1),
dtype='float32')
for i in range(batch_size):
for j in range(0, args.num_steps):
for k in range(0, args.n_negative_samples_batch + 1):
custom_samples_array[i][j][k] = k
custom_probabilities_array[i][j][k] = 1.0
start_time = time.time()
train_data_iter = lambda: train_data.iter_batches(batch_size * dev_count, args.num_steps)
train_reader = read_multiple(train_data_iter, batch_size, dev_count)
total_num = 0
n_batch_loss = 0.0
n_batch_cnt = 0
last_hidden_values = np.zeros(
(dev_count, args.num_layers * 2 * batch_size * args.embed_size),
dtype='float32')
last_cell_values = np.zeros(
(dev_count, args.num_layers * 2 * batch_size * hidden_size),
dtype='float32')
n_tokens_per_batch = args.batch_size * args.num_steps
n_batches_per_epoch = int(args.all_train_tokens / n_tokens_per_batch)
n_batches_total = args.max_epoch * n_batches_per_epoch
begin_time = time.time()
for batch_id, batch_list in enumerate(train_reader(), 1):
if batch_id > n_batches_total:
break
feed_data = batch_reader(batch_list, args)
feed = list(feeder.feed_parallel(feed_data, dev_count))
for i in range(dev_count):
init_hidden_tensor = fluid.core.LoDTensor()
if args.use_gpu:
placex = fluid.CUDAPlace(i)
else:
placex = fluid.CPUPlace()
init_hidden_tensor.set(last_hidden_values[i], placex)
init_cell_tensor = fluid.core.LoDTensor()
init_cell_tensor.set(last_cell_values[i], placex)
feed[i]['init_hiddens'] = init_hidden_tensor
feed[i]['init_cells'] = init_cell_tensor
fetch_outs = parallel_executor.run(feed=feed,
fetch_list=[
train_model.loss.name,
train_model.last_hidden.name,
train_model.last_cell.name
],
return_numpy=False)
cost_train = np.array(fetch_outs[0]).mean()
last_hidden_values = np.array(fetch_outs[1])
last_hidden_values = last_hidden_values.reshape(
(dev_count, args.num_layers * 2 * batch_size * args.embed_size))
last_cell_values = np.array(fetch_outs[2])
last_cell_values = last_cell_values.reshape(
(dev_count, args.num_layers * 2 * batch_size * args.hidden_size))
total_num += args.batch_size * dev_count
n_batch_loss += np.array(fetch_outs[0]).sum()
n_batch_cnt += len(np.array(fetch_outs[0]))
if batch_id > 0 and batch_id % log_interval == 0:
smoothed_ppl = np.exp(n_batch_loss / n_batch_cnt)
ppl = np.exp(
np.array(fetch_outs[0]).sum() / len(np.array(fetch_outs[0])))
used_time = time.time() - begin_time
speed = log_interval / used_time
logger.info(
"[train] step:{}, loss:{:.3f}, ppl:{:.3f}, smoothed_ppl:{:.3f}, speed:{:.3f}".
format(batch_id, n_batch_loss / n_batch_cnt, ppl, smoothed_ppl,
speed))
n_batch_loss = 0.0
n_batch_cnt = 0
begin_time = time.time()
if batch_id > 0 and batch_id % args.dev_interval == 0:
valid_ppl = eval(vocab, infer_progs, dev_count, logger, args)
logger.info("valid ppl {}".format(valid_ppl))
if batch_id > 0 and batch_id % args.save_interval == 0:
epoch_id = int(batch_id / n_batches_per_epoch)
model_path = os.path.join(args.para_save_dir,
str(batch_id + epoch_id))
if not os.path.isdir(model_path):
os.makedirs(model_path)
fluid.io.save_persistables(
executor=exe, dirname=model_path, main_program=train_prog)
end_time = time.time()
total_time += end_time - start_time
epoch_id = int(batch_id / n_batches_per_epoch)
model_path = os.path.join(args.para_save_dir, str(epoch_id))
if not os.path.isdir(model_path):
os.makedirs(model_path)
fluid.io.save_persistables(
executor=exe, dirname=model_path, main_program=train_prog)
valid_ppl = eval(vocab, infer_progs, dev_count, logger, args)
logger.info("valid ppl {}".format(valid_ppl))
test_ppl = eval(vocab, infer_progs, dev_count, logger, args)
if __name__ == '__main__':
train()
|
# Generated by Django 3.2.7 on 2021-09-07 17:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('reviews', '0036_auto_20210906_2320'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Invite',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('invitee_email', models.EmailField(blank=True, max_length=254, verbose_name='invitee email')),
('status', models.CharField(choices=[('pending', 'Pending'), ('accepted', 'Accepted'), ('rejected', 'Rejected')], default='pending', max_length=32, verbose_name='status')),
('code', models.UUIDField(default=uuid.uuid4, editable=False, verbose_name='code')),
('date_sent', models.DateTimeField(auto_now_add=True, verbose_name='date sent')),
('date_answered', models.DateTimeField(blank=True, null=True, verbose_name='date answered')),
('invited_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invites_sent', to=settings.AUTH_USER_MODEL, verbose_name='invited by')),
('invitee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invites_received', to=settings.AUTH_USER_MODEL, verbose_name='invitee')),
('review', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invites', to='reviews.review', verbose_name='review')),
],
options={
'verbose_name': 'invite',
'verbose_name_plural': 'invites',
},
),
]
|
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2020 Bootlin
# Author: Joao Marcos Costa <joaomarcos.costa@bootlin.com>
import os
import pytest
from sqfs_common import *
@pytest.mark.boardspec('sandbox')
@pytest.mark.buildconfigspec('cmd_fs_generic')
@pytest.mark.buildconfigspec('cmd_squashfs')
@pytest.mark.buildconfigspec('fs_squashfs')
@pytest.mark.requiredtool('mksquashfs')
def test_sqfs_ls(u_boot_console):
build_dir = u_boot_console.config.build_dir
for opt in comp_opts:
try:
opt.gen_image(build_dir)
except RuntimeError:
opt.clean_source(build_dir)
# skip unsupported compression types
continue
path = os.path.join(build_dir, "sqfs-" + opt.name)
output = u_boot_console.run_command("host bind 0 " + path)
try:
# list files in root directory
output = u_boot_console.run_command("sqfsls host 0")
assert str(len(opt.files) + 1) + " file(s), 0 dir(s)" in output
assert "<SYM> sym" in output
output = u_boot_console.run_command("sqfsls host 0 xxx")
assert "** Cannot find directory. **" in output
except:
opt.cleanup(build_dir)
assert False
opt.cleanup(build_dir)
|
import storage
storage.remount("/", readonly=False)
m = storage.getmount("/")
m.label = "INFINITREE"
storage.remount("/", readonly=True)
|
class TableLayoutColumnStyleCollection(TableLayoutStyleCollection,IList,ICollection,IEnumerable):
""" A collection that stores System.Windows.Forms.ColumnStyle objects. """
def Add(self,*__args):
"""
Add(self: TableLayoutColumnStyleCollection,columnStyle: ColumnStyle) -> int
Adds an item to the System.Windows.Forms.TableLayoutColumnStyleCollection.
columnStyle: The System.Windows.Forms.ColumnStyle to add to the
System.Windows.Forms.TableLayoutColumnStyleCollection.
Returns: The position into which the new element was inserted.
"""
pass
def Contains(self,columnStyle):
"""
Contains(self: TableLayoutColumnStyleCollection,columnStyle: ColumnStyle) -> bool
Determines whether the specified System.Windows.Forms.ColumnStyle is in the
collection.
columnStyle: The System.Windows.Forms.ColumnStyle to locate in the
System.Windows.Forms.TableLayoutColumnStyleCollection. The value can be null.
Returns: true if the System.Windows.Forms.ColumnStyle is found in the
System.Windows.Forms.TableLayoutColumnStyleCollection; otherwise,false.
"""
pass
def IndexOf(self,columnStyle):
"""
IndexOf(self: TableLayoutColumnStyleCollection,columnStyle: ColumnStyle) -> int
Determines the index of a specific item in the
System.Windows.Forms.TableLayoutColumnStyleCollection.
columnStyle: The System.Windows.Forms.ColumnStyle to locate in the
System.Windows.Forms.TableLayoutColumnStyleCollection.
Returns: The index of columnStyle if found in the
System.Windows.Forms.TableLayoutColumnStyleCollection; otherwise,-1.
"""
pass
def Insert(self,index,columnStyle):
"""
Insert(self: TableLayoutColumnStyleCollection,index: int,columnStyle: ColumnStyle)
Inserts a System.Windows.Forms.ColumnStyle into the
System.Windows.Forms.TableLayoutColumnStyleCollection at the specified
position.
index: The zero-based index at which System.Windows.Forms.ColumnStyle should be
inserted.
columnStyle: The System.Windows.Forms.ColumnStyle to insert into the
System.Windows.Forms.TableLayoutColumnStyleCollection.
"""
pass
def Remove(self,columnStyle):
"""
Remove(self: TableLayoutColumnStyleCollection,columnStyle: ColumnStyle)
Removes the first occurrence of a specific System.Windows.Forms.ColumnStyle
from the System.Windows.Forms.TableLayoutColumnStyleCollection.
columnStyle: The System.Windows.Forms.ColumnStyle to remove from the
System.Windows.Forms.TableLayoutColumnStyleCollection. The value can be null.
"""
pass
def __add__(self,*args):
""" x.__add__(y) <==> x+y """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]= """
pass
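# --- usage sketch (IronPython / .NET only, illustration, not part of the stub) ---
# Demonstrates the Add/Insert/Contains/Remove members documented above on the
# ColumnStyles collection of a TableLayoutPanel. Wrapped in a function so the
# stub stays import-safe; it assumes a Windows Forms capable runtime.
def _column_style_collection_example():
    import clr
    clr.AddReference("System.Windows.Forms")
    from System.Windows.Forms import TableLayoutPanel, ColumnStyle, SizeType
    panel = TableLayoutPanel()
    percent_style = ColumnStyle(SizeType.Percent, 50.0)
    panel.ColumnStyles.Add(percent_style)
    panel.ColumnStyles.Insert(0, ColumnStyle(SizeType.Absolute, 120.0))
    assert panel.ColumnStyles.Contains(percent_style)
    panel.ColumnStyles.Remove(percent_style)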
|
from datetime import datetime
# @Fábio C Nunes
contador_maior = 0
contador_menor = 0
ano_atual = datetime.today().year
for i in range(1, 8):
    ano_nasc = int(input(' In which year was person {} born? '.format(i)))
    idade = ano_atual - ano_nasc
    if idade >= 21:
        contador_maior += 1
    else:
        contador_menor += 1
print('Number of people aged 21 or over: {}'.format(contador_maior))
print('Number of people under 21: {}'.format(contador_menor))
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 Clyde McQueen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Bring up all nodes."""
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription, SetEnvironmentVariable
from launch.conditions import IfCondition, LaunchConfigurationEquals, UnlessCondition
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
from nav2_common.launch import RewrittenYaml
# SLAM strategies:
slams = [
'vlam', # fiducial_vlam
'orb', # orb_slam2_ros
]
def generate_launch_description():
camera_name = 'forward_camera'
orca_bringup_dir = get_package_share_directory('orca_bringup')
nav2_bringup_dir = get_package_share_directory('nav2_bringup')
orca_description_dir = get_package_share_directory('orca_description')
nav2_bringup_launch_dir = os.path.join(nav2_bringup_dir, 'launch')
urdf_file = os.path.join(orca_description_dir, 'urdf', 'hw7.urdf') # TODO choose urdf
teleop_params_file = os.path.join(orca_bringup_dir, 'params', 'xbox_holonomic_3d.yaml')
nav2_bt_file = os.path.join(orca_bringup_dir, 'behavior_trees', 'orca3_bt.xml')
use_sim_time = LaunchConfiguration('use_sim_time')
orca_params_file = LaunchConfiguration('orca_params_file')
nav2_params_file = LaunchConfiguration('nav2_params_file')
vlam_map_file = LaunchConfiguration('vlam_map')
nav2_map_file = LaunchConfiguration('nav2_map')
# ORB features vocabulary file
    # This works well in simulation, but I'm not sure how it will do in a marine environment
orb_slam_dir = get_package_share_directory('orb_slam2_ros')
orb_voc_file = os.path.join(orb_slam_dir, 'orb_slam2', 'Vocabulary', 'ORBvoc.txt')
# Read the params file and make some global substitutions
configured_orca_params = RewrittenYaml(
source_file=orca_params_file,
param_rewrites={
'use_sim_time': use_sim_time,
'marker_map_load_full_filename': vlam_map_file,
},
convert_types=True)
configured_nav2_params = RewrittenYaml(
source_file=nav2_params_file,
param_rewrites={
'use_sim_time': use_sim_time,
'yaml_filename': nav2_map_file,
},
convert_types=True)
return LaunchDescription([
SetEnvironmentVariable('RCUTILS_LOGGING_BUFFERED_STREAM', '1'),
DeclareLaunchArgument(
'use_sim_time',
default_value='False', # TODO sim time broken
description='Use simulation (Gazebo) clock (BROKEN BROKEN BROKEN)?'),
DeclareLaunchArgument(
'slam',
default_value='orb',
description='Choose SLAM strategy: ' + ', '.join(slams)),
DeclareLaunchArgument(
'vlam_map',
default_value='install/orca_gazebo/share/orca_gazebo/worlds/medium_ring_map.yaml',
description='Full path to Vlam map file'),
DeclareLaunchArgument(
'nav2_map',
default_value='install/orca_bringup/share/orca_bringup/worlds/empty_world.yaml',
description='Full path to Nav2 map file'),
DeclareLaunchArgument(
'orca_params_file',
default_value=os.path.join(orca_bringup_dir, 'params', 'orca_params.yaml'),
description='Full path to the ROS2 parameters file to use for Orca nodes'),
DeclareLaunchArgument(
'nav2_params_file',
default_value=os.path.join(orca_bringup_dir, 'params', 'nav2_params.yaml'),
description='Full path to the ROS2 parameters file to use for Nav2 nodes'),
# Publish static /tf
Node(
package='robot_state_publisher',
executable='robot_state_publisher',
output='screen',
name='robot_state_publisher',
arguments=[urdf_file],
parameters=[configured_orca_params]),
# Publish /joy
Node(
package='joy',
executable='joy_node',
output='screen',
name='joy_node',
parameters=[configured_orca_params]),
# Subscribe to /joy and publish /cmd_vel
Node(
package='teleop_twist_joy',
executable='teleop_node',
output='screen',
name='teleop_node',
parameters=[teleop_params_file, {
'use_sim_time': LaunchConfiguration('use_sim_time'),
}]),
# Subscribe to /cmd_vel and publish /thrusters, /odom and /tf odom->base_link
Node(
package='orca_base',
executable='base_controller',
output='screen',
name='base_controller',
parameters=[configured_orca_params]),
# fiducial_vlam: publish a map of ArUco markers
Node(
package='fiducial_vlam',
executable='vmap_main',
output='screen',
name='vmap_main',
parameters=[configured_orca_params],
condition=LaunchConfigurationEquals('slam', 'vlam')),
# fiducial_vlam: find ArUco markers and publish the camera pose
Node(
package='fiducial_vlam',
executable='vloc_main',
output='screen',
name='vloc_main',
namespace=camera_name,
parameters=[configured_orca_params],
condition=LaunchConfigurationEquals('slam', 'vlam')),
# fiducial_vlam: subscribe to the camera pose and publish /tf map->odom
Node(
package='orca_localize',
executable='fiducial_localizer',
output='screen',
name='fiducial_localizer',
parameters=[configured_orca_params],
remappings=[
('camera_pose', '/' + camera_name + '/camera_pose'),
],
condition=LaunchConfigurationEquals('slam', 'vlam')),
# orb_slam2: build a map of 3d points, localize against the map, and publish the camera pose
Node(
package='orb_slam2_ros',
executable='orb_slam2_ros_stereo',
output='screen',
name='orb_slam2_stereo',
parameters=[configured_orca_params, {
'voc_file': orb_voc_file,
}],
remappings=[
('/image_left/image_color_rect', '/stereo/left/image_raw'),
('/image_right/image_color_rect', '/stereo/right/image_raw'),
('/camera/camera_info', '/stereo/left/camera_info'),
],
condition=LaunchConfigurationEquals('slam', 'orb')),
# orb_slam2: subscribe to the camera pose and publish /tf map->odom
Node(
package='orca_localize',
executable='orb_slam2_localizer',
output='screen',
name='orb_slam2_localizer',
parameters=[configured_orca_params],
remappings=[
('/camera_pose', '/orb_slam2_stereo_node/pose'),
],
condition=LaunchConfigurationEquals('slam', 'orb')),
# Publish a [likely empty] nav2 map
Node(
package='nav2_map_server',
executable='map_server',
name='map_server',
output='screen',
parameters=[configured_nav2_params]),
# Manage the lifecycle of map_server
Node(
package='nav2_lifecycle_manager',
executable='lifecycle_manager',
name='lifecycle_manager_map_server',
output='screen',
parameters=[{
'use_sim_time': use_sim_time,
'autostart': True,
'node_names': ['map_server'],
}]),
# Include the rest of Nav2
IncludeLaunchDescription(
PythonLaunchDescriptionSource(os.path.join(nav2_bringup_launch_dir, 'navigation_launch.py')),
launch_arguments={
'use_sim_time': use_sim_time,
'autostart': 'True',
'params_file': nav2_params_file,
'map_subscribe_transient_local': 'true',
'default_bt_xml_filename': nav2_bt_file,
}.items()),
])
|
# 2019-11-24 00:33:09(JST)
import collections
import sys
def main():
n, *a = map(int, sys.stdin.read().split())
c = collections.Counter(a)
ans = 'No'
if len(c) == 2 and 0 in c and c[0] == n / 3:
ans = 'Yes'
    elif len(c) == 3 and 0 not in c:
for i in c.values():
if i != n / 3:
break
else:
ans = 'Yes'
elif 0 in c and c[0] == n:
ans = 'Yes'
print(ans)
if __name__ == '__main__':
main()
|
"""
Author: Nathan Dunne
Date last modified: 16/11/2018
Purpose: Generate a csv and json file from a data set.
"""
import csv # Python has a built in csv library we can use to create a csv file
import json # Python has a built in json library we can use to create a json file.
class FileGenerator:
def __init__(self):
pass # There is nothing to initialise so pass is called here. The pass statement is a null operation.
@staticmethod # Method is static as it neither accesses nor alters any values or behaviour of the self object.
def createCSV(tweets, filename):
print("\nCreating CSV file: " + filename)
        headers = tweets[0]  # Use the first dict in the list; its keys are written as the header row.
with open(filename+".csv", 'w', newline='') as csv_file:
writer = csv.writer(csv_file) # Instantiate the writer object.
writer.writerow(headers) # Write the first row using the headers.
for each_tweet in tweets: # For each dictionary object (tweet data) in the list (of tweet data sets)
# Write the values of the dictionary object (tweet data) as a new row.
writer.writerow(each_tweet.values())
@staticmethod # Method is static as it neither accesses nor alters any values or behaviour of the self object.
def createJSON(tweets, filename):
print("Creating JSON file: " + filename)
with open(filename+".json", 'w') as outfile:
json.dump(tweets, outfile)
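# --- usage sketch (illustration only, not part of the original module) ---
# Drives FileGenerator with a small, made-up list of dictionaries; createCSV uses
# the keys of the first dict as the header row, createJSON dumps the whole list.
if __name__ == '__main__':
    sample_tweets = [
        {'id': 1, 'user': 'alice', 'text': 'hello world'},
        {'id': 2, 'user': 'bob', 'text': 'another tweet'},
    ]
    FileGenerator.createCSV(sample_tweets, 'tweets')   # writes tweets.csv
    FileGenerator.createJSON(sample_tweets, 'tweets')  # writes tweets.json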
|
"""
https://github.github.com/gfm/#links
"""
import pytest
from .utils import act_and_assert
# pylint: disable=too-many-lines
@pytest.mark.gfm
def test_reference_links_535():
"""
Test case 535: Here is a simple example:
"""
# Arrange
source_markdown = """[foo][bar]
[bar]: /url "title"
"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):full:/url:title:::bar:foo:::::]",
"[text(1,2):foo:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::bar:: :/url:: :title:"title":]',
"[BLANK(4,1):]",
]
expected_gfm = """<p><a href="/url" title="title">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_535a():
"""
Test case 535a: variation of 535 with trailing space
"""
# Arrange
source_markdown = """[foo][bar]\a
[bar]: /url "title"
""".replace(
"\a", " "
)
expected_tokens = [
"[para(1,1):: ]",
"[link(1,1):full:/url:title:::bar:foo:::::]",
"[text(1,2):foo:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::bar:: :/url:: :title:"title":]',
"[BLANK(4,1):]",
]
expected_gfm = """<p><a href="/url" title="title">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_535b():
"""
Test case 535b: variation of 535 with trailing space and text
"""
# Arrange
source_markdown = """[foo][bar] abc
[bar]: /url "title"
"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):full:/url:title:::bar:foo:::::]",
"[text(1,2):foo:]",
"[end-link::]",
"[text(1,11): abc:]",
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::bar:: :/url:: :title:"title":]',
"[BLANK(4,1):]",
]
expected_gfm = """<p><a href="/url" title="title">foo</a> abc</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_536():
"""
Test case 536: (part 1) The link text may contain balanced brackets, but not unbalanced ones, unless they are escaped:
"""
# Arrange
source_markdown = """[link [foo [bar]]][ref]
[ref]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):full:/uri::::ref:link [foo [bar]]:::::]",
"[text(1,2):link :]",
"[text(1,7):[:]",
"[text(1,8):foo :]",
"[text(1,12):[:]",
"[text(1,13):bar:]",
"[text(1,16):]:]",
"[text(1,17):]:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ref:: :/uri:::::]",
]
expected_gfm = """<p><a href="/uri">link [foo [bar]]</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_537():
"""
Test case 537: (part 2) The link text may contain balanced brackets, but not unbalanced ones, unless they are escaped:
"""
# Arrange
source_markdown = """[link \\[bar][ref]
[ref]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):full:/uri::::ref:link \\[bar:::::]",
"[text(1,2):link \\\b[bar:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ref:: :/uri:::::]",
]
expected_gfm = """<p><a href="/uri">link [bar</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_538():
"""
Test case 538: (part 1) The link text may contain inline content:
"""
# Arrange
source_markdown = """[link *foo **bar** `#`*][ref]
[ref]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):full:/uri::::ref:link *foo **bar** `#`*:::::]",
"[text(1,2):link :]",
"[emphasis(1,7):1:*]",
"[text(1,8):foo :]",
"[emphasis(1,12):2:*]",
"[text(1,14):bar:]",
"[end-emphasis(1,17)::]",
"[text(1,19): :]",
"[icode-span(1,20):#:`::]",
"[end-emphasis(1,23)::]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ref:: :/uri:::::]",
]
expected_gfm = """<p><a href="/uri">link <em>foo <strong>bar</strong> <code>#</code></em></a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_539():
"""
Test case 539: (part 2) The link text may contain inline content:
"""
# Arrange
source_markdown = """[][ref]
[ref]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):full:/uri::::ref::::::]",
"[image(1,2):inline:moon.jpg::moon::::moon:False::::]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ref:: :/uri:::::]",
]
expected_gfm = """<p><a href="/uri"><img src="moon.jpg" alt="moon" /></a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_540():
"""
Test case 540: (part 1) However, links may not contain other links, at any level of nesting.
"""
# Arrange
source_markdown = """[foo [bar](/uri)][ref]
[ref]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):foo :]",
"[link(1,6):inline:/uri:::::bar:False::::]",
"[text(1,7):bar:]",
"[end-link::]",
"[text(1,17):]:]",
"[link(1,18):shortcut:/uri:::::ref:::::]",
"[text(1,19):ref:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ref:: :/uri:::::]",
]
expected_gfm = """<p>[foo <a href="/uri">bar</a>]<a href="/uri">ref</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_541():
"""
Test case 541: (part 2) However, links may not contain other links, at any level of nesting.
"""
# Arrange
source_markdown = """[foo *bar [baz][ref]*][ref]
[ref]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):foo :]",
"[emphasis(1,6):1:*]",
"[text(1,7):bar :]",
"[link(1,11):full:/uri::::ref:baz:::::]",
"[text(1,12):baz:]",
"[end-link::]",
"[end-emphasis(1,21)::]",
"[text(1,22):]:]",
"[link(1,23):shortcut:/uri:::::ref:::::]",
"[text(1,24):ref:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ref:: :/uri:::::]",
]
expected_gfm = (
"""<p>[foo <em>bar <a href="/uri">baz</a></em>]<a href="/uri">ref</a></p>"""
)
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_542():
"""
Test case 542: (part 1) The following cases illustrate the precedence of link text grouping over emphasis grouping:
"""
# Arrange
source_markdown = """*[foo*][ref]
[ref]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):*:]",
"[link(1,2):full:/uri::::ref:foo*:::::]",
"[text(1,3):foo:]",
"[text(1,6):*:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ref:: :/uri:::::]",
]
expected_gfm = """<p>*<a href="/uri">foo*</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_543():
"""
Test case 543: (part 2) The following cases illustrate the precedence of link text grouping over emphasis grouping:
"""
# Arrange
source_markdown = """[foo *bar][ref]*
[ref]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):full:/uri::::ref:foo *bar:::::]",
"[text(1,2):foo :]",
"[text(1,6):*:]",
"[text(1,7):bar:]",
"[end-link::]",
"[text(1,16):*:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ref:: :/uri:::::]",
]
expected_gfm = """<p><a href="/uri">foo *bar</a>*</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_544():
"""
Test case 544: (part 1) The following cases illustrate the precedence of link text grouping over emphasis grouping:
"""
# Arrange
source_markdown = """[foo <bar attr="][ref]">
[ref]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):foo :]",
'[raw-html(1,6):bar attr="][ref]"]',
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ref:: :/uri:::::]",
]
expected_gfm = """<p>[foo <bar attr="][ref]"></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_545():
"""
Test case 545: (part 2) The following cases illustrate the precedence of link text grouping over emphasis grouping:
"""
# Arrange
source_markdown = """[foo`][ref]`
[ref]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):foo:]",
"[icode-span(1,5):][ref]:`::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ref:: :/uri:::::]",
]
expected_gfm = """<p>[foo<code>][ref]</code></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_546():
"""
Test case 546: (part 3) The following cases illustrate the precedence of link text grouping over emphasis grouping:
"""
# Arrange
source_markdown = """[foo<http://example.com/?search=][ref]>
[ref]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):foo:]",
"[uri-autolink(1,5):http://example.com/?search=][ref]]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ref:: :/uri:::::]",
]
expected_gfm = """<p>[foo<a href="http://example.com/?search=%5D%5Bref%5D">http://example.com/?search=][ref]</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_547():
"""
Test case 547: Matching is case-insensitive:
"""
# Arrange
source_markdown = """[foo][BaR]
[bar]: /url "title"
"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):full:/url:title:::BaR:foo:::::]",
"[text(1,2):foo:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::bar:: :/url:: :title:"title":]',
"[BLANK(4,1):]",
]
expected_gfm = """<p><a href="/url" title="title">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_548():
"""
Test case 548: Unicode case fold is used:
"""
# Arrange
source_markdown = """[ẞ]
[SS]: /url"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):shortcut:/url:::::ẞ:::::]",
"[text(1,2):ẞ:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ss:SS: :/url:::::]",
]
expected_gfm = """<p><a href="/url">ẞ</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
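# The link-ref-def token above appears to carry both the Unicode case-folded
# label ("ss") and the original label ("SS"); case folding is what lets "[ẞ]"
# match a definition written as "[SS]".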
@pytest.mark.gfm
def test_reference_links_549():
"""
Test case 549: Consecutive internal whitespace is treated as one space for purposes of determining matching:
"""
# Arrange
source_markdown = """[Foo
bar]: /url
[Baz][Foo bar]"""
expected_tokens = [
"[link-ref-def(1,1):True::foo bar:Foo\n bar: :/url:::::]",
"[BLANK(3,1):]",
"[para(4,1):]",
"[link(4,1):full:/url::::Foo bar:Baz:::::]",
"[text(4,2):Baz:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/url">Baz</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_550():
"""
Test case 550: (part 1) No whitespace is allowed between the link text and the link label:
"""
# Arrange
source_markdown = """[foo] [bar]
[bar]: /url "title"
"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):foo:]",
"[text(1,5):]:]",
"[text(1,6): :]",
"[link(1,7):shortcut:/url:title::::bar:::::]",
"[text(1,8):bar:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::bar:: :/url:: :title:"title":]',
"[BLANK(4,1):]",
]
expected_gfm = """<p>[foo] <a href="/url" title="title">bar</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_551():
"""
Test case 551: (part 2) No whitespace is allowed between the link text and the link label:
"""
# Arrange
source_markdown = """[foo]
[bar]
[bar]: /url "title"
"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):[:]",
"[text(1,2):foo:]",
"[text(1,5):]:]",
"[text(1,6):\n::\n]",
"[link(2,1):shortcut:/url:title::::bar:::::]",
"[text(2,2):bar:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(3,1):]",
'[link-ref-def(4,1):True::bar:: :/url:: :title:"title":]',
"[BLANK(5,1):]",
]
expected_gfm = """<p>[foo]
<a href="/url" title="title">bar</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_552():
"""
Test case 552: When there are multiple matching link reference definitions, the first is used:
"""
# Arrange
source_markdown = """[foo]: /url1
[foo]: /url2
[bar][foo]"""
expected_tokens = [
"[link-ref-def(1,1):True::foo:: :/url1:::::]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):False::foo:: :/url2:::::]",
"[BLANK(4,1):]",
"[para(5,1):]",
"[link(5,1):full:/url1::::foo:bar:::::]",
"[text(5,2):bar:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/url1">bar</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
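# The second definition of "foo" above is serialized with a False flag,
# presumably marking it as the duplicate that does not take effect (/url1 wins).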
@pytest.mark.gfm
def test_reference_links_553():
"""
Test case 553: Note that matching is performed on normalized strings, not parsed inline content. So the following does not match, even though the labels define equivalent inline content:
"""
# Arrange
source_markdown = """[bar][foo\\!]
[foo!]: /url"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):bar:]",
"[text(1,5):]:]",
"[text(1,6):[:]",
"[text(1,7):foo\\\b!:]",
"[text(1,12):]:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::foo!:: :/url:::::]",
]
expected_gfm = """<p>[bar][foo!]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_554():
"""
Test case 554: (part 1) Link labels cannot contain brackets, unless they are backslash-escaped:
"""
# Arrange
source_markdown = """[foo][ref[]
[ref[]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):foo:]",
"[text(1,5):]:]",
"[text(1,6):[:]",
"[text(1,7):ref:]",
"[text(1,10):[:]",
"[text(1,11):]:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[text(3,1):[:]",
"[text(3,2):ref:]",
"[text(3,5):[:]",
"[text(3,6):]:]",
"[text(3,7):: /uri:]",
"[end-para:::True]",
]
expected_gfm = """<p>[foo][ref[]</p>
<p>[ref[]: /uri</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_555():
"""
Test case 555: (part 2) Link labels cannot contain brackets, unless they are backslash-escaped:
"""
# Arrange
source_markdown = """[foo][ref[bar]]
[ref[bar]]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):foo:]",
"[text(1,5):]:]",
"[text(1,6):[:]",
"[text(1,7):ref:]",
"[text(1,10):[:]",
"[text(1,11):bar:]",
"[text(1,14):]:]",
"[text(1,15):]:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[text(3,1):[:]",
"[text(3,2):ref:]",
"[text(3,5):[:]",
"[text(3,6):bar:]",
"[text(3,9):]:]",
"[text(3,10):]:]",
"[text(3,11):: /uri:]",
"[end-para:::True]",
]
expected_gfm = """<p>[foo][ref[bar]]</p>
<p>[ref[bar]]: /uri</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_556():
"""
Test case 556: (part 3) Link labels cannot contain brackets, unless they are backslash-escaped:
"""
# Arrange
source_markdown = """[[[foo]]]
[[[foo]]]: /url"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):[:]",
"[text(1,3):[:]",
"[text(1,4):foo:]",
"[text(1,7):]:]",
"[text(1,8):]:]",
"[text(1,9):]:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[text(3,1):[:]",
"[text(3,2):[:]",
"[text(3,3):[:]",
"[text(3,4):foo:]",
"[text(3,7):]:]",
"[text(3,8):]:]",
"[text(3,9):]:]",
"[text(3,10):: /url:]",
"[end-para:::True]",
]
expected_gfm = """<p>[[[foo]]]</p>
<p>[[[foo]]]: /url</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_557():
"""
Test case 557: (part 4) Link labels cannot contain brackets, unless they are backslash-escaped:
"""
# Arrange
source_markdown = """[foo][ref\\[]
[ref\\[]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):full:/uri::::ref\\[:foo:::::]",
"[text(1,2):foo:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ref\\[:: :/uri:::::]",
]
expected_gfm = """<p><a href="/uri">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_558():
"""
Test case 558: Note that in this example ] is not backslash-escaped:
"""
# Arrange
source_markdown = """[bar\\\\]: /uri
[bar\\\\]"""
expected_tokens = [
"[link-ref-def(1,1):True::bar\\\\:: :/uri:::::]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[link(3,1):shortcut:/uri:::::bar\\\\:::::]",
"[text(3,2):bar\\\b\\:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/uri">bar\\</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_558a():
"""
Test case 558a: variation of 558 with reference
"""
# Arrange
source_markdown = """[bar\]: /uri
[bar\]"""
expected_tokens = [
"[link-ref-def(1,1):True::bar\:bar\: :/uri:::::]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[link(3,1):shortcut:/uri:::::bar\:::::]",
"[text(3,2):bar\a\\a\\\a:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/uri">bar\\</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_558b():
"""
Test case 558b: variation of 558 with reference
"""
# Arrange
source_markdown = """[barβ]: /uri
[barβ]"""
expected_tokens = [
"[link-ref-def(1,1):True::barβ:: :/uri:::::]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[link(3,1):shortcut:/uri:::::barβ:::::]",
"[text(3,2):bar\aβ\aβ\a:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/uri">barβ</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_559():
"""
Test case 559: (part 1) A link label must contain at least one non-whitespace character:
"""
# Arrange
source_markdown = """[]
[]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):]:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[text(3,1):[:]",
"[text(3,2):]:]",
"[text(3,3):: /uri:]",
"[end-para:::True]",
]
expected_gfm = """<p>[]</p>
<p>[]: /uri</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_560():
"""
Test case 560: (part 2) A link label must contain at least one non-whitespace character:
"""
# Arrange
source_markdown = """[
]
[
]: /uri"""
expected_tokens = [
"[para(1,1):\n ]",
"[text(1,1):[:]",
"[text(1,2):\n::\n]",
"[text(2,2):]:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[para(4,1):\n ]",
"[text(4,1):[:]",
"[text(4,2):\n::\n]",
"[text(5,2):]:]",
"[text(5,3):: /uri:]",
"[end-para:::True]",
]
expected_gfm = """<p>[
]</p>
<p>[
]: /uri</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_561():
"""
Test case 561: (part 1) Thus, [foo][] is equivalent to [foo][foo].
"""
# Arrange
source_markdown = """[foo][]
[foo]: /url "title"
"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):collapsed:/url:title::::foo:::::]",
"[text(1,2):foo:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::foo:: :/url:: :title:"title":]',
"[BLANK(4,1):]",
]
expected_gfm = """<p><a href="/url" title="title">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_562():
"""
Test case 562: (part 2) Thus, [foo][] is equivalent to [foo][foo].
"""
# Arrange
source_markdown = """[*foo* bar][]
[*foo* bar]: /url "title"
"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):collapsed:/url:title::::*foo* bar:::::]",
"[emphasis(1,2):1:*]",
"[text(1,3):foo:]",
"[end-emphasis(1,6)::]",
"[text(1,7): bar:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::*foo* bar:: :/url:: :title:"title":]',
"[BLANK(4,1):]",
]
expected_gfm = """<p><a href="/url" title="title"><em>foo</em> bar</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_563():
"""
Test case 563: The link labels are case-insensitive:
"""
# Arrange
source_markdown = """[Foo][]
[foo]: /url "title"
"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):collapsed:/url:title::::Foo:::::]",
"[text(1,2):Foo:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::foo:: :/url:: :title:"title":]',
"[BLANK(4,1):]",
]
expected_gfm = """<p><a href="/url" title="title">Foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_564():
"""
Test case 564: As with full reference links, whitespace is not allowed between the two sets of brackets:
"""
# Arrange
source_markdown = """[foo]\a
[]
[foo]: /url "title"
""".replace(
"\a", " "
)
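# "\a" is used as a visible placeholder for a literal space (note the trailing
# space after "[foo]"), presumably so that significant whitespace survives
# editing and is easy to see in the source.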
expected_tokens = [
"[para(1,1):\n]",
"[link(1,1):shortcut:/url:title::::foo:::::]",
"[text(1,2):foo:]",
"[end-link::]",
"[text(1,6):\n:: \n]",
"[text(2,1):[:]",
"[text(2,2):]:]",
"[end-para:::True]",
"[BLANK(3,1):]",
'[link-ref-def(4,1):True::foo:: :/url:: :title:"title":]',
"[BLANK(5,1):]",
]
expected_gfm = """<p><a href="/url" title="title">foo</a>
[]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_565():
"""
Test case 565: (part 1) A shortcut reference link consists of a link label that matches a link reference definition elsewhere in the document and is not followed by [] or a link label.
"""
# Arrange
source_markdown = """[foo]
[foo]: /url "title"
"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):shortcut:/url:title::::foo:::::]",
"[text(1,2):foo:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::foo:: :/url:: :title:"title":]',
"[BLANK(4,1):]",
]
expected_gfm = """<p><a href="/url" title="title">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_566():
"""
Test case 566: (part 2) A shortcut reference link consists of a link label that matches a link reference definition elsewhere in the document and is not followed by [] or a link label.
"""
# Arrange
source_markdown = """[*foo* bar]
[*foo* bar]: /url "title"
"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):shortcut:/url:title::::*foo* bar:::::]",
"[emphasis(1,2):1:*]",
"[text(1,3):foo:]",
"[end-emphasis(1,6)::]",
"[text(1,7): bar:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::*foo* bar:: :/url:: :title:"title":]',
"[BLANK(4,1):]",
]
expected_gfm = """<p><a href="/url" title="title"><em>foo</em> bar</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_567():
"""
Test case 567: (part 3) A shortcut reference link consists of a link label that matches a link reference definition elsewhere in the document and is not followed by [] or a link label.
"""
# Arrange
source_markdown = """[[*foo* bar]]
[*foo* bar]: /url "title"
"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[link(1,2):shortcut:/url:title::::*foo* bar:::::]",
"[emphasis(1,3):1:*]",
"[text(1,4):foo:]",
"[end-emphasis(1,7)::]",
"[text(1,8): bar:]",
"[end-link::]",
"[text(1,13):]:]",
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::*foo* bar:: :/url:: :title:"title":]',
"[BLANK(4,1):]",
]
expected_gfm = """<p>[<a href="/url" title="title"><em>foo</em> bar</a>]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_568():
"""
Test case 568: (part 4) A shortcut reference link consists of a link label that matches a link reference definition elsewhere in the document and is not followed by [] or a link label.
"""
# Arrange
source_markdown = """[[bar [foo]
[foo]: /url"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):[:]",
"[text(1,3):bar :]",
"[link(1,7):shortcut:/url:::::foo:::::]",
"[text(1,8):foo:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::foo:: :/url:::::]",
]
expected_gfm = """<p>[[bar <a href="/url">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_569():
"""
Test case 569: The link labels are case-insensitive:
"""
# Arrange
source_markdown = """[Foo]
[foo]: /url "title"
"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):shortcut:/url:title::::Foo:::::]",
"[text(1,2):Foo:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::foo:: :/url:: :title:"title":]',
"[BLANK(4,1):]",
]
expected_gfm = """<p><a href="/url" title="title">Foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_570():
"""
Test case 570: A space after the link text should be preserved:
"""
# Arrange
source_markdown = """[foo] bar
[foo]: /url"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):shortcut:/url:::::foo:::::]",
"[text(1,2):foo:]",
"[end-link::]",
"[text(1,6): bar:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::foo:: :/url:::::]",
]
expected_gfm = """<p><a href="/url">foo</a> bar</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_570a():
"""
Test case 570a: variation of 570 to show how a link inside a link doesn't work.
"""
# Arrange
source_markdown = """[foo[foo]]
[foo]: /url "title"
"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):foo:]",
"[link(1,5):shortcut:/url:title::::foo:::::]",
"[text(1,6):foo:]",
"[end-link::]",
"[text(1,10):]:]",
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::foo:: :/url:: :title:"title":]',
"[BLANK(4,1):]",
]
expected_gfm = """<p>[foo<a href=\"/url\" title=\"title\">foo</a>]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_571():
"""
Test case 571: If you just want bracketed text, you can backslash-escape the opening bracket to avoid links
"""
# Arrange
source_markdown = """\\[foo]
[foo]: /url "title"
"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):\\\b[foo:]",
"[text(1,6):]:]",
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::foo:: :/url:: :title:"title":]',
"[BLANK(4,1):]",
]
expected_gfm = """<p>[foo]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_572():
"""
Test case 572: Note that this is a link, because a link label ends with the first following closing bracket:
"""
# Arrange
source_markdown = """[foo*]: /url
*[foo*]"""
expected_tokens = [
"[link-ref-def(1,1):True::foo*:: :/url:::::]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[text(3,1):*:]",
"[link(3,2):shortcut:/url:::::foo*:::::]",
"[text(3,3):foo:]",
"[text(3,6):*:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p>*<a href="/url">foo*</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_573():
"""
Test case 573: (part 1) Full and compact references take precedence over shortcut references:
"""
# Arrange
source_markdown = """[foo][bar]
[foo]: /url1
[bar]: /url2"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):full:/url2::::bar:foo:::::]",
"[text(1,2):foo:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::foo:: :/url1:::::]",
"[link-ref-def(4,1):True::bar:: :/url2:::::]",
]
expected_gfm = """<p><a href="/url2">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_574():
"""
Test case 574: (part 2) Full and compact references take precedence over shortcut references:
"""
# Arrange
source_markdown = """[foo][]
[foo]: /url1"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):collapsed:/url1:::::foo:::::]",
"[text(1,2):foo:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::foo:: :/url1:::::]",
]
expected_gfm = """<p><a href="/url1">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_575():
"""
Test case 575: (part 1) Inline links also take precedence:
"""
# Arrange
source_markdown = """[foo]()
[foo]: /url1"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):inline::::::foo:::::]",
"[text(1,2):foo:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::foo:: :/url1:::::]",
]
expected_gfm = """<p><a href="">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_576():
"""
Test case 576: (part 2) Inline links also take precedence:
"""
# Arrange
source_markdown = """[foo](not a link)
[foo]: /url1"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):shortcut:/url1:::::foo:False::: :]",
"[text(1,2):foo:]",
"[end-link::]",
"[text(1,6):(not a link):]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::foo:: :/url1:::::]",
]
expected_gfm = """<p><a href="/url1">foo</a>(not a link)</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_577():
"""
Test case 577: In the following case [bar][baz] is parsed as a reference, [foo] as normal text:
"""
# Arrange
source_markdown = """[foo][bar][baz]
[baz]: /url"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):foo:]",
"[text(1,5):]:]",
"[link(1,6):full:/url::::baz:bar:::::]",
"[text(1,7):bar:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::baz:: :/url:::::]",
]
expected_gfm = """<p>[foo]<a href="/url">bar</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_578():
"""
Test case 578: Here, though, [foo][bar] is parsed as a reference, since [bar] is defined:
"""
# Arrange
source_markdown = """[foo][bar][baz]
[baz]: /url1
[bar]: /url2"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):full:/url2::::bar:foo:::::]",
"[text(1,2):foo:]",
"[end-link::]",
"[link(1,11):shortcut:/url1:::::baz:::::]",
"[text(1,12):baz:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::baz:: :/url1:::::]",
"[link-ref-def(4,1):True::bar:: :/url2:::::]",
]
expected_gfm = """<p><a href="/url2">foo</a><a href="/url1">baz</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_579():
"""
Test case 579: Here [foo] is not parsed as a shortcut reference, because it is followed by a link label (even though [bar] is not defined):
"""
# Arrange
source_markdown = """[foo][bar][baz]
[baz]: /url1
[foo]: /url2"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):foo:]",
"[text(1,5):]:]",
"[link(1,6):full:/url1::::baz:bar:::::]",
"[text(1,7):bar:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::baz:: :/url1:::::]",
"[link-ref-def(4,1):True::foo:: :/url2:::::]",
]
expected_gfm = """<p>[foo]<a href="/url1">bar</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_01():
"""
Test case extra 01: variation on 644
"""
# Arrange
source_markdown = """[foo <!-- this is a
comment - with hyphen --> bar]: /uri
[foo <!-- this is a
comment - with hyphen --> bar]"""
expected_tokens = [
"[link-ref-def(1,1):True::foo <!-- this is a comment - with hyphen --> bar:foo <!-- this is a\ncomment - with hyphen --> bar: :/uri:::::]",
"[BLANK(3,1):]",
"[para(4,1):\n]",
"[link(4,1):shortcut:/uri:::::foo <!-- this is a\ncomment - with hyphen --> bar:::::]",
"[text(4,2):foo :]",
"[raw-html(4,6):!-- this is a\ncomment - with hyphen --]",
"[text(5,26): bar:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/uri">foo <!-- this is a
comment - with hyphen --> bar</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_02():
"""
Test case extra 02: variation on 345
"""
# Arrange
source_markdown = """[foo ``
foo
bar\a\a
baz
`` bar]: /uri
[foo ``
foo
bar\a\a
baz
`` bar]""".replace(
"\a", " "
)
expected_tokens = [
"[link-ref-def(1,1):True::foo `` foo bar baz `` bar:foo ``\nfoo\nbar \nbaz\n`` bar: :/uri:::::]",
"[BLANK(6,1):]",
"[para(7,1):\n\n\n\n]",
"[link(7,1):shortcut:/uri:::::foo ``\nfoo\nbar \nbaz\n`` bar:::::]",
"[text(7,2):foo :]",
"[icode-span(7,6):foo\a\n\a \abar \a\n\a \abaz:``:\a\n\a \a:\a\n\a \a]",
"[text(11,3): bar:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/uri">foo <code>foo bar baz</code> bar</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03x():
"""
Test case extra 03x: variation on 558
"""
# Arrange
source_markdown = """[bar\\foo]: /uri
[bar\\foo]"""
expected_tokens = [
"[link-ref-def(1,1):True::bar\\foo:: :/uri:::::]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[link(3,1):shortcut:/uri:::::bar\\foo:::::]",
"[text(3,2):bar\\foo:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/uri">bar\\foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03xa():
"""
Test case extra 03xa: variation of 3 with extra text in label
"""
# Arrange
source_markdown = """[xx[bar\\foo]yy](/uri)
[bar\\foo]: /uri1"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):xx:]",
"[link(1,4):shortcut:/uri1:::::bar\\foo:::::]",
"[text(1,5):bar\\foo:]",
"[end-link::]",
"[text(1,13):yy:]",
"[text(1,15):]:]",
"[text(1,16):(/uri):]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar\\foo:: :/uri1:::::]",
]
expected_gfm = """<p>[xx<a href="/uri1">bar\\foo</a>yy](/uri)</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03xb():
"""
Test case extra 03xb: variation of 3xa as image
"""
# Arrange
source_markdown = """![xx[bar\\foo]yy](/uri)
[bar\\foo]: /uri1"""
expected_tokens = [
"[para(1,1):]",
"[image(1,1):inline:/uri::xxbar\\fooyy::::xx[bar\\foo]yy:False::::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar\\foo:: :/uri1:::::]",
]
expected_gfm = """<p><img src="/uri" alt="xxbar\\fooyy" /></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03xc():
"""
Test case extra 03xc: variation of 3xa with inner as image
"""
# Arrange
source_markdown = """[xx![bar\\foo]yy](/uri)
[bar\\foo]: /uri1"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):inline:/uri:::::xx![bar\\foo]yy:False::::]",
"[text(1,2):xx:]",
"[image(1,4):shortcut:/uri1::bar\\foo::::bar\\foo:::::]",
"[text(1,14):yy:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar\\foo:: :/uri1:::::]",
]
expected_gfm = (
"""<p><a href="/uri">xx<img src="/uri1" alt="bar\\foo" />yy</a></p>"""
)
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03a():
"""
Test case extra 03a: variation of 3 with reference
"""
# Arrange
source_markdown = """[bar&foo]: /uri
[bar&foo]"""
expected_tokens = [
"[link-ref-def(1,1):True::bar&foo:: :/uri:::::]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[link(3,1):shortcut:/uri:::::bar&foo:::::]",
"[text(3,2):bar\a&\a\a&\a&\a\afoo:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/uri">bar&foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03aa():
"""
Test case extra 03aa: variation of 3a with extra text
"""
# Arrange
source_markdown = """[xx[bar&foo]yy](/uri1)
[bar&foo]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):xx:]",
"[link(1,4):shortcut:/uri:::::bar&foo:::::]",
"[text(1,5):bar\a&\a\a&\a&\a\afoo:]",
"[end-link::]",
"[text(1,17):yy:]",
"[text(1,19):]:]",
"[text(1,20):(/uri1):]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar&foo:: :/uri:::::]",
]
expected_gfm = """<p>[xx<a href="/uri">bar&foo</a>yy](/uri1)</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03ab():
"""
Test case extra 03ab: variation of 3a with outer image
"""
# Arrange
source_markdown = """![xx[bar&foo]yy](/uri1)
[bar&foo]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[image(1,1):inline:/uri1::xxbar&fooyy::::xx[bar&foo]yy:False::::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar&foo:: :/uri:::::]",
]
expected_gfm = """<p><img src="/uri1" alt="xxbar&fooyy" /></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03ac():
"""
Test case extra 03ac: variation of 3a with inner image
"""
# Arrange
source_markdown = """[xx![bar<&>foo]yy](/uri1)
[bar<&>foo]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):inline:/uri1:::::xx![bar<&>foo]yy:False::::]",
"[text(1,2):xx:]",
"[image(1,4):shortcut:/uri::bar<&>foo::::bar<&>foo:::::]",
"[text(1,26):yy:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar<&>foo:: :/uri:::::]",
]
expected_gfm = """<p><a href="/uri1">xx<img src="/uri" alt="bar<&>foo" />yy</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03b():
"""
Test case extra 03b: variation of 3 with copyright
"""
# Arrange
source_markdown = """[bar©foo]: /uri
[bar©foo]"""
expected_tokens = [
"[link-ref-def(1,1):True::bar©foo:: :/uri:::::]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[link(3,1):shortcut:/uri:::::bar©foo:::::]",
"[text(3,2):bar\a©\a©\afoo:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/uri">bar©foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03ba():
"""
Test case extra 03ba: variation of 3b with text
"""
# Arrange
source_markdown = """[bar©foo]: /uri
[xx[bar©foo]yy](/uri1)"""
expected_tokens = [
"[link-ref-def(1,1):True::bar©foo:: :/uri:::::]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[text(3,1):[:]",
"[text(3,2):xx:]",
"[link(3,4):shortcut:/uri:::::bar©foo:::::]",
"[text(3,5):bar\a©\a©\afoo:]",
"[end-link::]",
"[text(3,18):yy:]",
"[text(3,20):]:]",
"[text(3,21):(/uri1):]",
"[end-para:::True]",
]
expected_gfm = """<p>[xx<a href="/uri">bar©foo</a>yy](/uri1)</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03bb():
"""
Test case extra 03bb: variation of 3b with outer image
"""
# Arrange
source_markdown = """[bar©foo]: /uri
![xx[bar©foo]yy](/uri1)"""
expected_tokens = [
"[link-ref-def(1,1):True::bar©foo:: :/uri:::::]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[image(3,1):inline:/uri1::xxbar©fooyy::::xx[bar©foo]yy:False::::]",
"[end-para:::True]",
]
expected_gfm = """<p><img src="/uri1" alt="xxbar©fooyy" /></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03bc():
"""
Test case extra 03bc: variation of 3b with inner image
"""
# Arrange
source_markdown = """[bar©foo]: /uri
[xx![bar©foo]yy](/uri1)"""
expected_tokens = [
"[link-ref-def(1,1):True::bar©foo:: :/uri:::::]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[link(3,1):inline:/uri1:::::xx![bar©foo]yy:False::::]",
"[text(3,2):xx:]",
"[image(3,4):shortcut:/uri::bar©foo::::bar©foo:::::]",
"[text(3,19):yy:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/uri1">xx<img src="/uri" alt="bar©foo" />yy</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03c():
"""
Test case extra 03c: variation of 3 with code span
"""
# Arrange
source_markdown = """[bar` span `foo]: /uri
[bar` span `foo]"""
expected_tokens = [
"[link-ref-def(1,1):True::bar` span `foo:: :/uri:::::]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[link(3,1):shortcut:/uri:::::bar` span `foo:::::]",
"[text(3,2):bar:]",
"[icode-span(3,5):span:`: : ]",
"[text(3,13):foo:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/uri">bar<code>span</code>foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03ca():
"""
Test case extra 03ca: variation of 3c with text
"""
# Arrange
source_markdown = """[xx[bar` span `foo]yy](/uri1)
[bar` span `foo]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):xx:]",
"[link(1,4):shortcut:/uri:::::bar` span `foo:::::]",
"[text(1,5):bar:]",
"[icode-span(1,8):span:`: : ]",
"[text(1,16):foo:]",
"[end-link::]",
"[text(1,20):yy:]",
"[text(1,22):]:]",
"[text(1,23):(/uri1):]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar` span `foo:: :/uri:::::]",
]
expected_gfm = """<p>[xx<a href="/uri">bar<code>span</code>foo</a>yy](/uri1)</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03cb():
"""
Test case extra 03cb: variation of 3c with outer image
"""
# Arrange
source_markdown = """![xx[bar` span `foo]yy](/uri1)
[bar` span `foo]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[image(1,1):inline:/uri1::xxbarspanfooyy::::xx[bar` span `foo]yy:False::::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar` span `foo:: :/uri:::::]",
]
expected_gfm = """<p><img src="/uri1" alt="xxbarspanfooyy" /></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03cc():
"""
Test case extra 03cc: variation of 3c with inner image
"""
# Arrange
source_markdown = """[xx![bar` span `foo]yy](/uri1)
[bar` span `foo]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):inline:/uri1:::::xx![bar` span `foo]yy:False::::]",
"[text(1,2):xx:]",
"[image(1,4):shortcut:/uri::barspanfoo::::bar` span `foo:::::]",
"[text(1,21):yy:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar` span `foo:: :/uri:::::]",
]
expected_gfm = (
"""<p><a href="/uri1">xx<img src="/uri" alt="barspanfoo" />yy</a></p>"""
)
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03d():
"""
Test case extra 03d: variation of 3 with emphasis
"""
# Arrange
source_markdown = """[bar*span*foo]: /uri
[bar*span*foo]"""
expected_tokens = [
"[link-ref-def(1,1):True::bar*span*foo:: :/uri:::::]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[link(3,1):shortcut:/uri:::::bar*span*foo:::::]",
"[text(3,2):bar:]",
"[emphasis(3,5):1:*]",
"[text(3,6):span:]",
"[end-emphasis(3,10)::]",
"[text(3,11):foo:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/uri">bar<em>span</em>foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03da():
"""
Test case extra 03da: variation of 3d with text
"""
# Arrange
source_markdown = """[xx[bar*span*foo]yy](/uri1)
[bar*span*foo]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):xx:]",
"[link(1,4):shortcut:/uri:::::bar*span*foo:::::]",
"[text(1,5):bar:]",
"[emphasis(1,8):1:*]",
"[text(1,9):span:]",
"[end-emphasis(1,13)::]",
"[text(1,14):foo:]",
"[end-link::]",
"[text(1,18):yy:]",
"[text(1,20):]:]",
"[text(1,21):(/uri1):]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar*span*foo:: :/uri:::::]",
]
expected_gfm = """<p>[xx<a href="/uri">bar<em>span</em>foo</a>yy](/uri1)</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03db():
"""
Test case extra 03db: variation of 3d with outer image
"""
# Arrange
source_markdown = """![xx[bar*span*foo]yy](/uri1)
[bar*span*foo]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[image(1,1):inline:/uri1::xxbarspanfooyy::::xx[bar*span*foo]yy:False::::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar*span*foo:: :/uri:::::]",
]
expected_gfm = """<p><img src="/uri1" alt="xxbarspanfooyy" /></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03dc():
"""
Test case extra 03dc: variation of 3d with inner image
"""
# Arrange
source_markdown = """[xx![bar*span*foo]yy](/uri1)
[bar*span*foo]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):inline:/uri1:::::xx![bar*span*foo]yy:False::::]",
"[text(1,2):xx:]",
"[image(1,4):shortcut:/uri::barspanfoo::::bar*span*foo:::::]",
"[text(1,19):yy:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar*span*foo:: :/uri:::::]",
]
expected_gfm = (
"""<p><a href="/uri1">xx<img src="/uri" alt="barspanfoo" />yy</a></p>"""
)
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03e():
"""
Test case extra 03e: variation of 3 with autolink
"""
# Arrange
source_markdown = """[bar<http://autolink.com>foo]: /uri
[bar<http://autolink.com>foo]"""
expected_tokens = [
"[link-ref-def(1,1):True::bar<http://autolink.com>foo:: :/uri:::::]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[link(3,1):shortcut:/uri:::::bar<http://autolink.com>foo:::::]",
"[text(3,2):bar:]",
"[uri-autolink(3,5):http://autolink.com]",
"[text(3,26):foo:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/uri">bar<a href="http://autolink.com">http://autolink.com</a>foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03ea():
"""
Test case extra 03ea: variation of 3e with text
"""
# Arrange
source_markdown = """[xx[bar<http://autolink.com>foo]yy](/uri1)
[bar<http://autolink.com>foo]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):xx:]",
"[link(1,4):shortcut:/uri:::::bar<http://autolink.com>foo:::::]",
"[text(1,5):bar:]",
"[uri-autolink(1,8):http://autolink.com]",
"[text(1,29):foo:]",
"[end-link::]",
"[text(1,33):yy:]",
"[text(1,35):]:]",
"[text(1,36):(/uri1):]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar<http://autolink.com>foo:: :/uri:::::]",
]
expected_gfm = """<p>[xx<a href="/uri">bar<a href="http://autolink.com">http://autolink.com</a>foo</a>yy](/uri1)</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03eb():
"""
Test case extra 03eb: variation of 3e with outer image
"""
# Arrange
source_markdown = """![xx[bar<http://autolink.com>foo]yy](/uri1)
[bar<http://autolink.com>foo]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[image(1,1):inline:/uri1::xxbarhttp://autolink.comfooyy::::xx[bar<http://autolink.com>foo]yy:False::::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar<http://autolink.com>foo:: :/uri:::::]",
]
expected_gfm = """<p><img src="/uri1" alt="xxbarhttp://autolink.comfooyy" /></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03ec():
"""
Test case extra 03ec: variation of 3e with inner image
"""
# Arrange
source_markdown = """[xx![bar<http://autolink.com>foo]yy](/uri1)
[bar<http://autolink.com>foo]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):inline:/uri1:::::xx![bar<http://autolink.com>foo]yy:False::::]",
"[text(1,2):xx:]",
"[image(1,4):shortcut:/uri::barhttp://autolink.comfoo::::bar<http://autolink.com>foo:::::]",
"[text(1,34):yy:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar<http://autolink.com>foo:: :/uri:::::]",
]
expected_gfm = """<p><a href="/uri1">xx<img src="/uri" alt="barhttp://autolink.comfoo" />yy</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03f():
"""
Test case extra 03f: variation of 3 with raw html
"""
# Arrange
source_markdown = """[bar<image src="xx">foo]: /uri
[bar<image src="xx">foo]"""
expected_tokens = [
'[link-ref-def(1,1):True::bar<image src="xx">foo:: :/uri:::::]',
"[BLANK(2,1):]",
"[para(3,1):]",
'[link(3,1):shortcut:/uri:::::bar<image src="xx">foo:::::]',
"[text(3,2):bar:]",
'[raw-html(3,5):image src="xx"]',
"[text(3,21):foo:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/uri">bar<image src="xx">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03fa():
"""
Test case extra 03fa: variation of 3f with text
"""
# Arrange
source_markdown = """[xx[bar<image src="xx">foo]yy](/uri1)
[bar<image src="xx">foo]: /uri"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):xx:]",
'[link(1,4):shortcut:/uri:::::bar<image src="xx">foo:::::]',
"[text(1,5):bar:]",
'[raw-html(1,8):image src="xx"]',
"[text(1,24):foo:]",
"[end-link::]",
"[text(1,28):yy:]",
"[text(1,30):]:]",
"[text(1,31):(/uri1):]",
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::bar<image src="xx">foo:: :/uri:::::]',
]
expected_gfm = """<p>[xx<a href="/uri">bar<image src="xx">foo</a>yy](/uri1)</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03fb():
"""
Test case extra 03fb: variation of 3f with outer image
"""
# Arrange
source_markdown = """![xx[bar<image src="xx">foo]yy](/uri1)
[bar<image src="xx">foo]: /uri"""
expected_tokens = [
"[para(1,1):]",
'[image(1,1):inline:/uri1::xxbar<image src="xx">fooyy::::xx[bar<image src="xx">foo]yy:False::::]',
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::bar<image src="xx">foo:: :/uri:::::]',
]
expected_gfm = """<p><img src="/uri1" alt="xxbar<image src="xx">fooyy" /></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03fc():
"""
Test case extra 03fc: variation of 3f with inner image
"""
# Arrange
source_markdown = """[xx![bar<image src="xx">foo]yy](/uri1)
[bar<image src="xx">foo]: /uri"""
expected_tokens = [
"[para(1,1):]",
'[link(1,1):inline:/uri1:::::xx![bar<image src="xx">foo]yy:False::::]',
"[text(1,2):xx:]",
'[image(1,4):shortcut:/uri::bar<image src="xx">foo::::bar<image src="xx">foo:::::]',
"[text(1,29):yy:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
'[link-ref-def(3,1):True::bar<image src="xx">foo:: :/uri:::::]',
]
expected_gfm = """<p><a href="/uri1">xx<img src="/uri" alt="bar<image src="xx">foo" />yy</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03g():
"""
Test case extra 03g: variation of 3 with newline
"""
# Arrange
source_markdown = """[bar
foo]: /uri
[bar\nfoo]"""
expected_tokens = [
"[link-ref-def(1,1):True::bar foo:bar\nfoo: :/uri:::::]",
"[BLANK(3,1):]",
"[para(4,1):\n]",
"[link(4,1):shortcut:/uri:::::bar\nfoo:::::]",
"[text(4,2):bar\nfoo::\n]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/uri">bar
foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03ga():
"""
Test case extra 03ga: variation of 3g with text
"""
# Arrange
source_markdown = """[xx[bar
foo]yy](/uri1)
[bar
foo]: /uri
"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):[:]",
"[text(1,2):xx:]",
"[link(1,4):shortcut:/uri:::::bar\nfoo:::::]",
"[text(1,5):bar\nfoo::\n]",
"[end-link::]",
"[text(2,5):yy:]",
"[text(2,7):]:]",
"[text(2,8):(/uri1):]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::bar foo:bar\nfoo: :/uri:::::]",
"[BLANK(6,1):]",
]
expected_gfm = """<p>[xx<a href="/uri">bar
foo</a>yy](/uri1)</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03gb():
"""
Test case extra 03gb: variation of 3g with outer image
"""
# Arrange
source_markdown = """![xx[bar
foo]yy](/uri1)
[bar
foo]: /uri
"""
expected_tokens = [
"[para(1,1):\n]",
"[image(1,1):inline:/uri1::xxbar\nfooyy::::xx[bar\nfoo]yy:False::::]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::bar foo:bar\nfoo: :/uri:::::]",
"[BLANK(6,1):]",
]
expected_gfm = """<p><img src="/uri1" alt="xxbar
fooyy" /></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03gc():
"""
Test case extra 03gc: variation of 3g with inner image
"""
# Arrange
source_markdown = """[xx![bar
foo]yy](/uri1)
[bar
foo]: /uri"""
expected_tokens = [
"[para(1,1):\n]",
"[link(1,1):inline:/uri1:::::xx![bar\nfoo]yy:False::::]",
"[text(1,2):xx:]",
"[image(1,4):shortcut:/uri::bar\nfoo::::bar\nfoo:::::]",
"[text(2,5):yy:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::bar foo:bar\nfoo: :/uri:::::]",
]
expected_gfm = """<p><a href="/uri1">xx<img src="/uri" alt="bar
foo" />yy</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03hx():
"""
Test case extra 03h: variation of 3 with backslash
"""
# Arrange
source_markdown = """[bar\\
foo]: /uri
[bar\\
foo]"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):[:]",
"[text(1,2):bar:]",
"[hard-break(1,5):\\:\n]",
"[text(2,1):foo:]",
"[text(2,4):]:]",
"[text(2,5):: /uri:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[para(4,1):\n]",
"[text(4,1):[:]",
"[text(4,2):bar:]",
"[hard-break(4,5):\\:\n]",
"[text(5,1):foo:]",
"[text(5,4):]:]",
"[end-para:::True]",
]
expected_gfm = """<p>[bar<br />
foo]: /uri</p>
<p>[bar<br />
foo]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03hxa():
"""
Test case extra 03hxa: variation of 3h with newline
"""
# Arrange
source_markdown = """[\\
foo]: /uri
[\\
foo]"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):[:]",
"[hard-break(1,2):\\:\n]",
"[text(2,1):foo:]",
"[text(2,4):]:]",
"[text(2,5):: /uri:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[para(4,1):\n]",
"[text(4,1):[:]",
"[hard-break(4,2):\\:\n]",
"[text(5,1):foo:]",
"[text(5,4):]:]",
"[end-para:::True]",
]
expected_gfm = """<p>[<br />
foo]: /uri</p>
<p>[<br />
foo]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03hy():
"""
Test case extra 03hy: variation of 3h with multiple backslashes
"""
# Arrange
source_markdown = """[b\\ar\\
foo]: /uri
[b\\ar\\
foo]"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):[:]",
"[text(1,2):b\\ar:]",
"[hard-break(1,6):\\:\n]",
"[text(2,1):foo:]",
"[text(2,4):]:]",
"[text(2,5):: /uri:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[para(4,1):\n]",
"[text(4,1):[:]",
"[text(4,2):b\\ar:]",
"[hard-break(4,6):\\:\n]",
"[text(5,1):foo:]",
"[text(5,4):]:]",
"[end-para:::True]",
]
expected_gfm = """<p>[b\\ar<br />
foo]: /uri</p>
<p>[b\\ar<br />
foo]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03hz():
"""
Test case extra 03hz: variation of 3h with double backslash
"""
# Arrange
source_markdown = """[bar\\\\
foo]: /uri
[bar\\\\
foo]"""
expected_tokens = [
"[link-ref-def(1,1):True::bar\\\\ foo:bar\\\\\nfoo: :/uri:::::]",
"[BLANK(3,1):]",
"[para(4,1):\n]",
"[link(4,1):shortcut:/uri:::::bar\\\\\nfoo:::::]",
"[text(4,2):bar\\\b\\\nfoo::\n]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/uri">bar\\
foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03ha():
"""
Test case extra 03ha: variation of 3h with text
"""
# Arrange
source_markdown = """[bar\\
foo]: /uri
[xx[bar\\
foo]yy]"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):[:]",
"[text(1,2):bar:]",
"[hard-break(1,5):\\:\n]",
"[text(2,1):foo:]",
"[text(2,4):]:]",
"[text(2,5):: /uri:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[para(4,1):\n]",
"[text(4,1):[:]",
"[text(4,2):xx:]",
"[text(4,4):[:]",
"[text(4,5):bar:]",
"[hard-break(4,8):\\:\n]",
"[text(5,1):foo:]",
"[text(5,4):]:]",
"[text(5,5):yy:]",
"[text(5,7):]:]",
"[end-para:::True]",
]
expected_gfm = """<p>[bar<br />
foo]: /uri</p>
<p>[xx[bar<br />
foo]yy]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03hb():
"""
Test case extra 03hb: variation of 3h with outer image
"""
# Arrange
source_markdown = """[bar\\
foo]: /uri
![xx[bar\\
foo]yy]"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):[:]",
"[text(1,2):bar:]",
"[hard-break(1,5):\\:\n]",
"[text(2,1):foo:]",
"[text(2,4):]:]",
"[text(2,5):: /uri:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[para(4,1):\n]",
"[text(4,1):![:]",
"[text(4,3):xx:]",
"[text(4,5):[:]",
"[text(4,6):bar:]",
"[hard-break(4,9):\\:\n]",
"[text(5,1):foo:]",
"[text(5,4):]:]",
"[text(5,5):yy:]",
"[text(5,7):]:]",
"[end-para:::True]",
]
expected_gfm = """<p>[bar<br />
foo]: /uri</p>
<p>![xx[bar<br />
foo]yy]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03hc():
"""
Test case extra 03hc: variation of 3h with inner image
"""
# Arrange
source_markdown = """[bar\\
foo]: /uri
[xx![bar\\
foo]yy]"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):[:]",
"[text(1,2):bar:]",
"[hard-break(1,5):\\:\n]",
"[text(2,1):foo:]",
"[text(2,4):]:]",
"[text(2,5):: /uri:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[para(4,1):\n]",
"[text(4,1):[:]",
"[text(4,2):xx:]",
"[text(4,4):![:]",
"[text(4,6):bar:]",
"[hard-break(4,9):\\:\n]",
"[text(5,1):foo:]",
"[text(5,4):]:]",
"[text(5,5):yy:]",
"[text(5,7):]:]",
"[end-para:::True]",
]
expected_gfm = """<p>[bar<br />
foo]: /uri</p>
<p>[xx![bar<br />
foo]yy]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03i():
"""
Test case extra 03i: variation of 3 with double space
"""
# Arrange
source_markdown = """[bar\a\a
foo]: /uri
[bar\a\a
foo]""".replace(
"\a", " "
)
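# The two "\a" placeholders become two trailing spaces before the newline,
# i.e. a Markdown hard line break, which is why the expected tokens contain a
# hard-break inside the link label and link text.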
expected_tokens = [
"[link-ref-def(1,1):True::bar foo:bar \nfoo: :/uri:::::]",
"[BLANK(3,1):]",
"[para(4,1):\n]",
"[link(4,1):shortcut:/uri:::::bar \nfoo:::::]",
"[text(4,2):bar:]",
"[hard-break(4,5): :\n]",
"[text(5,1):foo:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/uri">bar<br />
foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03ia():
"""
Test case extra 03ia: variation of 3i with text
"""
# Arrange
source_markdown = """[bar\a\a
foo]: /uri
[xx[bar\a\a
foo]yy]""".replace(
"\a", " "
)
expected_tokens = [
"[link-ref-def(1,1):True::bar foo:bar \nfoo: :/uri:::::]",
"[BLANK(3,1):]",
"[para(4,1):\n]",
"[text(4,1):[:]",
"[text(4,2):xx:]",
"[link(4,4):shortcut:/uri:::::bar \nfoo:::::]",
"[text(4,5):bar:]",
"[hard-break(4,8): :\n]",
"[text(5,1):foo:]",
"[end-link::]",
"[text(5,5):yy:]",
"[text(5,7):]:]",
"[end-para:::True]",
]
expected_gfm = """<p>[xx<a href="/uri">bar<br />
foo</a>yy]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03ib():
"""
Test case extra 03ib: variation of 3i with outer image
"""
# Arrange
source_markdown = """[bar\a\a
foo]: /uri
![xx[bar\a\a
foo]yy]""".replace(
"\a", " "
)
expected_tokens = [
"[link-ref-def(1,1):True::bar foo:bar \nfoo: :/uri:::::]",
"[BLANK(3,1):]",
"[para(4,1):\n]",
"[text(4,1):![:]",
"[text(4,3):xx:]",
"[link(4,5):shortcut:/uri:::::bar \nfoo:::::]",
"[text(4,6):bar:]",
"[hard-break(4,9): :\n]",
"[text(5,1):foo:]",
"[end-link::]",
"[text(5,5):yy:]",
"[text(5,7):]:]",
"[end-para:::True]",
]
expected_gfm = """<p>![xx<a href="/uri">bar<br />
foo</a>yy]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03ic():
"""
Test case extra 03ic: variation of 3i with inner image
"""
# Arrange
source_markdown = """[bar\a\a
foo]: /uri
[xx![bar\a\a
foo]yy]""".replace(
"\a", " "
)
expected_tokens = [
"[link-ref-def(1,1):True::bar foo:bar \nfoo: :/uri:::::]",
"[BLANK(3,1):]",
"[para(4,1):\n]",
"[text(4,1):[:]",
"[text(4,2):xx:]",
"[image(4,4):shortcut:/uri::bar\nfoo::::bar \nfoo:::::]",
"[text(5,5):yy:]",
"[text(5,7):]:]",
"[end-para:::True]",
]
expected_gfm = """<p>[xx<img src="/uri" alt="bar
foo" />yy]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03jx():
"""
Test case extra 03j: variation of 3 with double backslash
"""
# Arrange
source_markdown = """[bar\\\\
foo]: /uri
abc
[bar\\\\
foo]
abc"""
expected_tokens = [
"[link-ref-def(1,1):True::bar\\\\ foo:bar\\\\\nfoo: :/uri:::::]",
"[BLANK(3,1):]",
"[para(4,1):\n\n\n]",
"[text(4,1):abc\n::\n]",
"[link(5,1):shortcut:/uri:::::bar\\\\\nfoo:::::]",
"[text(5,2):bar\\\b\\\nfoo::\n]",
"[end-link::]",
"[text(6,5):\nabc::\n]",
"[end-para:::True]",
]
expected_gfm = """<p>abc
<a href="/uri">bar\\
foo</a>
abc</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_reference_links_extra_03ja():
"""
Test case extra 03ja: variation of 3 with text
"""
# Arrange
source_markdown = """[bar\\\\
foo]: /uri
abc
[bar\\\\
foo][]
abc"""
expected_tokens = [
"[link-ref-def(1,1):True::bar\\\\ foo:bar\\\\\nfoo: :/uri:::::]",
"[BLANK(3,1):]",
"[para(4,1):\n\n\n]",
"[text(4,1):abc\n::\n]",
"[link(5,1):collapsed:/uri:::::bar\\\\\nfoo:::::]",
"[text(5,2):bar\\\b\\\nfoo::\n]",
"[end-link::]",
"[text(6,7):\nabc::\n]",
"[end-para:::True]",
]
expected_gfm = """<p>abc
<a href="/uri">bar\\
foo</a>
abc</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
|
from __future__ import absolute_import, division, print_function
import inspect
import types
import os
from . import config
from .core import HomothetyOperator, IdentityOperator, Operator, ZeroOperator
from .warnings import warn, PyOperatorsWarning
import collections
__all__ = ['rule_manager']
_triggers = {}
_default_triggers = {
'inplace': False,
'none': False}
_description_triggers = {
'inplace': 'Allow inplace simplifications',
'none': 'Inhibit all rule simplifications'}
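# Module-level registries that appear to back the rule_manager switches:
# _triggers holds the current values, _default_triggers their defaults and
# _description_triggers the help text for each named switch.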
class Rule(object):
"""
Abstract class for operator rules.
An operator rule is a relation that can be expressed by the sentence
"'subjects' are 'predicate'". An instance of this class, when called with
input operands, checks whether they are subjects of the rule and returns
the predicate if that is the case. Otherwise, it returns None.
"""
def __init__(self, subjects, predicate):
if not isinstance(subjects, (list, str, tuple)):
raise TypeError("The input {0} is invalid.".format(subjects))
subjects_ = self._split_subject(subjects)
if any(not isinstance(s, str) and (not isinstance(s, type) or
not issubclass(s, Operator)) for s in subjects_):
raise TypeError("The subjects {0} are invalid.".format(subjects))
if len(subjects_) == 0:
raise ValueError('No rule subject is specified.')
if len(subjects_) > 2:
raise ValueError('No more than 2 subjects can be specified.')
if not isinstance(self, UnaryRule) and len(subjects_) == 1:
self.__class__ = UnaryRule
self.__init__(subjects, predicate)
return
if not isinstance(self, BinaryRule) and len(subjects_) == 2:
self.__class__ = BinaryRule
self.__init__(subjects, predicate)
return
if '1' in subjects_:
raise ValueError("'1' cannot be a subject.")
if not isinstance(predicate, (str, types.FunctionType)):
raise TypeError('Invalid predicate.')
if isinstance(predicate, str) and '{' in predicate:
raise ValueError("Predicate cannot be a subclass.")
self.subjects = subjects_
self.predicate = predicate
def __eq__(self, other):
if not isinstance(other, Rule):
return NotImplemented
if self.subjects != other.subjects:
return False
if isinstance(self.predicate, types.FunctionType):
if type(self.predicate) is not type(other.predicate):
return False
return self.predicate.__code__ is other.predicate.__code__
if isinstance(self.predicate, str):
return self.predicate == other.predicate
return self.predicate is other.predicate
@staticmethod
def _symbol2operator(op, symbol):
if not isinstance(symbol, str):
return symbol
if symbol == '1':
return IdentityOperator()
if symbol == '.':
return op
try:
return {'C': op._C,
'T': op._T,
'H': op._H,
'I': op._I}[symbol]
except (KeyError):
raise ValueError("Invalid symbol: '{0}'.".format(symbol))
@classmethod
def _split_subject(cls, subject):
if isinstance(subject, str):
subject = subject.split(',')
if not isinstance(subject, (list, tuple)):
raise TypeError('The rule subject is invalid.')
subject = tuple(s.replace(' ', '') if isinstance(s, str) else s
for s in subject)
valid = '.,C,T,H,I,IC,IT,IH'.split(',')
if any((not isinstance(s, str) or s not in valid) and
(not isinstance(s, type) or not issubclass(s, Operator))
for s in subject):
raise ValueError('The rule subject is invalid.')
return subject
def __str__(self):
subjects = [s if isinstance(s, str) else s.__name__
for s in self.subjects]
spredicate = ' '.join(s.strip() for s in inspect.getsource(
self.predicate).split('\n')) \
if isinstance(self.predicate, types.LambdaType) \
else self.predicate
return '{0} = {1}'.format(','.join(subjects), spredicate)
__repr__ = __str__
class UnaryRule(Rule):
"""
    Unary rule on operators.
An operator unary rule is a relation that can be expressed by the sentence
"'subject' is 'predicate'".
Parameters
----------
subject : str
It defines the property of the operator for which the predicate holds:
'C' : the operator conjugate
'T' : the operator transpose
'H' : the operator adjoint
            'I' : the operator inverse
'IC' : the operator inverse-conjugate
'IT' : the operator inverse-transpose
'IH' : the operator inverse-adjoint
predicate : function or str
        What is returned by the rule when it applies. It can be:
'1' : the identity operator
'.' : the operator itself
or a callable of one argument.
Example
-------
>>> rule = UnaryRule('T', '.')
>>> o = Operator()
>>> oT = rule(o)
>>> oT is o
True
"""
def __init__(self, subjects, predicate):
super(UnaryRule, self).__init__(subjects, predicate)
if len(self.subjects) != 1:
raise ValueError('This is not a unary rule.')
if self.subjects[0] == '.':
raise ValueError('The subject cannot be the operator itself.')
if isinstance(predicate, collections.Callable) or predicate in ('.', '1'):
return
raise ValueError("Invalid predicate: '{0}'.".format(predicate))
def __call__(self, reference):
predicate = self._symbol2operator(reference, self.predicate)
if predicate is None:
return None
if not isinstance(predicate, Operator) and isinstance(predicate, collections.Callable):
predicate = predicate(reference)
if not isinstance(predicate, Operator):
raise TypeError('The predicate is not an operator.')
return predicate
class BinaryRule(Rule):
"""
Binary rule on operators.
An operator rule is a relation that can be expressed by the sentence
"'subjects' are 'predicate'". An instance of this class, when called with
two input arguments checks if the inputs are subjects to the rule, and
returns the predicate if it is the case. Otherwise, it returns None.
Parameters
----------
subjects : str
It defines the relationship between the two subjects that must be
verified for the rule to apply. It is a pair of two
expressions. One has to be '.' and stands for the reference subject.
It determines if the reference operator is on the right or left hand
side of the operator pair. The other expression constrains the other
subject, which must be:
'.' : the reference operator itself.
'C' : the conjugate of the reference object
'T' : the transpose of the reference object
'H' : the adjoint of the reference object
or an Operator subclass.
For instance, given a string 'C,.', the rule will apply to the inputs
o1 and o2 if o1 is o2.C. For a condition ('.', DiagonalOperator), the
rule will apply if o2 is a DiagonalOperator instance.
predicate : function or str
If the two objects o1, o2, are subjects of the rule, the predicate
will be returned. The predicate can be '.', '1' or a callable
of two arguments.
Example
-------
>>> rule = BinaryRule('.,.', '.')
>>> o = Operator()
>>> rule(o, o) is o
True
>>> rule(o, IdentityOperator()) is None
True
"""
def __init__(self, subjects, predicate):
super(BinaryRule, self).__init__(subjects, predicate)
if len(self.subjects) != 2:
raise ValueError('This is not a binary rule.')
self.reference = 1 if self.subjects[1] == '.' else 0
self.other = self.subjects[1-self.reference]
def __call__(self, o1, o2):
reference, other = (o1, o2) if self.reference == 0 else (o2, o1)
subother = self._symbol2operator(reference, self.other)
if isinstance(subother, (type, tuple)):
if subother is HomothetyOperator:
subother = (HomothetyOperator, ZeroOperator)
if not isinstance(other, subother):
return None
elif other != subother:
return None
predicate = self._symbol2operator(reference, self.predicate)
if predicate is None:
return None
if not isinstance(predicate, Operator) and isinstance(predicate, collections.Callable):
predicate = predicate(o1, o2)
if predicate is None:
return None
if isinstance(predicate, (list, tuple)) and len(predicate) == 1:
predicate = predicate[0]
if not isinstance(predicate, Operator) \
and not (isinstance(predicate, (list, tuple))
and all(isinstance(o, Operator)
for o in predicate)):
raise TypeError("The predicate '{0}' is not an operator.".format(
predicate))
return predicate
class RuleManager(object):
"""
Manage a set of rule prescriptions.
It is a proxy for the global dictionary that contains the rule names
and values. It also provides a context manager to change the rules inside
a with statement.
Rule defaults can be stored in a file 'rules.txt' in the user directory
pyoperators.config.LOCAL_PATH.
Examples
--------
To prevent rule simplifications:
>>> from pyoperators.rules import rule_manager
>>> rule_manager['none'] = True
To re-enable rule simplifications:
>>> rule_manager['none'] = False
or:
>>> with rule_manager(none=True):
... # in this context, operator simplification rules are inhibited
... print(rule_manager['none'])
True
>>> print(rule_manager['none'])
False
It is possible to nest contexts:
>>> print(rule_manager['none'])
False
>>> with rule_manager(none=True) as new_rule_manager:
... print(rule_manager['none'])
... with new_rule_manager(none=False):
... print(rule_manager['none'])
... print(rule_manager['none'])
True
False
True
>>> print(rule_manager['none'])
False
"""
def __init__(self):
self._deferred_triggers = {}
if len(self) == 0:
self.update(_default_triggers)
self._update_user_default_triggers()
def __call__(self, **keywords):
for key in keywords:
if key not in self:
raise KeyError('Unknown rule: {!r}'.format(key))
self._deferred_triggers = keywords
return self
def __enter__(self):
self._old_triggers = self.copy()
self.update(self._deferred_triggers)
return RuleManager()
def __exit__(self, exc_type, exc_val, exc_tb):
global _triggers
_triggers = self._old_triggers
return False
def __getitem__(self, key):
return _triggers[key]
def __setitem__(self, key, value):
if key not in self:
raise KeyError('Unknown rule: {!r}'.format(key))
_triggers[key] = value
def __contains__(self, key):
return key in _triggers
def __iter__(self):
return iter(sorted(_triggers.keys()))
def __len__(self):
return len(_triggers)
def __str__(self):
nk = max(len(k) for k in self)
nv = max(len(repr(v)) for v in self.values())
s = '{0:' + str(nk) + '} = {1!r:' + str(nv) + '} # {2}'
return '\n'.join(s.format(k, self[k], _description_triggers.get(k, ''))
for k in self)
def clear(self):
""" Clear the global rule dictionary. """
_triggers.clear()
def copy(self):
""" Copy the global rule dictionary. """
return _triggers.copy()
def get(self, k, *args):
""" Get a rule value in the global rule dictionary. """
return _triggers.get(k, *args)
def items(self):
""" Return the global rule items. """
return _triggers.items()
def keys(self):
""" Return the global rule names. """
return _triggers.keys()
def pop(self, k, *args):
""" Pop a given item from the global rule dictionary. """
return _triggers.pop(k, *args)
def popitem(self):
""" Pop any item from the global rule dictionary. """
return _triggers.popitem()
def register(self, rule, default, description):
""" Add a new rule. """
# should not be called in a managed context
if not isinstance(rule, str):
raise TypeError('The rule is not a string.')
if not isinstance(description, str):
raise TypeError('The rule description is not a string.')
rule = rule.lower()
_triggers[rule] = default
_description_triggers[rule] = description
def update(self, *args, **keywords):
""" Update the global rule dictionary. """
_triggers.update(*args, **keywords)
def values(self):
""" Return the global rule values. """
return _triggers.values()
def _update_user_default_triggers(self):
# read user 'rules.txt' to update defaults
path = os.path.join(config.LOCAL_PATH, 'rules.txt')
if not os.path.exists(path):
return
if not os.access(path, os.R_OK):
warn('The file {0!r} cannot be read.'.format(path),
PyOperatorsWarning)
return
with open(path) as f:
for iline, line in enumerate(f.readlines()):
line = line.strip()
line_orig = line
try:
index = line.index('#')
except ValueError:
pass
else:
line = line[:index].rstrip()
try:
index = line.index('=')
except ValueError:
if len(line) == 0:
continue
warn('In file {0!r}, line {1} does not define a rule: {2!r'
'}.'.format(path, iline + 1, line_orig),
PyOperatorsWarning)
continue
key = line[:index].rstrip().lower()
value = line[index+1:].lstrip()
try:
value = eval(value, {})
except Exception:
warn('In file {0!r}, line {1}: {2!r} cannot be evaluated'.
format(path, iline+1, value), PyOperatorsWarning)
continue
_triggers[key] = value
__repr__ = __str__
rule_manager = RuleManager()
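# Illustrative sketch (not part of the original module): a user 'rules.txt'
# read by RuleManager._update_user_default_triggers could look like this,
# assuming it is placed in pyoperators.config.LOCAL_PATH:
#
#     # inhibit all rule simplifications
#     none = True
#     inplace = False  # keep inplace simplifications disabled
#
# Each non-empty, non-comment line has the form 'key = value'; the right-hand
# side is evaluated with eval() and anything after '#' is ignored.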
|
from typing import Any, Callable, Optional, Tuple
# isort:imports-thirdparty
import torch
from pl_bolts.utils import _TORCHVISION_AVAILABLE
from pl_bolts.utils.warnings import warn_missing_pkg
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
# isort:imports-firstparty
from trackseg.datasets import LocalMP4FramesDataset, SingleImageDataset
if _TORCHVISION_AVAILABLE:
from torchvision import transforms as transform_lib
else: # pragma: no cover
warn_missing_pkg("torchvision")
IMAGENET_MEANS = [0.485, 0.456, 0.406]
IMAGENET_STDS = [0.229, 0.224, 0.225]
def get_transforms(
normalize: Optional[bool],
resize_size: Optional[Tuple[int, int]],
) -> Callable:
transforms = list()
if resize_size is not None:
transforms.append(transform_lib.Resize(size=resize_size))
if normalize is not None and normalize:
transforms.append(
transform_lib.Normalize(mean=IMAGENET_MEANS, std=IMAGENET_STDS)
)
transforms.append(transform_lib.ToTensor())
transforms = transform_lib.Compose(transforms)
return transforms
class UnsupervisedSegmentationDataModule(LightningDataModule):
"""
Unsupervised Segmentation data and transforms
Transforms::
video_mean = ...
video_std = ...
transforms = transform_lib.Compose([
transform_lib.ToTensor(),
transform_lib.Normalize(
mean=video_mean,
std=video_std
)
])
example:
```
dm = UnsupervisedSegmentationDataModule(
name="example_dataZ",
mp4_fpath="/path/to/video.mp4",
data_dir="/path/to/data",
im_size=(256, 480),
desired_frame_rate=20,
video_start_sec=1,
video_end_sec=3,
batch_size=4,
)
dl = dm.train_dataloader()
for batch in dl:
im = batch[0].permute(1, 2, 0) # HWC
```
"""
name = "UnsupervisedSegmentation"
extra_args: dict = {}
def __init__(
self,
name: str,
mp4_fpath: str,
data_dir: str,
im_size: Optional[Tuple[int, int]],
desired_frame_rate: float,
allow_above_fps: bool = False,
video_start_sec: int = 0,
video_end_sec: Optional[int] = None,
num_workers: int = 16,
batch_size: int = 32,
seed: int = 42,
shuffle: bool = False,
pin_memory: bool = False,
drop_last: bool = False,
*args: Any,
**kwargs: Any,
) -> None:
"""
Args:
            mp4_fpath: path to mp4. Can be None if data_dir is already populated with extracted frames
data_dir: path to save frames of extracted video
num_workers: how many workers to use for loading data
batch_size: number of examples per training/eval step
seed: random seed to be used for train/val/test splits
shuffle: If true shuffles the data every epoch
pin_memory: If true, the data loader will copy Tensors into CUDA pinned
memory before returning them
drop_last: If true drops the last incomplete batch
"""
super().__init__(*args, **kwargs)
if not _TORCHVISION_AVAILABLE: # pragma: no cover
raise ModuleNotFoundError(
"Transforms require `torchvision` which is not installed yet."
)
self.name = name
self.mp4_fpath = mp4_fpath
self.data_dir = data_dir
self.resize_size = im_size
self.desired_frame_rate = desired_frame_rate
self.allow_above_fps = allow_above_fps
self.video_start_sec = video_start_sec
self.video_end_sec = video_end_sec
self.num_workers = num_workers
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
self.target_transforms = None
@property
def num_classes(self) -> int:
"""
Return:
-1
"""
# TODO
return -1
def train_dataloader(self) -> DataLoader:
"""
train set
"""
# TODO im size??
transforms = self.train_transforms or get_transforms(
normalize=True,
resize_size=self.resize_size,
)
dataset = LocalMP4FramesDataset(
name=self.name,
mp4_fpath=self.mp4_fpath,
data_dir=self.data_dir,
desired_fps=self.desired_frame_rate,
allow_above_fps=self.allow_above_fps,
video_start_sec=self.video_start_sec,
video_end_sec=self.video_end_sec,
transform=transforms,
)
loader = DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=self.shuffle,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory,
)
return loader
class UnsupervisedSingleImageDataModule(LightningDataModule):
train_dataset = None
test_dataset = None
def __init__(
self,
image: torch.Tensor,
target: Optional[torch.Tensor] = None,
im_size: Optional[Tuple[int, int]] = None,
num_workers: int = 1,
seed: int = 42,
pin_memory: bool = False,
normalize: Optional[bool] = None,
*args: Any,
**kwargs: Any,
):
super().__init__(*args, **kwargs)
self.resize_size = im_size
if not _TORCHVISION_AVAILABLE: # pragma: no cover
raise ModuleNotFoundError(
"Transforms require `torchvision` which is not installed yet."
)
self.num_workers = num_workers
self.batch_size = 1
self.seed = seed
self.shuffle = False
self.pin_memory = pin_memory
self.drop_last = False
image_transforms = self.train_transforms or get_transforms(
normalize=normalize, resize_size=self.resize_size
)
target_transforms = get_transforms(
normalize=False, resize_size=self.resize_size
)
self.train_dataset = SingleImageDataset(
image=image,
target=None,
image_transform=image_transforms,
target_transform=target_transforms,
)
if target is not None:
self.test_dataset = SingleImageDataset(
image=image,
target=target,
image_transform=image_transforms,
target_transform=target_transforms,
)
def train_dataloader(self):
return DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=self.shuffle,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory,
)
def test_dataloader(self):
return DataLoader(
self.test_dataset,
batch_size=self.batch_size,
shuffle=self.shuffle,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory,
)
|
import numpy as np
import random
class DiscreteTable():
"""
Table Environment with clean and dirty dishes during Human Robot Collaboration
"""
def __init__(self, sizes, n_clean=3, n_dirty=2, n_human=1, cam_pos=(2, 2), partial=False, noise=False):
self.rows = sizes[0]
self.cols = sizes[1]
# Partial Observability and Noisy Sensor Reading
self.partial = partial
self.noise = noise
# Positions of Camera
self.init_cam_pos = np.asarray(cam_pos)
self.cam_pos = self.init_cam_pos
# Restrict Camera Motion
self.cam_row_bound = self.rows - 1
self.cam_col_bound = self.cols - 1
# Number of dishes and humans
self.num_clean_dish = n_clean
self.num_dirty_dish = n_dirty
self.num_human = n_human
self.tot_obj = self.num_clean_dish + self.num_dirty_dish + self.num_human
self.obj_list = [""] + ["c"] * self.num_clean_dish + ["d"] * self.num_dirty_dish + ["h"] * self.num_human
# Observation and Action dimensions
# TODO: add observation and action dimensions (if partial 9, else row*col)
# Generate Grid
self.grid = np.zeros((self.rows, self.cols), dtype=int)
self._gen_grid()
def _gen_pos(self):
r = random.randint(0, self.rows - 1)
c = random.randint(0, self.cols - 1)
return r, c
def _gen_grid(self):
for i in range(self.tot_obj):
r, c = self._gen_pos()
while self.grid[r][c]:
r, c = self._gen_pos()
self.grid[r][c] = i + 1
        assert np.count_nonzero(self.grid) == self.tot_obj, "Failed to place all objects on the table."
self.init_grid = self.grid
self._gen_obs()
def _gen_obs(self):
if self.partial:
# Obtain a 3x3 square camera view around camera position, reshape to row vector 1x9
r = self.cam_pos[0]
c = self.cam_pos[1]
obs = np.squeeze(self.grid[r-1:r+2, c-1:c+2].reshape(1,-1))
else:
# Return full grid as a row vector 1x(Rows*Cols)
obs = self.grid.reshape(1,-1)
if self.noise:
# Mask observation with noise
            mask = np.random.randint(0,2,size=obs.shape).astype(bool)
noise = np.random.randint(0, self.tot_obj, size=obs.shape)
obs[mask] = noise[mask]
return obs
def _get_obj_type(self, obj_id):
return self.obj_list[obj_id]
def print(self):
print(self.grid)
def reset(self):
self.grid = self.init_grid
self.cam_pos = self.init_cam_pos
return self._gen_obs()
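    # Action encoding (as implemented in step() below):
    #   0                 : stop and score the remaining / removed objects
    #   1 / 2             : move the camera up / down by one row
    #   3 / 4             : move the camera left / right by one column
    #   5 .. 4 + tot_obj  : remove the object with id (action - 4) from the table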
def step(self, action):
# TODO: Complete
done = False
rew = 0.0
if action == 0:
done = True
# Get indexes of each object
range_c = np.asarray(range(1,self.num_clean_dish+1))
range_d = np.asarray(range(self.num_clean_dish+1, self.num_clean_dish+self.num_dirty_dish+1))
range_h = np.asarray(range(self.tot_obj-self.num_human+1, self.tot_obj+1))
# Number of remaining objects
num_c = len(np.intersect1d(self.grid, range_c))
num_d = len(np.intersect1d(self.grid, range_d))
num_h = len(np.intersect1d(self.grid, range_h))
# Compute reward wrt to the remaining objects
cost1 = 20 * num_c # Remaining clean dishes (+)
cost2 = -10 * num_d # Remaining dirty dishes (-)
cost3 = - 5 * (self.num_clean_dish - num_c) # Clean dishes in the wash (-)
cost4 = 25 * (self.num_dirty_dish - num_d) # Dirty dishes in the wash (+)
rew = cost1 + cost2 + cost3 + cost4
elif action == 1:
# Move camera up (row-1)
if self.cam_pos[0] > 1:
self.cam_pos[0] -= 1
rew = -1.0
else:
                rew = -5.0 # punish to attempt to move camera outside boundary
elif action == 2:
# Move camera down (row+1)
if self.cam_pos[0] < self.cam_row_bound:
self.cam_pos[0] += 1
rew = -1.0
else:
rew = -5.0 # punish to attempt to move camera outside boundary
elif action == 3:
# Move camera left (col-1)
if self.cam_pos[1] > 1:
self.cam_pos[1] -= 1
rew = -1.0
else:
rew = -5.0 # punish to attempt to move camera outside boundary
elif action == 4:
# Move camera right (col+1)
if self.cam_pos[1] < self.cam_col_bound:
self.cam_pos[1] += 1
rew = -1.0
else:
                rew = -5.0 # punish to attempt to move camera outside boundary
elif action < self.tot_obj + 5:
# Remove object from table
obj_id = action - 4
obj_type = self._get_obj_type(obj_id)
idx = np.where(self.grid==obj_id)
r = np.squeeze(idx[0])
c = np.squeeze(idx[1])
self.grid[r][c] = 0
# Generate reward depending on removed object type
if obj_type == 'c':
rew = -10
elif obj_type == 'd':
rew = 10
elif obj_type == 'h':
rew = -50
done = True
else:
print("Unrecognized object type. Cannot remove object.")
else:
print("Unrecognized action")
obs = self._gen_obs()
info = None
return obs, rew, done, info
def render(self):
self.print()
env = DiscreteTable([5, 5], partial=True, noise=False)
env.reset()
env.render()
ep_rew = 0.0
for _ in range(10):
# env.render()
a = np.random.randint(5)
obs, rew, done, info = env.step(a)
print("Reward: ", rew)
ep_rew += rew
if done:
# print("Episode reward: ". ep_rew)
ep_rew = 0
continue
env.render() |
import pandas as pd
import sqlite3
import sys
HANDLE_IDS = sys.argv[1:]
PATH_TO_DB = "" # add your own PATH
conn = sqlite3.connect(PATH_TO_DB)
c = conn.cursor()
# get message df with the handles
command = f"""
SELECT text,
is_from_me,
datetime(date/1000000000 + strftime('%s', '2001-01-01 00:00:00'), 'unixepoch') as date
FROM message WHERE handle_id={HANDLE_IDS[0]}
OR handle_id={HANDLE_IDS[1]}
ORDER BY date ASC;
"""
c.execute(command)
messages = pd.DataFrame(c.fetchall(), columns=['text', 'is_from_me', 'date'])
messages['date'] = pd.to_datetime(messages['date'])
# remove rows that are attachments only (no text)
errors = [i for i, text in enumerate(messages.text) if not isinstance(text,str)]
messages.drop(errors, axis=0, inplace=True)
# reindex
messages.index = pd.RangeIndex(len(messages.index))
# -- SYNTHETIC COLUMNS ---
NUM_PERIODS = (max(messages.date) - min(messages.date)).days // 7 + 2
# the week it was sent
messages['week'] = pd.cut(
messages.date,
bins=pd.date_range(
pd.to_datetime(min(messages.date)).strftime("%Y-%m-%d"),
periods=NUM_PERIODS,
freq='W-TUE'),
labels=range(NUM_PERIODS - 1))
# day it was sent. 0 == Monday, etc..
messages['day'] = messages['date'].apply(lambda x: x.weekday())
# the hour in which it was sent in 24h format
messages['hourly_bin'] = messages['date'].apply(lambda x: x.hour)
# holds the text_length
messages['text_length'] = [len(text) for text in messages.text]
# holds the word length of the text
messages['num_words'] = [len(text.split(" ")) for text in messages.text]
# export as csv file
messages.to_csv("messages.csv", sep=',', encoding='utf-8')
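# Usage sketch (assumed invocation; the script file name is illustrative):
#   python imessage_to_csv.py <handle_id_1> <handle_id_2>
# where the two handle_id values identify conversation partners in the Messages
# chat.db, and PATH_TO_DB above points at a readable copy of that database.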
|
#!/usr/bin/env python
import sys
import argparse
import json
import random
import time
import bluetooth
class myClient():
def __init__(self):
self.blah = None
def connect(self, uuid):
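        # Retry service discovery up to 5 times, backing off exponentially
        # (2**n seconds plus random jitter) between attempts.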
for n in range(0, 5):
service_matches = bluetooth.find_service(uuid = uuid)
if len(service_matches) == 0:
print "couldn't find the service so in retry %d" % (n+1)
sleep_time = (2**n) + random.random()
print "going to retry in %f seconds" % sleep_time
time.sleep(sleep_time)
elif len(service_matches) > 0:
self.make_connection(service_matches)
return
else:
break
print "couldn't find the FooBar service"
sys.exit(0)
def make_connection(self, service_matches):
first_match = service_matches[0]
port = first_match["port"]
name = first_match["name"]
host = first_match["host"]
print "connecting to \"%s\" on %s" % (name, host)
sock = bluetooth.BluetoothSocket( bluetooth.RFCOMM )
sock.connect((host, port))
request = {"msg": "hello from cient!!"}
sock.send(json.dumps(request))
response = sock.recv(1024)
print json.loads(response)
sock.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="a bluetooth client")
#parser.add_argument("-v", "--verbose", help="turn on verbose mode", action="store_true")
parser.add_argument("-u", "--uuid", help="specify a uuid", default="1e0ca4ea-299d-4335-93eb-27fcfe7fa848")
args = parser.parse_args()
client = myClient()
client.connect(uuid = args.uuid)
|
"""Test for the LED-Pi API."""
import pytest
import sys
from unittest.mock import MagicMock, Mock
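# Stub out homeassistant.helpers.aiohttp_client before importing the ledpi
# integration, so that the API under test uses the mocked aiohttp session below.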
mock_aiohttp_session = MagicMock()
mock_aiohttp_client = MagicMock()
mock_aiohttp_client.async_get_clientsession.return_value = mock_aiohttp_session
sys.modules["homeassistant.helpers.aiohttp_client"] = mock_aiohttp_client
from homeassistant.components.light import ATTR_RGB_COLOR, ATTR_BRIGHTNESS
from custom_components.ledpi import API
from custom_components.ledpi.api import UnknownStateException
from custom_components.ledpi.const import ATTR_STATE, ATTR_LEDS
class TestAPI:
@pytest.fixture
def api(self):
yield API(None, "host")
def test_is_on(self, api):
api.data[ATTR_STATE] = "on"
assert api.is_on()
def test_is_on_off(self, api):
api.data[ATTR_STATE] = "off"
assert not api.is_on()
def test_is_on_no_state(self, api):
with pytest.raises(UnknownStateException):
api.is_on()
def test_rgb_hex_color(self, api):
api.data[ATTR_RGB_COLOR] = "#ffffff"
assert api.rgb_hex_color() == "#ffffff"
def test_rgb_hex_color_no_state(self, api):
with pytest.raises(UnknownStateException):
api.rgb_hex_color()
def test_rgb_color(self, api):
api.data[ATTR_RGB_COLOR] = "#ffffff"
assert api.rgb_color() == [255, 255, 255]
def test_rgb_color_invalid(self, api):
api.data[ATTR_RGB_COLOR] = "#xxxxxx"
with pytest.raises(UnknownStateException):
api.rgb_color()
def test_rgb_color_no_state(self, api):
with pytest.raises(UnknownStateException):
api.rgb_color()
def test_rgb_name(self, api):
api.data[ATTR_RGB_COLOR] = "#ffffff"
assert api.rgb_name() == "white"
def test_rgb_color_name(self, api):
api.data[ATTR_RGB_COLOR] = "#xxxxxx"
with pytest.raises(UnknownStateException):
api.rgb_name()
def test_rgb_name_no_state(self, api):
with pytest.raises(UnknownStateException):
api.rgb_name()
@pytest.mark.asyncio
async def test_set_rgb(self, api):
await api.set_rgb((255, 255, 255))
assert api.data[ATTR_RGB_COLOR] == "#ffffff"
@pytest.mark.asyncio
async def test_set_rgb_invalid_color(self, api):
await api.set_rgb((-1000, -1000, -1000))
        assert ATTR_RGB_COLOR not in api.data
@pytest.mark.asyncio
async def test_set_rgb_push(self, api):
await api.set_rgb((255, 255, 255), True)
mock_aiohttp_session.post.assert_called_with(
"http://host/api/v1/state",
json={
ATTR_RGB_COLOR: "#ffffff",
},
)
@pytest.mark.asyncio
async def test_set_rgb_push_http_error(self, api):
mock_aiohttp_session.post.side_effect = Mock(side_effect=Exception("error"))
await api.set_rgb((255, 255, 255), True)
assert mock_aiohttp_session.post.called
def test_brightness(self, api):
api.data[ATTR_BRIGHTNESS] = 1.0
assert api.brightness() == 1.0
def test_brightness_no_state(self, api):
with pytest.raises(UnknownStateException):
api.brightness()
@pytest.mark.asyncio
async def test_set_brightness(self, api):
await api.set_brightness(1.0)
assert api.data[ATTR_BRIGHTNESS] == 1.0
@pytest.mark.asyncio
async def test_set_brightness_push(self, api):
await api.set_brightness(1.0, True)
mock_aiohttp_session.post.assert_called_with(
"http://host/api/v1/state",
json={
ATTR_BRIGHTNESS: 1.0,
},
)
@pytest.mark.asyncio
async def test_set_brightness_push_http_error(self, api):
mock_aiohttp_session.post.side_effect = Mock(side_effect=Exception("error"))
await api.set_brightness(1.0, True)
assert mock_aiohttp_session.post.called
def test_leds(self, api):
api.data[ATTR_LEDS] = 1
assert api.leds() == 1
def test_leds_no_state(self, api):
with pytest.raises(UnknownStateException):
api.leds()
@pytest.mark.asyncio
async def test_turn_on(self, api):
api.data = {ATTR_STATE: "off", ATTR_BRIGHTNESS: 1.0, ATTR_RGB_COLOR: "#ffffff"}
await api.turn_on()
mock_aiohttp_session.post.assert_called_with(
"http://host/api/v1/state",
json={ATTR_STATE: "on", ATTR_BRIGHTNESS: 1.0, ATTR_RGB_COLOR: "#ffffff"},
)
@pytest.mark.asyncio
async def test_turn_on_http_error(self, api):
api.data = {ATTR_STATE: "off", ATTR_BRIGHTNESS: 1.0, ATTR_RGB_COLOR: "#ffffff"}
mock_aiohttp_session.post.side_effect = Mock(side_effect=Exception("error"))
await api.turn_on()
assert mock_aiohttp_session.post.called
@pytest.mark.asyncio
async def test_turn_off(self, api):
await api.turn_off()
mock_aiohttp_session.post.assert_called_with(
"http://host/api/v1/state",
json={
ATTR_STATE: "off",
},
)
@pytest.mark.asyncio
async def test_turn_off_http_error(self, api):
mock_aiohttp_session.post.side_effect = Mock(side_effect=Exception("error"))
await api.turn_off()
assert mock_aiohttp_session.post.called
@pytest.mark.asyncio
async def test_update_with_data(self, api):
await api.update({ATTR_LEDS: 10})
assert not mock_aiohttp_session.get.called
assert api.data[ATTR_LEDS] == 10
@pytest.mark.asyncio
async def test_update(self, api):
mock_response = MagicMock()
mock_response.json.return_value = {ATTR_LEDS: 10}
mock_aiohttp_session.get.return_value = mock_response
await api.update()
mock_aiohttp_session.get.assert_called_with("http://host/api/v1/state")
assert api.data != {}
@pytest.mark.asyncio
async def test_update_http_error(self, api):
api.data = {ATTR_LEDS: 10}
mock_aiohttp_session.get.side_effect = Mock(side_effect=Exception("error"))
await api.update()
mock_aiohttp_session.get.assert_called_with("http://host/api/v1/state")
assert api.data == {}
|
import datetime
import random
import time
def random_datetime(fix_year=None, fix_month=None, fix_day=None):
try:
new_datetime = datetime.datetime.fromtimestamp(random.randint(1, int(time.time())))
if fix_year:
new_datetime = new_datetime.replace(year=fix_year)
if fix_month:
new_datetime = new_datetime.replace(month=fix_month)
if fix_day:
new_datetime = new_datetime.replace(day=fix_day)
    except ValueError:
new_datetime = random_datetime(fix_year, fix_month, fix_day)
return new_datetime
def try_luck(possibility):
good_range = int(possibility * 100)
luck = random.randint(1, 100)
return luck <= good_range
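# Usage notes (illustrative): try_luck(0.25) returns True roughly 25% of the
# time; random_datetime(fix_year=2020) returns a random datetime whose year is
# forced to 2020 while month, day and time stay random.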
|
from collections import namedtuple, defaultdict
from functools import lru_cache
import numpy as np
import itertools
Obstacle = namedtuple('Obstacle', 'up down left right')
Point = namedtuple('Point', 'x y')
Adjacency = namedtuple('Adjacency', 'distance point')
class Agent:
"""
Represents a rectangular Agent moving among rectangular obstacles.
The position of the Agent is located in the center of his rectangle.
The value of size_x represents the distance from Agent's position
to both the left and right side of that rectangle. Similarly, the
value of size_y represents the distance from Agent's position to
both the upper and lower side of that rectangle. Therefore, the
dimension of the Agent's rectangle is (2*size_x, 2*size_y).
"""
def __init__(self, position, size_x, size_y, velocity=1.0):
self.position = position
self.size_x = size_x
self.size_y = size_y
self.path = []
self.velocity = velocity
def is_moving(self):
"""
Checks if the Agent is moving (if he has a path to follow).
"""
return bool(self.path)
def calculate_new_path(self, destination, obstacles):
"""
Calculates new path from Agent's current position to given destination and stores this path.
"""
obstacles = self.create_obstacles_in_configuration_space(obstacles)
self.path = find_path(self.position, destination, obstacles)
@lru_cache(maxsize=8)
def create_obstacles_in_configuration_space(self, obstacles):
"""
Creates new 'inflated' obstacles.
Each obstacle is transformed by increasing its size by the size
of the Agent. That allows the Agent to be represented as a single
point instead of a rectangle.
"""
obstacles_in_configuration_space = [
Obstacle(
obs.up - self.size_y, obs.down + self.size_y,
obs.left - self.size_x, obs.right + self.size_x)
for obs in obstacles]
return tuple(obstacles_in_configuration_space)
def move_along_path(self):
"""
Moves the Agent and updates his path.
Takes the next point of current path. If that point is close enough
(within one velocity distance), moves the agent to that point and
removes the point from path. Otherwise moves the agent by one velocity
distance toward that point.
If the path is empty, the method doesn't do anything.
"""
if not self.path:
return
next_point = self.path[0]
next_point_delta = np.subtract(next_point, self.position)
distance_to_next_point = np.linalg.norm(next_point_delta)
if distance_to_next_point < self.velocity:
self.position = self.path.pop(0)
else:
velocity_vector = np.multiply(next_point_delta, self.velocity / distance_to_next_point)
new_position = np.add(self.position, velocity_vector)
self.position = Point(*new_position)
def find_path(start, destination, obstacles):
"""
Calculates the path between start and destination, avoiding the obstacles.
Both the start and the destination are considered to be points
(with no dimensions). Returned path is a list of points that need
to be visited (in order) to reach from start to destination.
The path does not contain the starting point, since that point
is already visited. The path contains the destination as its
last element.
"""
visibility_graph = create_visibility_graph(start, destination, obstacles)
path = find_path_using_visibility_graph(start, destination, visibility_graph)
return path
def create_visibility_graph(start, destination, obstacles):
"""
Creates a visibility graph.
The graph is a dictionary. The key set contains all the vertices
(corners) of all the obstacles as well as start and destination
points. The value for each key is a list of adjacent points and
distances to those points. Each entry on the list is in a form
of a tuple containing a distance and a point. A point is
considered adjacent to a given one if there is an unobstructed
line between them (if the line does not intersect with any
obstacle). If a list of adjacent points for p1 contains p2,
then the list of adjacent points for p2 will contain p1. The
list of adjacent points might be empty, if the point has no
adjacent ones.
"""
visibility_graph = create_visibility_graph_for_obstacles(obstacles)
add_vertex_to_visibility_graph(start, obstacles, visibility_graph)
add_vertex_to_visibility_graph(destination, obstacles, visibility_graph)
return visibility_graph
def find_path_using_visibility_graph(start, destination, visibility_graph):
"""
Finds path from start to destination using visibility graph and A* algorithm.
"""
nodes_to_visit = set()
nodes_to_visit.add(start)
visited_nodes = set()
came_from_graph = {}
distance_from_start = defaultdict(lambda: float('inf'))
distance_from_start[start] = 0
estimated_distance = defaultdict(lambda: float('inf'))
estimated_distance[start] = distance_estimate(start, destination)
while nodes_to_visit:
min_estimated_distance = min(estimated_distance[n] for n in nodes_to_visit)
current_node = next(node for node in nodes_to_visit if estimated_distance[node] == min_estimated_distance)
if current_node == destination:
return reconstruct_path_to_point(destination, came_from_graph)
nodes_to_visit.remove(current_node)
visited_nodes.add(current_node)
for adjacency in visibility_graph[current_node]:
neighbour_node = adjacency.point
if neighbour_node in visited_nodes:
continue
neighbour_distance = distance_from_start[current_node] + adjacency.distance
if neighbour_node not in nodes_to_visit or neighbour_distance < distance_from_start[neighbour_node]:
came_from_graph[neighbour_node] = current_node
distance_from_start[neighbour_node] = neighbour_distance
estimated_distance[neighbour_node] = neighbour_distance + distance_estimate(neighbour_node, destination)
if neighbour_node not in nodes_to_visit:
nodes_to_visit.add(neighbour_node)
return None
def reconstruct_path_to_point(point, came_from_graph):
"""
Creates a path from start to destination.
Uses the graph (dictionary) of preceding nodes (created by A* algorithm).
The path does not contain a starting point.
"""
path = []
while point in came_from_graph:
path.insert(0, point)
point = came_from_graph[point]
return path
@lru_cache(maxsize=128)
def distance_estimate(point, goal):
"""
Returns Euclidean distance between given points.
"""
return np.linalg.norm(np.subtract(point, goal))
@lru_cache(maxsize=8)
def get_all_vertices(obstacles):
"""
Returns a set of all vertices (corners) of given obstacles.
"""
vertices = set()
for obs in obstacles:
vertices.update([Point(x, y) for x, y in itertools.product([obs.left, obs.right], [obs.up, obs.down])])
return vertices
@lru_cache(maxsize=8)
def create_visibility_graph_for_obstacles(obstacles):
"""
Creates a visibility graph only for given obstacles (with no start and destination).
This was extracted as a separate method to allow caching.
Obstacles in this program are considered immutable: no new
obstacles appear and the existing ones do not move. Therefore,
there is no reason to calculate the visibility graph for
obstacles more than once. However, the start and destination
change very often, so visibility for them is calculated using another method.
"""
vertices = get_all_vertices(obstacles)
visited_vertices = set()
graph = {v: [] for v in vertices}
for p1 in vertices:
visited_vertices.add(p1)
for p2 in vertices - visited_vertices:
check_connection_between_points(graph, obstacles, p1, p2)
return graph
def check_connection_between_points(graph, obstacles, point1, point2):
"""
Checks if there is an unobstructed line between point1 and point2. If so, adds the adjacency to graph.
"""
crossed_obstacles = [obs for obs in obstacles if line_crosses_obstacle(point1, point2, obs)]
if not crossed_obstacles:
distance = np.linalg.norm(np.subtract(point1, point2))
graph[point1].append(Adjacency(distance, point2))
graph[point2].append(Adjacency(distance, point1))
def add_vertex_to_visibility_graph(point, obstacles, graph):
"""
Adds one vertex to visibility graph and calculates adjacent points for it.
"""
points = set(graph.keys())
graph[point] = []
for existing_point in points:
check_connection_between_points(graph, obstacles, point, existing_point)
def line_crosses_obstacle(point1, point2, obstacle, threshold=1e-10):
"""
Checks if a line between 2 points crosses an obstacle.
Line that overlaps the obstacle's side or shares only
one point with the obstacle (e.g. one point is outside
the obstacle and the other is in its vertex) is not
considered to cross that obstacle.
"""
if point1 == point2:
return False
e = Point(*point1)
d = np.subtract(point2, point1)
d_len = np.linalg.norm(d)
d = np.divide(d, d_len)
d = Point(*d)
with np.errstate(divide='ignore', invalid='ignore'):
ax = np.divide(1, d.x)
ay = np.divide(1, d.y)
if ax >= 0:
txmin = np.multiply(ax, obstacle.left - e.x)
txmax = np.multiply(ax, obstacle.right - e.x)
else:
txmin = np.multiply(ax, obstacle.right - e.x)
txmax = np.multiply(ax, obstacle.left - e.x)
if ay >= 0:
tymin = np.multiply(ay, obstacle.up - e.y)
tymax = np.multiply(ay, obstacle.down - e.y)
else:
tymin = np.multiply(ay, obstacle.down - e.y)
tymax = np.multiply(ay, obstacle.up - e.y)
intervals_intersect = txmin < tymax - threshold and tymin < txmax - threshold
intervals_are_valid = txmin < d_len and tymin < d_len and txmax > 0 and tymax > 0
return intervals_intersect and intervals_are_valid
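# Illustrative usage sketch (coordinates and sizes are assumed, not part of the
# original module): route a 2x2 agent around a single rectangular obstacle.
if __name__ == '__main__':
    obstacles = (Obstacle(up=2, down=6, left=2, right=6),)
    agent = Agent(Point(0, 0), size_x=1, size_y=1, velocity=0.5)
    agent.calculate_new_path(Point(10, 10), obstacles)
    while agent.is_moving():
        agent.move_along_path()
    print('Reached', agent.position)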
|
# Find how many numbers from 1 to 1000 are multiples of 7 or 5.
# Constants for first_num and last_num
FIRST_NUM = 1
LAST_NUM = 1000
# Return number of multiples of 7 or 5 from first_num to last_num
def get_multiples(first_num, last_num):
# Initialize count of multiples
count_multiples = 0
# Count number of multiples of 7 or 5
for n in range(first_num, last_num + 1):
if n % 7 == 0 or n % 5 == 0:
count_multiples += 1
return count_multiples
# Define main function
def main():
# Obtain number of multiples of 7 or 5 from 1 to 1000
multiples = get_multiples(FIRST_NUM, LAST_NUM)
# Display result
print(f'There are {multiples} multiples of 7 or 5 from 1 to 1000.')
# Call main function
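# Expected output (by inclusion-exclusion: 142 + 200 - 28 = 314):
#   There are 314 multiples of 7 or 5 from 1 to 1000.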
main() |
from adaptive_alerting_detector_build.exceptions import (
AdaptiveAlertingDetectorBuildError,
)
class DatasourceQueryException(AdaptiveAlertingDetectorBuildError):
"""Raised when query fails."""
class base_datasource:
def __init__(self, **kwargs):
pass
def query(self):
raise NotImplementedError
|
from .normalizer import ImageNormalizer
|
import subprocess
from datetime import timedelta
from os import path
from config import HEC_HMS_HOME, HEC_HMS_SH, HEC_DSSVUE_HOME, HEC_DSSVUE_SH, HEC_EVENT_SCRIPT,\
PRE_PROCESSING_SCRIPT,POST_PROCESSING_SCRIPT, RAIN_FALL_FILE_NAME, DISCHARGE_FILE_NAME, \
HEC_INPUT_DSS, HEC_OUTPUT_DSS
def execute_pre_dssvue(run_date_time, back_days):
python_script_fp = PRE_PROCESSING_SCRIPT
run_date = run_date_time.strftime('%Y-%m-%d')
run_time = run_date_time.strftime('%H:%M:%S')
ts_start_date_time = run_date_time - timedelta(days=back_days)
ts_start_date = ts_start_date_time.strftime('%Y-%m-%d')
ts_start_time = ts_start_date_time.strftime('%H:%M:%S')
return _execute_hec_dssvue(python_script_fp, run_date, run_time, ts_start_date, ts_start_time)
def execute_post_dssvue(run_date_time, back_days):
python_script_fp = POST_PROCESSING_SCRIPT
run_date = run_date_time.strftime('%Y-%m-%d')
run_time = run_date_time.strftime('%H:%M:%S')
ts_start_date_time = run_date_time - timedelta(days=back_days)
ts_start_date = ts_start_date_time.strftime('%Y-%m-%d')
ts_start_time = ts_start_date_time.strftime('%H:%M:%S')
return _execute_hec_dssvue(python_script_fp, run_date, run_time, ts_start_date, ts_start_time)
def _execute_hec_dssvue(python_script, run_date, run_time, ts_start_date, ts_start_time):
dssvue_sh = path.join(HEC_DSSVUE_HOME, HEC_DSSVUE_SH)
#bash_command = '/home/curw/distributed_hec/hec-dssvue201/hec-dssvue.sh {PYTHON_SCRIPT} --date 2019-02-20 --time 14:00:00 --start-date 2019-02-18 --start-time 14:00:00'
bash_command = '{dssvue_sh} {python_script} --date {run_date} --time {run_time} --start-date {ts_start_date} --start-time {ts_start_time}'\
.format(dssvue_sh=dssvue_sh, python_script=python_script, run_date=run_date, run_time=run_time, ts_start_date=ts_start_date, ts_start_time=ts_start_time)
print('execute_hec_dssvue|bash_command : ', bash_command)
ret_code = subprocess.call(bash_command, shell=True)
return ret_code
def execute_hechms(model_name, run_path):
hec_hms_sh_fp = path.join(HEC_HMS_HOME, HEC_HMS_SH)
model_event_script_fp = path.join(run_path, HEC_EVENT_SCRIPT.replace('{MODEL_NAME}', model_name))
bash_command = "{hec_hms_sh} -s {hec_event_script}" \
.format(hec_hms_sh=hec_hms_sh_fp, hec_event_script=model_event_script_fp)
print('execute_hechms|bash_command : ', bash_command)
ret_code = subprocess.call(bash_command, shell=True)
return ret_code
|
"""
The purpose of this module is to provide a convenient way to create static neural
networks.
"""
from .ordered_set import OrderedSet
from .simple_layers import SimpleOutputBase, SimpleMergeBase, SimpleLayerBase, SimpleModule
from .simple_layers_implementations import Input, OutputClassification, Flatten, Conv2d, ReLU, MaxPool2d, Linear, \
ConcatChannels, OutputEmbedding, Conv3d, MaxPool3d, Reshape, BatchNorm2d, BatchNorm3d
from .compiled_net import compile_nn, find_layer_type, nodes_mark_output_dependencies, CompiledNet
from .denses import denses
from .convs import convs_3d, convs_2d
from .global_pooling import global_average_pooling_2d, global_average_pooling_3d, global_max_pooling_2d, \
global_max_pooling_3d
from .shift_scale import ShiftScale
from .sub_tensor import SubTensor
|
"""main entry for meteolab command-line interface"""
def main():
from meteolab import Meteolab
ret, fwds = Meteolab().run_command()
return ret
if __name__ == "__main__":
main()
|
"""The SolArk Modbus Integration."""
import asyncio
import logging
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_SCAN_INTERVAL
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from .const import DEFAULT_NAME, DEFAULT_SCAN_INTERVAL, DOMAIN
from .hub import SolArkModbusHub
_LOGGER = logging.getLogger(__name__)
SOLARK_MODBUS_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): cv.positive_int,
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({cv.slug: SOLARK_MODBUS_SCHEMA})}, extra=vol.ALLOW_EXTRA
)
PLATFORMS = ["sensor"]
async def async_setup(hass, config):
"""Set up the SolArk modbus component."""
hass.data[DOMAIN] = {}
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up a SolArk modbus."""
name = entry.data[CONF_NAME]
scan_interval = entry.data[CONF_SCAN_INTERVAL]
host = entry.data[CONF_HOST]
_LOGGER.debug("Setup %s.%s", DOMAIN, name)
hub = SolArkModbusHub(hass, name, host, scan_interval)
"""Register the hub."""
hass.data[DOMAIN][name] = {"hub": hub}
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass, entry):
"""Unload SolArk mobus entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if not unload_ok:
return False
hass.data[DOMAIN].pop(entry.data["name"])
return True
|
from zope.interface import classProvides
from twisted.plugin import IPlugin
from axiom.attributes import integer, inmemory
from axiom.item import Item
from eridanus import util as eutil
from eridanus.ieridanus import IEridanusPluginProvider
from eridanus.plugin import Plugin, usage, rest
from eridanusstd import urbandict
class UrbanDict(Item, Plugin):
"""
Urban Dictionary.
An API key for `urbandict` is required in order for this plugin to work.
"""
classProvides(IPlugin, IEridanusPluginProvider)
typeName = 'eridanus_plugins_urbandict'
dummy = integer()
service = inmemory()
def activate(self):
apiKey = eutil.getAPIKey(self.store, u'urbandict')
self.service = urbandict.UrbanDictService(apiKey)
@rest
@usage(u'define <term>')
def cmd_define(self, source, term):
"""
Get all definitions for <term> on Urban Dictionary.
"""
def formatResults(results):
for i, result in enumerate(results):
word = result[u'word']
# XXX: this should be a paginated/multiline output
dfn = eutil.unescapeEntities(result[u'definition'])
dfn = u' '.join(dfn.splitlines())
yield u'\002%d. %s\002: %s;' % (i + 1, word, dfn)
def displayResults(formattedResults):
source.reply(u' '.join(formattedResults))
return self.service.lookup(term
).addCallback(formatResults
).addCallback(displayResults)
@usage(u'verifyKey')
def cmd_verifykey(self, source):
"""
Verify that the currently set API key is valid.
"""
def gotResult(isValid):
result = (u'is not', 'is')[isValid]
msg = u'The API key %s valid.' % (result,)
source.reply(msg)
return self.service.verify_key().addCallback(gotResult)
|
import disnake
from dice_roll_game import current_game, game_implementation
from dice_roll_game.game_setup import DiceRollSetup
from disnake.ext import commands
class DiceRollCog(commands.Cog):
def __init__(self, bot: commands.Bot) -> None:
self.bot: commands.Bot = bot
@commands.slash_command(name="dice")
async def game(self, inter: disnake.ApplicationCommandInteraction) -> None:
await inter.response.defer()
# Set the game's ID as the player's ID
user_id = inter.author.id
if current_game.check_if_exists(user_id):
await inter.send("You are already playing this game!")
return
ai_level_view = DiceRollSetup()
await inter.edit_original_message(view=ai_level_view)
ai_level = await ai_level_view.wait() # noqa: F841
new_game = game_implementation.DiceRollGame(100)
current_game.update_game(user_id, new_game)
# TODO: Represent a game using a view
def setup(bot: commands.Bot):
bot.add_cog(DiceRollCog(bot))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 9 22:30:35 2020
@author: manal
"""
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__,'kernels.cpython-38-x86_64-linux-gnu.so')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
# Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import logging
import pathlib
from dataclasses import dataclass
from datetime import datetime
from typing import cast
from vdk.api.job_input import IJobArguments
from vdk.api.plugin.core_hook_spec import JobRunHookSpecs
from vdk.api.plugin.hook_markers import hookimpl
from vdk.internal.builtin_plugins.run.execution_results import ExecutionResult
from vdk.internal.builtin_plugins.run.execution_results import StepResult
from vdk.internal.builtin_plugins.run.file_based_step import JobFilesLocator
from vdk.internal.builtin_plugins.run.file_based_step import StepFuncFactory
from vdk.internal.builtin_plugins.run.file_based_step import TYPE_PYTHON
from vdk.internal.builtin_plugins.run.file_based_step import TYPE_SQL
from vdk.internal.builtin_plugins.run.job_context import JobContext
from vdk.internal.builtin_plugins.run.job_input_error_classifier import whom_to_blame
from vdk.internal.builtin_plugins.run.run_status import ExecutionStatus
from vdk.internal.builtin_plugins.run.step import Step
from vdk.internal.core import errors
from vdk.internal.core.context import CoreContext
from vdk.internal.core.statestore import CommonStoreKeys
log = logging.getLogger(__name__)
@dataclass
class JobArguments(IJobArguments):
arguments: dict
def get_arguments(self) -> dict:
return self.arguments
class DataJobFactory:
@staticmethod
def new_datajob(
data_job_directory: pathlib.Path | None,
core_context: CoreContext,
name: str | None = None,
) -> DataJob:
"""
Create a new data job
:param data_job_directory: the source code of the data job that will be executed
        :param core_context: Core context of the CLI. Upon run, the data job will spawn a child context to keep its own context/state.
        :param name: the name of the job. Leave it out and it will be inferred from the directory name.
"""
return DataJob(data_job_directory, core_context, name)
class DataJobDefaultHookImplPlugin:
"""
Default implementation of main plugin hooks in Data Job Run Cycle.
Plugins may decorate or replace some of the implementations
"""
@staticmethod
@hookimpl(trylast=True)
def run_step(context: JobContext, step: Step) -> StepResult:
start_time = datetime.utcnow()
exception = None
details = None
blamee = None
try:
log.debug(f"Processing step {step.name} ...")
step_executed = step.runner_func(step, context.job_input)
log.debug("Processing step %s completed successfully" % step.name)
status = (
ExecutionStatus.SUCCESS
if step_executed
else ExecutionStatus.NOT_RUNNABLE
)
except Exception as e:
status = ExecutionStatus.ERROR
details = errors.MSG_WHY_FROM_EXCEPTION(e)
blamee = whom_to_blame(e, __file__, context.job_directory)
exception = e
errors.log_exception(
blamee,
log,
what_happened=f"Processing step {step.name} completed with error.",
why_it_happened=errors.MSG_WHY_FROM_EXCEPTION(e),
consequences="I will not process the remaining steps (if any), "
"and this Data Job execution will be marked as failed.",
countermeasures="See exception and fix the root cause, so that the exception does "
"not appear anymore.",
exception=e,
)
return StepResult(
name=step.name,
type=step.type,
start_time=start_time,
end_time=datetime.utcnow(),
status=status,
details=details,
exception=exception,
blamee=blamee,
)
@staticmethod
@hookimpl(trylast=True)
def run_job(context: JobContext) -> ExecutionResult:
"""The script that runs the actual run of the data job.
It executes the provided steps starting from context.steps in sequential order
"""
start_time = datetime.utcnow()
exception = None
steps = context.step_builder.get_steps()
step_results = []
if len(steps) == 0:
errors.log_and_throw(
to_be_fixed_by=errors.ResolvableBy.USER_ERROR,
log=log,
what_happened="Data Job execution has failed.",
why_it_happened="Data Job has no steps.",
consequences="Data job execution will not continue.",
countermeasures="Please include at least 1 valid step in your Data Job. Also make sure you are passing the correct data job directory.",
)
execution_status = ExecutionStatus.SUCCESS
for current_step in steps:
step_start_time = datetime.utcnow()
try:
res = context.core_context.plugin_registry.hook().run_step(
context=context, step=current_step
)
except BaseException as e:
blamee = whom_to_blame(e, __file__, context.job_directory)
errors.log_exception(
blamee,
log,
what_happened=f"Processing step {current_step.name} completed with error.",
why_it_happened=errors.MSG_WHY_FROM_EXCEPTION(e),
consequences="I will not process the remaining steps (if any), "
"and this Data Job execution will be marked as failed.",
countermeasures="See exception and fix the root cause, so that the exception does "
"not appear anymore.",
exception=e,
)
res = StepResult(
name=current_step.name,
type=current_step.type,
start_time=step_start_time,
end_time=datetime.utcnow(),
status=ExecutionStatus.ERROR,
details=errors.MSG_WHY_FROM_EXCEPTION(e),
exception=e,
blamee=blamee,
)
step_results.append(res)
# errors.clear_intermediate_errors() # step completed successfully, so we can forget errors
if res.status == ExecutionStatus.ERROR:
execution_status = ExecutionStatus.ERROR
break
execution_result = ExecutionResult(
context.name,
context.core_context.state.get(CommonStoreKeys.EXECUTION_ID),
start_time,
datetime.utcnow(),
execution_status,
exception,
step_results,
)
return execution_result
@staticmethod
@hookimpl
def initialize_job(context: JobContext):
# TODO: consider split into collect_steps hooks for better clarity and ease.
# though let's first gather some data on how useful such new hooks would be.
if context.job_directory is None:
log.info(
"Data Job directory is not specified. Default job initialization will be skipped."
)
return
file_locator: JobFilesLocator = JobFilesLocator()
script_files = file_locator.get_script_files(context.job_directory)
for file_path in script_files:
if file_path.name.lower().endswith(".sql"):
step = Step(
name=file_path.name,
type=TYPE_SQL,
runner_func=StepFuncFactory.run_sql_step,
file_path=file_path,
job_dir=context.job_directory,
)
elif file_path.name.lower().endswith(".py"):
# TODO: check for run method.
step = Step(
name=file_path.name,
type=TYPE_PYTHON,
runner_func=StepFuncFactory.run_python_step,
file_path=file_path,
job_dir=context.job_directory,
)
else:
log.info("Skipping file as it is not a valid job step: %s" % file_path)
continue
context.step_builder.add_step(step)
@staticmethod
@hookimpl
def finalize_job(context: JobContext):
pass
class DataJob:
"""
Object representing a data job.
    A Data Job is an abstraction representing the definition of a data job set by a data engineer.
    A Data Job is a sequence of steps executed in a certain order.
    The Data Job run cycle is encapsulated by the run method.
    Prefer to use DataJobFactory to create a new data job.
"""
def __init__(
self,
data_job_directory: pathlib.Path | None,
core_context: CoreContext,
name: str | None = None,
):
if data_job_directory is None and name is None:
raise ValueError(
"Cannot initialize DataJob. "
"At least one of data job directory or data job name need to be set. "
)
self._name = data_job_directory.name if name is None else name
self._data_job_directory = data_job_directory
"""
        We need to create a child context that is scoped to this job execution only,
        since we want to allow multiple job executions within the same process
        (for example, template executions).
"""
self._core_context = core_context.create_child_context()
self._plugin_hook = cast(
JobRunHookSpecs, self._core_context.plugin_registry.hook()
)
@property
def name(self):
return self._name
# TODO: this also can be a hook - e.g job run_cycle_algorithm
def run(self, args: dict = None) -> ExecutionResult:
"""
        This is the basic implementation of the Data Job run (execution) cycle algorithm.
        All stages are pluggable as hooks.
        * Initialize - initialize the job's main functionalities, e.g. database connections, logging, collecting steps, etc.
        * Run Job - takes care of starting and running a data job
        * Run steps - run the actual steps. This is where the user code will be invoked.
        * Finalize - after the job finishes, do any finalization - clean up, send monitoring, etc.
"""
if args is None:
args = {}
if not self._core_context.plugin_registry.has_plugin(
DataJobDefaultHookImplPlugin.__name__
):
self._core_context.plugin_registry.load_plugin_with_hooks_impl(
DataJobDefaultHookImplPlugin(), DataJobDefaultHookImplPlugin.__name__
)
from vdk.internal.builtin_plugins.templates.template_impl import TemplatesImpl
job_context = JobContext(
name=self._name,
job_directory=self._data_job_directory,
core_context=self._core_context,
job_args=JobArguments(args),
templates=TemplatesImpl(
job_name=self.name, core_context=self._core_context
),
)
self._plugin_hook.initialize_job(context=job_context)
start_time = datetime.utcnow()
try:
return self._plugin_hook.run_job(context=job_context)
except BaseException as ex:
blamee = whom_to_blame(ex, __file__, job_context.job_directory)
errors.log_exception(
blamee,
log,
what_happened=f"Data Job {self._name} completed with error.",
why_it_happened=errors.MSG_WHY_FROM_EXCEPTION(ex),
consequences="I will not process the remaining steps (if any), "
"and this Data Job execution will be marked as failed.",
countermeasures="See exception and fix the root cause, so that the exception does "
"not appear anymore.",
exception=ex,
)
execution_result = ExecutionResult(
self._name,
self._core_context.state.get(CommonStoreKeys.EXECUTION_ID),
start_time,
datetime.utcnow(),
ExecutionStatus.ERROR,
ex,
[],
)
return execution_result
finally: # TODO: we should pass execution result to finalize_job somehow ...
self._plugin_hook.finalize_job(context=job_context)
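# --- Usage sketch (illustrative only, not part of the original module) ------
# A minimal, hypothetical way to drive DataJob directly, assuming a CoreContext
# instance is already available from the surrounding application; in practice
# DataJobFactory (see the class docstring) is the preferred entry point.
#
#   job = DataJob(pathlib.Path("/path/to/job"), core_context)
#   result = job.run({"some_arg": "value"})   # returns an ExecutionResult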
|
from Snake_Game import *
from Feed_Forward_Neural_Network import *
import numpy as np  # np is used below; the wildcard imports above may also provide it
def run_game_with_ML(display, clock, weights):
max_score = 0
avg_score = 0
test_games = 1
score1 = 0
steps_per_game = 2500
score2 = 0
for _ in range(test_games):
snake_start, snake_position, apple_position, score = starting_positions()
count_same_direction = 0
prev_direction = 0
for _ in range(steps_per_game):
current_direction_vector, is_front_blocked, is_left_blocked, is_right_blocked = blocked_directions(
snake_position)
angle, snake_direction_vector, apple_direction_vector_normalized, snake_direction_vector_normalized = angle_with_apple(
snake_position, apple_position)
predictions = []
predicted_direction = np.argmax(np.array(forward_propagation(np.array(
[is_left_blocked, is_front_blocked, is_right_blocked, apple_direction_vector_normalized[0],
snake_direction_vector_normalized[0], apple_direction_vector_normalized[1],
snake_direction_vector_normalized[1]]).reshape(-1, 7), weights))) - 1
if predicted_direction == prev_direction:
count_same_direction += 1
else:
count_same_direction = 0
prev_direction = predicted_direction
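# Heading = vector from the second body segment to the head; below it is rotated
# 90 degrees one way or the other (or kept) according to the prediction (-1, 0, +1).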
new_direction = np.array(snake_position[0]) - np.array(snake_position[1])
if predicted_direction == -1:
new_direction = np.array([new_direction[1], -new_direction[0]])
if predicted_direction == 1:
new_direction = np.array([-new_direction[1], new_direction[0]])
button_direction = generate_button_direction(new_direction)
next_step = snake_position[0] + current_direction_vector
if collision_with_boundaries(snake_position[0]) == 1 or collision_with_self(next_step.tolist(),
snake_position) == 1:
score1 += -150
break
else:
score1 += 0
snake_position, apple_position, score = play_game(snake_start, snake_position, apple_position,
button_direction, score, display, clock)
if score > max_score:
max_score = score
if count_same_direction > 8 and predicted_direction != 0:
score2 -= 1
else:
score2 += 2
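# Overall fitness: accumulated collision penalty (score1) + per-step steering
# penalty/bonus (score2) + a large weight on the best in-game score.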
return score1 + score2 + max_score * 5000 |
""" This post-process selects which technologies can provide reserves"""
# Standard packages
import os
import shutil
# Third-party packages
import pandas as pd
from switch_model.wecc.get_inputs.register_post_process import post_process_step
@post_process_step(
msg="Removing fossil fuels from reserves.",
)
def post_process(_):
"""This function sets to zero the column that allows each candidate technology to
provide"""
fname = "generation_projects_info.csv"
df = pd.read_csv(fname)
# Energy sources to exclude from reserves
filter_techs = ["ResidualFuelOil", "Gas", "DistillateFuelOil", "Coal"]
# Set to zero column that allows technology to provide reserves
df.loc[
df["gen_energy_source"].isin(filter_techs), "gen_can_provide_cap_reserves"
] = 0
# Save file again
df.to_csv(fname, index=False)
|
#!/dlmp/sandbox/cgslIS/rohan/Python-2.7.11/python
"""
This script will assess and plot the difference
in allele frequency in the vcf
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from matplotlib_venn import *
import re
import pprint
from scipy.stats import norm
import scipy.stats as stats
import logging
import seaborn as sns
#TODO: Dataframe for GATK
def main(dict1, dict2, sample_name1, sample_name2, annotate,logger,
dict3=False, dict4=False, sample_name3=False, sample_name4=False):
plot_freq(dict1, dict2, sample_name1, sample_name2, annotate, logger,
dict3=dict3, dict4=dict4, sample_name3=sample_name3, sample_name4=sample_name4)
plot_dp_AF(dict1, dict2, sample_name1, sample_name2, annotate, logger,
dict3=dict3, dict4=dict4, sample_name3=sample_name3, sample_name4=sample_name4)
plot_dp_bar(dict1, dict2, sample_name1, sample_name2, annotate, logger,
dict3=dict3, dict4=dict4, sample_name3=sample_name3, sample_name4=sample_name4)
plot_violin_af(dict1, dict2, sample_name1, sample_name2, annotate, logger,
dict3=dict3, dict4=dict4, sample_name3=sample_name3, sample_name4=sample_name4)
plot_dist_af(dict1, dict2, sample_name1, sample_name2,annotate, logger)
# dict3=False, dict4=False, sample_name3=False, sample_name4=False)
# plot_dist_dp(dict1, dict2, sample_name1, sample_name2,annotate, logger)
# dict3=False, dict4=False, sample_name3=False, sample_name4=False)
# plot_af(dict1, dict2, sample_name1, sample_name2,annotate, logger,
# dict3=False, dict4=False, sample_name3=False, sample_name4=False)
def dict_to_dataframe(some_dict, logger):
df = pd.DataFrame(some_dict.items(), columns=['position', 'value'])
df['frequency'] = df.ix[:,1].str[0].str[4]
df['DP'] = df.ix[:,1].str[0].str[5]
return df
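# Note (inferred from the positional indexing above): each dict value is expected
# to be a list whose first element is a sequence holding the allele frequency at
# index 4 and the read depth at index 5; the GATK variant below reads the
# frequency from index 3 and the depth from index 2.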
def dict_to_dataframe_gatk(some_dict, logger):
df = pd.DataFrame(some_dict.items(), columns=['position', 'value'])
df['frequency'] = df.ix[:,1].str[0].str[3]
df['DP'] = df.ix[:,1].str[0].str[2]
return df
def plot_af(dict1, dict2, sample_name1, sample_name2,annotate, logger,
dict3=False, dict4=False, sample_name3=False, sample_name4=False):
fi_df = dict_to_dataframe_gatk(dict1, logger)
si_df = dict_to_dataframe_gatk(dict2, logger)
fig = plt.figure(figsize=(20,16))
ax1 = fig.add_subplot(1,1,1)
fi_df = fi_df.sort_index(by=['frequency'], ascending=[True])
x1 = np.linspace(0, 1, len(fi_df['frequency']))
si_df = si_df.sort_index(by=['frequency'],ascending=[True])
x2 = np.linspace(0, 1, len(si_df['frequency']))
ax1.scatter(x1, fi_df['frequency'], color='k', marker='x', s=350, vmin=0.,vmax=0.6,label=sample_name1)
ax1.scatter(x2, si_df['frequency'], color='g', marker='o', s=200, vmin=0.,vmax=0.6,label=sample_name2)
if dict3:
ti_df = dict_to_dataframe_gatk(dict3, logger)
x3 = np.linspace(0, 1, len(ti_df['frequency']))
ax1.scatter(x3, ti_df['frequency'], color='b', marker='D', s=300, vmin=0.,vmax=0.6,label=sample_name3)
if dict4:
fo_df = dict_to_dataframe_gatk(dict4, logger)
x4 = np.linspace(0, 1, len(fo_df['frequency']))
ax1.scatter(x4, fo_df['frequency'], color='m', marker='d', s=300, vmin=0.,vmax=0.6,label=sample_name4)
plt.legend(loc='upper right')
if annotate:
annotate_plot(fi_df, ax1, 'total')
annotate_plot(si_df, ax1, 'total')
ax1.set_title('Variant frequency correlation', fontsize=20)
ax1.set_xlabel('Total Variants', fontsize=20)
ax1.set_ylabel('Variant frequency', fontsize=20)
plt.savefig('Allele_frequency_trend.png')
#TODO:dataframe for GATK
def plot_freq(dict1, dict2, sample_name1, sample_name2,annotate, logger,
dict3=False, dict4=False, sample_name3=False, sample_name4=False):
fi_df = dict_to_dataframe_gatk(dict1, logger)
si_df = dict_to_dataframe_gatk(dict2, logger)
fig = plt.figure(figsize=(20,16))
ax1 = fig.add_subplot(1,1,1)
ax1.scatter(fi_df.index, fi_df['frequency'], color='r', marker='x', s=350, vmin=0.,vmax=0.6,label=sample_name1)
ax1.scatter(si_df.index, si_df['frequency'], color='g', marker='o', s=200, vmin=0.,vmax=0.6,label=sample_name2)
if dict3:
ti_df = dict_to_dataframe_gatk(dict3, logger)
ax1.scatter(ti_df.index, ti_df['frequency'], color='b', marker='D', s=300, vmin=0.,vmax=0.6,label=sample_name3)
if dict4:
fo_df = dict_to_dataframe_gatk(dict4, logger)
ax1.scatter(fo_df.index, fo_df['frequency'], color='m', marker='d', s=300, vmin=0.,vmax=0.6,label=sample_name4)
plt.legend(loc='upper left')
if annotate:
annotate_plot(fi_df, ax1, 'total')
annotate_plot(si_df, ax1, 'total')
ax1.set_title('Variant frequency correlation', fontsize=20)
ax1.set_xlabel('Total Variants', fontsize=20)
ax1.set_ylabel('Variant frequency', fontsize=20)
plt.savefig('Allele_frequency_total_variants.png')
def plot_dp_AF(dict1, dict2, sample_name1, sample_name2,annotate, logger,
dict3=False, dict4=False, sample_name3=False, sample_name4=False):
fi_df = dict_to_dataframe(dict1, logger)
si_df = dict_to_dataframe(dict2, logger)
fig = plt.figure(figsize=(20,16))
ax1 = fig.add_subplot(1,1,1)
ax1.scatter(fi_df['DP'], fi_df['frequency'], color='k', marker='x', s=350, vmin=0., vmax=0.6, label=sample_name1)
ax1.scatter(si_df['DP'], si_df['frequency'], color='g', marker='o', s=200, vmin=0., vmax=0.6, label=sample_name2)
if dict3:
ti_df = dict_to_dataframe_gatk(dict3, logger)
ax1.scatter(ti_df.index, ti_df['frequency'], color='b', marker='D', s=300, vmin=0.,vmax=0.6,label=sample_name3)
if dict4:
fo_df = dict_to_dataframe_gatk(dict4, logger)
ax1.scatter(fo_df.index, fo_df['frequency'], color='m', marker='d', s=300, vmin=0.,vmax=0.6,label=sample_name4)
plt.legend(loc='upper left')
if annotate:
annotate_plot(fi_df, ax1)
annotate_plot(si_df, ax1)
ax1.set_title('Variant frequency correlation', fontsize=20)
ax1.set_xlabel('DP', fontsize=20)
ax1.set_ylabel('Variant frequency', fontsize=20)
try:
plt.savefig('allele_frequency_DP.png')
except ValueError:
logger.debug("turn off annotation too many variants for plots")
def plot_dp_bar(dict1, dict2, sample_name1, sample_name2,annotate, logger,
dict3=False, dict4=False, sample_name3=False, sample_name4=False):
fi_df = dict_to_dataframe_gatk(dict1, logger)
si_df = dict_to_dataframe_gatk(dict2, logger)
fig = plt.figure(figsize=(20,16))
ax1 = fig.add_subplot(1,1,1)
width = 0.50
reacts1 = ax1.bar(fi_df.index, fi_df['DP'], width, color='green')
reacts2 = ax1.bar(si_df.index, si_df['DP'], width, color='red')
if dict3:
ti_df = dict_to_dataframe_gatk(dict3, logger)
reacts3 = ax1.bar(ti_df.index, ti_df['DP'], width, color='blue')
if dict4:
fo_df = dict_to_dataframe_gatk(dict4, logger)
reacts4 = ax1.bar(fo_df.index, fo_df['DP'], width, color='black')
plt.legend(loc='upper left')
ax1.set_title('DP Distribution', fontsize=20)
ax1.set_xlabel('Total Variants', fontsize=20)
ax1.set_ylabel('DP', fontsize=20)
ax1.legend((reacts1[0], reacts2[0]),(sample_name1, sample_name2))
try:
plt.savefig('read_depth_distribution.png')
except ValueError:
logger.debug("turn off annotation too many variants for plots")
def plot_violin_af(dict1, dict2, sample_name1, sample_name2,annotate, logger,
dict3=False, dict4=False, sample_name3=False, sample_name4=False):
fi_df = dict_to_dataframe_gatk(dict1, logger)
si_df = dict_to_dataframe_gatk(dict2, logger)
if len(fi_df['frequency']) == len(si_df['frequency']):
df = pd.DataFrame({sample_name1: fi_df['frequency'], sample_name2: si_df['frequency']})
# df = pd.concat([fi_df['frequency'], si_df['frequency']],
fig = plt.figure(figsize=(20,16))
sns.set(font_scale=1.8)
ax = sns.violinplot(df)
ax.set(xlabel='frequency', ylabel='sample')
plt.savefig(sample_name1 + "_" + sample_name2 +'_Allele_frequency_distribution.png')
if dict3:
ti_df = dict_to_dataframe_gatk(dict3, logger)
if len(fi_df['frequency']) == len(si_df['frequency']) == len(ti_df['frequency']):
df = pd.DataFrame({sample_name1: fi_df['frequency'], sample_name2: si_df['frequency'],
sample_name3: ti_df['frequency']})
fig = plt.figure(figsize=(20,16))
sns.set(font_scale=1.8)
ax = sns.violinplot(df)
ax.set(xlabel='frequency', ylabel='sample')
plt.savefig(sample_name1 + "_" + sample_name2 + "_" + sample_name3 + '_Allele_frequency_distribution.png')
else:
pass
def plot_dist_af(dict1, dict2, sample_name1, sample_name2,annotate, logger,
dict3=False, dict4=False, sample_name3=False, sample_name4=False):
fi_df = dict_to_dataframe_gatk(dict1, logger)
si_df = dict_to_dataframe_gatk(dict2, logger)
print("si_df")
print(si_df)
fig = plt.figure(figsize=(20,16))
sns.set(font_scale=1.8)
ax1 = sns.distplot(fi_df.frequency.dropna())
ax1 = sns.distplot(si_df.frequency.dropna())
ax1.set(xlabel='frequency', ylabel='sample')
ax1.set_title(sample_name1 + " vs " + sample_name2)
plt.legend(loc='upper left')
if dict3:
ti_df = dict_to_dataframe_gatk(dict3, logger)
ax1.set_title(sample_name1 + " vs " + sample_name2 + " vs " + sample_name3)
ax1 = sns.distplot(ti_df.frequency.dropna())
if dict3 and dict4:
fo_df = dict_to_dataframe_gatk(dict4, logger)
ax1 = sns.distplot(fo_df.frequency.dropna())
plt.savefig('sampleAllele_frequency_distribution_dist.png')
def plot_dist_dp(dict1, dict2, sample_name1, sample_name2,annotate, logger,
dict3=False, dict4=False, sample_name3=False, sample_name4=False):
fi_df = dict_to_dataframe_gatk(dict1, logger)
si_df = dict_to_dataframe_gatk(dict2, logger)
fig = plt.figure(figsize=(20,16))
sns.set(font_scale=1.8)
ax1 = sns.distplot(fi_df['DP'].dropna())
ax1.set(xlabel='DP', ylabel='sample')
ax1.set_title("sample " + sample_name1)
ax2 = sns.distplot(si_df['DP'].dropna())
ax2.set(xlabel='DP', ylabel='sample')
ax2.set_title(sample_name1 + " vs " + sample_name2)
if dict3:
ti_df = dict_to_dataframe_gatk(dict3, logger)
ax1.set_title(sample_name1 + " vs " + sample_name2 + " vs " + sample_name3)
ax1 = sns.distplot(ti_df['DP'].dropna())
if dict3 and dict4:
fo_df = dict_to_dataframe_gatk(dict4, logger)
ax1 = sns.distplot(fo_df['DP'].dropna())
plt.savefig('sample_DP_dist.png')
def annotate_plot(some_df, plot, total=False):
annotate = some_df['position'].tolist()
if total:
index = some_df.index.tolist()
freq = some_df['frequency'].tolist()
texts = []
for i, txt in enumerate(annotate):
texts.append(plot.text(index[i], freq[i], txt, rotation=45))
else:
index = some_df['DP'].tolist()
freq = some_df['frequency'].tolist()
texts = []
for i, txt in enumerate(annotate):
texts.append(plot.text(index[i], freq[i], txt, rotation=45))
if __name__ == "__main__":
main()
|
from . import pyoptsparse
from . import scipy |
import cherrypy
import CanvasLMSTool
import memcache
# This file stores my CANVAS_CLIENT_ID and CANVAS_CLIENT_SECRET. I'm not going to release that on Github
from secretglobals import *
# Do not include the trailing slash - this is where your Canvas installation is located
CANVAS_URL = 'https://my-canvas-installation.com'
MC = memcache.Client(['127.0.0.1:11211'], debug=0)
cherrypy.tools.canvas = CanvasLMSTool.CanvasLMSTool(CANVAS_URL, CANVAS_CLIENT_ID, CANVAS_CLIENT_SECRET, MC)
canvas = cherrypy.tools.canvas
class testCanvasTool(object):
def __init__(self):
pass
@cherrypy.tools.canvas()
@cherrypy.expose
def index(self, **kwargs):
returnvalue = 'You are logged in as: ' + kwargs['canvas_user']['name'] + '\n'
returnvalue += '<h1>Your Courses</h1>\n'
# Get the Canvas user's list of courses and cache the result for 300 seconds
courses = canvas.api('get', '/api/v1/courses', ttl=300)
for course in courses:
returnvalue += course.get('name', str(course['id'])) + ' <br>\n'
return returnvalue
cherrypy.config.update({'server.socket_host': '0.0.0.0',
'server.socket_port': 80,
})
cherrypy.quickstart(testCanvasTool(), '/', 'testCanvasTool.conf')
|
"""Plot gps information."""
from typing import List, Optional, Sequence
import json
import numpy as np
import plotly.express as px
from constants import (
BLUE_COLOR,
DEFAULT_COLOR,
DEFAULT_SIZE,
LATITUDE,
LONGITUDE,
MAPS_STYLE,
OPACITY_LEVEL,
SIZE_MAX,
)
class PlotlyAutoZoomer:
"""Class to find zoom level automatically based on long / lat diff as reference."""
LONG_LAT_DIFFS = (0.00001, 0.0001, 0.0005, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.5, 1, 2, 3)
ZOOM_LEVELS = (24, 20, 18, 17, 14, 12, 11, 10, 9, 8, 7, 6, 4)
@classmethod
def calc_zoom_level(cls, lats: List[float], longs: List[float]) -> int:
"""Calculate zoom level based on lats and lons values.
Args:
lats : list of latitudes
longs : list of longitudes
Returns:
zoom level for plotly
"""
lats = np.array(lats)
longs = np.array(longs)
lat_diff = lats.max() - lats.min()
long_diff = longs.max() - longs.min()
max_diff = max(lat_diff, long_diff)
return int(np.round(np.interp(max_diff, cls.LONG_LAT_DIFFS, cls.ZOOM_LEVELS)))
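# Illustrative use (made-up coordinates): a spread of roughly 0.01 degrees maps
# to zoom level 14 through the interpolation tables above, e.g.
#   PlotlyAutoZoomer.calc_zoom_level([48.137, 48.147], [11.57, 11.58]) -> 14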
def plot_gps(
longs: Sequence[float],
lats: Sequence[float],
colours: Optional[List[str]] = None,
sizes: Optional[List[int]] = None,
show: bool = False,
):
"""Plot gps coordinates on map.
Args:
longs : gps points longitudes
lats : gps points latitudes
colours : colour of points on the map
sizes : size of points on the map
show : show map on browser if True, otherwise wait for further plots
"""
assert len(longs) == len(lats)
if not colours:
colours = [DEFAULT_COLOR] * len(longs)
if not sizes:
sizes = [DEFAULT_SIZE] * len(longs)
fig = px.scatter_mapbox(
lat=lats,
lon=longs,
color=colours,
size=sizes,
zoom=PlotlyAutoZoomer.calc_zoom_level(lats, longs),
size_max=SIZE_MAX,
opacity=OPACITY_LEVEL,
)
fig.update_layout(mapbox_style=MAPS_STYLE)
fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0})
if show:
fig.show()
def plot_gps_track_from_dataset_sequence(oxts_values: np.ndarray):
"""Plot GPS track on the map from dataset sequence.
Args:
oxts_values : OxTS values
"""
longs, lats = oxts_values[LONGITUDE], oxts_values[LATITUDE]
colors = [BLUE_COLOR] * len(longs)
sizes = [1] * len(longs)
plot_gps(longs, lats, colors, sizes, show=True)
def show_gps_for_all_frames(filename: str):
"""Show GPS points for all extracted frames in dataset.
Args:
filename: path to JSON file containing GPS coordinates for all frames in dataset
"""
with open(filename) as opened:
content = json.load(opened)
lats, lons = [], []
vehicles = set()
for frame_id, points in content.items():
vehicles.add(frame_id.split("_")[0])
lons.append(points[0])
lats.append(points[1])
print(
f"Total frames in dataset: {len(content)}, vehicles: {vehicles}",
)
plot_gps(lons, lats, ["blue"] * len(lons), [1] * len(lons), show=True)
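# Expected JSON layout for show_gps_for_all_frames (inferred from the parsing
# above; values are illustrative): keys are "<vehicle>_<frame>" identifiers and
# values are [longitude, latitude] pairs, e.g.
#   {"car1_000001": [11.5755, 48.1372], "car1_000002": [11.5761, 48.1375]}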
|
"""stub action for load
"""
from . import _actions as actions
from .run import Action as BaseAction
class MissingAttribute:
# pylint: disable=too-few-public-methods
"""Raise an attribute error for any get"""
def __get__(self, instance, owner):
raise AttributeError()
@actions.register
class Action(MissingAttribute, BaseAction):
# pylint: disable=too-many-instance-attributes
""":load"""
KEGEX = r"""(?x)
^
(?P<load>l(?:oad)?
(\s(?P<params_load>\S+))?)
$"""
run_stdout = MissingAttribute() # type: ignore
|
import json, requests, urllib3
from flask import Flask, request, jsonify
from datetime import datetime
import time
import traceback
import os
import redis
import cPickle as pickle
import virtualservice_static
import serviceengine_static
import servicediscovery
import pool_static
import controller_static
from multiprocessing import Process
if hasattr(requests.packages.urllib3, 'disable_warnings'):
requests.packages.urllib3.disable_warnings()
if hasattr(urllib3, 'disable_warnings'):
urllib3.disable_warnings()
#------------------------------------
avi_controller = os.environ['AVICONTROLLER']
avi_user = os.environ['AVIUSER']
avi_pass = os.environ['AVIPASSWORD']
#------------------------------------
#----- entity lists greater than this value will be replaced with wildcard
#----- interval in seconds to refresh the metrics cache
if 'EN_METRIC_REFRESH_INTERVAL' in os.environ:
metric_refresh_interval = int(os.environ['EN_METRIC_REFRESH_INTERVAL'])
if metric_refresh_interval < 60:
metric_refresh_interval = 60
else:
metric_refresh_interval = 300
#----- When refreshing cache, if wait is true the cache is refreshed first before returning metrics
#----- if wait is False, metrics from current cache are returned and then the cache is refreshed
#----- set to false if very large config resulting in timeouts while cache is being refreshed
if 'EN_WAIT_FOR_CACHE' in os.environ:
wait_for_cache = os.environ['EN_WAIT_FOR_CACHE'].lower()
if 'false' in wait_for_cache:
wait_for_cache = False
else:
wait_for_cache = True
else:
wait_for_cache = True
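#----- Example (hypothetical) environment for a large configuration:
#----- AVICONTROLLER=controller.example.com AVIUSER=admin AVIPASSWORD=***
#----- EN_METRIC_REFRESH_INTERVAL=600 EN_WAIT_FOR_CACHE=false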
#------------------------------------
#----- Default List of Metrics for each entity type
default_vs_metric_list = [
'l4_client.apdexc',
'l4_client.avg_bandwidth',
'l4_client.avg_application_dos_attacks',
'l4_client.avg_complete_conns',
'l4_client.avg_connections_dropped',
'l4_client.avg_new_established_conns',
'l4_client.avg_policy_drops',
'l4_client.avg_rx_pkts',
'l4_client.avg_tx_pkts',
'l4_client.avg_rx_bytes',
'l4_client.avg_tx_bytes',
'l4_client.max_open_conns',
'l4_client.avg_lossy_connections',
'l7_client.avg_complete_responses',
'l7_client.avg_client_data_transfer_time',
'l7_client.avg_client_txn_latency',
'l7_client.sum_application_response_time',
'l7_client.avg_resp_4xx_avi_errors',
'l7_client.avg_resp_5xx_avi_errors',
'l7_client.avg_resp_2xx',
'l7_client.avg_resp_4xx',
'l7_client.avg_resp_5xx',
'l4_client.avg_total_rtt',
'l7_client.avg_page_load_time',
'l7_client.apdexr',
'l7_client.avg_ssl_handshakes_new',
'l7_client.avg_ssl_connections',
'l7_client.sum_get_reqs',
'l7_client.sum_post_reqs',
'l7_client.sum_other_reqs',
'l7_client.avg_frustrated_responses',
'l7_client.avg_waf_attacks',
'l7_client.pct_waf_attacks',
'l7_client.sum_total_responses',
'l7_client.avg_waf_rejected',
'l7_client.avg_waf_evaluated',
'l7_client.avg_waf_matched',
'l7_client.avg_waf_disabled',
'l7_client.pct_waf_disabled',
'l7_client.avg_http_headers_count',
'l7_client.avg_http_headers_bytes',
'l7_client.pct_get_reqs',
'l7_client.pct_post_reqs',
'l7_client.avg_http_params_count',
'l7_client.avg_uri_length',
'l7_client.avg_post_bytes',
'dns_client.avg_complete_queries',
'dns_client.avg_domain_lookup_failures',
'dns_client.avg_tcp_queries',
'dns_client.avg_udp_queries',
'dns_client.avg_udp_passthrough_resp_time',
'dns_client.avg_unsupported_queries',
'dns_client.pct_errored_queries',
'dns_client.avg_domain_lookup_failures',
'dns_client.avg_avi_errors',
'dns_server.avg_complete_queries',
'dns_server.avg_errored_queries',
'dns_server.avg_tcp_queries',
'dns_server.avg_udp_queries',
'l4_server.avg_rx_pkts',
'l4_server.avg_tx_pkts',
'l4_server.avg_rx_bytes',
'l4_server.avg_tx_bytes',
'l4_server.avg_bandwidth',
'l7_server.avg_complete_responses',
'l4_server.avg_new_established_conns',
'l4_server.avg_pool_open_conns',
'l4_server.avg_pool_complete_conns',
'l4_server.avg_open_conns',
'l4_server.max_open_conns',
'l4_server.avg_errored_connections',
'l4_server.apdexc',
'l4_server.avg_total_rtt',
'l7_server.avg_resp_latency',
'l7_server.apdexr',
'l7_server.avg_application_response_time',
'l7_server.pct_response_errors',
'l7_server.avg_frustrated_responses',
'l7_server.avg_total_requests',
'healthscore.health_score_value'
]
default_vs_metric_list = ','.join(default_vs_metric_list)
#------
default_se_metric_list = [
'se_if.avg_bandwidth',
'se_stats.avg_connection_mem_usage',
'se_stats.avg_connections',
'se_stats.avg_connections_dropped',
'se_stats.avg_cpu_usage',
'se_stats.avg_disk1_usage',
'se_stats.avg_mem_usage',
'se_stats.avg_dynamic_mem_usage',
'se_stats.avg_persistent_table_usage',
'se_stats.avg_rx_bandwidth',
'se_if.avg_rx_bytes',
'se_if.avg_rx_pkts',
'se_if.avg_rx_pkts_dropped_non_vs',
'se_if.avg_tx_pkts',
'se_if.avg_tx_bytes',
'se_stats.avg_ssl_session_cache_usage',
'se_if.avg_connection_table_usage',
'se_stats.max_se_bandwidth',
'se_stats.avg_eth0_bandwidth',
'se_stats.pct_syn_cache_usage',
'se_stats.avg_packet_buffer_usage',
'se_stats.avg_packet_buffer_header_usage',
'se_stats.avg_packet_buffer_large_usage',
'se_stats.avg_packet_buffer_small_usage',
'healthscore.health_score_value'
]
default_se_metric_list = ','.join(default_se_metric_list)
#------
default_controller_metric_list = [
'controller_stats.avg_cpu_usage',
'controller_stats.avg_disk_usage',
'controller_stats.avg_mem_usage'
]
default_controller_metric_list = ','.join(default_controller_metric_list)
#----
default_pool_metric_list = [
'l4_server.avg_rx_pkts',
'l4_server.avg_tx_pkts',
'l4_server.avg_rx_bytes',
'l4_server.avg_tx_bytes',
'l4_server.avg_bandwidth',
'l7_server.avg_complete_responses',
'l4_server.avg_new_established_conns',
'l4_server.avg_pool_open_conns',
'l4_server.avg_pool_complete_conns',
'l4_server.avg_open_conns',
'l4_server.max_open_conns',
'l4_server.avg_errored_connections',
'l4_server.apdexc',
'l4_server.avg_total_rtt',
'l7_server.avg_resp_latency',
'l7_server.apdexr',
'l7_server.avg_application_response_time',
'l7_server.pct_response_errors',
'l7_server.avg_frustrated_responses',
'l7_server.avg_total_requests',
'healthscore.health_score_value'
]
default_pool_metric_list = ','.join(default_pool_metric_list)
#------------------------------------
def avi_login():
global login
try:
if r.get('avi_login') == None:
login = requests.post('https://%s/login' %avi_controller, verify=False, data={'username': avi_user, 'password': avi_pass},timeout=15)
r.set('avi_login',pickle.dumps(login))
return login
else:
cookies=dict()
login = pickle.loads(r.get('avi_login'))
if 'avi-sessionid' in login.cookies.keys():
cookies['avi-sessionid'] = login.cookies['avi-sessionid']
else:
cookies['sessionid'] = login.cookies['sessionid']
headers = ({"X-Avi-Tenant": "admin", 'content-type': 'application/json'})
resp = requests.get('https://%s/api/cluster' %avi_controller, verify=False, headers = headers,cookies=cookies,timeout=5)
if resp.status_code == 200:
return login
else:
login = requests.post('https://%s/login' %avi_controller, verify=False, data={'username': avi_user, 'password': avi_pass},timeout=15)
r.set('avi_login',pickle.dumps(login))
return login
except:
login = requests.post('https://%s/login' %avi_controller, verify=False, data={'username': avi_user, 'password': avi_pass},timeout=15)
r.set('avi_login',pickle.dumps(login))
return login
def avi_request(avi_api,tenant,api_version='17.2.1'):
cookies=dict()
if 'avi-sessionid' in login.cookies.keys():
cookies['avi-sessionid'] = login.cookies['avi-sessionid']
else:
cookies['sessionid'] = login.cookies['sessionid']
headers = ({'X-Avi-Tenant': '%s' %tenant, 'content-type': 'application/json', 'X-Avi-Version': '%s' %api_version})
return requests.get('https://%s/api/%s' %(avi_controller,avi_api), verify=False, headers = headers,cookies=cookies,timeout=50)
def avi_post(api_url,tenant,payload,api_version='17.2.1'):
cookies=dict()
if 'avi-sessionid' in login.cookies.keys():
cookies['avi-sessionid'] = login.cookies['avi-sessionid']
else:
cookies['sessionid'] = login.cookies['sessionid']
headers = ({"X-Avi-Tenant": "%s" %tenant, 'content-type': 'application/json','referer': 'https://%s' %avi_controller, 'X-CSRFToken': dict(login.cookies)['csrftoken'],'X-Avi-Version':'%s' %api_version})
cookies['csrftoken'] = login.cookies['csrftoken']
return requests.post('https://%s/api/%s' %(avi_controller,api_url), verify=False, headers = headers,cookies=cookies, data=json.dumps(payload),timeout=50)
def remove_version_specific_metrics(entity_type,metric_list):
try:
#----- Generate List of Available Metrics
if r.get('available_metrics_last_poll_time') == None:
r.set('available_metrics_last_poll_time', (time.time()-3601))
if r.get('metric_id_polling') == None:
r.set('metric_id_polling', 'False')
if (time.time() - float(r.get('available_metrics_last_poll_time')) > 3600 or r.get('available_metrics') == None) and r.get('metric_id_polling') == 'False':
r.set('metric_id_polling', 'True')
resp = avi_request('analytics/metric_id',login.json()['tenants'][0]['name']).json()
_available_metrics = {}
for m in resp['results']:
_available_metrics[m['name']]=m['entity_types']
r.set('available_metrics', pickle.dumps(_available_metrics))
r.set('available_metrics_last_poll_time', time.time())
r.set('metric_id_polling', 'False')
available_metrics = pickle.loads(r.get('available_metrics'))
_metrics = metric_list.replace(' ','').split(',')
_metric_list = []
if entity_type == 'virtualservice':
for m in _metrics:
if m.lower() in available_metrics:
if 'virtualservice' in available_metrics[m.lower()]:
_metric_list.append(m)
elif entity_type == 'serviceengine':
for m in _metrics:
if m.lower() in available_metrics:
if 'serviceengine' in available_metrics[m.lower()]:
_metric_list.append(m)
elif entity_type == 'pool':
for m in _metrics:
if m.lower() in available_metrics:
if 'pool' in available_metrics[m.lower()]:
_metric_list.append(m)
elif entity_type == 'controller':
for m in _metrics:
if m.lower() in available_metrics:
if 'cluster' in available_metrics[m.lower()]:
_metric_list.append(m)
_metric_list = ','.join(_metric_list)
return _metric_list
except:
r.set('metric_id_polling', 'False')
print(str(datetime.now())+' '+avi_controller+': remove_version_specific_metrics encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
#----------
def generate_params_list(request):
d=request.args.to_dict()
tenant_list = []
all_tenants = []
ten_inv = avi_request('tenant?fields=name&page_size=200','admin')
if ten_inv.status_code != 403:
resp = ten_inv.json()
page_number = 1
while 'next' in resp:
page_number += 1
resp = avi_request('tenant?fields=name&page_size=200&page='+str(page_number),'admin').json()
for v in resp['results']:
ten_inv.json()['results'].append(v)
for t in ten_inv.json()['results']:
all_tenants.append(t['name'])
else:
for t in login.json()['tenants']:
all_tenants.append(t['name'])
if 'tenant' in d:
for t in all_tenants:
if t.lower() in request.args.get('tenant').lower().split(','):
tenant_list.append(t)
else:
for t in all_tenants:
tenant_list.append(t)
if 'cloud' in d:
cloud_list = request.args.get('cloud').lower().split(',')
else:
cloud_list = ['*']
if 'entity_uuid' in d:
uuid_list = request.args.get('entity_uuid').lower().split(',')
else:
uuid_list = '*'
r.set('tenant_list', pickle.dumps(all_tenants))
return tenant_list,cloud_list, uuid_list
#---------------------------------------------------------------------------------------------------------
#-----------------------------------
#----- Service engine statistics
#----- build lists for requested params; allows multiple job servers to scrape different metrics
def serviceengine_metrics_params(request):
if r.get('se_entity_uuid') == None:
r.set('se_entity_uuid',pickle.dumps({}))
if r.get('se_metric_id') == None:
r.set('se_metric_id',pickle.dumps({}))
if r.get('se_tenant') == None:
r.set('se_tenant',pickle.dumps({}))
if r.get('se_cloud') == None:
r.set('se_cloud',pickle.dumps({}))
if r.get('se_runtime') == None:
r.set('se_runtime',pickle.dumps({}))
d=request.args.to_dict()
tenant_list,cloud_list,uuid_list = generate_params_list(request)
if 'metric_id' in d:
metric_id = request.args.get('metric_id').lower()
else:
metric_id = default_se_metric_list
se_metric_list = remove_version_specific_metrics('serviceengine',metric_id)
_metric_list = se_metric_list.split(',')
#---- define metric id list
_se_metric_id = pickle.loads(r.get('se_metric_id'))
for m in _metric_list:
_se_metric_id[m] = time.time()
_removal = []
for m in _se_metric_id:
if (time.time() - _se_metric_id[m]) > (metric_refresh_interval*2):
_removal.append(m)
for m in _removal:
_se_metric_id.pop(m, None)
r.set('se_metric_id', pickle.dumps(_se_metric_id))
#---- define tenant list
_tenant_dict = pickle.loads(r.get('se_tenant'))
for t in tenant_list:
_tenant_dict[t] = time.time()
_removal = []
for t in _tenant_dict:
if (time.time() - _tenant_dict[t]) > (metric_refresh_interval*2):
_removal.append(t)
for t in _removal:
_tenant_dict.pop(t, None)
r.set('se_tenant', pickle.dumps(_tenant_dict))
#---- define se runtime for tenant
_se_runtime = pickle.loads(r.get('se_runtime'))
if 'runtime' in d:
if request.args.get('runtime').lower() == 'true':
for t in _tenant_dict:
_se_runtime[t] = time.time()
_removal = []
for t in _se_runtime:
if (time.time() - _se_runtime[t]) > (metric_refresh_interval*2):
_removal.append(t)
for t in _removal:
_se_runtime.pop(t, None)
r.set('se_runtime', pickle.dumps(_se_runtime))
#---- define cloud list
_cloud_dict = pickle.loads(r.get('se_cloud'))
for c in cloud_list:
_cloud_dict[c] = time.time()
_removal = []
for c in _cloud_dict:
if (time.time() - _cloud_dict[c]) > (metric_refresh_interval*2):
_removal.append(c)
for c in _removal:
_cloud_dict.pop(c, None)
r.set('se_cloud', pickle.dumps(_cloud_dict))
#---- define uuid list
_uuid_dict = pickle.loads(r.get('se_entity_uuid'))
for u in uuid_list:
_uuid_dict[u] = time.time()
_removal = []
for u in _uuid_dict:
if (time.time() - _uuid_dict[u]) > (metric_refresh_interval*2):
_removal.append(u)
for u in _removal:
_uuid_dict.pop(u, None)
r.set('se_entity_uuid', pickle.dumps(_uuid_dict))
#---- filters metrics from cache to return only what's requested in the Prometheus requests
def serviceengine_filter_metrics(request):
d=request.args.to_dict()
se_metrics = pickle.loads(r.get('se_metrics'))
se_metrics_runtime = pickle.loads(r.get('se_metrics_runtime'))
tenant_list,cloud_list,uuid_list = generate_params_list(request)
if 'metric_id' in d:
se_metric_list = request.args.get('metric_id').lower()
else:
se_metric_list = default_se_metric_list
se_metric_list = remove_version_specific_metrics('serviceengine',se_metric_list)
_metric_list = se_metric_list.replace('.','_').split(',')
if 'runtime' in d:
if request.args.get('runtime').lower() == 'true':
_metric_list = _metric_list + se_metrics_runtime
#----- filter results based upon request params
list_to_remove = []
#----- filter metrics
for l in se_metrics:
if '# HELP ' in l or '# TYPE ' in l:
if l.split(' ')[2] not in _metric_list:
list_to_remove.append(l)
else:
if l.split(' ')[0] not in _metric_list:
list_to_remove.append(l)
for e in list_to_remove:
se_metrics.remove(e)
#----- filter by UUID
if uuid_list !='*':
list_to_remove = []
for l in se_metrics:
if "# " not in l:
if l.split('uuid="')[1].split('"',1)[0] not in uuid_list:
list_to_remove.append(l)
for e in list_to_remove:
se_metrics.remove(e)
#----- filter by tenant
else:
list_to_remove = []
for l in se_metrics:
if "# " not in l:
if l.split('tenant="')[1].split('"',1)[0] not in tenant_list:
list_to_remove.append(l)
for e in list_to_remove:
se_metrics.remove(e)
#----- filter by cloud
if '*' not in cloud_list:
list_to_remove = []
for l in se_metrics:
if "# " not in l:
if l.split('cloud="')[1].split('"',1)[0] not in cloud_list:
list_to_remove.append(l)
for e in list_to_remove:
se_metrics.remove(e)
#----- remove empty comment lines
list_to_remove = []
_mlist = []
for l in se_metrics:
if "# " not in l:
_mlist.append(l.split(' ')[0])
for l in se_metrics:
if "# " in l:
if l.split(' ')[2] not in _mlist:
list_to_remove.append(l)
if len(list_to_remove) > 0:
for e in list_to_remove:
se_metrics.remove(e)
#-----
se_metrics.append('\n')
se_metrics = '\n'.join(se_metrics)
return se_metrics
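#----- For reference, each cached entry is expected to look roughly like the
#----- Prometheus exposition lines below (label names inferred from the string
#----- splits used in the filter above; values are illustrative):
#----- # HELP se_stats_avg_cpu_usage se_stats.avg_cpu_usage
#----- # TYPE se_stats_avg_cpu_usage gauge
#----- se_stats_avg_cpu_usage{name="se-1",uuid="se-abc123",tenant="admin",cloud="Default-Cloud",entity_type="serviceengine"} 12.0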
def serviceengine_metrics(request):
try:
if r.get('se_last_poll_time') == None:
r.set('se_last_poll_time', (time.time()-(metric_refresh_interval+10)))
if r.get('se_last_poll_start_time') == None:
r.set('se_last_poll_start_time', (time.time()-(metric_refresh_interval+20)))
if r.get('se_metrics') == None:
r.set('se_metrics',pickle.dumps([]))
if r.get('se_metrics_runtime') == None:
r.set('se_metrics_runtime',pickle.dumps([]))
if r.get('se_polling') == None:
r.set('se_polling', 'False')
if time.time() - float(r.get('se_last_poll_start_time')) > metric_refresh_interval and r.get('se_polling') == 'False':
r.set('se_polling','True')
serviceengine_metrics_params(request)
if wait_for_cache == False:
se_metrics = serviceengine_filter_metrics(request)
p = Process(target = serviceengine_static.refresh_serviceengine_metrics, args = (r,login,avi_controller,))
p.start()
else:
serviceengine_static.refresh_serviceengine_metrics(r,login,avi_controller)
se_metrics = serviceengine_filter_metrics(request)
return se_metrics
else:
print(str(datetime.now())+' =====> Using cached Serviceengine metrics')
serviceengine_metrics_params(request)
se_metrics = serviceengine_filter_metrics(request)
if time.time() - float(r.get('se_last_poll_time')) > (metric_refresh_interval * 2):
r.set('se_metrics',pickle.dumps([]))
if r.get('se_polling') == 'True':
r.set('se_polling', 'False')
return se_metrics
except:
print(str(datetime.now())+' '+avi_controller+': func serviceengine_metrics encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return False
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
#-----------------------------------
#----- Virtual service statistics - STATIC prometheus setup
#----- build lists for requested params; allows multiple job servers to scrape different metrics
def virtualservice_metrics_params(request):
if r.get('vs_entity_uuid') == None:
r.set('vs_entity_uuid',pickle.dumps({}))
if r.get('vs_metric_id') == None:
r.set('vs_metric_id',pickle.dumps({}))
if r.get('vs_tenant') == None:
r.set('vs_tenant',pickle.dumps({}))
if r.get('vs_cloud') == None:
r.set('vs_cloud',pickle.dumps({}))
d=request.args.to_dict()
tenant_list,cloud_list,uuid_list = generate_params_list(request)
if 'metric_id' in d:
metric_id = request.args.get('metric_id').lower()
else:
metric_id = default_vs_metric_list
vs_metric_list = remove_version_specific_metrics('virtualservice',metric_id)
_metric_list = vs_metric_list.split(',')
#---- define metric id list
_vs_metric_id = pickle.loads(r.get('vs_metric_id'))
for m in _metric_list:
_vs_metric_id[m] = time.time()
_removal = []
for m in _vs_metric_id:
if (time.time() - _vs_metric_id[m]) > (metric_refresh_interval*2):
_removal.append(m)
for m in _removal:
_vs_metric_id.pop(m, None)
r.set('vs_metric_id', pickle.dumps(_vs_metric_id))
#---- define tenant list
_tenant_dict = pickle.loads(r.get('vs_tenant'))
for t in tenant_list:
_tenant_dict[t] = time.time()
_removal = []
for t in _tenant_dict:
if (time.time() - _tenant_dict[t]) > (metric_refresh_interval*2):
_removal.append(t)
for t in _removal:
_tenant_dict.pop(t, None)
r.set('vs_tenant', pickle.dumps(_tenant_dict))
#---- define cloud list
_cloud_dict = pickle.loads(r.get('vs_cloud'))
for c in cloud_list:
_cloud_dict[c] = time.time()
_removal = []
for c in _cloud_dict:
if (time.time() - _cloud_dict[c]) > (metric_refresh_interval*2):
_removal.append(c)
for c in _removal:
_cloud_dict.pop(c, None)
r.set('vs_cloud', pickle.dumps(_cloud_dict))
#---- define uuid list
_uuid_dict = pickle.loads(r.get('vs_entity_uuid'))
for u in uuid_list:
_uuid_dict[u] = time.time()
_removal = []
for u in _uuid_dict:
if (time.time() - _uuid_dict[u]) > (metric_refresh_interval*2):
_removal.append(u)
for u in _removal:
_uuid_dict.pop(u, None)
r.set('vs_entity_uuid', pickle.dumps(_uuid_dict))
#---- filters metrics from cache to return only what's requested in the Prometheus requests
def virtualservice_filter_metrics(request):
d=request.args.to_dict()
vs_metrics = pickle.loads(r.get('vs_metrics'))
tenant_list,cloud_list,uuid_list = generate_params_list(request)
if 'metric_id' in d:
vs_metric_list = request.args.get('metric_id').lower()
else:
vs_metric_list = default_vs_metric_list
vs_metric_list = remove_version_specific_metrics('virtualservice',vs_metric_list)
_metric_list = vs_metric_list.replace('.','_').split(',')
#----- filter results based upon request params
list_to_remove = []
#----- filter metrics
for l in vs_metrics:
if '# HELP ' in l or '# TYPE ' in l:
if l.split(' ')[2] not in _metric_list:
list_to_remove.append(l)
else:
if l.split(' ')[0] not in _metric_list:
list_to_remove.append(l)
for e in list_to_remove:
vs_metrics.remove(e)
#----- filter by UUID
if uuid_list !='*':
list_to_remove = []
for l in vs_metrics:
if "# " not in l:
if l.split('uuid="')[1].split('"',1)[0] not in uuid_list:
list_to_remove.append(l)
for e in list_to_remove:
vs_metrics.remove(e)
#----- filter by tenant
else:
list_to_remove = []
for l in vs_metrics:
if "# " not in l:
if l.split('tenant="')[1].split('"',1)[0] not in tenant_list:
list_to_remove.append(l)
for e in list_to_remove:
vs_metrics.remove(e)
#----- filter by cloud
if '*' not in cloud_list:
list_to_remove = []
for l in vs_metrics:
if "# " not in l:
if l.split('cloud="')[1].split('"',1)[0] not in cloud_list:
list_to_remove.append(l)
for e in list_to_remove:
vs_metrics.remove(e)
#----- remove empty comment lines
list_to_remove = []
_mlist = []
for l in vs_metrics:
if "# " not in l:
_mlist.append(l.split(' ')[0])
for l in vs_metrics:
if "# " in l:
if l.split(' ')[2] not in _mlist:
list_to_remove.append(l)
if len(list_to_remove) > 0:
for e in list_to_remove:
vs_metrics.remove(e)
#-----
vs_metrics.append('\n')
vs_metrics = '\n'.join(vs_metrics)
return vs_metrics
#----------
def virtualservice_metrics(request):
try:
if r.get('vs_last_poll_time') == None:
r.set('vs_last_poll_time', (time.time()-(metric_refresh_interval+10)))
if r.get('vs_last_poll_start_time') == None:
r.set('vs_last_poll_start_time', (time.time()-(metric_refresh_interval+20)))
if r.get('vs_metrics') == None:
r.set('vs_metrics',pickle.dumps([]))
if r.get('vs_polling') == None:
r.set('vs_polling', 'False')
if time.time() - float(r.get('vs_last_poll_start_time')) > metric_refresh_interval and r.get('vs_polling') == 'False':
r.set('vs_polling','True')
virtualservice_metrics_params(request)
if wait_for_cache == False:
vs_metrics = virtualservice_filter_metrics(request)
p = Process(target = virtualservice_static.refresh_vs_metrics, args = (r,login,avi_controller,))
p.start()
else:
virtualservice_static.refresh_vs_metrics(r,login,avi_controller)
vs_metrics = virtualservice_filter_metrics(request)
return vs_metrics
else:
print(str(datetime.now())+' =====> Using cached Virtualservice metrics')
virtualservice_metrics_params(request)
vs_metrics = virtualservice_filter_metrics(request)
if time.time() - float(r.get('vs_last_poll_time')) > (metric_refresh_interval * 2):
r.set('vs_metrics',pickle.dumps([]))
if r.get('vs_polling') == 'True':
r.set('vs_polling', 'False')
return vs_metrics
except:
print(str(datetime.now())+' '+avi_controller+': func virtualservice_metrics encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return False
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
#-----------------------------------
#----- Virtual service statistics - Prometheus service discovery
def update_servicediscovery_targets(request):
try:
if r.get('sd_targets') == None:
r.set('sd_targets',pickle.dumps({}))
if r.get('sd_names') == None:
r.set('sd_names', pickle.dumps({}))
sd_names = pickle.loads(r.get('sd_names'))
sd_targets = pickle.loads(r.get('sd_targets'))
d=request.args.to_dict()
tenant = request.args.get('kubernetes_namespace')
vs_name = request.args.get('virtualservice')
if 'metric_id' in d:
sd_metric_list = request.args.get('metric_id').lower()
else:
sd_metric_list = default_vs_metric_list
if tenant not in sd_names:
sd_names[tenant] = {}
if 'extra_metrics' in d:
extra_metrics = request.args.get('extra_metrics')
sd_metric_list = (sd_metric_list+','+extra_metrics).replace(' ','')
sd_metric_list = remove_version_specific_metrics('virtualservice',sd_metric_list)
sd_metric_list = sd_metric_list.split(',')
#---- remove unnecessary metrics
if vs_name in sd_names[tenant]:
uuid = sd_names[tenant][vs_name]
sd_targets[uuid]['vs_metric_list'] = sd_metric_list
r.set('sd_targets',pickle.dumps(sd_targets))
else:
print(str(datetime.now())+' =====> New VS discovered: %s' %vs_name)
resp = avi_request('virtualservice?name=%s&fields=cloud_ref,tenant_ref&include_name=true' %vs_name, tenant)
if resp.status_code == 200:
if resp.json()['count'] == 1:
cloud = resp.json()['results'][0]['cloud_ref'].split('#')[1]
entity_uuid = resp.json()['results'][0]['uuid']
temp_name = resp.json()['results'][0]['name']
sd_names[tenant][temp_name] = entity_uuid
r.set('sd_names', pickle.dumps(sd_names))
sd_targets[entity_uuid] = {'vs_metric_list': sd_metric_list, 'cloud': cloud, 'lastquery': time.time(), 'lastresponse': time.time()}
r.set('sd_targets', pickle.dumps(sd_targets))
else:
print(str(datetime.now())+' =====> ERROR update_servicediscovery_targets: %s' %resp.text)
return ['ERROR',resp.status_code,resp.text]
return ['SUCCESS']
except:
print(str(datetime.now())+' '+avi_controller+': func update_servicediscovery_targets encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return False
#----------
#----------
def virtualservice_servicediscovery_metrics(request):
try:
if r.get('sd_polling') == None:
r.set('sd_polling','False')
if r.get('sd_last_poll_time') == None:
r.set('sd_last_poll_time',(time.time()-(metric_refresh_interval+10)))
if r.get('sd_last_poll_start_time') == None:
r.set('sd_last_poll_start_time',(time.time()-(metric_refresh_interval+20)))
if r.get('sd_metrics') == None:
r.set('sd_metrics', pickle.dumps([]))
status = update_servicediscovery_targets(request)
if status[0] != 'SUCCESS':
return status[0]+'|'+str(status[1])+'|'+status[2]
else:
if time.time() - float(r.get('sd_last_poll_start_time')) > metric_refresh_interval and r.get('sd_polling') == 'False':
r.set('sd_polling','True')
if wait_for_cache == False:
p = Process(target = servicediscovery.update_servicediscovery_metric_cache_multiprocess, args = (r,login,avi_controller,metric_refresh_interval,))
p.start()
else:
servicediscovery.update_servicediscovery_metric_cache_multiprocess(r,login,avi_controller,metric_refresh_interval)
else:
print(str(datetime.now())+' =====> Using cached Servicediscovery metrics')
sd_names = pickle.loads(r.get('sd_names'))
sd_targets = pickle.loads(r.get('sd_targets'))
sd_metrics = pickle.loads(r.get('sd_metrics'))
tenant = request.args.get('kubernetes_namespace')
vs_name = request.args.get('virtualservice')
uuid = sd_names[tenant][vs_name]
#prom_metrics = ''
prom_metrics = ['\n']
for s in sd_metrics:
for v in s:
if v == uuid:
for m in s[v]:
if 'data' in m:
temp_tags = ''
#----- check if metric is desired for the vs
if m['header']['name'] in sd_targets[uuid]['vs_metric_list']:
metric_name = m['header']['name'].replace('.','_').replace('-','_')
metric_description = m['header']['metric_description']
metric_value = m['data'][0]['value']
temp_payload = {}
temp_payload['name'] = vs_name
temp_payload['uuid'] = uuid
temp_payload['cloud'] = sd_targets[uuid]['cloud']
temp_payload['tenant'] = tenant
temp_payload['entity_type'] = 'virtualservice'
for e in temp_payload:
temp_tags=temp_tags+(str(e+'="'+temp_payload[e]+'",'))
temp_tags = '{'+temp_tags.rstrip(',')+'}'
#prom_metrics = prom_metrics+'\n'+'# HELP '+metric_name+' '+metric_description
#prom_metrics = prom_metrics+'\n'+'# TYPE '+metric_name+' gauge'
#prom_metrics = prom_metrics+'\n'+metric_name+''+temp_tags+' '+str(metric_value)
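#----- prefix HELP/TYPE lines with '01'/'02' so that, after the dedup and sort
#----- below, they land directly ahead of their metric sample line; the prefixes
#----- are stripped again before the response is assembled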
prom_metrics.append('%s 01# HELP %s %s' %(metric_name,metric_name, metric_description))
prom_metrics.append('%s 02# TYPE %s gauge' %(metric_name,metric_name))
prom_metrics.append('%s %s %s' %(metric_name,temp_tags,str(metric_value)))
sd_targets[uuid]['lastquery'] = time.time()
sd_targets[uuid]['lastresponse'] = time.time()
r.set('sd_targets',pickle.dumps(sd_targets))
#prom_metrics = prom_metrics+'\n'
prom_metrics = list(set(prom_metrics))
prom_metrics = sorted(prom_metrics)
for idx, item in enumerate(prom_metrics):
if '01#' in item:
item = item.split('01',1)[1]
prom_metrics[idx] = item
elif '02#' in item:
item = item.split('02',1)[1]
prom_metrics[idx] = item
prom_metrics.append('\n')
prom_metrics = '\n'.join(prom_metrics)
if time.time() - float(r.get('sd_last_poll_time')) > (metric_refresh_interval * 2):
r.set('sd_metrics',pickle.dumps(''))
if r.get('sd_polling') == 'True':
r.set('sd_polling', 'False')
return prom_metrics
except:
r.set('sd_polling','False')
print(str(datetime.now())+' '+avi_controller+': func virtualservice_servicediscovery_metrics encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return False
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
#-----------------------------------
#----- Pool statistics
#----- build lists for requested params; allows multiple job servers to scrape different metrics
def pool_metrics_params(request):
if r.get('pool_entity_uuid') == None:
r.set('pool_entity_uuid',pickle.dumps({}))
if r.get('pool_metric_id') == None:
r.set('pool_metric_id',pickle.dumps({}))
if r.get('pool_tenant') == None:
r.set('pool_tenant',pickle.dumps({}))
if r.get('pool_cloud') == None:
r.set('pool_cloud',pickle.dumps({}))
d=request.args.to_dict()
tenant_list,cloud_list,uuid_list = generate_params_list(request)
if 'metric_id' in d:
metric_id = request.args.get('metric_id').lower()
else:
metric_id = default_pool_metric_list
pool_metric_list = remove_version_specific_metrics('pool',metric_id)
_metric_list = pool_metric_list.split(',')
#---- define metric id list
_pool_metric_id = pickle.loads(r.get('pool_metric_id'))
for m in _metric_list:
_pool_metric_id[m] = time.time()
_removal = []
for m in _pool_metric_id:
if (time.time() - _pool_metric_id[m]) > (metric_refresh_interval*2):
_removal.append(m)
for m in _removal:
_pool_metric_id.pop(m, None)
r.set('pool_metric_id', pickle.dumps(_pool_metric_id))
#---- define tenant list
_tenant_dict = pickle.loads(r.get('pool_tenant'))
for t in tenant_list:
_tenant_dict[t] = time.time()
_removal = []
for t in _tenant_dict:
if (time.time() - _tenant_dict[t]) > (metric_refresh_interval*2):
_removal.append(t)
for t in _removal:
_tenant_dict.pop(t, None)
r.set('pool_tenant', pickle.dumps(_tenant_dict))
#---- define cloud list
_cloud_dict = pickle.loads(r.get('pool_cloud'))
for c in cloud_list:
_cloud_dict[c] = time.time()
_removal = []
for c in _cloud_dict:
if (time.time() - _cloud_dict[c]) > (metric_refresh_interval*2):
_removal.append(c)
for c in _removal:
_cloud_dict.pop(c, None)
r.set('pool_cloud', pickle.dumps(_cloud_dict))
#---- define uuid list
_uuid_dict = pickle.loads(r.get('pool_entity_uuid'))
for u in uuid_list:
_uuid_dict[u] = time.time()
_removal = []
for u in _uuid_dict:
if (time.time() - _uuid_dict[u]) > (metric_refresh_interval*2):
_removal.append(u)
for u in _removal:
_uuid_dict.pop(u, None)
r.set('pool_entity_uuid', pickle.dumps(_uuid_dict))
#---- filters metrics from cache to return only what's requested in the Prometheus requests
def pool_filter_metrics(request):
d=request.args.to_dict()
pool_metrics = pickle.loads(r.get('pool_metrics'))
tenant_list,cloud_list,uuid_list = generate_params_list(request)
if 'metric_id' in d:
pool_metric_list = request.args.get('metric_id').lower()
else:
pool_metric_list = default_pool_metric_list
pool_metric_list = remove_version_specific_metrics('pool',pool_metric_list)
_metric_list = pool_metric_list.replace('.','_').split(',')
#----- filter results based upon request params
list_to_remove = []
#----- filter metrics
for l in pool_metrics:
if '# HELP ' in l or '# TYPE ' in l:
if l.split(' ')[2] not in _metric_list:
list_to_remove.append(l)
else:
if l.split(' ')[0] not in _metric_list:
list_to_remove.append(l)
for e in list_to_remove:
pool_metrics.remove(e)
#----- filter by UUID
if uuid_list !='*':
list_to_remove = []
for l in pool_metrics:
if "# " not in l:
if l.split('uuid="')[1].split('"',1)[0] not in uuid_list:
list_to_remove.append(l)
for e in list_to_remove:
pool_metrics.remove(e)
#----- filter by tenant
else:
list_to_remove = []
for l in pool_metrics:
if "# " not in l:
if l.split('tenant="')[1].split('"',1)[0] not in tenant_list:
list_to_remove.append(l)
for e in list_to_remove:
pool_metrics.remove(e)
#----- filter by cloud
if '*' not in cloud_list:
list_to_remove = []
for l in pool_metrics:
if "# " not in l:
if l.split('cloud="')[1].split('"',1)[0] not in cloud_list:
list_to_remove.append(l)
for e in list_to_remove:
pool_metrics.remove(e)
#----- remove empty comment lines
list_to_remove = []
_mlist = []
for l in pool_metrics:
if "# " not in l:
_mlist.append(l.split(' ')[0])
for l in pool_metrics:
if "# " in l:
if l.split(' ')[2] not in _mlist:
list_to_remove.append(l)
if len(list_to_remove) > 0:
for e in list_to_remove:
pool_metrics.remove(e)
#-----
pool_metrics.append('\n')
pool_metrics = '\n'.join(pool_metrics)
return pool_metrics
def pool_metrics(request):
try:
if r.get('pool_last_poll_time') == None:
r.set('pool_last_poll_time', (time.time()-(metric_refresh_interval+10)))
if r.get('pool_last_poll_start_time') == None:
r.set('pool_last_poll_start_time', (time.time()-(metric_refresh_interval+20)))
if r.get('pool_metrics') == None:
r.set('pool_metrics',pickle.dumps([]))
if r.get('pool_polling') == None:
r.set('pool_polling', 'False')
if time.time() - float(r.get('pool_last_poll_start_time')) > metric_refresh_interval and r.get('pool_polling') == 'False':
r.set('pool_polling','True')
pool_metrics_params(request)
if wait_for_cache == False:
pool_metrics = pool_filter_metrics(request)
p = Process(target = pool_static.refresh_pool_metrics, args = (r,login,avi_controller,))
p.start()
else:
pool_static.refresh_pool_metrics(r,login,avi_controller)
pool_metrics = pool_filter_metrics(request)
return pool_metrics
else:
print(str(datetime.now())+' =====> Using cached Pool metrics')
pool_metrics_params(request)
pool_metrics = pool_filter_metrics(request)
#pool_metrics = pickle.loads(r.get('pool_metrics'))
if time.time() - float(r.get('pool_last_poll_time')) > (metric_refresh_interval * 2):
r.set('pool_metrics',pickle.dumps([]))
if r.get('pool_polling') == 'True':
r.set('pool_polling', 'False')
return pool_metrics
except:
r.set('pool_polling','False')
print(str(datetime.now())+' '+avi_controller+': func pool_metrics encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return False
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
#-----------------------------------
#----- GET controller Member specific statistics
#----- build lists for requested params; allows multiple job servers to scrape different metrics
def controller_metrics_params(request):
if r.get('ctl_metric_id') == None:
r.set('ctl_metric_id',pickle.dumps({}))
if r.get('ctl_runtime') == None:
r.set('ctl_runtime',pickle.dumps({}))
d=request.args.to_dict()
if 'metric_id' in d:
metric_id = request.args.get('metric_id').lower()
else:
metric_id = default_controller_metric_list
controller_metric_list = remove_version_specific_metrics('controller',metric_id)
_metric_list = controller_metric_list.split(',')
#---- define metric id list
_controller_metric_id = pickle.loads(r.get('ctl_metric_id'))
for m in _metric_list:
_controller_metric_id[m] = time.time()
_removal = []
for m in _controller_metric_id:
if (time.time() - _controller_metric_id[m]) > (metric_refresh_interval*2):
_removal.append(m)
for m in _removal:
_controller_metric_id.pop(m, None)
r.set('ctl_metric_id', pickle.dumps(_controller_metric_id))
#---- define ctl runtime
_ctl_runtime = pickle.loads(r.get('ctl_runtime'))
if 'runtime' in d:
if request.args.get('runtime').lower() == 'true':
_ctl_runtime['true'] = time.time()
_removal = []
for t in _ctl_runtime:
if (time.time() - _ctl_runtime[t]) > (metric_refresh_interval*2):
_removal.append(t)
for t in _removal:
_ctl_runtime.pop(t, None)
r.set('ctl_runtime', pickle.dumps(_ctl_runtime))
#---- filters metrics from cache to return only what's requested in the Prometheus requests
def controller_filter_metrics(request):
ctl_metrics = pickle.loads(r.get('ctl_metrics'))
ctl_metrics_runtime = pickle.loads(r.get('ctl_metrics_runtime'))
d=request.args.to_dict()
if 'metric_id' in d:
ctl_metric_list = request.args.get('metric_id').lower()
else:
ctl_metric_list = default_controller_metric_list
ctl_metric_list = remove_version_specific_metrics('controller',ctl_metric_list)
_metric_list = ctl_metric_list.replace('.','_').split(',')
if 'runtime' in d:
if request.args.get('runtime').lower() == 'true':
_metric_list = _metric_list + ctl_metrics_runtime
#----- filter results based upon request params
list_to_remove = []
#----- filter metrics
for l in ctl_metrics:
if '# HELP ' in l or '# TYPE ' in l:
if l.split(' ')[2] not in _metric_list:
list_to_remove.append(l)
else:
if l.split(' ')[0] not in _metric_list:
list_to_remove.append(l)
for e in list_to_remove:
ctl_metrics.remove(e)
ctl_metrics.append('\n')
ctl_metrics = '\n'.join(ctl_metrics)
return ctl_metrics
def controller_metrics(request):
try:
if r.get('ctl_last_poll_time') == None:
r.set('ctl_last_poll_time', (time.time()-(metric_refresh_interval+10)))
if r.get('ctl_last_poll_start_time') == None:
r.set('ctl_last_poll_start_time', (time.time()-(metric_refresh_interval+20)))
if r.get('ctl_metrics') == None:
r.set('ctl_metrics',pickle.dumps([]))
if r.get('ctl_polling') == None:
r.set('ctl_polling', 'False')
if r.get('ctl_metrics_runtime') == None:
r.set('ctl_metrics_runtime',pickle.dumps([]))
if time.time() - float(r.get('ctl_last_poll_start_time')) > metric_refresh_interval and r.get('ctl_polling') == 'False':
r.set('ctl_polling','True')
controller_metrics_params(request)
if wait_for_cache == False:
ctl_metrics = controller_filter_metrics(request)
p = Process(target = controller_static.refresh_ctl_metrics, args = (r,login,avi_controller,))
p.start()
else:
controller_static.refresh_ctl_metrics(r,login,avi_controller)
ctl_metrics = controller_filter_metrics(request)
return ctl_metrics
else:
print(str(datetime.now())+' =====> Using cached Controller metrics')
controller_metrics_params(request)
ctl_metrics = controller_filter_metrics(request)
if time.time() - float(r.get('ctl_last_poll_time')) > (metric_refresh_interval * 2):
r.set('ctl_metrics',pickle.dumps([]))
if r.get('ctl_polling') == 'True':
r.set('ctl_polling', 'False')
return ctl_metrics
except:
r.set('ctl_polling', 'False')
        print(str(datetime.now())+' '+avi_controller+': func controller_metrics encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return False
#---------------------------------------------------------------------------------------------------------
#-----------------------------------
#-------------------------
app = Flask(__name__)
@app.route('/metrics', methods=['GET'])
def add_message():
try:
req_start_time = time.time()
d=request.args.to_dict()
avi_login()
if request.args.get('entity_type').lower() == 'virtualservice':
if 'kubernetes_namespace' in d:
metrics = virtualservice_servicediscovery_metrics(request)
else:
metrics = virtualservice_metrics(request)
#print metrics
elif request.args.get('entity_type').lower() == 'serviceengine':
metrics = serviceengine_metrics(request)
#print metrics
elif request.args.get('entity_type').lower() == 'controller':
metrics = controller_metrics(request)
#print metrics
elif request.args.get('entity_type').lower() == 'pool':
metrics = pool_metrics(request)
#print metrics
else:
return '', 500
req_total_time = str(time.time()-req_start_time)
print(str(datetime.now())+' =====> request took '+req_total_time+' seconds\n')
if metrics == False:
return '', 500
elif metrics.split('|')[0] == 'ERROR':
return metrics.split('|')[2], int(metrics.split('|')[1])
else:
return metrics, 200
except:
exception_text = traceback.format_exc()
        print(str(datetime.now())+' '+avi_controller+': '+exception_text)
        return '', 500
@app.route('/virtualservice_debug', methods=['GET'])
def vs_debug():
try:
_1 = '\n<p>=====> Last Polling Time %s</p>\n' %time.ctime(float(r.get('vs_last_poll_time')))
_2 = '\n<p>=====></p>\n'
_3 = '\n<p>'+str(pickle.loads(r.get('vs_results')))+'</p>\n'
_4 = '\n<p>'+str(pickle.loads(r.get('vs_metrics')))+'</p>\n'
_5 = '\n<p>=====> VS without METRICS %s</p>\n' %str(pickle.loads(r.get('vs_missing_metrics')))
response = _1+_3+_4+_5
return response, 200
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return "=====> Virtualservice metrics polling hasn't run yet\n", 200
@app.route('/servicediscovery_debug', methods=['GET'])
def sd_debug():
try:
_1 = '\n<p>=====> Last Polling Time %s</p>\n' %time.ctime(float(r.get('sd_last_poll_time')))
_2 = '\n<p>=====></p>\n'
_3 = '\n<p>'+str(pickle.loads(r.get('sd_names')))+'</p>\n'
_4 = '\n<p>'+str(pickle.loads(r.get('sd_targets')))+'</p>\n'
_5 = '\n<p>'+str(pickle.loads(r.get('sd_metrics')))+'</p>\n'
response = _1+_3+_4+_5
return response, 200
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return "=====> Servicediscovery metrics polling hasn't run yet\n", 200
@app.route('/serviceengine_debug', methods=['GET'])
def se_debug():
try:
_1 = '\n<p>=====> Last Polling Time %s</p>\n' %time.ctime(float(r.get('se_last_poll_time')))
_2 = '=====>'
_3 = '\n<p>'+str(pickle.loads(r.get('se_results')))+'</p>\n'
_4 = '\n</p>'+str(pickle.loads(r.get('se_metrics')))+'</p>\n'
_5 = '\n<p>=====> Serviceengine without METRICS %s</p>\n' %str(pickle.loads(r.get('se_missing_metrics')))
response = _1+_3+_4+_5
return response, 200
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return "=====> Serviceengine metrics polling hasn't run yet\n", 200
@app.route('/pool_debug', methods=['GET'])
def pool_debug():
try:
_1 = '\n<p>=====> Last Polling Time %s</p>\n' %time.ctime(float(r.get('pool_last_poll_time')))
_2 = '=====>'
_3 = '\n<p>'+str(pickle.loads(r.get('pool_results')))+'</p>\n'
_4 = '\n</p>'+str(pickle.loads(r.get('pool_metrics')))+'</p>\n'
_5 = '\n<p>=====> Pools without METRICS %s</p>\n' %str(pickle.loads(r.get('pool_missing_metrics')))
response = _1+_3+_4+_5
return response, 200
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return "=====> Pool metrics polling hasn't run yet\n", 200
@app.route('/redis_debug', methods=['GET'])
def redis_debug():
try:
d=request.args.to_dict()
if 'key' in d:
key = request.args.get('key').lower()
try:
value = str(pickle.loads(r.get(key)))
except:
value = str(r.get(key))
_1 = '\n<p>=====> Redis Key %s</p>\n' %key
_2 = value+'</p>\n'
response = _1+_2
return response, 200
else:
_1 = '\n<p>=====> All Redis Keys </p>\n'
response = []
for key in r.scan_iter("*"):
response.append(key)
response = sorted(response)
            if 'avi_login' in response:
                response.remove('avi_login')
response = ('\n'.join(response))+'\n'
response = _1+response
return response, 200
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return "=====> Redis Debug has an error\n", 500
@app.route('/redis_delete', methods=['GET'])
def redis_delete():
try:
d=request.args.to_dict()
if 'key' in d:
key = request.args.get('key').lower()
r.delete(key)
_1 = '\n<p>=====> Deleted Redis Key %s</p>\n' %key
            response = _1
return response, 200
else:
_1 = '\n<p>=====> Deleted All Redis Keys </p>\n'
response = []
for key in r.scan_iter("*"):
response.append(key)
r.delete(key)
response = sorted(response)
if 'avi_login' in response:
response.remove('avi_login')
response = ('\n'.join(response))+'\n'
response = _1+response
return response, 200
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return "=====> Redis Flush has an error\n", 500
try:
r = redis.Redis(host='localhost', port=6379, db=0)
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8080, threaded=True)
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text) |
#! /usr/bin/python3
""" Run the Visual Studio AStyleTestI18n test using AppLocale to test non-ASCII files.
"""
# to disable the print statement and use the print() function (version 3 format)
from __future__ import print_function
import os
import subprocess
import time
# local libraries
import libastyle
# global variables ------------------------------------------------------------
# change the following for the correct VS version
# always uses the debug configuration
__builddir = libastyle.get_astyletest_directory() + "/build/" + libastyle.VS_RELEASE
# -----------------------------------------------------------------------------
def main():
"""Main processing function.
"""
# initialization
libastyle.set_text_color("yellow")
print(libastyle.get_python_version())
verify_os()
exepath = "C:/Windows/AppPatch/AppLoc.exe"
i18npath = __builddir + "/debug/AStyleTestI18nd.exe"
# verify files
if not os.path.exists(exepath):
libastyle.system_exit("AppLoc not installed: " + exepath)
build_testi18n_executable()
    # the AppLoc command must be passed as one long quoted statement
# the country LCID is added by the subprocess call
command = exepath + ' ' + i18npath + ' ' + "\"--terse_output --no_close\"" + ' '
# run tests
print("\nWAIT for a test to finish before running the next.")
print("NOTE A bad return may work anyway. Check the individual displays.")
print("Press OK to continue with the displayed language.\n")
print("Running Greek Test")
retval = subprocess.call(command + "/L0408")
if retval:
print(" Bad AppLoc return: " + str(retval))
time.sleep(2) # must finish before running the next test
print("Running Japanese Test")
retval = subprocess.call(command + "/L0411")
if retval:
print(" Bad AppLoc return: " + str(retval))
time.sleep(2) # must finish before running the next test
print("Running Russian Test")
retval = subprocess.call(command + "/L0419")
if retval:
print(" Bad AppLoc return: " + str(retval))
time.sleep(2) # must finish before running the next test
# -----------------------------------------------------------------------------
def build_testi18n_executable():
"""Build the Visual Studio AStyleTestI18n debug executable.
"""
print("Building " + libastyle.VS_RELEASE + " AStyleTestI18n Debug")
# Compile the astyle executable for Windows.
slnpath = (__builddir + "/AStyleTestI18n.sln")
libastyle.compile_windows_executable(slnpath, libastyle.DEBUG)
# -----------------------------------------------------------------------------
def verify_os():
"""Verify the operating system
"""
if os.name != "nt":
print("This script is for Windows only")
# -----------------------------------------------------------------------------
# make the module executable
if __name__ == "__main__":
main()
libastyle.system_exit()
# -----------------------------------------------------------------------------
|
"""Some syft imports..."""
from . import core
from . import spdz
from .core.frameworks.torch import _SyftTensor
from torch.autograd import Variable
from torch.nn import Parameter
from torch.autograd import Variable as Var
from .core.frameworks.torch import TorchHook
from .core.frameworks.torch import _LocalTensor, _PointerTensor, _FixedPrecisionTensor, \
_PlusIsMinusTensor, _GeneralizedPointerTensor, _SPDZTensor
from syft.core.workers import VirtualWorker, SocketWorker
from .core.frameworks.numpy import array
__all__ = ['core', 'spdz']
import syft
import torch
for f in dir(torch):
if ("_" not in f):
setattr(syft, f, getattr(torch, f))
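# After the loop above, every top-level torch attribute whose name contains no
# underscore is re-exported on the syft module, so e.g. syft.zeros(2, 2) resolves
# to torch.zeros(2, 2) (illustrative call; any underscore-free torch name works).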
setattr(syft, 'deser', _SyftTensor.deser) |
try:
    import http.server
    import thread
    from urllib.parse import parse_qs
    from urllib.parse import urlencode
except ImportError:
    # Python 3: the "thread" module was renamed to "_thread"
    import http.server
    import _thread as thread
    from urllib.parse import urlencode
    from urllib.parse import parse_qs
import sys
import json
import os
SID = 'TestAccountSid'
AUTH_TOKEN = 'TestToken'
PORT_NUMBER = 41123
URL = 'https://localhost:' + str(PORT_NUMBER)
RESOURCES_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'resources')
)
UNITTEST_PATH = os.path.join(RESOURCES_PATH, 'unittests.json')
class Expectation(object):
def __init__(self, group, test):
self._createExpectation(group, test)
def _createExpectation(self, group, test):
unittestsData = json.load(open(UNITTEST_PATH, 'r'))
groupData = unittestsData[group]
testData = groupData[test]
# print(json.dumps(testData, indent=4, sort_keys=True))
self.method = testData['method']
if testData['path'] == 'Accounts':
self.path = '/Accounts/' + SID + '.json'
else:
self.path = '/Accounts/' + SID + '/' + testData['path']
self.queryParams = self._parseParams(testData['queryParams'])
self.bodyParams = self._parseParams(testData['bodyParams'])
self.responseFile = testData['response']
def _parseParams(self, jsonParams):
if jsonParams is None:
return None
params = {}
for param in jsonParams:
name = param['name']
value = param['value']
params[name] = value
paramsUrlEncoded = urlencode(params)
return paramsUrlEncoded
class MockHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self._process()
def do_POST(self):
self._process()
def do_DELETE(self):
self._process()
def log_message(self, format, *args):
return
def _process(self):
expectation = self.server.expectation
bodyParams = self.bodyParams
queryParams = self.queryParams
# print(self.command, expectation.method,
# self.command == expectation.method)
# print(self.pathWithoutQuery, expectation.path,
# self.pathWithoutQuery == expectation.path)
# print('\nreceived bodyParams:', bodyParams)
# print('\nexpected bodyParams:', expectation.bodyParams)
# print('\nBody params ok:', self._isParamsEqual(
# bodyParams, expectation.bodyParams))
# print('\nreceived queryParams:', queryParams)
# print('\nexpected queryParams:', expectation.queryParams)
# print('\nqueryParams ok:', self._isParamsEqual(
# queryParams, expectation.queryParams))
if (self.command == expectation.method and
self.pathWithoutQuery == expectation.path and
self._isParamsEqual(bodyParams, expectation.bodyParams) and
self._isParamsEqual(queryParams, expectation.queryParams)):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
responsePath = RESOURCES_PATH + expectation.responseFile
body = json.dumps(json.load(open(responsePath, 'r')))
if sys.version_info >= (3, 0):
body = str.encode(body)
self.wfile.write(body)
def _isParamsEqual(self, params0, params1):
"""Extract urlencoded string and compare all key value pairs if same"""
if params0 == params1:
return True
elif params0 is None or params1 is None:
return False
try:
dict0 = parse_qs(params0)
dict1 = parse_qs(params1)
for param in dict0:
                if param not in dict1.keys():
return False
val0 = dict1[param][0]
val1 = dict0[param][0]
                if val0.lower() == 'true' or val0.lower() == 'false':
                    # bool('false') is True, so compare against the literal strings instead
                    val0 = val0.lower() == 'true'
                    val1 = val1.lower() == 'true'
if val0 != val1:
return False
return True
        except Exception as e:
print(e)
return False
@property
def pathWithoutQuery(self):
url = self.path.split('?')
return None if len(url) == 0 else url[0]
@property
def bodyParams(self):
if sys.version_info >= (3, 0):
            needle = 'Content-Length'
        else:
            needle = 'content-length'
        if (not hasattr(self, '_bodyParams') and
                needle in self.headers.keys()):
if sys.version_info >= (3, 0):
contentLen = int(self.headers['Content-Length'])
else:
contentLen = int(self.headers.getheader('content-length', 0))
if contentLen > 0:
self._bodyParams = self.rfile.read(contentLen)
if sys.version_info >= (3, 0):
self._bodyParams = self._bodyParams.decode('utf-8')
else:
self._bodyParams = None
else:
self._bodyParams = None
return self._bodyParams
@property
def queryParams(self):
url = self.path.split('?')
return None if len(url) != 2 else url[1]
class MockHTTPServer(http.server.HTTPServer):
def __init__(self, expectation, *args, **kw):
http.server.HTTPServer.__init__(self, *args, **kw)
self.expectation = expectation
thread.start_new_thread(self.handle_request, ())
class TestUtil(object):
@classmethod
def start(cls, group, test):
expectation = Expectation(group, test)
serverAddress = ('', PORT_NUMBER)
MockHTTPServer(expectation, serverAddress, MockHTTPRequestHandler)
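# Hedged usage sketch (the group/test names below are placeholders; the real
# names are the keys of resources/unittests.json):
#     TestUtil.start('<group>', '<test>')
#     # ...then point the client under test at URL; the mock server answers a
#     # single matching request with the canned response file and then stops.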
|
from astropy.timeseries import LombScargle
from hydroDL import kPath
from hydroDL.app import waterQuality
from hydroDL.data import gageII, usgs, gridMET
from hydroDL.master import basins
from hydroDL.post import axplot, figplot
import matplotlib.pyplot as plt
import importlib
import pandas as pd
import numpy as np
import os
import time
import scipy.signal as signal
wqData = waterQuality.DataModelWQ('Silica64')
siteNoLst = wqData.siteNoLst
# real data
# siteNo = '07083000'
# siteNo = '09352900'
indS = np.random.randint(0, 64)
siteNo = siteNoLst[indS]
# siteNo = '06317000'
dfObs = waterQuality.readSiteTS(siteNo, ['00955'])
# rm outlier
df = dfObs[dfObs['00955'].notna().values]
y = df['00955'].values
yV = y[y < np.percentile(y, 99)]
yV = yV[yV > np.percentile(y, 1)]
ul = np.mean(yV)+np.std(yV)*5
dfObs[dfObs['00955'] > ul] = np.nan
# fourier
df = dfObs[dfObs.notna().values]
tt = dfObs.index.values
xx = (tt.astype('datetime64[D]') -
      np.datetime64('1979-01-01')).astype(float)
t = df.index.values
x = (t.astype('datetime64[D]') -
     np.datetime64('1979-01-01')).astype(float)
y = df['00955'].values
y = y-np.nanmean(y)
nt = len(xx)
# nt = 1000
# freq = 1/np.linspace(2, nt, nt)
# freq = np.arange(1, nt)/nt
freq = np.fft.fftfreq(nt)[1:]
yy = dfObs['00955'].values
np.fft.fft(yy)
ls = LombScargle(x, y)
power = ls.power(freq)
xx = (dfObs.index.values.astype('datetime64[D]') -
      np.datetime64('1979-01-01')).astype(float)
p = ls.false_alarm_probability(power)
indP = np.where(p < 0.1)[0]
periods = np.unique(np.abs((1/freq[indP]).astype(int)))  # renamed from pd to avoid shadowing the pandas alias
yy = np.zeros(len(tt))
y2 = np.zeros(len(t))
# for d in periods:
# if d > 0:
# yy = yy+ls.model(xx, 1/d)
# y2 = y2+ls.model(x, 1/d)
for k in indP.tolist():
yy = yy+ls.model(xx, freq[k])
y2 = y2+ls.model(x, freq[k])
fig, axes = plt.subplots(2, 1, figsize=(10, 4))
axes[0].plot(tt, yy, '-r', label='Lomb-Scargle')
axes[0].plot(t, y, '-*b', label='obs')
axes[0].legend()
# axes[0].set_xlabel('day')
axes[0].set_title(siteNo)
axes[1].plot(t, y2-y, '-*b', label='residual')
axes[1].legend()
axes[1].set_xlabel('period (day)')
plt.tight_layout()
fig.show()
|
{
'targets': [{
'target_name': 'uwp',
'type': '<(library)',
'defines': [ 'JS_ENGINE_CHAKRA=1', '_WIN32_WINNT=0x0A00', '_GNU_SOURCE',
'JS_ENGINE_V8=1', 'V8_IS_3_28=1', 'USE_EDGEMODE_JSRT=1',
'WINONECORE=1', 'BUILDING_CHAKRASHIM=1' ],
'sources': [
'binding.gyp',
'index.js',
'src/node-async.h',
'src/uwp.h',
'src/uwp.cc',
],
'conditions': [
[ 'target_arch=="arm"', {
'defines': [ '__arm__=1' ]
}],
[ 'node_win_onecore==1', {
'libraries': [
'-lchakrart.lib',
],
}],
[ 'node_win_onecore==0', {
'libraries': [
'-lchakrart.lib',
'-lole32.lib',
'-lversion.lib',
],
}]
],
'include_dirs': [
'../../src',
'../../src/jx',
'../../src/wrappers',
'../../src/jx/Proxy',
'../../src/jx/external',
'../uv/include',
'../cares/include',
'../http_parser',
'../openssl/openssl/include',
'../zlib',
'../chakrashim/include',
],
}]
}
|
"""
Betclic odds scraper
"""
import datetime
import json
import re
import urllib
import dateutil.parser
from collections import defaultdict
import sportsbetting as sb
from sportsbetting.auxiliary_functions import merge_dicts, truncate_datetime
from sportsbetting.database_functions import is_player_in_db, add_player_to_db, is_player_added_in_db
def parse_betclic_api(id_league):
"""
Get odds from Betclic API
"""
url = ("https://offer.cdn.betclic.fr/api/pub/v2/competitions/{}?application=2&countrycode=fr"
"&fetchMultipleDefaultMarkets=true&language=fr&sitecode=frfr".format(id_league))
content = urllib.request.urlopen(url).read()
parsed = json.loads(content)
odds_match = {}
if (not parsed) or "unifiedEvents" not in parsed:
return odds_match
matches = parsed["unifiedEvents"]
for match in matches:
if match["isLive"]:
continue
if "contestants" not in match:
continue
contestants = match["contestants"]
if not contestants:
continue
name = " - ".join(contestant["name"] for contestant in contestants)
date = dateutil.parser.isoparse(match["date"])+datetime.timedelta(hours=2)
markets = match["markets"]
if not markets:
continue
odds = [selection["odds"] for selection in markets[0]["selections"]]
odds_match[name] = {}
odds_match[name]["date"] = truncate_datetime(date)
odds_match[name]["odds"] = {"betclic":odds}
odds_match[name]["id"] = {"betclic":match["id"]}
return odds_match
def parse_betclic(url):
"""
Get odds from Betclic url
"""
if "-s" in url and url.split("-s")[-1].isdigit():
return parse_sport_betclic(url.split("-s")[-1])
id_league = re.findall(r'\d+', url)[-1]
return parse_betclic_api(id_league)
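# Illustrative routing (the URLs are placeholders, not verified Betclic links):
#   a url ending in "-s<digits>"      -> parse_sport_betclic("<digits>")  (whole sport)
#   any other url containing a number -> parse_betclic_api(<last number in the url>)  (single competition)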
def parse_sport_betclic(id_sport):
"""
Get odds from Betclic sport id
"""
url = ("https://offer.cdn.betclic.fr/api/pub/v2/sports/{}?application=2&countrycode=fr&language=fr&sitecode=frfr"
.format(id_sport))
content = urllib.request.urlopen(url).read()
parsed = json.loads(content)
list_odds = []
competitions = parsed["competitions"]
for competition in competitions:
id_competition = competition["id"]
list_odds.append(parse_betclic_api(id_competition))
return merge_dicts(list_odds)
def get_sub_markets_players_basketball_betclic(id_match):
if not id_match:
return {}
url = 'https://offer.cdn.betclic.fr/api/pub/v4/events/{}?application=2&countrycode=fr&language=fr&sitecode=frfr'.format(str(id_match))
content = urllib.request.urlopen(url).read()
parsed = json.loads(content)
if not parsed:
return {}
markets = parsed['markets']
sub_markets = {}
markets_to_keep = {'Bkb_Ppf2':'Points + passes + rebonds', 'Bkb_Pta2':'Passes',
'Bkb_Ptr2':'Rebonds',
'Bkb_PnA':'Points + passes',
'Bkb_PnR':'Points + rebonds',
'Bkb_AeR':'Passes + rebonds'}
for market in markets:
if market['mtc'] not in markets_to_keep:
continue
selections = market['selections']
odds_market = {}
for selection in selections:
            limit = selection['name'].split(' de ')[-1].replace(",", ".")
            player = re.split('\\s\\+\\s|\\s\\-\\s', selection['name'].replace(".", " "))[0].strip()
ref_player = player
if is_player_added_in_db(player, "betclic"):
ref_player = is_player_added_in_db(player, "betclic")
elif is_player_in_db(player):
add_player_to_db(player, "betclic")
else:
if sb.DB_MANAGEMENT:
print(player, "betclic")
continue
key_player = ref_player + "_" + limit
if key_player not in odds_market:
odds_market[key_player] = {"odds":{"betclic":[]}}
odds_market[ref_player + "_" + limit]["odds"]["betclic"].append(selection['odds'])
sub_markets[markets_to_keep[market['mtc']]] = dict(odds_market)
return sub_markets |
#!/usr/bin/env python
# (c) Corey Goldberg (corey@goldb.org) 2008-2009
#
# Get health metrics from a Windows server with WMI
import wmi
import re
from subprocess import Popen, PIPE
class MetricsFetcher:
def __init__(self, computer, user, password):
self.c = wmi.WMI(find_classes=False, computer=computer, user=user, password=password)
def get_uptime(self):
secs_up = int([uptime.SystemUpTime for uptime in
self.c.Win32_PerfFormattedData_PerfOS_System()][0])
hours_up = secs_up / 3600
return hours_up
def get_cpu(self):
utilizations = [cpu.LoadPercentage for cpu in self.c.Win32_Processor()]
utilization = int(sum(utilizations) / len(utilizations)) # avg all cores/processors
return utilization
def get_mem_mbytes(self):
available_mbytes = int([mem.AvailableMBytes for mem in
self.c.Win32_PerfFormattedData_PerfOS_Memory()][0])
return available_mbytes
def get_mem_pct(self):
pct_in_use = int([mem.PercentCommittedBytesInUse for mem in
self.c.Win32_PerfFormattedData_PerfOS_Memory()][0])
return pct_in_use
def get_disk_used_pct(self, drive):
for disk in self.c.Win32_LogicalDisk(Name=drive):
            total = float(disk.Size) / 1073741824.0
        for disk in self.c.Win32_LogicalDisk(Name=drive):
            free = float(disk.FreeSpace) / 1073741824.0
used = total - free
pct_used = int((used * 100) / total)
return pct_used
def get_disk_non_idle_pct(self, drive):
pct_non_idle = 100 - (int([disk.PercentIdleTime for disk in
self.c.Win32_PerfFormattedData_PerfDisk_LogicalDisk(Name=drive)][0]))
return pct_non_idle
def ping(host_name):
p = Popen('ping -n 1 ' + host_name, stdout=PIPE)
m = re.search('Average = (.*)ms', p.stdout.read())
if m:
return True
else:
raise Exception
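# Hedged usage sketch (the host name and credentials below are placeholders):
#     if ping('somehost'):
#         fetcher = MetricsFetcher('somehost', 'someuser', 'somepass')
#         print('uptime (hrs):  %s' % fetcher.get_uptime())
#         print('cpu util (%%):  %s' % fetcher.get_cpu())
#         print('free mem (MB): %s' % fetcher.get_mem_mbytes())
#         print('disk used (%%): %s' % fetcher.get_disk_used_pct('C:'))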
|
# -*- coding: utf-8 -*-
"""
The Asyncio Runner
~~~~~~~~~~~~~~~~~~
The asyncio runner is a backend for the sync/async experiment that provides
an asyncio interface that otherwise matches the sync interface defined
elsewhere.
"""
import asyncio
class _ClientHTTPProtocol(asyncio.Protocol):
"""
This is a basic HTTP protocol that is used in the sync/async experiment.
This protocol is used to issue a HTTP request and receive a HTTP response.
This protocol can potentially be quite long-lived if the connection is
being actively re-used.
"""
def __init__(self):
self.transport = None
self.data_handler = None
self._response_future = None
def connection_made(self, transport):
self.transport = transport
def connection_lost(self, exc):
# TODO: We need to be somewhat clever here, but right now do nothing.
pass
def data_received(self, data):
response = self.data_handler.receive_bytes(data)
if response is not None:
self._response_future.set_result(response)
self._response_future = None
@asyncio.coroutine
def send_request(self, request):
# TODO: flow control, listen for watermarks and stuff.
assert self._response_future is None
self._response_future = asyncio.Future()
request_bytes = self.data_handler.request_to_bytes(request)
self.transport.write(request_bytes)
# For asyncio, the request body can be bytes, or an iterable, or a
# coroutine function.
# TODO: Consider restricting to Python 3.5 and allowing async
# iterables.
if isinstance(request.body, bytes):
body = [request.body]
else:
body = request.body
# If the body is a coroutine function, yield from it and then wrap
# the result in a list so we can iterate over it.
if asyncio.iscoroutinefunction(body):
body = yield from body()
body = [body]
for chunk in body:
chunk_bytes = self.data_handler.body_chunk_to_bytes(chunk)
self.transport.write(chunk_bytes)
eof_bytes = self.data_handler.end_of_body()
if eof_bytes:
self.transport.write(eof_bytes)
return (yield from self._response_future)
def _client_http_protocol_factory():
return _ClientHTTPProtocol()
@asyncio.coroutine
def run(request, data_handler, host, port):
"""
    An asyncio request/response sender.
This method takes a request and a data handler. The request codifies the
request to be sent.
The data handler contains most of the intelligence in the code. It is a
complex object that must have two methods:
- one that takes a request and returns a generator that builds bytes.
- one that receives bytes and returns a response object or None.
This returns a coroutine that yields the flow of execution until the
response has arrived.
This does not yet handle:
- 100 continue (not clear we should handle that at all)
- HTTP/2, which has some concerns about this interface
- plenty of error cases!
- connection pooling
- flow control
"""
loop = asyncio.get_event_loop()
_, protocol = yield from loop.create_connection(
_client_http_protocol_factory,
host=host,
port=port,
)
# TODO: We need to be smarter about this. Connection pooling will need to
# do this too.
protocol.data_handler = data_handler
return (yield from protocol.send_request(request))
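# ---------------------------------------------------------------------------
# Hedged usage sketch. The request and data-handler types used by this
# experiment live elsewhere; ``_SketchRequest`` and ``_SketchHandler`` below
# are assumed shapes written only to illustrate the two-method contract that
# the ``run`` docstring describes, not the project's actual API.
class _SketchRequest(object):
    """Assumed request shape: ``run`` only reads ``request.body``."""
    def __init__(self, body=b''):
        self.body = body


class _SketchHandler(object):
    """Minimal stand-in for the data handler contract (assumed shape)."""
    def request_to_bytes(self, request):
        # Serialize the request line and headers into bytes.
        return b'GET / HTTP/1.1\r\nHost: example.com\r\nConnection: close\r\n\r\n'

    def body_chunk_to_bytes(self, chunk):
        # Pass body chunks through unchanged.
        return chunk

    def end_of_body(self):
        # Nothing extra to send after the body in this sketch.
        return b''

    def receive_bytes(self, data):
        # A real handler would buffer until a complete response is parsed and
        # return None until then; the sketch treats any received bytes as "done".
        return data


def _sketch_main():  # illustration only, not part of the experiment
    loop = asyncio.get_event_loop()
    response = loop.run_until_complete(
        run(_SketchRequest(), _SketchHandler(), 'example.com', 80)
    )
    print(response)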
|
# coding: utf-8
# My classes live here
from chijiuhua import load_json, to_json, to_mongo, load_mongo
class Book():
    books = [] # Book objects
books_id = []
books_del_id = []
def __init__(self, name, price, shuliang, is_del=0, **kwargs):
        self._id = len(self.books)+1 # the book id is the current book count + 1
        self.name = name
        self.price = price
        self.shuliang = shuliang
        self.is_del = is_del # whether the book has been deleted
self.add_s(self)
@classmethod
def add_s(cls, self):
'''
        cls: the class (passed in automatically)
        self: the instance currently being added
        Store the book on Book.books so it can be read back later
'''
cls.books.append(self)
if self.is_del:
cls.books_del_id.append(self._id)
else:
cls.books_id.append(str(self._id))
        # save the data to the database as soon as a book is added
        # TODO: also persist to the database when loading
# cls.save_book()
@classmethod
def new1(cls):
        '''Create a book'''
        print('The book id will be generated automatically')
        name = input('Enter the book name: ').strip()
        price = input('Enter the book price: ').strip()
        if not price.isdigit():
            # TODO: still has issues
            print('Invalid price input')
            return
        if int(price) < 0:
            print('The price cannot be lower than 0')
            return
        shuliang = input('Enter the book quantity: ').strip()
        if not shuliang.isdecimal():
            print('Invalid quantity input')
            return
        if int(shuliang) < 0:
            print('The quantity cannot be lower than 0')
            return
return cls(name, int(price), int(shuliang))
@classmethod
def del1(cls):
        '''Delete a book'''
        while 1:
            cls.looks()
            in_book = input('Enter the id of the book to delete (q to quit): ').strip()
            if in_book.lower() == 'q':
                return
            if not in_book.isdigit():
                print('Invalid input')
                continue
            in_book = int(in_book)
            for i in cls.books:
                if in_book == i._id:
                    i.is_del = 1
                    print('Deleted successfully')
                    cls.del_s(in_book)
                    return
            print('Book not found')
@classmethod
def find(cls, _id):
'''
        Check whether the specified book exists
        _id: the id (number) of the book; may also be a list of ids
        return: the matching book object(s) if found, otherwise None or an empty list
'''
if isinstance(_id, list):
book_l1 = []
for book_id in _id:
for book in cls.books:
if book._id == book_id:
book_l1.append(book)
break
return book_l1
for book in cls.books:
if book._id == _id:
return book
@classmethod
def del_s(cls, _id):
'''
        Remove the book from the current book lists
        _id: the id of the book to remove
        '''
        # cls.books.pop(_id-1) # !if this is removed too, the book is also deleted from the database
cls.books_id.pop(_id-1)
@classmethod
def load(cls):
'''
        Load books from the database; this should run when the program starts
'''
# books = load_mongo(cls.__name__)
books = load_json(cls.__name__)
for book in books:
# if book.get('is_del'):
# continue
b1 = cls(**book)
return
@classmethod
def save(cls):
'''
        Save every book held by the class to the database
'''
l1 = []
for book in cls.books:
# print(book)
l1.append(book.get_data())
# to_mongo(l1, cls.__name__)
to_json(l1, cls.__name__)
        print(cls.__name__, 'saved successfully')
def get_data(self):
return {
'_id':self._id,
'name':self.name,
'price':self.price,
'shuliang':self.shuliang,
'is_del':self.is_del
}
@classmethod
def looks(cls, is_del=0):
'''
        List all current books and print them
        '''
        print('-'*90)
        print('Book id\tBook name\tUnit price\tQuantity\t'.expandtabs(21))
for book in Book.books:
if is_del == book.is_del:
print(f'{book._id}\t{book.name}\t{book.price}\t{book.shuliang}\t'.expandtabs(24))
print('-'*90)
class Dingdan():
    dingdans = [] # Order objects
dingdans_id = []
def __init__(self, book_name, book_shuliang, zongjia, **kwargs) -> None:
self._id = len(Dingdan.dingdans) + 1
self.book_name = book_name
self.book_shuliang = book_shuliang
self.zongjia = zongjia
self.add_s(self)
@classmethod
def add_s(cls, self):
        '''Add the order to the list'''
cls.dingdans.append(self)
cls.dingdans_id.append(self._id)
@classmethod
def new1(cls):
        '''Create an order'''
        print('The order id will be generated automatically')
        Book.looks()
        while 1:
            # this loop keeps going until the book is found
            while 1:
                # this loop keeps going until the input is valid
                book_id = input('Enter the book id (q to quit): ')
                if book_id.lower() == 'q':
                    return
                if book_id.isdecimal():
                    book_id = int(book_id)
                    if book_id < 0:
                        print('The id cannot be less than 0')
                        continue
                    break
                print('Invalid input')
            book = Book.find(book_id)
            if not book:
                print('The book does not exist')
                continue
            print(f'Book: {book.name}')
            break
        # TODO look up the book
        while 1:
            # keep going until the input is valid
            count = input(f'Enter the book quantity (current stock: {book.shuliang}): ')
            if count.isdecimal():  # count is an integer string (no decimals)
                count = int(count)
                if count <= book.shuliang:
                    book.shuliang -= count
                    break
                print("I don't have that many books in stock...")
            else:
                print('Invalid input')
        jiaqian = book.price * count
        print(f'The total price is {jiaqian}')
return cls(book.name, count, jiaqian)
@classmethod
def load(cls):
'''
        Load orders from the database; this should run when the program starts
'''
dingdans = load_json(cls.__name__)
for dingdan in dingdans:
# if book.get('is_del'):
# continue
b1 = cls(**dingdan)
return
@classmethod
def looks(cls):
'''
        List all current orders and print them
        '''
        print('-'*90)
        print('Order id\tBook name\tQuantity\tTotal price\t'.expandtabs(21))
for dingdan in Dingdan.dingdans:
print(f'{dingdan._id}\t{dingdan.book_name}\t{dingdan.book_shuliang}\t{dingdan.zongjia}\t'.expandtabs(24))
print('-'*90)
def get_data(self):
return {
'_id': self._id,
'book_name': self.book_name,
'book_shuliang': self.book_shuliang,
'zongjia': self.zongjia,
}
@classmethod
def save(cls):
        '''Save the orders'''
l1 = []
for dingdan in cls.dingdans:
l1.append(dingdan.get_data())
# to_mongo(l1, cls.__name__)
to_json(l1, cls.__name__)
        print(cls.__name__, 'saved successfully')
def select_func():
l1 = []
for count,func in Book.book_funcs:
l1.append([str(count), func])
def save_all(*args):
'''
    *args: pass in the classes to save; everything under each class is saved
'''
if not args:
args = [Book, Dingdan]
for cls in args:
cls.save()
def load_all(*args):
'''
    *args: pass in the classes to load; everything under each class is loaded
'''
if not args:
args = [Book, Dingdan]
for cls in args:
cls.load()
funcs = [
    [Book.looks, ['List all books']],
    [Book.new1, ['Create a book']],
    [Book.del1, ['Delete a book']],
    [Dingdan.looks, ['List all orders']],
    [Dingdan.new1, ['Create an order (buy books)']],
    [save_all, ['Save']],
]
if __name__ == '__main__':
# Book.load()
# load_all(Book)
load_all(Book, Dingdan)
# x = Book.find_book([1,2121])
    # if not x: print('empty')
    # print(x)
    # a1 = Book('Python Basics', 10.99, 300)
    # a2 = Book('Python Advanced', 20.00, 30)
    # a3 = Book('Java Basics', 30.00, 90)
# # Book.look_books()
# Book.new_book()
# Book.look_books(1)
# Book.del_book()
# lfr
# Dingdan.new1()
# Dingdan.
Book.looks()
Dingdan.looks()
# Dingdan.new1()
# Dingdan.looks()
# print()
# test_gitee
# Book.save()
save_all(Book, Dingdan)
# Book.look_books()
# Book.del_books()
|
from lib.database import connection
from lib.types import TYPES, CASTING_TABLE
from lib.defaults import DEFAULTS
from lib.operators import OPERATORS
from lib.output import Output
from lib.funcs import FUNCTIONS, FUNCTIONS_LIST
from lib import editor
from resources import Verb, ListVerb
from resources.builtin import resource as builtin_resource, field as builtin_field
from sqlalchemy import Table, Column, Integer, select
import logging
def create_table_def(name, columns=None):
resource = builtin_resource.get_resource(name=name)
table = Table(
resource.table_name,
connection.metadata,
Column("id", Integer()),
extend_existing=True,
)
for field in builtin_field.get_fields(resource=name):
if columns and field.name not in columns:
continue
table.append_column(column=Column(field.name, TYPES.get(field.type)()))
return table
def get_value(options, field):
if options[field.name] is None:
if field.default in DEFAULTS:
value = DEFAULTS[field.default]()
else:
value = field.default
elif options[field.name] == editor.ARG:
value = editor.open_editor(field_name=field.name)
else:
value = options[field.name]
return value
def build_execution_chain(query, table, options):
for option in vars(options):
if "__" not in option:
logging.debug("Discarding %s for execution chain", option)
column, op = option.split("__")
if op not in FUNCTIONS_LIST:
continue
query = FUNCTIONS[op](
query=query, value=getattr(options, option), columns=table.columns
)
return query
def build_query(table, options):
all_columns = [c.name for c in table.columns]
query = []
for option in vars(options):
if "__" not in option:
logging.debug("Discarding %s for query", option)
column, op = option.split("__")
if op not in OPERATORS:
continue
if column not in all_columns:
logging.error("Unknown field %s", column)
exit(1)
query.append(OPERATORS[op](getattr(table.c, column), getattr(options, option)))
return query
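# Illustrative note (assumption: the concrete operator/function names live in
# lib.operators.OPERATORS and lib.funcs.FUNCTIONS and are not shown here). An
# unknown CLI option such as "title__eq" with value "foo" is split into column
# "title" and suffix "eq"; build_query() turns it into roughly
# OPERATORS["eq"](table.c.title, "foo") for the WHERE clause, while suffixes
# that name a function are handled by build_execution_chain() instead.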
class Delete(Verb):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def build_query(self, table):
return build_query(table, self.unknown_options)
def execute(self, **options):
resource = builtin_resource.get_resource(name=options["resource"])
table_def = create_table_def(name=resource.table_name)
query = self.build_query(table=table_def)
connection.engine.execute(table_def.delete().where(*query))
class Update(Verb):
def __init__(self, argv, parent_args):
super().__init__(argv, parent_args=parent_args)
args = parent_args.parse_known_args(argv[1:])[0]
self.resource = builtin_resource.get_resource(name=args.resource)
self.fields = builtin_field.get_fields(resource=self.resource.table_name)
self.field_map = {}
for field in self.fields:
self.field_map[field.name] = field
self.add_argument(name=field.name, required=False)
def execute(self, **options):
values = {}
table_def = create_table_def(name=self.resource.table_name)
for field in self.fields:
if field.name not in options:
continue
if options[field.name] is None:
continue
value = get_value(options, field)
values[field.name] = CASTING_TABLE[field.type].cast_to(value)
query = build_query(table=table_def, options=self.unknown_options)
logging.debug(
"Updating resource %s with values: %s", self.resource.table_name, values
)
connection.engine.execute(table_def.update(*query).values(**values))
class List(ListVerb):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.add_argument(name="fields", required=False)
def build_query(self, table):
return build_query(table, self.unknown_options)
def execute(self, **options):
if options.get("fields"):
fields = options.get("fields").split(",")
else:
fields = None
resource = builtin_resource.get_resource(name=options["resource"])
table_def = create_table_def(name=resource.table_name)
columns = []
if fields:
for f in fields:
if hasattr(table_def.c, f):
columns.append(getattr(table_def.c, f))
else:
fields = [c.name for c in table_def.columns]
columns = [c for c in table_def.columns]
query = build_query(table=table_def, options=self.unknown_options)
chain = build_execution_chain(
query=select(*columns).where(*query),
table=table_def,
options=self.unknown_options,
)
output = Output(headers=fields)
for row in connection.engine.execute(chain).all():
output.add_row(row)
output.render(mode=options["output"])
class Create(Verb):
def __init__(self, argv, parent_args):
super().__init__(argv=argv, parent_args=parent_args)
args = parent_args.parse_known_args(argv[1:])[0]
self.resource = builtin_resource.get_resource(name=args.resource)
self.fields = builtin_field.get_fields(resource=self.resource.table_name)
self.field_map = {}
for field in self.fields:
self.field_map[field.name] = field
self.add_argument(name=field.name, required=field.default is None)
def execute(self, **options):
values = {}
for field in self.fields:
if field.name not in options:
continue
value = get_value(options, field)
values[field.name] = CASTING_TABLE[field.type].cast_to(value)
logging.debug(
"Creating resource %s with values: %s", self.resource.table_name, values
)
table_def = create_table_def(name=self.resource.table_name)
connection.engine.execute(table_def.insert().values(**values))
VERBS = {"create": Create, "list": List, "delete": Delete, "update": Update}
|
from .imdb import load_imdb
from .mnist import load_mnist
from .small_parallel_enja import load_small_parallel_enja
|
import py, os, sys
import numpy as np
sys.path = [os.path.join(os.pardir, 'python')] + sys.path
class TestNOMAD:
def setup_class(cls):
import SQNomad, logging
SQNomad.log.setLevel(logging.DEBUG)
def test01_simple_example(self):
"""Read access to instance public data and verify values"""
import SQNomad
def f_easy_simple(x):
from math import sin
fv = np.inner(x, x)
fv *= 1 + 0.1*sin(10*(x[0]+x[1]))
return fv
bounds = np.array([[-1, 1], [-1, 1]], dtype=float)
budget = 40
x0 = np.array([0.5, 0.5])
result, history = SQNomad.minimize(f_easy_simple, x0, bounds, budget, SEED=1)
# problem is symmetric, so values may have switched: just check the sum
assert type(result.optpar) == np.ndarray
assert np.round(sum(result.optpar)-sum([2.77555756e-17, -0.]), 8) == 0
|
# Reference: https://en.wikipedia.org/wiki/Bitwise_operation
# Notice: The following inequality must be satisfied for both shifts: n >= 0
from math import log
'''
@function left_shift - Finds all the variables within a left bit shift operation
@param k - The value of the left side (the value being shifted) of the bitwise operation
@param n - The value of the right side (the shift amount) of the bitwise operation
@param y - The output of the bitwise operation
@const l - The constant found in both shift equations to find the @param n
Operation must have the form:
k << n = y
'''
def left_shift(k=None, n=None, y=None):
l = log(2)
# If k is not given find the value of k
if k is None:
k = y / (2 ** n)
return k
# If n is not given find the value of n
elif n is None:
n = log(y/k) / l
return n
# If y is not given find the value of y
elif y is None:
y = k * (2 ** n)
return y
'''
@function right_shift - Finds all the variables within a right bit shift operation
@param k - The value of the left side (the value being shifted) of the bitwise operation
@param n - The value of the right side (the shift amount) of the bitwise operation
@param y - The output of the bitwise operation
@const l - The constant found in both shift equations to find the @param n
Operation must have the form:
k >> n = y
'''
def right_shift(k=None, n=None, y=None):
l = log(2)
# If k is not given find the value of k
if k is None:
k = y * (2 ** n)
return k
# If n is not given find the value of n
elif n is None:
n = log(k/y) / l
return n
# If y is not given find the value of y
elif y is None:
y = k / (2 ** n)
return y
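# Worked examples (added for illustration; the expected values follow directly
# from the definitions above, since 3 << 4 == 48 and 48 >> 4 == 3):
if __name__ == '__main__':
    print(left_shift(k=3, n=4))    # 48   -> y for 3 << 4
    print(left_shift(k=3, y=48))   # ~4.0 -> n such that 3 << n == 48
    print(right_shift(k=48, n=4))  # 3.0  -> y for 48 >> 4
    print(right_shift(y=3, n=4))   # 48   -> k such that k >> 4 == 3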
|
import argparse
import pickle
import time
import cv2
# Local
from islib.hashutils import dhash, convert_hash
# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-q", "--query", required=True, type=str,
help="Path to input query image")
ap.add_argument("-b", "--data", required=True, type=str,
help="Path to database to search input image")
ap.add_argument("-d", "--distance", type=int, default=10,
help="Maximum hamming distance")
args = vars(ap.parse_args())
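# Example invocation (the script and file names below are placeholders for
# illustration; the two pickle files are whatever was written next to --data):
#   python search.py -q queries/example.jpg -b hashdb -d 10
# which loads "hashdb.tree.pickle" and "hashdb.dict.pickle" and searches for
# images within a Hamming distance of 10 of the query image's dhash.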
# Load the VP-Tree and hashes dictionary
print("[INFO] Loading VP-Tree and hashes...")
tree = pickle.loads(open(args["data"] + ".tree.pickle", "rb").read())
hashes = pickle.loads(open(args["data"] + ".dict.pickle", "rb").read())
# Load the input query image
image = cv2.imread(args["query"])
cv2.imshow("Query image", image)
# Compute the hash for the query image, then convert it
queryHash = convert_hash(dhash(image))
# Perform the search
print("[INFO] Performing search...")
start = time.time()
results = tree.get_all_in_range(queryHash, args["distance"])
results = sorted(results)
end = time.time()
print("[INFO] Search took {} seconds".format(end - start))
# Loop over the results
for (d, h) in results:
# Grab all image paths in our dataset with the same hash
result_paths = hashes.get(h, [])
print("[INFO] {} total image(s) with d: {}, h: {}".format(len(result_paths), d, h))
# Loop over the result paths
for p in result_paths:
# Load the result image and display it to our screen
result = cv2.imread(p)
cv2.imshow(p, result)
cv2.waitKey(0) # Esc
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: snap.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from etcetra.grpc_api import gogo_pb2 as gogo__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\nsnap.proto\x12\x06snappb\x1a\ngogo.proto\"+\n\x08snapshot\x12\x11\n\x03\x63rc\x18\x01 \x01(\rB\x04\xc8\xde\x1f\x00\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x42\x10\xc8\xe2\x1e\x01\xe0\xe2\x1e\x01\xd0\xe2\x1e\x01\xc8\xe1\x1e\x00')
_SNAPSHOT = DESCRIPTOR.message_types_by_name['snapshot']
snapshot = _reflection.GeneratedProtocolMessageType('snapshot', (_message.Message,), {
'DESCRIPTOR' : _SNAPSHOT,
'__module__' : 'snap_pb2'
# @@protoc_insertion_point(class_scope:snappb.snapshot)
})
_sym_db.RegisterMessage(snapshot)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\310\342\036\001\340\342\036\001\320\342\036\001\310\341\036\000'
_SNAPSHOT.fields_by_name['crc']._options = None
_SNAPSHOT.fields_by_name['crc']._serialized_options = b'\310\336\037\000'
_SNAPSHOT._serialized_start=34
_SNAPSHOT._serialized_end=77
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8
from django.apps import AppConfig
class ContributionsDjangoConfig(AppConfig):
name = "contributions_django"
|
from blackboard import BlackBoardContent, BlackBoardClient, BlackBoardAttachment, BlackBoardEndPoints, \
BlackBoardCourse, BlackBoardInstitute
import os
import re
import requests
import datetime
import xmltodict
import argparse
import sys
import json
import getpass
import main
def test():
args = main.handle_arguments(True)
# Institute Data
print("Dumping Institute Properties...")
institute_data = dict()
institute_vars = vars(args.institute)
for item in institute_vars:
institute_data[item] = institute_vars[item]
print("Dumped Institute Properties...")
# Client Data
client_data = dict()
client = BlackBoardClient(username=args.username, password=args.password, site=args.site, save_location=args.location, institute=args.institute)
attempt = client.login()
print(f"Client Login {'Successful' if attempt[0] else 'Failure'}...\nDumping Client Properties...")
client_data["public_api_available"] = client.public_endpoint_available()
client_data["login_endpoint"] = attempt[1].url
client_data["login_status_code"] = attempt[1].status_code
client_data["login_response"] = attempt[1].text
client_data["successful_login"] = attempt[0]
client_vars = vars(client)
for item in client_vars:
if item not in ('_BlackBoardClient__password', 'session', 'institute', 'api_version', 'thread_pool'):
client_data[item] = client_vars[item]
print("Dumped Client Properties...")
# Get Parent Course Data
course_data = {
'endpoint': '',
'status_code': '',
'response': '',
'courses': []
}
def get_courses():
"""
Get all Available Course Information for the Client and Record Details
"""
courses_request = client.send_get_request(BlackBoardEndPoints.get_user_courses(client.user_id))
courses = courses_request.json()
course_data['endpoint'] = courses_request.url
course_data['status_code'] = courses_request.status_code
course_data['response'] = courses
if "results" in courses:
for course in courses["results"]:
try:
course_request = client.send_get_request(BlackBoardEndPoints.get_course(course["courseId"]))
course = course_request.json()
bbcourse = BlackBoardCourse(client, course)
course_vars = vars(bbcourse)
course_sub_data = dict()
course_sub_data["course_endpoint"] = course_request.url
course_sub_data['status_code'] = course_request.status_code
for item in course_vars:
course_sub_data[item] = str(course_vars[item])
course_data['courses'].append(course_sub_data)
except Exception as e:
course_data['courses'].append({'error': str(e)})
print("Getting Course Data...")
get_courses()
print("Completed Course Data...")
dumps = {
'institute': institute_data,
'client': client_data,
'courses': course_data,
}
print("Preparing to Dump Debug...")
with open(os.path.abspath(os.path.join(client.base_path, "dump.json")), 'w+') as file:
print(f"Writing File: \"{file.name}\"...")
json.dump(dumps, file)
print("Done...")
if __name__ == "__main__":
test()
|