code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
---|---|---|---|---|
'''simple docstring'''
def solution(limit=1_000_000):
    """Return the sum of Euler's totient phi(n) for 2 <= n <= limit - 1 range.

    Builds the set of primes below ``limit`` with a sieve of Eratosthenes,
    then applies the product formula phi(n) = n * prod(1 - 1/p) over every
    prime p dividing n.
    """
    # Sieve over the odd numbers; 2 is added separately.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        # Multiples of p from p*p upward are composite; step must be p.
        primes.difference_update(set(range(p * p, limit, p)))
    # phi[n] starts at n and is scaled by (1 - 1/p) for each prime p | n.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 9 |
'''simple docstring'''
# Imports
import numpy as np
class a__:
    """Compute vegetation indices (NDVI family and related) from the spectral
    band matrices of a multispectral image.

    Bands are stored as instance attributes (``red``, ``green``, ``blue``,
    ``redEdge``, ``nir``); they may be numpy arrays or scalars, since every
    index method is a pure elementwise arithmetic expression over them.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """Update any subset of the stored band matrices; always returns True."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Compute the index named ``index`` (e.g. "NDVI"), optionally updating
        bands first.  Prints a message and returns False for an unknown name."""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print('''Index not in the list!''')
            return False

    def arv12(self):
        # Atmospherically Resistant Vegetation Index 2
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        # Canopy Chlorophyll Content Index
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        # Chlorophyll Vegetation Index
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        # Green Leaf Index
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        # Normalized Difference Vegetation Index
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        # Blue-normalized difference vegetation index
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        # Green NDVI
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        # Green-Blue NDVI
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        # Green-Red NDVI
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        # Red-Blue NDVI
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        # Pan NDVI
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        # Adjusted Transformed Soil-Adjusted VI
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        # Blue-Wide Dynamic Range Vegetation Index
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        # Chlorophyll Index - Green
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        # Chlorophyll Index - Red Edge
        return (self.nir / self.redEdge) - 1

    def ci(self):
        # Coloration Index
        return (self.red - self.blue) / self.red

    def ctvi(self):
        # Corrected Transformed Vegetation Index
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        # Green Difference Vegetation Index
        return self.nir - self.green

    def evi(self):
        # Enhanced Vegetation Index
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        # Global Environment Monitoring Index
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        # Green Optimized Soil Adjusted Vegetation Index
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        # Green Soil Adjusted Vegetation Index
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))

    def ivi(self, a=None, b=None):
        # Ideal Vegetation Index; a = intercept, b = soil-line gradient
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        # Infrared Percentage Vegetation Index
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        # Intensity
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        # Ratio Vegetation Index
        return self.nir / self.red

    def mrvi(self):
        # Modified Ratio Vegetation Index
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        # Modified Soil Adjusted Vegetation Index
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        # Normalized Green-Red Difference Index
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        # Redness Index
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        # Saturation: (max - min) / max over the RGB bands
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        # Shape index (IF)
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        # Difference Vegetation Index (simple NIR/red ratio form)
        return self.nir / self.red

    def tvi(self):
        # Transformed Vegetation Index
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        # Normalized Difference Red-Edge
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 9 | 1 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
_UpperCAmelCase : List[Any] = 3
def primitive_root(p_val):
    """Return a random primitive-root candidate modulo the prime ``p_val``.

    Candidates g with g**2 == 1 (mod p) or g**p == 1 (mod p) are rejected;
    everything else in [3, p) is accepted.
    """
    print('''Generating primitive root of p''')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
def generate_key(key_size):
    """Generate an ElGamal (public_key, private_key) pair of ``key_size`` bits.

    public_key  = (key_size, g, g^d mod p inverse, p)
    private_key = (key_size, d)
    """
    print('''Generating prime p...''')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name, key_size):
    """Write ``{name}_pubkey.txt`` and ``{name}_privkey.txt``.

    Aborts via sys.exit() if either file already exists, so existing keys
    are never silently overwritten.
    """
    if os.path.exists(F"""{name}_pubkey.txt""") or os.path.exists(F"""{name}_privkey.txt"""):
        print('''\nWARNING:''')
        print(
            F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            '''Use a different name or delete these files and re-run this program.''')
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(F"""\nWriting public key to file {name}_pubkey.txt...""")
    with open(F"""{name}_pubkey.txt""", '''w''') as fo:
        fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""")
    print(F"""Writing private key to file {name}_privkey.txt...""")
    with open(F"""{name}_privkey.txt""", '''w''') as fo:
        fo.write(F"""{private_key[0]},{private_key[1]}""")
def main():
    """Generate the demo 'elgamal' 2048-bit key pair on disk."""
    print('''Making key files...''')
    make_key_files('''elgamal''', 2_048)
    print('''Key files generation successful''')


if __name__ == "__main__":
    main()
| 9 |
'''simple docstring'''
from math import sqrt
def is_prime(number):
    """Trial-division primality test for a non-negative int."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n):
    """Sieve of Eratosthenes: return all primes from 2 up to n (inclusive)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes: zero out every multiple of a survivor
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n):
    """Return all primes from 2 up to n (inclusive), testing each candidate."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number):
    """Return the prime factorization of ``number`` as a list with
    multiplicity; [0] for 0 and [1] for 1, [number] when prime."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # NOTE(review): true division — quotient becomes float here;
                # the loop still terminates because division is exact.
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number):
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number):
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number):
    """Return True when ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0
def is_odd(number):
    """Return True when ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0
def goldbach(number):
    """Goldbach decomposition: return two primes whose sum is ``number``
    (number must be even and > 2)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number1, number2):
    """Euclidean algorithm: greatest common divisor of two non-negative ints."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def kg_v(number1, number2):
    """Least common multiple (German 'kgV') via prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1: shared primes contribute the larger
    # multiplicity, unshared ones their own multiplicity.
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2: primes only in number2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n):
    """Return the n-th prime number, 0-based (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1, p_number_2):
    """Return the primes strictly between the primes p_number_1 and p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n):
    """Return all positive divisors of n (including 1 and n), ascending."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number):
    """Return True when ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator, denominator):
    """Return (numerator, denominator) reduced to lowest terms."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n):
    """Return n! for n >= 0 (0! == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n):
    """Iterative Fibonacci; with this seeding fib(0) == fib(1) == 1,
    fib(2) == 2, fib(5) == 8 (i.e. the classic F(n+1))."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
| 9 | 1 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    """Parse the fine-tuning CLI arguments for the code-complexity task."""
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_ckpt''', type=str, default='''microsoft/unixcoder-base-nine''')
    parser.add_argument('''--num_epochs''', type=int, default=5)
    parser.add_argument('''--batch_size''', type=int, default=6)
    parser.add_argument('''--gradient_accumulation_steps''', type=int, default=1)
    parser.add_argument('''--freeze''', type=bool, default=True)
    parser.add_argument('''--learning_rate''', type=float, default=5E-4)
    parser.add_argument('''--seed''', type=int, default=0)
    parser.add_argument('''--lr_scheduler_type''', type=str, default='''cosine''')
    parser.add_argument('''--num_warmup_steps''', type=int, default=1_0)
    parser.add_argument('''--weight_decay''', type=float, default=0.01)
    parser.add_argument('''--output_dir''', type=str, default='''./results''')
    return parser.parse_args()
# Shared accuracy metric used by the Trainer's compute_metrics hook.
metric = load("""accuracy""")


def compute_metrics(eval_pred):
    """Trainer hook: argmax the logits and score accuracy against labels."""
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    """Callback that, whenever evaluation runs, also evaluates on the
    training set (metric_key_prefix='train') so train metrics get logged."""

    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            # copy the control object so the extra evaluate() call cannot
            # mutate the Trainer's scheduling state
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix='''train''')
            return control_copy
def main():
    """Fine-tune a sequence classifier on codeparrot/codecomplex (7 classes)."""
    args = get_args()
    set_seed(args.seed)
    dataset = load_dataset('''codeparrot/codecomplex''', split='''train''')
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test['''test'''].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            '''train''': train_test['''train'''],
            '''test''': test_validation['''train'''],
            '''valid''': test_validation['''test'''],
        })
    print('''Loading tokenizer and model''')
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        # freeze the encoder; only the classification head is trained
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation['''train''']['''complexity'''])))

    def tokenize(example):
        # tokenize the source code and map the complexity string to a class id
        inputs = tokenizer(example['''src'''], truncation=True, max_length=1_024)
        label = labels.str2int(example['''complexity'''])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation['''train'''].column_names, )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy='''epoch''', save_strategy='''epoch''', logging_strategy='''epoch''', per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model='''accuracy''', run_name='''complexity-java''', report_to='''wandb''', )
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets['''train'''], eval_dataset=tokenized_datasets['''valid'''], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
    print('''Training...''')
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
| 9 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Dict = """true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return (model, ddp_model, dataloader) for the regression test task."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    """Build the tokenized GLUE/MRPC validation dataloader for the test."""
    tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''')
    dataset = load_dataset('''glue''', '''mrpc''', split='''validation''')

    def tokenize_function(examples):
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # pad dynamically or to a fixed max_length depending on the test mode
        if use_longest:
            return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''max_length''', max_length=1_28, return_tensors='''pt''')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Return ({'ddp': ..., 'no': ...} setup dict, accelerator) for MRPC."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        '''hf-internal-testing/mrpc-bert-base-cased''', return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run the model over the dataloader and return gathered (logits, targets)."""
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        # gather across processes so every rank sees the full set
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check gather_for_metrics yields exactly num_samples predictions."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}"""
def test_mrpc(dispatch_batches=False, split_batches=False):
    """Verify distributed (gathered) metrics match the single-process baseline."""
    metric = evaluate.load('''glue''', '''mrpc''')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['''no''']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['''labels'''])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup['''ddp''']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['''labels''']
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def main():
    """Exercise gather_for_metrics across all split/dispatch combinations."""
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('''**Testing gather_for_metrics**''')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test torch metrics**''')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test last batch is not dropped when perfectly divisible**''')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 9 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a__ :
    """Model tester for the TensorFlow LayoutLMv3 classes.

    Apparent intent: build a small random config plus synthetic text ids,
    bounding boxes and pixel values, and run shape checks on the base model
    and each task head.

    NOTE(review): this source is machine-obfuscated. ``__init__`` repeats the
    parameter name ``__lowercase`` (a SyntaxError in real Python), and locals
    are assigned to the throwaway name ``__lowerCAmelCase`` while later lines
    read the names the original code used (``parent``, ``bbox``, ``config``,
    ...). Comments below describe the apparent intent only — confirm against
    the upstream ``TFLayoutLMv3ModelTester``.
    """
    def __init__(self , __lowercase , __lowercase=2 , __lowercase=3 , __lowercase=4 , __lowercase=2 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=99 , __lowercase=36 , __lowercase=2 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=16 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=6 , __lowercase=6 , __lowercase=3 , __lowercase=4 , __lowercase=None , __lowercase=10_00 , ):
        # Store every hyper-parameter on the tester instance (intent; the
        # right-hand names are undefined in this obfuscated form).
        __lowerCAmelCase = parent
        __lowerCAmelCase = batch_size
        __lowerCAmelCase = num_channels
        __lowerCAmelCase = image_size
        __lowerCAmelCase = patch_size
        __lowerCAmelCase = is_training
        __lowerCAmelCase = use_input_mask
        __lowerCAmelCase = use_token_type_ids
        __lowerCAmelCase = use_labels
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = type_vocab_size
        __lowerCAmelCase = type_sequence_label_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = coordinate_size
        __lowerCAmelCase = shape_size
        __lowerCAmelCase = num_labels
        __lowerCAmelCase = num_choices
        __lowerCAmelCase = scope
        __lowerCAmelCase = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        __lowerCAmelCase = text_seq_length
        __lowerCAmelCase = (image_size // patch_size) ** 2 + 1
        __lowerCAmelCase = self.text_seq_length + self.image_seq_length
    def _snake_case (self ):
        # Intent: build random input ids and bounding boxes, normalise box
        # coordinates so x0 <= x1 and y0 <= y1, then create optional masks,
        # labels and the small LayoutLMv3 config.
        __lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        __lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        __lowerCAmelCase = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    # Swap y-coordinates when the box is upside down.
                    __lowerCAmelCase = bbox[i, j, 3]
                    __lowerCAmelCase = bbox[i, j, 1]
                    __lowerCAmelCase = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    # Swap x-coordinates when the box is mirrored.
                    __lowerCAmelCase = bbox[i, j, 2]
                    __lowerCAmelCase = bbox[i, j, 0]
                    __lowerCAmelCase = tmp_coordinate
        __lowerCAmelCase = tf.constant(__lowercase )
        __lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowerCAmelCase = None
        if self.use_input_mask:
            __lowerCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
        __lowerCAmelCase = None
        if self.use_token_type_ids:
            __lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        if self.use_labels:
            __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        __lowerCAmelCase = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
        # Intent: check last_hidden_state shapes for text+image, text-only and
        # image-only forward passes of the base model.
        __lowerCAmelCase = TFLayoutLMvaModel(config=__lowercase )
        # text + image
        __lowerCAmelCase = model(__lowercase , pixel_values=__lowercase , training=__lowercase )
        __lowerCAmelCase = model(
            __lowercase , bbox=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , training=__lowercase , )
        __lowerCAmelCase = model(__lowercase , bbox=__lowercase , pixel_values=__lowercase , training=__lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        __lowerCAmelCase = model(__lowercase , training=__lowercase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        __lowerCAmelCase = model({'''pixel_values''': pixel_values} , training=__lowercase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
        # Intent: sequence-classification head returns (batch, num_labels) logits.
        __lowerCAmelCase = self.num_labels
        __lowerCAmelCase = TFLayoutLMvaForSequenceClassification(config=__lowercase )
        __lowerCAmelCase = model(
            __lowercase , bbox=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , training=__lowercase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
        # Intent: token-classification head returns per-text-token logits.
        __lowerCAmelCase = self.num_labels
        __lowerCAmelCase = TFLayoutLMvaForTokenClassification(config=__lowercase )
        __lowerCAmelCase = model(
            __lowercase , bbox=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , training=__lowercase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
        # Intent: QA head returns start/end logits of length seq_length.
        __lowerCAmelCase = 2
        __lowerCAmelCase = TFLayoutLMvaForQuestionAnswering(config=__lowercase )
        __lowerCAmelCase = model(
            __lowercase , bbox=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , training=__lowercase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def _snake_case (self ):
        # Intent: repackage prepare_config_and_inputs() into the kwargs dict
        # used by the common test mixin.
        __lowerCAmelCase = self.prepare_config_and_inputs()
        ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) = config_and_inputs
        __lowerCAmelCase = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_tf
class a__ ( __A , __A , unittest.TestCase ):
    """Common-mixin test case for the TF LayoutLMv3 model classes.

    NOTE(review): machine-obfuscated source — several methods repeat the
    parameter name ``__lowercase`` (a SyntaxError) and locals are assigned to
    ``__lowerCAmelCase`` while later lines read the intended names. Comments
    describe apparent intent; confirm against the upstream
    ``TFLayoutLMv3ModelTest``.
    """
    # Model classes exercised by the common tests (empty when TF is absent).
    __UpperCamelCase : Optional[Any] = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task to model-class mapping for pipeline tests.
    __UpperCamelCase : Optional[Any] = (
        {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    __UpperCamelCase : Union[str, Any] = False
    __UpperCamelCase : List[Any] = False
    __UpperCamelCase : Union[str, Any] = False
    def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
        # Pipeline-compatibility hook: never skip any pipeline test.
        return True
    def _snake_case (self , __lowercase , __lowercase , __lowercase=False ):
        # Intent: copy the shared inputs dict and, per model class, tile the
        # inputs for multiple choice and/or add appropriately-shaped labels.
        __lowerCAmelCase = copy.deepcopy(__lowercase )
        if model_class in get_values(__lowercase ):
            __lowerCAmelCase = {
                k: tf.tile(tf.expand_dims(__lowercase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(__lowercase , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(__lowercase ):
                __lowerCAmelCase = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(__lowercase ):
                # QA heads need both start and end positions.
                __lowerCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                __lowerCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(__lowercase ):
                __lowerCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(__lowercase ):
                # Token classification labels cover the text tokens only.
                __lowerCAmelCase = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict
    def _snake_case (self ):
        # setUp: create the model tester and the config tester.
        __lowerCAmelCase = TFLayoutLMvaModelTester(self )
        __lowerCAmelCase = ConfigTester(self , config_class=__lowercase , hidden_size=37 )
    def _snake_case (self ):
        self.config_tester.run_common_tests()
    def _snake_case (self ):
        # Intent: for every model class with hf_compute_loss, check the loss
        # shape when inputs are passed as kwargs, with masked labels, as a
        # dict, and as a positional tuple.
        __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCAmelCase = model_class(__lowercase )
            if getattr(__lowercase , '''hf_compute_loss''' , __lowercase ):
                # The number of elements in the loss should be the same as the number of elements in the label
                __lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , __lowercase , return_labels=__lowercase )
                __lowerCAmelCase = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__lowercase )[0]
                ]
                __lowerCAmelCase = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                __lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , __lowercase , return_labels=__lowercase )
                __lowerCAmelCase = prepared_for_class.pop('''input_ids''' )
                __lowerCAmelCase = model(__lowercase , **__lowercase )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                __lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , __lowercase , return_labels=__lowercase )
                __lowerCAmelCase = prepared_for_class.pop('''input_ids''' )
                if "labels" in prepared_for_class:
                    __lowerCAmelCase = prepared_for_class['''labels'''].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        # -100 is the ignore index of the loss.
                        __lowerCAmelCase = -1_00
                        __lowerCAmelCase = tf.convert_to_tensor(__lowercase )
                        __lowerCAmelCase = model(__lowercase , **__lowercase )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                __lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , __lowercase , return_labels=__lowercase )
                __lowerCAmelCase = model(__lowercase )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                __lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , __lowercase , return_labels=__lowercase )
                # Get keys that were added with the _prepare_for_class function
                __lowerCAmelCase = prepared_for_class.keys() - inputs_dict.keys()
                __lowerCAmelCase = inspect.signature(model.call ).parameters
                __lowerCAmelCase = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                __lowerCAmelCase = {0: '''input_ids'''}
                for label_key in label_keys:
                    __lowerCAmelCase = signature_names.index(__lowercase )
                    __lowerCAmelCase = label_key
                __lowerCAmelCase = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                __lowerCAmelCase = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    __lowerCAmelCase = prepared_for_class[value]
                __lowerCAmelCase = tuple(__lowercase )
                # Send to model
                __lowerCAmelCase = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def _snake_case (self ):
        # Base-model shape check.
        (
            (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) ,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
    def _snake_case (self ):
        # Base-model check for every position-embedding variant.
        (
            (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) ,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __lowerCAmelCase = type
            self.model_tester.create_and_check_model(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
    def _snake_case (self ):
        # Sequence-classification head check.
        (
            (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) ,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
    def _snake_case (self ):
        # Token-classification head check.
        (
            (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) ,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
    def _snake_case (self ):
        # Question-answering head check.
        (
            (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) ,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
    @slow
    def _snake_case (self ):
        # Smoke test: the first pretrained checkpoint loads successfully.
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCAmelCase = TFLayoutLMvaModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )
def __magic_name__( ):
    """Open and return the local COCO fixture image used by the integration tests.

    Returns:
        A ``PIL.Image.Image`` loaded from the repository's test fixtures.
    """
    # Fix: the obfuscated source bound the image to a throwaway name and then
    # returned the undefined name ``image``.
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_tf
class a__ ( unittest.TestCase ):
    """Slow integration test: run the pretrained TF LayoutLMv3 base model on a
    fixture image and verify the hidden-state shape and a slice of values."""
    @cached_property
    def default_image_processor(self ):
        """Image processor with OCR disabled (token ids and boxes are supplied manually).

        Renamed from the obfuscated ``_snake_case`` so that the
        ``self.default_image_processor`` reference in the test below resolves
        (previously both methods shared one name, so the property was shadowed).
        """
        # Fix: the obfuscated source passed the undefined name ``__lowercase``.
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
    @slow
    def _snake_case (self ):
        """Forward pass of microsoft/layoutlmv3-base; checks shape and a 3x3 slice."""
        model = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='''tf''' ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        # One batch of two boxes, shape (1, 2, 4).
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        # verify the logits
        expected_shape = (1, 1_99, 7_68)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
| 9 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
# NOTE(review): the obfuscation reuses the name ``_UpperCAmelCase`` for both
# the logger and the archive map below, so the second assignment overwrites
# the first.
_UpperCAmelCase : str = logging.get_logger(__name__)
# Map from pretrained RoBERTa checkpoint names to their hosted config URLs.
_UpperCAmelCase : str = {
    """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
    """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
    """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
    """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
    """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
    """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class a__ ( __A ):
    """RoBERTa model configuration (model_type ``roberta``).

    NOTE(review): the obfuscated ``__init__`` repeats the parameter name
    ``__lowercase`` (a SyntaxError in real Python), and the body assigns to
    the throwaway name ``__lowerCAmelCase`` from right-hand names
    (``vocab_size``, ``hidden_size``, ...) that are never bound here.
    Documented as-is; confirm intent against the upstream ``RobertaConfig``.
    """
    __UpperCamelCase : str = 'roberta'
    def __init__(self , __lowercase=5_02_65 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase="absolute" , __lowercase=True , __lowercase=None , **__lowercase , ):
        # Forward the special-token ids to the base PretrainedConfig.
        super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
        # Intended attribute stores (right-hand names undefined in this form).
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = type_vocab_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = layer_norm_eps
        __lowerCAmelCase = position_embedding_type
        __lowerCAmelCase = use_cache
        __lowerCAmelCase = classifier_dropout
class a__ ( __A ):
    """ONNX export configuration: declares the model's input names and their
    dynamic axes."""
    @property
    def _snake_case (self ):
        """Return an ordered mapping from input name to its dynamic axes.

        Multiple-choice inputs carry an extra ``choice`` axis between the
        batch and sequence axes; all other tasks use (batch, sequence).
        """
        # Fix: the obfuscated source assigned the axes dict to a throwaway
        # name and then read the undefined name ``dynamic_axis``.
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 9 | 1 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase):
    """Pad/truncate a batch of sequences to a fixed length (apparent intent).

    NOTE(review): the obfuscation repeats the parameter name
    ``lowerCamelCase`` four times (a SyntaxError), reads the unbound names
    ``sequence_length``/``padding_side``/``out_tensor``, and all four
    branches assign the truncated slice to a throwaway local instead of
    writing it into ``out_tensor`` — as written, the intended copy never
    happens. Confirm intent against the upstream ``padding_tensor`` helper.
    """
    # Intended: allocate a padded output array; the trailing 2 axis is for
    # (start, end) span pairs when the padding value is a tuple.
    if isinstance(lowerCamelCase, lowerCamelCase):
        __lowerCAmelCase = np.full((len(lowerCamelCase), sequence_length, 2), lowerCamelCase)
    else:
        __lowerCAmelCase = np.full((len(lowerCamelCase), sequence_length), lowerCamelCase)
    for i, tensor in enumerate(lowerCamelCase):
        if padding_side == "right":
            if isinstance(lowerCamelCase, lowerCamelCase):
                __lowerCAmelCase = tensor[:sequence_length]
            else:
                __lowerCAmelCase = tensor[:sequence_length]
        else:
            if isinstance(lowerCamelCase, lowerCamelCase):
                __lowerCAmelCase = tensor[:sequence_length]
            else:
                __lowerCAmelCase = tensor[:sequence_length]
    return out_tensor.tolist()
def __magic_name__( lowerCamelCase):
__lowerCAmelCase = ord(lowerCamelCase)
if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
return True
__lowerCAmelCase = unicodedata.category(lowerCamelCase)
if cat.startswith('''P'''):
return True
return False
@dataclass
class a__ ( __A ):
    """Data collator that pads token/entity features and their labels
    (apparent intent: the LUKE token-classification collator).

    NOTE(review): all six dataclass fields share the single (name-mangled)
    name ``__UpperCamelCase``, so ``self.tokenizer``, ``self.padding``,
    ``self.max_length``, ``self.pad_to_multiple_of`` and
    ``self.label_pad_token_id`` read in the method below are never defined;
    the method body likewise reads unbound names (``features``, ``labels``,
    ``batch``, ...). Documented as-is; confirm against the upstream
    ``DataCollatorForLukeTokenClassification``.
    """
    # Intended fields: tokenizer, padding strategy, max_length,
    # pad_to_multiple_of, label_pad_token_id (-100 = loss ignore index),
    # return_tensors ("pt").
    __UpperCamelCase : PreTrainedTokenizerBase
    __UpperCamelCase : Union[bool, str, PaddingStrategy] = True
    __UpperCamelCase : Optional[int] = None
    __UpperCamelCase : Optional[int] = None
    __UpperCamelCase : int = -100
    __UpperCamelCase : str = "pt"
    def _snake_case (self , __lowercase ):
        # Local import so torch is only required when collating.
        import torch
        # Intended flow: pad the tokenized features, then pad labels to the
        # entity sequence length on the tokenizer's padding side, and pad
        # ner_tags / original_entity_spans with sentinel values.
        __lowerCAmelCase = '''label''' if '''label''' in features[0].keys() else '''labels'''
        __lowerCAmelCase = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        __lowerCAmelCase = self.tokenizer.pad(
            __lowercase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
        if labels is None:
            return batch
        __lowerCAmelCase = torch.tensor(batch['''entity_ids'''] ).shape[1]
        __lowerCAmelCase = self.tokenizer.padding_side
        if padding_side == "right":
            __lowerCAmelCase = [
                list(__lowercase ) + [self.label_pad_token_id] * (sequence_length - len(__lowercase )) for label in labels
            ]
        else:
            __lowerCAmelCase = [
                [self.label_pad_token_id] * (sequence_length - len(__lowercase )) + list(__lowercase ) for label in labels
            ]
        __lowerCAmelCase = [feature['''ner_tags'''] for feature in features]
        __lowerCAmelCase = padding_tensor(__lowercase , -1 , __lowercase , __lowercase )
        __lowerCAmelCase = [feature['''original_entity_spans'''] for feature in features]
        __lowerCAmelCase = padding_tensor(__lowercase , (-1, -1) , __lowercase , __lowercase )
        __lowerCAmelCase = {k: torch.tensor(__lowercase , dtype=torch.intaa ) for k, v in batch.items()}
        return batch
| 9 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __magic_name__( lowerCamelCase, lowerCamelCase):
    """Map one original EfficientFormer state-dict key to the HF naming scheme
    (apparent intent).

    NOTE(review): the obfuscation repeats the parameter name ``lowerCamelCase``
    (a SyntaxError) and reads unbound names (``old_name``, ``layer``,
    ``match``, ``trimmed_name``, ``new_name``, ``num_meta4D_last_stage``).
    Documented as-is; confirm against the upstream conversion script.
    """
    __lowerCAmelCase = old_name
    # Patch-embedding stem: convolutions 0/3 and batchnorms 1/4.
    if "patch_embed" in old_name:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = old_name.split('''.''')
        if layer == "0":
            __lowerCAmelCase = old_name.replace('''0''', '''convolution1''')
        elif layer == "1":
            __lowerCAmelCase = old_name.replace('''1''', '''batchnorm_before''')
        elif layer == "3":
            __lowerCAmelCase = old_name.replace('''3''', '''convolution2''')
        else:
            __lowerCAmelCase = old_name.replace('''4''', '''batchnorm_after''')
    # Numbered "network" blocks: meta4D blocks in intermediate stages,
    # meta3D blocks in the last stage.
    if "network" in old_name and re.search(r'''\d\.\d''', lowerCamelCase):
        __lowerCAmelCase = r'''\b\d{2}\b'''
        if bool(re.search(lowerCamelCase, lowerCamelCase)):
            __lowerCAmelCase = re.search(r'''\d\.\d\d.''', lowerCamelCase).group()
        else:
            __lowerCAmelCase = re.search(r'''\d\.\d.''', lowerCamelCase).group()
        if int(match[0]) < 6:
            __lowerCAmelCase = old_name.replace(lowerCamelCase, '''''')
            __lowerCAmelCase = trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1])
            __lowerCAmelCase = '''intermediate_stages.''' + trimmed_name
        else:
            __lowerCAmelCase = old_name.replace(lowerCamelCase, '''''')
            if int(match[2]) < num_meta4D_last_stage:
                __lowerCAmelCase = trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2])
            else:
                __lowerCAmelCase = str(int(match[2]) - num_meta4D_last_stage)
                __lowerCAmelCase = trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index)
            if "norm1" in old_name:
                __lowerCAmelCase = trimmed_name.replace('''norm1''', '''layernorm1''')
            elif "norm2" in old_name:
                __lowerCAmelCase = trimmed_name.replace('''norm2''', '''layernorm2''')
            elif "fc1" in old_name:
                __lowerCAmelCase = trimmed_name.replace('''fc1''', '''linear_in''')
            elif "fc2" in old_name:
                __lowerCAmelCase = trimmed_name.replace('''fc2''', '''linear_out''')
            __lowerCAmelCase = '''last_stage.''' + trimmed_name
    elif "network" in old_name and re.search(r'''.\d.''', lowerCamelCase):
        __lowerCAmelCase = old_name.replace('''network''', '''intermediate_stages''')
    # Pointwise-conv / norm renames inside the mapped name.
    if "fc" in new_name:
        __lowerCAmelCase = new_name.replace('''fc''', '''convolution''')
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        __lowerCAmelCase = new_name.replace('''norm1''', '''batchnorm_before''')
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        __lowerCAmelCase = new_name.replace('''norm2''', '''batchnorm_after''')
    if "proj" in new_name:
        __lowerCAmelCase = new_name.replace('''proj''', '''projection''')
    # Head / prefix renames.
    if "dist_head" in new_name:
        __lowerCAmelCase = new_name.replace('''dist_head''', '''distillation_classifier''')
    elif "head" in new_name:
        __lowerCAmelCase = new_name.replace('''head''', '''classifier''')
    elif "patch_embed" in new_name:
        __lowerCAmelCase = '''efficientformer.''' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        __lowerCAmelCase = new_name.replace('''norm''', '''layernorm''')
        __lowerCAmelCase = '''efficientformer.''' + new_name
    else:
        __lowerCAmelCase = '''efficientformer.encoder.''' + new_name
    return new_name
def __magic_name__( lowerCamelCase, lowerCamelCase):
    """Rewrite every key of a checkpoint dict through the rename helper
    (apparent intent).

    NOTE(review): the duplicated ``lowerCamelCase`` parameter is a
    SyntaxError, and ``checkpoint``/``val`` are unbound here — the popped
    value is assigned to a throwaway name instead of being re-inserted.
    """
    for key in checkpoint.copy().keys():
        __lowerCAmelCase = checkpoint.pop(lowerCamelCase)
        __lowerCAmelCase = val
    return checkpoint
def __magic_name__( ):
    """Download and return the standard COCO test image used to verify the
    converted model's outputs.

    Returns:
        A ``PIL.Image.Image`` streamed from the COCO validation set.
    """
    # Fix: the obfuscated source bound the URL and image to throwaway names,
    # passed the undefined name ``lowerCamelCase`` to requests.get, and
    # returned the undefined name ``image``.
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase):
    """Convert an original EfficientFormer checkpoint to the HF format,
    verify its logits against reference values, save it, and optionally push
    it to the Hub (apparent intent).

    NOTE(review): the four parameters all share the name ``lowerCamelCase``
    (a SyntaxError), and the body reads many unbound names
    (``checkpoint_path``, ``config``, ``model``, ``model_name``, ``logits``,
    ``pytorch_dump_path``, ``push_to_hub``, ...). Documented as-is; confirm
    against the upstream conversion script.
    """
    # Load the raw state dict and build the HF model from the JSON config.
    __lowerCAmelCase = torch.load(lowerCamelCase, map_location='''cpu''')['''model''']
    __lowerCAmelCase = EfficientFormerConfig.from_json_file(lowerCamelCase)
    __lowerCAmelCase = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase)
    __lowerCAmelCase = '''_'''.join(checkpoint_path.split('''/''')[-1].split('''.''')[0].split('''_''')[:-1])
    __lowerCAmelCase = config.depths[-1] - config.num_metaad_blocks + 1
    __lowerCAmelCase = convert_torch_checkpoint(lowerCamelCase, lowerCamelCase)
    model.load_state_dict(lowerCamelCase)
    model.eval()
    __lowerCAmelCase = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    # prepare image
    __lowerCAmelCase = prepare_img()
    __lowerCAmelCase = 2_5_6
    __lowerCAmelCase = 2_2_4
    __lowerCAmelCase = EfficientFormerImageProcessor(
        size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], )
    __lowerCAmelCase = processor(images=lowerCamelCase, return_tensors='''pt''').pixel_values
    # original processing pipeline
    # Intended: reproduce the original torchvision preprocessing and check it
    # matches the HF image processor's output.
    __lowerCAmelCase = Compose(
        [
            Resize(lowerCamelCase, interpolation=pillow_resamplings['''bicubic''']),
            CenterCrop(lowerCamelCase),
            ToTensor(),
            Normalize(lowerCamelCase, lowerCamelCase),
        ])
    __lowerCAmelCase = image_transforms(lowerCamelCase).unsqueeze(0)
    assert torch.allclose(lowerCamelCase, lowerCamelCase)
    __lowerCAmelCase = model(lowerCamelCase)
    __lowerCAmelCase = outputs.logits
    __lowerCAmelCase = (1, 1_0_0_0)
    # Compare the first ten logits against reference values per model size.
    if "l1" in model_name:
        __lowerCAmelCase = torch.Tensor(
            [-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28])
        assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        __lowerCAmelCase = torch.Tensor(
            [-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27])
        assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        __lowerCAmelCase = torch.Tensor(
            [-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78])
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""")
    # Save Checkpoints
    Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
    model.save_pretrained(lowerCamelCase)
    print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")
    processor.save_pretrained(lowerCamelCase)
    print(F"""Processor successfuly saved at {pytorch_dump_path}""")
    if push_to_hub:
        print('''Pushing model to the hub...''')
        model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add model''', use_temp_dir=lowerCamelCase, )
        processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add image processor''', use_temp_dir=lowerCamelCase, )
# Script entry point: parse CLI arguments and run the checkpoint conversion.
# NOTE(review): the parser is bound to the obfuscated name ``_UpperCAmelCase``
# but the ``add_argument``/``set_defaults`` calls below read the unbound name
# ``parser`` — as written this block raises a NameError at runtime.
if __name__ == "__main__":
    _UpperCAmelCase : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to EfficientFormer pytorch checkpoint.""",
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for EfficientFormer model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    # --push_to_hub / --no-push_to_hub toggle one boolean flag (default True).
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    parser.set_defaults(push_to_hub=True)
    _UpperCAmelCase : List[str] = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 9 | 1 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase=1E-12):
    """Row-wise cosine similarity between two embedding matrices (apparent
    intent), with norms clipped at eps for numerical stability.

    NOTE(review): the three parameters share the name ``lowerCamelCase``
    (a SyntaxError); both normalisation lines read ``emb_a`` — the second was
    presumably meant to normalise the second embedding matrix — and the
    final matmul reads the unbound name ``norm_emb_a``.
    """
    __lowerCAmelCase = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(lowerCamelCase, axis=1), a_min=lowerCamelCase)).T
    __lowerCAmelCase = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(lowerCamelCase, axis=1), a_min=lowerCamelCase)).T
    return jnp.matmul(lowerCamelCase, norm_emb_a.T)
class a__ ( nn.Module ):
    """Flax safety-checker module: projects CLIP image embeddings and flags
    images whose similarity to "concept" embeddings exceeds a threshold
    (apparent intent).

    NOTE(review): machine-obfuscated — ``setup``'s submodules/params and
    ``__call__``'s locals are all bound to throwaway names while later lines
    read the intended names (``self.vision_model``, ``special_cos_dist``,
    ``special_scores``, ...). Documented as-is; confirm against the upstream
    ``FlaxStableDiffusionSafetyChecker``.
    """
    # Flax module fields: the CLIP config and the compute dtype.
    __UpperCamelCase : CLIPConfig
    __UpperCamelCase : jnp.dtype = jnp.floataa
    def _snake_case (self ):
        # Intended setup(): CLIP vision tower, visual projection, and the
        # learned concept / special-care embeddings with their weights.
        __lowerCAmelCase = FlaxCLIPVisionModule(self.config.vision_config )
        __lowerCAmelCase = nn.Dense(self.config.projection_dim , use_bias=__lowercase , dtype=self.dtype )
        __lowerCAmelCase = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim) )
        __lowerCAmelCase = self.param(
            '''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
        __lowerCAmelCase = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,) )
        __lowerCAmelCase = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) )
    def __call__(self , __lowercase ):
        # Intended flow: embed the image, compare against special-care and
        # concept embeddings, and return a per-image boolean flag.
        __lowerCAmelCase = self.vision_model(__lowercase )[1]
        __lowerCAmelCase = self.visual_projection(__lowercase )
        __lowerCAmelCase = jax_cosine_distance(__lowercase , self.special_care_embeds )
        __lowerCAmelCase = jax_cosine_distance(__lowercase , self.concept_embeds )
        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        __lowerCAmelCase = 0.0
        __lowerCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        __lowerCAmelCase = jnp.round(__lowercase , 3 )
        __lowerCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowercase )
        # Use a lower threshold if an image has any special care concept
        __lowerCAmelCase = is_special_care * 0.0_1
        __lowerCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        __lowerCAmelCase = jnp.round(__lowercase , 3 )
        __lowerCAmelCase = jnp.any(concept_scores > 0 , axis=1 )
        return has_nsfw_concepts
class a__ ( __A ):
    """Pretrained-model wrapper around the Flax safety-checker module
    (apparent intent).

    NOTE(review): ``__init__`` and ``__call__`` repeat the parameter name
    ``__lowercase`` (a SyntaxError), and the bodies read unbound names
    (``input_shape``, ``_do_init``, ``params_rng``, ``params``, ...).
    Documented as-is; confirm against the upstream
    ``FlaxStableDiffusionSafetyChecker``.
    """
    # Config class, the model's main input name, and the Flax module class.
    __UpperCamelCase : Dict = CLIPConfig
    __UpperCamelCase : Dict = 'clip_input'
    __UpperCamelCase : str = FlaxStableDiffusionSafetyCheckerModule
    def __init__(self , __lowercase , __lowercase = None , __lowercase = 0 , __lowercase = jnp.floataa , __lowercase = True , **__lowercase , ):
        # Intended default input shape: one 224x224 RGB image (NHWC).
        if input_shape is None:
            __lowerCAmelCase = (1, 2_24, 2_24, 3)
        __lowerCAmelCase = self.module_class(config=__lowercase , dtype=__lowercase , **__lowercase )
        super().__init__(__lowercase , __lowercase , input_shape=__lowercase , seed=__lowercase , dtype=__lowercase , _do_init=_do_init )
    def _snake_case (self , __lowercase , __lowercase , __lowercase = None ):
        # init input tensor
        # Intended: initialise module parameters from a random input.
        __lowerCAmelCase = jax.random.normal(__lowercase , __lowercase )
        __lowerCAmelCase , __lowerCAmelCase = jax.random.split(__lowercase )
        __lowerCAmelCase = {'''params''': params_rng, '''dropout''': dropout_rng}
        __lowerCAmelCase = self.module.init(__lowercase , __lowercase )['''params''']
        return random_params
    def __call__(self , __lowercase , __lowercase = None , ):
        # Intended: transpose NCHW pixel values to NHWC and apply the module.
        __lowerCAmelCase = jnp.transpose(__lowercase , (0, 2, 3, 1) )
        return self.module.apply(
            {'''params''': params or self.params} , jnp.array(__lowercase , dtype=jnp.floataa ) , rngs={} , )
| 9 |
'''simple docstring'''
from __future__ import annotations
import math
def __magic_name__( lowerCamelCase, lowerCamelCase):
    """Multiply two 2x2 matrices directly (apparent intent: the Strassen base
    case).

    NOTE(review): the duplicated ``lowerCamelCase`` parameter is a
    SyntaxError, and the body reads the unbound names ``a``, ``b`` and
    ``new_matrix``.
    """
    if len(lowerCamelCase) != 2 or len(a[0]) != 2 or len(lowerCamelCase) != 2 or len(b[0]) != 2:
        raise Exception('''Matrices are not 2x2''')
    __lowerCAmelCase = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def __magic_name__( lowerCamelCase, lowerCamelCase):
    """Element-wise sum of two equally-shaped matrices (apparent intent).

    NOTE(review): the duplicated ``lowerCamelCase`` parameter is a
    SyntaxError, and ``matrix_a``/``matrix_b`` are unbound names here.
    """
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(lowerCamelCase))
    ]
def __magic_name__( lowerCamelCase, lowerCamelCase):
    """Element-wise difference of two equally-shaped matrices (apparent
    intent).

    NOTE(review): the duplicated ``lowerCamelCase`` parameter is a
    SyntaxError, and ``matrix_a``/``matrix_b`` are unbound names here.
    """
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(lowerCamelCase))
    ]
def split_matrix(a: list) -> tuple:
    """Split an even-sized square matrix into its four quadrants.

    Returns (top_left, top_right, bot_left, bot_right). The garbled range
    bounds are reconstructed from the quadrant geometry.

    Raises:
        Exception: if the matrix has odd dimensions.
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('''Odd matrices are not supported!''')

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple:
    """Return (row_count, column_count) of a non-empty matrix."""
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    """Print a matrix one row per line.

    Renamed from an obfuscated placeholder (upstream convention); the original
    body referenced an undefined name in the generator expression.
    """
    print('''\n'''.join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two square power-of-two matrices with Strassen's
    algorithm (seven recursive products instead of eight).

    Function and parameter names are restored from the call sites; the
    obfuscated signature repeated one parameter name (a SyntaxError).
    """
    # Base case: fall back to the direct 2x2 product.
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # Strassen's seven products.
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    """Multiply two matrices of compatible (possibly non-square) dimensions.

    Pads both matrices with zeros up to the next power of two, runs
    :func:`actual_strassen`, then strips the padding again.

    Raises:
        Exception: if the inner dimensions do not match.
    """
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            '''Unable to multiply these matrices, please check the dimensions.\n'''
            F"""Matrix A: {matrix1}\n"""
            F"""Matrix B: {matrix2}"""
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    # NOTE(review): this early-exit condition was garbled in the source; the
    # reconstruction below follows the upstream implementation -- confirm.
    if dimension1[0] == dimension2[1] and dimension2[0] == dimension1[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    # Copy the rows so padding does not mutate the caller's matrices
    # (the original aliased the inputs and appended zeros into them).
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros: keep dimension1[0] rows x dimension2[1] cols.
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # Demo: multiply a 10x4 matrix by a 4x4 matrix. The obfuscated source bound
    # both matrices to the same name and passed the first matrix twice.
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 9 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__(metaclass=DummyObject):
    """Dummy placeholder raising a helpful error when `keras_nlp` is missing.

    The obfuscated source used the undefined name ``__A`` as metaclass;
    ``DummyObject`` (imported above) is the transformers convention for
    dummy objects, as is the ``_backends`` attribute name.
    """

    # Backends the real object needs; checked again on instantiation.
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''keras_nlp'''])
| 9 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class a__(unittest.TestCase):
    """Tests for ``CLIPProcessor`` (tokenizer + image processor round trips).

    The obfuscated source named every method ``_snake_case`` (so they shadowed
    each other) and dropped assignment targets; names are restored from the
    attribute reads and ``self.`` calls still present in the bodies.
    """

    def setUp(self):
        """Write a tiny BPE vocab/merges and an image-processor config to a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))

        image_processor_map = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, '''w''', encoding='''utf-8''') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random 30x400 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='''np''')
        input_processor = processor(images=image_input, return_tensors='''np''')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['''input_ids''', '''attention_mask''', '''pixel_values'''])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 9 | 1 |
'''simple docstring'''
class a__:
    """Prefix-sum helper over a fixed numeric array.

    Method names were lost to obfuscation (both were ``_snake_case``); they
    are restored to the conventional ``get_sum``/``contains_sum``.
    """

    def __init__(self, array):
        """Precompute prefix sums so prefix_sum[i] == sum(array[: i + 1])."""
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end):
        """Return sum(array[start : end + 1]) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum):
        """Return True if some contiguous subarray sums to ``target_sum``."""
        # A subarray (i, j] sums to target iff prefix[j] - prefix[i] == target;
        # the virtual empty prefix 0 covers subarrays starting at index 0.
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 9 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """Distribution transformed by ``y = scale * x + loc``.

    The class name is restored from its call site further down the file; the
    base class ``__A`` is restored to ``TransformedDistribution`` (imported
    above). The three properties were all named identically in the obfuscated
    source; ``variance`` is grounded by the ``self.variance`` read in stddev.
    """

    def __init__(self, base_distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the transformed distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Projects a feature vector onto one linear head per distribution argument.

    Name restored from its call site further down the file; the forward hook
    must be called ``forward`` for ``nn.Module`` dispatch to work.
    """

    def __init__(self, in_features, args_dim, domain_map, **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # One linear head per distribution argument (e.g. loc, scale).
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x):
        params_unbounded = [proj(x) for proj in self.proj]
        # Map the unbounded projections into each argument's valid domain.
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable as an ``nn.Module``.

    Name restored from its call site further down the file.
    """

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    """Base class mapping projected network outputs to a torch Distribution.

    Method/attribute names were lost to obfuscation; they are restored from
    the ``self.args_dim`` / ``self.event_dim`` / ``self.domain_map`` reads
    still present in the bodies.
    """

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # Scale each argument's size by the output dimensionality.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        """Build the (optionally affine-transformed) output distribution."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        # A value guaranteed to lie in the distribution's support (for padding).
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return a projection from ``in_features`` to this output's arguments."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth, strictly positive map (softplus alternative)."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """Student's t distribution output head (df, loc, scale)."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df, loc, scale):
        # scale must be strictly positive; df must exceed 2 for finite variance.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """Normal distribution output head (loc, scale)."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc, scale):
        # scale must be strictly positive.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative binomial output head (total_count, logits).

    Overrides the base construction because NegativeBinomial takes keyword
    arguments, and folds the scale into the logits instead of an affine map.
    """

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count, logits):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
| 9 | 1 |
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    """Builds small BigBird configs and inputs for the Flax model tests below.

    Class name restored from its instantiation in the test class's setUp; the
    obfuscated ``__init__`` repeated one parameter name (a SyntaxError), so
    parameter names are restored from the attribute assignments.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': attention_mask,
        }
        return config, inputs_dict
@require_flax
class a__(FlaxModelTesterMixin, unittest.TestCase):
    """Flax BigBird model test suite.

    The base class ``__A`` is restored to ``FlaxModelTesterMixin`` (imported
    above); method names are restored from the ``super().X()`` calls in their
    bodies. BigBird is much slower than other models, so several common tests
    are redeclared here just to mark them ``@slow``.
    """

    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    # NOTE(review): this flag's original name was lost in obfuscation; upstream
    # uses test_mismatched_shapes here -- confirm.
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''google/bigbird-roberta-base''')
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        # Block-sparse attention does not return attention probabilities, so
        # only run the common attention test when explicitly enabled.
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith('''outputs.attentions'''):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 9 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a__(PipelineTool):
    """Donut-based document question-answering tool.

    The base class ``__A`` is restored to ``PipelineTool`` (imported above);
    attribute and method names follow the PipelineTool contract grounded by
    the ``self.pre_processor`` / ``self.model`` uses in the bodies. The
    garbled ``tokenajson`` call is fixed to ``token2json``.
    """

    default_checkpoint = 'naver-clova-ix/donut-base-finetuned-docvqa'
    description = (
        'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    name = 'document_qa'
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ['image', 'text']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''')

        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        """Build decoder prompt ids and pixel values for the model."""
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        prompt = task_prompt.replace('''{user_input}''', question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors='''pt''').input_ids
        pixel_values = self.pre_processor(document, return_tensors='''pt''').pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Greedy-generate the answer sequence."""
        return self.model.generate(
            inputs['''pixel_values'''].to(self.device),
            decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Strip special tokens and extract the answer from the Donut output."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, '''''')
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, '''''')
        sequence = re.sub(R'''<.*?>''', '''''', sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 9 | 1 |
'''simple docstring'''
def __magic_name__( lowerCamelCase):
if num < 0:
return False
__lowerCAmelCase = num
__lowerCAmelCase = 0
while num > 0:
__lowerCAmelCase = rev_num * 1_0 + (num % 1_0)
num //= 1_0
return num_copy == rev_num
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 9 |
'''simple docstring'''
def count_divisors(n: int) -> int:
    """Return the number of divisors of ``n`` via prime factorisation.

    Name restored from the call in ``solution`` below; the obfuscated
    parameter did not match the ``n`` used in the body (a NameError).
    Uses d(n) = prod(multiplicity_i + 1) over prime factors.
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # Any leftover factor > sqrt(original n) is prime with multiplicity 1.
        n_divisors *= 2
    return n_divisors
def solution() -> int:
    """Return the first triangle number with more than 500 divisors
    (Project Euler problem 12).

    Name restored from the ``print(solution())`` call below; the obfuscated
    body passed an undefined name to ``count_divisors`` (a NameError).
    """
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i

        if count_divisors(t_num) > 500:
            break

    return t_num
if __name__ == "__main__":
    # Print the Project Euler 12 answer when run as a script.
    print(solution())
| 9 | 1 |
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in parallel:
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn).

    Renamed from an obfuscated placeholder (both functions in this module
    shared one name and shadowed each other); the parameter is restored from
    the ``resistors`` used in the body.

    Raises:
        ValueError: if any resistor value is negative or zero.
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F"""Resistor at index {index} has a negative or zero value!"""
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in series:
    Req = R1 + R2 + ... + Rn.

    Renamed from an obfuscated placeholder (both functions in this module
    shared one name and shadowed each other); the parameter is restored from
    the ``resistors`` used in the body.

    Raises:
        ValueError: if any resistor value is negative.
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F"""Resistor at index {index} has a negative value!"""
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 9 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class a__(unittest.TestCase):
    """Tests for ``DisjunctiveConstraint`` progression and validation.

    All four test methods were named identically in the obfuscated source and
    shadowed each other; unique names are restored so they all run.
    """

    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        # NOTE(review): the expected exception type was obfuscated; ValueError
        # matches the constraint's input validation -- confirm.
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 9 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
_UpperCAmelCase : Optional[int] = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class a__ ( __A ):
    """Configuration for ConvBERT models (``model_type`` ``'convbert'``).

    Defaults correspond to the YituTech/conv-bert-base architecture. Fixes the
    original ``__init__``, in which every parameter was named ``__lowercase``
    (a duplicate-argument SyntaxError); the restored names are the ones the
    body already referenced on the right-hand side of each assignment.
    """

    # NOTE(review): presumably meant to be `model_type` (read by the config
    # machinery of the base class) — confirm against the base class.
    __UpperCamelCase : List[str] = 'convbert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are consumed by the base class; everything else is
        # stored on the instance (the original assigned each value to a dead
        # local instead).
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class a__ ( __A ):
    """ONNX export configuration: declares the dynamic axes of each model input."""

    @property
    def _snake_case (self ):
        """Return an OrderedDict mapping each input name to its dynamic axes.

        Fixes the original, which assigned the axis dict to a throwaway local
        while the return statement read the unbound name ``dynamic_axis``.
        """
        # Multiple-choice tasks carry an extra `choice` axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
| 9 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_UpperCAmelCase : List[str] = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_UpperCAmelCase : str = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_UpperCAmelCase : Tuple = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Compute per-class area histograms for one prediction / ground-truth pair.

    Restores the name the caller below uses (`intersect_and_union`) and fixes a
    duplicate-parameter SyntaxError plus remap/reduce steps that assigned their
    results to a dead local instead of mutating the label map.

    Args:
        pred_label: predicted segmentation map, shape (height, width).
        label: ground-truth segmentation map, same shape.
        num_labels: number of classes (histogram bins).
        ignore_index: label value excluded from all statistics.
        label_map: optional {old_id: new_id} remapping applied to `label`.
        reduce_labels: shift labels down by 1, sending background 0 to 255
            (ADE20k-style datasets).

    Returns:
        Tuple of 1-D arrays of length `num_labels`:
        (area_intersect, area_union, area_pred_label, area_label).
    """
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        # Background (0) becomes the ignore value; everything else shifts down by 1.
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    # Drop every pixel whose ground-truth value is the ignore index.
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = label[mask]
    # Pixels classified correctly contribute to the intersection histogram.
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Sum `intersect_and_union` statistics over a whole batch of segmentation maps.

    Restores the name the `mean_iou` caller uses, fixes a duplicate-parameter
    SyntaxError, and replaces the non-existent dtype ``np.floataa`` with
    ``np.float64``.

    Returns:
        Tuple of 1-D float64 arrays of length `num_labels`:
        (total_area_intersect, total_area_union, total_area_pred_label, total_area_label).
    """
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Compute mean IoU, mean accuracy and overall accuracy over a batch.

    Restores the name the Metric class below calls (`mean_iou`), fixes a
    duplicate-parameter SyntaxError, and fixes the body, which referenced an
    unbound ``metrics`` dict and discarded every computed value. The returned
    keys match the module docstring: mean_iou, mean_accuracy, overall_accuracy,
    per_category_iou, per_category_accuracy.
    """
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['''mean_iou'''] = np.nanmean(iou)
    metrics['''mean_accuracy'''] = np.nanmean(acc)
    metrics['''overall_accuracy'''] = all_acc
    metrics['''per_category_iou'''] = iou
    metrics['''per_category_accuracy'''] = acc
    if nan_to_num is not None:
        # Replace NaNs (classes absent from both prediction and ground truth).
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
    """`datasets` Metric computing mean IoU over lists of 2-D segmentation maps."""

    def _info(self ):
        # Renamed from `_snake_case`: the original class defined two methods
        # with the same name, so this one was shadowed; `datasets.Metric`
        # dispatches to `_info` / `_compute`.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
                    '''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
                } ) , reference_urls=[
                '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
            ] , )

    def _compute(self , predictions , references , num_labels , ignore_index , nan_to_num=None , label_map=None , reduce_labels=False ):
        """Delegate to :func:`mean_iou`.

        `predictions` / `references` arrive from `Metric.compute` under the
        feature names declared in `_info`; the original signature named every
        parameter ``__lowercase`` (duplicate-argument SyntaxError).
        """
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
        return iou_result
| 9 | 1 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    # Flag read by ServeCommand.__init__ to decide whether serving can start.
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    # Stand-ins so the module still imports without the [serving] extras.
    BaseModel = object

    def Body(*args, **kwargs):
        # Stub replacing fastapi.Body, which appears in method defaults below.
        # The original stub spelled `*lowerCamelCase, **lowerCamelCase`
        # (duplicate argument -> SyntaxError).
        pass

    _serve_dependencies_installed = False
_UpperCAmelCase : Dict = logging.get_logger("""transformers-cli/serving""")
def __magic_name__( args ):
    """Factory for `serve_parser.set_defaults(func=...)`: build the pipeline the
    parsed CLI `args` describe and wrap it in a ServeCommand.

    Fixes the original, which named its parameter ``lowerCamelCase`` while the
    body read ``args.*`` (NameError), and which discarded the constructed
    pipeline, passing the args namespace to ServeCommand instead.
    """
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Response model for GET /: the model's configuration as a plain dict.

    Renamed from ``a__`` to the name the route handlers construct
    (``ServeModelInfoResult(infos=...)``); field name restored likewise.
    """

    # Filled with vars(model.config) by ServeCommand.model_info.
    infos: dict
class ServeTokenizeResult(BaseModel):
    """Response model for POST /tokenize (name/fields restored to match the
    ``ServeTokenizeResult(tokens=..., tokens_ids=...)`` construction below)."""

    # Token strings produced by the tokenizer.
    tokens: List[str]
    # Token ids; only present when the request asked for them.
    tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult(BaseModel):
    """Response model for POST /detokenize (fields restored from the
    ``ServeDeTokenizeResult(model='', text=...)`` construction below)."""

    model: str
    text: str
class ServeForwardResult(BaseModel):
    """Response model for POST /forward (fields restored from the
    ``ServeForwardResult(output=..., attention=...)`` constructions below)."""

    output: Any
    attention: Any
class a__ ( __A ):
    """`transformers-cli serve`: expose a Pipeline over REST endpoints (FastAPI + uvicorn).

    The original class defined five methods all named ``_snake_case`` (each
    shadowing the last) while its own routes referenced ``self.model_info`` /
    ``self.tokenize`` / ``self.detokenize`` / ``self.forward``; those names are
    restored here, and duplicate-parameter SyntaxErrors are fixed.
    """

    @staticmethod
    def register_subcommand(parser):
        """Attach the `serve` sub-command and its arguments to the root CLI parser.

        NOTE(review): method name chosen to match the CLI-command convention —
        confirm against BaseTransformersCLICommand.
        """
        serve_parser = parser.add_parser(
            '''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
        serve_parser.add_argument(
            '''--task''' , type=str , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
        serve_parser.add_argument('''--host''' , type=str , default='''localhost''' , help='''Interface the server will listen on.''' )
        serve_parser.add_argument('''--port''' , type=int , default=8888 , help='''Port the serving will listen to.''' )
        serve_parser.add_argument('''--workers''' , type=int , default=1 , help='''Number of http workers''' )
        serve_parser.add_argument('''--model''' , type=str , help='''Model\'s name or path to stored model.''' )
        serve_parser.add_argument('''--config''' , type=str , help='''Model\'s config name or path to stored model.''' )
        serve_parser.add_argument('''--tokenizer''' , type=str , help='''Tokenizer name to use.''' )
        serve_parser.add_argument(
            '''--device''' , type=int , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
        # __magic_name__ is this module's factory that builds the pipeline and
        # returns a ServeCommand for the parsed args.
        serve_parser.set_defaults(func=__magic_name__ )

    def __init__(self , pipeline , host , port , workers ):
        """Store serving parameters and build the FastAPI app with its routes.

        The original stored nothing (every assignment went to a dead local)
        although run()/model_info()/... read self._pipeline/_app/host/port/workers.
        """
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                '''Using serve command requires FastAPI and uvicorn. '''
                '''Please install transformers with [serving]: pip install "transformers[serving]".'''
                '''Or install FastAPI and uvicorn separately.''' )
        else:
            logger.info(F"""Serving model over {host}:{port}""" )
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '''/''' , self.model_info , response_model=ServeModelInfoResult , response_class=JSONResponse , methods=['''GET'''] , ),
                    APIRoute(
                        '''/tokenize''' , self.tokenize , response_model=ServeTokenizeResult , response_class=JSONResponse , methods=['''POST'''] , ),
                    APIRoute(
                        '''/detokenize''' , self.detokenize , response_model=ServeDeTokenizeResult , response_class=JSONResponse , methods=['''POST'''] , ),
                    APIRoute(
                        '''/forward''' , self.forward , response_model=ServeForwardResult , response_class=JSONResponse , methods=['''POST'''] , ),
                ] , timeout=600 , )

    def run(self ):
        """Start the uvicorn server (blocking)."""
        run(self._app , host=self.host , port=self.port , workers=self.workers )

    def model_info(self ):
        """GET /: return the pipeline model's configuration as a dict."""
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )

    def tokenize(self , text_input = Body(None , embed=True ) , return_ids = Body(False , embed=True ) ):
        """POST /tokenize: split `text_input` into tokens, optionally with ids."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input )
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt )
                return ServeTokenizeResult(tokens=tokens_txt , tokens_ids=tokens_ids )
            else:
                return ServeTokenizeResult(tokens=tokens_txt )
        except Exception as e:
            raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(e )} )

    def detokenize(self , tokens_ids = Body(None , embed=True ) , skip_special_tokens = Body(False , embed=True ) , cleanup_tokenization_spaces = Body(True , embed=True ) , ):
        """POST /detokenize: decode `tokens_ids` back into a string."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids , skip_special_tokens , cleanup_tokenization_spaces )
            return ServeDeTokenizeResult(model='''''' , text=decoded_str )
        except Exception as e:
            raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(e )} )

    async def forward(self , inputs=Body(None , embed=True ) ):
        """POST /forward: run the pipeline on `inputs` and return its output."""
        # Check we don't have empty string
        if len(inputs ) == 0:
            return ServeForwardResult(output=[] , attention=[] )
        try:
            # Forward through the model
            output = self._pipeline(inputs )
            return ServeForwardResult(output=output )
        except Exception as e:
            raise HTTPException(500 , {'''error''': str(e )} )
| 9 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class a__ ( __A , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : str = DebertaTokenizer
__UpperCamelCase : str = True
__UpperCamelCase : Any = DebertaTokenizerFast
def _snake_case (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
__lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowerCAmelCase = {'''unk_token''': '''[UNK]'''}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowercase ) )
def _snake_case (self , **__lowercase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
def _snake_case (self , __lowercase ):
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = '''lower newer'''
return input_text, output_text
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__lowerCAmelCase = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__lowerCAmelCase = tokens + [tokenizer.unk_token]
__lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = tokenizer('''Hello''' , '''World''' )
__lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __lowercase )
@slow
def _snake_case (self ):
__lowerCAmelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
__lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowercase )
__lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowercase )
__lowerCAmelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
__lowerCAmelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _snake_case (self ):
__lowerCAmelCase = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__lowerCAmelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
__lowerCAmelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
__lowerCAmelCase = tokenizer(__lowercase , padding=__lowercase )
__lowerCAmelCase = [tokenizer.decode(__lowercase , skip_special_tokens=__lowercase ) for seq in encoding['''input_ids''']]
# fmt: off
__lowerCAmelCase = {
'''input_ids''': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__lowerCAmelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __lowercase )
for expected, decoded in zip(__lowercase , __lowercase ):
self.assertEqual(__lowercase , __lowercase )
| 9 | 1 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """Read boolean environment variable `key`; return `default` when unset.

    Restores the name the module-level caller uses
    (``parse_flag_from_env("RUN_SLOW", default=False)``) and the parameter
    names the body already read (the original signature had two parameters
    both called ``lowerCamelCase`` — a SyntaxError).

    Returns the untouched `default` when the variable is unset, otherwise the
    int 1/0 that `distutils.util.strtobool` would produce. Raises ValueError
    for any other value.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        return default
    # KEY is set: convert to 1/0. Inline equivalent of distutils.util.strtobool,
    # which was removed from the stdlib in Python 3.12.
    value = value.lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return 1
    if value in ("n", "no", "f", "false", "off", "0"):
        return 0
    # More values are supported, but let's keep the message simple.
    raise ValueError(F"""If set, {key} must be yes or no.""")
_UpperCAmelCase : Union[str, Any] = parse_flag_from_env("""RUN_SLOW""", default=False)
def __magic_name__( lowerCamelCase):
return unittest.skip('''Test was skipped''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(_run_slow_tests, '''test is slow''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(not torch.cuda.is_available(), '''test requires only a CPU''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(torch.cuda.is_available(), '''test requires a GPU''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(is_xpu_available(), '''test requires a XPU''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(is_mps_available(), '''test requires a `mps` backend support in `torch`''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available(), '''test requires the Hugging Face suite''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(is_bnb_available(), '''test requires the bitsandbytes library''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(is_tpu_available(), '''test requires TPU''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(torch.cuda.device_count() == 1, '''test requires a GPU''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(torch.xpu.device_count() == 1, '''test requires a XPU''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(torch.cuda.device_count() > 1, '''test requires multiple GPUs''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(torch.xpu.device_count() > 1, '''test requires multiple XPUs''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(is_safetensors_available(), '''test requires safetensors''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(is_deepspeed_available(), '''test requires DeepSpeed''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(is_torch_version('''>=''', '''1.12.0'''), '''test requires torch version >= 1.12.0''')(lowerCamelCase)
def __magic_name__( lowerCamelCase=None, lowerCamelCase=None):
if test_case is None:
return partial(lowerCamelCase, version=lowerCamelCase)
return unittest.skipUnless(is_torch_version('''>=''', lowerCamelCase), F"""test requires torch version >= {version}""")(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(is_tensorboard_available(), '''test requires Tensorboard''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(is_wandb_available(), '''test requires wandb''')(lowerCamelCase)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(is_comet_ml_available(), '''test requires comet_ml''')(lowerCamelCase)
_UpperCAmelCase : Union[str, Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __magic_name__( lowerCamelCase):
return unittest.skipUnless(
_atleast_one_tracker_available, '''test requires at least one tracker to be available and for `comet_ml` to not be installed''', )(lowerCamelCase)
class a__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : int = True
@classmethod
def _snake_case (cls ):
__lowerCAmelCase = tempfile.mkdtemp()
@classmethod
def _snake_case (cls ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def _snake_case (self ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(__lowercase )
class a__ ( unittest.TestCase ):
"""simple docstring"""
def _snake_case (self ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class a__ ( unittest.TestCase ):
"""simple docstring"""
def _snake_case (self , __lowercase ):
__lowerCAmelCase = mocks if isinstance(__lowercase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def __magic_name__( lowerCamelCase):
__lowerCAmelCase = AcceleratorState()
__lowerCAmelCase = tensor[None].clone().to(state.device)
__lowerCAmelCase = gather(lowerCamelCase).cpu()
__lowerCAmelCase = tensor[0].cpu()
for i in range(tensors.shape[0]):
if not torch.equal(tensors[i], lowerCamelCase):
return False
return True
class _RunOutput:
    """Result of a streamed subprocess run: exit code plus captured output lines.

    Renamed from ``a__`` to the name the subprocess helpers below construct and
    read (``_RunOutput(...)``, ``result.returncode``, ``result.stderr``); the
    original ``__init__`` declared three parameters all named ``__lowercase``
    (duplicate-argument SyntaxError).
    """

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    """Forward each line of async `stream` to `callback` until EOF.

    Restores the name the streaming helper below awaits (`_read_stream`);
    the original signature had two parameters both named ``lowerCamelCase``
    (duplicate-argument SyntaxError).
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            # readline() returns b"" at EOF.
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run `cmd` as an async subprocess, teeing stdout/stderr line-by-line into
    memory (and, unless `quiet`, mirroring them to this process's streams).

    Restores the name the synchronous runner below calls and the parameter
    names the body already read (`cmd`); the original signature repeated
    ``lowerCamelCase`` six times (duplicate-argument SyntaxError).

    Returns a _RunOutput with the exit code and the captured line lists.
    """
    if echo:
        print('''\nRunning: ''', ''' '''.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        # Decode one child line, buffer it, and optionally mirror it with a label.
        # NOTE(review): the mangled original printed the same name three times;
        # (label, line) order assumed — confirm desired prefix format.
        line = line.decode('''utf-8''').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='''stdout:'''))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='''stderr:'''))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def __magic_name__( cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    """Blocking wrapper around `_stream_subprocess`.

    Runs `cmd` to completion on the event loop and raises RuntimeError with the
    combined stderr when the child exits non-zero. The original signature
    repeated ``lowerCamelCase`` six times (duplicate-argument SyntaxError).
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ''' '''.join(cmd)
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr)
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""")
    return result
class SubprocessCallException(Exception):
    """Raised by the subprocess helper below when a command exits non-zero;
    the message carries the command line and its captured output.

    Renamed from ``a__`` to the name the ``raise`` site below actually uses,
    and rooted at Exception so it can be raised/chained.
    """

    pass
def __magic_name__( cmd, return_stdout=False):
    """Run `cmd` via subprocess.check_output.

    Returns the decoded stdout when `return_stdout` is True (None otherwise);
    raises SubprocessCallException with the captured output on failure.

    Fixes the original, whose two parameters shared one name (SyntaxError),
    which probed ``hasattr`` on the command instead of the output, and which
    discarded the decoded string.
    """
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, '''decode'''):
                output = output.decode('''utf-8''')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"""Command `{" ".join(cmd)}` failed with the following error:\n\n{e.output.decode()}""") from e
| 9 |
'''simple docstring'''
import argparse
import datetime
def zeller(date_input):
    """Return a sentence naming the weekday of `date_input` ("mm-dd-yyyy" or
    "mm/dd/yyyy"), computed with Zeller's congruence and cross-checked against
    `datetime.date.weekday()`.

    Restores the name the __main__ guard calls (`zeller`) and the distinct
    intermediate names (t, u, v, x, z, w, f, response) that the original
    collapsed into one throwaway local, leaving the later reads unbound.

    Raises ValueError for malformed input and AssertionError if the hand
    computation ever disagrees with `datetime`.
    """
    days = {
        '''0''': '''Sunday''',
        '''1''': '''Monday''',
        '''2''': '''Tuesday''',
        '''3''': '''Wednesday''',
        '''4''': '''Thursday''',
        '''5''': '''Friday''',
        '''6''': '''Saturday''',
    }
    # datetime.weekday() (Mon=0) -> Zeller index (Sun=0), for the sanity check.
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('''Must be 10 characters long''')
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('''Month must be between 1 - 12''')
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''')
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('''Date must be between 1 - 31''')
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''')
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            '''Year out of range. There has to be some sort of limit...right?''')

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math: Zeller treats Jan/Feb as months 13/14 of the previous year.
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])   # century part of the year
    k = int(str(y)[2:])   # year within the century
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('''The date was evaluated incorrectly. Contact developer.''')

    # Response
    response = F"""Your date {date_input}, is a {days[str(f)]}!"""
    return response
if __name__ == "__main__":
    # Run the module doctests first, then the CLI.
    import doctest

    doctest.testmod()
    import argparse  # local import: only needed when run as a script

    # NOTE(review): the original assigned the parser and the parsed namespace to
    # ``_UpperCAmelCase`` but then read ``parser`` / ``args`` (NameError at
    # runtime) — restored the names actually used.
    parser = argparse.ArgumentParser(
        description=(
            """Find out what day of the week nearly any date is or was. Enter """
            """date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
        )
    )
    parser.add_argument(
        """date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
    )
    args = parser.parse_args()
    zeller(args.date_input)
| 9 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class a__ ( DonutImageProcessor ):
    """Deprecated alias kept for backward compatibility.

    Instantiating this class emits a ``FutureWarning`` and otherwise behaves
    exactly like ``DonutImageProcessor``.

    NOTE(review): the original base class was the undefined name ``__A``;
    ``DonutImageProcessor`` (imported above and otherwise unused) is the only
    plausible base — confirm against the original file. The original
    ``__init__`` also repeated the parameter name ``__lowercase`` for both
    ``*args`` and ``**kwargs`` (a SyntaxError) and passed an undefined name as
    the warning category; ``FutureWarning`` is the conventional category for
    deprecation shims.
    """

    def __init__(self, *args, **kwargs):
        # Warn on every instantiation that this class will be removed.
        warnings.warn(
            '''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DonutImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args, **kwargs)
| 9 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a__ ( __A , unittest.TestCase ):
    """Fast CPU tests for ``ConsistencyModelPipeline`` using tiny test UNets.

    Exercises multistep (timesteps=[22, 0]) and single-step sampling, each with
    and without class conditioning, and pins a 3x3 corner slice of the
    generated 32x32 image against hard-coded expected values.

    NOTE(review): this block is left byte-identical and only annotated; it
    carries obfuscation artifacts that make it non-runnable as shown:
    - ``__A`` is undefined here; presumably the ``PipelineTesterMixin``
      imported above — confirm against the original file.
    - every method is named ``_snake_case`` (each ``def`` rebinds the
      previous one) and ``_snake_case (self , __lowercase , __lowercase=0 )``
      repeats a parameter name, which is a SyntaxError.
    - assignments target ``__lowerCAmelCase`` while later lines read the
      intended names (``unet``, ``pipe``, ``image`` ...), and bare
      ``__lowerCAmelCase = 0 / 1 / None`` statements in the tests look like
      collapsed ``inputs[...] = ...`` updates with no current effect.
    """
    __UpperCamelCase : Optional[Any] = ConsistencyModelPipeline
    __UpperCamelCase : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    __UpperCamelCase : int = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    __UpperCamelCase : List[Any] = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ] )
    @property
    def _snake_case (self ):
        # Tiny *unconditional* test UNet checkpoint.
        __lowerCAmelCase = UNetaDModel.from_pretrained(
            '''diffusers/consistency-models-test''' , subfolder='''test_unet''' , )
        return unet
    @property
    def _snake_case (self ):
        # Tiny *class-conditional* test UNet checkpoint.
        __lowerCAmelCase = UNetaDModel.from_pretrained(
            '''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , )
        return unet
    def _snake_case (self , __lowercase=False ):
        # Build pipeline components: chosen dummy UNet + CM multistep scheduler.
        if class_cond:
            __lowerCAmelCase = self.dummy_cond_unet
        else:
            __lowerCAmelCase = self.dummy_uncond_unet
        # Default to CM multistep sampler
        __lowerCAmelCase = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCAmelCase = {
            '''unet''': unet,
            '''scheduler''': scheduler,
        }
        return components
    def _snake_case (self , __lowercase , __lowercase=0 ):
        # Standard pipeline call kwargs with a device-appropriate seeded generator
        # (mps needs torch.manual_seed; other devices take a device Generator).
        if str(__lowercase ).startswith('''mps''' ):
            __lowerCAmelCase = torch.manual_seed(__lowercase )
        else:
            __lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
        __lowerCAmelCase = {
            '''batch_size''': 1,
            '''num_inference_steps''': None,
            '''timesteps''': [22, 0],
            '''generator''': generator,
            '''output_type''': '''np''',
        }
        return inputs
    def _snake_case (self ):
        # Multistep unconditional sampling; checks shape and a pinned 3x3 slice.
        __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase = self.get_dummy_components()
        __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase )
        __lowerCAmelCase = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __lowerCAmelCase = self.get_dummy_inputs(__lowercase )
        __lowerCAmelCase = pipe(**__lowercase ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase = image[0, -3:, -3:, -1]
        __lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case (self ):
        # Multistep *class-conditional* sampling (the bare `= 0` was likely
        # `inputs["class_labels"] = 0` before obfuscation).
        __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase )
        __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase )
        __lowerCAmelCase = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __lowerCAmelCase = self.get_dummy_inputs(__lowercase )
        __lowerCAmelCase = 0
        __lowerCAmelCase = pipe(**__lowercase ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase = image[0, -3:, -3:, -1]
        __lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case (self ):
        # Single-step sampling (the bare `= 1` / `= None` were likely
        # `inputs["num_inference_steps"] = 1` / `inputs["timesteps"] = None`).
        __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase = self.get_dummy_components()
        __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase )
        __lowerCAmelCase = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __lowerCAmelCase = self.get_dummy_inputs(__lowercase )
        __lowerCAmelCase = 1
        __lowerCAmelCase = None
        __lowerCAmelCase = pipe(**__lowercase ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase = image[0, -3:, -3:, -1]
        __lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case (self ):
        # Single-step *class-conditional* sampling.
        __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase )
        __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase )
        __lowerCAmelCase = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __lowerCAmelCase = self.get_dummy_inputs(__lowercase )
        __lowerCAmelCase = 1
        __lowerCAmelCase = None
        __lowerCAmelCase = 0
        __lowerCAmelCase = pipe(**__lowercase ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase = image[0, -3:, -3:, -1]
        __lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
    """Slow GPU tests for ``ConsistencyModelPipeline`` against the full
    ``diffusers/consistency_models`` ImageNet-64 L2 checkpoint: multistep and
    single-step sampling in fp32, plus fp16 variants that force torch 2.0
    flash attention via ``sdp_kernel``. Each test pins a 3x3 corner slice of
    the generated 64x64 image.

    NOTE(review): this block is left byte-identical and only annotated; it
    carries obfuscation artifacts that make it non-runnable as shown: all
    methods share the name ``_snake_case`` (only the last binding survives),
    the helper signatures repeat the parameter name ``__lowercase`` (a
    SyntaxError), and assignments target ``__lowerCAmelCase`` while later
    lines read the intended names (``pipe``, ``image``, ``latents`` ...).
    """
    def _snake_case (self ):
        # tearDown-style cleanup: free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _snake_case (self , __lowercase=0 , __lowercase=False , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ):
        # Standard call kwargs for the full checkpoint; optionally pre-seeds
        # fixed latents for reproducible fp16 comparisons.
        __lowerCAmelCase = torch.manual_seed(__lowercase )
        __lowerCAmelCase = {
            '''num_inference_steps''': None,
            '''timesteps''': [22, 0],
            '''class_labels''': 0,
            '''generator''': generator,
            '''output_type''': '''np''',
        }
        if get_fixed_latents:
            __lowerCAmelCase = self.get_fixed_latents(seed=__lowercase , device=__lowercase , dtype=__lowercase , shape=__lowercase )
            __lowerCAmelCase = latents
        return inputs
    def _snake_case (self , __lowercase=0 , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ):
        # Deterministic latent tensor for a given seed/device/dtype/shape.
        if type(__lowercase ) == str:
            __lowerCAmelCase = torch.device(__lowercase )
        __lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
        __lowerCAmelCase = randn_tensor(__lowercase , generator=__lowercase , device=__lowercase , dtype=__lowercase )
        return latents
    def _snake_case (self ):
        # Multistep sampling with the full ImageNet-64 checkpoint (fp32).
        __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        __lowerCAmelCase = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase )
        pipe.to(torch_device=__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __lowerCAmelCase = self.get_inputs()
        __lowerCAmelCase = pipe(**__lowercase ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCAmelCase = image[0, -3:, -3:, -1]
        __lowerCAmelCase = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def _snake_case (self ):
        # Single-step sampling with the full checkpoint (fp32); the bare
        # `= 1` / `= None` look like collapsed `inputs[...]` updates.
        __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        __lowerCAmelCase = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase )
        pipe.to(torch_device=__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __lowerCAmelCase = self.get_inputs()
        __lowerCAmelCase = 1
        __lowerCAmelCase = None
        __lowerCAmelCase = pipe(**__lowercase ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCAmelCase = image[0, -3:, -3:, -1]
        __lowerCAmelCase = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    @require_torch_a
    def _snake_case (self ):
        # Multistep sampling in fp16 with fixed latents, forcing flash SDPA.
        __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        __lowerCAmelCase = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase )
        pipe.to(torch_device=__lowercase , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=__lowercase )
        __lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ):
            __lowerCAmelCase = pipe(**__lowercase ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCAmelCase = image[0, -3:, -3:, -1]
        __lowerCAmelCase = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    @require_torch_a
    def _snake_case (self ):
        # Single-step sampling in fp16 with fixed latents, forcing flash SDPA.
        __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        __lowerCAmelCase = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase )
        pipe.to(torch_device=__lowercase , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=__lowercase )
        __lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase )
        __lowerCAmelCase = 1
        __lowerCAmelCase = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ):
            __lowerCAmelCase = pipe(**__lowercase ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCAmelCase = image[0, -3:, -3:, -1]
        __lowerCAmelCase = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 9 | 1 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
# Directory containing this test file; used to put the translation example
# script on sys.path so `run_translation.main` can be imported.
# NOTE(review): the original assigned this to ``_UpperCAmelCase`` while the
# f-string below reads ``bindir`` (NameError) — restored the name actually used.
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
    from run_translation import main # noqa

set_seed(4_2)
# Checkpoints used by the tests below: a small Marian en-ro student model and a tiny mBART.
_UpperCAmelCase : Tuple = """sshleifer/student_marian_en_ro_6_1"""
_UpperCAmelCase : Dict = """sshleifer/tiny-mbart"""
@require_torch
class a__ ( __A ):
    """End-to-end tests for the ``run_translation`` example: quick finetunes
    (single/multi GPU, sharded DDP, apex fp16), log-level propagation, a full
    slow training run, and a bitsandbytes-vs-AdamW GPU memory comparison.

    NOTE(review): this block is left byte-identical and only annotated; it
    carries obfuscation artifacts that make it non-runnable as shown:
    - ``__A`` is undefined here; the class uses ``self.run_trainer`` helpers
      such as ``get_auto_remove_tmp_dir``, ``test_file_dir``,
      ``examples_dir_str`` and ``get_env``, which belong to the imported
      ``TestCasePlus`` — presumably the intended base; confirm.
    - every method is named ``_snake_case`` (each ``def`` rebinds the last)
      and several signatures repeat the parameter name ``__lowercase``,
      which is a SyntaxError.
    - assignments target ``__lowerCAmelCase`` while later lines read the
      intended names (``logs``, ``eval_metrics``, ``args``, ``cmd`` ...).
    - the ``72,686`` values in the CLI template below look like garbled
      placeholders (likely ``{max_len}``) — confirm against upstream.
    """
    def _snake_case (self , __lowercase=False , __lowercase=None , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , ):
        # Quick 1-epoch finetune; when evaluating, sanity-checks that eval
        # metrics exist, contain a BLEU score, and the loss is not NaN.
        __lowerCAmelCase = self.run_trainer(
            eval_steps=1 , max_len=12 , model_name=__lowercase , num_train_epochs=1 , distributed=__lowercase , extra_args_str=__lowercase , predict_with_generate=__lowercase , do_train=__lowercase , do_eval=__lowercase , do_predict=__lowercase , )
        __lowerCAmelCase = TrainerState.load_from_json(os.path.join(__lowercase , '''trainer_state.json''' ) ).log_history
        if not do_eval:
            return
        __lowerCAmelCase = [log for log in logs if '''eval_loss''' in log.keys()]
        __lowerCAmelCase = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            __lowerCAmelCase = eval_metrics[-1]
            assert isinstance(last_step_stats['''eval_bleu'''] , __lowercase )
            assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def _snake_case (self ):
        # Quick run on a single GPU / CPU.
        self.run_seqaseq_quick()
    @require_torch_multi_gpu
    def _snake_case (self ):
        # Quick run under torch.distributed with multiple GPUs.
        self.run_seqaseq_quick(distributed=__lowercase )
    @require_torch_multi_gpu
    def _snake_case (self ):
        self.run_seqaseq_quick(distributed=__lowercase )
    @unittest.skip('''Requires an update of the env running those tests''' )
    @require_torch_multi_gpu
    @require_fairscale
    def _snake_case (self ):
        # fairscale simple sharded DDP.
        self.run_seqaseq_quick(distributed=__lowercase , extra_args_str='''--sharded_ddp simple''' )
    @unittest.skip('''Requires an update of the env running those tests''' )
    @require_torch_multi_gpu
    @require_fairscale
    def _snake_case (self ):
        # fairscale simple sharded DDP + fp16.
        self.run_seqaseq_quick(distributed=__lowercase , extra_args_str='''--sharded_ddp simple --fp16''' )
    @unittest.skip('''Requires an update of the env running those tests''' )
    @require_torch_multi_gpu
    @require_fairscale
    def _snake_case (self ):
        # fairscale zero_dp_2 sharding (generation disabled).
        self.run_seqaseq_quick(distributed=__lowercase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowercase )
    @unittest.skip('''Requires an update of the env running those tests''' )
    @require_torch_multi_gpu
    @require_fairscale
    def _snake_case (self ):
        # fairscale zero_dp_2 sharding + fp16.
        self.run_seqaseq_quick(
            distributed=__lowercase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowercase )
    @require_apex
    @require_torch_gpu
    def _snake_case (self ):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seqaseq_quick(distributed=__lowercase , extra_args_str='''--fp16 --fp16_backend=apex''' )
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=__lowercase , extra_args_str='''--fp16 --fp16_backend=apex''' )
    @parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
    @require_torch_multi_gpu
    def _snake_case (self , __lowercase ):
        # Checks how many times the "Running training" info line is emitted
        # for each combination of --log_level / --log_level_replica.
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        __lowerCAmelCase = {
            # test with the default log_level - should be info and thus log info once
            '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
        }
        __lowerCAmelCase = experiments[experiment_id]
        __lowerCAmelCase = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
        __lowerCAmelCase = '''Running training'''
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**__lowercase , extra_args_str=data['''extra_args_str'''] )
        __lowerCAmelCase = len(re.findall(__lowercase , cl.err ) )
        self.assertEqual(__lowercase , data['''n_matches'''] )
    @slow
    def _snake_case (self ):
        # Full 10-epoch run: loss must improve, BLEU must be produced, and
        # do_predict must write its output files.
        __lowerCAmelCase = self.run_trainer(
            eval_steps=2 , max_len=1_28 , model_name=__lowercase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowercase , )
        # Check metrics
        __lowerCAmelCase = TrainerState.load_from_json(os.path.join(__lowercase , '''trainer_state.json''' ) ).log_history
        __lowerCAmelCase = [log for log in logs if '''eval_loss''' in log.keys()]
        __lowerCAmelCase = eval_metrics[0]
        __lowerCAmelCase = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['''eval_bleu'''] , __lowercase )
        # test if do_predict saves generations and metrics
        __lowerCAmelCase = os.listdir(__lowercase )
        __lowerCAmelCase = {os.path.basename(__lowercase ) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def _snake_case (self ):
        # Trains once with AdamW and once with bitsandbytes 8-bit Adam, then
        # asserts BNB saves >=120MB of allocated/total GPU memory at equal loss.
        from transformers.training_args import OptimizerNames
        def train_and_return_metrics(__lowercase ) -> Tuple[int, float]:
            # One 1-epoch training run with the given optimizer; returns peak
            # and allocated GPU memory deltas (MB) plus the train loss.
            __lowerCAmelCase = '''--skip_memory_metrics 0'''
            __lowerCAmelCase = self.run_trainer(
                max_len=1_28 , model_name=__lowercase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowercase , distributed=__lowercase , extra_args_str=__lowercase , do_eval=__lowercase , do_predict=__lowercase , n_gpus_to_use=1 , )
            # Check metrics
            __lowerCAmelCase = TrainerState.load_from_json(Path(__lowercase , '''trainer_state.json''' ) ).log_history
            __lowerCAmelCase = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
            __lowerCAmelCase = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
            __lowerCAmelCase = logs[0]['''train_loss''']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        __lowerCAmelCase = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        __lowerCAmelCase = gpu_peak_mem_orig + gpu_alloc_mem_orig
        __lowerCAmelCase = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        __lowerCAmelCase = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb 25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        __lowerCAmelCase = 1_20
        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            __lowercase , __lowercase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
            F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
            F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
        self.assertGreater(
            __lowercase , __lowercase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
            F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
            F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
        self.assertEqual(
            __lowercase , __lowercase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
    def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase = 3e-3 , __lowercase = "adafactor" , __lowercase = False , __lowercase = None , __lowercase = 0 , __lowercase = True , __lowercase = True , __lowercase = True , __lowercase = True , __lowercase = None , ):
        # Builds the run_translation CLI arg list and executes it either
        # in-process (patching sys.argv) or via torch.distributed.run.
        __lowerCAmelCase = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
        __lowerCAmelCase = self.get_auto_remove_tmp_dir()
        __lowerCAmelCase = F"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length 72,686
            --max_target_length 72,686
            --do_train
            --num_train_epochs {str(__lowercase )}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(__lowercase )}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()
        __lowerCAmelCase = F"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length 72,686
            --evaluation_strategy steps
            --eval_steps {str(__lowercase )}
        """.split()
        __lowerCAmelCase = '''
            --do_predict
        '''.split()
        __lowerCAmelCase = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += F"""--optim {optim}""".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                __lowerCAmelCase = get_gpu_count()
            __lowerCAmelCase = get_torch_dist_unique_port()
            __lowerCAmelCase = F"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            __lowerCAmelCase = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(__lowercase , env=self.get_env() )
        else:
            __lowerCAmelCase = ['''run_translation.py'''] + args
            with patch.object(__lowercase , '''argv''' , __lowercase ):
                main()
        return output_dir
| 9 |
"""k-nearest-neighbours classification demo on the scikit-learn iris dataset."""
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

# Load iris and split it into train/test subsets (default 75/25 split).
# NOTE(review): the original assigned every value to ``_UpperCAmelCase`` while
# later lines read ``data`` / ``X`` / ``y`` / ``classes`` / ``X_train`` etc.
# (NameError) — restored the names actually read.
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Return the Euclidean (L2) distance between two same-length points.

    NOTE(review): the original ``def`` repeated the parameter name
    ``lowerCamelCase`` (a SyntaxError) and was named ``__magic_name__``, yet
    the classifier below calls ``euclidean_distance`` — restored the name and
    distinct parameters actually used.
    """
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5):
    """Classify ``point`` by majority vote among its ``k`` nearest neighbours.

    Args:
        train_data: training feature vectors.
        train_target: integer class index for each training vector.
        classes: sequence mapping class index -> class name.
        point: the feature vector to classify.
        k: number of neighbours to vote (default 5).

    Returns:
        The class name (entry of ``classes``) chosen by the majority vote.

    NOTE(review): the original ``def`` repeated the parameter name
    ``lowerCamelCase`` five times (a SyntaxError) and was named
    ``__magic_name__``, yet the demo below calls ``classifier`` — restored the
    name and distinct parameters implied by the call site.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
    # Demo: classify a single iris sample (k=5) using the module-level split.
    # NOTE(review): relies on X_train / y_train / classes being bound by the
    # setup at the top of this file — confirm those names are actually defined.
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 9 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a__ ( __A , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = ConsistencyModelPipeline
__UpperCamelCase : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__UpperCamelCase : int = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
__UpperCamelCase : List[Any] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
@property
def _snake_case (self ):
__lowerCAmelCase = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet''' , )
return unet
@property
def _snake_case (self ):
__lowerCAmelCase = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , )
return unet
def _snake_case (self , __lowercase=False ):
if class_cond:
__lowerCAmelCase = self.dummy_cond_unet
else:
__lowerCAmelCase = self.dummy_uncond_unet
# Default to CM multistep sampler
__lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def _snake_case (self , __lowercase , __lowercase=0 ):
if str(__lowercase ).startswith('''mps''' ):
__lowerCAmelCase = torch.manual_seed(__lowercase )
else:
__lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCAmelCase = {
'''batch_size''': 1,
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''generator''': generator,
'''output_type''': '''np''',
}
return inputs
def _snake_case (self ):
__lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = ConsistencyModelPipeline(**__lowercase )
__lowerCAmelCase = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case (self ):
__lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase )
__lowerCAmelCase = ConsistencyModelPipeline(**__lowercase )
__lowerCAmelCase = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = 0
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case (self ):
__lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = ConsistencyModelPipeline(**__lowercase )
__lowerCAmelCase = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = 1
__lowerCAmelCase = None
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case (self ):
__lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase )
__lowerCAmelCase = ConsistencyModelPipeline(**__lowercase )
__lowerCAmelCase = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = 1
__lowerCAmelCase = None
__lowerCAmelCase = 0
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
"""simple docstring"""
def _snake_case (self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case (self , __lowercase=0 , __lowercase=False , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ):
__lowerCAmelCase = torch.manual_seed(__lowercase )
__lowerCAmelCase = {
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''class_labels''': 0,
'''generator''': generator,
'''output_type''': '''np''',
}
if get_fixed_latents:
__lowerCAmelCase = self.get_fixed_latents(seed=__lowercase , device=__lowercase , dtype=__lowercase , shape=__lowercase )
__lowerCAmelCase = latents
return inputs
def _snake_case (self , __lowercase=0 , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ):
if type(__lowercase ) == str:
__lowerCAmelCase = torch.device(__lowercase )
__lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCAmelCase = randn_tensor(__lowercase , generator=__lowercase , device=__lowercase , dtype=__lowercase )
return latents
def _snake_case (self ):
__lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase )
pipe.to(torch_device=__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _snake_case (self ):
__lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase )
pipe.to(torch_device=__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = 1
__lowerCAmelCase = None
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
# fp16 multistep variant, gated on torch 2.x; runs under sdp_kernel with
# flash/math/mem-efficient flags to exercise scaled-dot-product attention.
# Tighter 1e-3 tolerance because fixed latents make the run deterministic.
@require_torch_a
def _snake_case (self ):
__lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase )
pipe.to(torch_device=__lowercase , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ):
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
# fp16 single-step variant under sdp_kernel: same checkpoint, one inference
# step with default timesteps, fixed latents, 1e-3 tolerance.
@require_torch_a
def _snake_case (self ):
__lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase )
pipe.to(torch_device=__lowercase , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase )
# NOTE(review): presumably inputs["num_inference_steps"] = 1 and
# inputs["timesteps"] = None — garbled assignment targets, confirm upstream.
__lowerCAmelCase = 1
__lowerCAmelCase = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ):
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 9 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class a__ ( unittest.TestCase ):
"""Unit tests for the OwlViT processor: verifies that the processor couples a
CLIP tokenizer with an OwlViT image processor consistently (save/load
round-trips, parity with the standalone tokenizer/image processor, text and
image batching, query images, and batch_decode delegation).

NOTE(review): several method bodies below reference ``__lowercase`` although
the enclosing method takes no such parameter — a garbling artifact; the
pre-garbling code bound intermediates to named locals. Flagged inline where
it occurs; do not assume these methods run as written.
"""
# setUp: write a tiny CLIP vocab/merges pair and an image-processor JSON
# config into a fresh temp dir so components can be loaded from disk.
def _snake_case (self ):
__lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
__lowerCAmelCase = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__lowerCAmelCase = {'''unk_token''': '''<unk>'''}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowercase ) )
# Minimal image-processor config covering resize/crop/normalize.
__lowerCAmelCase = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowercase , __lowercase )
# Helper: slow CLIP tokenizer loaded from the temp dir ("!" as pad token).
def _snake_case (self , **__lowercase ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase )
# Helper: fast (Rust) CLIP tokenizer from the same files.
def _snake_case (self , **__lowercase ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase )
# Helper: OwlViT image processor from the JSON config written in setUp.
def _snake_case (self , **__lowercase ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__lowercase )
# tearDown: remove the temp dir created in setUp.
def _snake_case (self ):
shutil.rmtree(self.tmpdirname )
# Build one random 3x30x400 uint8 image and return it as a PIL image list.
def _snake_case (self ):
__lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
# save_pretrained/from_pretrained round-trip preserves both the tokenizer
# (slow and fast) and the image processor.
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase )
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowercase )
self.assertIsInstance(processor_fast.tokenizer , __lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowercase )
self.assertIsInstance(processor_fast.image_processor , __lowercase )
# from_pretrained kwargs (new special tokens, do_normalize) are forwarded to
# the underlying tokenizer and image processor.
def _snake_case (self ):
__lowerCAmelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase )
__lowerCAmelCase = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowercase )
# Image path of the processor matches the standalone image processor
# (per-key sums equal within 1e-2).
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' )
__lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
# Text path of the processor matches the standalone tokenizer exactly.
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = processor(text=__lowercase , return_tensors='''np''' )
__lowerCAmelCase = tokenizer(__lowercase , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
# text+images call yields the combined key set; calling with nothing raises.
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__lowercase , images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
# A flat list of query strings is padded to the model's fixed seq length (16).
def _snake_case (self ):
__lowerCAmelCase = '''google/owlvit-base-patch32'''
__lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase )
__lowerCAmelCase = ['''cat''', '''nasa badge''']
__lowerCAmelCase = processor(text=__lowercase )
__lowerCAmelCase = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
# Nested (per-image) query lists are flattened to
# batch_size * max-queries-per-image rows.
def _snake_case (self ):
__lowerCAmelCase = '''google/owlvit-base-patch32'''
__lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase )
__lowerCAmelCase = [['''cat''', '''nasa badge'''], ['''person''']]
__lowerCAmelCase = processor(text=__lowercase )
__lowerCAmelCase = 16
__lowerCAmelCase = len(__lowercase )
__lowerCAmelCase = max([len(__lowercase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
# Exact token ids produced for two known queries by the released checkpoint.
def _snake_case (self ):
__lowerCAmelCase = '''google/owlvit-base-patch32'''
__lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase )
__lowerCAmelCase = ['''cat''', '''nasa badge''']
__lowerCAmelCase = processor(text=__lowercase )
__lowerCAmelCase = 16
__lowerCAmelCase = inputs['''input_ids''']
__lowerCAmelCase = [
[4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
# Image-guided detection: query_images produce query_pixel_values alongside
# pixel_values.
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(images=__lowercase , query_images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
# batch_decode is delegated verbatim to the tokenizer.
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase = processor.batch_decode(__lowercase )
__lowerCAmelCase = tokenizer.batch_decode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
| 9 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level tokenizer metadata. The garbled version bound every constant to
# the same name ``_UpperCAmelCase`` (each assignment shadowing the previous
# one), while the tokenizer class below reads VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES —
# restore the names the class actually references.
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = """▁"""

# Name of the on-disk SentencePiece model file inside a checkpoint.
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}

# Hub URLs of the vocab file for each released checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """google/reformer-crime-and-punishment""": (
            """https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
        )
    }
}

# Maximum supported input length (axial position embeddings) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """google/reformer-crime-and-punishment""": 5_2_4_2_8_8,
}
class a__ ( __A ):
"""SentencePiece-based Reformer tokenizer (base class ``__A`` is the garbled
name of PreTrainedTokenizer, defined elsewhere).

NOTE(review): several method signatures below declare multiple parameters all
named ``__lowercase`` — duplicate argument names are a SyntaxError, and the
bodies reference the pre-garbling names (``vocab_file``, ``sp_model_kwargs``,
``d``, ...). Restore distinct parameter names from upstream before use.
"""
__UpperCamelCase : Dict = VOCAB_FILES_NAMES
__UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Tuple = ['input_ids', 'attention_mask']
# __init__: loads the SentencePiece model from ``vocab_file``; ``sp_model_kwargs``
# defaults to {} when None. (Garbled signature, see class NOTE.)
def __init__(self , __lowercase , __lowercase="</s>" , __lowercase="<unk>" , __lowercase=[] , __lowercase = None , **__lowercase , ):
__lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowercase , unk_token=__lowercase , additional_special_tokens=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
__lowerCAmelCase = vocab_file
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowercase )
# vocab_size: delegated to the SentencePiece model.
@property
def _snake_case (self ):
return self.sp_model.get_piece_size()
# get_vocab: token -> id mapping including added tokens.
def _snake_case (self ):
__lowerCAmelCase = {self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
# Pickling support: the SentencePieceProcessor itself is not picklable, so it
# is dropped on __getstate__ and reloaded from vocab_file on __setstate__.
def __getstate__(self ):
__lowerCAmelCase = self.__dict__.copy()
__lowerCAmelCase = None
return state
def __setstate__(self , __lowercase ):
__lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__lowerCAmelCase = {}
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
# _tokenize: string -> list of SentencePiece pieces.
def _snake_case (self , __lowercase ):
return self.sp_model.encode(__lowercase , out_type=__lowercase )
# _convert_token_to_id via the SentencePiece vocabulary.
def _snake_case (self , __lowercase ):
return self.sp_model.piece_to_id(__lowercase )
# _convert_id_to_token; only ids inside the SentencePiece range are mapped.
def _snake_case (self , __lowercase ):
if index < self.sp_model.get_piece_size():
__lowerCAmelCase = self.sp_model.IdToPiece(__lowercase )
return token
# convert_tokens_to_string: decode runs of ordinary pieces with SentencePiece,
# passing special tokens through verbatim.
def _snake_case (self , __lowercase ):
__lowerCAmelCase = []
__lowerCAmelCase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowercase ) + token
__lowerCAmelCase = []
else:
current_sub_tokens.append(__lowercase )
out_string += self.sp_model.decode(__lowercase )
return out_string.strip()
# save_vocabulary: copy (or re-serialize) the SentencePiece model into
# ``save_directory`` and return the written path as a 1-tuple.
def _snake_case (self , __lowercase , __lowercase = None ):
if not os.path.isdir(__lowercase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCAmelCase = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowercase , '''wb''' ) as fi:
__lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__lowercase )
return (out_vocab_file,)
| 9 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """Build one random benchmark instance for the triplet-sum timings.

    Returns:
        A pair ``(arr, target)``: ten integers drawn uniformly from
        [-1000, 1000] and a target drawn from [-5000, 5000].

    The def was garbled to ``__magic_name__`` while the module-level call site
    invokes ``make_dataset()`` — the real name is restored here.
    """
    arr = [randint(-1_0_0_0, 1_0_0_0) for _ in range(1_0)]
    target = randint(-5_0_0_0, 5_0_0_0)
    return (arr, target)
# Shared benchmark instance; must be named ``dataset`` because solution_times()
# imports it via "from __main__ import dataset, ..." (the garbled binding
# ``_UpperCAmelCase : Dict`` also carried a wrong annotation — the value is a
# (list[int], int) tuple, not a dict).
dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Naive O(n^3) search for three distinct elements of ``arr`` summing to ``target``.

    Returns the matching triplet in ascending order, or ``(0, 0, 0)`` when no
    triplet sums to ``target``.

    Fixes the garbled signature (both parameters were named ``lowerCamelCase``,
    a SyntaxError); the name matches the ``from __main__ import ... triplet_sum1``
    setup string used by solution_times().
    """
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            # sorted() makes the answer order-independent.
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer O(n^2) search for three elements of ``arr`` summing to ``target``.

    Returns the matching triplet in ascending order, or ``(0, 0, 0)`` when no
    triplet exists. Note: sorts ``arr`` in place (kept for parity with the
    benchmark's original behavior).

    Fixes the garbled signature (duplicate ``lowerCamelCase`` parameters were a
    SyntaxError) and hoists the triple sum, which the original recomputed up to
    three times per inner step.
    """
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            current = arr[i] + arr[left] + arr[right]
            if current == target:
                return (arr[i], arr[left], arr[right])
            if current < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    """Benchmark both triplet-sum implementations with ``timeit.repeat``.

    Returns:
        ``(naive_best, optimized_best)`` — the best of 5 repeats of 10_000
        runs each, for triplet_sum1 and triplet_sum2 respectively.

    Restores the real name (the ``__main__`` guard below calls
    ``solution_times()``) and distinct local names for the setup/statement
    strings, which the garbling had collapsed onto one variable.
    """
    setup_code = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
    test_code1 = '''
triplet_sum1(*dataset)
'''
    test_code2 = '''
triplet_sum2(*dataset)
'''
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=1_0_0_0_0)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=1_0_0_0_0)
    return (min(times1), min(times2))
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # The result must be bound to ``times`` — the f-strings below index
    # times[0]/times[1] (the garbled binding ``_UpperCAmelCase`` left them
    # referencing an undefined name).
    times = solution_times()
    print(f"""The time for naive implementation is {times[0]}.""")
    print(f"""The time for optimized implementation is {times[1]}.""")
| 9 | 1 |
'''simple docstring'''
class SubArray:
    """Maximum-sum contiguous subarray over a comma-separated string of integers.

    The class was garbled to ``a__`` while the ``__main__`` guard instantiates
    ``SubArray(whole_array)``; the real name and the parameter/attribute names
    the bodies reference (``arr``, ``self.array``) are restored here.
    """

    def __init__(self, arr):
        """Store ``arr`` split on commas.

        Elements stay as strings; they are converted with int() on use, so
        non-numeric input surfaces as a ValueError in solve_sub_array().
        """
        # we need a list not a string, so do something to change the type
        self.array = arr.split(''',''')

    def solve_sub_array(self):
        """Return the maximum sum over all contiguous subarrays (Kadane-style DP).

        sum_value[i] is the best sum of a subarray ending at i; rear[i] is the
        best sum seen over the prefix 0..i. The answer is rear[-1].
        """
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            # Either extend the best subarray ending at i-1 or restart at i.
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
    # Restore the names the garbled bindings broke: the constructor call reads
    # ``whole_array`` and solve_sub_array() is invoked on ``array``.
    whole_array = input("""please input some numbers:""")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("""the results is:""", re))
| 9 |
'''simple docstring'''
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1E-12,
    max_iterations: int = 1_0_0,
) -> tuple[float, np.ndarray]:
    """Power iteration: largest-magnitude eigenvalue and eigenvector of a
    symmetric (or Hermitian, when complex) matrix.

    Args:
        input_matrix: square, real-symmetric or complex-Hermitian matrix.
        vector: starting vector with a nonzero component along the dominant
            eigenvector.
        error_tol: stop once the relative eigenvalue change drops below this.
        max_iterations: hard cap on iterations.

    Returns:
        ``(lambda_, vector)`` — the dominant eigenvalue (real for Hermitian
        input) and the corresponding unit eigenvector.

    Fixes the garbled signature (all four parameters named ``lowerCamelCase``
    was a SyntaxError); the body already referenced ``error_tol`` and
    ``max_iterations``, and the test function below calls ``power_iteration``.
    """
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1E12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        # A Hermitian matrix has real eigenvalues; drop the ~0 imaginary part.
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration() -> None:
    """Check power_iteration against numpy's eigh on a real-symmetric and a
    complex-Hermitian 3x3 matrix.

    Restores the real name (the ``__main__`` guard calls
    ``test_power_iteration()``) and the local names the garbled body already
    referenced (``real_input_matrix``, ``complex_input_matrix``).
    """
    real_input_matrix = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]])
    real_vector = np.array([4_1, 4, 2_0])
    # Build a Hermitian complex matrix: real part as above, skew-symmetric
    # imaginary part (upper triangle +i*A, lower triangle its negative).
    complex_input_matrix = real_input_matrix.astype(np.complexaaa)
    imag_matrix = np.triu(1J * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([4_1, 4, 2_0]).astype(np.complexaaa)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1E-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1E-6
# Script entry: run the module's doctests, then the eigenvalue self-test.
# (The call to ``test_power_iteration`` confirms that name is the intended one
# for the test function above.)
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 9 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and the checkpoint -> config-URL map for Data2Vec-Vision.
# NOTE(review): both constants were garbled to the same name
# ``_UpperCAmelCase`` (the second assignment shadows the first, losing the
# logger); presumably these were ``logger`` and the pretrained-config archive
# map — nothing in view references either, so the names are left as-is.
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class a__ ( __A ):
"""Configuration for Data2Vec-Vision models (base class ``__A`` is the garbled
name of PretrainedConfig, defined elsewhere). Stores the ViT-style backbone
hyperparameters plus the semantic-segmentation decode/auxiliary head options.

NOTE(review): the ``__init__`` signature declares every parameter as
``__lowercase`` — duplicate argument names are a SyntaxError. The assignment
order below mirrors the upstream parameter order (hidden_size=768,
num_hidden_layers=12, ...); restore distinct names from upstream before use.
"""
__UpperCamelCase : Tuple = 'data2vec-vision'
def __init__(self , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=2_24 , __lowercase=16 , __lowercase=3 , __lowercase=False , __lowercase=False , __lowercase=False , __lowercase=False , __lowercase=0.1 , __lowercase=0.1 , __lowercase=True , __lowercase=[3, 5, 7, 11] , __lowercase=[1, 2, 3, 6] , __lowercase=True , __lowercase=0.4 , __lowercase=2_56 , __lowercase=1 , __lowercase=False , __lowercase=2_55 , **__lowercase , ):
super().__init__(**__lowercase )
# Transformer backbone hyperparameters.
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
# Patch-embedding / input geometry.
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
# BEiT-style options: mask token, position-embedding variants, layer scale,
# stochastic depth, and pooling strategy.
__lowerCAmelCase = use_mask_token
__lowerCAmelCase = use_absolute_position_embeddings
__lowerCAmelCase = use_relative_position_bias
__lowerCAmelCase = use_shared_relative_position_bias
__lowerCAmelCase = layer_scale_init_value
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
__lowerCAmelCase = out_indices
__lowerCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
__lowerCAmelCase = use_auxiliary_head
__lowerCAmelCase = auxiliary_loss_weight
__lowerCAmelCase = auxiliary_channels
__lowerCAmelCase = auxiliary_num_convs
__lowerCAmelCase = auxiliary_concat_input
__lowerCAmelCase = semantic_loss_ignore_index
class a__ ( __A ):
"""ONNX export configuration for Data2Vec-Vision (base class ``__A`` is the
garbled name of OnnxConfig).

NOTE(review): both properties below were garbled to the same name
``_snake_case``, so the second definition shadows the first; presumably they
were ``inputs`` (the NCHW pixel_values axes) and ``atol_for_validation``
(1e-4) — confirm against upstream before relying on either.
"""
__UpperCamelCase : Any = version.parse('1.11' )
@property
def _snake_case (self ):
# Dynamic axes for the single model input: batched NCHW images.
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _snake_case (self ):
# Absolute tolerance used when validating the exported ONNX graph.
return 1e-4
| 9 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCAmelCase : str = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute ``(x0, y0, x1, y1)`` box into the 0-1000 coordinate
    space LayoutLM-style models expect.

    Args:
        box: pixel coordinates ``[left, top, right, bottom]``.
        width: image width in pixels (divides the x coordinates).
        height: image height in pixels (divides the y coordinates).

    Fixes the garbled signature — all three parameters were named
    ``lowerCamelCase`` (a SyntaxError) while the body already referenced
    ``box``/``width``/``height``; the def is renamed to ``normalize_box``,
    the name the OCR helper below calls.
    """
    return [
        int(1_0_0_0 * (box[0] / width)),
        int(1_0_0_0 * (box[1] / height)),
        int(1_0_0_0 * (box[2] / width)),
        int(1_0_0_0 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config=None):
    """Run Tesseract OCR on a document image.

    Args:
        image: image in any format ``to_pil_image`` accepts.
        lang: language hint forwarded to Tesseract (may be None).
        tesseract_config: extra Tesseract CLI flags; None means "".

    Returns:
        ``(words, normalized_boxes)`` — non-empty recognized words and their
        bounding boxes scaled to the 0-1000 space via ``normalize_box``.

    Fixes the garbled signature (three parameters all named ``lowerCamelCase``
    was a SyntaxError) and restores the local names the body referenced
    (``pil_image``, ``tesseract_config``, ...); renamed to ``apply_tesseract``,
    the name the image-processor class below calls.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ''''''
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='''dict''', config=tesseract_config)
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class a__ ( __A ):
"""LayoutLM-style document image processor (base class ``__A`` is the garbled
name of BaseImageProcessor). Resizes document images, optionally runs
Tesseract OCR to extract words and normalized boxes, and flips channels
RGB -> BGR as the downstream model expects.

NOTE(review): the ``resize`` and ``preprocess`` signatures below declare many
parameters all named ``__lowercase`` — duplicate argument names are a
SyntaxError — while the bodies reference the pre-garbling names (``size``,
``do_resize``, ...). Restore distinct parameter names from upstream before use.
"""
__UpperCamelCase : str = ['pixel_values']
# __init__(do_resize=True, size=None, resample=BILINEAR, apply_ocr=True,
# ocr_lang=None, tesseract_config=""): stores defaults; size falls back to
# 224x224 when not given.
def __init__(self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = None , __lowercase = "" , **__lowercase , ):
super().__init__(**__lowercase )
__lowerCAmelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
__lowerCAmelCase = get_size_dict(__lowercase )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = apply_ocr
__lowerCAmelCase = ocr_lang
__lowerCAmelCase = tesseract_config
# resize: validates the size dict has height/width, then delegates to the
# functional ``resize`` with (height, width). (Garbled signature, see class NOTE.)
def _snake_case (self , __lowercase , __lowercase , __lowercase = PILImageResampling.BILINEAR , __lowercase = None , **__lowercase , ):
__lowerCAmelCase = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__lowerCAmelCase = (size['''height'''], size['''width'''])
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
# preprocess: resolve per-call overrides against the stored defaults, validate
# inputs, optionally OCR every image, resize, flip RGB->BGR, and pack a
# BatchFeature with pixel_values (+ words/boxes when OCR ran).
def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ):
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(__lowercase )
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = apply_ocr if apply_ocr is not None else self.apply_ocr
__lowerCAmelCase = ocr_lang if ocr_lang is not None else self.ocr_lang
__lowerCAmelCase = tesseract_config if tesseract_config is not None else self.tesseract_config
__lowerCAmelCase = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(__lowercase ) for image in images]
if apply_ocr:
# pytesseract is a soft dependency; fail clearly when it is missing.
requires_backends(self , '''pytesseract''' )
__lowerCAmelCase = []
__lowerCAmelCase = []
for image in images:
__lowerCAmelCase , __lowerCAmelCase = apply_tesseract(__lowercase , __lowercase , __lowercase )
words_batch.append(__lowercase )
boxes_batch.append(__lowercase )
if do_resize:
__lowerCAmelCase = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
__lowerCAmelCase = [flip_channel_order(__lowercase ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
__lowerCAmelCase = BatchFeature(data={'''pixel_values''': images} , tensor_type=__lowercase )
if apply_ocr:
__lowerCAmelCase = words_batch
__lowerCAmelCase = boxes_batch
return data
| 9 | 1 |
'''simple docstring'''
from itertools import product
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = sides_number
__lowerCAmelCase = max_face_number * dice_number
__lowerCAmelCase = [0] * (max_total + 1)
__lowerCAmelCase = 1
__lowerCAmelCase = range(lowerCamelCase, max_face_number + 1)
for dice_numbers in product(lowerCamelCase, repeat=lowerCamelCase):
__lowerCAmelCase = sum(lowerCamelCase)
totals_frequencies[total] += 1
return totals_frequencies
def __magic_name__( ):
__lowerCAmelCase = total_frequency_distribution(
sides_number=4, dice_number=9)
__lowerCAmelCase = total_frequency_distribution(
sides_number=6, dice_number=6)
__lowerCAmelCase = 0
__lowerCAmelCase = 9
__lowerCAmelCase = 4 * 9
__lowerCAmelCase = 6
for peter_total in range(lowerCamelCase, max_peter_total + 1):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total])
__lowerCAmelCase = (4**9) * (6**6)
__lowerCAmelCase = peter_wins_count / total_games_number
__lowerCAmelCase = round(lowerCamelCase, ndigits=7)
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 9 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : int = ['torch', 'scipy']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch''', '''scipy'''] )
| 9 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : Union[str, Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_UpperCAmelCase : List[Any] = 2_5_0_0_0_4
_UpperCAmelCase : Union[str, Any] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class a__ ( __A , unittest.TestCase ):
    """Unit tests for the MBart tokenizers (slow and fast) against a tiny
    SentencePiece fixture.

    NOTE(review): this file looks machine-mangled — the mixin base `__A` is
    undefined (presumably the TokenizerTesterMixin imported above), the class
    attributes lost their names, and many argument literals were replaced by
    the undefined name `__lowercase`. Code is kept byte-identical below; only
    comments were added.
    """

    # NOTE(review): presumably the mixin's `tokenizer_class`,
    # `rust_tokenizer_class`, `test_rust_tokenizer` and `test_sentencepiece`
    # settings — confirm against the mixin before restoring names.
    __UpperCamelCase : str = MBartTokenizer
    __UpperCamelCase : Union[str, Any] = MBartTokenizerFast
    __UpperCamelCase : int = True
    __UpperCamelCase : Tuple = True

    def _snake_case (self ):
        # setUp: build a tokenizer from the fixture and save it to the temp
        # dir. NOTE(review): `__lowercase` and `tokenizer` are undefined here —
        # the original bindings/literals were destroyed by the mangling.
        super().setUp()
        # We have a SentencePiece fixture for testing
        __lowerCAmelCase = MBartTokenizer(__lowercase , keep_accents=__lowercase )
        tokenizer.save_pretrained(self.tmpdirname )

    def _snake_case (self ):
        # Full-tokenizer check: tokenize, convert tokens<->ids, comparing
        # against hard-coded SentencePiece pieces and fairseq-offset ids.
        __lowerCAmelCase = MBartTokenizer(__lowercase , keep_accents=__lowercase )
        __lowerCAmelCase = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(__lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__lowercase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        __lowerCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            __lowercase , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        __lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(
            __lowercase , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ] , )
        __lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowercase )
        self.assertListEqual(
            __lowercase , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )

    def _snake_case (self ):
        # Round-trip save/load check: fast and slow tokenizers must save the
        # same files (modulo tokenizer.json) and reload with matching
        # special-token maps, in legacy and non-legacy formats.
        # NOTE(review): `tokenizer_r`, `tokenizer_p`, `tokenizer_r_files`,
        # `tokenizer_pp` etc. are read but never bound — original bindings
        # were destroyed by the mangling.
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        __lowerCAmelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase )
                __lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowercase , **__lowercase )
                __lowerCAmelCase = tempfile.mkdtemp()
                __lowerCAmelCase = tokenizer_r.save_pretrained(__lowercase )
                __lowerCAmelCase = tokenizer_p.save_pretrained(__lowercase )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                __lowerCAmelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(__lowercase , __lowercase )
                # Checks everything loads correctly in the same way
                __lowerCAmelCase = tokenizer_r.from_pretrained(__lowercase )
                __lowerCAmelCase = tokenizer_p.from_pretrained(__lowercase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowercase , __lowercase ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(__lowercase )
                # Save tokenizer rust, legacy_format=True
                __lowerCAmelCase = tempfile.mkdtemp()
                __lowerCAmelCase = tokenizer_r.save_pretrained(__lowercase , legacy_format=__lowercase )
                __lowerCAmelCase = tokenizer_p.save_pretrained(__lowercase )
                # Checks it save with the same files
                self.assertSequenceEqual(__lowercase , __lowercase )
                # Checks everything loads correctly in the same way
                __lowerCAmelCase = tokenizer_r.from_pretrained(__lowercase )
                __lowerCAmelCase = tokenizer_p.from_pretrained(__lowercase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowercase , __lowercase ) )
                shutil.rmtree(__lowercase )
                # Save tokenizer rust, legacy_format=False
                __lowerCAmelCase = tempfile.mkdtemp()
                __lowerCAmelCase = tokenizer_r.save_pretrained(__lowercase , legacy_format=__lowercase )
                __lowerCAmelCase = tokenizer_p.save_pretrained(__lowercase )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                __lowerCAmelCase = tokenizer_r.from_pretrained(__lowercase )
                __lowerCAmelCase = tokenizer_p.from_pretrained(__lowercase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowercase , __lowercase ) )
                shutil.rmtree(__lowercase )
@require_torch
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
    """English→Romanian integration tests against the released
    facebook/mbart-large-en-ro checkpoint (network access required).

    NOTE(review): same mangling as the class above — the four class
    attributes presumably were `checkpoint_name`, `src_text`, `tgt_text`
    and `expected_src_tokens` (those names are read by the methods below),
    and many bindings/argument literals were destroyed. Code is kept
    byte-identical; only comments were added.
    """

    __UpperCamelCase : Any = 'facebook/mbart-large-en-ro'
    __UpperCamelCase : Tuple = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    __UpperCamelCase : List[Any] = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    # Expected ids for the first source sentence; ends with EOS (2) + EN_CODE.
    __UpperCamelCase : str = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]

    @classmethod
    def _snake_case (cls ):
        # setUpClass: load the pretrained tokenizer once for all tests.
        # NOTE(review): the destroyed bindings were presumably
        # `cls.tokenizer = ...` (read as `self.tokenizer` below) and
        # `cls.pad_token_id = 1`.
        __lowerCAmelCase = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
        __lowerCAmelCase = 1
        return cls

    def _snake_case (self ):
        # Language-code ids follow the fairseq layout (offset 250 000).
        # NOTE(review): '''en_EN''' is not a standard MBart code (the model
        # card uses en_XX) — confirm the fixture's fairseq_tokens_to_ids.
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )

    def _snake_case (self ):
        # Encoding the source text must reproduce the expected ids above.
        __lowerCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , __lowercase )

    def _snake_case (self ):
        # decode(skip_special_tokens=True) must drop the language-code token.
        self.assertIn(__lowercase , self.tokenizer.all_special_ids )
        __lowerCAmelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
        __lowerCAmelCase = self.tokenizer.decode(__lowercase , skip_special_tokens=__lowercase )
        __lowerCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowercase )
        self.assertEqual(__lowercase , __lowercase )
        self.assertNotIn(self.tokenizer.eos_token , __lowercase )

    def _snake_case (self ):
        # Truncation keeps EOS (2) and the language code as the last tokens.
        __lowerCAmelCase = ['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0] , __lowercase )
        __lowerCAmelCase = 10
        __lowerCAmelCase = self.tokenizer(__lowercase , max_length=__lowercase , truncation=__lowercase ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , __lowercase )
        self.assertEqual(len(__lowercase ) , __lowercase )

    def _snake_case (self ):
        # <mask> and ar_AR occupy the top of the vocabulary.
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_26, 25_00_01] )

    def _snake_case (self ):
        # save_pretrained / from_pretrained round-trip preserves the mapping.
        __lowerCAmelCase = tempfile.mkdtemp()
        __lowerCAmelCase = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(__lowercase )
        __lowerCAmelCase = MBartTokenizer.from_pretrained(__lowercase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowercase )

    @require_torch
    def _snake_case (self ):
        # Batch layout matches fairseq: source ends [..., 2, EN_CODE], labels
        # end [..., 2, RO_CODE], decoder input starts with RO_CODE.
        __lowerCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowercase , return_tensors='''pt''' )
        __lowerCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def _snake_case (self ):
        # Padded/truncated batch shapes, plus the prefix/suffix-token reset.
        __lowerCAmelCase = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=__lowercase , truncation=__lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
        __lowerCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        self.assertIsInstance(__lowercase , __lowercase )
        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        __lowerCAmelCase = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , __lowercase )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )

    def _snake_case (self ):
        # Independent max_length for the source (3) and the target (10).
        __lowerCAmelCase = self.tokenizer(self.src_text , padding=__lowercase , truncation=__lowercase , max_length=3 , return_tensors='''pt''' )
        __lowerCAmelCase = self.tokenizer(
            text_target=self.tgt_text , padding=__lowercase , truncation=__lowercase , max_length=10 , return_tensors='''pt''' )
        __lowerCAmelCase = targets['''input_ids''']
        __lowerCAmelCase = shift_tokens_right(__lowercase , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )

    @require_torch
    def _snake_case (self ):
        # Translation-input builder: EOS + src code on the input, tgt code
        # forced as BOS for generation.
        __lowerCAmelCase = self.tokenizer._build_translation_inputs(
            '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
        self.assertEqual(
            nested_simplify(__lowercase ) , {
                # A, test, EOS, en_XX
                '''input_ids''': [[62, 30_34, 2, 25_00_04]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 25_00_01,
            } , )
| 9 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    """Builds BridgeTowerImageProcessor kwargs and computes the (height,
    width) the processor is expected to output for given images.

    Restored from a name-mangled original: the mangling duplicated every
    parameter name (`__lowercase`, a SyntaxError) and collapsed all locals
    and attributes into one dead name. The names below are recovered from
    the right-hand sides of the destroyed assignments and from the call
    sites in the test class further down (`BridgeTowerImageProcessingTester`,
    `prepare_image_processor_dict`, `get_expected_values`).
    """

    def __init__(
        self,
        parent,
        do_resize=True,
        size=None,
        size_divisor=32,
        do_rescale=True,
        rescale_factor=1 / 2_55,
        do_normalize=True,
        do_center_crop=True,
        # Mutable list defaults kept to mirror the upstream test helper;
        # they are only read, never mutated.
        image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
        image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        do_pad=True,
        batch_size=7,
        min_resolution=30,
        max_resolution=4_00,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        # Default target size matches the processor's shortest-edge default.
        self.size = size if size is not None else {'''shortest_edge''': 2_88}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        """Kwargs for constructing the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Mirror the processor's shortest-edge resize to predict the output
        height/width; for a batch, take the per-dimension maximum.
        """
        if not batched:
            size = self.size['''shortest_edge''']
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            # Cap the longer side at the same 1333/800 ratio the processor uses.
            max_size = int((13_33 / 8_00) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5), int(neww + 0.5)
            # Snap both sides down to a multiple of size_divisor.
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Exercises BridgeTowerImageProcessor on PIL, numpy and torch inputs.

    Restored from a name-mangled original: the base `__A` was undefined
    (the imported mixin is ImageProcessingSavingTestMixin), the attribute
    read as `self.image_processing_class` had lost its name, and the
    colliding `_snake_case` method names defeated unittest discovery.
    NOTE(review): equal_resolution/numpify/torchify flags are restored per
    the shared image-processing test convention — confirm against the
    common helper.
    """

    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        # Helper that produces processor kwargs and expected output sizes.
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
        self.assertTrue(hasattr(image_processing, '''size_divisor'''))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
| 9 | 1 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    """Build an XCLIPConfig for the named checkpoint.

    Args:
        model_name: checkpoint name, e.g. "xclip-large-patch14-16-frames";
            the patch size is parsed out of the "patchNN" substring.
        num_frames: number of video frames the vision tower consumes.

    Restored from a name-mangled original whose duplicate parameter names
    were a SyntaxError and whose config-field assignments had collapsed
    into a dead local; field names follow the upstream X-CLIP conversion
    script (values below are the ones visible in the original).
    """
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find('''patch''')
    patch_size = int(model_name[start_idx + len('''patch''') : start_idx + len('''patch''') + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 7_6_8
        text_config.intermediate_size = 3_0_7_2
        text_config.num_attention_heads = 1_2
        vision_config.hidden_size = 1_0_2_4
        vision_config.intermediate_size = 4_0_9_6
        vision_config.num_attention_heads = 1_6
        vision_config.num_hidden_layers = 2_4
        vision_config.mit_hidden_size = 7_6_8
        vision_config.mit_intermediate_size = 3_0_7_2

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 3_3_6

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 7_6_8

    return config
def rename_key(name):
    """Map an original X-CLIP checkpoint parameter name to its HF name.

    The mangled original assigned every `replace` result to a dead local,
    turning the function into an identity map; each result is now bound
    back to `name` so the rewrites actually chain.
    """
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''')
    if name == "positional_embedding":
        name = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''')
    if "ln_1" in name:
        name = name.replace('''ln_1''', '''layer_norm1''')
    if "ln_2" in name:
        name = name.replace('''ln_2''', '''layer_norm2''')
    if "c_fc" in name:
        name = name.replace('''c_fc''', '''fc1''')
    if "c_proj" in name:
        name = name.replace('''c_proj''', '''fc2''')
    if name.startswith('''transformer.resblocks'''):
        name = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''')
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace('''attn.out_proj''', '''self_attn.out_proj''')
    if "ln_final" in name:
        name = name.replace('''ln_final''', '''text_model.final_layer_norm''')
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''')
    if name == "visual.positional_embedding":
        name = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''')
    if name.startswith('''visual.transformer.resblocks'''):
        name = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''')
    if "visual.conv1" in name:
        name = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''')
    if "visual.ln_pre" in name:
        name = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''')
    if "visual.ln_post" in name:
        name = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''')
    if "visual.proj" in name:
        name = name.replace('''visual.proj''', '''visual_projection.weight''')
    if "text_projection" in name:
        name = name.replace('''text_projection''', '''text_projection.weight''')
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''')
    if "prompts_visual_ln" in name:
        name = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''')
    # mit
    if name == "mit.positional_embedding":
        name = name.replace('''positional''', '''position''')
    if name.startswith('''mit.resblocks'''):
        name = name.replace('''mit.resblocks''', '''mit.encoder.layers''')
    # prompts generator
    if name.startswith('''prompts_generator.norm'''):
        name = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''')
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite an original X-CLIP state dict into HF XCLIPModel naming.

    Fused `attn.in_proj` weights/biases are split into q/k/v projections;
    every other key is mapped through `rename_key`, transposing the two
    projection matrices.

    NOTE(review): the mangled original assigned every q/k/v slice to a dead
    local; the destination keys below are restored following the upstream
    X-CLIP conversion script — verify against it.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split('''.''')
            if key.startswith('''visual'''):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith('''mit'''):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            # The two projection matrices are stored transposed upstream.
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    """Download and load the spaghetti test video with `num_frames` frames.

    Returns the video as a list of frames (numpy arrays).

    Restored from a mangled original in which the chosen filename and the
    downloaded path collapsed into one dead local, so `np.load` was handed
    the frame count instead of the file path.
    """
    if num_frames == 8:
        filename = '''eating_spaghetti_8_frames.npy'''
    elif num_frames == 1_6:
        filename = '''eating_spaghetti.npy'''
    elif num_frames == 3_2:
        filename = '''eating_spaghetti_32_frames.npy'''
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''', filename=filename, repo_type='''dataset''', )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original X-CLIP checkpoint to the HF format and verify it.

    Args:
        model_name: key into the checkpoint-URL table below.
        pytorch_dump_folder_path: if given, where to save the converted model.
        push_to_hub: if True, push model/processor/slow tokenizer to the hub.

    Restored from a mangled original (duplicate parameter names were a
    SyntaxError; most intermediate bindings were destroyed). Variable names
    are recovered from their later uses and from the upstream conversion
    script — NOTE(review): verify against that script.
    """
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
        '''xclip-base-patch32-16-frames''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
        ),
        '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
        '''xclip-base-patch16-16-frames''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
        ),
        '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
        '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
        # fully supervised kinetics-600 checkpoints
        '''xclip-base-patch16-kinetics-600''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
        ),
        '''xclip-base-patch16-kinetics-600-16-frames''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
        ),
        '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
        # few shot
        '''xclip-base-patch16-hmdb-2-shot''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
        ),
        '''xclip-base-patch16-hmdb-4-shot''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
        ),
        '''xclip-base-patch16-hmdb-8-shot''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
        ),
        '''xclip-base-patch16-hmdb-16-shot''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
        ),
        '''xclip-base-patch16-ucf-2-shot''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
        ),
        '''xclip-base-patch16-ucf-4-shot''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
        ),
        '''xclip-base-patch16-ucf-8-shot''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
        ),
        '''xclip-base-patch16-ucf-16-shot''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
        ),
        # zero shot
        '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
    }
    checkpoint_url = model_to_url[model_name]

    # Frame count is encoded in the model name (few-shot models use 32).
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 1_6
    elif "shot" in model_name:
        num_frames = 3_2

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        # Google Drive checkpoints need gdown instead of torch.hub.
        output = '''pytorch_model.bin'''
        gdown.cached_download(checkpoint_url, output, quiet=False)  # NOTE(review): confirm quiet flag
        state_dict = torch.load(output, map_location='''cpu''')['''model''']
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)['''model''']

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    # Only the (buffer) position ids are allowed to be missing.
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''')
    fast_tokenizer = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''')
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=video, return_tensors='''pt''', padding=True)

    print('''Shape of pixel values:''', inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print('''Probs:''', probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.00_19, 0.99_51, 0.00_30]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.09_99E-04, 9.98_83E-01, 4.55_80E-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.00_83, 0.96_81, 0.02_36]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.69_37E-04, 9.97_28E-01, 1.94_73E-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.00_62, 0.98_64, 0.00_75]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.38_77E-04, 9.99_37E-01, 2.88_88E-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.05_55, 0.89_14, 0.05_31]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.85_54E-04, 9.99_29E-01, 3.27_54E-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.00_36, 0.99_20, 0.00_45]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.18_90E-06, 9.99_94E-01, 5.65_59E-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.03_20E-05, 9.99_93E-01, 6.24_35E-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.13_77E-06, 9.99_90E-01, 9.83_86E-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.13_47E-05, 9.99_62E-01, 3.34_11E-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.00_27, 0.99_04, 0.00_70]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.82_19E-04, 9.95_93E-01, 3.08_63E-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.50_82E-04, 9.97_85E-01, 1.79_66E-03]])
    else:
        raise ValueError(F"""Model name {model_name} not supported""")
    assert torch.allclose(probs, expected_probs, atol=1E-3)
    print('''Looks ok!''')

    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print('''Pushing model, processor and slow tokenizer files to the hub...''')
        model.push_to_hub(model_name, organization='''nielsr''')
        processor.push_to_hub(model_name, organization='''nielsr''')
        slow_tokenizer.push_to_hub(model_name, organization='''nielsr''')
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # Restored bindings: the mangled original assigned the parser and the
    # parsed args to a dead throwaway name, so `parser.add_argument` and
    # `args.*` raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""xclip-base-patch32""",
        type=str,
        help="""Name of the model.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    args = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 9 |
'''simple docstring'''
# Imports
import numpy as np
class a__:
    """Compute vegetation indices (NDVI, EVI, SAVI family, ...) from spectral bands.

    Bands (``red``, ``green``, ``blue``, ``red_edge``, ``nir``) may be scalars or
    numpy arrays of matching shape; all index methods operate elementwise.

    Fix: every method had been collapsed to the name ``_snake_case`` and every
    attribute assignment to a mangled local, so ``__init__`` and the dispatch
    table in ``calculation`` referenced nonexistent attributes. Method and
    attribute names are restored to match the dispatch keys and ``self.*`` uses.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """Store any band that was supplied; leave the others untouched."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Dispatch on *index* (e.g. ``"NDVI"``); returns the computed index,
        or ``False`` (after printing a message) for an unknown index name.
        Optionally updates the stored bands first."""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvaa,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arvaa(self):
        """Atmospherically Resistant Vegetation Index 2."""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        """Canopy Chlorophyll Content Index."""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        """Chlorophyll Vegetation Index."""
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        """Green Leaf Index."""
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        """Normalized Difference Vegetation Index."""
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        """Blue-normalized difference vegetation index."""
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        """Green NDVI."""
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        """Green-Blue NDVI."""
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        """Green-Red NDVI."""
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        """Red-Blue NDVI."""
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        """Pan NDVI."""
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        """Adjusted transformed soil-adjusted VI (a, b: soil-line slope/intercept)."""
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        """Blue-wide dynamic range vegetation index."""
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        """Chlorophyll Index Green."""
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        """Chlorophyll Index RedEdge."""
        return (self.nir / self.redEdge) - 1

    def ci(self):
        """Coloration Index."""
        return (self.red - self.blue) / self.red

    def ctvi(self):
        """Corrected Transformed Vegetation Index."""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        """Green Difference Vegetation Index."""
        return self.nir - self.green

    def evi(self):
        """Enhanced Vegetation Index."""
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        """Global Environment Monitoring Index."""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        """Green Optimized Soil Adjusted Vegetation Index."""
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        """Green Soil Adjusted Vegetation Index."""
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        """Ideal Vegetation Index (a: slope, b: intercept of the soil line)."""
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        """Infrared Percentage Vegetation Index."""
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        """Intensity."""
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        """Ratio Vegetation Index."""
        return self.nir / self.red

    def mrvi(self):
        """Modified Normalized Difference Vegetation Index RVI."""
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        """Modified Soil Adjusted Vegetation Index."""
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        """Normalized green-red difference index."""
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        """Normalized Red."""
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        """Saturation: (max - min) / max over the RGB bands."""
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        """Shape index IF."""
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        """Simple ratio NIR/RED (Difference Vegetation Index)."""
        return self.nir / self.red

    def tvi(self):
        """Transformed Vegetation Index."""
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        """Normalized difference red-edge index."""
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 9 | 1 |
'''simple docstring'''
from PIL import Image
def mean_threshold(image):
    """Binarize a grayscale PIL image in place at its mean intensity.

    Every pixel strictly above the mean becomes 255, the rest 0.
    Returns the same (mutated) image object.

    Fix: the original iterated ``range(image)`` and unpacked ``image.size``
    into a mangled name; the name ``mean_threshold`` is what the script's
    ``__main__`` block calls.
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    # Accumulate the mean intensity over all pixels.
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    # Threshold every pixel against the mean.
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    # Demo: binarize an image at its mean intensity and save the result.
    # Fix: the result was bound to a mangled name but saved as `image`.
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 9 |
'''simple docstring'''
from math import sqrt
def is_prime(number):
    """Return True iff *number* (an int >= 0) is prime, by trial division up to sqrt."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n):
    """Sieve of Eratosthenes: return all primes from 2 up to *n* (n > 2)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes: zero out every multiple of a surviving entry
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n):
    """Return all primes from 2 up to *n* (n > 2) using is_prime on each candidate."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number):
    """Return the prime factorization of *number* as a list (e.g. 12 -> [2, 2, 3]).

    0 and 1 are returned as [0] / [1]; a prime is returned as [number].
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number):
    """Return the greatest prime factor of *number* (>= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number):
    """Return the smallest prime factor of *number* (>= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number):
    """Return True iff *number* is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0
def is_odd(number):
    """Return True iff *number* is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0
def goldbach(number):
    """Goldbach: return two primes whose sum equals the even *number* (> 2)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number1, number2):
    """Euclidean algorithm: greatest common divisor of two non-negative ints."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def kg_v(number1, number2):
    """Least common multiple (kgV) of two positive ints, via prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1; shared primes contribute their max multiplicity
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2: primes only in number2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n):
    """Return the (n+1)-th prime: get_prime(0) == 2, get_prime(1) == 3, ..."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1, p_number_2):
    """Return all primes strictly between the primes p_number_1 < p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n):
    """Return all divisors of *n* (>= 1), including 1 and n, in ascending order."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number):
    """Return True iff *number* (> 1) equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator, denominator):
    """Reduce numerator/denominator by their gcd; returns the reduced (num, den) tuple."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n):
    """Return n! for n >= 0 (0! == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n):
    """Return the n-th Fibonacci number of the 1,1,2,3,5,... sequence (fib(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
| 9 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    """Builds a tiny Pegasus config plus model inputs for the TF unit tests.

    Fix: the class and its two methods had collapsed names (`a__`, `_snake_case`);
    the sibling test class instantiates `TFPegasusModelTester` and calls
    `prepare_config_and_inputs_for_common` / `check_decoder_model_past_large_inputs`,
    so those names are restored.
    """

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a (config, inputs_dict) pair; every input id sequence ends in EOS."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Check that cached (past_key_values) decoding matches full decoding."""
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.inta)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default attention/head masks for a Pegasus forward pass.

    Fix: the function was named ``__magic_name__`` although the tester calls
    ``prepare_pegasus_inputs_dict``; the name and parameter names are restored.
    """
    if attention_mask is None:
        # Attend to every non-padding token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.inta)
    if decoder_attention_mask is None:
        # The first decoder position (decoder_start_token) is always attended.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.inta),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Unit tests for the TF Pegasus models.

    Fix: the class name had collapsed to ``a__`` (colliding with the tester
    class above) and its base classes to ``__A``; restored per the imported
    mixins. Method names follow the transformers test convention.
    """

    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    """Slow integration test: batch summarization with google/pegasus-xsum.

    Fix: the class name had collapsed to ``a__`` and attribute/method names
    to placeholders; restored per the ``self.src_text`` / ``self.tokenizer`` /
    ``self.model`` / ``self.translate_src_text`` references in the bodies.
    """

    src_text = [
        ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
        ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
    ]
    expected_text = [
        'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'
        ' reduce the risk of wildfires.',
        'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = 'google/pegasus-xsum'

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        """Tokenize src_text, generate with beam search, and decode to strings."""
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors='tf')
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 9 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Dict = """true"""
def get_basic_setup(accelerator, num_samples=8_2, batch_size=1_6):
    """Return (model, ddp_model, dataloader) for a toy regression problem.

    The plain model stays on the accelerator's device; the copy and the
    dataloader go through ``accelerator.prepare``.
    """
    set_seed(4_2)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    """Build a tokenized GLUE/MRPC validation dataloader.

    use_longest: pad each batch to its longest member instead of max_length=128.
    """
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
    dataset = load_dataset('glue', 'mrpc', split='validation')

    def tokenize_function(examples):
        # NOTE(review): truncation/max_length values were mangled in the original;
        # truncation=True, max_length=None follows the upstream accelerate script.
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'],
        )
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='longest', return_tensors='pt')
        return tokenizer.pad(examples, padding='max_length', max_length=1_2_8, return_tensors='pt')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=1_6)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Return ({"ddp": [...], "no": [...]}, accelerator) for the MRPC comparison."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased', return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run *model* over *dataloader*, gathering (logits, targets) across processes."""
    logits_and_targets = []
    for batch in dataloader:
        ddp_input, ddp_target = batch.values()
        with torch.no_grad():
            logit = model(ddp_input)
        # De-duplicate/truncate across processes so totals match the dataset size.
        logit, target = accelerator.gather_for_metrics((logit, ddp_target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=8_2, dispatch_batches=False, split_batches=False, batch_size=1_6):
    """Check gather_for_metrics returns exactly *num_samples* predictions."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"""Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"""
def test_mrpc(dispatch_batches=False, split_batches=False):
    """Compare single-process vs distributed metric results on MRPC; they must match."""
    metric = evaluate.load('glue', 'mrpc')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['no']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['labels'])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['labels']
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def main():
    """Driver: run the gather_for_metrics test matrix across batch-handling modes."""
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
            test_torch_metrics(accelerator, 9_9)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 5_1_2)
    accelerator.state._reset_state()
def __magic_name__( lowerCamelCase):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 9 | 1 |
'''simple docstring'''
def __magic_name__( lowerCamelCase = 1_0, lowerCamelCase = 1_0_0_0, lowerCamelCase = True):
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and isinstance(lowerCamelCase, lowerCamelCase)
and isinstance(lowerCamelCase, lowerCamelCase)
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''')
return min_val if option else max_val
def __magic_name__( lowerCamelCase, lowerCamelCase):
return int((number_a + number_a) / 2)
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
assert (
isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase)
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError('''argument value for lower and higher must be(lower > higher)''')
if not lower < to_guess < higher:
raise ValueError(
'''guess value must be within the range of lower and higher value''')
def answer(lowerCamelCase) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('''started...''')
__lowerCAmelCase = lower
__lowerCAmelCase = higher
__lowerCAmelCase = []
while True:
__lowerCAmelCase = get_avg(lowerCamelCase, lowerCamelCase)
last_numbers.append(lowerCamelCase)
if answer(lowerCamelCase) == "low":
__lowerCAmelCase = number
elif answer(lowerCamelCase) == "high":
__lowerCAmelCase = number
else:
break
print(F"""guess the number : {last_numbers[-1]}""")
print(F"""details : {last_numbers!s}""")
def __magic_name__( ):
__lowerCAmelCase = int(input('''Enter lower value : ''').strip())
__lowerCAmelCase = int(input('''Enter high value : ''').strip())
__lowerCAmelCase = int(input('''Enter value to guess : ''').strip())
guess_the_number(lowerCamelCase, lowerCamelCase, lowerCamelCase)
if __name__ == "__main__":
main()
| 9 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : str = 'roberta'
def __init__(self , __lowercase=5_02_65 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase="absolute" , __lowercase=True , __lowercase=None , **__lowercase , ):
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = position_embedding_type
__lowerCAmelCase = use_cache
__lowerCAmelCase = classifier_dropout
class a__ ( __A ):
"""simple docstring"""
@property
def _snake_case (self ):
if self.task == "multiple-choice":
__lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowerCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 9 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : List[str] = ['input_features', 'attention_mask']
def __init__(self , __lowercase=80 , __lowercase=1_60_00 , __lowercase=80 , __lowercase=0.0 , __lowercase=True , __lowercase=True , __lowercase=True , **__lowercase , ):
super().__init__(feature_size=__lowercase , sampling_rate=__lowercase , padding_value=__lowercase , **__lowercase )
__lowerCAmelCase = num_mel_bins
__lowerCAmelCase = do_ceptral_normalize
__lowerCAmelCase = normalize_means
__lowerCAmelCase = normalize_vars
__lowerCAmelCase = True
def _snake_case (self , __lowercase , ):
__lowerCAmelCase = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
__lowerCAmelCase = torch.from_numpy(__lowercase ).unsqueeze(0 )
__lowerCAmelCase = ta_kaldi.fbank(__lowercase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def _snake_case (__lowercase , __lowercase , __lowercase = True , __lowercase = True , __lowercase = 0.0 , ):
# make sure we normalize float32 arrays
if normalize_means:
__lowerCAmelCase = x[:input_length].mean(axis=0 )
__lowerCAmelCase = np.subtract(__lowercase , __lowercase )
if normalize_vars:
__lowerCAmelCase = x[:input_length].std(axis=0 )
__lowerCAmelCase = np.divide(__lowercase , __lowercase )
if input_length < x.shape[0]:
__lowerCAmelCase = padding_value
# make sure array is in float32
__lowerCAmelCase = x.astype(np.floataa )
return x
def _snake_case (self , __lowercase , __lowercase = None ):
__lowerCAmelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(__lowercase , __lowercase , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(__lowercase , __lowercase )
]
def __call__(self , __lowercase , __lowercase = False , __lowercase = None , __lowercase = False , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , **__lowercase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__lowerCAmelCase = isinstance(__lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
__lowerCAmelCase = is_batched_numpy or (
isinstance(__lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowerCAmelCase = [np.asarray(__lowercase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__lowercase , np.ndarray ):
__lowerCAmelCase = np.asarray(__lowercase , dtype=np.floataa )
elif isinstance(__lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowerCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowerCAmelCase = [raw_speech]
# extract fbank features
__lowerCAmelCase = [self._extract_fbank_features(__lowercase ) for waveform in raw_speech]
# convert into correct format for padding
__lowerCAmelCase = BatchFeature({'''input_features''': features} )
__lowerCAmelCase = self.pad(
__lowercase , padding=__lowercase , max_length=__lowercase , truncation=__lowercase , pad_to_multiple_of=__lowercase , return_attention_mask=__lowercase , **__lowercase , )
# make sure list is in array format
__lowerCAmelCase = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , __lowercase ):
__lowerCAmelCase = [np.asarray(__lowercase , dtype=np.floataa ) for feature in input_features]
__lowerCAmelCase = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
__lowerCAmelCase = [np.asarray(__lowercase , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
__lowerCAmelCase = (
np.array(__lowercase , dtype=np.intaa )
if self._get_padding_strategies(__lowercase , max_length=__lowercase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__lowerCAmelCase = self.normalize(
padded_inputs['''input_features'''] , attention_mask=__lowercase )
if return_tensors is not None:
__lowerCAmelCase = padded_inputs.convert_to_tensors(__lowercase )
return padded_inputs
| 9 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = old_name
if "patch_embed" in old_name:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = old_name.split('''.''')
if layer == "0":
__lowerCAmelCase = old_name.replace('''0''', '''convolution1''')
elif layer == "1":
__lowerCAmelCase = old_name.replace('''1''', '''batchnorm_before''')
elif layer == "3":
__lowerCAmelCase = old_name.replace('''3''', '''convolution2''')
else:
__lowerCAmelCase = old_name.replace('''4''', '''batchnorm_after''')
if "network" in old_name and re.search(r'''\d\.\d''', lowerCamelCase):
__lowerCAmelCase = r'''\b\d{2}\b'''
if bool(re.search(lowerCamelCase, lowerCamelCase)):
__lowerCAmelCase = re.search(r'''\d\.\d\d.''', lowerCamelCase).group()
else:
__lowerCAmelCase = re.search(r'''\d\.\d.''', lowerCamelCase).group()
if int(match[0]) < 6:
__lowerCAmelCase = old_name.replace(lowerCamelCase, '''''')
__lowerCAmelCase = trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1])
__lowerCAmelCase = '''intermediate_stages.''' + trimmed_name
else:
__lowerCAmelCase = old_name.replace(lowerCamelCase, '''''')
if int(match[2]) < num_meta4D_last_stage:
__lowerCAmelCase = trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2])
else:
__lowerCAmelCase = str(int(match[2]) - num_meta4D_last_stage)
__lowerCAmelCase = trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index)
if "norm1" in old_name:
__lowerCAmelCase = trimmed_name.replace('''norm1''', '''layernorm1''')
elif "norm2" in old_name:
__lowerCAmelCase = trimmed_name.replace('''norm2''', '''layernorm2''')
elif "fc1" in old_name:
__lowerCAmelCase = trimmed_name.replace('''fc1''', '''linear_in''')
elif "fc2" in old_name:
__lowerCAmelCase = trimmed_name.replace('''fc2''', '''linear_out''')
__lowerCAmelCase = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(r'''.\d.''', lowerCamelCase):
__lowerCAmelCase = old_name.replace('''network''', '''intermediate_stages''')
if "fc" in new_name:
__lowerCAmelCase = new_name.replace('''fc''', '''convolution''')
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__lowerCAmelCase = new_name.replace('''norm1''', '''batchnorm_before''')
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__lowerCAmelCase = new_name.replace('''norm2''', '''batchnorm_after''')
if "proj" in new_name:
__lowerCAmelCase = new_name.replace('''proj''', '''projection''')
if "dist_head" in new_name:
__lowerCAmelCase = new_name.replace('''dist_head''', '''distillation_classifier''')
elif "head" in new_name:
__lowerCAmelCase = new_name.replace('''head''', '''classifier''')
elif "patch_embed" in new_name:
__lowerCAmelCase = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__lowerCAmelCase = new_name.replace('''norm''', '''layernorm''')
__lowerCAmelCase = '''efficientformer.''' + new_name
else:
__lowerCAmelCase = '''efficientformer.encoder.''' + new_name
return new_name
def __magic_name__( lowerCamelCase, lowerCamelCase):
for key in checkpoint.copy().keys():
__lowerCAmelCase = checkpoint.pop(lowerCamelCase)
__lowerCAmelCase = val
return checkpoint
def __magic_name__( ):
__lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowerCAmelCase = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase).raw)
return image
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = torch.load(lowerCamelCase, map_location='''cpu''')['''model''']
__lowerCAmelCase = EfficientFormerConfig.from_json_file(lowerCamelCase)
__lowerCAmelCase = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase)
__lowerCAmelCase = '''_'''.join(checkpoint_path.split('''/''')[-1].split('''.''')[0].split('''_''')[:-1])
__lowerCAmelCase = config.depths[-1] - config.num_metaad_blocks + 1
__lowerCAmelCase = convert_torch_checkpoint(lowerCamelCase, lowerCamelCase)
model.load_state_dict(lowerCamelCase)
model.eval()
__lowerCAmelCase = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = 2_5_6
__lowerCAmelCase = 2_2_4
__lowerCAmelCase = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], )
__lowerCAmelCase = processor(images=lowerCamelCase, return_tensors='''pt''').pixel_values
# original processing pipeline
__lowerCAmelCase = Compose(
[
Resize(lowerCamelCase, interpolation=pillow_resamplings['''bicubic''']),
CenterCrop(lowerCamelCase),
ToTensor(),
Normalize(lowerCamelCase, lowerCamelCase),
])
__lowerCAmelCase = image_transforms(lowerCamelCase).unsqueeze(0)
assert torch.allclose(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = model(lowerCamelCase)
__lowerCAmelCase = outputs.logits
__lowerCAmelCase = (1, 1_0_0_0)
if "l1" in model_name:
__lowerCAmelCase = torch.Tensor(
[-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28])
assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3)
assert logits.shape == expected_shape
elif "l3" in model_name:
__lowerCAmelCase = torch.Tensor(
[-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27])
assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3)
assert logits.shape == expected_shape
elif "l7" in model_name:
__lowerCAmelCase = torch.Tensor(
[-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78])
assert logits.shape == expected_shape
else:
raise ValueError(
F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""")
# Save Checkpoints
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
model.save_pretrained(lowerCamelCase)
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")
processor.save_pretrained(lowerCamelCase)
print(F"""Processor successfuly saved at {pytorch_dump_path}""")
if push_to_hub:
print('''Pushing model to the hub...''')
model.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add model''', use_temp_dir=lowerCamelCase, )
processor.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add image processor''', use_temp_dir=lowerCamelCase, )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
_UpperCAmelCase : List[str] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 9 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase : Any = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase : List[Any] = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase : Any = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase : List[Any] = model.state_dict()
_UpperCAmelCase : str = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase : Dict = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase : Tuple = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase : Union[str, Any] = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase : Union[str, Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase : Tuple = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase : Dict = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase : Optional[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase : List[str] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase : List[str] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase : Dict = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase : str = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase : List[Any] = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase : List[Any] = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase : List[Any] = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase : Union[str, Any] = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 9 |
'''simple docstring'''
from __future__ import annotations
import math
def __magic_name__( lowerCamelCase, lowerCamelCase):
if len(lowerCamelCase) != 2 or len(a[0]) != 2 or len(lowerCamelCase) != 2 or len(b[0]) != 2:
raise Exception('''Matrices are not 2x2''')
__lowerCAmelCase = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def __magic_name__( lowerCamelCase, lowerCamelCase):
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
for row in range(len(lowerCamelCase))
]
def __magic_name__( lowerCamelCase, lowerCamelCase):
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
for row in range(len(lowerCamelCase))
]
def __magic_name__( lowerCamelCase):
if len(lowerCamelCase) % 2 != 0 or len(a[0]) % 2 != 0:
raise Exception('''Odd matrices are not supported!''')
__lowerCAmelCase = len(lowerCamelCase)
__lowerCAmelCase = matrix_length // 2
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase, lowerCamelCase)] for i in range(lowerCamelCase)]
__lowerCAmelCase = [
[a[i][j] for j in range(lowerCamelCase, lowerCamelCase)] for i in range(lowerCamelCase, lowerCamelCase)
]
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase)] for i in range(lowerCamelCase)]
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase)] for i in range(lowerCamelCase, lowerCamelCase)]
return top_left, top_right, bot_left, bot_right
def __magic_name__( lowerCamelCase):
return len(lowerCamelCase), len(matrix[0])
def __magic_name__( lowerCamelCase):
print('''\n'''.join(str(lowerCamelCase) for line in matrix))
def __magic_name__( lowerCamelCase, lowerCamelCase):
if matrix_dimensions(lowerCamelCase) == (2, 2):
return default_matrix_multiplication(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase)
__lowerCAmelCase = actual_strassen(lowerCamelCase, matrix_subtraction(lowerCamelCase, lowerCamelCase))
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase)
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase)
__lowerCAmelCase = actual_strassen(lowerCamelCase, matrix_subtraction(lowerCamelCase, lowerCamelCase))
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase))
__lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase))
__lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase))
__lowerCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase), lowerCamelCase)
__lowerCAmelCase = matrix_addition(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = matrix_addition(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase), lowerCamelCase)
# construct the new matrix from our 4 quadrants
__lowerCAmelCase = []
for i in range(len(lowerCamelCase)):
new_matrix.append(top_left[i] + top_right[i])
for i in range(len(lowerCamelCase)):
new_matrix.append(bot_left[i] + bot_right[i])
return new_matrix
def __magic_name__( lowerCamelCase, lowerCamelCase):
if matrix_dimensions(lowerCamelCase)[1] != matrix_dimensions(lowerCamelCase)[0]:
__lowerCAmelCase = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
F"""Matrix A: {matrixa}\n"""
F"""Matrix B: {matrixa}"""
)
raise Exception(lowerCamelCase)
__lowerCAmelCase = matrix_dimensions(lowerCamelCase)
__lowerCAmelCase = matrix_dimensions(lowerCamelCase)
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__lowerCAmelCase = max(*lowerCamelCase, *lowerCamelCase)
__lowerCAmelCase = int(math.pow(2, math.ceil(math.loga(lowerCamelCase))))
__lowerCAmelCase = matrixa
__lowerCAmelCase = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0, lowerCamelCase):
if i < dimensiona[0]:
for _ in range(dimensiona[1], lowerCamelCase):
new_matrixa[i].append(0)
else:
new_matrixa.append([0] * maxim)
if i < dimensiona[0]:
for _ in range(dimensiona[1], lowerCamelCase):
new_matrixa[i].append(0)
else:
new_matrixa.append([0] * maxim)
__lowerCAmelCase = actual_strassen(lowerCamelCase, lowerCamelCase)
# Removing the additional zeros
for i in range(0, lowerCamelCase):
if i < dimensiona[0]:
for _ in range(dimensiona[1], lowerCamelCase):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
_UpperCAmelCase : List[str] = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
_UpperCAmelCase : Optional[Any] = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
print(strassen(matrixa, matrixa))
| 9 | 1 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
_UpperCAmelCase : str = """sshleifer/mar_enro_6_3_student"""
class a__ ( __A ):
"""simple docstring"""
def _snake_case (self ):
super().setUp()
__lowerCAmelCase = cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=__lowercase , )
__lowerCAmelCase = F"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
def _snake_case (self ):
MarianMTModel.from_pretrained(__lowercase )
@slow
@require_torch_gpu
def _snake_case (self ):
__lowerCAmelCase = {
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
__lowerCAmelCase = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
__lowerCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
__lowerCAmelCase = bash_script.replace(__lowercase , str(__lowercase ) )
__lowerCAmelCase = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
__lowerCAmelCase = F"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
__lowerCAmelCase = ['''finetune.py'''] + bash_script.split() + args
with patch.object(__lowercase , '''argv''' , __lowercase ):
__lowerCAmelCase = argparse.ArgumentParser()
__lowerCAmelCase = pl.Trainer.add_argparse_args(__lowercase )
__lowerCAmelCase = SummarizationModule.add_model_specific_args(__lowercase , os.getcwd() )
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = main(__lowercase )
# Check metrics
__lowerCAmelCase = load_json(model.metrics_save_path )
__lowerCAmelCase = metrics['''val'''][0]
__lowerCAmelCase = metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , __lowercase )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.0_1 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
__lowerCAmelCase = os.listdir(__lowercase )
__lowerCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0]
__lowerCAmelCase = os.path.join(args.output_dir , __lowercase )
__lowerCAmelCase = torch.load(__lowercase , map_location='''cpu''' )
__lowerCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
__lowerCAmelCase = {os.path.basename(__lowercase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class a__ ( __A ):
"""simple docstring"""
@timeout_decorator.timeout(6_00 )
@slow
@require_torch_gpu
def _snake_case (self ):
__lowerCAmelCase = F"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
__lowerCAmelCase = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 1_28,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
__lowerCAmelCase = (
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
__lowerCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
__lowerCAmelCase = bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
__lowerCAmelCase = bash_script.replace(__lowercase , str(__lowercase ) )
__lowerCAmelCase = self.get_auto_remove_tmp_dir()
__lowerCAmelCase = bash_script.replace('''--fp16''' , '''''' )
__lowerCAmelCase = 6
__lowerCAmelCase = (
['''distillation.py''']
+ bash_script.split()
+ [
F"""--output_dir={output_dir}""",
'''--gpus=1''',
'''--learning_rate=1e-3''',
F"""--num_train_epochs={epochs}""",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(__lowercase , '''argv''' , __lowercase ):
__lowerCAmelCase = argparse.ArgumentParser()
__lowerCAmelCase = pl.Trainer.add_argparse_args(__lowercase )
__lowerCAmelCase = SummarizationDistiller.add_model_specific_args(__lowercase , os.getcwd() )
__lowerCAmelCase = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
__lowerCAmelCase = distill_main(__lowercase )
# Check metrics
__lowerCAmelCase = load_json(model.metrics_save_path )
__lowerCAmelCase = metrics['''val'''][0]
__lowerCAmelCase = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.0_1
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , __lowercase )
# check lightning ckpt can be loaded and has a reasonable statedict
__lowerCAmelCase = os.listdir(__lowercase )
__lowerCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0]
__lowerCAmelCase = os.path.join(args.output_dir , __lowercase )
__lowerCAmelCase = torch.load(__lowercase , map_location='''cpu''' )
__lowerCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
__lowerCAmelCase = {os.path.basename(__lowercase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
| 9 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class a__ ( unittest.TestCase ):
    """Tests for CLIPProcessor.

    Builds a tiny CLIP tokenizer (vocab + merges) and an image-processor
    config in a temp dir, then checks save/load round-trips and that the
    processor delegates correctly to its tokenizer and image processor.

    NOTE(review): the original mangled code had all helpers named
    `_snake_case` (shadowing each other) and referenced undefined locals;
    names below are restored from the in-file call sites
    (`self.get_tokenizer()`, `self.prepare_image_inputs()`, ...).
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))

        image_processor_map = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, '''w''', encoding='''utf-8''') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random PIL image (channels-first array moved to channels-last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='''np''')
        input_processor = processor(images=image_input, return_tensors='''np''')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['''input_ids''', '''attention_mask''', '''pixel_values'''])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 9 | 1 |
"""Lazy import structure for the RoFormer subpackage.

Registers which submodules/symbols exist for each optional backend
(tokenizers, PyTorch, TensorFlow, Flax) and defers the actual imports to a
``_LazyModule`` proxy, so heavy frameworks load only on first access.
"""
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)
# Always-available symbols: configuration + slow (pure-Python) tokenizer.
# NOTE(review): this dict is conventionally named `_import_structure`, and the
# `_LazyModule(...)` call at the bottom still references `_import_structure`
# (undefined here) — the variable name looks mangled; confirm.
# NOTE(review): the `Any` / `Optional` / `Dict` / `Tuple` annotations below are
# evaluated at module level but never imported — likely further mangling.
_UpperCAmelCase : Any = {
    """configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
    """tokenization_roformer""": ["""RoFormerTokenizer"""],
}
# Fast tokenizer requires the optional `tokenizers` backend.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : Optional[int] = ["""RoFormerTokenizerFast"""]
# PyTorch models, registered only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : Optional[Any] = [
        """ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """RoFormerForCausalLM""",
        """RoFormerForMaskedLM""",
        """RoFormerForMultipleChoice""",
        """RoFormerForQuestionAnswering""",
        """RoFormerForSequenceClassification""",
        """RoFormerForTokenClassification""",
        """RoFormerLayer""",
        """RoFormerModel""",
        """RoFormerPreTrainedModel""",
        """load_tf_weights_in_roformer""",
    ]
# TensorFlow models, registered only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : str = [
        """TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFRoFormerForCausalLM""",
        """TFRoFormerForMaskedLM""",
        """TFRoFormerForMultipleChoice""",
        """TFRoFormerForQuestionAnswering""",
        """TFRoFormerForSequenceClassification""",
        """TFRoFormerForTokenClassification""",
        """TFRoFormerLayer""",
        """TFRoFormerModel""",
        """TFRoFormerPreTrainedModel""",
    ]
# Flax models, registered only when Flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : Dict = [
        """FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """FlaxRoFormerForMaskedLM""",
        """FlaxRoFormerForMultipleChoice""",
        """FlaxRoFormerForQuestionAnswering""",
        """FlaxRoFormerForSequenceClassification""",
        """FlaxRoFormerForTokenClassification""",
        """FlaxRoFormerModel""",
        """FlaxRoFormerPreTrainedModel""",
    ]
# For static type checkers only: perform the real imports so names resolve.
if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )
else:
    # At runtime, replace this module in sys.modules with a lazy proxy.
    import sys
    _UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 9 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class a__ ( TransformedDistribution ):
    """Distribution of `y = loc + scale * x` for a base distribution of `x`.

    Base class restored to `TransformedDistribution` (imported above): the
    original mangled base `__A` was undefined, while `super().__init__` is
    called with a transforms list and `self.base_dist` is read below.
    """

    def __init__(self, base_distribution, loc=None, scale=None, event_dim=0):
        # Missing loc/scale default to the identity transform.
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation (square root of the variance)."""
        return self.variance.sqrt()
class a__ ( nn.Module ):
    """Projects a hidden state onto one linear head per distribution argument,
    then constrains the raw outputs through `domain_map`."""

    def __init__(self, in_features, args_dim, domain_map, **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # One linear projection per distribution parameter (sized by args_dim).
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x):
        """Return the domain-mapped tuple of projected parameters."""
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class a__ ( nn.Module ):
    """Wraps a plain callable as an `nn.Module` so it can sit inside a model
    (e.g. as the `domain_map` of a parameter projection)."""

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        """Apply the wrapped callable to the inputs."""
        return self.function(x, *args)
class a__ :
    """Base class mapping network outputs to a `torch.distributions.Distribution`.

    Subclasses set `args_dim` (parameter name -> size) and
    `distribution_class`, and implement `domain_map` to constrain raw
    projections into the parameters' valid domains.
    """

    # Filled in by concrete subclasses.
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim=1):
        # `dim` multiplies every entry of args_dim so each parameter covers
        # a `dim`-dimensional (independent) event.
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        """Instantiate the raw distribution; wrap in Independent for dim > 1."""
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        """Build the output distribution, optionally affine-rescaled by loc/scale."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self):
        """Shape of each individual event: scalar for dim == 1, else (dim,)."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self):
        """Number of event dimensions (0 for univariate)."""
        return len(self.event_shape)

    @property
    def value_in_support(self):
        """A value guaranteed to lie in the distribution's support (used as a
        dummy target, e.g. for padded positions)."""
        return 0.0

    def get_parameter_projection(self, in_features):
        """Return a projection layer from `in_features` to the distribution args."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *args):
        """Map raw projections into each parameter's valid domain (subclass hook)."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x):
        """Smooth map of the real line onto the positive reals:
        (x + sqrt(x^2 + 4)) / 2 (an alternative to softplus)."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class a__ ( __A ):
    """Student's t-distribution output head.

    NOTE(review): the base class name `__A` is undefined in this file —
    presumably a mangled reference to the DistributionOutput base class
    defined above (whose `squareplus` is used here); confirm.
    """

    # One raw projection unit per parameter: degrees of freedom, loc, scale.
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df, loc, scale):
        """Constrain raw outputs: scale > 0 (eps-clamped), df > 2."""
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class a__ ( __A ):
    """Normal (Gaussian) distribution output head.

    NOTE(review): the base class name `__A` is undefined in this file —
    presumably a mangled reference to the DistributionOutput base class
    defined above (whose `squareplus` is used here); confirm.
    """

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc, scale):
        """Constrain raw outputs: scale > 0 (eps-clamped)."""
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class a__ ( __A ):
    """Negative-binomial distribution output head (count data).

    NOTE(review): the base class name `__A` is undefined in this file —
    presumably a mangled reference to the DistributionOutput base class
    defined above (whose `squareplus`, `dim` are used here); confirm.
    """

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count, logits):
        """Constrain raw outputs: total_count > 0; logits stay unconstrained."""
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args):
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        # `scale` is folded into the logits rather than applied as an affine
        # transform, to keep the distribution on the integers.
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
| 9 | 1 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 9 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a__ ( PipelineTool ):
    """Tool answering a question about a document image using a Donut
    (VisionEncoderDecoder) checkpoint fine-tuned on DocVQA.

    Base class restored to `PipelineTool` (imported above); the mangled base
    `__A` was undefined. Attribute/method names follow the PipelineTool
    contract (`encode` -> `forward` -> `decode`).
    """

    default_checkpoint = 'naver-clova-ix/donut-base-finetuned-docvqa'
    description = (
        'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    name = 'document_qa'
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ['image', 'text']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        # Donut needs PIL to load/process the document image.
        if not is_vision_available():
            raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''')
        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        """Build model inputs: the tokenized DocVQA task prompt and pixel values."""
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        prompt = task_prompt.replace('''{user_input}''', question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors='''pt''').input_ids
        pixel_values = self.pre_processor(document, return_tensors='''pt''').pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Greedy generation (num_beams=1) of the answer token sequence."""
        return self.model.generate(
            inputs['''pixel_values'''].to(self.device), decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences

    def decode(self, outputs):
        """Strip special tokens, convert the Donut output to JSON, return the answer."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, '''''')
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, '''''')
        sequence = re.sub(R'''<.*?>''', '''''', sequence, count=1).strip()  # remove first task start token
        # `token2json` restored from the digit-mangled `tokenajson`.
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 9 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
_UpperCAmelCase : Union[str, Any] = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class a__ ( PipelineTool ):
    """Tool translating text between languages with an NLLB seq2seq model.

    Base class restored to `PipelineTool` (imported above); the mangled base
    `__A` was undefined. `lang_to_code` maps plain-English language names to
    NLLB language codes (see LANGUAGE_CODES above).
    """

    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    # NOTE(review): `AutoModelForSeqaSeqLM` matches this file's (mangled)
    # import of the Seq2Seq auto-model class; kept for in-file consistency.
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']

    def encode(self, text, src_lang, tgt_lang):
        """Validate the language names and build tokenized translation inputs."""
        if src_lang not in self.lang_to_code:
            raise ValueError(F"""{src_lang} is not a supported language.""")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F"""{tgt_lang} is not a supported language.""")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors='''pt''', src_lang=src_lang, tgt_lang=tgt_lang)

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the first generated sequence, dropping special tokens."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 9 |
'''simple docstring'''
def count_divisors(n):
    """Return the number of positive divisors of `n` (n >= 1).

    Uses trial-division prime factorization: for n = p1^a1 * ... * pk^ak the
    divisor count is (a1 + 1) * ... * (ak + 1).
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # What remains is a single prime factor with multiplicity 1.
        n_divisors *= 2
    return n_divisors


def solution(divisor_target=500):
    """Return the first triangular number with more than `divisor_target`
    divisors (Project Euler problem 12 for the default of 500)."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > divisor_target:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
| 9 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a__ ( PipelineTool ):
    """Tool producing a binary segmentation mask for an image given a text label,
    using CLIPSeg.

    Base class restored to `PipelineTool` (imported above); the mangled base
    `__A` was undefined.
    """

    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation
    inputs = ['image', 'text']
    outputs = ['image']

    def __init__(self, *args, **kwargs):
        # Output is a PIL image, so the vision backend is mandatory.
        requires_backends(self, ['''vision'''])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        """Build padded CLIPSeg inputs from the image and the text label."""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors='''pt''')

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Binarize the logits (<= 0 -> background, > 0 -> mask) and return a PIL image."""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        # `np.uint8` restored from the digit-mangled `np.uinta`.
        return Image.fromarray((array * 255).astype(np.uint8))
| 9 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class a__ ( unittest.TestCase ):
    """Behavioral tests for `DisjunctiveConstraint`.

    NOTE(review): restored from a mangled revision in which the locals
    (`stepped`/`completed`/`reset`, the expected type, the expected exception
    classes) were unbound placeholders and all methods shared one non-`test_`
    name, so unittest discovered none of them.
    """

    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 9 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput( BaseOutput ):
    """Output of the AutoencoderKL encoding step.

    Attributes:
        latent_dist: the encoder's posterior over the latents
            (`DiagonalGaussianDistribution`); draw with ``.sample()`` or take
            the mode with ``.mode()``.
    """

    # NOTE(review): reconstructed — the mangled revision named this class
    # `a__` (immediately shadowed by the class defined right after it) and its
    # field `__UpperCamelCase`; `AutoencoderKLOutput` / `latent_dist` are the
    # names the surrounding code actually constructs.
    latent_dist: "DiagonalGaussianDistribution"
class a__ ( ModelMixin , ConfigMixin ):
    """Variational autoencoder with a KL-regularized latent space, with
    optional sliced and tiled encode/decode for large inputs.

    NOTE(review): reconstructed from a mangled revision in which ``__init__``
    declared duplicate parameter names (a SyntaxError), every method was named
    ``_snake_case`` while the bodies call ``self.encode`` / ``self.decode`` /
    ``self.blend_v`` etc., ``nn.Convad`` (nonexistent) stood in for
    ``nn.Conv2d``, and most intermediate results were assigned to a throwaway
    local and discarded.  Names below are the ones the code itself calls.
    """

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types = ("DownEncoderBlock2D",),
        up_block_types = ("UpDecoderBlock2D",),
        block_out_channels = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder (double_z: predict mean and logvar)
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        # 1x1 convolutions mapping to/from the (mean, logvar) moment tensor.
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        # Latent-space tile size: sample size divided by the total downsampling factor.
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        # Only the encoder/decoder submodules support checkpointing.
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        """Enable (or disable) tiled encode/decode for very large images."""
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        """Process batched inputs one sample at a time to save memory."""
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self):
        # Returns a dict mapping "<module path>.processor" -> processor for
        # every submodule that exposes `set_processor`.
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, '''set_processor'''):
                processors[f"""{name}.processor"""] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"""{name}.{sub_name}""", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor):
        """Set the attention processor(s); accepts one processor or a dict
        keyed like :attr:`attn_processors`."""
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"""A dict of processors was passed, but the number of processors {len(processor)} does not match the"""
                f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""")

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, '''set_processor'''):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"""{name}.processor"""))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"""{name}.{sub_name}""", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x, return_dict: bool = True):
        """Encode ``x`` into a latent posterior distribution."""
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z, return_dict: bool = True):
        # Single-tensor decode; `decode` handles slicing on top of this.
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z, return_dict: bool = True):
        """Decode latents ``z`` back into sample space."""
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        # Linearly blend the bottom rows of `a` into the top rows of `b`, in place.
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        # Linearly blend the right columns of `a` into the left columns of `b`, in place.
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x, return_dict: bool = True):
        """Encode in overlapping tiles and blend the seams."""
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into overlapping tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)

        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z, return_dict: bool = True):
        """Decode in overlapping latent tiles and blend the seams."""
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)

        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample, sample_posterior: bool = False, return_dict: bool = True, generator=None):
        """Encode ``sample``, draw (or take the mode of) a latent, decode it.

        Args:
            sample: input batch of images.
            sample_posterior: if True, sample from the posterior; otherwise use its mode.
            return_dict: if False, return a plain tuple instead of DecoderOutput.
            generator: optional torch RNG used when sampling the posterior.
        """
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 9 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
# Metric-card summary.  (Previously bound to the throwaway name
# `_UpperCAmelCase`, leaving the `_DESCRIPTION` referenced further down
# undefined at import time.)
_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""

# Argument/return documentation surfaced through `inputs_description`.
_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[ndarray]`):
        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    references (`List[ndarray]`):
        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    num_labels (`int`):
        Number of classes (categories).
    ignore_index (`int`):
        Index that will be ignored during evaluation.
    nan_to_num (`int`, *optional*):
        If specified, NaN values will be replaced by the number defined by the user.
    label_map (`dict`, *optional*):
        If specified, dictionary mapping old label indices to new label indices.
    reduce_labels (`bool`, *optional*, defaults to `False`):
        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
    `Dict[str, float | ndarray]` comprising various elements:
    - *mean_iou* (`float`):
        Mean Intersection-over-Union (IoU averaged over all categories).
    - *mean_accuracy* (`float`):
        Mean accuracy (averaged over all categories).
    - *overall_accuracy* (`float`):
        Overall accuracy on all images.
    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
        Per category accuracy.
    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):
        Per category IoU.
Examples:
    >>> import numpy as np
    >>> mean_iou = datasets.load_metric(\"mean_iou\")
    >>> # suppose one has 3 different segmentation maps predicted
    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
    >>> predicted = [predicted_1, predicted_2, predicted_3]
    >>> ground_truth = [actual_1, actual_2, actual_3]
    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""

# BibTeX citation for the reference implementation.
_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Compute per-class pixel areas for a single segmentation map pair.

    Args:
        pred_label: predicted segmentation map, array-like of ints (H, W).
        label: ground-truth segmentation map, array-like of ints (H, W).
        num_labels: number of classes (categories).
        ignore_index: label value excluded from all statistics.
        label_map: optional ``{old_id: new_id}`` remapping applied to ``label``.
        reduce_labels: if True, background (0) is sent to 255 and every other
            class id is shifted down by one (ADE20k-style datasets).

    Returns:
        Tuple of four ``(num_labels,)`` arrays:
        ``(area_intersect, area_union, area_pred_label, area_label)``.
    """
    # NOTE(review): reconstructed — a mangled revision declared every
    # parameter with the same name (a SyntaxError) and discarded the
    # remapping/masking results into a throwaway local.
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    # Drop every pixel whose ground-truth value is the ignore index.
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    # Pixels predicted correctly, histogrammed per class.
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Accumulate `intersect_and_union` statistics over a batch of maps."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Compute mean IoU, mean/overall accuracy and the per-category arrays.

    Returns a dict with keys ``mean_iou``, ``mean_accuracy``,
    ``overall_accuracy``, ``per_category_iou``, ``per_category_accuracy``.
    If ``nan_to_num`` is given, NaNs in every metric are replaced by it.
    """
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics


# Backward-compatible alias: the previous revision bound the last of these
# functions to this generated name while calling the (undefined) names above.
__magic_name__ = mean_iou
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
    """`datasets.Metric` wrapper computing mean IoU for semantic segmentation.

    NOTE(review): reconstructed — a mangled revision named both hooks
    ``_snake_case`` (the `datasets.Metric` contract requires ``_info`` /
    ``_compute``) and declared ``_compute`` with seven parameters all sharing
    one name, a SyntaxError.
    """

    def _info(self):
        # Declares the metric's input schema and the reference implementation.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
                    '''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
                }
            ),
            reference_urls=[
                '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
            ],
        )

    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        # Forward the batch to the free function; `predictions`/`references`
        # match the Features keys declared in `_info`.
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 9 | 1 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class a__ ( unittest.TestCase ):
    """Tests for the AutoImageProcessor factory: hub shortcuts, local config
    files, dynamic (remote-code) processors, and the custom-processor registry.

    NOTE(review): every method below is named ``_snake_case``, so earlier
    definitions are shadowed and unittest (which discovers ``test_*`` names)
    runs none of them.  Many bodies also reference ``__lowercase``, which is
    never bound in their scope and would raise NameError if executed — the
    placeholders appear to stand for ``tmpdirname``, expected classes or
    exception types, and True/False flags; confirm before repairing.
    """

    def _snake_case (self ):
        # NOTE(review): the 0 is bound to a throwaway local; upstream setUp()
        # usually zeroes a remote-code prompt timeout — confirm intent.
        __lowerCAmelCase = 0
    def _snake_case (self ):
        # Loading by hub id should yield a CLIP image processor.
        __lowerCAmelCase = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(__lowercase , __lowercase )
    def _snake_case (self ):
        # Load from a local dir containing preprocessor_config.json + config.json.
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = Path(__lowercase ) / '''preprocessor_config.json'''
            __lowerCAmelCase = Path(__lowercase ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''' ) )
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(__lowercase )
            self.assertIsInstance(__lowercase , __lowercase )
    def _snake_case (self ):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = Path(__lowercase ) / '''preprocessor_config.json'''
            __lowerCAmelCase = Path(__lowercase ) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''' ) )
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(__lowercase )
            self.assertIsInstance(__lowercase , __lowercase )
    def _snake_case (self ):
        # Round-trip a processor built from a model config's dict and make
        # sure the private `_processor_class` key is not serialized.
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            __lowerCAmelCase = Path(__lowercase ) / '''preprocessor_config.json'''
            __lowerCAmelCase = Path(__lowercase ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''' ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(__lowercase ).to_dict()
            config_dict.pop('''image_processor_type''' )
            __lowerCAmelCase = CLIPImageProcessor(**__lowercase )
            # save in new folder
            model_config.save_pretrained(__lowercase )
            config.save_pretrained(__lowercase )
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(__lowercase )
            # make sure private variable is not incorrectly saved
            __lowerCAmelCase = json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )
            self.assertIsInstance(__lowercase , __lowercase )
    def _snake_case (self ):
        # preprocessor_config.json alone (no config.json) should be enough.
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = Path(__lowercase ) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , )
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(__lowercase )
            self.assertIsInstance(__lowercase , __lowercase )
    def _snake_case (self ):
        # Unknown repo id: expect an error (exception class is a placeholder).
        with self.assertRaisesRegex(
            __lowercase , '''clip-base is not a local folder and is not a valid model identifier''' ):
            __lowerCAmelCase = AutoImageProcessor.from_pretrained('''clip-base''' )
    def _snake_case (self ):
        # Invalid revision: expect an error mentioning the bogus revision.
        with self.assertRaisesRegex(
            __lowercase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(__lowercase , revision='''aaaaaa''' )
    def _snake_case (self ):
        # Repo exists but ships no preprocessor_config.json.
        with self.assertRaisesRegex(
            __lowercase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            __lowerCAmelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
    def _snake_case (self ):
        # Dynamic processor requires trust_remote_code; the two placeholders on
        # the trust_remote_code kwargs presumably stand for False then True.
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(__lowercase ):
            __lowerCAmelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(__lowercase ):
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase )
        __lowerCAmelCase = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase )
        self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(__lowercase )
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(__lowercase , trust_remote_code=__lowercase )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
    def _snake_case (self ):
        # Register a custom config/processor pair, then load it via the auto-API.
        try:
            AutoConfig.register('''custom''' , __lowercase )
            AutoImageProcessor.register(__lowercase , __lowercase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(__lowercase ):
                AutoImageProcessor.register(__lowercase , __lowercase )
            with tempfile.TemporaryDirectory() as tmpdirname:
                __lowerCAmelCase = Path(__lowercase ) / '''preprocessor_config.json'''
                __lowerCAmelCase = Path(__lowercase ) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , )
                json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''' ) )
                __lowerCAmelCase = CustomImageProcessor.from_pretrained(__lowercase )
                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(__lowercase )
                    __lowerCAmelCase = AutoImageProcessor.from_pretrained(__lowercase )
                    self.assertIsInstance(__lowercase , __lowercase )
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def _snake_case (self ):
        # Conflict between a locally-registered processor and hub remote code:
        # the local one wins unless trust_remote_code is explicitly enabled.
        class a__ ( __A ):
            # Marker attribute used below to tell the local class from the
            # hub-loaded one.  NOTE(review): base class `__A` is a placeholder —
            # presumably an image-processor class; confirm.
            __UpperCamelCase : Any = True
        try:
            AutoConfig.register('''custom''' , __lowercase )
            AutoImageProcessor.register(__lowercase , __lowercase )
            # If remote code is not set, the default is to use local
            __lowerCAmelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(__lowercase , '''is_local''' ) )
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 9 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class a__ ( __A , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : str = DebertaTokenizer
__UpperCamelCase : str = True
__UpperCamelCase : Any = DebertaTokenizerFast
def _snake_case (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
__lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowerCAmelCase = {'''unk_token''': '''[UNK]'''}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowercase ) )
def _snake_case (self , **__lowercase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
def _snake_case (self , __lowercase ):
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = '''lower newer'''
return input_text, output_text
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__lowerCAmelCase = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__lowerCAmelCase = tokens + [tokenizer.unk_token]
__lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = tokenizer('''Hello''' , '''World''' )
__lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __lowercase )
@slow
def _snake_case (self ):
__lowerCAmelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
__lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowercase )
__lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowercase )
__lowerCAmelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
__lowerCAmelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _snake_case (self ):
__lowerCAmelCase = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__lowerCAmelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
__lowerCAmelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
__lowerCAmelCase = tokenizer(__lowercase , padding=__lowercase )
__lowerCAmelCase = [tokenizer.decode(__lowercase , skip_special_tokens=__lowercase ) for seq in encoding['''input_ids''']]
# fmt: off
__lowerCAmelCase = {
'''input_ids''': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__lowerCAmelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __lowercase )
for expected, decoded in zip(__lowercase , __lowercase ):
self.assertEqual(__lowercase , __lowercase )
| 9 | 1 |
'''simple docstring'''
# Ideal gas law: P * V = n * R * T

# Molar (universal) gas constant R, in J mol^-1 K^-1.
UNIVERSAL_GAS_CONSTANT = 8.314462


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure P = nRT / V of an ideal gas.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume V = nRT / P of an ideal gas.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 9 |
'''simple docstring'''
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return which day of the week a date falls on, via Zeller's congruence.

    ``date_input`` must be a 10-character string in mm-dd-yyyy or mm/dd/yyyy form.

    Raises:
        ValueError: on malformed input or out-of-range month/day/year.
        AssertionError: if the computed day disagrees with ``datetime``
            (self-check of the arithmetic).
    """
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    # datetime.weekday() has Monday == 0; Zeller has Sunday == 0 — map between them.
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math: Zeller treats Jan/Feb as months 13/14 of the previous year.
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])   # century part of the year
    k = int(str(y)[2:])   # year within the century
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Simple CLI: take the date string as a single positional argument.
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    # Print the result so running the script actually shows the answer.
    print(zeller(args.date_input))
| 9 | 1 |
'''simple docstring'''
from __future__ import annotations
class Matrix:
    """A 2-D matrix of ints/floats supporting standard linear-algebra operations
    (determinant, inverse, arithmetic operators, row/column insertion)."""

    def __init__(self, rows):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            # Every row must have the same width and contain only numbers.
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self):
        """Return the matrix columns as a list of lists (the transpose's rows)."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        """(rows, columns) shape tuple."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        """Return the identity matrix of the same order."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        """Return the determinant (0 for non-square matrices)."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace (cofactor) expansion along the first row.
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self):
        return bool(self.determinant())

    def get_minor(self, row, column):
        """Determinant of the submatrix with ``row`` and ``column`` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        """Signed minor at (row, column)."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self):
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row, position=None):
        """Append ``row`` (or insert it at ``position``) after validation."""
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        """Append ``column`` (or insert it at ``position``) after validation."""
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other):
        if isinstance(other, (int, float)):
            # NOTE: scalar products are truncated to int, matching the original.
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        """Sum of elementwise products of two equal-length vectors."""
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 9 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()


class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for ConsistencyModelPipeline using tiny test checkpoints."""

    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        # Tiny unconditional UNet checkpoint (keeps the test fast).
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        # Tiny class-conditional UNet checkpoint.
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        """Return pipeline components: a dummy unet plus the CM multistep scheduler."""
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline under test."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    """Slow GPU integration tests against the public consistency-models weights."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        """Deterministic call kwargs; optionally pins the initial latents."""
        # NOTE(review): default dtype restored as float32 — confirm against upstream.
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        """Seeded latents so fp16/flash-attention runs are reproducible."""
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        # NOTE(review): dtype restored as fp16 for the flash-attention path — confirm.
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        # NOTE(review): dtype restored as fp16 for the flash-attention path — confirm.
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 9 | 1 |
'''simple docstring'''
def prime_sieve_eratosthenes(num):
    """Return all primes <= ``num`` via the Sieve of Eratosthenes.

    Raises:
        ValueError: if ``num`` is not a positive integer.
    """
    if num <= 0:
        raise ValueError('''Input must be a positive integer''')

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p starting at p*p as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("""Enter a positive integer: """).strip())
    print(prime_sieve_eratosthenes(user_num))
| 9 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Load the iris dataset (150 samples, 4 features, 3 classes) and split it
# into train/test portions for the k-NN demo below.
data = datasets.load_iris()

X = np.array(data["""data"""])
y = np.array(data["""target"""])
classes = data["""target_names"""]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Return the Euclidean distance between two equal-length point vectors."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify ``point`` by majority vote among its ``k`` nearest training samples.

    Returns the class label (from ``classes``) of the predicted class.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 9 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
_UpperCAmelCase : Dict = tuple[int, int]
class Graph:
    """Undirected weighted graph; edges are keyed by (smaller_vertex, larger_vertex)."""

    def __init__(self, vertices, edges):
        self.vertices = vertices
        # Normalise every edge key so the smaller vertex comes first.
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge, weight):
        """Add an edge, registering both endpoints as vertices."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self):
        """Return a minimum spanning tree of this graph (Prim's algorithm)."""
        subgraph = Graph({min(self.vertices)}, {})
        while len(subgraph.vertices) < len(self.vertices):
            # Sentinel strictly larger than any edge weight in the graph.
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                # XOR: exactly one endpoint already inside the growing tree.
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """Project Euler 107: return the maximum saving made by replacing the
    network described in ``filename`` with its minimum spanning tree."""
    script_dir = os.path.abspath(os.path.dirname(__file__))
    file_path = os.path.join(script_dir, filename)

    edge_weights = {}

    with open(file_path) as f:
        data = f.read().strip().split('''\n''')
        adjaceny_matrix = [line.split(''',''') for line in data]

    # Only the lower triangle is needed; '-' marks a missing edge.
    for edgea in range(1, len(adjaceny_matrix)):
        for edgeb in range(edgea):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edge_weights[(edgeb, edgea)] = int(adjaceny_matrix[edgea][edgeb])

    graph = Graph(set(range(len(adjaceny_matrix))), edge_weights)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 9 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class a__ ( unittest.TestCase ):
"""simple docstring"""
def _snake_case (self ):
__lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
__lowerCAmelCase = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__lowerCAmelCase = {'''unk_token''': '''<unk>'''}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowercase ) )
__lowerCAmelCase = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowercase , __lowercase )
def _snake_case (self , **__lowercase ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase )
def _snake_case (self , **__lowercase ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase )
def _snake_case (self , **__lowercase ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__lowercase )
def _snake_case (self ):
shutil.rmtree(self.tmpdirname )
def _snake_case (self ):
__lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase )
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowercase )
self.assertIsInstance(processor_fast.tokenizer , __lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowercase )
self.assertIsInstance(processor_fast.image_processor , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase )
__lowerCAmelCase = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' )
__lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = processor(text=__lowercase , return_tensors='''np''' )
__lowerCAmelCase = tokenizer(__lowercase , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__lowercase , images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
def _snake_case (self ):
__lowerCAmelCase = '''google/owlvit-base-patch32'''
__lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase )
__lowerCAmelCase = ['''cat''', '''nasa badge''']
__lowerCAmelCase = processor(text=__lowercase )
__lowerCAmelCase = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
def _snake_case (self ):
__lowerCAmelCase = '''google/owlvit-base-patch32'''
__lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase )
__lowerCAmelCase = [['''cat''', '''nasa badge'''], ['''person''']]
__lowerCAmelCase = processor(text=__lowercase )
__lowerCAmelCase = 16
__lowerCAmelCase = len(__lowercase )
__lowerCAmelCase = max([len(__lowercase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
def _snake_case (self ):
__lowerCAmelCase = '''google/owlvit-base-patch32'''
__lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase )
__lowerCAmelCase = ['''cat''', '''nasa badge''']
__lowerCAmelCase = processor(text=__lowercase )
__lowerCAmelCase = 16
__lowerCAmelCase = inputs['''input_ids''']
__lowerCAmelCase = [
[4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(images=__lowercase , query_images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase = processor.batch_decode(__lowercase )
__lowerCAmelCase = tokenizer.batch_decode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
| 9 | 1 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_UpperCAmelCase : List[str] = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_UpperCAmelCase : str = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_UpperCAmelCase : Tuple = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Compute per-class intersection/union statistics for one prediction.

    The obfuscated original was a SyntaxError (every parameter was named
    `lowerCamelCase`) and bound every result to the same local; this restores
    the implementation that the call site at `total_intersect_and_union`
    already references by name.

    Args:
        pred_label: predicted segmentation map of shape (height, width).
        label: ground-truth segmentation map of shape (height, width).
        num_labels: number of classes.
        ignore_index: label value to exclude from all statistics.
        label_map: optional dict remapping old label ids to new ones.
        reduce_labels: if True, shift labels down by 1 and map background (0) to 255.

    Returns:
        (area_intersect, area_union, area_pred_label, area_label), each an
        ndarray of shape (num_labels,).
    """
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    # keep only pixels whose ground truth is not the ignore index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    # pixels where prediction agrees with ground truth
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Accumulate intersection/union statistics over a batch of segmentation maps.

    Restores the function the obfuscated original broke (duplicate parameter
    names, `np.floataa`); the `mean_iou` call site already uses this name.

    Returns:
        (total_area_intersect, total_area_union, total_area_pred_label,
        total_area_label), each an ndarray of shape (num_labels,).
    """
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Compute mean IoU, mean accuracy and overall accuracy over a batch.

    Restores the function that the `_compute` method below already calls as
    `mean_iou(results=..., gt_seg_maps=...)`; the obfuscated original had
    duplicate parameter names (a SyntaxError) and shadowed every local.

    Returns:
        dict with keys mean_iou, mean_accuracy, overall_accuracy,
        per_category_iou, per_category_accuracy.
    """
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    """`datasets.Metric` wrapper around `mean_iou`.

    The obfuscated original named the required Metric hooks `_snake_case`,
    so `datasets` could never find `_info`/`_compute`; this restores them.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }), reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ])

    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels)
        return iou_result
| 9 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset():
    """Return (list of 10 random ints in [-1000, 1000], random target in [-5000, 5000])."""
    arr = [randint(-1000, 1000) for _ in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


# Bound as `dataset` because solution_times() imports it from __main__ by that
# name; the obfuscated original bound `_UpperCAmelCase`, breaking the import.
dataset = make_dataset()
def triplet_sum1(arr, target):
    """Naive O(n^3) search: return a sorted triplet from `arr` summing to `target`.

    Returns (0, 0, 0) when no such triplet exists. Restores the name the
    timeit setup string imports; the obfuscated original had duplicate
    parameter names (a SyntaxError).
    """
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr, target):
    """Two-pointer O(n^2) search for a triplet in `arr` summing to `target`.

    Sorts `arr` in place (matching the original) and returns the triplet in
    ascending order, or (0, 0, 0) when none exists. Restores the name the
    timeit setup string imports; the original had duplicate parameter names.
    """
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            current = arr[i] + arr[left] + arr[right]
            if current == target:
                return (arr[i], arr[left], arr[right])
            if current < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)
def solution_times():
    """Benchmark both triplet-sum implementations with timeit.

    Returns (best-of-5 time for triplet_sum1, best-of-5 time for triplet_sum2).
    Restores the name the __main__ guard calls; the obfuscated original
    shadowed every local under `__lowerCAmelCase`.
    """
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code_1 = """
triplet_sum1(*dataset)
"""
    test_code_2 = """
triplet_sum2(*dataset)
"""
    times_1 = repeat(setup=setup_code, stmt=test_code_1, repeat=5, number=10000)
    times_2 = repeat(setup=setup_code, stmt=test_code_2, repeat=5, number=10000)
    return (min(times_1), min(times_2))
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # `times` must match the name used in the f-strings below; the obfuscated
    # original bound `_UpperCAmelCase`, leaving `times` undefined.
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
| 9 | 1 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """A distribution transformed by `y = scale * x + loc`.

    Restores the class name already referenced by `DistributionOutput.distribution`
    below; the obfuscated original subclassed the undefined name `__A`
    (the imported `TransformedDistribution`).
    """

    def __init__(self, base_distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the transformed distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Projects features to one tensor per distribution argument, then applies `domain_map`.

    Restores the class name referenced by `DistributionOutput.get_parameter_projection`
    and renames the obfuscated `_snake_case` method to `forward` so the module
    is actually callable through `nn.Module.__call__`.
    """

    def __init__(self, in_features, args_dim, domain_map, **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # one linear head per distribution argument
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x):
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable as an `nn.Module`.

    Restores the class name referenced by `DistributionOutput.get_parameter_projection`
    and renames the obfuscated `_snake_case` method to `forward`.
    """

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    """Base class mapping raw network outputs to a torch `Distribution`.

    Subclasses set `distribution_class` and `args_dim` and implement
    `domain_map`. The obfuscated original named every method `_snake_case`
    (each shadowing the previous), so no method survived class creation;
    this restores the distinct names its own body and subclasses rely on.
    """

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim=1):
        self.dim = dim
        # scale each argument's width by the number of output dimensions
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        """Build the (optionally affine-transformed) output distribution."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self):
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self):
        return len(self.event_shape)

    @property
    def value_in_support(self):
        # A value guaranteed to lie in the distribution's support (used for padding).
        return 0.0

    def get_parameter_projection(self, in_features):
        """Return the projection layer mapping features to distribution arguments."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map))

    def domain_map(self, *args):
        """Map unconstrained projections into each argument's valid domain."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x):
        # Smooth positive mapping: (x + sqrt(x^2 + 4)) / 2, a softplus alternative.
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """DistributionOutput for the Student's t-distribution.

    `domain_map` constrains scale > 0 (squareplus, clamped above float eps)
    and df > 2 so the variance is finite.
    """

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df, loc, scale):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """DistributionOutput for the Normal distribution.

    `domain_map` constrains scale > 0 (squareplus, clamped above float eps).
    """

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc, scale):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """DistributionOutput for the negative-binomial (count) distribution."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count, logits):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args):
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        """Fold `scale` into the logits instead of affine-transforming the samples."""
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
| 9 |
'''simple docstring'''
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    """Find the dominant eigenvalue/eigenvector of a symmetric (or Hermitian) matrix.

    Restores the function the self-test below already calls by name; the
    obfuscated original was a SyntaxError (duplicate `lowerCamelCase`
    parameters) with every local shadowed.

    Args:
        input_matrix: square symmetric (real) or Hermitian (complex) ndarray.
        vector: starting vector with a nonzero component along the dominant eigenvector.
        error_tol: relative change in the eigenvalue estimate at which to stop.
        max_iterations: hard cap on iterations.

    Returns:
        (largest eigenvalue, corresponding unit eigenvector).
    """
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration():
    """Self-test comparing power_iteration against numpy's eigh on real and Hermitian inputs.

    Fixes the obfuscated original: `np.complexaaa` (non-existent) is restored
    to `np.complex128`, and all shadowed `__lowerCAmelCase` locals get names.
    """
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    # Build a Hermitian complex matrix from the real one.
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        else:
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
    import doctest

    # Run doctests, then the self-test comparing against numpy's eigh.
    doctest.testmod()
    test_power_iteration()
| 9 | 1 |
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
# Inference script: load a fine-tuned DreamBooth pipeline and sample one image.
# Fixes the obfuscated original, which bound every name to `_UpperCAmelCase`
# (so `model_id`, `pipe` and `prompt` were undefined where used) and referenced
# the non-existent dtype `torch.floataa` instead of `torch.float16`.
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 9 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCAmelCase : str = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute (x0, y0, x1, y1) box to the 0-1000 space LayoutLM expects.

    Restores the name `apply_tesseract` already calls; the obfuscated original
    had duplicate parameter names (a SyntaxError).
    """
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config=None):
    """Run Tesseract OCR on an image and return (words, normalized boxes).

    Restores the function the image processor below already calls by name;
    the obfuscated original had duplicate parameter names and shadowed every
    local. Membership tests now use a set instead of a list (O(1) per lookup).

    Args:
        image: image array; converted to PIL for pytesseract.
        lang: OCR language code, or None for Tesseract's default.
        tesseract_config: extra flags forwarded to Tesseract ('' if None).

    Returns:
        (words, normalized_boxes) where each box is in 0-1000 coordinates.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    """Image processor that resizes images, flips RGB->BGR, and optionally runs OCR.

    Rewritten from the obfuscated original, whose methods all declared several
    parameters named `__lowercase` (a SyntaxError) and subclassed the
    undefined name `__A` (the imported `BaseImageProcessor`).
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, apply_ocr=True, ocr_lang=None, tesseract_config="", **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """Resize one image to `size` ({'height': int, 'width': int})."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # delegates to the module-level `resize` transform
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, apply_ocr=None, ocr_lang=None, tesseract_config=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        """Prepare a batch of images (and, when apply_ocr, their words/boxes) for the model."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 9 | 1 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class DualTransformer2DModel(nn.Module):
    """Mixes the outputs of two transformers, each conditioned on a slice of the encoder states.

    Rewritten from the obfuscated original; the `_snake_case` method is renamed
    to `forward` so `nn.Module.__call__` can dispatch to it.
    """

    def __init__(self, num_attention_heads=16, attention_head_dim=88, in_channels=None, num_layers=1, dropout=0.0, norm_num_groups=32, cross_attention_dim=None, attention_bias=False, sample_size=None, num_vector_embeds=None, activation_fn="geglu", num_embeds_ada_norm=None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm)
                for _ in range(2)
            ])
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict=True):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states)
| 9 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__(metaclass=DummyObject):
    """Import-time placeholder: every entry point raises unless torch and scipy are installed.

    Fixes the obfuscated original, which used the undefined metaclass `__A`
    (the imported `DummyObject`) and mistyped the backend list's attribute
    name, so `requires_backends` could never find `_backends`.
    """

    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 9 | 1 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_UpperCAmelCase : int = logging.get_logger(__name__)
@add_end_docstrings(
    __A , r'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class a__ ( __A ):
    """Fill-mask pipeline: scores candidate tokens for the tokenizer's mask position.

    NOTE(review): the obfuscated original defined *every* method under the same
    name ``_snake_case`` (each definition shadowing the previous one) and
    discarded several assignments into a throwaway local. The method names below
    are restored from the class's own calls (``get_masked_index``,
    ``_ensure_exactly_one_mask_token``, ``ensure_exactly_one_mask_token``,
    ``get_target_ids``) plus the standard ``Pipeline`` hooks
    (``_sanitize_parameters`` / ``preprocess`` / ``_forward`` / ``postprocess``).
    """

    def get_masked_index(self, input_ids):
        """Return the indices of the mask token in ``input_ids`` (framework-specific tensor)."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            # as_tuple=False keeps a single (num_matches, ndim) index tensor.
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids):
        """Raise a PipelineException when ``input_ids`` contains no mask token."""
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, F"""No mask_token ({self.tokenizer.mask_token}) found on the input""", )

    def ensure_exactly_one_mask_token(self, model_inputs):
        """Validate every sample in ``model_inputs`` (list of encodings or one batched encoding)."""
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters):
        """Tokenize ``inputs`` and check each sample carries a mask token."""
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        # Keep the input ids around: postprocess needs them to rebuild sequences.
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        """Turn logits into ``{score, token, token_str, sequence}`` rows per mask position."""
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                # Write the predicted token into the i-th mask slot (assignment was
                # lost in the obfuscated original).
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        """Map target words to vocabulary ids, tokenizing (with a warning) when absent."""
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target, add_special_tokens=False, return_attention_mask=False, return_token_type_ids=False, max_length=1, truncation=True, )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        F"""The specified target token `{target}` does not exist in the model vocabulary. """
                        "We cannot replace it with anything meaningful, ignoring it" )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    F"""The specified target token `{target}` does not exist in the model vocabulary. """
                    F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        """Split call kwargs into (preprocess, forward, postprocess) parameter dicts."""
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`." )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        """Run the pipeline; unwrap the list when a single input produced a single output."""
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
| 9 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    """Holds BridgeTower image-processor settings and computes expected output sizes.

    NOTE(review): the obfuscated original named this class ``a__`` while the test
    class below instantiates ``BridgeTowerImageProcessingTester`` — the name is
    restored here, together with the ``self.<attr>`` assignments that had been
    collapsed onto a throwaway local (the attributes are read back via
    ``prepare_image_processor_dict`` / ``get_expected_values`` and by the test class).
    """

    def __init__(
        self,
        parent,
        do_resize = True,
        size = None,
        size_divisor = 32,
        do_rescale = True,
        rescale_factor = 1 / 255,
        do_normalize = True,
        do_center_crop = True,
        image_mean = [0.48145466, 0.4578275, 0.40821073],
        image_std = [0.26862954, 0.26130258, 0.27577711],
        do_pad = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        """Return the kwargs used to build a BridgeTowerImageProcessor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor is expected to produce.

        Mirrors the resize logic: scale the shorter edge to ``size``, cap the longer
        edge at ``1333/800 * size``, then round down to a multiple of ``size_divisor``.
        For batched inputs, the max height/width over the batch is returned.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests BridgeTowerImageProcessor on PIL, numpy and torch inputs.

    NOTE(review): the obfuscated original named this class ``a__`` with base
    ``__A`` and every method ``_snake_case``. The base is restored to
    ``ImageProcessingSavingTestMixin`` (the only mixin imported for this file);
    attribute names are grounded by their usages (``image_processing_class``,
    ``image_processor_dict``, ``image_processor_tester``); test-method names are
    reconstructed conventionally — confirm against the upstream test file.
    """

    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
| 9 | 1 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    """Find a root of ``function`` near ``starting_point`` by Newton-Raphson iteration.

    The obfuscated original (``__magic_name__``) declared every parameter as
    ``lowerCamelCase`` — duplicate parameter names, i.e. a SyntaxError. The
    function name is restored from the ``__main__`` demo calls below.

    Args:
        function: expression in ``variable``, parsed by sympy (e.g. ``'sin(x)'``).
        starting_point: initial guess; may be complex (see the ``0.4 + 5J`` demo).
        variable: name of the symbol the expression is written in.
        precision: stop once consecutive guesses differ by less than this.
        multiplicity: known multiplicity of the root; accelerates convergence
            via the modified update ``x - m * f(x) / f'(x)``.

    Raises:
        ZeroDivisionError: when the derivative vanishes at the current guess.
    """
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Demo: exercise the Newton-Raphson solver on a few classic roots.
# NOTE(review): these calls require a function named `newton_raphson`; the
# mangled definition above is named `__magic_name__`, so as-is this guard
# raises NameError — the def must be renamed.
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
    # Find root of polynomial
    # Find fourth Root of 5 (complex starting point -> complex root branch)
    print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
    # Find value of e (root of log(y) - 1, solved in variable 'y')
    print(
        """The root of log(y) - 1 = 0 is """,
        f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
    )
    # Exponential Roots (looser precision requested explicitly)
    print(
        """The root of exp(x) - 1 = 0 is""",
        f"""{newton_raphson('exp(x) - 1', 1_0, precision=0.0_05)}""",
    )
    # Find root of cos(x)
    print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 9 |
'''simple docstring'''
# Imports
import numpy as np
class a__:
    """Compute common vegetation indices (NDVI, EVI, SAVI family, ...) from
    per-band reflectance data (numpy arrays or scalars).

    NOTE(review): the obfuscated original named every method ``_snake_case``
    (shadowing each other) and discarded the ``self.<band>`` assignments. The
    formula-method names below are restored from the class's own dispatch dict;
    the dispatcher itself is named ``calculation`` by convention — confirm
    against callers. Band attributes: red, green, blue, redEdge, nir.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        # All bands optional; any subset can be (re)supplied later.
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """Store the provided bands; ``None`` leaves the current value untouched."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Dispatch to the index named ``index``; returns False on unknown names."""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvaa,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arvaa(self):
        """Atmospherically Resistant Vegetation Index 2."""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        """Canopy Chlorophyll Content Index."""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        """Chlorophyll Vegetation Index."""
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        """Green Leaf Index."""
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        """Normalized Difference Vegetation Index."""
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        """Blue-normalized difference vegetation index."""
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        """Green NDVI."""
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        """Green-Blue NDVI."""
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        """Green-Red NDVI."""
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        """Red-Blue NDVI."""
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        """Pan NDVI."""
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        """Adjusted transformed soil-adjusted VI."""
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        """Blue-wide dynamic range vegetation index."""
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        """Chlorophyll Index Green."""
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        """Chlorophyll Index RedEdge."""
        return (self.nir / self.redEdge) - 1

    def ci(self):
        """Coloration Index."""
        return (self.red - self.blue) / self.red

    def ctvi(self):
        """Corrected Transformed Vegetation Index."""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        """Green Difference Vegetation Index."""
        return self.nir - self.green

    def evi(self):
        """Enhanced Vegetation Index."""
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        """Global Environment Monitoring Index."""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        """Green Optimized Soil Adjusted Vegetation Index."""
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        """Green Soil Adjusted Vegetation Index."""
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))

    def ivi(self, a=None, b=None):
        """Ideal Vegetation Index (requires intercept ``b`` and slope ``a``)."""
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        """Infrared Percentage Vegetation Index."""
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        """Intensity."""
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        """Ratio Vegetation Index."""
        return self.nir / self.red

    def mrvi(self):
        """Modified Ratio Vegetation Index."""
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        """Modified Soil Adjusted Vegetation Index."""
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        """Normalized green-red difference index."""
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        """Redness Index."""
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        """Saturation."""
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        """Shape index."""
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        """Simple ratio NIR/RED (a.k.a. DVI here)."""
        return self.nir / self.red

    def tvi(self):
        """Transformed Vegetation Index."""
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        """Normalized Difference Red-Edge."""
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 9 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """Builds configs/inputs for the TimmBackbone model tests.

    NOTE(review): class renamed from the mangled ``a__`` — ``setUp`` below
    instantiates ``TimmBackboneModelTester``; method names restored from the
    in-file calls (``get_config``, ``prepare_config_and_inputs``,
    ``prepare_config_and_inputs_for_common``).
    """

    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        # Out indices default to the last layer (see the equivalence test's comment).
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # NOTE(review): `feature_map` (singular) kept as in the original; the
        # common tests use `feature_maps` — confirm against the output type.
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model tests for TimmBackbone.

    NOTE(review): the obfuscated original named this class ``a__`` with bases
    ``__A , __A , __A`` and every method ``_snake_case``. Bases are restored
    from this file's mixin imports; the skipped-test method names are
    reconstructed from their skip reasons and the transformers common-test
    suite — confirm against the upstream test file.
    """

    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_save(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 9 |
'''simple docstring'''
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime (trial division up to sqrt(number)).

    Name restored from the literal ``is_prime(...)`` calls elsewhere in this file;
    the mangled version asserted ``isinstance(number, number)``, restored to ``int``.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n: int) -> list:
    """Return all primes in [2, n] via the sieve of Eratosthenes.

    The mangled original dropped the ``begin_list[j] = 0`` write (composites were
    never crossed out); restored so the trailing non-zero filter is meaningful.
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n: int) -> list:
    """Return all primes in [2, n] by testing each candidate with is_prime."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number: int) -> list:
    """Return the prime factorization of ``number`` as an ascending list
    (0 -> [0], 1 -> [1], prime -> [number])."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # factor divides quotient exactly here; floor-division keeps the
                # quotient an exact int (true division would drift for large numbers).
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number: int) -> bool:
    """Return True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0
def is_odd(number: int) -> bool:
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0
def goldbach(number: int) -> list:
    """Return the first pair of primes summing to the even ``number`` > 2
    (Goldbach decomposition); the trailing assert enforces a valid pair was found."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def __magic_name__(number1, number2):
    """
    Greatest common divisor of two non-negative integers (iterative Euclid).

    Fix: the obfuscation collapsed both parameters to the same name
    (a SyntaxError) and both loop operands to 'numbera'; the canonical
    Euclidean algorithm is restored.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    # replace (a, b) by (b, a mod b) until the remainder vanishes
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def __magic_name__(number1, number2):
    """
    Least common multiple (kgV) of two positive integers.

    Fix: the obfuscation collapsed both parameters and all prime-factor
    count variables to identical names, making the original
    prime-factorization merge unrecoverable. The lcm is computed instead
    via lcm(a, b) = a * b / gcd(a, b), which yields identical results for
    all valid inputs and removes the dependency on prime_factorization(...).
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    # inline iterative Euclid for the gcd
    a, b = number1, number2
    while b != 0:
        a, b = b, a % b
    # divide first to keep intermediate values small
    ans = number1 // a * number2
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def __magic_name__(n):
    """
    Return the n-th prime number, counting from n = 0 -> 2.

    Relies on the module-level helper is_prime(...). Fix: the parameter was
    obfuscated to 'lowerCamelCase' while the body referenced 'n' (NameError).
    """
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans), "'ans' must been a prime number and from type int"
    return ans
def __magic_name__(p_number_1, p_number_2):
    """
    Return every prime strictly between the two prime arguments
    (both endpoints excluded).

    Relies on the module-level helper is_prime(...). Fix: the obfuscation
    collapsed both parameters to the same name (SyntaxError); they are
    restored to the names used by the original assertion messages.
    NOTE(review): like the original, this indexes ans[0] in the final
    precondition, so adjacent primes (an empty result) would raise IndexError.
    """
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def __magic_name__(n):
    """
    Return all positive divisors of n (including 1 and n) in ascending order.

    Fix: the parameter was obfuscated to 'lowerCamelCase' while the body
    referenced 'n' (NameError).
    """
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def __magic_name__(number):
    """
    Return True iff 'number' is a perfect number (the sum of its proper
    divisors equals the number itself, e.g. 6 and 28).

    Relies on the module-level helper get_divisors(...). Fix: the parameter
    was obfuscated to 'lowerCamelCase' while the body referenced 'number'.
    """
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def __magic_name__(numerator, denominator):
    """
    Reduce a fraction to lowest terms; returns (numerator, denominator).

    Relies on the module-level helper gcd(...). Fix: the obfuscation
    collapsed both parameters to the same name (SyntaxError); they are
    restored to the names used by the body and assertion messages.
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def __magic_name__(n):
    """
    Return n! (factorial) for a non-negative integer n; 0! == 1.

    Fix: the parameter was obfuscated to 'lowerCamelCase' while the body
    referenced 'n' (NameError).
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def __magic_name__(n):
    """
    Return the n-th Fibonacci number with fib(1) = fib(2) = 1.
    For n in {0, 1} the loop body never runs and 1 is returned, matching
    the original behaviour.

    Fix: the parameter was obfuscated to 'lowerCamelCase' while the body
    referenced 'n'; the unused second seed variable is removed.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    fib1 = 0
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
| 9 | 1 |
'''simple docstring'''
def __magic_name__(density, bulk_modulus):
    """
    Speed of sound in a fluid: c = sqrt(K / rho).

    :param density: fluid density rho (must be > 0)
    :param bulk_modulus: adiabatic bulk modulus K (must be > 0)
    :return: speed of sound as a float
    :raises ValueError: if either argument is non-positive

    Fix: the obfuscation collapsed both parameters to the same name
    (a SyntaxError) while the body referenced 'density'/'bulk_modulus'.
    """
    if density <= 0:
        raise ValueError('''Impossible fluid density''')
    if bulk_modulus <= 0:
        raise ValueError('''Impossible bulk modulus''')
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 9 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Dict = """true"""
def __magic_name__(accelerator, num_samples=82, batch_size=16):
    """
    Build the regression model, its DDP copy and a dataloader for the basic
    gather_for_metrics tests.

    Fix: the obfuscation collapsed all three parameters to the same name
    (a SyntaxError); they are restored from the call sites and body usage.
    Returns (reference_model, prepared_ddp_model, prepared_dataloader).
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    # the reference model stays un-prepared but must live on the same device
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def __magic_name__(accelerator, use_longest=False):
    """
    Build the tokenized GLUE/MRPC validation dataloader for the metric tests.

    Fix: collapsed parameter names restored; inner callbacks were also
    obfuscated ('lowerCamelCase' instead of 'examples').
    When use_longest is True the collator pads to the longest sample in the
    batch, otherwise to a fixed max_length of 128.
    """
    tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''')
    dataset = load_dataset('''glue''', '''mrpc''', split='''validation''')

    def tokenize_function(examples):
        # tokenize the sentence pair; no fixed max length at this stage
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''max_length''', max_length=128, return_tensors='''pt''')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def __magic_name__(dispatch_batches, split_batches):
    """
    Build the MRPC model/dataloader pairs for the distributed-metric test.

    Fix: collapsed duplicate parameter names restored. Returns a dict with a
    'ddp' (prepared) and a 'no' (baseline) setup, plus the Accelerator.
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        '''hf-internal-testing/mrpc-bert-base-cased''', return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __magic_name__(model, dataloader, accelerator):
    """
    Run 'model' over 'dataloader' and gather all (logit, target) pairs across
    processes via accelerator.gather_for_metrics.

    Fix: collapsed duplicate parameter names and tuple targets restored.
    Returns two concatenated tensors (logits, targets).
    """
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        # gather across processes so metrics see the full (deduplicated) set
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def __magic_name__(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """
    Check that gather_for_metrics returns exactly 'num_samples' predictions
    (i.e. duplicated samples from uneven sharding are dropped).

    Fix: the obfuscation collapsed all five parameters to the same name
    (a SyntaxError); they are restored from the call sites in main().
    """
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}"""
def __magic_name__(dispatch_batches=False, split_batches=False):
    """
    Compare single-process (baseline) and distributed MRPC metric results;
    they must match for both accuracy and F1.

    Fix: collapsed duplicate parameter names restored.
    """
    metric = evaluate.load('''glue''', '''mrpc''')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['''no''']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['''labels'''])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup['''ddp''']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['''labels''']
        # gather predictions/references across processes before scoring
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def __magic_name__( ):
    """
    Driver for the metric test-suite: runs gather_for_metrics (GPU/TPU only),
    torch-metric length checks, and the perfectly-divisible last-batch case.

    Fix: obfuscated call arguments restored (Accelerator flags and the
    test_mrpc/test_torch_metrics arguments).
    """
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('''**Testing gather_for_metrics**''')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test torch metrics**''')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test last batch is not dropped when perfectly divisible**''')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def __magic_name__( lowerCamelCase):
    """Entry point used by torch_xla's xla_spawn; the spawn-index argument is unused."""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 9 | 1 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : List[str] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class a__ ( __A , unittest.TestCase ):
    """Tokenizer test-suite for XLM-RoBERTa: fast vs slow parity, pickling,
    save/load round-trips, and (slow) integration checks against the real
    xlm-roberta-base vocabulary."""

    # tokenizer classes under test (slow SentencePiece-based and fast Rust-based)
    __UpperCamelCase : Any = XLMRobertaTokenizer
    __UpperCamelCase : Tuple = XLMRobertaTokenizerFast
    __UpperCamelCase : List[Any] = True
    __UpperCamelCase : Any = True
    def _snake_case (self ):
        """Create a tokenizer from the SentencePiece fixture and save it to a temp dir."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        __lowerCAmelCase = XLMRobertaTokenizer(__lowercase , keep_accents=__lowercase )
        tokenizer.save_pretrained(self.tmpdirname )
    def _snake_case (self ):
        """'<pad>' must map to id 1 and back."""
        __lowerCAmelCase = '''<pad>'''
        __lowerCAmelCase = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
    def _snake_case (self ):
        """First/last vocab entries and the total vocab size of the fixture."""
        __lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(vocab_keys[-1] , '''<mask>''' )
        self.assertEqual(len(__lowercase ) , 10_02 )
    def _snake_case (self ):
        """vocab_size property matches the fixture size (1000 + offset tokens)."""
        self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
    def _snake_case (self ):
        """Full tokenize / ids / back-to-tokens round-trip, including <unk> handling."""
        __lowerCAmelCase = XLMRobertaTokenizer(__lowercase , keep_accents=__lowercase )
        __lowerCAmelCase = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(__lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__lowercase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        __lowerCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            __lowercase , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        __lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(
            __lowercase , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #        ^ unk: 2 + 1 = 3           unk: 2 + 1 = 3    ^
            ] , )
        __lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowercase )
        self.assertListEqual(
            __lowercase , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )
    def _snake_case (self ):
        """save_pretrained round-trip parity between fast and slow tokenizers,
        in both legacy and tokenizer.json formats."""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        __lowerCAmelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase )
                __lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowercase , **__lowercase )
                __lowerCAmelCase = tempfile.mkdtemp()
                __lowerCAmelCase = tokenizer_r.save_pretrained(__lowercase )
                __lowerCAmelCase = tokenizer_p.save_pretrained(__lowercase )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                __lowerCAmelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(__lowercase , __lowercase )
                # Checks everything loads correctly in the same way
                __lowerCAmelCase = tokenizer_r.from_pretrained(__lowercase )
                __lowerCAmelCase = tokenizer_p.from_pretrained(__lowercase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowercase , __lowercase ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(__lowercase )
                # Save tokenizer rust, legacy_format=True
                __lowerCAmelCase = tempfile.mkdtemp()
                __lowerCAmelCase = tokenizer_r.save_pretrained(__lowercase , legacy_format=__lowercase )
                __lowerCAmelCase = tokenizer_p.save_pretrained(__lowercase )
                # Checks it save with the same files
                self.assertSequenceEqual(__lowercase , __lowercase )
                # Checks everything loads correctly in the same way
                __lowerCAmelCase = tokenizer_r.from_pretrained(__lowercase )
                __lowerCAmelCase = tokenizer_p.from_pretrained(__lowercase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowercase , __lowercase ) )
                shutil.rmtree(__lowercase )
                # Save tokenizer rust, legacy_format=False
                __lowerCAmelCase = tempfile.mkdtemp()
                __lowerCAmelCase = tokenizer_r.save_pretrained(__lowercase , legacy_format=__lowercase )
                __lowerCAmelCase = tokenizer_p.save_pretrained(__lowercase )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                __lowerCAmelCase = tokenizer_r.from_pretrained(__lowercase )
                __lowerCAmelCase = tokenizer_p.from_pretrained(__lowercase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowercase , __lowercase ) )
                shutil.rmtree(__lowercase )
    @cached_property
    def _snake_case (self ):
        """Real xlm-roberta-base tokenizer, loaded once (network access required)."""
        return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
    def _snake_case (self ):
        """A tokenizer built from a copied model file must survive pickling."""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(__lowercase , f.name )
            __lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=__lowercase )
            __lowerCAmelCase = pickle.dumps(__lowercase )
        pickle.loads(__lowercase )
    def _snake_case (self ):
        """Fast (Rust) and slow (Python) tokenizers must tokenize/encode identically."""
        if not self.test_rust_tokenizer:
            return
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = self.get_rust_tokenizer()
        __lowerCAmelCase = '''I was born in 92000, and this is falsé.'''
        __lowerCAmelCase = tokenizer.tokenize(__lowercase )
        __lowerCAmelCase = rust_tokenizer.tokenize(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        __lowerCAmelCase = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        __lowerCAmelCase = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        __lowerCAmelCase = self.get_rust_tokenizer()
        __lowerCAmelCase = tokenizer.encode(__lowercase )
        __lowerCAmelCase = rust_tokenizer.encode(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
    @slow
    def _snake_case (self ):
        """Encoding parity with fairseq's xlmr.base on a trivial sentence."""
        __lowerCAmelCase = '''Hello World!'''
        __lowerCAmelCase = [0, 3_53_78, 66_61, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase ) )
    @slow
    def _snake_case (self ):
        """Encoding parity with fairseq on a long text with odd characters and <unk>s."""
        __lowerCAmelCase = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        __lowerCAmelCase = [
            0,
            32_93,
            83,
            10,
            45_52,
            49_89,
            79_86,
            6_78,
            10,
            59_15,
            1_11,
            17_94_59,
            12_48_50,
            4,
            60_44,
            2_37,
            12,
            6,
            5,
            6,
            4,
            67_80,
            7_05,
            15,
            13_88,
            44,
            3_78,
            1_01_14,
            7_11,
            1_52,
            20,
            6,
            5,
            2_23_76,
            6_42,
            12_21,
            1_51_90,
            3_41_53,
            4_50,
            56_08,
            9_59,
            11_19,
            5_77_02,
            1_36,
            1_86,
            47,
            10_98,
            2_93_67,
            47,
            # 4426, # What fairseq tokenizes from "<unk>": "_<"
            # 3678, # What fairseq tokenizes from "<unk>": "unk"
            # 2740, # What fairseq tokenizes from "<unk>": ">"
            3, # What we tokenize from "<unk>": "<unk>"
            6, # Residue from the tokenization: an extra sentencepiece underline
            4,
            60_44,
            2_37,
            62_84,
            5_09_01,
            5_28,
            31,
            90,
            34,
            9_27,
            2,
        ]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase ) )
    @slow
    def _snake_case (self ):
        """Full-batch integration check against a pinned xlm-roberta-base revision."""
        # fmt: off
        __lowerCAmelCase = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 9 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and the map of canonical RoBERTa checkpoints to their hosted
# config files. NOTE(review): the obfuscation reuses the same variable name
# for both assignments; originally these were `logger` and
# `ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP`.
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : str = {
    """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
    """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
    """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
    """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
    """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
    """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class a__ ( __A ):
    """Configuration for RoBERTa models (model_type 'roberta').

    Stores the hyper-parameters of the architecture; all arguments default to
    the roberta-base configuration.

    Fix: the obfuscation collapsed every __init__ parameter to the same name
    '__lowercase' (a SyntaxError) while the body assigned the real attribute
    names; the canonical parameter list is restored from those assignments.
    """

    __UpperCamelCase : str = 'roberta'

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        # forward the special-token ids and any extra kwargs to the base config
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class a__ ( __A ):
    """ONNX export configuration for RoBERTa: declares the dynamic axes of the
    model inputs (batch/sequence, plus choice for multiple-choice tasks)."""
    @property
    def _snake_case (self ):
        # multiple-choice inputs carry an extra 'choice' dimension between
        # batch and sequence; all other tasks are (batch, sequence)
        if self.task == "multiple-choice":
            __lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            __lowerCAmelCase = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 9 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation warning at import time: this module only re-exports the
# controlnet pipeline classes and will be removed in diffusers 0.22.0.
deprecate(
    """stable diffusion controlnet""",
    """0.22.0""",
    """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
    standard_warn=False,
    stacklevel=3,
)
| 9 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __magic_name__( lowerCamelCase, lowerCamelCase):
    """Map one original-EfficientFormer state-dict key to its Transformers name.

    NOTE(review): the obfuscation collapsed the two parameters (presumably
    old_name and num_meta4D_last_stage) into the same name, which is a
    SyntaxError, and several argument references below are obfuscated as
    'lowerCamelCase'; left byte-identical because the exact originals cannot
    be recovered with confidence from this view.
    """
    __lowerCAmelCase = old_name
    # patch-embedding stems: conv/bn layers are renamed positionally
    if "patch_embed" in old_name:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = old_name.split('''.''')
        if layer == "0":
            __lowerCAmelCase = old_name.replace('''0''', '''convolution1''')
        elif layer == "1":
            __lowerCAmelCase = old_name.replace('''1''', '''batchnorm_before''')
        elif layer == "3":
            __lowerCAmelCase = old_name.replace('''3''', '''convolution2''')
        else:
            __lowerCAmelCase = old_name.replace('''4''', '''batchnorm_after''')
    # numbered "network.X.Y" blocks: split between intermediate stages and the
    # last stage (meta4D vs meta3D blocks)
    if "network" in old_name and re.search(r'''\d\.\d''', lowerCamelCase):
        __lowerCAmelCase = r'''\b\d{2}\b'''
        if bool(re.search(lowerCamelCase, lowerCamelCase)):
            __lowerCAmelCase = re.search(r'''\d\.\d\d.''', lowerCamelCase).group()
        else:
            __lowerCAmelCase = re.search(r'''\d\.\d.''', lowerCamelCase).group()
        if int(match[0]) < 6:
            __lowerCAmelCase = old_name.replace(lowerCamelCase, '''''')
            __lowerCAmelCase = trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1])
            __lowerCAmelCase = '''intermediate_stages.''' + trimmed_name
        else:
            __lowerCAmelCase = old_name.replace(lowerCamelCase, '''''')
            if int(match[2]) < num_meta4D_last_stage:
                __lowerCAmelCase = trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2])
            else:
                # meta3D blocks are indexed after the meta4D blocks of the last stage
                __lowerCAmelCase = str(int(match[2]) - num_meta4D_last_stage)
                __lowerCAmelCase = trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index)
                if "norm1" in old_name:
                    __lowerCAmelCase = trimmed_name.replace('''norm1''', '''layernorm1''')
                elif "norm2" in old_name:
                    __lowerCAmelCase = trimmed_name.replace('''norm2''', '''layernorm2''')
                elif "fc1" in old_name:
                    __lowerCAmelCase = trimmed_name.replace('''fc1''', '''linear_in''')
                elif "fc2" in old_name:
                    __lowerCAmelCase = trimmed_name.replace('''fc2''', '''linear_out''')
            __lowerCAmelCase = '''last_stage.''' + trimmed_name
    elif "network" in old_name and re.search(r'''.\d.''', lowerCamelCase):
        __lowerCAmelCase = old_name.replace('''network''', '''intermediate_stages''')
    # generic renames shared by every block
    if "fc" in new_name:
        __lowerCAmelCase = new_name.replace('''fc''', '''convolution''')
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        __lowerCAmelCase = new_name.replace('''norm1''', '''batchnorm_before''')
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        __lowerCAmelCase = new_name.replace('''norm2''', '''batchnorm_after''')
    if "proj" in new_name:
        __lowerCAmelCase = new_name.replace('''proj''', '''projection''')
    if "dist_head" in new_name:
        __lowerCAmelCase = new_name.replace('''dist_head''', '''distillation_classifier''')
    elif "head" in new_name:
        __lowerCAmelCase = new_name.replace('''head''', '''classifier''')
    elif "patch_embed" in new_name:
        __lowerCAmelCase = '''efficientformer.''' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        __lowerCAmelCase = new_name.replace('''norm''', '''layernorm''')
        __lowerCAmelCase = '''efficientformer.''' + new_name
    else:
        __lowerCAmelCase = '''efficientformer.encoder.''' + new_name
    return new_name
def __magic_name__(checkpoint, num_meta4D_last_stage):
    """
    Rename every key in the original EfficientFormer checkpoint in place,
    using the module-level rename_key helper, and return the checkpoint.

    Fix: the obfuscation collapsed both parameters to the same name
    (a SyntaxError) and dropped the subscripted store of the renamed key,
    so the popped values were silently discarded; the renamed key is now
    written back into the dict.
    """
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        # re-insert under the Transformers-style name
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def __magic_name__( ):
    """Download the standard COCO sanity-check image and return it as a PIL image."""
    image_url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    response = requests.get(image_url, stream=True)
    image = Image.open(response.raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    """Convert an original EfficientFormer checkpoint to the HF format.

    Loads the raw checkpoint, renames its keys, verifies the image-processing
    pipeline and the logits against hard-coded references, saves model and
    processor, and optionally pushes both to the hub.

    The corrupted source collapsed all four parameters to one duplicated name
    (a SyntaxError); they are restored here from the call site at the bottom
    of the file.

    Args:
        checkpoint_path: path to the original ``.pth`` checkpoint.
        efficientformer_config_file: JSON file with the model config.
        pytorch_dump_path: output directory for the converted artifacts.
        push_to_hub: whether to push model and processor to the hub.

    Raises:
        ValueError: if the checkpoint name is not an l1/l3/l7 variant.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    # e.g. ".../efficientformer_l1_300d.pth" -> "efficientformer_l1"
    model_name = '_'.join(checkpoint_path.split('/')[-1].split('.')[0].split('_')[:-1])
    # NOTE(review): the corrupted source read `config.num_metaad_blocks`;
    # the EfficientFormerConfig attribute is `num_meta3d_blocks` — confirm.
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={'shortest_edge': image_size},
        crop_size={'height': crop_size, 'width': crop_size},
        resample=pillow_resamplings['bicubic'],
    )
    pixel_values = processor(images=image, return_tensors='pt').pixel_values

    # original processing pipeline — must produce the same tensor as the
    # HF processor above.
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings['bicubic']),
            CenterCrop(crop_size),
            ToTensor(),
            # NOTE(review): the normalization constants were lost in the
            # corrupted source; ImageNet defaults assumed — confirm.
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1_000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"""
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")
    processor.save_pretrained(pytorch_dump_path)
    print(f"""Processor successfuly saved at {pytorch_dump_path}""")
    if push_to_hub:
        print('Pushing model to the hub...')
        model.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""", commit_message='Add model', use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""", commit_message='Add image processor', use_temp_dir=True,
        )
if __name__ == "__main__":
    # CLI entry point: the corrupted source bound the parser to `_UpperCAmelCase`
    # and then called the undefined name `parser`; the names are restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    # Pushing is opt-out: --no-push_to_hub disables it.
    parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 9 | 1 |
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    """Sort the first ``n`` elements of ``collection`` in place, recursively.

    The corrupted source named both functions ``__magic_name__`` while their
    bodies (and the ``__main__`` block) call ``insert_next`` and
    ``rec_insertion_sort``; the intended names are restored here.

    Args:
        collection: list of mutually comparable items, mutated in place.
        n: length of the prefix to sort.
    """
    # Base case: an empty/one-element list (or prefix) is already sorted.
    if len(collection) <= 1 or n <= 1:
        return
    # Fix any inversion at the end of the prefix, then sort the shorter prefix.
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Swap ``collection[index]`` rightwards until the adjacent pair is ordered."""
    # Stop past the end of the list or once the neighbours are in order.
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swap the out-of-order neighbours and continue with the next pair.
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
    # Read whitespace-separated integers from stdin, sort and print them.
    # The corrupted source bound both values to `_UpperCAmelCase` and then
    # referenced the undefined names `numbers`/`number_list`; restored here.
    numbers = input("""Enter integers separated by spaces: """)
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
| 9 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiply two 2x2 matrices directly (base case of Strassen's recursion)."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception('''Matrices are not 2x2''')
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    """Element-wise sum of two equally shaped matrices."""
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    """Element-wise difference of two equally shaped matrices."""
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple:
    """Split an even-sized square matrix into its four quadrants.

    Returns:
        (top_left, top_right, bot_left, bot_right)
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('''Odd matrices are not supported!''')
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple:
    """Return (rows, cols) of ``matrix``."""
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    """Print one matrix row per line.

    Fixes the corrupted original, which stringified the whole matrix on
    every row instead of each row.
    """
    print('\n'.join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Multiply two square power-of-two matrices with Strassen's 7-product scheme."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    # The seven Strassen products.
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrixa: list, matrixb: list) -> list:
    """Multiply two matrices of compatible dimensions via Strassen's algorithm.

    The operands are padded with zeros up to the next power-of-two square,
    multiplied recursively, and the padding is stripped from the result.

    Fixes over the corrupted original:
    - square inputs are actually multiplied (the original returned them as-is);
    - the operands are copied before padding, so the caller's lists are not
      mutated;
    - the error message interpolates matrix B, not A twice.

    Raises:
        Exception: if the inner dimensions do not match.
    """
    if matrix_dimensions(matrixa)[1] != matrix_dimensions(matrixb)[0]:
        msg = (
            '''Unable to multiply these matrices, please check the dimensions.\n'''
            f"""Matrix A: {matrixa}\n"""
            f"""Matrix B: {matrixb}"""
        )
        raise Exception(msg)
    dimensiona = matrix_dimensions(matrixa)
    dimensionb = matrix_dimensions(matrixb)
    maxim = max(*dimensiona, *dimensionb)
    maxim = int(math.pow(2, math.ceil(math.log2(maxim))))
    # Pad *copies* of the operands with zeros up to a maxim x maxim square.
    new_matrixa = [row[:] for row in matrixa]
    new_matrixb = [row[:] for row in matrixb]
    for i in range(maxim):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1], maxim):
                new_matrixa[i].append(0)
        else:
            new_matrixa.append([0] * maxim)
        if i < dimensionb[0]:
            for _ in range(dimensionb[1], maxim):
                new_matrixb[i].append(0)
        else:
            new_matrixb.append([0] * maxim)
    final_matrix = actual_strassen(new_matrixa, new_matrixb)
    # Strip the padding: the true result has dimensiona[0] rows and
    # dimensionb[1] columns.
    for i in range(maxim):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # Demo: multiply a 10x4 matrix by a 4x4 matrix.  The corrupted source
    # bound both matrices to `_UpperCAmelCase` (the second shadowing the
    # first) and then called strassen with matrix A twice; restored here.
    matrixa = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrixb = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
    print(strassen(matrixa, matrixb))
| 9 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class a__ :
    """Builds tiny OpenAI-GPT configs and dummy inputs for the unit tests.

    NOTE(review): this file is machine-obfuscated — every method was renamed
    to ``_snake_case`` (so later defs shadow earlier ones), every parameter
    became ``__lowercase`` (duplicate argument names, a SyntaxError as
    written), and every local became ``__lowerCAmelCase``.  The intended
    names survive only on the right-hand sides of the assignments below
    (parent, batch_size, seq_length, ...); restore them before running.
    """
    def __init__(self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=99 , __lowercase=32 , __lowercase=5 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=16 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=3 , __lowercase=4 , __lowercase=None , ):
        # The RHS names below are the intended constructor parameters.
        __lowerCAmelCase = parent
        __lowerCAmelCase = batch_size
        __lowerCAmelCase = seq_length
        __lowerCAmelCase = is_training
        __lowerCAmelCase = use_token_type_ids
        __lowerCAmelCase = use_labels
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = type_vocab_size
        __lowerCAmelCase = type_sequence_label_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = num_labels
        __lowerCAmelCase = num_choices
        __lowerCAmelCase = scope
        # Last vocab id is reused as the padding token.
        __lowerCAmelCase = self.vocab_size - 1
    # Intended name (from the return value): prepare_config_and_inputs.
    def _snake_case (self ):
        __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowerCAmelCase = None
        if self.use_token_type_ids:
            __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        if self.use_labels:
            __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
        __lowerCAmelCase = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        __lowerCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    # Checks the base OpenAIGPTModel output shape for three call signatures.
    def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , *__lowercase ):
        __lowerCAmelCase = OpenAIGPTModel(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        __lowerCAmelCase = model(__lowercase , token_type_ids=__lowercase , head_mask=__lowercase )
        __lowerCAmelCase = model(__lowercase , token_type_ids=__lowercase )
        __lowerCAmelCase = model(__lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Checks OpenAIGPTLMHeadModel loss and logits shapes.
    def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , *__lowercase ):
        __lowerCAmelCase = OpenAIGPTLMHeadModel(__lowercase )
        model.to(__lowercase )
        model.eval()
        __lowerCAmelCase = model(__lowercase , token_type_ids=__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Checks OpenAIGPTDoubleHeadsModel loss and logits shapes.
    def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , *__lowercase ):
        __lowerCAmelCase = OpenAIGPTDoubleHeadsModel(__lowercase )
        model.to(__lowercase )
        model.eval()
        __lowerCAmelCase = model(__lowercase , token_type_ids=__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Checks OpenAIGPTForSequenceClassification logits shape.
    def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , *__lowercase ):
        __lowerCAmelCase = self.num_labels
        __lowerCAmelCase = OpenAIGPTForSequenceClassification(__lowercase )
        model.to(__lowercase )
        model.eval()
        __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __lowerCAmelCase = model(__lowercase , token_type_ids=__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Intended name: prepare_config_and_inputs_for_common.
    def _snake_case (self ):
        __lowerCAmelCase = self.prepare_config_and_inputs()
        (
            (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) ,
        ) = config_and_inputs
        __lowerCAmelCase = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_torch
class a__ ( __A , __A , __A , unittest.TestCase ):
    """Common + pipeline test suite for the OpenAI-GPT model family.

    NOTE(review): the obfuscation collapsed the mixin base classes to the
    undefined placeholder ``__A`` (presumably ModelTesterMixin,
    GenerationTesterMixin and PipelineTesterMixin from the imports above),
    collapsed the class attributes to ``__UpperCamelCase`` and the methods
    to ``_snake_case`` — confirm the intended names before running.
    """
    # all_model_classes
    __UpperCamelCase : List[str] = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    # all_generative_model_classes
    __UpperCamelCase : List[Any] = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    # pipeline_model_mapping
    __UpperCamelCase : Union[str, Any] = (
        {
            'feature-extraction': OpenAIGPTModel,
            'text-classification': OpenAIGPTForSequenceClassification,
            'text-generation': OpenAIGPTLMHeadModel,
            'zero-shot': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Skips pipeline tests that cannot work with this tokenizer/config combo.
    def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
    # Adds the label tensors the double-heads model expects.
    def _snake_case (self , __lowercase , __lowercase , __lowercase=False ):
        __lowerCAmelCase = super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                __lowerCAmelCase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowercase , )
                __lowerCAmelCase = inputs_dict['''labels''']
                __lowerCAmelCase = inputs_dict['''labels''']
                __lowerCAmelCase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowercase , )
                __lowerCAmelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
        return inputs_dict
    # setUp: build the tester and the config tester.
    def _snake_case (self ):
        __lowerCAmelCase = OpenAIGPTModelTester(self )
        __lowerCAmelCase = ConfigTester(self , config_class=__lowercase , n_embd=37 )
    def _snake_case (self ):
        self.config_tester.run_common_tests()
    def _snake_case (self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*__lowercase )
    def _snake_case (self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*__lowercase )
    def _snake_case (self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*__lowercase )
    def _snake_case (self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowercase )
    # Smoke-loads the first pretrained checkpoint from the hub.
    @slow
    def _snake_case (self ):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCAmelCase = OpenAIGPTModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )
@require_torch
class a__ ( unittest.TestCase ):
    """Slow integration test: greedy generation with the pretrained openai-gpt.

    NOTE(review): obfuscated source — method name collapsed to
    ``_snake_case`` and the locals/arguments to placeholder names; the
    ``__lowercase`` references inside the body are corrupted (undefined at
    runtime) and need restoring before this can execute.
    """
    @slow
    def _snake_case (self ):
        __lowerCAmelCase = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
        model.to(__lowercase )
        __lowerCAmelCase = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=__lowercase )  # the president is
        # Expected greedy continuation token ids.
        __lowerCAmelCase = [
            4_81,
            47_35,
            5_44,
            2_46,
            9_63,
            8_70,
            7_62,
            2_39,
            2_44,
            4_04_77,
            2_44,
            2_49,
            7_19,
            8_81,
            4_87,
            5_44,
            2_40,
            2_44,
            6_03,
            4_81,
        ]  # the president is a very good man. " \n " i\'m sure he is, " said the
        __lowerCAmelCase = model.generate(__lowercase , do_sample=__lowercase )
        self.assertListEqual(output_ids[0].tolist() , __lowercase )
| 9 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class a__ ( unittest.TestCase ):
    """Tests for CLIPProcessor: save/load round-trips and tokenizer/image-processor
    delegation.

    NOTE(review): obfuscated source — all methods were collapsed to
    ``_snake_case`` (so only the last definition survives at runtime) and
    locals to ``__lowerCAmelCase``; the original unittest method names
    (setUp, tearDown, test_*) must be restored for the suite to run.
    """
    # setUp: write a tiny BPE tokenizer vocab and an image-processor config
    # into a temporary directory.
    def _snake_case (self ):
        __lowerCAmelCase = tempfile.mkdtemp()
        # fmt: off
        __lowerCAmelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
        __lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        __lowerCAmelCase = {'''unk_token''': '''<unk>'''}
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(__lowercase ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(__lowercase ) )
        __lowerCAmelCase = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        }
        __lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(__lowercase , __lowercase )
    # Factory helpers for the slow/fast tokenizers and the image processor.
    def _snake_case (self , **__lowercase ):
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
    def _snake_case (self , **__lowercase ):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase )
    def _snake_case (self , **__lowercase ):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowercase )
    # tearDown: remove the temporary directory.
    def _snake_case (self ):
        shutil.rmtree(self.tmpdirname )
    # Returns a list with a single random PIL image.
    def _snake_case (self ):
        __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    # Round-trips a processor through save_pretrained/from_pretrained with
    # both the slow and the fast tokenizer.
    def _snake_case (self ):
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = self.get_rust_tokenizer()
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        processor_slow.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase )
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        processor_fast.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __lowercase )
        self.assertIsInstance(processor_fast.tokenizer , __lowercase )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __lowercase )
        self.assertIsInstance(processor_fast.image_processor , __lowercase )
    # Round-trip with overridden kwargs (special tokens / normalization).
    def _snake_case (self ):
        __lowerCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        __lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 )
        __lowerCAmelCase = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __lowercase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __lowercase )
    # Processor(images=...) must match the bare image processor output.
    def _snake_case (self ):
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' )
        __lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    # Processor(text=...) must match the bare tokenizer output.
    def _snake_case (self ):
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = '''lower newer'''
        __lowerCAmelCase = processor(text=__lowercase )
        __lowerCAmelCase = tokenizer(__lowercase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    # Text+image call returns the combined keys; empty call raises.
    def _snake_case (self ):
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = '''lower newer'''
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__lowercase , images=__lowercase )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(__lowercase ):
            processor()
    # batch_decode is delegated to the tokenizer.
    def _snake_case (self ):
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCAmelCase = processor.batch_decode(__lowercase )
        __lowerCAmelCase = tokenizer.batch_decode(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
    # The processor advertises the union of model input names.
    def _snake_case (self ):
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = '''lower newer'''
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__lowercase , images=__lowercase )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 9 | 1 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param(tax_checkpoint_path):
    """Load a T5X checkpoint and return its parameters as a flat dict.

    The corrupted source returned the undefined name ``flax_params``; the
    load/flatten chain is restored here.  The restored function name matches
    the call site in the conversion entry point below.
    """
    flax_params = flatten_dict(checkpoints.load_tax_checkpoint(tax_checkpoint_path))
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    """Map flat T5X parameter names to HF Pix2Struct names and convert the
    arrays to torch tensors.

    Only entries under the ``target`` prefix are kept.  Linear kernels are
    transposed (flax stores them as (in, out)); embedding tables are not.

    Args:
        flax_dict: flat dict whose keys are tuples of path components.

    Returns:
        dict mapping HF parameter names to ``torch.Tensor``.
    """
    converted_dict = {}
    CONVERSION_MAPPING = {
        '''token_embedder''': '''embeddings''',
        '''encoder_norm''': '''layernorm''',
        '''kernel''': '''weight''',
        '''.out''': '''.output''',
        '''scale''': '''weight''',
        '''embedders_0.pos_embedding''': '''row_embedder.weight''',
        '''embedders_1.pos_embedding''': '''column_embedder.weight''',
    }
    DECODER_CONVERSION_MAPPING = {
        '''query''': '''attention.query''',
        '''key''': '''attention.key''',
        '''value''': '''attention.value''',
        '''output.dense''': '''output''',
        '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
        '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
        '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
        '''mlp.''': '''mlp.DenseReluDense.''',
        '''pre_mlp_layer_norm''': '''mlp.layer_norm''',
        '''self_attention.o''': '''self_attention.attention.o''',
        '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
        '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
        '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first ("target") component from the key tuple
            new_key = '''.'''.join(key[1:])
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'''layers_(\d+)''', r'''layer.\1''', new_key)
                new_key = new_key.replace('''encoder''', '''encoder.encoder''')
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'''layers_(\d+)''', r'''layer.\1''', new_key)
            converted_dict[new_key] = flax_dict[key]

    # convert converted_dict into torch format
    converted_torch_dict = {}
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            # linear kernels are stored transposed in flax
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf(tax_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    """Convert a T5X Pix2Struct checkpoint into an HF model + processor and save both.

    The corrupted source collapsed all four parameters to one duplicated name
    (a SyntaxError); they are restored here from the argparse entry point.

    Args:
        tax_checkpoint_path: path to the original T5X checkpoint.
        pytorch_dump_folder_path: output directory (created if missing).
        use_large: build the "large" configuration instead of the base one.
        is_vqa: whether the checkpoint is the VQA variant.
    """
    flax_params = get_flax_param(tax_checkpoint_path)
    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1_536, d_ff=3_968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = PixaStructTextConfig(hidden_size=1_536, d_ff=3_968, num_heads=24, num_layers=18)
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )
    model = PixaStructForConditionalGeneration(config)
    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)
    tokenizer = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''')
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)
    if use_large:
        # NOTE(review): the corrupted source lost the attribute targets of
        # these two assignments; setting them on the image processor is
        # assumed — confirm against the upstream conversion script.
        image_processor.max_patches = 4_096
        image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print('''Model saved in {}'''.format(pytorch_dump_folder_path))
if __name__ == "__main__":
    # CLI entry point; the corrupted source bound the parser/args to
    # `_UpperCAmelCase` and read the misspelled `args.tax_checkpoint_path`
    # (argparse stores "--t5x_checkpoint_path" as `t5x_checkpoint_path`).
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 9 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class a__ ( TransformedDistribution ):
    """A distribution shifted by ``loc`` and scaled by ``scale``.

    NOTE(review): the corrupted source subclassed the undefined placeholder
    ``__A``; the ``super().__init__(base, [AffineTransform(...)])`` call and
    the ``self.base_dist`` accesses match the imported
    ``TransformedDistribution``, which is restored as the base.  The three
    properties (shadowed under one name in the corrupted source) are restored
    as ``mean``/``variance``/``stddev``.
    """

    def __init__(self, base_distribution, loc=None, scale=None, event_dim=0):
        # Identity transform by default: scale 1, shift 0.
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the transformed distribution: scale * base_mean + loc."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the transformed distribution: scale^2 * base_variance."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation (square root of the variance)."""
        return self.variance.sqrt()
class a__ ( nn.Module ):
    """Projects network features onto the raw distribution parameters.

    One ``nn.Linear`` is created per parameter in ``args_dim``; the outputs
    are passed through ``domain_map`` to constrain them to valid domains.

    NOTE(review): the corrupted source duplicated all constructor parameter
    names (a SyntaxError); they are restored from the body references as
    ``in_features``/``args_dim``/``domain_map``, and the forward method is
    restored under the ``nn.Module`` convention name (with the original
    corrupted name kept as an alias).
    """

    def __init__(self, in_features, args_dim, domain_map, **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # one projection head per distribution parameter
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x):
        """Apply every projection to ``x`` and map the results to their domains."""
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)

    # Backward-compatible alias for the corrupted original method name.
    _snake_case = forward
class a__ ( nn.Module ):
    """nn.Module wrapper around a plain callable so it can live inside a Module tree.

    NOTE(review): the constructor assigns the callable to a throwaway local
    instead of ``self.function`` (read by the forward pass), and the forward
    signature repeats one parameter name. Code left byte-identical.
    """
    def __init__(self , __lowercase ):
        super().__init__()
        __lowerCAmelCase = function
    def _snake_case (self , __lowercase , *__lowercase ):
        # forward: delegate straight to the wrapped callable.
        return self.function(__lowercase , *__lowercase )
class a__ :
    """Base class describing how network outputs parameterize a distribution.

    Subclasses set ``distribution_class``, ``args_dim`` (argument name ->
    per-argument dimensionality) and a ``domain_map`` that constrains raw
    projections to valid parameter ranges.

    NOTE(review): obfuscation left the class-level annotations unnamed and
    collapsed attribute assignments into throwaway locals; methods reference
    ``self.args_dim`` / ``self.dim`` / ``self.distribution_class`` that are
    never actually bound here. Code left byte-identical.
    """
    # Intended class attributes: distribution_class: type, in_features: int,
    # args_dim: Dict[str, int].
    __UpperCamelCase : type
    __UpperCamelCase : int
    __UpperCamelCase : Dict[str, int]
    def __init__(self , __lowercase = 1 ):
        # dim > 1 means a multivariate output built via Independent below.
        __lowerCAmelCase = dim
        __lowerCAmelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _snake_case (self , __lowercase ):
        # Build the base distribution from the mapped arguments; wrap in
        # Independent for multivariate (dim > 1) outputs.
        if self.dim == 1:
            return self.distribution_class(*__lowercase )
        else:
            return Independent(self.distribution_class(*__lowercase ) , 1 )
    def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , ):
        # Construct the (optionally affine-transformed) output distribution.
        __lowerCAmelCase = self._base_distribution(__lowercase )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(__lowercase , loc=__lowercase , scale=__lowercase , event_dim=self.event_dim )
    @property
    def _snake_case (self ):
        # event_shape: () for univariate, (dim,) for multivariate.
        return () if self.dim == 1 else (self.dim,)
    @property
    def _snake_case (self ):
        # event_dim: number of event dimensions.
        return len(self.event_shape )
    @property
    def _snake_case (self ):
        # value_in_support: a value the distribution can always produce
        # (used to pad/mask targets).
        return 0.0
    def _snake_case (self , __lowercase ):
        # Build the projection layer mapping ``in_features`` to the raw
        # distribution arguments, constrained through ``domain_map``.
        return ParameterProjection(
            in_features=__lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def _snake_case (self , *__lowercase ):
        # domain_map: must be overridden by concrete subclasses.
        raise NotImplementedError()
    @staticmethod
    def _snake_case (__lowercase ):
        # squareplus: smooth positivity mapping, (x + sqrt(x^2 + 4)) / 2;
        # behaves like softplus but is cheaper and maps R -> (0, inf).
        return (x + torch.sqrt(torch.square(__lowercase ) + 4.0 )) / 2.0
class a__ ( __A ):
    """Student's t output head: maps raw projections to (df, loc, scale).

    NOTE(review): the classmethod's parameters were all mangled to one name
    (SyntaxError) and its locals collapsed; intended inputs are the raw
    (df, loc, scale) tensors. Code left byte-identical.
    """
    __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    __UpperCamelCase : type = StudentT
    @classmethod
    def _snake_case (cls , __lowercase , __lowercase , __lowercase ):
        # domain_map: scale must be strictly positive (clamped above float eps),
        # df must exceed 2 so the variance is finite.
        __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
        __lowerCAmelCase = 2.0 + cls.squareplus(__lowercase )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class a__ ( __A ):
    """Normal output head: maps raw projections to (loc, scale).

    NOTE(review): duplicated parameter names (SyntaxError) and collapsed
    locals as elsewhere in this file. Code left byte-identical.
    """
    __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1}
    __UpperCamelCase : type = Normal
    @classmethod
    def _snake_case (cls , __lowercase , __lowercase ):
        # domain_map: force scale strictly positive via squareplus + eps clamp.
        __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class a__ ( __A ):
    """Negative-binomial output head for count data: (total_count, logits).

    NOTE(review): duplicated parameter names (SyntaxError) and collapsed
    locals as elsewhere in this file. Code left byte-identical.
    """
    __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1}
    __UpperCamelCase : type = NegativeBinomial
    @classmethod
    def _snake_case (cls , __lowercase , __lowercase ):
        # domain_map: total_count must be positive; logits are unconstrained.
        __lowerCAmelCase = cls.squareplus(__lowercase )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _snake_case (self , __lowercase ):
        # _base_distribution: NegativeBinomial takes keyword arguments, so the
        # generic positional construction in the base class is overridden.
        __lowerCAmelCase , __lowerCAmelCase = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=__lowercase , logits=__lowercase )
        else:
            return Independent(self.distribution_class(total_count=__lowercase , logits=__lowercase ) , 1 )
    def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None ):
        # Scaling a count distribution cannot be done with an affine transform;
        # instead the scale is folded into the logits (shifting the rate).
        __lowerCAmelCase , __lowerCAmelCase = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 9 | 1 |
'''simple docstring'''
import copy
import re
class a__ :
    """Builds short, collision-free names for hyperparameter trials.

    Each parameter name is shortened word-by-word to the smallest unique
    prefix; a trial's name is the class PREFIX plus one ``<short><value>``
    token per non-default parameter. ``parse_repr`` (last classmethod)
    inverts the encoding.

    NOTE(review): obfuscation mangled several method signatures to repeated
    parameter names (SyntaxError) and collapsed distinct assignments into one
    throwaway local, so the class does not run as written. Code left
    byte-identical; intent documented in comments.
    """
    # Intended class attributes: PREFIX = 'hp', DEFAULTS = {}, NAMING_INFO = None.
    __UpperCamelCase : Union[str, Any] = 'hp'
    __UpperCamelCase : Any = {}
    __UpperCamelCase : List[str] = None
    @classmethod
    def _snake_case (cls , __lowercase , __lowercase ):
        # set_defaults(prefix, defaults): configure and (re)build naming info.
        __lowerCAmelCase = prefix
        __lowerCAmelCase = defaults
        cls.build_naming_info()
    @staticmethod
    def _snake_case (__lowercase , __lowercase ):
        # shortname_for_word(info, word): smallest unused prefix of `word`,
        # falling back to `word#<alpha>` if every prefix is taken.
        if len(__lowercase ) == 0:
            return ""
        __lowerCAmelCase = None
        if any(char.isdigit() for char in word ):
            raise Exception(F"""Parameters should not contain numbers: '{word}' contains a number""" )
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1 , len(__lowercase ) + 1 ):
            __lowerCAmelCase = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                __lowerCAmelCase = prefix
                break
        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(__lowercase ):
                # Encode an integer as letters (base-10 digits mapped to A-J).
                __lowerCAmelCase = ''''''
                while integer != 0:
                    __lowerCAmelCase = chr(ord('''A''' ) + integer % 10 ) + s
                    integer //= 10
                return s

            __lowerCAmelCase = 0
            while True:
                __lowerCAmelCase = word + '''#''' + int_to_alphabetic(__lowercase )
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    __lowerCAmelCase = sword
                    break
        __lowerCAmelCase = short_word
        __lowerCAmelCase = word
        return short_word
    @staticmethod
    def _snake_case (__lowercase , __lowercase ):
        # shortname_for_key(info, param_name): join per-word short names,
        # preferring no separator, falling back to '_' on collision.
        __lowerCAmelCase = param_name.split('''_''' )
        __lowerCAmelCase = [TrialShortNamer.shortname_for_word(__lowercase , __lowercase ) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        __lowerCAmelCase = ['''''', '''_''']
        for separator in separators:
            __lowerCAmelCase = separator.join(__lowercase )
            if shortname not in info["reverse_short_param"]:
                __lowerCAmelCase = shortname
                __lowerCAmelCase = param_name
                return shortname
        return param_name
    @staticmethod
    def _snake_case (__lowercase , __lowercase ):
        # add_new_param_name(info, param_name): register both directions of
        # the param-name <-> short-name mapping.
        __lowerCAmelCase = TrialShortNamer.shortname_for_key(__lowercase , __lowercase )
        __lowerCAmelCase = short_name
        __lowerCAmelCase = param_name
    @classmethod
    def _snake_case (cls ):
        # build_naming_info: populate NAMING_INFO once from DEFAULTS.
        if cls.NAMING_INFO is not None:
            return
        __lowerCAmelCase = {
            '''short_word''': {},
            '''reverse_short_word''': {},
            '''short_param''': {},
            '''reverse_short_param''': {},
        }
        __lowerCAmelCase = list(cls.DEFAULTS.keys() )
        for k in field_keys:
            cls.add_new_param_name(__lowercase , __lowercase )
        __lowerCAmelCase = info
    @classmethod
    def _snake_case (cls , __lowercase ):
        # shortname(params): encode a params dict as PREFIX + short tokens,
        # skipping parameters that equal their default.
        cls.build_naming_info()
        assert cls.PREFIX is not None
        __lowerCAmelCase = [copy.copy(cls.PREFIX )]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(F"""You should provide a default value for the param name {k} with value {v}""" )
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            __lowerCAmelCase = cls.NAMING_INFO['''short_param'''][k]
            if isinstance(__lowercase , __lowercase ):
                # Booleans are serialized as 1/0.
                __lowerCAmelCase = 1 if v else 0
            __lowerCAmelCase = '''''' if isinstance(__lowercase , (int, float) ) else '''-'''
            __lowerCAmelCase = F"""{key}{sep}{v}"""
            name.append(__lowercase )
        return "_".join(__lowercase )
    @classmethod
    def _snake_case (cls , __lowercase ):
        # parse_repr(repr): invert shortname(); unspecified params get their
        # default value back.
        __lowerCAmelCase = repr[len(cls.PREFIX ) + 1 :]
        if repr == "":
            __lowerCAmelCase = []
        else:
            __lowerCAmelCase = repr.split('''_''' )
        __lowerCAmelCase = {}
        for value in values:
            if "-" in value:
                # '-' separates key and non-numeric value.
                __lowerCAmelCase , __lowerCAmelCase = value.split('''-''' )
            else:
                # Numeric values are fused to the key: split digits from letters.
                __lowerCAmelCase = re.sub('''[0-9.]''' , '''''' , __lowercase )
                __lowerCAmelCase = float(re.sub('''[^0-9.]''' , '''''' , __lowercase ) )
            __lowerCAmelCase = cls.NAMING_INFO['''reverse_short_param'''][p_k]
            __lowerCAmelCase = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                __lowerCAmelCase = cls.DEFAULTS[k]
        return parameters
| 9 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a__ ( __A ):
    """Donut-based document question answering tool (DocVQA).

    Encodes a question into the Donut task prompt, runs generation over the
    document image, and extracts the answer from the decoded sequence.

    NOTE(review): ``__init__`` repeats one parameter name for both ``*args``
    and ``**kwargs`` (SyntaxError) and the encode method's locals were
    collapsed by obfuscation; ``tokenajson`` below is presumably a mangling of
    the Donut processor's ``token2json`` — confirm against the original.
    Code left byte-identical.
    """
    __UpperCamelCase : Tuple = 'naver-clova-ix/donut-base-finetuned-docvqa'
    __UpperCamelCase : List[str] = (
        'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    __UpperCamelCase : Optional[int] = 'document_qa'
    __UpperCamelCase : Optional[int] = AutoProcessor
    __UpperCamelCase : Tuple = VisionEncoderDecoderModel
    __UpperCamelCase : Any = ['image', 'text']
    __UpperCamelCase : Optional[Any] = ['text']
    def __init__(self , *__lowercase , **__lowercase ):
        # Donut needs PIL for image handling; fail early if it is missing.
        if not is_vision_available():
            raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
        super().__init__(*__lowercase , **__lowercase )
    def _snake_case (self , __lowercase , __lowercase ):
        # encode(document, question): build the Donut DocVQA prompt around the
        # question and preprocess the document image into pixel values.
        __lowerCAmelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        __lowerCAmelCase = task_prompt.replace('''{user_input}''' , __lowercase )
        __lowerCAmelCase = self.pre_processor.tokenizer(
            __lowercase , add_special_tokens=__lowercase , return_tensors='''pt''' ).input_ids
        __lowerCAmelCase = self.pre_processor(__lowercase , return_tensors='''pt''' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def _snake_case (self , __lowercase ):
        # forward: greedy generation (num_beams=1) up to the decoder's
        # maximum position embeddings, with the unk token banned.
        return self.model.generate(
            inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__lowercase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__lowercase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__lowercase , ).sequences
    def _snake_case (self , __lowercase ):
        # decode: strip special tokens and the leading task token, then parse
        # the Donut output structure and return its "answer" field.
        __lowerCAmelCase = self.pre_processor.batch_decode(__lowercase )[0]
        __lowerCAmelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
        __lowerCAmelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
        __lowerCAmelCase = re.sub(R'''<.*?>''' , '''''' , __lowercase , count=1 ).strip() # remove first task start token
        __lowerCAmelCase = self.pre_processor.tokenajson(__lowercase )
        return sequence["answer"]
| 9 | 1 |
'''simple docstring'''
# Notebook cell prepended to generated documentation notebooks: installs the
# transformers/datasets requirements (with a commented-out from-source option).
_UpperCAmelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
# First cells of every generated notebook.
# NOTE(review): obfuscation renamed the constant, so the reference to
# INSTALL_CONTENT (the string defined just above) is an undefined name here.
_UpperCAmelCase : str = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
# Placeholder substitutions applied to doc templates when building notebooks.
_UpperCAmelCase : str = {
    """{processor_class}""": """FakeProcessorClass""",
    """{model_class}""": """FakeModelClass""",
    """{object_class}""": """FakeObjectClass""",
}
| 9 |
'''simple docstring'''
def __magic_name__( lowerCamelCase):
__lowerCAmelCase = 1
__lowerCAmelCase = 2
while i * i <= n:
__lowerCAmelCase = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def __magic_name__( ):
__lowerCAmelCase = 1
__lowerCAmelCase = 1
while True:
i += 1
t_num += i
if count_divisors(lowerCamelCase) > 5_0_0:
break
return t_num
if __name__ == "__main__":
print(solution())
| 9 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class a__ ( __A ):
    """Abstract base class for generation constraints.

    Subclasses implement ``advance`` / ``does_advance`` / ``update`` /
    ``reset`` / ``remaining`` / ``copy``; the constructor runs a self-test
    that repeatedly advances the constraint until it is fulfilled.

    NOTE(review): the self-test's locals (``counter``, ``completed``) were
    collapsed by obfuscation into one throwaway name, so the loop condition
    reads names that are never bound — NameError at runtime. Code left
    byte-identical.
    """
    def __init__(self ):
        # test for the above condition
        self.test()
    def _snake_case (self ):
        # test(): drive the constraint with its own advance() suggestions and
        # verify it reaches completion within 10000 steps.
        __lowerCAmelCase = 0
        __lowerCAmelCase = False
        while not completed:
            if counter == 1:
                self.reset()
            __lowerCAmelCase = self.advance()
            if not self.does_advance(__lowercase ):
                raise Exception(
                    '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.update(__lowercase )
            counter += 1
            if counter > 1_00_00:
                raise Exception('''update() does not fulfill the constraint.''' )
        if self.remaining() != 0:
            raise Exception('''Custom Constraint is not defined correctly.''' )
    @abstractmethod
    def _snake_case (self ):
        # advance(): next token(s) that would make progress.
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def _snake_case (self , __lowercase ):
        # does_advance(token_id): would this token make progress?
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def _snake_case (self , __lowercase ):
        # update(token_id) -> (stepped, completed, reset).
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def _snake_case (self ):
        # reset(): discard progress.
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def _snake_case (self ):
        # remaining(): number of steps left until fulfilled.
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def _snake_case (self , __lowercase=False ):
        # copy(stateful=False): fresh (or state-preserving) clone.
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class a__ ( __A ):
    """Constraint forcing an exact token sequence (phrase) to appear in order.

    NOTE(review): obfuscation collapsed locals and attribute assignments into
    one throwaway name — e.g. ``self.token_ids`` / ``self.seqlen`` /
    ``self.fulfilled_idx`` / ``self.completed`` are read below but never set,
    and ``isinstance(__lowercase , __lowercase )`` compares a value against
    itself instead of against ``list``/``int``. Code left byte-identical.
    """
    def __init__(self , __lowercase ):
        super(__lowercase , self ).__init__()
        # Validate that token_ids is a non-empty list of non-negative ints.
        if not isinstance(__lowercase , __lowercase ) or len(__lowercase ) == 0:
            raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
        if any((not isinstance(__lowercase , __lowercase ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
        __lowerCAmelCase = token_ids
        __lowerCAmelCase = len(self.token_ids )
        __lowerCAmelCase = -1 # the index of the currently fulfilled step
        __lowerCAmelCase = False
    def _snake_case (self ):
        # advance(): the next token of the phrase, or None once complete.
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]
    def _snake_case (self , __lowercase ):
        # does_advance(token_id): True iff token_id is the next phrase token.
        if not isinstance(__lowercase , __lowercase ):
            raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(__lowercase )}""" )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]
    def _snake_case (self , __lowercase ):
        # update(token_id) -> (stepped, completed, reset): advance on a match,
        # otherwise reset all progress.
        if not isinstance(__lowercase , __lowercase ):
            raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(__lowercase )}""" )
        __lowerCAmelCase = False
        __lowerCAmelCase = False
        __lowerCAmelCase = False
        if self.does_advance(__lowercase ):
            self.fulfilled_idx += 1
            __lowerCAmelCase = True
            if self.fulfilled_idx == (self.seqlen - 1):
                __lowerCAmelCase = True
            __lowerCAmelCase = completed
        else:
            # failed to make progress.
            __lowerCAmelCase = True
            self.reset()
        return stepped, completed, reset
    def _snake_case (self ):
        # reset(): back to the start of the phrase.
        __lowerCAmelCase = False
        __lowerCAmelCase = 0
    def _snake_case (self ):
        # remaining(): tokens of the phrase still to be generated.
        return self.seqlen - (self.fulfilled_idx + 1)
    def _snake_case (self , __lowercase=False ):
        # copy(stateful): fresh constraint, optionally carrying progress over.
        __lowerCAmelCase = PhrasalConstraint(self.token_ids )
        if stateful:
            __lowerCAmelCase = self.seqlen
            __lowerCAmelCase = self.fulfilled_idx
            __lowerCAmelCase = self.completed
        return new_constraint
class a__ :
    """Prefix trie over several candidate token sequences.

    Used by the disjunctive constraint to track which continuations are still
    possible after a partial sequence; optionally rejects inputs where one
    sequence is a complete prefix of another (``no_subsets``).

    NOTE(review): ``__init__`` repeats a parameter name (SyntaxError) and the
    trie-building locals (``root``, ``level``) were collapsed by obfuscation;
    ``self.trie`` is read below but never set. Code left byte-identical.
    """
    def __init__(self , __lowercase , __lowercase=True ):
        # Intended: (nested_token_ids, no_subsets=True). max_height is the
        # longest candidate sequence.
        __lowerCAmelCase = max([len(__lowercase ) for one in nested_token_ids] )
        __lowerCAmelCase = {}
        for token_ids in nested_token_ids:
            __lowerCAmelCase = root
            for tidx, token_id in enumerate(__lowercase ):
                if token_id not in level:
                    __lowerCAmelCase = {}
                __lowerCAmelCase = level[token_id]
            if no_subsets and self.has_subsets(__lowercase , __lowercase ):
                raise ValueError(
                    '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
                    F""" {nested_token_ids}.""" )
        __lowerCAmelCase = root
    def _snake_case (self , __lowercase ):
        # next_tokens(current_seq): walk the trie along the partial sequence
        # and return the set of tokens that can follow it.
        __lowerCAmelCase = self.trie
        for current_token in current_seq:
            __lowerCAmelCase = start[current_token]
        __lowerCAmelCase = list(start.keys() )
        return next_tokens
    def _snake_case (self , __lowercase ):
        # reached_leaf(current_seq): True iff no continuation exists.
        __lowerCAmelCase = self.next_tokens(__lowercase )
        return len(__lowercase ) == 0
    def _snake_case (self , __lowercase ):
        # count_leaves(root): number of complete sequences below this node.
        __lowerCAmelCase = list(root.values() )
        if len(__lowercase ) == 0:
            return 1
        else:
            return sum([self.count_leaves(__lowercase ) for nn in next_nodes] )
    def _snake_case (self , __lowercase , __lowercase ):
        # has_subsets(trie, nested_token_ids): a leaf count smaller than the
        # number of sequences means one sequence is a prefix of another.
        __lowerCAmelCase = self.count_leaves(__lowercase )
        return len(__lowercase ) != leaf_count
class a__ ( __A ):
    """Constraint fulfilled when ANY of several token sequences is generated.

    Backed by a DisjunctiveTrie over the candidate sequences; progress is the
    current partial sequence walked through the trie.

    NOTE(review): obfuscation collapsed attribute assignments (``self.trie``,
    ``self.token_ids``, ``self.seqlen``, ``self.current_seq``,
    ``self.completed``) into one throwaway local and turned the isinstance
    checks into self-comparisons, so the class does not run as written. Code
    left byte-identical.
    """
    def __init__(self , __lowercase ):
        super(__lowercase , self ).__init__()
        # Validate nested_token_ids: non-empty list of lists of non-negative ints.
        if not isinstance(__lowercase , __lowercase ) or len(__lowercase ) == 0:
            raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
        if any(not isinstance(__lowercase , __lowercase ) for token_ids in nested_token_ids ):
            raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
        if any(
            any((not isinstance(__lowercase , __lowercase ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
        __lowerCAmelCase = DisjunctiveTrie(__lowercase )
        __lowerCAmelCase = nested_token_ids
        __lowerCAmelCase = self.trie.max_height
        __lowerCAmelCase = []
        __lowerCAmelCase = False
    def _snake_case (self ):
        # advance(): all tokens that extend the current partial sequence,
        # or None when a candidate sequence has been completed.
        __lowerCAmelCase = self.trie.next_tokens(self.current_seq )
        if len(__lowercase ) == 0:
            return None
        else:
            return token_list
    def _snake_case (self , __lowercase ):
        # does_advance(token_id): is token_id a valid continuation?
        if not isinstance(__lowercase , __lowercase ):
            raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(__lowercase )}""" )
        __lowerCAmelCase = self.trie.next_tokens(self.current_seq )
        return token_id in next_tokens
    def _snake_case (self , __lowercase ):
        # update(token_id) -> (stepped, completed, reset): extend the partial
        # sequence on a match, otherwise reset; completed when a trie leaf
        # has been reached.
        if not isinstance(__lowercase , __lowercase ):
            raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(__lowercase )}""" )
        __lowerCAmelCase = False
        __lowerCAmelCase = False
        __lowerCAmelCase = False
        if self.does_advance(__lowercase ):
            self.current_seq.append(__lowercase )
            __lowerCAmelCase = True
        else:
            __lowerCAmelCase = True
            self.reset()
        __lowerCAmelCase = self.trie.reached_leaf(self.current_seq )
        __lowerCAmelCase = completed
        return stepped, completed, reset
    def _snake_case (self ):
        # reset(): discard the partial sequence.
        __lowerCAmelCase = False
        __lowerCAmelCase = []
    def _snake_case (self ):
        # remaining(): worst-case tokens still needed (longest candidate).
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq )
    def _snake_case (self , __lowercase=False ):
        # copy(stateful): fresh constraint, optionally carrying progress over.
        __lowerCAmelCase = DisjunctiveConstraint(self.token_ids )
        if stateful:
            __lowerCAmelCase = self.seqlen
            __lowerCAmelCase = self.current_seq
            __lowerCAmelCase = self.completed
        return new_constraint
class a__ :
    """Tracks fulfillment of a list of constraints during beam search.

    Constraints move between three buckets: ``pending_constraints`` (not yet
    started), ``inprogress_constraint`` (at most one, partially fulfilled) and
    ``complete_constraints``. ``add`` feeds one generated token at a time.

    NOTE(review): obfuscation collapsed most attribute assignments
    (``self.constraints``, ``self.max_seqlen``, ``self.n_constraints``,
    ``self.completed``, the three buckets, ...) into one throwaway local, so
    the class does not run as written. Code left byte-identical; intent
    documented in comments.
    """
    def __init__(self , __lowercase ):
        __lowerCAmelCase = constraints
        # max # of steps required to fulfill a given constraint
        __lowerCAmelCase = max([c.seqlen for c in constraints] )
        __lowerCAmelCase = len(__lowercase )
        __lowerCAmelCase = False
        self.init_state()
    def _snake_case (self ):
        # init_state(): everything pending, nothing in progress; fresh
        # stateless copies of the constraints.
        __lowerCAmelCase = []
        __lowerCAmelCase = None
        __lowerCAmelCase = [constraint.copy(stateful=__lowercase ) for constraint in self.constraints]
    def _snake_case (self ):
        # get_bank(): scalar progress score used to rank beams — completed
        # constraints weigh max_seqlen each, plus partial progress.
        __lowerCAmelCase = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints ) * self.max_seqlen) + add
    def _snake_case (self ):
        # advance(): tokens that would make progress — either continuing the
        # in-progress constraint or starting any pending one.
        __lowerCAmelCase = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
                __lowerCAmelCase = constraint.advance()
                if isinstance(__lowercase , __lowercase ):
                    token_list.append(__lowercase )
                elif isinstance(__lowercase , __lowercase ):
                    token_list.extend(__lowercase )
        else:
            __lowerCAmelCase = self.inprogress_constraint.advance()
            if isinstance(__lowercase , __lowercase ):
                token_list.append(__lowercase )
            elif isinstance(__lowercase , __lowercase ):
                token_list.extend(__lowercase )
        if len(__lowercase ) == 0:
            return None
        else:
            return token_list
    def _snake_case (self , __lowercase ):
        # reset(token_ids): rebuild state by replaying an already-generated
        # token sequence from scratch.
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                __lowerCAmelCase , __lowerCAmelCase = self.add(__lowercase )
                # the entire list of constraints are fulfilled
                if self.completed:
                    break
    def _snake_case (self , __lowercase ):
        # add(token_id) -> (complete, stepped): route the token to the
        # in-progress constraint, or try to start a pending one with it.
        if not isinstance(__lowercase , __lowercase ):
            raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
        __lowerCAmelCase , __lowerCAmelCase = False, False
        if self.completed:
            __lowerCAmelCase = True
            __lowerCAmelCase = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.inprogress_constraint.update(__lowercase )
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".

                # But that doesn't mean we self.init_state(), since we only reset the state for this particular
                # constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__lowercase ) )
                __lowerCAmelCase = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint )
                __lowerCAmelCase = None
                if len(self.pending_constraints ) == 0:
                    # we're done!
                    __lowerCAmelCase = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(__lowercase ):
                    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = pending_constraint.update(__lowercase )
                    if not stepped:
                        raise Exception(
                            '''`constraint.update(token_id)` is not yielding incremental progress, '''
                            '''even though `constraint.does_advance(token_id)` is true.''' )
                    if complete:
                        self.complete_constraints.append(__lowercase )
                        __lowerCAmelCase = None
                    if not complete and stepped:
                        __lowerCAmelCase = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        __lowerCAmelCase = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            __lowerCAmelCase = True
                        break # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def _snake_case (self , __lowercase=True ):
        # copy(stateful=True): clone the whole tracker, optionally carrying
        # per-constraint progress over to the new state.
        __lowerCAmelCase = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            __lowerCAmelCase = [
                constraint.copy(stateful=__lowercase ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                __lowerCAmelCase = self.inprogress_constraint.copy(stateful=__lowercase )
            __lowerCAmelCase = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
| 9 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class a__ ( unittest.TestCase ):
    """Unit tests for DisjunctiveConstraint input validation and stepping.

    NOTE(review): obfuscation replaced the expected-type arguments
    (``list``, ``ValueError``) with the undefined name ``__lowercase`` in the
    assertions below, so the tests do not run as written. Code left
    byte-identical.
    """
    def _snake_case (self ):
        # Valid input: token_ids must be a list of integer lists, not tensors.
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        __lowerCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
        __lowerCAmelCase = DisjunctiveConstraint(__lowercase )
        self.assertTrue(isinstance(dc.token_ids , __lowercase ) )
        with self.assertRaises(__lowercase ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(__lowercase ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def _snake_case (self ):
        # A sequence that is a complete prefix of another must be rejected.
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        __lowerCAmelCase = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(__lowercase ):
            DisjunctiveConstraint(__lowercase ) # fails here
    def _snake_case (self ):
        # Stepping through [1, 2, 3] should report progress and completion.
        __lowerCAmelCase = [[1, 2, 3], [1, 2, 4]]
        __lowerCAmelCase = DisjunctiveConstraint(__lowercase )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 )
        __lowerCAmelCase = stepped is True and completed is False and reset is False
        self.assertTrue(__lowercase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 )
        __lowerCAmelCase = stepped is True and completed is False and reset is False
        self.assertTrue(__lowercase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(3 )
        __lowerCAmelCase = stepped is True and completed is True and reset is False
        self.assertTrue(__lowercase )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def _snake_case (self ):
        # Completion mid-trie, then reset and completion via a shorter branch.
        __lowerCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        __lowerCAmelCase = DisjunctiveConstraint(__lowercase )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 9 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
# Map of pretrained model identifiers to their hosted config.json URLs.
_UpperCAmelCase : Optional[int] = {
    """edbeeching/decision-transformer-gym-hopper-medium""": (
        """https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class a__ ( __A ):
    """Configuration class for a GPT-2-style Decision Transformer model.

    NOTE(review): the ``__init__`` signature below repeats the same mangled
    parameter name for every keyword argument (SyntaxError), and the body
    assigns to one throwaway local instead of the named attributes the
    defaults correspond to. Intended parameter order (matching the defaults):
    state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096,
    action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1,
    n_inner=None, activation_function='relu', resid_pdrop=0.1,
    embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5,
    initializer_range=0.02, scale_attn_weights=True, use_cache=True,
    bos_token_id=50256, eos_token_id=50256,
    scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False.
    Code left byte-identical.
    """
    __UpperCamelCase : List[str] = 'decision_transformer'
    __UpperCamelCase : List[Any] = ['past_key_values']
    # Standard HF attribute aliases onto the GPT-2 style names.
    __UpperCamelCase : str = {
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(self , __lowercase=17 , __lowercase=4 , __lowercase=1_28 , __lowercase=40_96 , __lowercase=True , __lowercase=1 , __lowercase=10_24 , __lowercase=3 , __lowercase=1 , __lowercase=None , __lowercase="relu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=1e-5 , __lowercase=0.0_2 , __lowercase=True , __lowercase=True , __lowercase=5_02_56 , __lowercase=5_02_56 , __lowercase=False , __lowercase=False , **__lowercase , ):
        __lowerCAmelCase = state_dim
        __lowerCAmelCase = act_dim
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = max_ep_len
        __lowerCAmelCase = action_tanh
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = n_positions
        __lowerCAmelCase = n_layer
        __lowerCAmelCase = n_head
        __lowerCAmelCase = n_inner
        __lowerCAmelCase = activation_function
        __lowerCAmelCase = resid_pdrop
        __lowerCAmelCase = embd_pdrop
        __lowerCAmelCase = attn_pdrop
        __lowerCAmelCase = layer_norm_epsilon
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = scale_attn_weights
        __lowerCAmelCase = use_cache
        __lowerCAmelCase = scale_attn_by_inverse_layer_idx
        __lowerCAmelCase = reorder_and_upcast_attn
        __lowerCAmelCase = bos_token_id
        __lowerCAmelCase = eos_token_id
        super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
| 9 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_UpperCAmelCase : List[str] = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_UpperCAmelCase : str = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_UpperCAmelCase : Tuple = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ):
if label_map is not None:
for old_id, new_id in label_map.items():
__lowerCAmelCase = new_id
# turn into Numpy arrays
__lowerCAmelCase = np.array(lowerCamelCase)
__lowerCAmelCase = np.array(lowerCamelCase)
if reduce_labels:
__lowerCAmelCase = 2_5_5
__lowerCAmelCase = label - 1
__lowerCAmelCase = 2_5_5
__lowerCAmelCase = label != ignore_index
__lowerCAmelCase = np.not_equal(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = pred_label[mask]
__lowerCAmelCase = np.array(lowerCamelCase)[mask]
__lowerCAmelCase = pred_label[pred_label == label]
__lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0]
__lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0]
__lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0]
__lowerCAmelCase = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ):
__lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa)
__lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa)
__lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa)
__lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa)
for result, gt_seg_map in zip(lowerCamelCase, lowerCamelCase):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = intersect_and_union(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase)
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = total_intersect_and_union(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase)
# compute metrics
__lowerCAmelCase = {}
__lowerCAmelCase = total_area_intersect.sum() / total_area_label.sum()
__lowerCAmelCase = total_area_intersect / total_area_union
__lowerCAmelCase = total_area_intersect / total_area_label
__lowerCAmelCase = np.nanmean(lowerCamelCase)
__lowerCAmelCase = np.nanmean(lowerCamelCase)
__lowerCAmelCase = all_acc
__lowerCAmelCase = iou
__lowerCAmelCase = acc
if nan_to_num is not None:
__lowerCAmelCase = {metric: np.nan_to_num(lowerCamelCase, nan=lowerCamelCase) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
"""simple docstring"""
def _snake_case (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
} ) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = False , ):
__lowerCAmelCase = mean_iou(
results=__lowercase , gt_seg_maps=__lowercase , num_labels=__lowercase , ignore_index=__lowercase , nan_to_num=__lowercase , label_map=__lowercase , reduce_labels=__lowercase , )
return iou_result
| 9 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class a__ ( unittest.TestCase ):
"""simple docstring"""
def _snake_case (self ):
__lowerCAmelCase = inspect.getfile(accelerate.test_utils )
__lowerCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
__lowerCAmelCase = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
__lowerCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def _snake_case (self ):
print(F"""Found {torch.cuda.device_count()} devices.""" )
__lowerCAmelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase , env=os.environ.copy() )
@require_multi_gpu
def _snake_case (self ):
print(F"""Found {torch.cuda.device_count()} devices.""" )
__lowerCAmelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase , env=os.environ.copy() )
@require_multi_gpu
def _snake_case (self ):
__lowerCAmelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase , env=os.environ.copy() )
@require_multi_gpu
def _snake_case (self ):
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
__lowerCAmelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(__lowercase , env=os.environ.copy() )
if __name__ == "__main__":
_UpperCAmelCase : Any = Accelerator()
_UpperCAmelCase : Tuple = (accelerator.state.process_index + 2, 1_0)
_UpperCAmelCase : Optional[int] = torch.randint(0, 1_0, shape).to(accelerator.device)
_UpperCAmelCase : Tuple = """"""
_UpperCAmelCase : Any = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_UpperCAmelCase : Union[str, Any] = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_UpperCAmelCase : Any = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 9 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class a__ ( __A , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : str = DebertaTokenizer
__UpperCamelCase : str = True
__UpperCamelCase : Any = DebertaTokenizerFast
def _snake_case (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
__lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowerCAmelCase = {'''unk_token''': '''[UNK]'''}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowercase ) )
def _snake_case (self , **__lowercase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
def _snake_case (self , __lowercase ):
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = '''lower newer'''
return input_text, output_text
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__lowerCAmelCase = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__lowerCAmelCase = tokens + [tokenizer.unk_token]
__lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = tokenizer('''Hello''' , '''World''' )
__lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __lowercase )
@slow
def _snake_case (self ):
__lowerCAmelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
__lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowercase )
__lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowercase )
__lowerCAmelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
__lowerCAmelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _snake_case (self ):
__lowerCAmelCase = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__lowerCAmelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
__lowerCAmelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
__lowerCAmelCase = tokenizer(__lowercase , padding=__lowercase )
__lowerCAmelCase = [tokenizer.decode(__lowercase , skip_special_tokens=__lowercase ) for seq in encoding['''input_ids''']]
# fmt: off
__lowerCAmelCase = {
'''input_ids''': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__lowerCAmelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __lowercase )
for expected, decoded in zip(__lowercase , __lowercase ):
self.assertEqual(__lowercase , __lowercase )
| 9 | 1 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
_UpperCAmelCase : List[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
_UpperCAmelCase : Tuple = 1_2_8_0_2_2
_UpperCAmelCase : Optional[int] = 1_2_8_0_2_8
@require_sentencepiece
class a__ ( __A , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : List[Any] = MaMaaaTokenizer
__UpperCamelCase : Dict = False
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : str = True
def _snake_case (self ):
super().setUp()
__lowerCAmelCase = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
__lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__lowerCAmelCase = Path(self.tmpdirname )
save_json(__lowercase , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__lowercase , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
__lowerCAmelCase = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case (self , **__lowercase ):
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def _snake_case (self , __lowercase ):
return (
"This is a test",
"This is a test",
)
def _snake_case (self ):
__lowerCAmelCase = '''</s>'''
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<s>''' )
self.assertEqual(len(__lowercase ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def _snake_case (self ):
pass
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [2, 3, 4, 5, 6] , )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(__lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
__lowerCAmelCase = tokenizer.convert_tokens_to_string(__lowercase )
self.assertEqual(__lowercase , '''This is a test''' )
@slow
def _snake_case (self ):
# fmt: off
__lowerCAmelCase = {'''input_ids''': [[12_80_22, 11_01_08, 3_97, 11, 3_82_72, 22_47, 12_48_11, 2_85, 1_81_05, 15_86, 2_07, 7, 3_95_34, 44_28, 3_97, 10_19, 1_81_05, 15_86, 2_07, 7, 4_13_37, 1_67_86, 2_41, 7, 2_02_14, 17, 12_56_90, 1_03_98, 7, 4_43_78, 5_80_69, 6_83_42, 77_98, 73_43, 11, 2_99, 3_33_10, 4, 1_58, 3_73_50, 9_40_77, 45_69, 2_99, 3_33_10, 90, 4, 5_28_40, 2_90, 4, 3_12_70, 1_12, 2_99, 6_82, 4, 5_28_40, 3_99_53, 1_40_79, 1_93, 5_25_19, 9_08_94, 1_78_94, 12_06_97, 11, 4_04_45, 5_51, 17, 10_19, 5_25_19, 9_08_94, 1_77_56, 9_63, 11, 4_04_45, 4_80, 17, 97_92, 11_20, 51_73, 13_93, 62_40, 1_67_86, 2_41, 12_09_96, 28, 12_45, 13_93, 11_82_40, 1_11_23, 10_19, 9_36_12, 26_91, 1_06_18, 9_80_58, 12_04_09, 19_28, 2_79, 4, 4_06_83, 3_67, 1_78, 2_07, 10_19, 1_03, 10_31_21, 5_06, 6_52_96, 5, 2], [12_80_22, 2_12_17, 3_67, 1_17, 12_54_50, 1_28, 7_19, 7, 73_08, 40, 9_36_12, 1_26_69, 11_16, 1_67_04, 71, 1_77_85, 36_99, 1_55_92, 35, 1_44, 95_84, 2_41, 1_19_43, 7_13, 9_50, 7_99, 22_47, 8_84_27, 1_50, 1_49, 11_88_13, 12_07_06, 10_19, 10_69_06, 8_15_18, 28, 12_24, 2_27_99, 3_97, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_80_22, 16_58, 12_33_11, 51_55, 55_78, 47_22, 2_79, 1_49_47, 23_66, 11_20, 11_97, 14, 13_48, 92_32, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : List[str] = 'facebook/m2m100_418M'
__UpperCamelCase : Any = [
'In my opinion, there are two levels of response from the French government.',
'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
]
__UpperCamelCase : Tuple = [
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
]
# fmt: off
__UpperCamelCase : Any = [EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2]
@classmethod
def _snake_case (cls ):
__lowerCAmelCase = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
__lowerCAmelCase = 1
return cls
def _snake_case (self ):
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 12_80_06 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 12_80_22 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 12_80_76 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 12_80_63 )
def _snake_case (self ):
__lowerCAmelCase = self.tokenizer.get_vocab()
self.assertEqual(len(__lowercase ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['''<unk>'''] , 3 )
self.assertIn(self.tokenizer.get_lang_token('''en''' ) , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = '''en'''
__lowerCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowercase )
def _snake_case (self ):
self.assertIn(__lowercase , self.tokenizer.all_special_ids )
# fmt: off
__lowerCAmelCase = [FR_CODE, 53_64, 82, 86_42, 4, 2_94, 47, 8, 1_40_28, 1_36, 32_86, 97_06, 6, 9_07_97, 6, 14_40_12, 1_62, 8_81_28, 3_00_61, 5, 2]
# fmt: on
__lowerCAmelCase = self.tokenizer.decode(__lowercase , skip_special_tokens=__lowercase )
__lowerCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertNotIn(self.tokenizer.eos_token , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(__lowercase )
__lowerCAmelCase = MaMaaaTokenizer.from_pretrained(__lowercase )
self.assertDictEqual(new_tok.lang_token_to_id , __lowercase )
@require_torch
def _snake_case (self ):
__lowerCAmelCase = '''en'''
__lowerCAmelCase = '''fr'''
__lowerCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowercase , return_tensors='''pt''' )
__lowerCAmelCase = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
__lowerCAmelCase = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _snake_case (self ):
__lowerCAmelCase = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
__lowerCAmelCase = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _snake_case (self ):
__lowerCAmelCase = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
__lowerCAmelCase = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _snake_case (self ):
__lowerCAmelCase = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
self.assertEqual(
nested_simplify(__lowercase ) , {
# en_XX, A, test, EOS
'''input_ids''': [[12_80_22, 58, 41_83, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 12_80_06,
} , )
| 9 |
'''simple docstring'''
import argparse
import datetime
def __magic_name__( lowerCamelCase):
__lowerCAmelCase = {
'''0''': '''Sunday''',
'''1''': '''Monday''',
'''2''': '''Tuesday''',
'''3''': '''Wednesday''',
'''4''': '''Thursday''',
'''5''': '''Friday''',
'''6''': '''Saturday''',
}
__lowerCAmelCase = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(lowerCamelCase) < 1_1:
raise ValueError('''Must be 10 characters long''')
# Get month
__lowerCAmelCase = int(date_input[0] + date_input[1])
# Validate
if not 0 < m < 1_3:
raise ValueError('''Month must be between 1 - 12''')
__lowerCAmelCase = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('''Date separator must be \'-\' or \'/\'''')
# Get day
__lowerCAmelCase = int(date_input[3] + date_input[4])
# Validate
if not 0 < d < 3_2:
raise ValueError('''Date must be between 1 - 31''')
# Get second separator
__lowerCAmelCase = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('''Date separator must be \'-\' or \'/\'''')
# Get year
__lowerCAmelCase = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
# Arbitrary year range
if not 4_5 < y < 8_5_0_0:
raise ValueError(
'''Year out of range. There has to be some sort of limit...right?''')
# Get datetime obj for validation
__lowerCAmelCase = datetime.date(int(lowerCamelCase), int(lowerCamelCase), int(lowerCamelCase))
# Start math
if m <= 2:
__lowerCAmelCase = y - 1
__lowerCAmelCase = m + 1_2
# maths var
__lowerCAmelCase = int(str(lowerCamelCase)[:2])
__lowerCAmelCase = int(str(lowerCamelCase)[2:])
__lowerCAmelCase = int(2.6 * m - 5.39)
__lowerCAmelCase = int(c / 4)
__lowerCAmelCase = int(k / 4)
__lowerCAmelCase = int(d + k)
__lowerCAmelCase = int(t + u + v + x)
__lowerCAmelCase = int(z - (2 * c))
__lowerCAmelCase = round(w % 7)
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('''The date was evaluated incorrectly. Contact developer.''')
# Response
__lowerCAmelCase = F"""Your date {date_input}, is a {days[str(lowerCamelCase)]}!"""
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase : List[str] = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
_UpperCAmelCase : Dict = parser.parse_args()
zeller(args.date_input)
| 9 | 1 |
'''simple docstring'''
from typing import Any
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ):
_validation(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, )
# Creates data structures and fill initial step
__lowerCAmelCase = {}
__lowerCAmelCase = {}
for state in states_space:
__lowerCAmelCase = observations_space[0]
__lowerCAmelCase = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__lowerCAmelCase = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1, len(lowerCamelCase)):
__lowerCAmelCase = observations_space[o]
__lowerCAmelCase = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__lowerCAmelCase = ''''''
__lowerCAmelCase = -1
for k_state in states_space:
__lowerCAmelCase = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__lowerCAmelCase = probability
__lowerCAmelCase = k_state
# Update probabilities and pointers dicts
__lowerCAmelCase = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__lowerCAmelCase = arg_max
# The final observation
__lowerCAmelCase = observations_space[len(lowerCamelCase) - 1]
# argmax for given final observation
__lowerCAmelCase = ''''''
__lowerCAmelCase = -1
for k_state in states_space:
__lowerCAmelCase = probabilities[(k_state, final_observation)]
if probability > max_probability:
__lowerCAmelCase = probability
__lowerCAmelCase = k_state
__lowerCAmelCase = arg_max
# Process pointers backwards
__lowerCAmelCase = last_state
__lowerCAmelCase = []
for o in range(len(lowerCamelCase) - 1, -1, -1):
result.append(lowerCamelCase)
__lowerCAmelCase = pointers[previous, observations_space[o]]
result.reverse()
return result
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ):
_validate_not_empty(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, )
_validate_lists(lowerCamelCase, lowerCamelCase)
_validate_dicts(
lowerCamelCase, lowerCamelCase, lowerCamelCase)
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
]):
raise ValueError('''There\'s an empty parameter''')
def __magic_name__( lowerCamelCase, lowerCamelCase):
_validate_list(lowerCamelCase, '''observations_space''')
_validate_list(lowerCamelCase, '''states_space''')
def __magic_name__( lowerCamelCase, lowerCamelCase):
if not isinstance(_object, lowerCamelCase):
__lowerCAmelCase = F"""{var_name} must be a list"""
raise ValueError(lowerCamelCase)
else:
for x in _object:
if not isinstance(lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = F"""{var_name} must be a list of strings"""
raise ValueError(lowerCamelCase)
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, ):
_validate_dict(lowerCamelCase, '''initial_probabilities''', lowerCamelCase)
_validate_nested_dict(lowerCamelCase, '''transition_probabilities''')
_validate_nested_dict(lowerCamelCase, '''emission_probabilities''')
def __magic_name__( lowerCamelCase, lowerCamelCase):
_validate_dict(_object, lowerCamelCase, lowerCamelCase)
for x in _object.values():
_validate_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase)
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False):
if not isinstance(_object, lowerCamelCase):
__lowerCAmelCase = F"""{var_name} must be a dict"""
raise ValueError(lowerCamelCase)
if not all(isinstance(lowerCamelCase, lowerCamelCase) for x in _object):
__lowerCAmelCase = F"""{var_name} all keys must be strings"""
raise ValueError(lowerCamelCase)
if not all(isinstance(lowerCamelCase, lowerCamelCase) for x in _object.values()):
__lowerCAmelCase = '''nested dictionary ''' if nested else ''''''
__lowerCAmelCase = F"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(lowerCamelCase)
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 9 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for ConsistencyModelPipeline using tiny test UNets."""

    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        """Tiny unconditional UNet checkpoint for fast tests."""
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        """Tiny class-conditional UNet checkpoint for fast tests."""
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        """Return the pipeline components (unet + consistency-model scheduler)."""
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic call kwargs for the pipeline."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    """Slow GPU tests for ConsistencyModelPipeline on the ImageNet-64 CD checkpoint."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        """Build call kwargs; optionally pin fixed latents for bitwise-stable runs."""
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        """Deterministic latents tensor for the given seed/device/dtype/shape."""
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 9 | 1 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level constants referenced by CpmAntTokenizer below.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered ``token -> index`` dictionary.

    One token per line; the line number (0-based) becomes the token id.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        # Strip only the trailing newline; other whitespace may be significant.
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    """Greedy longest-match-first wordpiece tokenizer over a fixed vocabulary."""

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split `token` into the longest vocabulary substrings, left to right.

        Characters that start no known substring become `unk_token`; a token
        longer than `max_input_chars_per_word` is returned as a single unk.
        """
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Shrink the window from the right until a vocab entry matches.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    """Tokenizer for CPM-Ant: jieba pre-segmentation followed by wordpiece lookup."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Map literal space/newline onto the special vocab entries, then
        # drop the special entries so only " " and "\n" remain.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment with jieba, then wordpiece-tokenize each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Drop negative ids and pad/eos/bos before decoding."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocabulary back to disk; returns a 1-tuple with the path."""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the on-disk special tokens for space and newline.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_1=None):
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self, token_ids_a, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_a))
| 9 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Load the iris dataset and split it; these names are read by the
# `classifier(...)` demo call in the __main__ guard below.
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Euclidean distance between two points given as numeric sequences."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest training points.

    :param train_data: training feature vectors
    :param train_target: integer class index for each training vector
    :param classes: class names, indexed by target value
    :param point: the sample to classify
    :param k: number of neighbours to vote (default 5)
    :return: the name of the winning class
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
# Demo: classify a single iris sample against the training split.
if __name__ == "__main__":
    # NOTE(review): this call expects X_train/y_train/classes to be bound by
    # the load_iris/train_test_split lines above — confirm those assignments
    # actually target these names (they currently target `_UpperCAmelCase`).
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 9 | 1 |
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    """Holds the (empty) feature-extractor config used by the test class below."""

    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        # MarkupLMFeatureExtractor takes no configuration arguments.
        return {}
def get_html_strings():
    """Return two sample HTML documents used as feature-extractor fixtures."""
    html_string_1 = """<HTML>
    <HEAD>
    <TITLE>sample document</TITLE>
    </HEAD>
    <BODY BGCOLOR="FFFFFF">
    <HR>
    <a href="http://google.com">Goog</a>
    <H1>This is one header</H1>
    <H2>This is a another Header</H2>
    <P>Travel from
    <P>
    <B>SFO to JFK</B>
    <BR>
    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
    <HR>
    <div style="color:#0000FF">
    <h3>Traveler <b> name </b> is
    <p> John Doe </p>
    </div>"""

    html_string_2 = """
    <!DOCTYPE html>
    <html>
    <body>
    <h1>My First Heading</h1>
    <p>My first paragraph.</p>
    </body>
    </html>
    """

    return [html_string_1, html_string_2]
@require_bsa
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    """Tests for MarkupLMFeatureExtractor (node/xpath extraction from HTML)."""

    feature_extraction_class = MarkupLMFeatureExtractor if is_bsa_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 9 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    """Tests for OwlViTProcessor (CLIP tokenizer + OwlViT image processor)."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 9 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    """Tool wrapping a Donut VisionEncoderDecoder model for document VQA."""

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        """Build decoder prompt ids and pixel values for the Donut model."""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Greedy generation conditioned on the task prompt."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Strip special tokens and parse the Donut output into the answer string."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 9 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """Build a random benchmark case: 10 ints in [-1000, 1000] plus a target in [-5000, 5000].

    Named `make_dataset` because the module-level fixture below calls it by
    that name (the anonymous name previously used was never referenced).
    """
    arr = [randint(-1_0_0_0, 1_0_0_0) for _ in range(1_0)]
    target = randint(-5_0_0_0, 5_0_0_0)
    return (arr, target)
# Shared benchmark fixture. The timeit setup string in `solution_times` does
# `from __main__ import dataset, ...`, so this module-level name must be `dataset`.
dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute-force search for three values of `arr` that sum to `target`.

    Tries every ordered triplet (O(n^3)). Returns the matching triplet sorted
    ascending, or (0, 0, 0) when no triplet sums to `target`.

    Fixes the original signature, which declared the same parameter name twice
    (a SyntaxError) and summed the wrong variable.
    """
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer search for three values of `arr` that sum to `target`.

    Sorts a copy of the input (O(n log n)), then for each pivot moves a
    left/right pointer pair inward. Returns the matching triplet ascending,
    or (0, 0, 0) when none exists.

    Fixes the duplicate-parameter signature of the original and no longer
    mutates the caller's list (the original sorted it in place, which also
    skewed repeated benchmark runs).
    """
    nums = sorted(arr)  # work on a copy so the caller's data is untouched
    n = len(nums)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            current = nums[i] + nums[left] + nums[right]
            if current == target:
                return (nums[i], nums[left], nums[right])
            if current < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    """Benchmark both triplet-sum implementations with `timeit.repeat`.

    Each implementation is run 10000 times per sample, 5 samples each, against
    the module-level `dataset` fixture. Returns (best_naive, best_optimized),
    taking the minimum of the samples as the most representative timing.

    Named `solution_times` because the `__main__` block calls it by that name.
    """
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code_naive = """
triplet_sum1(*dataset)
"""
    test_code_optimized = """
triplet_sum2(*dataset)
"""
    times_naive = repeat(setup=setup_code, stmt=test_code_naive, repeat=5, number=1_0_0_0_0)
    times_optimized = repeat(setup=setup_code, stmt=test_code_optimized, repeat=5, number=1_0_0_0_0)
    return (min(times_naive), min(times_optimized))
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Bind the result to `times` — the prints below index that name, but the
    # original assigned to an unrelated identifier, raising NameError.
    times = solution_times()
    print(f"""The time for naive implementation is {times[0]}.""")
    print(f"""The time for optimized implementation is {times[1]}.""")
| 9 | 1 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def __magic_name__( args ):
    """Factory for the `transformers-cli convert` subcommand.

    Builds a `ConvertCommand` from parsed argparse arguments. The parameter is
    named `args` because the body reads `args.*` (the original declared a
    different parameter name, so every call raised NameError).
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name)
# Error message raised when a converter's TensorFlow-dependent import fails.
_UpperCAmelCase : int = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class a__ ( __A ):
    """`transformers-cli convert`: convert an original (mostly TensorFlow) checkpoint
    into a Transformers PyTorch checkpoint, dispatching on the model type string.
    """

    @staticmethod
    def _snake_case (__lowercase ):
        """Register the `convert` subcommand and its CLI arguments on the parser."""
        __lowerCAmelCase = parser.add_parser(
            '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
        train_parser.add_argument('''--model_type''' , type=__lowercase , required=__lowercase , help='''Model\'s type.''' )
        train_parser.add_argument(
            '''--tf_checkpoint''' , type=__lowercase , required=__lowercase , help='''TensorFlow checkpoint path or folder.''' )
        train_parser.add_argument(
            '''--pytorch_dump_output''' , type=__lowercase , required=__lowercase , help='''Path to the PyTorch saved model output.''' )
        train_parser.add_argument('''--config''' , type=__lowercase , default='''''' , help='''Configuration file path or folder.''' )
        train_parser.add_argument(
            '''--finetuning_task_name''' , type=__lowercase , default=__lowercase , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
        train_parser.set_defaults(func=__lowercase )

    def __init__(self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , *__lowercase , ):
        """Store conversion parameters: model type, checkpoint paths, config, task name."""
        __lowerCAmelCase = logging.get_logger('''transformers-cli/converting''' )
        self._logger.info(F"""Loading model {model_type}""" )
        __lowerCAmelCase = model_type
        __lowerCAmelCase = tf_checkpoint
        __lowerCAmelCase = pytorch_dump_output
        __lowerCAmelCase = config
        __lowerCAmelCase = finetuning_task_name

    def _snake_case (self ):
        """Run the conversion.

        Per-architecture converters are imported lazily inside each branch so
        that TensorFlow is only required for the families that need it; a
        failed import is re-raised with an installation hint.
        """
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(__lowercase )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(__lowercase )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(__lowercase )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "t5":
            try:
                from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(__lowercase )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(__lowercase )
            # A raw ".ckpt" path is passed as the checkpoint argument; anything
            # else is treated as a dataset/save file instead.
            if "ckpt" in self._tf_checkpoint.lower():
                __lowerCAmelCase = self._tf_checkpoint
                __lowerCAmelCase = ''''''
            else:
                __lowerCAmelCase = self._tf_checkpoint
                __lowerCAmelCase = ''''''
            convert_transfo_xl_checkpoint_to_pytorch(
                __lowercase , self._config , self._pytorch_dump_output , __lowercase )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
                    convert_gpta_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(__lowercase )
            convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(__lowercase )
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        else:
            raise ValueError(
                '''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
| 9 |
'''simple docstring'''
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1E-12,
    max_iterations: int = 1_0_0,
) -> tuple[float, np.ndarray]:
    """Estimate the dominant eigenpair of a matrix by power iteration.

    Args:
        input_matrix: square matrix; must be Hermitian when complex.
        vector: initial guess with a nonzero component along the dominant eigenvector.
        error_tol: stop when the relative change of the eigenvalue falls below this.
        max_iterations: hard cap on the number of iterations.

    Returns:
        (eigenvalue, eigenvector) for the largest-magnitude eigenvalue.

    The original signature declared the same parameter name four times (a
    SyntaxError); this restores the distinct names the body's caller
    (`test_power_iteration`) relies on.
    """
    # Square matrix, compatible vector, and matching real/complex dtypes.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # A complex matrix must be Hermitian so its eigenvalues are real.
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Iterate until the eigenvalue estimate stabilizes or we hit the cap.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1E12
    while not convergence:
        # Multiply the matrix by the vector and renormalize.
        w = np.dot(input_matrix, vector)
        vector = w / np.linalg.norm(w)
        # Rayleigh quotient (fast because the vector is already normalized).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Relative change of the eigenvalue estimate.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        # Hermitian input guarantees a real eigenvalue; drop the zero imag part.
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration() -> None:
    """Check `power_iteration` against `numpy.linalg.eigh` on real symmetric and
    complex Hermitian matrices.

    Fixes the original, which referenced the nonexistent `np.complexaaa` dtype
    and collapsed every local into one reused name.
    """
    real_input_matrix = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]])
    real_vector = np.array([4_1, 4, 2_0])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1J * complex_input_matrix, 1)
    # Make the matrix Hermitian: +i entries above the diagonal, -i below.
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([4_1, 4, 2_0]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        else:
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Reference: eigh handles symmetric/Hermitian matrices; eigenvalues are
        # returned ascending, so the last one (and column) is the largest.
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        eigen_value_max = eigen_values[-1]
        eigen_vector_max = eigen_vectors[:, -1]

        assert np.abs(eigen_value - eigen_value_max) <= 1E-6
        # Eigenvectors are unique only up to sign; compare absolute values.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1E-6
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Self-check: compares power_iteration against numpy.linalg.eigh.
    test_power_iteration()
| 9 | 1 |
'''simple docstring'''
# Type aliases for 3-D geometry. The original bound both aliases to the same
# unused name (the second silently overwrote the first); give them distinct,
# meaningful names instead.
Vector = tuple[float, float, float]
Point = tuple[float, float, float]
def create_vector(end_point1, end_point2):
    """Return the 3-D vector pointing from `end_point1` to `end_point2`.

    The original declared one parameter name twice (a SyntaxError) and so
    subtracted each coordinate from itself, always yielding (0, 0, 0). The
    name `create_vector` matches how the collinearity check below calls it.
    """
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)
def get_ad_vectors_cross(ab, ac):
    """Return the cross product ab x ac of two 3-D vectors.

    The original signature declared one parameter name twice (a SyntaxError)
    while the body already referenced `ab` and `ac`; this restores the
    intended parameters and the name the collinearity check calls.
    """
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector, accuracy):
    """Return True when every component of `vector` rounds to 0 at `accuracy` decimals.

    Restores distinct parameter names (the original repeated one name, a
    SyntaxError) and the per-component rounding the generator expression
    intended (`round(x, accuracy)`).
    """
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
def __magic_name__( point_a, point_b, point_c, accuracy = 1_0 ):
    """Return True when the three 3-D points are collinear.

    Builds the vectors AB and AC and checks whether their cross product is the
    zero vector (within `accuracy` decimal places). The original declared the
    same parameter name four times, which is a SyntaxError; the public name is
    kept unchanged for any external callers.
    """
    ab = create_vector(point_a, point_b)
    ac = create_vector(point_a, point_c)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)
| 9 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
# Module-level logger for this image processor.
_UpperCAmelCase : str = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute (x0, y0, x1, y1) pixel box onto the 0-1000 grid.

    Args:
        box: [x0, y0, x1, y1] in pixels.
        width: image width in pixels.
        height: image height in pixels.

    Named `normalize_box` because `apply_tesseract` below calls it by that
    name (the anonymous original name was never referenced).
    """
    return [
        int(1_0_0_0 * (box[0] / width)),
        int(1_0_0_0 * (box[1] / height)),
        int(1_0_0_0 * (box[2] / width)),
        int(1_0_0_0 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config = None):
    """Run Tesseract OCR on a document image.

    Args:
        image: input image in any format accepted by `to_pil_image`.
        lang: optional language hint forwarded to Tesseract.
        tesseract_config: extra Tesseract CLI flags; defaults to "".

    Returns:
        (words, normalized_boxes): recognized words and one [x0, y0, x1, y1]
        box per word, normalized to a 0-1000 grid.

    The original signature reused one parameter name three times (a
    SyntaxError) while the body referenced the intended names; this restores
    them and the `apply_tesseract` name the image processor calls.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # Tesseract operates on PIL images.
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # Filter empty words and their coordinates; a set gives O(1) membership tests.
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # Turn (left, top, width, height) into (left, top, left+width, top+height).
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # Finally, normalize the bounding boxes onto the 0-1000 grid.
    normalized_boxes = [normalize_box(box, image_width, image_height) for box in actual_boxes]

    # Explicit exception instead of `assert`, which is stripped under `-O`.
    if len(words) != len(normalized_boxes):
        raise ValueError("Not as many words as there are bounding boxes")
    return words, normalized_boxes
class a__ ( __A ):
    """Image processor for document-understanding models: resizes images, flips
    RGB to BGR, and optionally runs Tesseract OCR to extract words and
    normalized bounding boxes alongside the pixel values.
    """

    # Model input name produced by this processor.
    __UpperCamelCase : str = ['pixel_values']

    def __init__(self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = None , __lowercase = "" , **__lowercase , ):
        """Store resize/OCR defaults; size defaults to 224x224."""
        super().__init__(**__lowercase )
        __lowerCAmelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
        __lowerCAmelCase = get_size_dict(__lowercase )
        __lowerCAmelCase = do_resize
        __lowerCAmelCase = size
        __lowerCAmelCase = resample
        __lowerCAmelCase = apply_ocr
        __lowerCAmelCase = ocr_lang
        __lowerCAmelCase = tesseract_config

    def _snake_case (self , __lowercase , __lowercase , __lowercase = PILImageResampling.BILINEAR , __lowercase = None , **__lowercase , ):
        """Resize an image to the exact (height, width) given in `size`."""
        __lowerCAmelCase = get_size_dict(__lowercase )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        __lowerCAmelCase = (size['''height'''], size['''width'''])
        return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )

    def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ):
        """Preprocess a batch of images: optional OCR, resize, RGB->BGR flip,
        channel-format conversion; returns a BatchFeature (plus words/boxes
        when OCR is enabled)."""
        __lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
        __lowerCAmelCase = size if size is not None else self.size
        __lowerCAmelCase = get_size_dict(__lowercase )
        __lowerCAmelCase = resample if resample is not None else self.resample
        __lowerCAmelCase = apply_ocr if apply_ocr is not None else self.apply_ocr
        __lowerCAmelCase = ocr_lang if ocr_lang is not None else self.ocr_lang
        __lowerCAmelCase = tesseract_config if tesseract_config is not None else self.tesseract_config
        __lowerCAmelCase = make_list_of_images(__lowercase )
        if not valid_images(__lowercase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        # All transformations expect numpy arrays.
        __lowerCAmelCase = [to_numpy_array(__lowercase ) for image in images]
        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            __lowerCAmelCase = []
            __lowerCAmelCase = []
            for image in images:
                __lowerCAmelCase , __lowerCAmelCase = apply_tesseract(__lowercase , __lowercase , __lowercase )
                words_batch.append(__lowercase )
                boxes_batch.append(__lowercase )
        if do_resize:
            __lowerCAmelCase = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        __lowerCAmelCase = [flip_channel_order(__lowercase ) for image in images]
        __lowerCAmelCase = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
        __lowerCAmelCase = BatchFeature(data={'''pixel_values''': images} , tensor_type=__lowercase )
        if apply_ocr:
            __lowerCAmelCase = words_batch
            __lowerCAmelCase = boxes_batch
        return data
| 9 | 1 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class a__ ( __A ):
    """Dataset input stream that reads the result of a SQL query (or a whole
    table) into a `Dataset` via the packaged `Sql` builder.
    """

    def __init__(self , __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = False , **__lowercase , ):
        """Configure the underlying `Sql` builder with the query, connection,
        optional features schema, cache dir and in-memory flag."""
        super().__init__(features=__lowercase , cache_dir=__lowercase , keep_in_memory=__lowercase , **__lowercase )
        __lowerCAmelCase = Sql(
            cache_dir=__lowercase , features=__lowercase , sql=__lowercase , con=__lowercase , **__lowercase , )

    def _snake_case (self ):
        """Materialize the query: run download_and_prepare and return the
        resulting single 'train' split as a Dataset."""
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        self.builder.download_and_prepare(
            download_config=__lowercase , download_mode=__lowercase , verification_mode=__lowercase , base_path=__lowercase , )
        # Build dataset for splits
        __lowerCAmelCase = self.builder.as_dataset(
            split='''train''' , verification_mode=__lowercase , in_memory=self.keep_in_memory )
        return dataset
class a__ :
    """Writes a `Dataset` to a SQL table in batches, optionally in parallel
    across processes, using pandas `DataFrame.to_sql` per batch.
    """

    def __init__(self , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase = None , **__lowercase , ):
        """Store the dataset, target table name, connection, batch size and
        parallelism; extra kwargs are forwarded to `DataFrame.to_sql`."""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
        __lowerCAmelCase = dataset
        __lowerCAmelCase = name
        __lowerCAmelCase = con
        __lowerCAmelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        __lowerCAmelCase = num_proc
        __lowerCAmelCase = to_sql_kwargs

    def _snake_case (self ):
        """Run the export and return the number of rows written."""
        # `sql`/`con` are builder-level kwargs, not `to_sql` kwargs; drop them.
        __lowerCAmelCase = self.to_sql_kwargs.pop('''sql''' , __lowercase )
        __lowerCAmelCase = self.to_sql_kwargs.pop('''con''' , __lowercase )
        __lowerCAmelCase = self.to_sql_kwargs.pop('''index''' , __lowercase )
        __lowerCAmelCase = self._write(index=__lowercase , **self.to_sql_kwargs )
        return written

    def _snake_case (self , __lowercase ):
        """Write one batch: slice the Arrow table at `offset`, convert to a
        pandas DataFrame, and append it to the SQL table."""
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = args
        # After the first batch, subsequent batches must append rather than replace.
        __lowerCAmelCase = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
        __lowerCAmelCase = query_table(
            table=self.dataset.data , key=slice(__lowercase , offset + self.batch_size ) , indices=self.dataset._indices , )
        __lowerCAmelCase = batch.to_pandas()
        __lowerCAmelCase = df.to_sql(self.name , self.con , index=__lowercase , **__lowercase )
        return num_rows or len(__lowercase )

    def _snake_case (self , __lowercase , **__lowercase ):
        """Iterate the dataset in batches (serially or with a process pool when
        num_proc > 1), writing each batch and accumulating the row count."""
        __lowerCAmelCase = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
                written += self._batch_sql((offset, index, to_sql_kwargs) )
        else:
            __lowerCAmelCase , __lowerCAmelCase = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , __lowercase , __lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
                    written += num_rows
        return written
| 9 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=__A ):
    """Placeholder object used when `torch`/`scipy` are not installed.

    Any attempt to instantiate it or call a classmethod goes through
    `requires_backends`, which raises an error telling the user which
    backends to install.
    """

    # Backends required by the real implementation.
    __UpperCamelCase : int = ['torch', 'scipy']

    def __init__(self , *__lowercase , **__lowercase ):
        requires_backends(self , ['''torch''', '''scipy'''] )

    @classmethod
    def _snake_case (cls , *__lowercase , **__lowercase ):
        requires_backends(cls , ['''torch''', '''scipy'''] )

    @classmethod
    def _snake_case (cls , *__lowercase , **__lowercase ):
        requires_backends(cls , ['''torch''', '''scipy'''] )
| 9 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a__ ( __A ):
    """Agent tool that generates an English caption for an image using a BLIP
    image-captioning checkpoint.
    """

    # Default checkpoint loaded by this tool.
    __UpperCamelCase : List[Any] = 'Salesforce/blip-image-captioning-base'
    # Natural-language description the agent uses to pick this tool.
    __UpperCamelCase : Union[str, Any] = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    # Tool name, model class, input modalities and output modality.
    __UpperCamelCase : Union[str, Any] = 'image_captioner'
    __UpperCamelCase : Optional[int] = AutoModelForVisionaSeq
    __UpperCamelCase : Any = ['image']
    __UpperCamelCase : str = ['text']

    def __init__(self , *__lowercase , **__lowercase ):
        # Image preprocessing needs PIL; fail early if it is missing.
        requires_backends(self , ['''vision'''] )
        super().__init__(*__lowercase , **__lowercase )

    def _snake_case (self , __lowercase ):
        """Convert the input image into model-ready pixel values."""
        return self.pre_processor(images=__lowercase , return_tensors='''pt''' )

    def _snake_case (self , __lowercase ):
        """Generate caption token ids from the encoded image."""
        return self.model.generate(**__lowercase )

    def _snake_case (self , __lowercase ):
        """Decode the generated ids into a clean caption string."""
        return self.pre_processor.batch_decode(__lowercase , skip_special_tokens=__lowercase )[0].strip()
| 9 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class a__ ( unittest.TestCase ):
    """Helper that holds BridgeTower image-processor settings and computes the
    output sizes the processor is expected to produce.
    """

    def __init__(self , __lowercase , __lowercase = True , __lowercase = None , __lowercase = 32 , __lowercase = True , __lowercase = 1 / 2_55 , __lowercase = True , __lowercase = True , __lowercase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __lowercase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __lowercase = True , __lowercase=7 , __lowercase=30 , __lowercase=4_00 , __lowercase=3 , ):
        """Store the processor configuration and the fixture dimensions used to
        generate random test images."""
        __lowerCAmelCase = parent
        __lowerCAmelCase = do_resize
        __lowerCAmelCase = size if size is not None else {'''shortest_edge''': 2_88}
        __lowerCAmelCase = size_divisor
        __lowerCAmelCase = do_rescale
        __lowerCAmelCase = rescale_factor
        __lowerCAmelCase = do_normalize
        __lowerCAmelCase = do_center_crop
        __lowerCAmelCase = image_mean
        __lowerCAmelCase = image_std
        __lowerCAmelCase = do_pad
        __lowerCAmelCase = batch_size
        __lowerCAmelCase = num_channels
        __lowerCAmelCase = min_resolution
        __lowerCAmelCase = max_resolution

    def _snake_case (self ):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def _snake_case (self , __lowercase , __lowercase=False ):
        """Compute the (height, width) the processor should output.

        Mirrors the processor's shortest-edge resize, the 1333/800 max-size
        cap, and the rounding down to a multiple of `size_divisor`. For a
        batch, returns the per-dimension maxima (the padded batch shape).
        """
        if not batched:
            __lowerCAmelCase = self.size['''shortest_edge''']
            __lowerCAmelCase = image_inputs[0]
            if isinstance(__lowercase , Image.Image ):
                __lowerCAmelCase , __lowerCAmelCase = image.size
            else:
                __lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2]
            # Scale so the shorter side equals `size`.
            __lowerCAmelCase = size / min(__lowercase , __lowercase )
            if h < w:
                __lowerCAmelCase , __lowerCAmelCase = size, scale * w
            else:
                __lowerCAmelCase , __lowerCAmelCase = scale * h, size
            # Cap the longer side at (1333/800) * size, rescaling if exceeded.
            __lowerCAmelCase = int((13_33 / 8_00) * size )
            if max(__lowercase , __lowercase ) > max_size:
                __lowerCAmelCase = max_size / max(__lowercase , __lowercase )
                __lowerCAmelCase = newh * scale
                __lowerCAmelCase = neww * scale
            __lowerCAmelCase , __lowerCAmelCase = int(newh + 0.5 ), int(neww + 0.5 )
            # Round each dimension down to a multiple of size_divisor.
            __lowerCAmelCase , __lowerCAmelCase = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            __lowerCAmelCase = []
            for image in image_inputs:
                __lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            __lowerCAmelCase = max(__lowercase , key=lambda __lowercase : item[0] )[0]
            __lowerCAmelCase = max(__lowercase , key=lambda __lowercase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class a__ ( __A , unittest.TestCase ):
    """Tests for the BridgeTower image processor: property presence plus
    PIL / numpy / torch inputs, batched and unbatched.
    """

    __UpperCamelCase : Any = BridgeTowerImageProcessor if is_vision_available() else None

    def _snake_case (self ):
        """Set up the shared tester fixture."""
        __lowerCAmelCase = BridgeTowerImageProcessingTester(self )

    @property
    def _snake_case (self ):
        """Kwargs dict used to instantiate the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _snake_case (self ):
        """The processor exposes all configuration attributes."""
        __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase , '''image_mean''' ) )
        self.assertTrue(hasattr(__lowercase , '''image_std''' ) )
        self.assertTrue(hasattr(__lowercase , '''do_normalize''' ) )
        self.assertTrue(hasattr(__lowercase , '''do_resize''' ) )
        self.assertTrue(hasattr(__lowercase , '''size''' ) )
        self.assertTrue(hasattr(__lowercase , '''size_divisor''' ) )

    def _snake_case (self ):
        # Intentionally left empty.
        pass

    def _snake_case (self ):
        """PIL inputs produce tensors of the expected shape (single and batched)."""
        # Initialize image processor
        __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , Image.Image )
        # Test not batched input
        __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
        __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def _snake_case (self ):
        """Numpy inputs produce tensors of the expected shape (single and batched)."""
        # Initialize image processor
        __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , np.ndarray )
        # Test not batched input
        __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
        __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def _snake_case (self ):
        """Torch inputs produce tensors of the expected shape (single and batched)."""
        # Initialize image processor
        __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , torch.Tensor )
        # Test not batched input
        __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
        __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
| 9 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy import structure: maps each submodule to the public names it exports.
_UpperCAmelCase : Any = {
    """configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
# Modeling code is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : int = [
        """TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TimesformerModel""",
        """TimesformerForVideoClassification""",
        """TimesformerPreTrainedModel""",
    ]
# For static type checkers, import everything eagerly; at runtime, replace the
# module with a _LazyModule that resolves attributes on first access.
if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    _UpperCAmelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 9 |
'''simple docstring'''
# Imports
import numpy as np
class IndexCalculation:
    """
    Compute vegetation indices from per-band reflectance data.

    Band values (``red``, ``green``, ``blue``, ``redEdge``, ``nir``) may be scalars
    or numpy arrays; every index formula is element-wise arithmetic, so arrays
    broadcast naturally.  Use :meth:`calculation` with an index name such as
    ``"NDVI"``, or call the individual index methods directly.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        # Store whichever bands were supplied; missing bands remain unset
        # until provided via set_matricies()/calculation().
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """Update the stored band matrices; only non-None arguments are stored. Returns True."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """
        Dispatch to the index method named ``index``.

        Band arguments given here update the stored matrices first.  Returns the
        computed index, or False (after printing a message) for an unknown name.
        """
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvaa,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print('''Index not in the list!''')
            return False

    def arvaa(self):
        """Atmospherically Resistant Vegetation Index 2."""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        """Canopy Chlorophyll Content Index."""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        """Chlorophyll Vegetation Index."""
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        """Green Leaf Index."""
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        """Normalized Difference Vegetation Index."""
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        """Blue-band NDVI."""
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        """Red-edge NDVI."""
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        """Green-band NDVI."""
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        """Green-Blue NDVI."""
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        """Green-Red NDVI."""
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        """Red-Blue NDVI."""
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        """Pan NDVI."""
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, a=0.08, b=1.22, x=0.03):
        """Adjusted Transformed Soil-Adjusted VI."""
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        """Blue-Wide Dynamic Range Vegetation Index."""
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        """Chlorophyll Index - green band."""
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        """Chlorophyll Index - red-edge band."""
        return (self.nir / self.redEdge) - 1

    def ci(self):
        """Coloration Index."""
        return (self.red - self.blue) / self.red

    def ctvi(self):
        """Corrected Transformed Vegetation Index."""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        """Green Difference Vegetation Index."""
        return self.nir - self.green

    def evi(self):
        """Enhanced Vegetation Index."""
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        """Global Environment Monitoring Index."""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        """Green Optimized Soil-Adjusted VI (y is the soil adjustment factor)."""
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        """Green Soil-Adjusted VI (n is the soil adjustment factor)."""
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        """Hue derived from the RGB bands."""
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        """Ideal Vegetation Index with intercept b and slope a."""
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        """Infrared Percentage Vegetation Index."""
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        """Intensity of the RGB bands."""
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        """Ratio Vegetation Index."""
        return self.nir / self.red

    def mrvi(self):
        """Modified RVI."""
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        """Modified Soil-Adjusted Vegetation Index."""
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        """Normalized green band."""
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        """Normalized NIR band."""
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        """Normalized red band."""
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        """Normalized Green-Red Difference Index."""
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        """Redness Index."""
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        """Saturation of the RGB bands."""
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        """Shape index (IF)."""
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        """Difference (ratio) Vegetation Index: NIR/red."""
        return self.nir / self.red

    def tvi(self):
        """Transformed Vegetation Index."""
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        """Normalized Difference Red-Edge index."""
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 9 | 1 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
# Module logger (logging helper comes from the package-level utils import above).
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob=0.0, training=False):
    """
    Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks.

    Returns ``input`` unchanged when ``drop_prob`` is 0 or the module is in eval mode;
    otherwise zeroes whole samples with probability ``drop_prob`` and rescales the
    survivors by ``1 / keep_prob`` so the expectation is preserved.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One random value per sample, broadcast over all remaining dims (works for any rank, not just 2D ConvNets).
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob=None):
        super().__init__()
        # Probability of dropping a whole sample's residual path.
        self.drop_prob = drop_prob

    def forward(self, hidden_states):
        # Delegate to the functional drop_path with this module's training flag.
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self):
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """
    Construct patch embeddings: a strided convolution that downsamples the input,
    optionally followed by a normalization layer.
    """

    def __init__(self, patch_size, stride, padding, num_channels, hidden_size, norm_layer=None):
        super().__init__()
        # Accept a single int or an iterable for each spatial hyper-parameter.
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group, i.e. LayerNorm over the channel dim of (B, C, H, W) input."""

    def __init__(self, num_channels, **kwargs):
        # PoolFormer always uses num_groups=1.
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    """Token mixer of PoolFormer: average pooling minus the identity."""

    def __init__(self, pool_size):
        super().__init__()
        # stride=1 + padding keeps the spatial size; count_include_pad=False avoids border bias.
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtracting the input leaves only the "mixing" residual contribution.
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    """Channel MLP of a PoolFormer block: two 1x1 convolutions with activation and drop-path."""

    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        # config.hidden_act may be a string key into the activation table or a callable.
        if isinstance(config.hidden_act, str):
            self.act_fn = ACTaFN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """One PoolFormer block: pooling token mixer + channel MLP, each in a residual branch."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets: stochastic depth on both residual branches.
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            # Learnable per-channel scaling of each residual branch.
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    """Stack of PoolFormer stages: a patch-embedding stem followed by PoolFormer layers per stage."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule: linearly increasing drop-path rate per layer
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings: one downsampling stem per encoder block
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading
    and loading pretrained PoolFormer models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights of a single submodule."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        # Only the encoder supports gradient checkpointing.
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
_UpperCAmelCase : Any = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase : List[Any] = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.',
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    """Bare PoolFormer encoder without any task head."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # NOTE(review): self.embeddings is never assigned in __init__; this attribute path
        # appears unreachable as written — confirm against upstream before relying on it.
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='''vision''',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values=None, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError('''You have to specify pixel_values''')

        encoder_outputs = self.encoder(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    """A single dense projection applied to the encoder's final hidden states."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    '\n    PoolFormer Model transformer with an image classification head on top\n    ',
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    """PoolFormer with a final GroupNorm and a linear classifier over pooled features."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        sequence_output = outputs[0]

        # Global average pool over the spatial dims, then classify.
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels and the label dtype.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = '''single_label_classification'''
                else:
                    self.config.problem_type = '''multi_label_classification'''

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 9 |
'''simple docstring'''
from math import sqrt
def is_prime(number):
    """Return True iff ``number`` (a non-negative int) is prime, by trial division up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status
def sieve_er(n):
    """Return all primes from 2 up to ``n`` using the sieve of Eratosthenes."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes: zero out every multiple of a surviving entry.
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def get_prime_numbers(n):
    """Return all primes from 2 up to ``n`` by testing each candidate with is_prime()."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def prime_factorization(number):
    """Return the list of prime factors of ``number`` (with multiplicity); [0] or [1] for 0 and 1."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def greatest_prime_factor(number):
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def smallest_prime_factor(number):
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def is_even(number):
    """Return True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0
def is_odd(number):
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0
def goldbach(number):
    """Return two primes whose sum is ``number`` (Goldbach's conjecture); requires an even number > 2."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def gcd(number1, number2):
    """Return the greatest common divisor of two non-negative integers (Euclidean algorithm)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
def kg_v(number1, number2):
    """Return the least common multiple (kgV) of two positive integers via prime factorization."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                # a shared factor contributes its maximal multiplicity.
                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def get_prime(n):
    """Return the n-th prime number (0-indexed: get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans
def get_primes_between(p_number_1, p_number_2):
    """Return all primes strictly between the two prime arguments (exclusive of both)."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n):
    """Return all positive divisors of ``n`` (including 1 and n), in increasing order."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans
def is_perfect_number(number):
    """Return True iff ``number`` equals the sum of its proper divisors (e.g. 6, 28)."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator, denominator):
    """Return (numerator, denominator) reduced to lowest terms by dividing out the gcd."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n):
    """Return n! for a non-negative integer ``n`` (factorial(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
def fib(n):
    """Return the n-th Fibonacci number of the sequence 1, 1, 2, 3, 5, ... (fib(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
| 9 | 1 |
'''simple docstring'''
def solution(n=1000):
    """
    Project Euler 57: count how many of the first ``n`` continued-fraction
    expansions of sqrt(2) have a numerator with more digits than the denominator.

    Each step maps (num, den) -> (num + 2*den, num + den), starting from 1/1.
    Returns the count of qualifying expansions among steps 1..n.
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 9 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Dict = """true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Build a seeded RegressionModel, its DDP-prepared copy and a prepared DataLoader."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    # Only the copy and the dataloader go through accelerator.prepare;
    # the plain model stays as the single-process baseline.
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    """Tokenize the GLUE/MRPC validation split and return a padded DataLoader."""
    tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''')
    dataset = load_dataset('''glue''', '''mrpc''', split='''validation''')

    def tokenize_function(examples):
        # NOTE(review): truncation/max_length values were lost in obfuscation;
        # upstream uses truncation=True, max_length=None — confirm.
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )

    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # Either dynamic padding per batch or fixed-length padding to 128 tokens.
        if use_longest:
            return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''max_length''', max_length=128, return_tensors='''pt''')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Build a classification model + dataloader pair in both plain and accelerator-prepared form."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        '''hf-internal-testing/mrpc-bert-base-cased''', return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    # "no" is the single-device baseline; "ddp" is the accelerator-prepared setup.
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = []
for batch in dataloader:
__lowerCAmelCase , __lowerCAmelCase = batch.values()
with torch.no_grad():
__lowerCAmelCase = model(lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((logit, target))
logits_and_targets.append((logit, target))
__lowerCAmelCase , __lowerCAmelCase = [], []
for logit, targ in logits_and_targets:
logits.append(lowerCamelCase)
targs.append(lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = torch.cat(lowerCamelCase), torch.cat(lowerCamelCase)
return logits, targs
def __magic_name__(accelerator, num_samples=8_2, dispatch_batches=False, split_batches=False, batch_size=1_6):
    """Check that gather_for_metrics yields exactly *num_samples* predictions.

    NOTE(review): parameter names reconstructed — the mangled original
    repeated one name five times (a SyntaxError). ``get_basic_setup`` /
    ``generate_predictions`` are the (mangled) siblings above — verify.
    """
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), F"""Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"""


def __magic_name__(dispatch_batches=False, split_batches=False):
    """Compare single-process vs distributed MRPC metrics; they must match."""
    metric = evaluate.load('''glue''', '''mrpc''')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['''no''']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['''labels'''])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup['''ddp''']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['''labels''']
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def __magic_name__():
    """Entry point: exercise gather_for_metrics across batching configurations.

    NOTE(review): locals reconstructed from the mangled original; the inner
    calls ``test_mrpc`` / ``test_torch_metrics`` / ``main`` refer to siblings
    whose defs were mangled to ``__magic_name__`` in this file — verify.
    The initial split/dispatch flags are presumably False — TODO confirm.
    """
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('''**Testing gather_for_metrics**''')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test torch metrics**''')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
            test_torch_metrics(accelerator, 9_9)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test last batch is not dropped when perfectly divisible**''')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 5_1_2)
    accelerator.state._reset_state()


def __magic_name__(index):
    """xla_spawn (TPU) entry point; *index* is the process ordinal (unused)."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 9 | 1 |
'''simple docstring'''
from __future__ import annotations
import bisect
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = 0, lowerCamelCase = -1):
if hi < 0:
__lowerCAmelCase = len(lowerCamelCase)
while lo < hi:
__lowerCAmelCase = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__lowerCAmelCase = mid + 1
else:
__lowerCAmelCase = mid
return lo
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = 0, lowerCamelCase = -1):
if hi < 0:
__lowerCAmelCase = len(lowerCamelCase)
while lo < hi:
__lowerCAmelCase = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__lowerCAmelCase = mid + 1
else:
__lowerCAmelCase = mid
return lo
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = 0, lowerCamelCase = -1):
sorted_collection.insert(bisect_left(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase), lowerCamelCase)
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = 0, lowerCamelCase = -1):
sorted_collection.insert(bisect_right(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase), lowerCamelCase)
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = 0
__lowerCAmelCase = len(lowerCamelCase) - 1
while left <= right:
__lowerCAmelCase = left + (right - left) // 2
__lowerCAmelCase = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__lowerCAmelCase = midpoint - 1
else:
__lowerCAmelCase = midpoint + 1
return None
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = bisect.bisect_left(lowerCamelCase, lowerCamelCase)
if index != len(lowerCamelCase) and sorted_collection[index] == item:
return index
return None
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase):
if right < left:
return None
__lowerCAmelCase = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowerCamelCase, lowerCamelCase, lowerCamelCase, midpoint - 1)
else:
return binary_search_by_recursion(lowerCamelCase, lowerCamelCase, midpoint + 1, lowerCamelCase)
if __name__ == "__main__":
    # Interactive demo: read a comma-separated list and a target, then search.
    # NOTE(review): machine-mangled — every assignment targets _UpperCAmelCase
    # while the reads below use the intended names (user_input/collection/
    # target/result), and ``binary_search`` is not defined under that name in
    # this file; verify before running.
    _UpperCAmelCase : List[Any] = input("""Enter numbers separated by comma:\n""").strip()
    _UpperCAmelCase : Any = sorted(int(item) for item in user_input.split(""","""))
    _UpperCAmelCase : str = int(input("""Enter a single number to be found in the list:\n"""))
    _UpperCAmelCase : Dict = binary_search(collection, target)
    if result is None:
        print(f"""{target} was not found in {collection}.""")
    else:
        print(f"""{target} was found at position {result} in {collection}.""")
| 9 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
_UpperCAmelCase : str = logging.get_logger(__name__)

# Canonical RoBERTa checkpoint names mapped to their hosted config.json files.
# NOTE(review): the variable names are machine-mangled; upstream this map is
# ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP — verify against callers.
_UpperCAmelCase : str = {
    """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
    """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
    """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
    """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
    """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
    """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class a__ ( __A ):
    """RoBERTa model configuration (mirror of ``transformers.RobertaConfig``).

    NOTE(review): the mangled original declared every ``__init__`` parameter
    with the same name (a SyntaxError) and bound each attribute to a throwaway
    local instead of ``self``; parameter and attribute names are reconstructed
    from the stock RobertaConfig, whose defaults match the values shown here.
    """

    __UpperCamelCase : str = 'roberta'

    def __init__(self, vocab_size=5_02_65, hidden_size=7_68, num_hidden_layers=12, num_attention_heads=12, intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=2, initializer_range=0.0_2, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class a__ ( __A ):
    """ONNX export configuration for RoBERTa: declares the model's dynamic
    input axes per task."""

    @property
    def _snake_case (self ):
        """Return an ordered mapping of model input names to dynamic axes.

        Multiple-choice inputs carry an extra ``choice`` axis; the mangled
        original never bound ``dynamic_axis`` (NameError), fixed here.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 9 | 1 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Dict = """true"""
def __magic_name__(accelerator, num_samples=8_2, batch_size=1_6):
    """Build a RegressionModel, a prepared (DDP) copy, and its dataloader.

    NOTE(review): the mangled original declared all three parameters with the
    same name (a SyntaxError) and bound every local to one throwaway name;
    names are reconstructed from how the body uses each value.
    """
    set_seed(4_2)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def __magic_name__(accelerator, use_longest=False):
    """Return a tokenized GLUE/MRPC validation dataloader (helper: get_dataloader).

    When *use_longest* is True the collator pads to the longest item in the
    batch, otherwise to a fixed max_length of 128.
    """
    tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''')
    dataset = load_dataset('''glue''', '''mrpc''', split='''validation''')

    def tokenize_function(examples):
        # Truncate sentence pairs; padding is deferred to the collator.
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''max_length''', max_length=1_2_8, return_tensors='''pt''')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=1_6)


def __magic_name__(dispatch_batches, split_batches):
    """Build the MRPC model/dataloader both plain and accelerator-prepared.

    NOTE(review): ``get_dataloader`` below is the sibling helper above, whose
    def was mangled to ``__magic_name__`` in this file — verify the reference.
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        '''hf-internal-testing/mrpc-bert-base-cased''', return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def __magic_name__(model, dataloader, accelerator):
    """Run no-grad inference over *dataloader*, gathering logits and targets
    across processes with ``gather_for_metrics``; returns two stacked tensors."""
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def __magic_name__(accelerator, num_samples=8_2, dispatch_batches=False, split_batches=False, batch_size=1_6):
    """Check that gather_for_metrics yields exactly *num_samples* predictions.

    NOTE(review): parameter names reconstructed — the mangled original
    repeated one name five times (a SyntaxError). ``get_basic_setup`` /
    ``generate_predictions`` are the (mangled) siblings above — verify.
    """
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), F"""Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"""


def __magic_name__(dispatch_batches=False, split_batches=False):
    """Compare single-process vs distributed MRPC metrics; they must match."""
    metric = evaluate.load('''glue''', '''mrpc''')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['''no''']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['''labels'''])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup['''ddp''']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['''labels''']
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def __magic_name__():
    """Entry point: exercise gather_for_metrics across batching configurations.

    NOTE(review): locals reconstructed from the mangled original; the inner
    calls ``test_mrpc`` / ``test_torch_metrics`` / ``main`` refer to siblings
    whose defs were mangled to ``__magic_name__`` in this file — verify.
    The initial split/dispatch flags are presumably False — TODO confirm.
    """
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('''**Testing gather_for_metrics**''')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test torch metrics**''')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
            test_torch_metrics(accelerator, 9_9)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test last batch is not dropped when perfectly divisible**''')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 5_1_2)
    accelerator.state._reset_state()


def __magic_name__(index):
    """xla_spawn (TPU) entry point; *index* is the process ordinal (unused)."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 9 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = old_name
if "patch_embed" in old_name:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = old_name.split('''.''')
if layer == "0":
__lowerCAmelCase = old_name.replace('''0''', '''convolution1''')
elif layer == "1":
__lowerCAmelCase = old_name.replace('''1''', '''batchnorm_before''')
elif layer == "3":
__lowerCAmelCase = old_name.replace('''3''', '''convolution2''')
else:
__lowerCAmelCase = old_name.replace('''4''', '''batchnorm_after''')
if "network" in old_name and re.search(r'''\d\.\d''', lowerCamelCase):
__lowerCAmelCase = r'''\b\d{2}\b'''
if bool(re.search(lowerCamelCase, lowerCamelCase)):
__lowerCAmelCase = re.search(r'''\d\.\d\d.''', lowerCamelCase).group()
else:
__lowerCAmelCase = re.search(r'''\d\.\d.''', lowerCamelCase).group()
if int(match[0]) < 6:
__lowerCAmelCase = old_name.replace(lowerCamelCase, '''''')
__lowerCAmelCase = trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1])
__lowerCAmelCase = '''intermediate_stages.''' + trimmed_name
else:
__lowerCAmelCase = old_name.replace(lowerCamelCase, '''''')
if int(match[2]) < num_meta4D_last_stage:
__lowerCAmelCase = trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2])
else:
__lowerCAmelCase = str(int(match[2]) - num_meta4D_last_stage)
__lowerCAmelCase = trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index)
if "norm1" in old_name:
__lowerCAmelCase = trimmed_name.replace('''norm1''', '''layernorm1''')
elif "norm2" in old_name:
__lowerCAmelCase = trimmed_name.replace('''norm2''', '''layernorm2''')
elif "fc1" in old_name:
__lowerCAmelCase = trimmed_name.replace('''fc1''', '''linear_in''')
elif "fc2" in old_name:
__lowerCAmelCase = trimmed_name.replace('''fc2''', '''linear_out''')
__lowerCAmelCase = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(r'''.\d.''', lowerCamelCase):
__lowerCAmelCase = old_name.replace('''network''', '''intermediate_stages''')
if "fc" in new_name:
__lowerCAmelCase = new_name.replace('''fc''', '''convolution''')
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__lowerCAmelCase = new_name.replace('''norm1''', '''batchnorm_before''')
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__lowerCAmelCase = new_name.replace('''norm2''', '''batchnorm_after''')
if "proj" in new_name:
__lowerCAmelCase = new_name.replace('''proj''', '''projection''')
if "dist_head" in new_name:
__lowerCAmelCase = new_name.replace('''dist_head''', '''distillation_classifier''')
elif "head" in new_name:
__lowerCAmelCase = new_name.replace('''head''', '''classifier''')
elif "patch_embed" in new_name:
__lowerCAmelCase = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__lowerCAmelCase = new_name.replace('''norm''', '''layernorm''')
__lowerCAmelCase = '''efficientformer.''' + new_name
else:
__lowerCAmelCase = '''efficientformer.encoder.''' + new_name
return new_name
def __magic_name__(checkpoint, num_meta4D_last_stage):
    """Rename every key of *checkpoint* in place via ``rename_key`` and return it.

    NOTE(review): the mangled original duplicated both parameter names (a
    SyntaxError) and dropped the renamed re-insertion; reconstructed from the
    upstream conversion script. ``rename_key`` is the sibling defined above
    (mangled to ``__magic_name__`` in this file) — verify the reference.
    """
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def __magic_name__( ):
    """Fetch the standard COCO val2017 test image (two cats) used to
    sanity-check the conversion.

    NOTE(review): the mangled original bound both intermediates to one
    throwaway local and returned an undefined ``image``; names restored
    (streaming GET, presumably ``stream=True`` — TODO confirm upstream).
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def __magic_name__(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    """Convert an original EfficientFormer checkpoint to HF format, verify its
    logits against known values, save it, and optionally push to the Hub.

    NOTE(review): the mangled original repeated one parameter name four times
    (a SyntaxError); names reconstructed from how the body and the argparse
    driver below use each value.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = '''_'''.join(checkpoint_path.split('''/''')[-1].split('''.''')[0].split('''_''')[:-1])
    # presumably config.num_metaad_blocks is the (mangled) meta-3D block count — TODO confirm
    num_meta4D_last_stage = config.depths[-1] - config.num_metaad_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 2_5_6
    crop_size = 2_2_4
    processor = EfficientFormerImageProcessor(
        size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], )
    pixel_values = processor(images=image, return_tensors='''pt''').pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings['''bicubic''']),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ])
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # The HF processor must reproduce the original torchvision pipeline exactly.
    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1_0_0_0)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28])
        assert torch.allclose(logits[0, :1_0], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27])
        assert torch.allclose(logits[0, :1_0], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78])
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""")

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")
    processor.save_pretrained(pytorch_dump_path)
    print(F"""Processor successfuly saved at {pytorch_dump_path}""")
    if push_to_hub:
        print('''Pushing model to the hub...''')
        model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add model''', use_temp_dir=True, )
        processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add image processor''', use_temp_dir=True, )
if __name__ == "__main__":
    # CLI driver for the conversion above.
    # NOTE(review): machine-mangled — the parser is assigned to _UpperCAmelCase
    # but used below as ``parser``, and ``convert_efficientformer_checkpoint``
    # is not defined under that name in this file; verify before running.
    _UpperCAmelCase : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to EfficientFormer pytorch checkpoint.""",
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for EfficientFormer model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    # Pushing defaults to on; --no-push_to_hub opts out.
    parser.set_defaults(push_to_hub=True)
    _UpperCAmelCase : List[str] = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 9 | 1 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = [0] * no_of_processes
__lowerCAmelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(lowerCamelCase):
__lowerCAmelCase = burst_time[i]
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
__lowerCAmelCase = []
__lowerCAmelCase = -1
for i in range(lowerCamelCase):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(lowerCamelCase)
if len(lowerCamelCase) > 0:
__lowerCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__lowerCAmelCase = i
total_time += burst_time[target_process]
completed += 1
__lowerCAmelCase = 0
__lowerCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = [0] * no_of_processes
for i in range(lowerCamelCase):
__lowerCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
    # Demo: SJF over four simultaneous arrivals, then print a waiting /
    # turnaround table. NOTE(review): machine-mangled — assignments target
    # _UpperCAmelCase while reads use the intended names (no_of_processes,
    # burst_time, arrival_time, waiting_time, turn_around_time), and the
    # calculate_* functions are not defined under those names in this file;
    # verify before running.
    print("""[TEST CASE 01]""")
    _UpperCAmelCase : Tuple = 4
    _UpperCAmelCase : Optional[Any] = [2, 5, 3, 7]
    _UpperCAmelCase : Optional[Any] = [0, 0, 0, 0]
    _UpperCAmelCase : List[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    _UpperCAmelCase : Union[str, Any] = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
    # Printing the Result
    print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
            f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
        )
    print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
    print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 9 |
'''simple docstring'''
from __future__ import annotations
import math
def __magic_name__( lowerCamelCase, lowerCamelCase):
if len(lowerCamelCase) != 2 or len(a[0]) != 2 or len(lowerCamelCase) != 2 or len(b[0]) != 2:
raise Exception('''Matrices are not 2x2''')
__lowerCAmelCase = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def __magic_name__( lowerCamelCase, lowerCamelCase):
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
for row in range(len(lowerCamelCase))
]
def __magic_name__( lowerCamelCase, lowerCamelCase):
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
for row in range(len(lowerCamelCase))
]
def __magic_name__( lowerCamelCase):
if len(lowerCamelCase) % 2 != 0 or len(a[0]) % 2 != 0:
raise Exception('''Odd matrices are not supported!''')
__lowerCAmelCase = len(lowerCamelCase)
__lowerCAmelCase = matrix_length // 2
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase, lowerCamelCase)] for i in range(lowerCamelCase)]
__lowerCAmelCase = [
[a[i][j] for j in range(lowerCamelCase, lowerCamelCase)] for i in range(lowerCamelCase, lowerCamelCase)
]
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase)] for i in range(lowerCamelCase)]
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase)] for i in range(lowerCamelCase, lowerCamelCase)]
return top_left, top_right, bot_left, bot_right
def __magic_name__( lowerCamelCase):
return len(lowerCamelCase), len(matrix[0])
def __magic_name__( lowerCamelCase):
print('''\n'''.join(str(lowerCamelCase) for line in matrix))
def __magic_name__(matrix_a, matrix_b):
    """Recursively multiply two power-of-two square matrices with Strassen's
    seven-product scheme.

    NOTE(review): the mangled original bound every intermediate to one
    throwaway local and recursed through the undefined name
    ``actual_strassen``; the intermediates are reconstructed from the
    standard Strassen formulas, which the original's visible quadrant
    combination matches. The helpers (matrix_dimensions, split_matrix,
    matrix_addition, matrix_subtraction, default_matrix_multiplication) are
    referenced exactly as the original file does — verify they resolve.
    """
    # Base case: 2x2 blocks are multiplied directly.
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = __magic_name__(a, matrix_subtraction(f, h))                        # a(f-h)
    t2 = __magic_name__(matrix_addition(a, b), h)                           # (a+b)h
    t3 = __magic_name__(matrix_addition(c, d), e)                           # (c+d)e
    t4 = __magic_name__(d, matrix_subtraction(g, e))                        # d(g-e)
    t5 = __magic_name__(matrix_addition(a, d), matrix_addition(e, h))       # (a+d)(e+h)
    t6 = __magic_name__(matrix_subtraction(b, d), matrix_addition(g, h))    # (b-d)(g+h)
    t7 = __magic_name__(matrix_subtraction(a, c), matrix_addition(e, f))    # (a-c)(e+f)

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def __magic_name__(matrixa, matrixb):
    """Multiply two matrices via Strassen's algorithm, zero-padding both to
    the next power-of-two square size and trimming the padding afterwards.

    NOTE(review): the mangled original duplicated both parameter names (a
    SyntaxError), referenced ``math.loga`` (which does not exist — intended
    ``math.log2``) and collapsed every local; names are reconstructed.
    HACK(review): the early return of ``[matrixa, matrixb]`` when both
    operands are square returns the operands, not their product — this looks
    wrong but mirrors the visible original; confirm before changing.
    """
    if matrix_dimensions(matrixa)[1] != matrix_dimensions(matrixb)[0]:
        msg = (
            '''Unable to multiply these matrices, please check the dimensions.\n'''
            F"""Matrix A: {matrixa}\n"""
            F"""Matrix B: {matrixb}"""
        )
        raise Exception(msg)
    dimensiona = matrix_dimensions(matrixa)
    dimensionb = matrix_dimensions(matrixb)
    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]
    maxim = max(*dimensiona, *dimensionb)
    maxim = int(math.pow(2, math.ceil(math.log2(maxim))))
    new_matrixa = matrixa
    new_matrixb = matrixb
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1], maxim):
                new_matrixa[i].append(0)
        else:
            new_matrixa.append([0] * maxim)
        if i < dimensionb[0]:
            for _ in range(dimensionb[1], maxim):
                new_matrixb[i].append(0)
        else:
            new_matrixb.append([0] * maxim)
    final_matrix = actual_strassen(new_matrixa, new_matrixb)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # Demo: a 10x4 matrix times a 4x4 matrix.
    # NOTE(review): machine-mangled — both print arguments are the mangled
    # ``matrixa`` (presumably the second should be the 4x4 operand), and
    # ``strassen`` is not defined under that name in this file; verify
    # before running.
    _UpperCAmelCase : List[str] = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    _UpperCAmelCase : Optional[Any] = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
    print(strassen(matrixa, matrixa))
| 9 | 1 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Optional[int] = {
"""vocab_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
},
"""merges_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""Salesforce/codegen-350M-mono""": (
"""https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : List[str] = {
"""Salesforce/codegen-350M-mono""": 2_0_4_8,
}
class a__ ( __A ):
    """Fast (Rust-backed) byte-level BPE tokenizer for Salesforce CodeGen
    checkpoints, with a decode-time ``truncate`` pass that cuts generated code
    at caller-supplied regex patterns.

    NOTE(review): throughout this class, computed values are bound to a
    throwaway local (``__lowerCAmelCase``) while later lines read names such as
    ``kwargs``, ``model_id``, ``pre_tok_state`` and ``add_prefix_space``, and
    several ``def`` signatures repeat the parameter name ``__lowercase`` (a
    SyntaxError as written).  These look like mangled assignments/parameters;
    confirm against the upstream transformers source before relying on runtime
    behaviour.
    """
    __UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
    __UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
    __UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCamelCase : Union[str, Any] = ['input_ids', 'attention_mask']
    __UpperCamelCase : Optional[int] = CodeGenTokenizer
    def __init__(self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="<|endoftext|>" , __lowercase="<|endoftext|>" , __lowercase="<|endoftext|>" , __lowercase=False , **__lowercase , ):
        """Build the fast tokenizer; rejects ``add_bos_token`` (unsupported by the
        fast backend) and rebuilds the pre-tokenizer when the requested
        ``add_prefix_space`` differs from the serialized state."""
        super().__init__(
            __lowercase , __lowercase , tokenizer_file=__lowercase , unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , add_prefix_space=__lowercase , **__lowercase , )
        if kwargs.pop('''add_bos_token''' , __lowercase ):
            __lowerCAmelCase = kwargs.pop('''name_or_path''' , '''''' )
            raise ValueError(
                '''Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'''
                '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
                F"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
                F"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
                '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
                ''' so that the fast tokenizer works correctly.''' )
        __lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , __lowercase ) != add_prefix_space:
            # Re-instantiate the pre-tokenizer with the requested prefix-space flag.
            __lowerCAmelCase = getattr(__lowercase , pre_tok_state.pop('''type''' ) )
            __lowerCAmelCase = add_prefix_space
            __lowerCAmelCase = pre_tok_class(**__lowercase )
        __lowerCAmelCase = add_prefix_space
    def _snake_case (self , *__lowercase , **__lowercase ):
        """Batch-encode; pretokenized input requires ``add_prefix_space=True``."""
        __lowerCAmelCase = kwargs.get('''is_split_into_words''' , __lowercase )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*__lowercase , **__lowercase )
    def _snake_case (self , *__lowercase , **__lowercase ):
        """Encode a single example; same ``add_prefix_space`` precondition as above."""
        __lowerCAmelCase = kwargs.get('''is_split_into_words''' , __lowercase )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*__lowercase , **__lowercase )
    def _snake_case (self , __lowercase , __lowercase = None ):
        """Save the tokenizer model files to a directory and return their paths."""
        __lowerCAmelCase = self._tokenizer.model.save(__lowercase , name=__lowercase )
        return tuple(__lowercase )
    def _snake_case (self , __lowercase , __lowercase = False , __lowercase = None , __lowercase = None , **__lowercase , ):
        """Decode token ids to text; when ``truncate_before_pattern`` is given and
        non-empty, pass the decoded text through ``truncate``."""
        __lowerCAmelCase = super().decode(
            token_ids=__lowercase , skip_special_tokens=__lowercase , clean_up_tokenization_spaces=__lowercase , **__lowercase , )
        if truncate_before_pattern is not None and len(__lowercase ) > 0:
            __lowerCAmelCase = self.truncate(__lowercase , __lowercase )
        return decoded_text
    def _snake_case (self , __lowercase , __lowercase ):
        """Cut ``completion`` at the earliest match of any pattern in
        ``truncate_before_pattern``, and also before a second line-initial
        ``print`` or ``def`` statement (MULTILINE anchors)."""
        def find_re(__lowercase , __lowercase , __lowercase ):
            # First match position at/after the given start, or -1 when absent.
            __lowerCAmelCase = pattern.search(__lowercase , __lowercase )
            return m.start() if m else -1
        __lowerCAmelCase = [re.compile(__lowercase , re.MULTILINE ) for pattern in truncate_before_pattern]
        __lowerCAmelCase = list(re.finditer('''^print''' , __lowercase , re.MULTILINE ) )
        if len(__lowercase ) > 1:
            # Keep everything before the second top-level print statement.
            __lowerCAmelCase = completion[: prints[1].start()]
        __lowerCAmelCase = list(re.finditer('''^def''' , __lowercase , re.MULTILINE ) )
        if len(__lowercase ) > 1:
            # Keep everything before the second function definition.
            __lowerCAmelCase = completion[: defs[1].start()]
        __lowerCAmelCase = 0
        __lowerCAmelCase = [
            pos for pos in [find_re(__lowercase , __lowercase , __lowercase ) for terminal in terminals] if pos != -1
        ]
        if len(__lowercase ) > 0:
            return completion[: min(__lowercase )]
        else:
            return completion
| 9 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class a__ ( unittest.TestCase ):
    """Unit tests for CLIPProcessor: save/load round-trips and parity between
    the processor and its underlying tokenizer / image processor.

    NOTE(review): within each method every local is bound to the same mangled
    name ``__lowerCAmelCase`` while later lines read attributes such as
    ``self.tmpdirname`` and ``self.vocab_file`` that are never assigned here --
    presumably mangled ``self.`` assignments; confirm against the upstream
    transformers test file.
    """
    def _snake_case (self ):
        """setUp: create a temp dir holding a tiny BPE vocab/merges pair and an
        image-processor config file."""
        __lowerCAmelCase = tempfile.mkdtemp()
        # fmt: off
        __lowerCAmelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
        __lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        __lowerCAmelCase = {'''unk_token''': '''<unk>'''}
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(__lowercase ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(__lowercase ) )
        __lowerCAmelCase = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        }
        __lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(__lowercase , __lowercase )
    def _snake_case (self , **__lowercase ):
        """Load the slow CLIP tokenizer from the temp dir."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
    def _snake_case (self , **__lowercase ):
        """Load the fast (Rust) CLIP tokenizer from the temp dir."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase )
    def _snake_case (self , **__lowercase ):
        """Load the CLIP image processor from the temp dir."""
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowercase )
    def _snake_case (self ):
        """tearDown: remove the temp dir."""
        shutil.rmtree(self.tmpdirname )
    def _snake_case (self ):
        """Return a list with one random 30x400 RGB PIL image."""
        __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def _snake_case (self ):
        """Save/load round-trip preserves vocab and image-processor config for
        both slow- and fast-tokenizer-backed processors."""
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = self.get_rust_tokenizer()
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        processor_slow.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase )
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        processor_fast.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __lowercase )
        self.assertIsInstance(processor_fast.tokenizer , __lowercase )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __lowercase )
        self.assertIsInstance(processor_fast.image_processor , __lowercase )
    def _snake_case (self ):
        """Loading with extra kwargs (new special tokens, image-processor
        overrides) picks up the overridden components."""
        __lowerCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        __lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 )
        __lowerCAmelCase = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __lowercase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __lowercase )
    def _snake_case (self ):
        """Processor image path matches the bare image processor numerically."""
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' )
        __lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def _snake_case (self ):
        """Processor text path matches the bare tokenizer."""
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = '''lower newer'''
        __lowerCAmelCase = processor(text=__lowercase )
        __lowerCAmelCase = tokenizer(__lowercase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _snake_case (self ):
        """Text+image call yields the expected keys; empty call raises."""
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = '''lower newer'''
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__lowercase , images=__lowercase )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(__lowercase ):
            processor()
    def _snake_case (self ):
        """batch_decode delegates to the tokenizer's batch_decode."""
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCAmelCase = processor.batch_decode(__lowercase )
        __lowerCAmelCase = tokenizer.batch_decode(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
    def _snake_case (self ):
        """Output keys match the processor's declared model_input_names."""
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = '''lower newer'''
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__lowercase , images=__lowercase )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 9 | 1 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class a__ ( unittest.TestCase ):
    """Tests for the greedy knapsack helper ``kp.calc_profit``.

    NOTE(review): the ``assertRaisesRegex`` calls below pass no callable, so
    each only creates an (unused) context manager and asserts nothing; their
    first positional argument is also the undefined name ``__lowercase``
    (presumably an exception type such as ValueError).  Confirm and wrap the
    calls in ``with`` blocks upstream.
    """
    def _snake_case (self ):
        """Greedy profit for this profit/weight table at capacity 100 is 210."""
        __lowerCAmelCase = [10, 20, 30, 40, 50, 60]
        __lowerCAmelCase = [2, 4, 6, 8, 10, 12]
        __lowerCAmelCase = 1_00
        self.assertEqual(kp.calc_profit(__lowercase , __lowercase , __lowercase ) , 2_10 )
    def _snake_case (self ):
        """Non-positive max_weight should be rejected."""
        self.assertRaisesRegex(__lowercase , '''max_weight must greater than zero.''' )
    def _snake_case (self ):
        """Negative weights should be rejected."""
        self.assertRaisesRegex(__lowercase , '''Weight can not be negative.''' )
    def _snake_case (self ):
        """Negative profits should be rejected."""
        self.assertRaisesRegex(__lowercase , '''Profit can not be negative.''' )
    def _snake_case (self ):
        """Zero max_weight should be rejected."""
        self.assertRaisesRegex(__lowercase , '''max_weight must greater than zero.''' )
    def _snake_case (self ):
        """Mismatched profit/weight list lengths should be rejected."""
        self.assertRaisesRegex(
            __lowercase , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    unittest.main()
| 9 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class a__ ( __A ):
    """Distribution pushed through an elementwise affine map y = loc + scale * x,
    exposing closed-form mean / variance / stddev.

    NOTE(review): ``loc`` and ``scale`` are bound to a throwaway local instead
    of ``self.loc`` / ``self.scale``, which ``super().__init__`` and the
    properties below read -- these look like mangled attribute assignments;
    confirm against the upstream time-series transformer source.
    """
    def __init__(self , __lowercase , __lowercase=None , __lowercase=None , __lowercase=0 ):
        # Defaults: identity transform (scale=1.0, loc=0.0).
        __lowerCAmelCase = 1.0 if scale is None else scale
        __lowerCAmelCase = 0.0 if loc is None else loc
        super().__init__(__lowercase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__lowercase )] )
    @property
    def _snake_case (self ):
        """Mean of the transformed distribution: base mean scaled then shifted."""
        return self.base_dist.mean * self.scale + self.loc
    @property
    def _snake_case (self ):
        """Variance of the transformed distribution: base variance * scale**2."""
        return self.base_dist.variance * self.scale**2
    @property
    def _snake_case (self ):
        """Standard deviation (square root of the variance)."""
        return self.variance.sqrt()
class a__ ( nn.Module ):
    """Projects a feature vector to per-argument parameter tensors (one Linear
    per entry of ``args_dim``), then maps them into the valid parameter domain
    with ``domain_map``.

    NOTE(review): the ``__init__``/forward signatures repeat the mangled
    parameter name ``__lowercase`` (a SyntaxError as written) and the attribute
    assignments are bound to a throwaway local, so ``self.proj`` /
    ``self.domain_map`` are never actually set here -- confirm against the
    upstream source.
    """
    def __init__(self , __lowercase , __lowercase , __lowercase , **__lowercase ):
        super().__init__(**__lowercase )
        __lowerCAmelCase = args_dim
        __lowerCAmelCase = nn.ModuleList([nn.Linear(__lowercase , __lowercase ) for dim in args_dim.values()] )
        __lowerCAmelCase = domain_map
    def _snake_case (self , __lowercase ):
        """Apply every projection to the input, then the domain map to the tuple."""
        __lowerCAmelCase = [proj(__lowercase ) for proj in self.proj]
        return self.domain_map(*__lowercase )
class a__ ( nn.Module ):
    """Wraps a plain callable as an ``nn.Module`` so it can be composed with
    other modules (e.g. inside a ModuleList or Sequential)."""
    def __init__(self , __lowercase ):
        super().__init__()
        # NOTE(review): bound to a throwaway local, though the method below
        # reads ``self.function`` -- presumably a mangled attribute assignment.
        __lowerCAmelCase = function
    def _snake_case (self , __lowercase , *__lowercase ):
        # Delegate to the wrapped callable.  (Signature repeats the mangled
        # parameter name -- a SyntaxError as written; confirm upstream.)
        return self.function(__lowercase , *__lowercase )
class a__ :
    """Base class mapping network outputs to a torch Distribution: stores the
    per-parameter dimensionality, builds the parameter projection, and applies
    an optional affine (loc/scale) transform to the resulting distribution.

    NOTE(review): several method signatures repeat the mangled parameter name
    ``__lowercase`` (a SyntaxError as written) and ``__init__`` binds a
    throwaway local rather than ``self.dim`` / ``self.args_dim`` which the
    other methods read -- confirm against the upstream source.
    """
    # Class-level declarations (names mangled): distribution class, event dim,
    # and the per-parameter dimension mapping.
    __UpperCamelCase : type
    __UpperCamelCase : int
    __UpperCamelCase : Dict[str, int]
    def __init__(self , __lowercase = 1 ):
        __lowerCAmelCase = dim
        # Each per-argument dimension is multiplied by the output dimension.
        __lowerCAmelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _snake_case (self , __lowercase ):
        """Instantiate the base distribution; wrap in Independent for dim > 1."""
        if self.dim == 1:
            return self.distribution_class(*__lowercase )
        else:
            return Independent(self.distribution_class(*__lowercase ) , 1 )
    def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , ):
        """Build the output distribution, affine-transformed when loc/scale given."""
        __lowerCAmelCase = self._base_distribution(__lowercase )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(__lowercase , loc=__lowercase , scale=__lowercase , event_dim=self.event_dim )
    @property
    def _snake_case (self ):
        """Event shape: scalar () when dim == 1, else (dim,)."""
        return () if self.dim == 1 else (self.dim,)
    @property
    def _snake_case (self ):
        """Number of event dimensions."""
        return len(self.event_shape )
    @property
    def _snake_case (self ):
        # Constant 0.0 -- presumably the smallest value in the distribution's
        # support; confirm the intent upstream.
        return 0.0
    def _snake_case (self , __lowercase ):
        """Create the linear projection from ``in_features`` onto the
        distribution parameters, composed with the domain map."""
        return ParameterProjection(
            in_features=__lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def _snake_case (self , *__lowercase ):
        """Map raw projections into the parameter domain; subclasses implement."""
        raise NotImplementedError()
    @staticmethod
    def _snake_case (__lowercase ):
        """Smooth positive mapping (x + sqrt(x^2 + 4)) / 2 ('squareplus')."""
        return (x + torch.sqrt(torch.square(__lowercase ) + 4.0 )) / 2.0
class a__ ( __A ):
    """Student's t output head: parameters df, loc, scale (one value each)."""
    __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    __UpperCamelCase : type = StudentT
    @classmethod
    def _snake_case (cls , __lowercase , __lowercase , __lowercase ):
        """Map raw projections to valid StudentT parameters: scale made positive
        (squareplus, clamped above float eps), df shifted above 2, and the
        trailing parameter axis squeezed away.  (Signature repeats the mangled
        parameter name -- SyntaxError as written.)"""
        __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
        __lowerCAmelCase = 2.0 + cls.squareplus(__lowercase )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class a__ ( __A ):
    """Normal output head: parameters loc and scale (one value each)."""
    __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1}
    __UpperCamelCase : type = Normal
    @classmethod
    def _snake_case (cls , __lowercase , __lowercase ):
        """Force scale positive (squareplus, clamped above float eps) and squeeze
        the trailing parameter axis."""
        __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class a__ ( __A ):
    """Negative-binomial output head parameterized by total_count and logits.

    NOTE(review): unpack targets below are bound to mangled locals while the
    following lines read ``total_count`` / ``logits`` -- confirm upstream.
    """
    __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1}
    __UpperCamelCase : type = NegativeBinomial
    @classmethod
    def _snake_case (cls , __lowercase , __lowercase ):
        """Force total_count positive (squareplus); squeeze the trailing axis."""
        __lowerCAmelCase = cls.squareplus(__lowercase )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _snake_case (self , __lowercase ):
        """Build the NegativeBinomial (Independent-wrapped when dim > 1)."""
        __lowerCAmelCase , __lowerCAmelCase = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=__lowercase , logits=__lowercase )
        else:
            return Independent(self.distribution_class(total_count=__lowercase , logits=__lowercase ) , 1 )
    def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None ):
        """Apply scaling through the logits (scaling property of the underlying
        Gamma mixture); loc is not used for this count distribution."""
        __lowerCAmelCase , __lowerCAmelCase = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 9 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class a__ ( __A ):
    """Configuration for BERT models: vocabulary/hidden sizes, layer and head
    counts, dropout rates, position-embedding type, and related settings.

    NOTE(review): every attribute assignment in ``__init__`` is bound to a
    throwaway local (``__lowerCAmelCase``) instead of ``self.<attr>``, the
    signature repeats the mangled parameter name ``__lowercase`` (a
    SyntaxError as written), and the right-hand names (``vocab_size`` ...
    ``classifier_dropout``) are not defined in this scope -- confirm against
    the upstream transformers BertConfig.
    """
    __UpperCamelCase : Union[str, Any] = 'bert'
    def __init__(self , __lowercase=3_05_22 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=0 , __lowercase="absolute" , __lowercase=True , __lowercase=None , **__lowercase , ):
        super().__init__(pad_token_id=__lowercase , **__lowercase )
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = type_vocab_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = layer_norm_eps
        __lowerCAmelCase = position_embedding_type
        __lowerCAmelCase = use_cache
        __lowerCAmelCase = classifier_dropout
class a__ ( __A ):
    """ONNX export configuration for BERT: declares the dynamic axes of the
    model inputs (input_ids, attention_mask, token_type_ids)."""
    @property
    def _snake_case (self ):
        """Input axis mapping: batch/choice/sequence for multiple-choice tasks,
        batch/sequence otherwise.

        NOTE(review): the axis dict is bound to a throwaway local while the
        return expression reads ``dynamic_axis`` -- presumably a mangled
        assignment; confirm against the upstream transformers source.
        """
        if self.task == "multiple-choice":
            __lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            __lowerCAmelCase = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
| 9 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a__ ( __A ):
    """Agent tool for document (image) question answering backed by a Donut
    (VisionEncoderDecoder) checkpoint fine-tuned on DocVQA.

    NOTE(review): several locals below are bound to the mangled name
    ``__lowerCAmelCase`` while later lines read names such as ``task_prompt``
    and ``sequence`` -- presumably mangled assignments; confirm against the
    upstream transformers tool.
    """
    __UpperCamelCase : Tuple = 'naver-clova-ix/donut-base-finetuned-docvqa'
    __UpperCamelCase : List[str] = (
        'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    __UpperCamelCase : Optional[int] = 'document_qa'
    __UpperCamelCase : Optional[int] = AutoProcessor
    __UpperCamelCase : Tuple = VisionEncoderDecoderModel
    __UpperCamelCase : Any = ['image', 'text']
    __UpperCamelCase : Optional[Any] = ['text']
    def __init__(self , *__lowercase , **__lowercase ):
        """Require Pillow (vision extra) before constructing the tool."""
        if not is_vision_available():
            raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
        super().__init__(*__lowercase , **__lowercase )
    def _snake_case (self , __lowercase , __lowercase ):
        """Encode the DocVQA task prompt (with the question substituted) and the
        document image into decoder input ids and pixel values."""
        __lowerCAmelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        __lowerCAmelCase = task_prompt.replace('''{user_input}''' , __lowercase )
        __lowerCAmelCase = self.pre_processor.tokenizer(
            __lowercase , add_special_tokens=__lowercase , return_tensors='''pt''' ).input_ids
        __lowerCAmelCase = self.pre_processor(__lowercase , return_tensors='''pt''' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def _snake_case (self , __lowercase ):
        """Generate the answer token sequence (greedy, num_beams=1)."""
        return self.model.generate(
            inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__lowercase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__lowercase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__lowercase , ).sequences
    def _snake_case (self , __lowercase ):
        """Decode the generated sequence, strip special tokens and the leading
        task token, parse Donut's token markup, and return the 'answer' field."""
        __lowerCAmelCase = self.pre_processor.batch_decode(__lowercase )[0]
        __lowerCAmelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
        __lowerCAmelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
        __lowerCAmelCase = re.sub(R'''<.*?>''' , '''''' , __lowercase , count=1 ).strip() # remove first task start token
        __lowerCAmelCase = self.pre_processor.tokenajson(__lowercase )
        return sequence["answer"]
| 9 | 1 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_UpperCAmelCase : Optional[Any] = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
_UpperCAmelCase : Tuple = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
_UpperCAmelCase : Optional[Any] = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
    """Accuracy metric for the MATH dataset: canonicalizes LaTeX answers via
    ``math_equivalence.is_equiv`` before comparing prediction and reference."""
    def _snake_case (self ):
        """Declare metric metadata: string-valued predictions and references."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' ),
                    '''references''': datasets.Value('''string''' ),
                } ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _snake_case (self , __lowercase , __lowercase ):
        """Fraction of predictions judged equivalent to their references.

        NOTE(review): the signature repeats the mangled parameter name
        ``__lowercase`` (a SyntaxError as written); confirm upstream.
        """
        __lowerCAmelCase = 0.0
        for i, j in zip(__lowercase , __lowercase ):
            n_correct += 1.0 if math_equivalence.is_equiv(__lowercase , __lowercase ) else 0.0
        __lowerCAmelCase = n_correct / len(__lowercase )
        return {
            "accuracy": accuracy,
        }
| 9 |
'''simple docstring'''
def __magic_name__( lowerCamelCase):
__lowerCAmelCase = 1
__lowerCAmelCase = 2
while i * i <= n:
__lowerCAmelCase = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def __magic_name__( ):
__lowerCAmelCase = 1
__lowerCAmelCase = 1
while True:
i += 1
t_num += i
if count_divisors(lowerCamelCase) > 5_0_0:
break
return t_num
if __name__ == "__main__":
    # The module's entry point is named `__magic_name__`; the previous
    # `solution` reference was undefined here and raised NameError.
    print(__magic_name__())
| 9 | 1 |
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class a__ :
    """Binary-tree node holding a float payload and optional children.

    The BST validator below reads ``data``, ``left`` and ``right``, so those
    field names are declared here; the original annotated all three types
    under a single mangled name, leaving the node with only one usable field.
    """
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def __magic_name__( lowerCamelCase):
# Validation
def is_valid_tree(lowerCamelCase) -> bool:
if node is None:
return True
if not isinstance(lowerCamelCase, lowerCamelCase):
return False
try:
float(node.data)
except (TypeError, ValueError):
return False
return is_valid_tree(node.left) and is_valid_tree(node.right)
if not is_valid_tree(lowerCamelCase):
raise ValueError(
'''Each node should be type of TreeNode and data should be float.''')
def is_binary_search_tree_recursive_check(
lowerCamelCase, lowerCamelCase, lowerCamelCase) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left, lowerCamelCase, node.data)
and is_binary_search_tree_recursive_check(
node.right, node.data, lowerCamelCase)
)
return is_binary_search_tree_recursive_check(lowerCamelCase, -float('''inf'''), float('''inf'''))
if __name__ == "__main__":
    # Execute the module's doctests when run as a script.
    import doctest
    doctest.testmod()
| 9 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class a__ ( unittest.TestCase ):
    """Unit tests for DisjunctiveConstraint: input validation, rejection of
    nested-subset branches, and stepwise progress/completion/reset behaviour.

    NOTE(review): triple unpacks of ``dc.update(...)`` are bound to mangled
    locals while the next lines read ``stepped`` / ``completed`` / ``reset``,
    which are never defined -- presumably mangled unpack targets; confirm
    against the upstream transformers test file.
    """
    def _snake_case (self ):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        __lowerCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
        __lowerCAmelCase = DisjunctiveConstraint(__lowercase )
        self.assertTrue(isinstance(dc.token_ids , __lowercase ) )
        with self.assertRaises(__lowercase ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(__lowercase ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def _snake_case (self ):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        __lowerCAmelCase = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(__lowercase ):
            DisjunctiveConstraint(__lowercase ) # fails here
    def _snake_case (self ):
        """Stepping 1 -> 2 -> 3 completes the [1, 2, 3] branch."""
        __lowerCAmelCase = [[1, 2, 3], [1, 2, 4]]
        __lowerCAmelCase = DisjunctiveConstraint(__lowercase )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 )
        __lowerCAmelCase = stepped is True and completed is False and reset is False
        self.assertTrue(__lowercase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 )
        __lowerCAmelCase = stepped is True and completed is False and reset is False
        self.assertTrue(__lowercase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(3 )
        __lowerCAmelCase = stepped is True and completed is True and reset is False
        self.assertTrue(__lowercase )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def _snake_case (self ):
        """Longer branches progress correctly, and reset() restarts matching."""
        __lowerCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        __lowerCAmelCase = DisjunctiveConstraint(__lowercase )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 9 | 1 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
_UpperCAmelCase : List[str] = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def __magic_name__( ):
__lowerCAmelCase = Github(os.environ['''GITHUB_TOKEN'''])
__lowerCAmelCase = g.get_repo('''huggingface/accelerate''')
__lowerCAmelCase = repo.get_issues(state='''open''')
for issue in open_issues:
__lowerCAmelCase = sorted([comment for comment in issue.get_comments()], key=lambda lowerCamelCase: i.created_at, reverse=lowerCamelCase)
__lowerCAmelCase = comments[0] if len(lowerCamelCase) > 0 else None
__lowerCAmelCase = dt.utcnow()
__lowerCAmelCase = (current_time - issue.updated_at).days
__lowerCAmelCase = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''')
elif (
days_since_updated > 2_3
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''')
if __name__ == "__main__":
main()
| 9 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_UpperCAmelCase : List[str] = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_UpperCAmelCase : str = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_UpperCAmelCase : Tuple = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ):
if label_map is not None:
for old_id, new_id in label_map.items():
__lowerCAmelCase = new_id
# turn into Numpy arrays
__lowerCAmelCase = np.array(lowerCamelCase)
__lowerCAmelCase = np.array(lowerCamelCase)
if reduce_labels:
__lowerCAmelCase = 2_5_5
__lowerCAmelCase = label - 1
__lowerCAmelCase = 2_5_5
__lowerCAmelCase = label != ignore_index
__lowerCAmelCase = np.not_equal(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = pred_label[mask]
__lowerCAmelCase = np.array(lowerCamelCase)[mask]
__lowerCAmelCase = pred_label[pred_label == label]
__lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0]
__lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0]
__lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0]
__lowerCAmelCase = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ):
__lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa)
__lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa)
__lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa)
__lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa)
for result, gt_seg_map in zip(lowerCamelCase, lowerCamelCase):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = intersect_and_union(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase)
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = total_intersect_and_union(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase)
# compute metrics
__lowerCAmelCase = {}
__lowerCAmelCase = total_area_intersect.sum() / total_area_label.sum()
__lowerCAmelCase = total_area_intersect / total_area_union
__lowerCAmelCase = total_area_intersect / total_area_label
__lowerCAmelCase = np.nanmean(lowerCamelCase)
__lowerCAmelCase = np.nanmean(lowerCamelCase)
__lowerCAmelCase = all_acc
__lowerCAmelCase = iou
__lowerCAmelCase = acc
if nan_to_num is not None:
__lowerCAmelCase = {metric: np.nan_to_num(lowerCamelCase, nan=lowerCamelCase) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
"""simple docstring"""
def _snake_case (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
} ) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = False , ):
__lowerCAmelCase = mean_iou(
results=__lowercase , gt_seg_maps=__lowercase , num_labels=__lowercase , ignore_index=__lowercase , nan_to_num=__lowercase , label_map=__lowercase , reduce_labels=__lowercase , )
return iou_result
| 9 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase : Dict = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 9 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class a__ ( __A , unittest.TestCase ):
    """Tokenizer test suite for DeBERTa (slow and fast tokenizers).

    Builds a tiny BPE vocab/merges fixture on disk in ``setUp`` and checks
    tokenization, token-type ids, special-token building and a pinned
    integration encoding against ``microsoft/deberta-base``.

    NOTE(review): identifiers here look machine-renamed — all test methods
    share the name ``_snake_case`` (so later defs shadow earlier ones), the
    three class attributes share one name, and several locals are written to
    the same ``__lowerCAmelCase`` name. Original intent (tokenizer_class /
    test_rust_tokenizer / rust_tokenizer_class, setUp, test_full_tokenizer,
    ...) should be confirmed against upstream before relying on behavior.
    """

    # Presumably tokenizer_class / test_rust_tokenizer / rust_tokenizer_class
    # in the original source — TODO confirm; all three share one mangled name.
    __UpperCamelCase : str = DebertaTokenizer
    __UpperCamelCase : str = True
    __UpperCamelCase : Any = DebertaTokenizerFast
    def _snake_case (self ):
        # Fixture setup: write a minimal vocab.json and merges.txt into tmpdirname.
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __lowerCAmelCase = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''[UNK]''',
        ]
        # token -> id mapping built from the list above.
        __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
        __lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        __lowerCAmelCase = {'''unk_token''': '''[UNK]'''}
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(__lowercase ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(__lowercase ) )
    def _snake_case (self , **__lowercase ):
        # Helper: load a tokenizer from the on-disk fixture with the special-token map.
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
    def _snake_case (self , __lowercase ):
        # Helper: (input_text, expected_output_text) pair for round-trip tests.
        __lowerCAmelCase = '''lower newer'''
        __lowerCAmelCase = '''lower newer'''
        return input_text, output_text
    def _snake_case (self ):
        # Full tokenization: text -> BPE tokens -> ids.
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = '''lower newer'''
        __lowerCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        __lowerCAmelCase = tokenizer.tokenize(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        __lowerCAmelCase = tokens + [tokenizer.unk_token]
        __lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase )
    def _snake_case (self ):
        # Sentence-pair call should produce 0s for the first segment, 1s for the second.
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = tokenizer('''Hello''' , '''World''' )
        __lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''] , __lowercase )
    @slow
    def _snake_case (self ):
        # build_inputs_with_special_tokens must match direct encode() output
        # for both single sequences and pairs.
        __lowerCAmelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
        __lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowercase )
        __lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowercase )
        __lowerCAmelCase = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
        __lowerCAmelCase = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
        __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase )
        __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def _snake_case (self ):
        # Integration check: batch-encode three sentences and compare against a
        # pinned expected encoding, then verify decode() round-trips the text.
        __lowerCAmelCase = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            __lowerCAmelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
            __lowerCAmelCase = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            __lowerCAmelCase = tokenizer(__lowercase , padding=__lowercase )
            __lowerCAmelCase = [tokenizer.decode(__lowercase , skip_special_tokens=__lowercase ) for seq in encoding['''input_ids''']]
            # fmt: off
            __lowerCAmelCase = {
                '''input_ids''': [
                    [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
                ],
                '''token_type_ids''': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                '''attention_mask''': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on
            __lowerCAmelCase = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            self.assertDictEqual(encoding.data , __lowercase )
            for expected, decoded in zip(__lowercase , __lowercase ):
                self.assertEqual(__lowercase , __lowercase )
| 9 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
# Map of pretrained Falcon checkpoints to their hosted config.json URLs.
# NOTE(review): this rebinds the same (machine-renamed) name as the logger
# above, so the logger object becomes unreachable — confirm intended names
# against upstream (logger / FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP).
_UpperCAmelCase : Optional[Any] = {
    """tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
    """tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""",
}
class a__ ( PretrainedConfig ):
    """Configuration class for Falcon models (e.g. tiiuae/falcon-7b, tiiuae/falcon-40b).

    Stores the hyper-parameters needed to instantiate a Falcon model; defaults
    correspond to the falcon-7b architecture.

    Fixes over the previous (machine-mangled) version:
    - duplicate ``__lowercase`` parameter names in ``__init__`` were a
      SyntaxError; real parameter names restored from the defaults
      (vocab_size=65024, hidden_size=4544, ...).
    - ``__init__`` assigned locals instead of instance attributes, so the
      config carried no state; attributes are now set on ``self``.
    - the two class attributes and the two properties each shared one name,
      so the later definition clobbered the earlier; standard
      ``PretrainedConfig`` names are restored.
    - the base class ``__A`` was undefined; ``PretrainedConfig`` (imported at
      the top of this module) is used instead.
    """

    # Identifier used by the auto-config machinery / serialization.
    model_type = 'falcon'
    # Cached tensors that must not be surfaced as plain model outputs.
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with the legacy ``n_embed`` kwarg, which
        # overrides ``hidden_size`` when supplied.
        n_embed = kwargs.pop('n_embed', None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Default to multi-head attention when no KV-head count is given.
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        """Dimensionality of a single attention head."""
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        """Whether rotary position embeddings are used (mutually exclusive with alibi)."""
        return not self.alibi
| 9 |
'''simple docstring'''
import argparse
import datetime
def __magic_name__(date_input):
    """Return which day of the week a date falls on, via Zeller's congruence.

    Args:
        date_input: date string in ``mm-dd-yyyy`` or ``mm/dd/yyyy`` format
            (exactly 10 characters).

    Returns:
        A sentence such as ``"Your date 01-31-2010, is a Sunday!"``.

    Raises:
        ValueError: if the string is not 10 characters, the month/day/year is
            out of range, a separator is invalid, or the date does not exist
            (``datetime.date`` rejects e.g. Feb 30).
        AssertionError: if the congruence disagrees with ``datetime`` (internal
            sanity check).

    Fixes over the previous version: length validation now requires exactly
    10 characters (shorter inputs previously fell through and crashed while
    slicing); the ``datetime.date`` sanity object is built from the parsed
    year/month/day instead of the raw string; the final lookup uses the
    computed weekday index ``f`` instead of the input string.
    """
    # Day names keyed by Zeller result (0 = Sunday ... 6 = Saturday).
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    # Map Python's weekday() (Monday=0) onto the Zeller numbering (Sunday=0).
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate overall shape: mm?dd?yyyy is exactly 10 characters.
    if len(date_input) != 10:
        raise ValueError('Must be 10 characters long')
    # Get month
    m = int(date_input[0] + date_input[1])
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12')
    sep_a = date_input[2]
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31')
    # Get second separator
    sep_a = date_input[5]
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6:10])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?')
    # Real calendar date, used below to cross-check the congruence; also
    # rejects impossible dates such as 02-30.
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Zeller treats January/February as months 13/14 of the previous year.
    if m <= 2:
        y = y - 1
        m = m + 12
    # Century and year-of-century components.
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # Cross-check against datetime's weekday.
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.')
    # Response
    response = F"""Your date {date_input}, is a {days[str(f)]}!"""
    return response
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Build the CLI: one positional argument holding the date string.
    # (The previous version stored the parser and the parsed namespace under a
    # mangled name but then referenced `parser`/`args`, raising NameError; it
    # also called the undefined name `zeller` instead of this module's
    # day-of-week function.)
    parser = argparse.ArgumentParser(
        description=(
            """Find out what day of the week nearly any date is or was. Enter """
            """date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
        )
    )
    parser.add_argument(
        """date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
    )
    args = parser.parse_args()
    __magic_name__(args.date_input)
| 9 | 1 |
'''simple docstring'''
class a__ :
    """An item with a name, a value and a weight, for the greedy knapsack demo.

    Fixes over the previous (machine-mangled) version: ``__init__`` declared
    three parameters with the same name (a SyntaxError) and assigned locals
    instead of instance attributes, and the four accessor methods all shared
    one name so only the last survived. Accessor names are restored to the
    ones the greedy routine in this file actually calls
    (``get_value`` / ``get_weight``).
    """

    def __init__(self, name, value, weight):
        # Descriptive name, item value, and item weight.
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""

    def get_value(self):
        """Return the item's value."""
        return self.value

    def get_name(self):
        """Return the item's name."""
        return self.name

    def get_weight(self):
        """Return the item's weight."""
        return self.weight

    def value_weight(self):
        """Return the value-to-weight ratio (a common greedy sort key)."""
        return self.value / self.weight
def __magic_name__(name, value, weight):
    """Build a menu (list of item objects) from parallel name/value/weight lists.

    Fixes over the previous version: the three parameters shared one name (a
    SyntaxError), and the loop constructed the undefined class ``Things`` —
    the item class defined in this file is ``a__``, so that is what is built.
    """
    menu = []
    for i in range(len(value)):
        menu.append(a__(name[i], value[i], weight[i]))
    return menu
def __magic_name__(items, max_cost, key_func):
    """Greedily fill a knapsack of capacity ``max_cost``.

    Items are considered in descending order of ``key_func(item)`` and taken
    whole whenever they still fit.

    Args:
        items: iterable of objects exposing ``get_weight()`` and ``get_value()``.
        max_cost: knapsack capacity (total weight budget).
        key_func: scoring function used to rank items (e.g. value/weight).

    Returns:
        Tuple ``(chosen_items, total_value)``.

    Fixes over the previous version: the three parameters shared one name (a
    SyntaxError), which also made the ``sorted`` call pass the same object as
    iterable, key and reverse flag; sorting is restored to descending order
    by the supplied key.
    """
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        # Take the next-best item only if it still fits in the remaining budget.
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def __magic_name__( ):
    """Placeholder for the greedy-knapsack self-test; intentionally empty here."""
    pass
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest
    doctest.testmod()
| 9 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch/CUDA fully deterministic so the pinned image-slice values in the
# tests below are reproducible across runs.
enable_full_determinism()
class a__ ( __A , unittest.TestCase ):
    """Fast (CPU, dummy-weights) tests for ``ConsistencyModelPipeline``.

    Each test builds the pipeline from tiny test checkpoints, runs it on CPU
    with a seeded generator, and compares a 3x3 corner slice of the output
    image against pinned reference values.

    NOTE(review): identifiers look machine-renamed — every method is named
    ``_snake_case`` (later defs shadow earlier ones) and locals share the name
    ``__lowerCAmelCase``; confirm intended names against the upstream
    diffusers test file before relying on behavior.
    """

    __UpperCamelCase : Optional[Any] = ConsistencyModelPipeline
    __UpperCamelCase : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    __UpperCamelCase : int = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    __UpperCamelCase : List[Any] = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ] )
    @property
    def _snake_case (self ):
        # Tiny unconditional UNet checkpoint used by the fast tests.
        __lowerCAmelCase = UNetaDModel.from_pretrained(
            '''diffusers/consistency-models-test''' , subfolder='''test_unet''' , )
        return unet
    @property
    def _snake_case (self ):
        # Tiny class-conditional UNet checkpoint used by the fast tests.
        __lowerCAmelCase = UNetaDModel.from_pretrained(
            '''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , )
        return unet
    def _snake_case (self , __lowercase=False ):
        # Assemble pipeline components (unet + CM multistep scheduler).
        if class_cond:
            __lowerCAmelCase = self.dummy_cond_unet
        else:
            __lowerCAmelCase = self.dummy_uncond_unet
        # Default to CM multistep sampler
        __lowerCAmelCase = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCAmelCase = {
            '''unet''': unet,
            '''scheduler''': scheduler,
        }
        return components
    def _snake_case (self , __lowercase , __lowercase=0 ):
        # Build seeded call kwargs; MPS needs a CPU-side torch.manual_seed.
        if str(__lowercase ).startswith('''mps''' ):
            __lowerCAmelCase = torch.manual_seed(__lowercase )
        else:
            __lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
        __lowerCAmelCase = {
            '''batch_size''': 1,
            '''num_inference_steps''': None,
            '''timesteps''': [22, 0],
            '''generator''': generator,
            '''output_type''': '''np''',
        }
        return inputs
    def _snake_case (self ):
        # Multistep sampling, unconditional model.
        __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase = self.get_dummy_components()
        __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase )
        __lowerCAmelCase = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __lowerCAmelCase = self.get_dummy_inputs(__lowercase )
        __lowerCAmelCase = pipe(**__lowercase ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase = image[0, -3:, -3:, -1]
        __lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case (self ):
        # Multistep sampling, class-conditional model.
        __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase )
        __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase )
        __lowerCAmelCase = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __lowerCAmelCase = self.get_dummy_inputs(__lowercase )
        __lowerCAmelCase = 0
        __lowerCAmelCase = pipe(**__lowercase ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase = image[0, -3:, -3:, -1]
        __lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case (self ):
        # Onestep sampling (num_inference_steps=1, default timesteps), unconditional.
        __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase = self.get_dummy_components()
        __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase )
        __lowerCAmelCase = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __lowerCAmelCase = self.get_dummy_inputs(__lowercase )
        __lowerCAmelCase = 1
        __lowerCAmelCase = None
        __lowerCAmelCase = pipe(**__lowercase ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase = image[0, -3:, -3:, -1]
        __lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case (self ):
        # Onestep sampling, class-conditional model.
        __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase )
        __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase )
        __lowerCAmelCase = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        __lowerCAmelCase = self.get_dummy_inputs(__lowercase )
        __lowerCAmelCase = 1
        __lowerCAmelCase = None
        __lowerCAmelCase = 0
        __lowerCAmelCase = pipe(**__lowercase ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase = image[0, -3:, -3:, -1]
        __lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
"""simple docstring"""
def _snake_case (self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case (self , __lowercase=0 , __lowercase=False , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ):
__lowerCAmelCase = torch.manual_seed(__lowercase )
__lowerCAmelCase = {
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''class_labels''': 0,
'''generator''': generator,
'''output_type''': '''np''',
}
if get_fixed_latents:
__lowerCAmelCase = self.get_fixed_latents(seed=__lowercase , device=__lowercase , dtype=__lowercase , shape=__lowercase )
__lowerCAmelCase = latents
return inputs
def _snake_case (self , __lowercase=0 , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ):
if type(__lowercase ) == str:
__lowerCAmelCase = torch.device(__lowercase )
__lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCAmelCase = randn_tensor(__lowercase , generator=__lowercase , device=__lowercase , dtype=__lowercase )
return latents
def _snake_case (self ):
__lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase )
pipe.to(torch_device=__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _snake_case (self ):
__lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase )
pipe.to(torch_device=__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = 1
__lowerCAmelCase = None
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    @require_torch_a
    def _snake_case (self ):
        """Multistep sampling in fp16 under torch 2.0 SDPA flash attention,
        with fixed latents, checked against a pinned slice."""
        # NOTE(review): ``__lowercase``/``pipe``/``image`` below are read but
        # never bound (obfuscation collapsed the targets); the sdp_kernel
        # flags were presumably booleans — confirm against the original.
        __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        __lowerCAmelCase = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase )
        pipe.to(torch_device=__lowercase , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=__lowercase )
        __lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ):
            __lowerCAmelCase = pipe(**__lowercase ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCAmelCase = image[0, -3:, -3:, -1]
        __lowerCAmelCase = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
        # Tighter tolerance than the fp32 tests: latents are fixed here.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    @require_torch_a
    def _snake_case (self ):
        """One-step sampling in fp16 under torch 2.0 SDPA flash attention,
        with fixed latents, checked against a pinned slice."""
        # NOTE(review): same mangled-name caveat as the sibling tests —
        # ``__lowercase``/``pipe``/``image`` are read but never bound here.
        __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        __lowerCAmelCase = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase )
        pipe.to(torch_device=__lowercase , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=__lowercase )
        __lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase )
        # Presumably num_inference_steps=1, timesteps=None — TODO confirm.
        __lowerCAmelCase = 1
        __lowerCAmelCase = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ):
            __lowerCAmelCase = pipe(**__lowercase ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCAmelCase = image[0, -3:, -3:, -1]
        __lowerCAmelCase = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 9 | 1 |
'''simple docstring'''
from math import factorial
def __magic_name__( lowerCamelCase = 2_0):
__lowerCAmelCase = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
__lowerCAmelCase = n // 2
return int(factorial(lowerCamelCase) / (factorial(lowerCamelCase) * factorial(n - k)))
if __name__ == "__main__":
    import sys

    # Defects fixed: the guard called an undefined name ``solution`` and read
    # an undefined ``n``; the solver defined above is ``__magic_name__``.
    if len(sys.argv) == 1:
        print(__magic_name__(2_0))
    else:
        try:
            n = int(sys.argv[1])
            print(__magic_name__(n))
        except ValueError:
            print("""Invalid entry - please enter a number.""")
| 9 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Defects fixed: every assignment below targeted the same mangled name
# ``_UpperCAmelCase`` (and the final annotated tuple unpack is a SyntaxError),
# while the following lines read ``data``, ``X`` and ``y``.  Bind the values
# to the names the rest of the script actually uses.
data = datasets.load_iris()  # bundled iris dataset (features + labels)
X = np.array(data["""data"""])
y = np.array(data["""target"""])
classes = data["""target_names"""]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def __magic_name__( lowerCamelCase, lowerCamelCase):
return np.linalg.norm(np.array(lowerCamelCase) - np.array(lowerCamelCase))
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=5):
__lowerCAmelCase = zip(lowerCamelCase, lowerCamelCase)
# List of distances of all points from the point to be classified
__lowerCAmelCase = []
for data_point in data:
__lowerCAmelCase = euclidean_distance(data_point[0], lowerCamelCase)
distances.append((distance, data_point[1]))
# Choosing 'k' points with the least distances.
__lowerCAmelCase = [i[1] for i in sorted(lowerCamelCase)[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__lowerCAmelCase = Counter(lowerCamelCase).most_common(1)[0][0]
return classes[result]
if __name__ == "__main__":
    # NOTE(review): ``classifier``, ``X_train``, ``y_train`` and ``classes``
    # are not defined under these names above (the definitions were mangled to
    # ``__magic_name__``/``_UpperCAmelCase``), so this call raises NameError
    # as written — confirm the intended names against the original script.
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 9 | 1 |
'''Lazy ``__init__`` for the OwlViT model family: declares the public API and
defers heavy submodule imports until first attribute access.'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Import structure consumed by ``_LazyModule``: submodule name -> exported
# symbols.  NOTE(review): the list assignments below all target the mangled
# name ``_UpperCAmelCase`` while the bottom of the file reads
# ``_import_structure`` — the original presumably extended this dict; confirm.
_UpperCAmelCase : Tuple = {
    """configuration_owlvit""": [
        """OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """OwlViTConfig""",
        """OwlViTOnnxConfig""",
        """OwlViTTextConfig""",
        """OwlViTVisionConfig""",
    ],
    """processing_owlvit""": ["""OwlViTProcessor"""],
}

# Vision-dependent exports are only declared when the vision extras are
# installed; otherwise the dependency error is swallowed and they are skipped.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : str = ["""OwlViTFeatureExtractor"""]
    _UpperCAmelCase : List[Any] = ["""OwlViTImageProcessor"""]

# Torch-dependent modelling classes, declared only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : Dict = [
        """OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """OwlViTModel""",
        """OwlViTPreTrainedModel""",
        """OwlViTTextModel""",
        """OwlViTVisionModel""",
        """OwlViTForObjectDetection""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    # At runtime the module object is replaced by a lazy proxy that imports
    # submodules on demand.
    _UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 9 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class a__ ( unittest.TestCase ):
    """Tests for ``OwlViTProcessor``: checks that the processor wraps a CLIP
    tokenizer and an OwlViT image processor consistently (save/load round
    trips, text-only, image-only, text+image, and image-guided inputs).

    NOTE(review): throughout this class the obfuscation collapsed assignment
    targets onto ``__lowerCAmelCase``, while later lines read names such as
    ``self.tmpdirname``, ``self.vocab_file``, ``processor`` etc. that are
    never bound under those names — the methods raise NameError/AttributeError
    as written; confirm the intended names against the original test file.
    """
    def _snake_case (self ):
        # Fixture setup: write a tiny CLIP vocab/merges pair and an image
        # processor config into a temporary directory.
        __lowerCAmelCase = tempfile.mkdtemp()
        # fmt: off
        __lowerCAmelCase = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
        __lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        __lowerCAmelCase = {'''unk_token''': '''<unk>'''}
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(__lowercase ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(__lowercase ) )
        __lowerCAmelCase = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        }
        __lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(__lowercase , __lowercase )
    def _snake_case (self , **__lowercase ):
        # Slow tokenizer built from the fixture files.
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase )
    def _snake_case (self , **__lowercase ):
        # Fast (Rust-backed) tokenizer built from the fixture files.
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase )
    def _snake_case (self , **__lowercase ):
        # Image processor built from the fixture config.
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__lowercase )
    def _snake_case (self ):
        # Fixture teardown: drop the temporary directory.
        shutil.rmtree(self.tmpdirname )
    def _snake_case (self ):
        # One random 30x400 RGB image as a PIL image.
        __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def _snake_case (self ):
        # Save/load round trip preserves both tokenizer (slow & fast) and
        # image-processor configuration.
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = self.get_rust_tokenizer()
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        processor_slow.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase )
        __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        processor_fast.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __lowercase )
        self.assertIsInstance(processor_fast.tokenizer , __lowercase )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __lowercase )
        self.assertIsInstance(processor_fast.image_processor , __lowercase )
    def _snake_case (self ):
        # from_pretrained honours extra kwargs (special tokens, normalize flag).
        __lowerCAmelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        __lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase )
        __lowerCAmelCase = OwlViTProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __lowercase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __lowercase )
    def _snake_case (self ):
        # Processor(images=...) matches the bare image processor output.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' )
        __lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def _snake_case (self ):
        # Processor(text=...) matches the bare tokenizer output.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = '''lower newer'''
        __lowerCAmelCase = processor(text=__lowercase , return_tensors='''np''' )
        __lowerCAmelCase = tokenizer(__lowercase , return_tensors='''np''' )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
    def _snake_case (self ):
        # Text + image call returns both token and pixel keys; empty call raises.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = '''lower newer'''
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__lowercase , images=__lowercase )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(__lowercase ):
            processor()
    def _snake_case (self ):
        # A flat list of queries is padded to a fixed sequence length.
        __lowerCAmelCase = '''google/owlvit-base-patch32'''
        __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase )
        __lowerCAmelCase = ['''cat''', '''nasa badge''']
        __lowerCAmelCase = processor(text=__lowercase )
        __lowerCAmelCase = 16
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
        self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(__lowercase ):
            processor()
    def _snake_case (self ):
        # Nested query lists are flattened to (batch * max_queries, seq_len).
        __lowerCAmelCase = '''google/owlvit-base-patch32'''
        __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase )
        __lowerCAmelCase = [['''cat''', '''nasa badge'''], ['''person''']]
        __lowerCAmelCase = processor(text=__lowercase )
        __lowerCAmelCase = 16
        __lowerCAmelCase = len(__lowercase )
        __lowerCAmelCase = max([len(__lowercase ) for texts in input_texts] )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
        self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(__lowercase ):
            processor()
    def _snake_case (self ):
        # Token ids for the query strings match hard-coded expected ids.
        __lowerCAmelCase = '''google/owlvit-base-patch32'''
        __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase )
        __lowerCAmelCase = ['''cat''', '''nasa badge''']
        __lowerCAmelCase = processor(text=__lowercase )
        __lowerCAmelCase = 16
        __lowerCAmelCase = inputs['''input_ids''']
        __lowerCAmelCase = [
            [4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
        self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
        self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
        self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
    def _snake_case (self ):
        # Image-guided detection input: query images instead of text.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(images=__lowercase , query_images=__lowercase )
        self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(__lowercase ):
            processor()
    def _snake_case (self ):
        # batch_decode is forwarded straight to the tokenizer.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCAmelCase = processor.batch_decode(__lowercase )
        __lowerCAmelCase = tokenizer.batch_decode(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
| 9 | 1 |
'''simple docstring'''
import random
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = False):
__lowerCAmelCase = {i: [] for i in range(lowerCamelCase)}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(lowerCamelCase)
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(lowerCamelCase):
for j in range(i + 1, lowerCamelCase):
if random.random() < probability:
graph[i].append(lowerCamelCase)
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(lowerCamelCase)
return graph
def __magic_name__( lowerCamelCase):
return {
i: [j for j in range(lowerCamelCase) if i != j] for i in range(lowerCamelCase)
}
if __name__ == "__main__":
    import doctest

    # Run any doctests defined in this module when executed as a script.
    doctest.testmod()
| 9 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __magic_name__( ):
__lowerCAmelCase = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0)]
__lowerCAmelCase = randint(-5_0_0_0, 5_0_0_0)
return (arr, r)
_UpperCAmelCase : Dict = make_dataset()
def __magic_name__( lowerCamelCase, lowerCamelCase):
for triplet in permutations(lowerCamelCase, 3):
if sum(lowerCamelCase) == target:
return tuple(sorted(lowerCamelCase))
return (0, 0, 0)
def __magic_name__( lowerCamelCase, lowerCamelCase):
arr.sort()
__lowerCAmelCase = len(lowerCamelCase)
for i in range(n - 1):
__lowerCAmelCase , __lowerCAmelCase = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __magic_name__( ):
    """Time the naive vs two-pointer 3-sum implementations with ``timeit``.

    NOTE(review): the setup/stmt strings reference ``dataset``,
    ``triplet_sum1`` and ``triplet_sum2``, which do not exist under those
    names in this mangled module, and the ``repeat(...)`` calls read the
    undefined name ``lowerCamelCase`` — every local was collapsed onto
    ``__lowerCAmelCase``.  This function raises NameError as written;
    confirm the intended names against the original script.
    """
    __lowerCAmelCase = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
    __lowerCAmelCase = '''
triplet_sum1(*dataset)
'''
    __lowerCAmelCase = '''
triplet_sum2(*dataset)
'''
    __lowerCAmelCase = repeat(setup=lowerCamelCase, stmt=lowerCamelCase, repeat=5, number=1_0_0_0_0)
    __lowerCAmelCase = repeat(setup=lowerCamelCase, stmt=lowerCamelCase, repeat=5, number=1_0_0_0_0)
    return (min(lowerCamelCase), min(lowerCamelCase))
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # NOTE(review): ``solution_times`` is not defined under this name above
    # (the timing helper was mangled to ``__magic_name__``), and the prints
    # read ``times`` while the result is bound to ``_UpperCAmelCase`` — this
    # block raises NameError as written; confirm the intended names.
    _UpperCAmelCase : Union[str, Any] = solution_times()
    print(f"""The time for naive implementation is {times[0]}.""")
    print(f"""The time for optimized implementation is {times[1]}.""")
| 9 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a__ ( __A , unittest.TestCase ):
    """Tests for ``CTRLTokenizer`` on a tiny hand-built BPE vocabulary.

    NOTE(review): assignments in ``setUp`` target the mangled name
    ``__lowerCAmelCase`` while later lines read ``self.vocab_file`` /
    ``self.merges_file`` — those attributes are never bound under these names
    here; confirm against the un-obfuscated original test file.
    """
    # Test-mixin configuration: tokenizer class under test, no fast variant.
    __UpperCamelCase : List[Any] = CTRLTokenizer
    __UpperCamelCase : Dict = False
    __UpperCamelCase : List[str] = False
    def _snake_case (self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __lowerCAmelCase = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
        __lowerCAmelCase = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        __lowerCAmelCase = {'''unk_token''': '''<unk>'''}
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(__lowercase ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(__lowercase ) )
    def _snake_case (self , **__lowercase ):
        # Tokenizer factory used by the shared mixin tests.
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
    def _snake_case (self , __lowercase ):
        # Input/expected-output pair for the mixin round-trip tests.
        __lowerCAmelCase = '''adapt react readapt apt'''
        __lowerCAmelCase = '''adapt react readapt apt'''
        return input_text, output_text
    def _snake_case (self ):
        # BPE tokenization and token->id conversion on the toy vocabulary.
        __lowerCAmelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __lowerCAmelCase = '''adapt react readapt apt'''
        __lowerCAmelCase = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        __lowerCAmelCase = tokenizer.tokenize(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
        __lowerCAmelCase = tokens + [tokenizer.unk_token]
        __lowerCAmelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase )
| 9 |
'''simple docstring'''
import numpy as np
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = 1E-12, lowerCamelCase = 1_0_0, ):
assert np.shape(lowerCamelCase)[0] == np.shape(lowerCamelCase)[1]
# Ensure proper dimensionality.
assert np.shape(lowerCamelCase)[0] == np.shape(lowerCamelCase)[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowerCamelCase) == np.iscomplexobj(lowerCamelCase)
__lowerCAmelCase = np.iscomplexobj(lowerCamelCase)
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowerCamelCase, input_matrix.conj().T)
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
__lowerCAmelCase = False
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 1E12
while not convergence:
# Multiple matrix by the vector.
__lowerCAmelCase = np.dot(lowerCamelCase, lowerCamelCase)
# Normalize the resulting output vector.
__lowerCAmelCase = w / np.linalg.norm(lowerCamelCase)
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
__lowerCAmelCase = vector.conj().T if is_complex else vector.T
__lowerCAmelCase = np.dot(lowerCamelCase, np.dot(lowerCamelCase, lowerCamelCase))
# Check convergence.
__lowerCAmelCase = np.abs(lambda_ - lambda_previous) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
__lowerCAmelCase = True
__lowerCAmelCase = lambda_
if is_complex:
__lowerCAmelCase = np.real(lambda_)
return lambda_, vector
def __magic_name__( ):
    """Self-test: compare power iteration against ``np.linalg.eigh`` on a
    real symmetric and a complex Hermitian 3x3 matrix.

    NOTE(review): the body reads names that were collapsed by obfuscation
    (``complex_input_matrix`` before assignment, ``lowerCamelCase``,
    ``eigen_value``/``eigen_values``/``eigen_vectors``), and
    ``power_iteration(...)`` — mangled to ``__magic_name__`` — would resolve
    to this very function.  It raises NameError as written; confirm the
    intended names against the original module.
    """
    __lowerCAmelCase = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]])
    __lowerCAmelCase = np.array([4_1, 4, 2_0])
    __lowerCAmelCase = real_input_matrix.astype(np.complexaaa)
    # Hermitian construction: add an upper-triangular imaginary part and
    # subtract its transpose.
    __lowerCAmelCase = np.triu(1J * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    __lowerCAmelCase = np.array([4_1, 4, 2_0]).astype(np.complexaaa)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            __lowerCAmelCase = real_input_matrix
            __lowerCAmelCase = real_vector
        elif problem_type == "complex":
            __lowerCAmelCase = complex_input_matrix
            __lowerCAmelCase = complex_vector
        # Our implementation.
        __lowerCAmelCase , __lowerCAmelCase = power_iteration(lowerCamelCase, lowerCamelCase)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        __lowerCAmelCase , __lowerCAmelCase = np.linalg.eigh(lowerCamelCase)
        # Last eigenvalue is the maximum one.
        __lowerCAmelCase = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        __lowerCAmelCase = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1E-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(lowerCamelCase) - np.abs(lowerCamelCase)) <= 1E-6
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): ``test_power_iteration`` is not defined under this name
    # above (the self-test was mangled to ``__magic_name__``) — this call
    # raises NameError as written; confirm the intended name.
    test_power_iteration()
| 9 | 1 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
class a__ ( __A ):
    """CLAP-style audio feature extractor: converts raw waveforms into log-mel
    spectrogram features, handling both short clips (repeat/pad) and long
    clips (random truncation or 4-way "fusion" of shrunken + chunked mels).

    NOTE(review): the ``__init__`` signature repeats ``__lowercase`` (a
    SyntaxError after obfuscation) and assignments throughout the class
    target ``__lowerCAmelCase`` while later lines read attributes such as
    ``self.hop_length`` — confirm the intended names against the original.
    """
    # Keys produced by __call__.
    __UpperCamelCase : List[str] = ['input_features', 'is_longer']
    def __init__(self , __lowercase=64 , __lowercase=4_80_00 , __lowercase=4_80 , __lowercase=10 , __lowercase=10_24 , __lowercase=0.0 , __lowercase=False , __lowercase = 0 , __lowercase = 1_40_00 , __lowercase = None , __lowercase = "fusion" , __lowercase = "repeatpad" , **__lowercase , ):
        super().__init__(
            feature_size=__lowercase , sampling_rate=__lowercase , padding_value=__lowercase , return_attention_mask=__lowercase , **__lowercase , )
        __lowerCAmelCase = top_db
        __lowerCAmelCase = truncation
        __lowerCAmelCase = padding
        __lowerCAmelCase = fft_window_size
        # Number of frequency bins of a one-sided FFT.
        __lowerCAmelCase = (fft_window_size >> 1) + 1
        __lowerCAmelCase = hop_length
        __lowerCAmelCase = max_length_s
        __lowerCAmelCase = max_length_s * sampling_rate
        __lowerCAmelCase = sampling_rate
        __lowerCAmelCase = frequency_min
        __lowerCAmelCase = frequency_max
        # Two mel filter banks: HTK-style (used for "fusion") and
        # Slaney-style (used for the plain path).
        __lowerCAmelCase = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__lowercase , min_frequency=__lowercase , max_frequency=__lowercase , sampling_rate=__lowercase , norm=__lowercase , mel_scale='''htk''' , )
        __lowerCAmelCase = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__lowercase , min_frequency=__lowercase , max_frequency=__lowercase , sampling_rate=__lowercase , norm='''slaney''' , mel_scale='''slaney''' , )
    def _snake_case (self ):
        # Serialization helper: drop the (large, derivable) filter banks.
        __lowerCAmelCase = copy.deepcopy(self.__dict__ )
        __lowerCAmelCase = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _snake_case (self , __lowercase , __lowercase = None ):
        # Compute a dB-scaled log-mel spectrogram, time-major (frames, mels).
        __lowerCAmelCase = spectrogram(
            __lowercase , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=__lowercase , log_mel='''dB''' , )
        return log_mel_spectrogram.T
    def _snake_case (self , __lowercase , __lowercase , __lowercase ):
        # "Fusion" for long audio: pick one chunk from the front, middle and
        # back thirds, plus a bilinear shrink of the full mel, stacked 4-deep.
        __lowerCAmelCase = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            __lowerCAmelCase = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            __lowerCAmelCase = [0]
        # randomly choose index for each part
        __lowerCAmelCase = np.random.choice(ranges[0] )
        __lowerCAmelCase = np.random.choice(ranges[1] )
        __lowerCAmelCase = np.random.choice(ranges[2] )
        __lowerCAmelCase = mel[idx_front : idx_front + chunk_frames, :]
        __lowerCAmelCase = mel[idx_middle : idx_middle + chunk_frames, :]
        __lowerCAmelCase = mel[idx_back : idx_back + chunk_frames, :]
        __lowerCAmelCase = torch.tensor(mel[None, None, :] )
        __lowerCAmelCase = torch.nn.functional.interpolate(
            __lowercase , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=__lowercase )
        __lowerCAmelCase = mel_shrink[0][0].numpy()
        __lowerCAmelCase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
    def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase ):
        # Truncate (rand_trunc / fusion) or pad (repeat / repeatpad / pad) one
        # waveform to max_length, then turn it into mel features.
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                __lowerCAmelCase = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                __lowerCAmelCase = len(__lowercase ) - max_length
                __lowerCAmelCase = np.random.randint(0 , overflow + 1 )
                __lowerCAmelCase = waveform[idx : idx + max_length]
                __lowerCAmelCase = self._np_extract_fbank_features(__lowercase , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                __lowerCAmelCase = self._np_extract_fbank_features(__lowercase , self.mel_filters )
                __lowerCAmelCase = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
                __lowerCAmelCase = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    __lowerCAmelCase = np.stack([mel, mel, mel, mel] , axis=0 )
                    __lowerCAmelCase = False
                else:
                    __lowerCAmelCase = self._random_mel_fusion(__lowercase , __lowercase , __lowercase )
                    __lowerCAmelCase = True
            else:
                raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
        else:
            __lowerCAmelCase = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    __lowerCAmelCase = int(max_length / len(__lowercase ) )
                    __lowerCAmelCase = np.stack(np.tile(__lowercase , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    __lowerCAmelCase = int(max_length / len(__lowercase ) )
                    __lowerCAmelCase = np.stack(np.tile(__lowercase , __lowercase ) )
                __lowerCAmelCase = np.pad(__lowercase , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
            if truncation == "fusion":
                __lowerCAmelCase = self._np_extract_fbank_features(__lowercase , self.mel_filters )
                __lowerCAmelCase = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                __lowerCAmelCase = self._np_extract_fbank_features(__lowercase , self.mel_filters_slaney )[None, :]
        return input_mel, longer
    def __call__(self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , **__lowercase , ):
        # Main entry point: validate sampling rate, normalize the input to a
        # list of float32 arrays, featurize each waveform, and batch.
        __lowerCAmelCase = truncation if truncation is not None else self.truncation
        __lowerCAmelCase = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        __lowerCAmelCase = isinstance(__lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        __lowerCAmelCase = is_batched_numpy or (
            isinstance(__lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            __lowerCAmelCase = [np.asarray(__lowercase , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(__lowercase , np.ndarray ):
            __lowerCAmelCase = np.asarray(__lowercase , dtype=np.floataa )
        elif isinstance(__lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            __lowerCAmelCase = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            __lowerCAmelCase = [np.asarray(__lowercase )]
        # convert to mel spectrogram, truncate and pad if needed.
        __lowerCAmelCase = [
            self._get_input_mel(__lowercase , max_length if max_length else self.nb_max_samples , __lowercase , __lowercase )
            for waveform in raw_speech
        ]
        __lowerCAmelCase = []
        __lowerCAmelCase = []
        for mel, longer in padded_inputs:
            input_mel.append(__lowercase )
            is_longer.append(__lowercase )
        if truncation == "fusion" and sum(__lowercase ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            __lowerCAmelCase = np.random.randint(0 , len(__lowercase ) )
            __lowerCAmelCase = True
        if isinstance(input_mel[0] , __lowercase ):
            __lowerCAmelCase = [np.asarray(__lowercase , dtype=np.floataa ) for feature in input_mel]
        # is_longer is a list of bool
        __lowerCAmelCase = [[longer] for longer in is_longer]
        __lowerCAmelCase = {'''input_features''': input_mel, '''is_longer''': is_longer}
        __lowerCAmelCase = BatchFeature(__lowercase )
        if return_tensors is not None:
            __lowerCAmelCase = input_features.convert_to_tensors(__lowercase )
        return input_features
| 9 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCAmelCase : str = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute ``(left, top, right, bottom)`` box to the 0-1000 range.

    x coordinates are divided by the image *width*, y coordinates by *height*,
    as LayoutLM-style models expect.  Fixes the obfuscated original, whose
    duplicate parameter names were a SyntaxError and whose body referenced
    undefined ``box``/``width``/``height``; the intra-file caller uses the
    name ``normalize_box``.
    """
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config=None):
    """Run Tesseract OCR on *image* and return ``(words, normalized_boxes)``.

    Each box is the word's (left, top, right, bottom) rectangle scaled to the
    0-1000 range via ``normalize_box``.  Reconstructed from the obfuscated
    original (duplicate parameter names were a SyntaxError); the intra-file
    caller uses the name ``apply_tesseract``.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    # (a set makes the `idx not in irrelevant_indices` tests below O(1))
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left + width, top + height) format
    actual_boxes = [[x, y, x + w, y + h] for x, y, w, h in zip(left, top, width, height)]

    # finally, normalize the bounding boxes to the 0-1000 range
    normalized_boxes = [normalize_box(box, image_width, image_height) for box in actual_boxes]

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class a__(__A):
    """Image processor that resizes document images and (optionally) runs
    Tesseract OCR to extract words and normalized bounding boxes.

    Reconstructed from the obfuscated original: ``__init__``/``preprocess``
    had duplicate parameter names (a SyntaxError) while their bodies referenced
    the real names, and the two ``_snake_case`` methods shadowed each other
    although the body calls ``self.resize`` — so the intended method names are
    unambiguous.
    """

    # presumably `model_input_names` in the upstream source — the base image
    # processor convention; restored from the obfuscated `__UpperCamelCase`.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        apply_ocr=True,
        ocr_lang=None,
        tesseract_config="",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """Resize *image* to ``size['height'] x size['width']``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # `resize` here is the module-level transform, not this method
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        apply_ocr=None,
        ocr_lang=None,
        tesseract_config=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one image or a batch: optional OCR, resize, RGB->BGR flip,
        channel-format conversion.  Returns a BatchFeature with `pixel_values`
        (plus `words`/`boxes` when OCR is applied)."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 9 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    """Holds configuration for VivitImageProcessor tests and builds the kwargs
    dict used to instantiate the processor.

    Renamed from the obfuscated ``a__``: the sibling test class instantiates
    ``VivitImageProcessingTester(self)``, and its ``__init__`` had duplicate
    parameter names (a SyntaxError) while the body referenced the real names.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        # NOTE: list defaults are shared but never mutated here.
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Return the kwargs dict for building a VivitImageProcessor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class a__ ( __A , unittest.TestCase ):
    """Unit tests for VivitImageProcessor over PIL, numpy and torch video inputs.

    NOTE(review): obfuscation damage — every method below is named
    ``_snake_case`` so each definition shadows the previous one and none are
    discovered as unittest ``test_*`` methods; the ``__lowerCAmelCase``
    assignment targets and ``__lowercase`` references are broken leftovers
    (undefined at runtime).  The code is left byte-identical; restoring the
    original names (presumably setUp, image_processor_dict,
    test_image_processor_properties, test_call_pil/numpy/pytorch — TODO
    confirm against upstream) is a separate change.
    """
    # presumably `image_processing_class` in the original — TODO confirm
    __UpperCamelCase : List[Any] = VivitImageProcessor if is_vision_available() else None
    # original: setUp — builds the shared tester helper (should assign
    # self.image_processor_tester, which later methods read)
    def _snake_case (self ):
        __lowerCAmelCase = VivitImageProcessingTester(self )
    # original: `image_processor_dict` property forwarded to the tester
    @property
    def _snake_case (self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    # original: checks the processor exposes the expected attributes
    def _snake_case (self ):
        __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase , '''image_mean''' ) )
        self.assertTrue(hasattr(__lowercase , '''image_std''' ) )
        self.assertTrue(hasattr(__lowercase , '''do_normalize''' ) )
        self.assertTrue(hasattr(__lowercase , '''do_resize''' ) )
        self.assertTrue(hasattr(__lowercase , '''do_center_crop''' ) )
        self.assertTrue(hasattr(__lowercase , '''size''' ) )
    # original: from_dict construction, with and without size/crop_size kwargs
    def _snake_case (self ):
        __lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        __lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    # original: PIL-image videos, unbatched and batched shape checks
    def _snake_case (self ):
        # Initialize image_processing
        __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL videos
        __lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=__lowercase )
        for video in video_inputs:
            self.assertIsInstance(__lowercase , __lowercase )
            self.assertIsInstance(video[0] , Image.Image )
        # Test not batched input
        __lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    # original: numpy-array videos, same shape checks
    def _snake_case (self ):
        # Initialize image_processing
        __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
        for video in video_inputs:
            self.assertIsInstance(__lowercase , __lowercase )
            self.assertIsInstance(video[0] , np.ndarray )
        # Test not batched input
        __lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    # original: torch-tensor videos, same shape checks
    def _snake_case (self ):
        # Initialize image_processing
        __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
        for video in video_inputs:
            self.assertIsInstance(__lowercase , __lowercase )
            self.assertIsInstance(video[0] , torch.Tensor )
        # Test not batched input
        __lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
| 9 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__(metaclass=__A):
    """Dummy placeholder object that raises a helpful error when the
    ``torch`` and ``scipy`` backends are not installed.

    Fixes the obfuscated original, whose ``*__lowercase, **__lowercase``
    signatures duplicated the argument name (a SyntaxError).  ``_backends``
    and the ``from_config``/``from_pretrained`` classmethod names follow the
    transformers dummy-object template — TODO confirm against upstream.
    """

    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 9 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# NOTE(review): both module constants were assigned to the same obfuscated
# name `_UpperCAmelCase`, so the logger was immediately clobbered by the
# archive map.  Conventional names restored.
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class a__(BackboneConfigMixin, PretrainedConfig):
    """Configuration for BiT (Big Transfer) models.

    Reconstructed from the obfuscated original: ``class a__(__A, __A)`` listed
    the same base twice (a TypeError at class creation — the intended bases
    are the two classes imported at the top of the file), ``__init__`` had
    duplicate parameter names (a SyntaxError), and the class attributes were
    renamed although the body reads ``self.layer_types`` and
    ``self.supported_padding``.
    """

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        # one name per stage: "stem", "stage1", ..., "stageN"
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 9 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    """Holds configuration for BridgeTowerImageProcessor tests and computes
    the expected output sizes of its resize logic.

    Renamed from the obfuscated ``a__``: the sibling test class instantiates
    ``BridgeTowerImageProcessingTester(self)``, and ``__init__`` had duplicate
    parameter names (a SyntaxError) while the body referenced the real names.
    """

    def __init__(
        self,
        parent,
        do_resize=True,
        size=None,
        size_divisor=32,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        do_center_crop=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_pad=True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        """Return the kwargs dict for building a BridgeTowerImageProcessor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Mirror the processor's shortest-edge resize (with 1333/800 max-size
        cap and size_divisor rounding) and return (expected_height, expected_width).

        Batched mode recurses per image and takes the max over each dimension.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # assumes channels-first arrays: (C, H, W) — TODO confirm
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class a__ ( __A , unittest.TestCase ):
    """Unit tests for BridgeTowerImageProcessor over PIL, numpy and torch inputs.

    NOTE(review): obfuscation damage — every method below is named
    ``_snake_case`` so each definition shadows the previous one and none are
    discovered as unittest ``test_*`` methods; the ``__lowerCAmelCase``
    assignment targets and ``__lowercase`` references are broken leftovers
    (undefined at runtime).  Left byte-identical; the original names
    (presumably setUp, image_processor_dict, test_image_processor_properties,
    test_call_pil/numpy/pytorch — TODO confirm upstream) need restoring.
    """
    # presumably `image_processing_class` in the original — TODO confirm
    __UpperCamelCase : Any = BridgeTowerImageProcessor if is_vision_available() else None
    # original: setUp — should assign self.image_processor_tester
    def _snake_case (self ):
        __lowerCAmelCase = BridgeTowerImageProcessingTester(self )
    # original: `image_processor_dict` property forwarded to the tester
    @property
    def _snake_case (self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    # original: checks the processor exposes the expected attributes
    def _snake_case (self ):
        __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase , '''image_mean''' ) )
        self.assertTrue(hasattr(__lowercase , '''image_std''' ) )
        self.assertTrue(hasattr(__lowercase , '''do_normalize''' ) )
        self.assertTrue(hasattr(__lowercase , '''do_resize''' ) )
        self.assertTrue(hasattr(__lowercase , '''size''' ) )
        self.assertTrue(hasattr(__lowercase , '''size_divisor''' ) )
    # original: intentionally empty override — purpose unclear from here
    def _snake_case (self ):
        pass
    # original: PIL-image inputs, unbatched and batched shape checks
    def _snake_case (self ):
        # Initialize image processor
        __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , Image.Image )
        # Test not batched input
        __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
        __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # original: numpy-array inputs, same shape checks
    def _snake_case (self ):
        # Initialize image processor
        __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , np.ndarray )
        # Test not batched input
        __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
        __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # original: torch-tensor inputs, same shape checks
    def _snake_case (self ):
        # Initialize image processor
        __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , torch.Tensor )
        # Test not batched input
        __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
        __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
| 9 | 1 |
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """Return the photographic negative of *img* in place: every channel value
    ``v`` becomes ``255 - v``.

    Fixes the obfuscated original (its body referenced an undefined ``img``;
    the caller below uses the name ``convert_to_negative``) and replaces the
    per-pixel Python double loop with one vectorized numpy operation.
    """
    # in-place so callers holding a reference to `img` see the negative too
    img[:] = 255 - img
    return img
if __name__ == "__main__":
# read original image
_UpperCAmelCase : Union[str, Any] = imread("""image_data/lena.jpg""", 1)
# convert to its negative
_UpperCAmelCase : Optional[Any] = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 9 |
'''simple docstring'''
# Imports
import numpy as np
class a__:
    """Compute vegetation indices (NDVI and ~40 relatives) from per-band
    image arrays (numpy arrays or scalars).

    Reconstructed from the obfuscated original: ``__init__``,
    ``set_matricies`` and ``calculation`` had duplicate parameter names
    (a SyntaxError), and the 40 index methods were all renamed
    ``_snake_case`` — their real names are recovered one-to-one, in order,
    from the ``self.<name>`` references in the ``calculation`` dispatch dict.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """Store whichever band matrices are provided; returns True."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Update bands, then dispatch to the index named *index*.

        Returns the computed index, or False (after printing a message) for an
        unknown index name.
        """
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvaa,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arvaa(self):
        # ARVI2: atmospherically resistant vegetation index
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        # Canopy chlorophyll content index: rededge-NDVI over NDVI
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        # Chlorophyll vegetation index
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        # Green leaf index
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        # Normalized difference vegetation index
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        # Blue-normalized difference vegetation index
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, a=0.08, b=1.22, x=0.03):
        # Adjusted transformed soil-adjusted VI (soil line slope a, intercept b)
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        # Coloration index
        return (self.red - self.blue) / self.red

    def ctvi(self):
        # Corrected transformed vegetation index
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        # Green difference vegetation index
        return self.nir - self.green

    def evi(self):
        # Enhanced vegetation index
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        # Green optimized soil-adjusted VI
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        # Green soil-adjusted VI
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        # Ideal vegetation index (intercept b, slope a of the soil line)
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        # Intensity
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        # Ratio vegetation index
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        # Modified soil-adjusted vegetation index
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        # Normalized green-red difference index
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        # Redness index
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        # Saturation
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        # Shape index
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        # NOTE(review): identical to rvi() in the original — kept as-is
        return self.nir / self.red

    def tvi(self):
        # Transformed vegetation index
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        # Normalized difference red-edge index
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 9 | 1 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
# NOTE(review): both constants were assigned to the same obfuscated name
# (the second clobbered the first); the fixtures and tests below reference
# FILE_CONTENT and FILE_PATH, so those names are restored.
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    """Session-scoped fixture: write FILE_CONTENT zstd-compressed and return its path."""
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    """Write FILE_CONTENT into the mock `tmp://` filesystem and return FILE_PATH."""
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    """cached_path() with extract_compressed_file=True must recover the original text."""
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    """Extraction directory must honor the default/custom cache-dir settings."""
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    """cached_path() on an existing local file returns the path unchanged."""
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    """cached_path() on a missing local file raises (presumably FileNotFoundError
    as in the upstream test — TODO confirm; the obfuscated original erased the
    exception class)."""
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    """get_from_cache() over the mock `tmp://` filesystem returns the file content."""
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''', lowerCamelCase)
def __magic_name__( ):
with pytest.raises(lowerCamelCase):
cached_path('''https://huggingface.co''')
# NOTE(review): decorator/raises arguments named ``lowerCamelCase`` are unresolved
# leftovers of obfuscation.
@patch('''datasets.config.HF_DATASETS_OFFLINE''', lowerCamelCase)
def __magic_name__( lowerCamelCase):
    """Test that ``http_get``/``http_head`` fail while HF_DATASETS_OFFLINE is patched."""
    __lowerCAmelCase = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(lowerCamelCase):
        http_get('''https://huggingface.co''', temp_file=lowerCamelCase)
    with pytest.raises(lowerCamelCase):
        http_head('''https://huggingface.co''')
# NOTE(review): decorator/raises arguments named ``lowerCamelCase`` are unresolved
# leftovers of obfuscation.
@patch('''datasets.config.HF_DATASETS_OFFLINE''', lowerCamelCase)
def __magic_name__( lowerCamelCase):
    """Test that ``ftp_get``/``ftp_head`` fail while HF_DATASETS_OFFLINE is patched."""
    __lowerCAmelCase = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(lowerCamelCase):
        ftp_get('''ftp://huggingface.co''', temp_file=lowerCamelCase)
    with pytest.raises(lowerCamelCase):
        ftp_head('''ftp://huggingface.co''')
# NOTE(review): decorator/raises arguments named ``lowerCamelCase`` are unresolved
# leftovers of obfuscation.
@patch('''datasets.config.HF_DATASETS_OFFLINE''', lowerCamelCase)
def __magic_name__( lowerCamelCase):
    """Test that ``fsspec_get``/``fsspec_head`` fail while HF_DATASETS_OFFLINE is patched."""
    __lowerCAmelCase = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(lowerCamelCase):
        fsspec_get('''s3://huggingface.co''', temp_file=lowerCamelCase)
    with pytest.raises(lowerCamelCase):
        fsspec_head('''s3://huggingface.co''')
| 9 |
'''simple docstring'''
from math import sqrt
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (
number >= 0
), "'number' must been an int and positive"
__lowerCAmelCase = True
# 0 and 1 are none primes.
if number <= 1:
__lowerCAmelCase = False
for divisor in range(2, int(round(sqrt(lowerCamelCase))) + 1):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__lowerCAmelCase = False
break
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase), "'status' must been from type bool"
return status
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__lowerCAmelCase = list(range(2, n + 1))
__lowerCAmelCase = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCamelCase)):
for j in range(i + 1, len(lowerCamelCase)):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__lowerCAmelCase = 0
# filters actual prime numbers.
__lowerCAmelCase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list"
return ans
def __magic_name__( lowerCamelCase):
    """Return every prime in [2, N], testing each candidate with ``is_prime``.

    Fixes obfuscation damage: the loop tested/appended the parameter instead of
    the loop variable ``number``. NOTE(review): ``is_prime`` is not defined in
    this file as shipped (all helpers were renamed to ``__magic_name__``);
    the intended sibling call is kept.
    """
    assert isinstance(lowerCamelCase, int) and (lowerCamelCase > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, lowerCamelCase + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def __magic_name__( lowerCamelCase):
    """Return the prime factorization of ``lowerCamelCase`` as a list of prime factors.

    Fixes obfuscation damage: the trial-division loop tested and appended the
    original number instead of ``factor`` (an infinite loop for composites).
    NOTE(review): the sibling ``is_prime`` is unresolved in this file as shipped.
    """
    assert isinstance(lowerCamelCase, int) and lowerCamelCase >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = lowerCamelCase
    if lowerCamelCase == 0 or lowerCamelCase == 1:
        ans.append(lowerCamelCase)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(lowerCamelCase):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(lowerCamelCase)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def __magic_name__( lowerCamelCase):
    """Return the greatest prime factor of ``lowerCamelCase``.

    Fixes obfuscation damage (unbound ``number`` / ``isinstance(x, x)``).
    NOTE(review): ``prime_factorization`` is unresolved in this file as shipped.
    """
    assert isinstance(lowerCamelCase, int) and (
        lowerCamelCase >= 0
    ), "'number' bust been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(lowerCamelCase)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def __magic_name__( lowerCamelCase):
    """Return the smallest prime factor of ``lowerCamelCase``.

    Fixes obfuscation damage (unbound ``number`` / ``isinstance(x, x)``).
    NOTE(review): ``prime_factorization`` is unresolved in this file as shipped.
    """
    assert isinstance(lowerCamelCase, int) and (
        lowerCamelCase >= 0
    ), "'number' bust been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(lowerCamelCase)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase), "'number' must been an int"
assert isinstance(number % 2 == 0, lowerCamelCase), "compare bust been from type bool"
return number % 2 == 0
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase), "'number' must been an int"
assert isinstance(number % 2 != 0, lowerCamelCase), "compare bust been from type bool"
return number % 2 != 0
def __magic_name__( lowerCamelCase):
    """Goldbach: return two primes whose sum equals the even number ``lowerCamelCase`` (> 2).

    Fixes obfuscation damage: the body read an unbound ``number``.
    NOTE(review): ``is_even``, ``is_prime`` and ``get_prime_numbers`` are
    unresolved in this file as shipped.
    """
    assert (
        isinstance(lowerCamelCase, int) and (lowerCamelCase > 2) and is_even(lowerCamelCase)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(lowerCamelCase)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == lowerCamelCase:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == lowerCamelCase)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def __magic_name__( lowerCamelCase, lowerCamelCase):
    """Euclidean gcd of two non-negative integers.

    NOTE(review): both parameters were renamed to ``lowerCamelCase`` — duplicate
    argument names make this a SyntaxError — and the body's ``numbera`` reads
    collapsed the original ``number1``/``number2`` into one name, so the
    original operand order is unrecoverable here. The shape matches the
    classic Euclid loop (rest = a % b; a, b = b, rest).
    """
    assert (
        isinstance(lowerCamelCase, lowerCamelCase)
        and isinstance(lowerCamelCase, lowerCamelCase)
        and (numbera >= 0)
        and (numbera >= 0)
    ), "'number1' and 'number2' must been positive integer."
    __lowerCAmelCase = 0
    while numbera != 0:
        __lowerCAmelCase = numbera % numbera
        __lowerCAmelCase = numbera
        __lowerCAmelCase = rest
    # precondition
    assert isinstance(lowerCamelCase, lowerCamelCase) and (
        numbera >= 0
    ), "'number' must been from type int and positive"
    return numbera
def __magic_name__( lowerCamelCase, lowerCamelCase):
    """Least common multiple (kgV) of two positive integers via prime factorizations.

    NOTE(review): duplicate ``lowerCamelCase`` parameters make this a SyntaxError,
    and ``numbera``/``prime_fac_a`` reads collapsed the original
    ``number1``/``number2`` and ``prime_fac_1``/``prime_fac_2`` pairs, so the
    two operands can no longer be distinguished in this version.
    """
    assert (
        isinstance(lowerCamelCase, lowerCamelCase)
        and isinstance(lowerCamelCase, lowerCamelCase)
        and (numbera >= 1)
        and (numbera >= 1)
    ), "'number1' and 'number2' must been positive integer."
    __lowerCAmelCase = 1 # actual answer that will be return.
    # for kgV (x,1)
    if numbera > 1 and numbera > 1:
        # builds the prime factorization of 'number1' and 'number2'
        __lowerCAmelCase = prime_factorization(lowerCamelCase)
        __lowerCAmelCase = prime_factorization(lowerCamelCase)
    elif numbera == 1 or numbera == 1:
        __lowerCAmelCase = []
        __lowerCAmelCase = []
    __lowerCAmelCase = max(lowerCamelCase, lowerCamelCase)
    __lowerCAmelCase = 0
    __lowerCAmelCase = 0
    __lowerCAmelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_a:
                __lowerCAmelCase = prime_fac_a.count(lowerCamelCase)
                __lowerCAmelCase = prime_fac_a.count(lowerCamelCase)
                for _ in range(max(lowerCamelCase, lowerCamelCase)):
                    ans *= n
            else:
                __lowerCAmelCase = prime_fac_a.count(lowerCamelCase)
                for _ in range(lowerCamelCase):
                    ans *= n
            done.append(lowerCamelCase)
    # iterates through primeFac2
    for n in prime_fac_a:
        if n not in done:
            __lowerCAmelCase = prime_fac_a.count(lowerCamelCase)
            for _ in range(lowerCamelCase):
                ans *= n
            done.append(lowerCamelCase)
    # precondition
    assert isinstance(lowerCamelCase, lowerCamelCase) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def __magic_name__( lowerCamelCase):
    """Return the n-th prime number (n == 0 -> 2, n == 1 -> 3, ...).

    Fixes obfuscation damage: the body read an unbound ``n`` and asserted
    ``isinstance(x, x)``. NOTE(review): ``is_prime`` is unresolved in this
    file as shipped.
    """
    assert isinstance(lowerCamelCase, int) and (lowerCamelCase >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < lowerCamelCase:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans), "'ans' must been a prime number and from type int"
    return ans
def __magic_name__( lowerCamelCase, lowerCamelCase):
    """Return all primes strictly between two given primes p1 < p2.

    NOTE(review): duplicate ``lowerCamelCase`` parameters make this a SyntaxError,
    and ``p_number_a`` reads collapsed the original ``p_number_1``/``p_number_2``
    pair; ``number``/``ans`` are also unbound because their assignments were
    renamed to ``__lowerCAmelCase``.
    """
    assert (
        is_prime(lowerCamelCase) and is_prime(lowerCamelCase) and (p_number_a < p_number_a)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    __lowerCAmelCase = p_number_a + 1 # jump to the next number
    __lowerCAmelCase = [] # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(lowerCamelCase):
        number += 1
    while number < p_number_a:
        ans.append(lowerCamelCase)
        number += 1
        # fetch the next prime number.
        while not is_prime(lowerCamelCase):
            number += 1
    # precondition
    assert (
        isinstance(lowerCamelCase, lowerCamelCase)
        and ans[0] != p_number_a
        and ans[len(lowerCamelCase) - 1] != p_number_a
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 1), "'n' must been int and >= 1"
__lowerCAmelCase = [] # will be returned.
for divisor in range(1, n + 1):
if n % divisor == 0:
ans.append(lowerCamelCase)
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase) - 1] == n, "Error in function getDivisiors(...)"
return ans
def __magic_name__( lowerCamelCase):
    """Return True iff ``lowerCamelCase`` is a perfect number (sum of proper divisors equals it).

    Fixes obfuscation damage: the body read unbound ``number``/``divisors``.
    NOTE(review): ``get_divisors`` is unresolved in this file as shipped.
    """
    assert isinstance(lowerCamelCase, int) and (
        lowerCamelCase > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(lowerCamelCase)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == lowerCamelCase)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == lowerCamelCase
def __magic_name__( lowerCamelCase, lowerCamelCase):
    """Reduce a fraction (numerator, denominator) by their greatest common divisor.

    NOTE(review): duplicate ``lowerCamelCase`` parameters make this a SyntaxError,
    and the body reads the original names ``numerator``/``denominator``/
    ``gcd_of_fraction`` which are no longer bound after obfuscation.
    """
    assert (
        isinstance(lowerCamelCase, lowerCamelCase)
        and isinstance(lowerCamelCase, lowerCamelCase)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    __lowerCAmelCase = gcd(abs(lowerCamelCase), abs(lowerCamelCase))
    # precondition
    assert (
        isinstance(lowerCamelCase, lowerCamelCase)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'n' must been a int and >= 0"
__lowerCAmelCase = 1 # this will be return.
for factor in range(1, n + 1):
ans *= factor
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'n' must been an int and >= 0"
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = 1 # this will be return
for _ in range(n - 1):
__lowerCAmelCase = ans
ans += fiba
__lowerCAmelCase = tmp
return ans
| 9 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase : str = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
_UpperCAmelCase : List[Any] = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
_UpperCAmelCase : Optional[int] = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
_UpperCAmelCase : str = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 9 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Dict = """true"""
def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=1_6):
    """Build a RegressionModel + DataLoader and prepare DDP copies via the accelerator.

    NOTE(review): duplicate ``lowerCamelCase`` parameters make this a SyntaxError,
    and the body reads ``accelerator``/``model``/``ddp_model``/``dataloader``
    which are no longer bound (assignments were renamed to ``__lowerCAmelCase``).
    """
    set_seed(4_2)
    __lowerCAmelCase = RegressionModel()
    __lowerCAmelCase = deepcopy(lowerCamelCase)
    __lowerCAmelCase = RegressionDataset(length=lowerCamelCase)
    __lowerCAmelCase = DataLoader(lowerCamelCase, batch_size=lowerCamelCase)
    model.to(accelerator.device)
    __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase)
    return model, ddp_model, dataloader
def __magic_name__( lowerCamelCase, lowerCamelCase=False):
    """Build a tokenized GLUE/MRPC validation DataLoader for the metrics tests.

    NOTE(review): duplicate ``lowerCamelCase`` parameters make this a SyntaxError;
    ``tokenizer``/``dataset``/``tokenized_datasets``/``accelerator``/``outputs``/
    ``examples``/``use_longest`` are read but no longer bound after obfuscation.
    """
    __lowerCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''')
    __lowerCAmelCase = load_dataset('''glue''', '''mrpc''', split='''validation''')
    def tokenize_function(lowerCamelCase):
        # tokenize a batch of sentence pairs
        __lowerCAmelCase = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=lowerCamelCase, max_length=lowerCamelCase)
        return outputs
    with accelerator.main_process_first():
        __lowerCAmelCase = dataset.map(
            lowerCamelCase, batched=lowerCamelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
    __lowerCAmelCase = tokenized_datasets.rename_column('''label''', '''labels''')
    def collate_fn(lowerCamelCase):
        # pad either to the longest sample or to a fixed max length
        if use_longest:
            return tokenizer.pad(lowerCamelCase, padding='''longest''', return_tensors='''pt''')
        return tokenizer.pad(lowerCamelCase, padding='''max_length''', max_length=1_2_8, return_tensors='''pt''')
    return DataLoader(lowerCamelCase, shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=1_6)
def __magic_name__( lowerCamelCase, lowerCamelCase):
    """Prepare plain and DDP MRPC model/dataloader pairs plus an Accelerator.

    NOTE(review): duplicate ``lowerCamelCase`` parameters make this a SyntaxError;
    ``accelerator``/``model``/``ddp_model``/``dataloader``/``ddp_dataloader``
    are read but no longer bound after obfuscation.
    """
    __lowerCAmelCase = Accelerator(dispatch_batches=lowerCamelCase, split_batches=lowerCamelCase)
    __lowerCAmelCase = get_dataloader(lowerCamelCase, not dispatch_batches)
    __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(
        '''hf-internal-testing/mrpc-bert-base-cased''', return_dict=lowerCamelCase)
    __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
    """Run the model over a dataloader and gather (logits, targets) across processes.

    NOTE(review): three duplicate ``lowerCamelCase`` parameters make this a
    SyntaxError; ``model``/``accelerator``/``logits_and_targets``/``logits``/
    ``targs`` are read but no longer bound after obfuscation.
    """
    __lowerCAmelCase = []
    for batch in dataloader:
        __lowerCAmelCase , __lowerCAmelCase = batch.values()
        with torch.no_grad():
            __lowerCAmelCase = model(lowerCamelCase)
        __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    __lowerCAmelCase , __lowerCAmelCase = [], []
    for logit, targ in logits_and_targets:
        logits.append(lowerCamelCase)
        targs.append(lowerCamelCase)
    __lowerCAmelCase , __lowerCAmelCase = torch.cat(lowerCamelCase), torch.cat(lowerCamelCase)
    return logits, targs
def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=1_6):
    """Check gather_for_metrics returns exactly ``num_samples`` predictions.

    NOTE(review): duplicate ``lowerCamelCase`` parameters make this a SyntaxError;
    ``num_samples`` in the assertion is no longer bound after obfuscation.
    """
    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = get_basic_setup(lowerCamelCase, lowerCamelCase, lowerCamelCase)
    __lowerCAmelCase , __lowerCAmelCase = generate_predictions(lowerCamelCase, lowerCamelCase, lowerCamelCase)
    assert (
        len(lowerCamelCase) == num_samples
    ), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCamelCase)}"""
def __magic_name__( lowerCamelCase = False, lowerCamelCase = False):
    """Compare MRPC accuracy/F1 between single-process and distributed evaluation.

    NOTE(review): duplicate ``lowerCamelCase`` parameters make this a SyntaxError;
    ``metric``/``setup``/``model``/``dataloader``/``accelerator``/``outputs``/
    ``preds``/``references``/``baseline``/``distributed`` are read but no longer
    bound after obfuscation.
    """
    __lowerCAmelCase = evaluate.load('''glue''', '''mrpc''')
    __lowerCAmelCase , __lowerCAmelCase = get_mrpc_setup(lowerCamelCase, lowerCamelCase)
    # First do baseline
    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''no''']
    model.to(lowerCamelCase)
    model.eval()
    for batch in dataloader:
        batch.to(lowerCamelCase)
        with torch.inference_mode():
            __lowerCAmelCase = model(**lowerCamelCase)
        __lowerCAmelCase = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=lowerCamelCase, references=batch['''labels'''])
    __lowerCAmelCase = metric.compute()
    # Then do distributed
    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''ddp''']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            __lowerCAmelCase = model(**lowerCamelCase)
        __lowerCAmelCase = outputs.logits.argmax(dim=-1)
        __lowerCAmelCase = batch['''labels''']
        __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=lowerCamelCase, references=lowerCamelCase)
    __lowerCAmelCase = metric.compute()
    # both runs must produce (numerically close) identical metrics
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def __magic_name__( ):
    """Drive the gather_for_metrics test matrix over split/dispatch batch modes.

    NOTE(review): every local was renamed to ``__lowerCAmelCase`` so the later
    ``accelerator`` reads are unbound, and ``lowerCamelCase`` arguments passed
    to ``Accelerator``/``test_mrpc``/``test_torch_metrics`` are undefined at
    module level — the original loop-variable wiring was lost in obfuscation.
    """
    __lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('''**Testing gather_for_metrics**''')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
                test_mrpc(lowerCamelCase, lowerCamelCase)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test torch metrics**''')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            __lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase)
            if accelerator.is_local_main_process:
                print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
            test_torch_metrics(lowerCamelCase, 9_9)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test last batch is not dropped when perfectly divisible**''')
    __lowerCAmelCase = Accelerator()
    test_torch_metrics(lowerCamelCase, 5_1_2)
    accelerator.state._reset_state()
def __magic_name__( lowerCamelCase):
    """Entry point used by ``xla_spawn`` on TPUs; ignores its index argument."""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 9 | 1 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def __magic_name__( *lowerCamelCase, lowerCamelCase = None, lowerCamelCase=True, lowerCamelCase=2):
    """Diffusers-style ``deprecate`` helper: warn about deprecated arguments/attributes
    and raise once the library version has passed the removal version.

    NOTE(review): ``*lowerCamelCase`` plus three keyword parameters all named
    ``lowerCamelCase`` is a SyntaxError (duplicate argument names); the body
    reads the original names (``take_from``, ``args``, ``deprecated_kwargs``,
    ``values``, ``warning``, ``standard_warn``, ``call_frame``, ``filename``,
    ``line_number``, ``function``, ``key``) which are no longer bound.
    The TypeError message also hard-codes "(unknown)" where the original used
    the ``filename`` local — obfuscation damage.
    """
    from .. import __version__
    __lowerCAmelCase = take_from
    __lowerCAmelCase = ()
    if not isinstance(args[0], lowerCamelCase):
        __lowerCAmelCase = (args,)
    for attribute, version_name, message in args:
        # a deprecation entry whose removal version has passed is itself an error
        if version.parse(version.parse(lowerCamelCase).base_version) >= version.parse(lowerCamelCase):
            raise ValueError(
                F"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
                F""" version {__version__} is >= {version_name}""")
        __lowerCAmelCase = None
        if isinstance(lowerCamelCase, lowerCamelCase) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(lowerCamelCase),)
            __lowerCAmelCase = F"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
        elif hasattr(lowerCamelCase, lowerCamelCase):
            values += (getattr(lowerCamelCase, lowerCamelCase),)
            __lowerCAmelCase = F"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
        elif deprecated_kwargs is None:
            __lowerCAmelCase = F"""`{attribute}` is deprecated and will be removed in version {version_name}."""
        if warning is not None:
            __lowerCAmelCase = warning + ''' ''' if standard_warn else ''''''
            warnings.warn(warning + message, lowerCamelCase, stacklevel=lowerCamelCase)
    # any deprecated kwargs left over were not consumed -> unexpected keyword argument
    if isinstance(lowerCamelCase, lowerCamelCase) and len(lowerCamelCase) > 0:
        __lowerCAmelCase = inspect.getouterframes(inspect.currentframe())[1]
        __lowerCAmelCase = call_frame.filename
        __lowerCAmelCase = call_frame.lineno
        __lowerCAmelCase = call_frame.function
        __lowerCAmelCase , __lowerCAmelCase = next(iter(deprecated_kwargs.items()))
        raise TypeError(F"""{function} in (unknown) line {line_number-1} got an unexpected keyword argument `{key}`""")
    if len(lowerCamelCase) == 0:
        return
    elif len(lowerCamelCase) == 1:
        return values[0]
    return values
| 9 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class a__ ( __A ):
    """RoBERTa model configuration (``model_type = 'roberta'``).

    NOTE(review): every ``__init__`` parameter was renamed to ``__lowercase`` —
    duplicate argument names are a SyntaxError — and the body reads the
    original hyper-parameter names (``vocab_size``, ``hidden_size``, ...)
    which are no longer bound. ``classifier_dropout`` is also read but has no
    surviving parameter.
    """

    # model-type identifier used by the auto classes
    __UpperCamelCase : str = 'roberta'
    def __init__(self , __lowercase=5_02_65 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase="absolute" , __lowercase=True , __lowercase=None , **__lowercase , ):
        super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = type_vocab_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = layer_norm_eps
        __lowerCAmelCase = position_embedding_type
        __lowerCAmelCase = use_cache
        __lowerCAmelCase = classifier_dropout
class a__ ( __A ):
    """ONNX export configuration: declares the dynamic axes of the model inputs.

    Fixes obfuscation damage: the axis mapping was assigned to
    ``__lowerCAmelCase`` but read back as ``dynamic_axis`` (a NameError);
    a single consistent local name is restored.
    """

    @property
    def _snake_case (self ):
        # multiple-choice inputs carry an extra "choice" axis
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 9 | 1 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=True, lowerCamelCase="pt"):
    """Tokenize a single text line with padding/truncation to a fixed length.

    NOTE(review): duplicate ``lowerCamelCase`` parameters make this a SyntaxError;
    the body reads the original names (``line``, ``padding_side``, ``tokenizer``,
    ``pad_to_max_length``) which are no longer bound.
    """
    __lowerCAmelCase = {'''add_prefix_space''': True} if isinstance(lowerCamelCase, lowerCamelCase) and not line.startswith(''' ''') else {}
    __lowerCAmelCase = padding_side
    return tokenizer(
        [line], max_length=lowerCamelCase, padding='''max_length''' if pad_to_max_length else None, truncation=lowerCamelCase, return_tensors=lowerCamelCase, add_special_tokens=lowerCamelCase, **lowerCamelCase, )
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase=None, ):
    """Drop columns that are entirely padding from input_ids (and attention_mask).

    NOTE(review): duplicate ``lowerCamelCase`` parameters make this a SyntaxError;
    ``input_ids``/``attention_mask``/``keep_column_mask`` are read but no longer
    bound after obfuscation.
    """
    __lowerCAmelCase = input_ids.ne(lowerCamelCase).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class a__ ( __A ):
    """Line-by-line seq2seq dataset reading parallel ``<type_path>.source`` /
    ``<type_path>.target`` files via ``linecache``.

    NOTE(review): every ``__init__`` parameter was renamed to ``__lowercase`` —
    duplicate argument names are a SyntaxError — and method bodies still read
    the original names (``type_path``, ``max_source_length``, ``tokenizer``,
    ``prefix``, ``n_obs``, ``src_lang``, ``tgt_lang``, ``index``, ``batch`` ...)
    which are no longer bound after obfuscation.
    """
    def __init__(self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase="train" , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="" , ):
        super().__init__()
        __lowerCAmelCase = Path(__lowercase ).joinpath(type_path + '''.source''' )
        __lowerCAmelCase = Path(__lowercase ).joinpath(type_path + '''.target''' )
        __lowerCAmelCase = self.get_char_lens(self.src_file )
        __lowerCAmelCase = max_source_length
        __lowerCAmelCase = max_target_length
        assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
        __lowerCAmelCase = tokenizer
        __lowerCAmelCase = prefix
        if n_obs is not None:
            __lowerCAmelCase = self.src_lens[:n_obs]
        __lowerCAmelCase = src_lang
        __lowerCAmelCase = tgt_lang
    def __len__(self ):
        # dataset length == number of source lines retained
        return len(self.src_lens )
    def __getitem__(self , __lowercase ):
        __lowerCAmelCase = index + 1 # linecache starts at 1
        __lowerCAmelCase = self.prefix + linecache.getline(str(self.src_file ) , __lowercase ).rstrip('''\n''' )
        __lowerCAmelCase = linecache.getline(str(self.tgt_file ) , __lowercase ).rstrip('''\n''' )
        assert source_line, F"""empty source line for index {index}"""
        assert tgt_line, F"""empty tgt line for index {index}"""
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , __lowercase ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        __lowerCAmelCase = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , __lowercase ) else self.tokenizer
        )
        __lowerCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , __lowercase ) else self.tokenizer
        __lowerCAmelCase = encode_line(__lowercase , __lowercase , self.max_source_length , '''right''' )
        __lowerCAmelCase = encode_line(__lowercase , __lowercase , self.max_target_length , '''right''' )
        __lowerCAmelCase = source_inputs['''input_ids'''].squeeze()
        __lowerCAmelCase = target_inputs['''input_ids'''].squeeze()
        __lowerCAmelCase = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def _snake_case (__lowercase ):
        # character length of every line of the given file
        return [len(__lowercase ) for x in Path(__lowercase ).open().readlines()]
    def _snake_case (self , __lowercase ):
        # collate: stack per-sample tensors and trim padding-only columns
        __lowerCAmelCase = torch.stack([x['''input_ids'''] for x in batch] )
        __lowerCAmelCase = torch.stack([x['''attention_mask'''] for x in batch] )
        __lowerCAmelCase = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        __lowerCAmelCase = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , __lowercase )
            else self.tokenizer.pad_token_id
        )
        __lowerCAmelCase = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , __lowercase )
            else self.tokenizer.pad_token_id
        )
        __lowerCAmelCase = trim_batch(__lowercase , __lowercase )
        __lowerCAmelCase , __lowerCAmelCase = trim_batch(__lowercase , __lowercase , attention_mask=__lowercase )
        __lowerCAmelCase = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
_UpperCAmelCase : List[Any] = getLogger(__name__)
def __magic_name__( lowerCamelCase):
return list(itertools.chain.from_iterable(lowerCamelCase))
def __magic_name__( lowerCamelCase):
    """Write git repository metadata to ``<folder>/git_log.json``.

    NOTE(review): the ``get_git_info()`` result is assigned to
    ``__lowerCAmelCase`` but the parameter is passed to ``save_json`` instead —
    the original payload wiring was lost in obfuscation.
    """
    __lowerCAmelCase = get_git_info()
    save_json(lowerCamelCase, os.path.join(lowerCamelCase, '''git_log.json'''))
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase=4, **lowerCamelCase):
    """Serialize an object to a JSON file with the given indent.

    NOTE(review): duplicate ``lowerCamelCase`` parameters make this a SyntaxError —
    the original (content, path, indent) parameter names were lost.
    """
    with open(lowerCamelCase, '''w''') as f:
        json.dump(lowerCamelCase, lowerCamelCase, indent=lowerCamelCase, **lowerCamelCase)
def __magic_name__( lowerCamelCase):
with open(lowerCamelCase) as f:
return json.load(lowerCamelCase)
def __magic_name__( ):
    """Collect git repository metadata (commit sha, branch, hostname).

    NOTE(review): the repo object is assigned to ``__lowerCAmelCase`` but read
    back as ``repo`` (a NameError), and the ``search_parent_directories``
    argument references an undefined module-level ``lowerCamelCase`` —
    obfuscation damage.
    """
    __lowerCAmelCase = git.Repo(search_parent_directories=lowerCamelCase)
    __lowerCAmelCase = {
        '''repo_id''': str(lowerCamelCase),
        '''repo_sha''': str(repo.head.object.hexsha),
        '''repo_branch''': str(repo.active_branch),
        '''hostname''': str(socket.gethostname()),
    }
    return repo_infos
def __magic_name__( lowerCamelCase, lowerCamelCase):
    """Eagerly map a function over an iterable (list-returning ``map``).

    NOTE(review): duplicate ``lowerCamelCase`` parameters make this a SyntaxError —
    the original (function, iterable) names were lost.
    """
    return list(map(lowerCamelCase, lowerCamelCase))
def __magic_name__( lowerCamelCase, lowerCamelCase):
    """Pickle an object to the given path.

    NOTE(review): duplicate ``lowerCamelCase`` parameters make this a SyntaxError —
    the original (obj, path) names were lost.
    """
    with open(lowerCamelCase, '''wb''') as f:
        return pickle.dump(lowerCamelCase, lowerCamelCase)
def __magic_name__( lowerCamelCase):
def remove_articles(lowerCamelCase):
return re.sub(r'''\b(a|an|the)\b''', ''' ''', lowerCamelCase)
def white_space_fix(lowerCamelCase):
return " ".join(text.split())
def remove_punc(lowerCamelCase):
__lowerCAmelCase = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(lowerCamelCase):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase))))
def __magic_name__( lowerCamelCase, lowerCamelCase):
    """Token-level F1 between a prediction and a gold answer (SQuAD metric).

    NOTE(review): duplicate ``lowerCamelCase`` parameters make this a SyntaxError;
    ``common``/``num_same``/``precision``/``recall``/``fa`` are read but no
    longer bound after obfuscation.
    """
    __lowerCAmelCase = normalize_answer(lowerCamelCase).split()
    __lowerCAmelCase = normalize_answer(lowerCamelCase).split()
    __lowerCAmelCase = Counter(lowerCamelCase) & Counter(lowerCamelCase)
    __lowerCAmelCase = sum(common.values())
    if num_same == 0:
        return 0
    __lowerCAmelCase = 1.0 * num_same / len(lowerCamelCase)
    __lowerCAmelCase = 1.0 * num_same / len(lowerCamelCase)
    __lowerCAmelCase = (2 * precision * recall) / (precision + recall)
    return fa
def __magic_name__( lowerCamelCase, lowerCamelCase):
return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase)
def __magic_name__( lowerCamelCase, lowerCamelCase):
assert len(lowerCamelCase) == len(lowerCamelCase)
__lowerCAmelCase = 0
for hypo, pred in zip(lowerCamelCase, lowerCamelCase):
em += exact_match_score(lowerCamelCase, lowerCamelCase)
if len(lowerCamelCase) > 0:
em /= len(lowerCamelCase)
return {"em": em}
def __magic_name__( lowerCamelCase):
return model_prefix.startswith('''rag''')
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__lowerCAmelCase = '''dropout_rate'''
for p in extra_params:
if getattr(lowerCamelCase, lowerCamelCase, lowerCamelCase):
if not hasattr(lowerCamelCase, lowerCamelCase) and not hasattr(lowerCamelCase, equivalent_param[p]):
logger.info('''config doesn\'t have a `{}` attribute'''.format(lowerCamelCase))
delattr(lowerCamelCase, lowerCamelCase)
continue
__lowerCAmelCase = p if hasattr(lowerCamelCase, lowerCamelCase) else equivalent_param[p]
setattr(lowerCamelCase, lowerCamelCase, getattr(lowerCamelCase, lowerCamelCase))
delattr(lowerCamelCase, lowerCamelCase)
return hparams, config
| 9 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = old_name
if "patch_embed" in old_name:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = old_name.split('''.''')
if layer == "0":
__lowerCAmelCase = old_name.replace('''0''', '''convolution1''')
elif layer == "1":
__lowerCAmelCase = old_name.replace('''1''', '''batchnorm_before''')
elif layer == "3":
__lowerCAmelCase = old_name.replace('''3''', '''convolution2''')
else:
__lowerCAmelCase = old_name.replace('''4''', '''batchnorm_after''')
if "network" in old_name and re.search(r'''\d\.\d''', lowerCamelCase):
__lowerCAmelCase = r'''\b\d{2}\b'''
if bool(re.search(lowerCamelCase, lowerCamelCase)):
__lowerCAmelCase = re.search(r'''\d\.\d\d.''', lowerCamelCase).group()
else:
__lowerCAmelCase = re.search(r'''\d\.\d.''', lowerCamelCase).group()
if int(match[0]) < 6:
__lowerCAmelCase = old_name.replace(lowerCamelCase, '''''')
__lowerCAmelCase = trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1])
__lowerCAmelCase = '''intermediate_stages.''' + trimmed_name
else:
__lowerCAmelCase = old_name.replace(lowerCamelCase, '''''')
if int(match[2]) < num_meta4D_last_stage:
__lowerCAmelCase = trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2])
else:
__lowerCAmelCase = str(int(match[2]) - num_meta4D_last_stage)
__lowerCAmelCase = trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index)
if "norm1" in old_name:
__lowerCAmelCase = trimmed_name.replace('''norm1''', '''layernorm1''')
elif "norm2" in old_name:
__lowerCAmelCase = trimmed_name.replace('''norm2''', '''layernorm2''')
elif "fc1" in old_name:
__lowerCAmelCase = trimmed_name.replace('''fc1''', '''linear_in''')
elif "fc2" in old_name:
__lowerCAmelCase = trimmed_name.replace('''fc2''', '''linear_out''')
__lowerCAmelCase = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(r'''.\d.''', lowerCamelCase):
__lowerCAmelCase = old_name.replace('''network''', '''intermediate_stages''')
if "fc" in new_name:
__lowerCAmelCase = new_name.replace('''fc''', '''convolution''')
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__lowerCAmelCase = new_name.replace('''norm1''', '''batchnorm_before''')
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__lowerCAmelCase = new_name.replace('''norm2''', '''batchnorm_after''')
if "proj" in new_name:
__lowerCAmelCase = new_name.replace('''proj''', '''projection''')
if "dist_head" in new_name:
__lowerCAmelCase = new_name.replace('''dist_head''', '''distillation_classifier''')
elif "head" in new_name:
__lowerCAmelCase = new_name.replace('''head''', '''classifier''')
elif "patch_embed" in new_name:
__lowerCAmelCase = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__lowerCAmelCase = new_name.replace('''norm''', '''layernorm''')
__lowerCAmelCase = '''efficientformer.''' + new_name
else:
__lowerCAmelCase = '''efficientformer.encoder.''' + new_name
return new_name
def __magic_name__( lowerCamelCase, lowerCamelCase):
for key in checkpoint.copy().keys():
__lowerCAmelCase = checkpoint.pop(lowerCamelCase)
__lowerCAmelCase = val
return checkpoint
def __magic_name__( ):
__lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowerCAmelCase = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase).raw)
return image
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = torch.load(lowerCamelCase, map_location='''cpu''')['''model''']
__lowerCAmelCase = EfficientFormerConfig.from_json_file(lowerCamelCase)
__lowerCAmelCase = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase)
__lowerCAmelCase = '''_'''.join(checkpoint_path.split('''/''')[-1].split('''.''')[0].split('''_''')[:-1])
__lowerCAmelCase = config.depths[-1] - config.num_metaad_blocks + 1
__lowerCAmelCase = convert_torch_checkpoint(lowerCamelCase, lowerCamelCase)
model.load_state_dict(lowerCamelCase)
model.eval()
__lowerCAmelCase = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = 2_5_6
__lowerCAmelCase = 2_2_4
__lowerCAmelCase = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], )
__lowerCAmelCase = processor(images=lowerCamelCase, return_tensors='''pt''').pixel_values
# original processing pipeline
__lowerCAmelCase = Compose(
[
Resize(lowerCamelCase, interpolation=pillow_resamplings['''bicubic''']),
CenterCrop(lowerCamelCase),
ToTensor(),
Normalize(lowerCamelCase, lowerCamelCase),
])
__lowerCAmelCase = image_transforms(lowerCamelCase).unsqueeze(0)
assert torch.allclose(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = model(lowerCamelCase)
__lowerCAmelCase = outputs.logits
__lowerCAmelCase = (1, 1_0_0_0)
if "l1" in model_name:
__lowerCAmelCase = torch.Tensor(
[-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28])
assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3)
assert logits.shape == expected_shape
elif "l3" in model_name:
__lowerCAmelCase = torch.Tensor(
[-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27])
assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3)
assert logits.shape == expected_shape
elif "l7" in model_name:
__lowerCAmelCase = torch.Tensor(
[-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78])
assert logits.shape == expected_shape
else:
raise ValueError(
F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""")
# Save Checkpoints
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
model.save_pretrained(lowerCamelCase)
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")
processor.save_pretrained(lowerCamelCase)
print(F"""Processor successfuly saved at {pytorch_dump_path}""")
if push_to_hub:
print('''Pushing model to the hub...''')
model.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add model''', use_temp_dir=lowerCamelCase, )
processor.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add image processor''', use_temp_dir=lowerCamelCase, )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
_UpperCAmelCase : List[str] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 9 | 1 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
_UpperCAmelCase : Any = None
try:
import msvcrt
except ImportError:
_UpperCAmelCase : Any = None
try:
import fcntl
except ImportError:
_UpperCAmelCase : Optional[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
_UpperCAmelCase : str = OSError
# Data
# ------------------------------------------------
_UpperCAmelCase : Any = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
_UpperCAmelCase : int = """3.0.12"""
_UpperCAmelCase : Optional[int] = None
def __magic_name__( ):
global _logger
__lowerCAmelCase = _logger or logging.getLogger(__name__)
return _logger
class a__ ( __A ):
"""simple docstring"""
def __init__(self , __lowercase ):
__lowerCAmelCase = lock_file
return None
def __str__(self ):
__lowerCAmelCase = F"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class a__ :
"""simple docstring"""
def __init__(self , __lowercase ):
__lowerCAmelCase = lock
return None
def __enter__(self ):
return self.lock
def __exit__(self , __lowercase , __lowercase , __lowercase ):
self.lock.release()
return None
class a__ :
"""simple docstring"""
def __init__(self , __lowercase , __lowercase=-1 , __lowercase=None ):
__lowerCAmelCase = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
__lowerCAmelCase = self.hash_filename_if_too_long(__lowercase , __lowercase )
# The path to the lock file.
__lowerCAmelCase = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__lowerCAmelCase = None
# The default timeout value.
__lowerCAmelCase = timeout
# We use this lock primarily for the lock counter.
__lowerCAmelCase = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__lowerCAmelCase = 0
return None
@property
def _snake_case (self ):
return self._lock_file
@property
def _snake_case (self ):
return self._timeout
@timeout.setter
def _snake_case (self , __lowercase ):
__lowerCAmelCase = float(__lowercase )
return None
def _snake_case (self ):
raise NotImplementedError()
def _snake_case (self ):
raise NotImplementedError()
@property
def _snake_case (self ):
return self._lock_file_fd is not None
def _snake_case (self , __lowercase=None , __lowercase=0.0_5 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
__lowerCAmelCase = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__lowerCAmelCase = id(self )
__lowerCAmelCase = self._lock_file
__lowerCAmelCase = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(__lowercase )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__lowerCAmelCase = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def _snake_case (self , __lowercase=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__lowerCAmelCase = id(self )
__lowerCAmelCase = self._lock_file
logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
__lowerCAmelCase = 0
logger().debug(F"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__(self ):
self.acquire()
return self
def __exit__(self , __lowercase , __lowercase , __lowercase ):
self.release()
return None
def __del__(self ):
self.release(force=__lowercase )
return None
def _snake_case (self , __lowercase , __lowercase ):
__lowerCAmelCase = os.path.basename(__lowercase )
if len(__lowercase ) > max_length and max_length > 0:
__lowerCAmelCase = os.path.dirname(__lowercase )
__lowerCAmelCase = str(hash(__lowercase ) )
__lowerCAmelCase = filename[: max_length - len(__lowercase ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(__lowercase , __lowercase )
else:
return path
class a__ ( __A ):
"""simple docstring"""
def __init__(self , __lowercase , __lowercase=-1 , __lowercase=None ):
from .file_utils import relative_to_absolute_path
super().__init__(__lowercase , timeout=__lowercase , max_filename_length=__lowercase )
__lowerCAmelCase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def _snake_case (self ):
__lowerCAmelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__lowerCAmelCase = os.open(self._lock_file , __lowercase )
except OSError:
pass
else:
try:
msvcrt.locking(__lowercase , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__lowercase )
else:
__lowerCAmelCase = fd
return None
def _snake_case (self ):
__lowerCAmelCase = self._lock_file_fd
__lowerCAmelCase = None
msvcrt.locking(__lowercase , msvcrt.LK_UNLCK , 1 )
os.close(__lowercase )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class a__ ( __A ):
"""simple docstring"""
def __init__(self , __lowercase , __lowercase=-1 , __lowercase=None ):
__lowerCAmelCase = os.statvfs(os.path.dirname(__lowercase ) ).f_namemax
super().__init__(__lowercase , timeout=__lowercase , max_filename_length=__lowercase )
def _snake_case (self ):
__lowerCAmelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__lowerCAmelCase = os.open(self._lock_file , __lowercase )
try:
fcntl.flock(__lowercase , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__lowercase )
else:
__lowerCAmelCase = fd
return None
def _snake_case (self ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
__lowerCAmelCase = self._lock_file_fd
__lowerCAmelCase = None
fcntl.flock(__lowercase , fcntl.LOCK_UN )
os.close(__lowercase )
return None
class a__ ( __A ):
"""simple docstring"""
def _snake_case (self ):
__lowerCAmelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__lowerCAmelCase = os.open(self._lock_file , __lowercase )
except OSError:
pass
else:
__lowerCAmelCase = fd
return None
def _snake_case (self ):
os.close(self._lock_file_fd )
__lowerCAmelCase = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
_UpperCAmelCase : Any = None
if msvcrt:
_UpperCAmelCase : Any = WindowsFileLock
elif fcntl:
_UpperCAmelCase : List[Any] = UnixFileLock
else:
_UpperCAmelCase : List[Any] = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
| 9 |
'''simple docstring'''
from __future__ import annotations
import math
def __magic_name__( lowerCamelCase, lowerCamelCase):
if len(lowerCamelCase) != 2 or len(a[0]) != 2 or len(lowerCamelCase) != 2 or len(b[0]) != 2:
raise Exception('''Matrices are not 2x2''')
__lowerCAmelCase = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def __magic_name__( lowerCamelCase, lowerCamelCase):
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
for row in range(len(lowerCamelCase))
]
def __magic_name__( lowerCamelCase, lowerCamelCase):
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
for row in range(len(lowerCamelCase))
]
def __magic_name__( lowerCamelCase):
if len(lowerCamelCase) % 2 != 0 or len(a[0]) % 2 != 0:
raise Exception('''Odd matrices are not supported!''')
__lowerCAmelCase = len(lowerCamelCase)
__lowerCAmelCase = matrix_length // 2
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase, lowerCamelCase)] for i in range(lowerCamelCase)]
__lowerCAmelCase = [
[a[i][j] for j in range(lowerCamelCase, lowerCamelCase)] for i in range(lowerCamelCase, lowerCamelCase)
]
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase)] for i in range(lowerCamelCase)]
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase)] for i in range(lowerCamelCase, lowerCamelCase)]
return top_left, top_right, bot_left, bot_right
def __magic_name__( lowerCamelCase):
return len(lowerCamelCase), len(matrix[0])
def __magic_name__( lowerCamelCase):
print('''\n'''.join(str(lowerCamelCase) for line in matrix))
def __magic_name__( lowerCamelCase, lowerCamelCase):
if matrix_dimensions(lowerCamelCase) == (2, 2):
return default_matrix_multiplication(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase)
__lowerCAmelCase = actual_strassen(lowerCamelCase, matrix_subtraction(lowerCamelCase, lowerCamelCase))
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase)
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase)
__lowerCAmelCase = actual_strassen(lowerCamelCase, matrix_subtraction(lowerCamelCase, lowerCamelCase))
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase))
__lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase))
__lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase))
__lowerCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase), lowerCamelCase)
__lowerCAmelCase = matrix_addition(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = matrix_addition(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase), lowerCamelCase)
# construct the new matrix from our 4 quadrants
__lowerCAmelCase = []
for i in range(len(lowerCamelCase)):
new_matrix.append(top_left[i] + top_right[i])
for i in range(len(lowerCamelCase)):
new_matrix.append(bot_left[i] + bot_right[i])
return new_matrix
def __magic_name__( lowerCamelCase, lowerCamelCase):
if matrix_dimensions(lowerCamelCase)[1] != matrix_dimensions(lowerCamelCase)[0]:
__lowerCAmelCase = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
F"""Matrix A: {matrixa}\n"""
F"""Matrix B: {matrixa}"""
)
raise Exception(lowerCamelCase)
__lowerCAmelCase = matrix_dimensions(lowerCamelCase)
__lowerCAmelCase = matrix_dimensions(lowerCamelCase)
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__lowerCAmelCase = max(*lowerCamelCase, *lowerCamelCase)
__lowerCAmelCase = int(math.pow(2, math.ceil(math.loga(lowerCamelCase))))
__lowerCAmelCase = matrixa
__lowerCAmelCase = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0, lowerCamelCase):
if i < dimensiona[0]:
for _ in range(dimensiona[1], lowerCamelCase):
new_matrixa[i].append(0)
else:
new_matrixa.append([0] * maxim)
if i < dimensiona[0]:
for _ in range(dimensiona[1], lowerCamelCase):
new_matrixa[i].append(0)
else:
new_matrixa.append([0] * maxim)
__lowerCAmelCase = actual_strassen(lowerCamelCase, lowerCamelCase)
# Removing the additional zeros
for i in range(0, lowerCamelCase):
if i < dimensiona[0]:
for _ in range(dimensiona[1], lowerCamelCase):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
_UpperCAmelCase : List[str] = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
_UpperCAmelCase : Optional[Any] = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
print(strassen(matrixa, matrixa))
| 9 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
_UpperCAmelCase : Tuple = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
_UpperCAmelCase : Tuple = {
"""allenai/longformer-base-4096""": 4_0_9_6,
"""allenai/longformer-large-4096""": 4_0_9_6,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4_0_9_6,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4_0_9_6,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __magic_name__( ):
__lowerCAmelCase = (
list(range(ord('''!'''), ord('''~''') + 1)) + list(range(ord('''¡'''), ord('''¬''') + 1)) + list(range(ord('''®'''), ord('''ÿ''') + 1))
)
__lowerCAmelCase = bs[:]
__lowerCAmelCase = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase)
cs.append(2**8 + n)
n += 1
__lowerCAmelCase = [chr(lowerCamelCase) for n in cs]
return dict(zip(lowerCamelCase, lowerCamelCase))
def __magic_name__( lowerCamelCase):
__lowerCAmelCase = set()
__lowerCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
__lowerCAmelCase = char
return pairs
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : Any = VOCAB_FILES_NAMES
__UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : int = ['input_ids', 'attention_mask']
def __init__(self , __lowercase , __lowercase , __lowercase="replace" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=False , **__lowercase , ):
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else bos_token
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else eos_token
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else sep_token
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else cls_token
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else unk_token
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
super().__init__(
errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , **__lowercase , )
with open(__lowercase , encoding='''utf-8''' ) as vocab_handle:
__lowerCAmelCase = json.load(__lowercase )
__lowerCAmelCase = {v: k for k, v in self.encoder.items()}
__lowerCAmelCase = errors # how to handle errors in decoding
__lowerCAmelCase = bytes_to_unicode()
__lowerCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(__lowercase , encoding='''utf-8''' ) as merges_handle:
__lowerCAmelCase = merges_handle.read().split('''\n''' )[1:-1]
__lowerCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
__lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__lowerCAmelCase = {}
__lowerCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowerCAmelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def _snake_case (self ):
return len(self.encoder )
def _snake_case (self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case (self , __lowercase ):
if token in self.cache:
return self.cache[token]
__lowerCAmelCase = tuple(__lowercase )
__lowerCAmelCase = get_pairs(__lowercase )
if not pairs:
return token
while True:
__lowerCAmelCase = min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCAmelCase , __lowerCAmelCase = bigram
__lowerCAmelCase = []
__lowerCAmelCase = 0
while i < len(__lowercase ):
try:
__lowerCAmelCase = word.index(__lowercase , __lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCAmelCase = j
if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCAmelCase = tuple(__lowercase )
__lowerCAmelCase = new_word
if len(__lowercase ) == 1:
break
else:
__lowerCAmelCase = get_pairs(__lowercase )
__lowerCAmelCase = ''' '''.join(__lowercase )
__lowerCAmelCase = word
return word
def _snake_case (self , __lowercase ):
__lowerCAmelCase = []
for token in re.findall(self.pat , __lowercase ):
__lowerCAmelCase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowercase ).split(''' ''' ) )
return bpe_tokens
def _snake_case (self , __lowercase ):
return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) )
def _snake_case (self , __lowercase ):
return self.decoder.get(__lowercase )
def _snake_case (self , __lowercase ):
__lowerCAmelCase = ''''''.join(__lowercase )
__lowerCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _snake_case (self , __lowercase , __lowercase = None ):
if not os.path.isdir(__lowercase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCAmelCase = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCAmelCase = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase ) + '''\n''' )
__lowerCAmelCase = 0
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowercase : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
__lowerCAmelCase = token_index
writer.write(''' '''.join(__lowercase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _snake_case (self , __lowercase , __lowercase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
__lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case (self , __lowercase , __lowercase = None , __lowercase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase )
if token_ids_a is None:
return [1] + ([0] * len(__lowercase )) + [1]
return [1] + ([0] * len(__lowercase )) + [1, 1] + ([0] * len(__lowercase )) + [1]
def _snake_case (self , __lowercase , __lowercase = None ):
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def _snake_case (self , __lowercase , __lowercase=False , **__lowercase ):
        # Byte-level BPE convention: optionally prepend a space before tokenization so
        # the first word tokenizes the same as a mid-sentence word.
        # NOTE(review): mangled identifiers — `kwargs`/`text`/`add_prefix_space` are
        # read but never bound under those names in this block.
        __lowerCAmelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(__lowercase ) > 0 and not text[0].isspace()):
            __lowerCAmelCase = ''' ''' + text
        return (text, kwargs)
| 9 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class a__ ( unittest.TestCase ):
    """simple docstring"""
    # Tests for CLIPProcessor: fixture setup, save/load round-trips, and parity
    # between the processor and its underlying tokenizer / image processor.
    # NOTE(review): identifiers are machine-mangled throughout — names read below
    # (vocab_tokens, processor_slow, tokenizer, image_input, inputs, ...) are bound
    # as `__lowerCAmelCase` instead, so this module cannot run as written.
    def _snake_case (self ):
        # Build a temp dir holding a tiny BPE vocab/merges pair and an
        # image-processor JSON config used by the from_pretrained helpers below.
        __lowerCAmelCase = tempfile.mkdtemp()
        # fmt: off
        __lowerCAmelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
        __lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        __lowerCAmelCase = {'''unk_token''': '''<unk>'''}
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(__lowercase ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(__lowercase ) )
        __lowerCAmelCase = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        }
        __lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(__lowercase , __lowercase )
    def _snake_case (self , **__lowercase ):
        # Slow tokenizer loaded from the fixture dir.
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
    def _snake_case (self , **__lowercase ):
        # Fast (Rust) tokenizer loaded from the fixture dir.
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase )
    def _snake_case (self , **__lowercase ):
        # Image processor loaded from the fixture dir.
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowercase )
    def _snake_case (self ):
        # Remove the fixture dir after each test.
        shutil.rmtree(self.tmpdirname )
    def _snake_case (self ):
        # A single random RGB PIL image (channels-first array moved to channels-last).
        __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def _snake_case (self ):
        # Save/load round-trip with both slow and fast tokenizers preserves
        # vocabularies and image-processor configuration.
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = self.get_rust_tokenizer()
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        processor_slow.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase )
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        processor_fast.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __lowercase )
        self.assertIsInstance(processor_fast.tokenizer , __lowercase )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __lowercase )
        self.assertIsInstance(processor_fast.image_processor , __lowercase )
    def _snake_case (self ):
        # from_pretrained with extra kwargs overrides tokenizer/image-processor settings.
        __lowerCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        __lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 )
        __lowerCAmelCase = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __lowercase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __lowercase )
    def _snake_case (self ):
        # Processor(images=...) matches the bare image processor output.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' )
        __lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def _snake_case (self ):
        # Processor(text=...) matches the bare tokenizer output.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = '''lower newer'''
        __lowerCAmelCase = processor(text=__lowercase )
        __lowerCAmelCase = tokenizer(__lowercase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _snake_case (self ):
        # Text+image call yields the expected keys; a call with no input raises.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = '''lower newer'''
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__lowercase , images=__lowercase )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(__lowercase ):
            processor()
    def _snake_case (self ):
        # batch_decode is forwarded to the tokenizer unchanged.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCAmelCase = processor.batch_decode(__lowercase )
        __lowerCAmelCase = tokenizer.batch_decode(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
    def _snake_case (self ):
        # Output keys match processor.model_input_names.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
        __lowerCAmelCase = '''lower newer'''
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__lowercase , images=__lowercase )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 9 | 1 |
'''simple docstring'''
from math import factorial
def __magic_name__( lowerCamelCase = 1_0_0):
return sum(int(lowerCamelCase) for x in str(factorial(lowerCamelCase)))
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 9 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class a__ ( __A ):
    """simple docstring"""
    # Affine-transformed distribution: y = loc + scale * x over a base distribution.
    # NOTE(review): identifiers look machine-mangled — `self.scale`/`self.loc` are
    # read but never assigned (the `__lowerCAmelCase` lines were presumably
    # `self.scale = ...` / `self.loc = ...`); the base class `__A` is also unresolved.
    def __init__(self , __lowercase , __lowercase=None , __lowercase=None , __lowercase=0 ):
        __lowerCAmelCase = 1.0 if scale is None else scale
        __lowerCAmelCase = 0.0 if loc is None else loc
        super().__init__(__lowercase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__lowercase )] )
    @property
    def _snake_case (self ):
        # Mean transforms affinely: E[y] = scale * E[x] + loc.
        return self.base_dist.mean * self.scale + self.loc
    @property
    def _snake_case (self ):
        # Variance scales by scale**2 under an affine map.
        return self.base_dist.variance * self.scale**2
    @property
    def _snake_case (self ):
        # Standard deviation.
        return self.variance.sqrt()
class a__ ( nn.Module ):
    """simple docstring"""
    # Projects hidden features to a distribution's parameters: one nn.Linear per
    # named parameter in `args_dim`, followed by a domain map constraining values.
    # NOTE(review): mangled identifiers — `self.proj`/`self.domain_map` are read but
    # the assignments bind `__lowerCAmelCase` instead.
    def __init__(self , __lowercase , __lowercase , __lowercase , **__lowercase ):
        super().__init__(**__lowercase )
        __lowerCAmelCase = args_dim
        __lowerCAmelCase = nn.ModuleList([nn.Linear(__lowercase , __lowercase ) for dim in args_dim.values()] )
        __lowerCAmelCase = domain_map
    def _snake_case (self , __lowercase ):
        # Apply every per-parameter projection, then constrain outputs to their domain.
        __lowerCAmelCase = [proj(__lowercase ) for proj in self.proj]
        return self.domain_map(*__lowercase )
class a__ ( nn.Module ):
    """simple docstring"""
    # Wraps an arbitrary callable as an nn.Module so it can live inside a Sequential.
    # NOTE(review): `self.function` is read but the assignment binds `__lowerCAmelCase`.
    def __init__(self , __lowercase ):
        super().__init__()
        __lowerCAmelCase = function
    def _snake_case (self , __lowercase , *__lowercase ):
        # Forward all arguments to the wrapped callable.
        return self.function(__lowercase , *__lowercase )
class a__ :
    """simple docstring"""
    # Abstract description of a parametric output distribution. Subclasses provide
    # `args_dim` (parameter name -> width), `distribution_class`, and `domain_map`.
    # NOTE(review): local/attribute names are machine-mangled throughout (`dim`,
    # `distr` read but never bound); compare with the upstream DistributionOutput.
    __UpperCamelCase : type
    __UpperCamelCase : int
    __UpperCamelCase : Dict[str, int]
    def __init__(self , __lowercase = 1 ):
        # `dim` multiplies each per-parameter width for multivariate outputs.
        __lowerCAmelCase = dim
        __lowerCAmelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _snake_case (self , __lowercase ):
        # Build the base distribution; wrap in Independent for dim > 1 so the last
        # axis is treated as the event dimension.
        if self.dim == 1:
            return self.distribution_class(*__lowercase )
        else:
            return Independent(self.distribution_class(*__lowercase ) , 1 )
    def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , ):
        # Optionally rescale/shift the base distribution via an affine transform.
        __lowerCAmelCase = self._base_distribution(__lowercase )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(__lowercase , loc=__lowercase , scale=__lowercase , event_dim=self.event_dim )
    @property
    def _snake_case (self ):
        # Event shape: scalar for dim == 1, else (dim,).
        return () if self.dim == 1 else (self.dim,)
    @property
    def _snake_case (self ):
        return len(self.event_shape )
    @property
    def _snake_case (self ):
        # Padding value for invalid/masked targets.
        return 0.0
    def _snake_case (self , __lowercase ):
        # Projection from network features to the distribution's parameters.
        return ParameterProjection(
            in_features=__lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def _snake_case (self , *__lowercase ):
        raise NotImplementedError()
    @staticmethod
    def _snake_case (__lowercase ):
        # "Squareplus" smooth positivity map: (x + sqrt(x^2 + 4)) / 2.
        return (x + torch.sqrt(torch.square(__lowercase ) + 4.0 )) / 2.0
class a__ ( __A ):
    """simple docstring"""
    # Student's t output: parameters df (>2 via squareplus shift), loc, scale (>0).
    # NOTE(review): locals `scale`/`df` read but bound as `__lowerCAmelCase` — mangled.
    __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    __UpperCamelCase : type = StudentT
    @classmethod
    def _snake_case (cls , __lowercase , __lowercase , __lowercase ):
        # Map unconstrained network outputs into each parameter's valid domain.
        __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
        __lowerCAmelCase = 2.0 + cls.squareplus(__lowercase )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class a__ ( __A ):
    """simple docstring"""
    # Normal output: loc unconstrained, scale made strictly positive via squareplus.
    # NOTE(review): `scale`/`loc` read but bound as `__lowerCAmelCase` — mangled.
    __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1}
    __UpperCamelCase : type = Normal
    @classmethod
    def _snake_case (cls , __lowercase , __lowercase ):
        __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class a__ ( __A ):
    """simple docstring"""
    # Negative-binomial output: total_count made positive via squareplus,
    # logits unconstrained.
    # NOTE(review): identifiers mangled — `total_count`/`logits` read but bound
    # as `__lowerCAmelCase`.
    __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1}
    __UpperCamelCase : type = NegativeBinomial
    @classmethod
    def _snake_case (cls , __lowercase , __lowercase ):
        __lowerCAmelCase = cls.squareplus(__lowercase )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _snake_case (self , __lowercase ):
        # Build the base distribution; Independent wraps the event axis for dim > 1.
        __lowerCAmelCase , __lowerCAmelCase = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=__lowercase , logits=__lowercase )
        else:
            return Independent(self.distribution_class(total_count=__lowercase , logits=__lowercase ) , 1 )
    def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None ):
        # Scaling is folded into the logits rather than an affine transform.
        __lowerCAmelCase , __lowerCAmelCase = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 9 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def __magic_name__( lowerCamelCase):
if not postfix_notation:
return 0
__lowerCAmelCase = {'''+''', '''-''', '''*''', '''/'''}
__lowerCAmelCase = []
for token in postfix_notation:
if token in operations:
__lowerCAmelCase , __lowerCAmelCase = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b)
elif token == "-":
stack.append(a - b)
elif token == "*":
stack.append(a * b)
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1)
else:
stack.append(a // b)
else:
stack.append(int(lowerCamelCase))
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a__ ( __A ):
    """Tool that answers a natural-language question about a document image using
    a Donut (VisionEncoderDecoder) model fine-tuned on DocVQA.

    Fixes relative to the mangled original: restored local variable names,
    de-duplicated the invalid repeated `__lowercase` parameters, and corrected
    the garbled `tokenajson` call to the processor's `token2json`.
    """

    __UpperCamelCase : Tuple = 'naver-clova-ix/donut-base-finetuned-docvqa'
    __UpperCamelCase : List[str] = (
        'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    __UpperCamelCase : Optional[int] = 'document_qa'
    __UpperCamelCase : Optional[int] = AutoProcessor
    __UpperCamelCase : Tuple = VisionEncoderDecoderModel
    __UpperCamelCase : Any = ['image', 'text']
    __UpperCamelCase : Optional[Any] = ['text']

    def __init__(self , *args , **kwargs ):
        # Pillow is required to handle the input document image.
        if not is_vision_available():
            raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
        super().__init__(*args , **kwargs )

    def _snake_case (self , document , question ):
        # Encode: build the Donut DocVQA task prompt and preprocess both modalities.
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        prompt = task_prompt.replace('''{user_input}''' , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors='''pt''' ).input_ids
        pixel_values = self.pre_processor(document , return_tensors='''pt''' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def _snake_case (self , inputs ):
        # Forward: greedy generation (num_beams=1) with caching; the unk token is banned.
        return self.model.generate(
            inputs['''pixel_values'''].to(self.device ) ,
            decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) ,
            max_length=self.model.decoder.config.max_position_embeddings ,
            early_stopping=True ,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id ,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id ,
            use_cache=True ,
            num_beams=1 ,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] ,
            return_dict_in_generate=True ,
        ).sequences

    def _snake_case (self , outputs ):
        # Decode: strip special tokens and the leading task tag, then parse the
        # Donut markup into JSON and return the answer field.
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
        sequence = re.sub(R'''<.*?>''' , '''''' , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
| 9 | 1 |
'''simple docstring'''
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __magic_name__( *lowerCamelCase):
    """Release references to the given objects and empty the accelerator cache.

    Each positional argument is replaced by ``None`` in the returned list so the
    caller can rebind its variables, e.g. ``a, b = release_memory(a, b)``.

    Fix: the original bound a mangled name instead of ``objects[i] = None``, so
    nothing was actually released.
    """
    objects = list(lowerCamelCase)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    # Empty whichever accelerator cache is present (XPU / NPU / CUDA).
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def __magic_name__( lowerCamelCase):
__lowerCAmelCase = [
'''CUDA out of memory.''', # CUDA OOM
'''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
'''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
]
if isinstance(lowerCamelCase, lowerCamelCase) and len(exception.args) == 1:
return any(err in exception.args[0] for err in _statements)
return False
def __magic_name__( function = None, starting_batch_size = 1_2_8):
    """Decorator that retries `function`, halving its first argument (the batch
    size) after every OOM failure, until the call succeeds or the size reaches 0.

    Usable with or without arguments::

        @find_executable_batch_size(starting_batch_size=64)
        def train(batch_size, ...): ...

    Fix: restored the mangled bindings (`batch_size`, `params`, `arg_str`) and
    the no-arg-decorator partial, which previously referenced unbound names.
    """
    if function is None:
        # Called as `@decorator(starting_batch_size=...)` — return a partial
        # awaiting the function.
        return functools.partial(__magic_name__, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: the decorator supplies batch_size itself.
        if len(params) < (len(args) + 1):
            arg_str = ''', '''.join([F"""{arg}={value}""" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
                F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""")
        while True:
            if batch_size == 0:
                raise RuntimeError('''No executable batch size found, reached zero.''')
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                # Only OOM-style failures trigger a retry at half the batch size.
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
| 9 |
'''simple docstring'''
def __magic_name__( lowerCamelCase):
__lowerCAmelCase = 1
__lowerCAmelCase = 2
while i * i <= n:
__lowerCAmelCase = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def __magic_name__( ):
__lowerCAmelCase = 1
__lowerCAmelCase = 1
while True:
i += 1
t_num += i
if count_divisors(lowerCamelCase) > 5_0_0:
break
return t_num
if __name__ == "__main__":
print(solution())
| 9 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import scaffolding for the vision-encoder-decoder subpackage: heavy
# framework-specific modules are only imported under TYPE_CHECKING or on first
# attribute access via _LazyModule.
# NOTE(review): the names are machine-mangled — every binding below uses
# `_UpperCAmelCase`, yet `_import_structure` is read at the bottom and the
# framework branches should extend the same dict; compare with the upstream
# transformers `__init__.py` for this subpackage.
_UpperCAmelCase : List[str] = {
    """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
# Register the PyTorch model only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : Dict = ["""VisionEncoderDecoderModel"""]
# Register the TensorFlow model only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : Tuple = ["""TFVisionEncoderDecoderModel"""]
# Register the Flax model only when Flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : List[str] = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    # At runtime, replace this module with a lazy proxy.
    import sys
    _UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 9 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class a__ ( unittest.TestCase ):
    """simple docstring"""
    # Tests for generation.DisjunctiveConstraint: input validation, nested-subset
    # rejection, and step-by-step progress/completion semantics.
    # NOTE(review): locals are machine-mangled — `dc`, `stepped`, `completed`,
    # `reset`, `desired` are read but bound as `__lowerCAmelCase`; the tests
    # cannot run as written.
    def _snake_case (self ):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        __lowerCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
        __lowerCAmelCase = DisjunctiveConstraint(__lowercase )
        self.assertTrue(isinstance(dc.token_ids , __lowercase ) )
        with self.assertRaises(__lowercase ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(__lowercase ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def _snake_case (self ):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        __lowerCAmelCase = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(__lowercase ):
            DisjunctiveConstraint(__lowercase )  # fails here
    def _snake_case (self ):
        # Stepping 1, 2, 3 should progress and then complete on the [1, 2, 3] branch.
        __lowerCAmelCase = [[1, 2, 3], [1, 2, 4]]
        __lowerCAmelCase = DisjunctiveConstraint(__lowercase )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 )
        __lowerCAmelCase = stepped is True and completed is False and reset is False
        self.assertTrue(__lowercase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 )
        __lowerCAmelCase = stepped is True and completed is False and reset is False
        self.assertTrue(__lowercase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(3 )
        __lowerCAmelCase = stepped is True and completed is True and reset is False
        self.assertTrue(__lowercase )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def _snake_case (self ):
        # Branching progress, completion on a longer branch, then reset and
        # completion on a shorter branch; remaining() counts tokens still needed.
        __lowerCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        __lowerCAmelCase = DisjunctiveConstraint(__lowercase )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 9 | 1 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Load the iris dataset and split it into train/test partitions for the
# KNN demo below.
# NOTE(review): the repeated `_UpperCAmelCase` bindings overwrite each other and
# the names read here (`data`, `X`, `y`) and below (`X_train`, `classes`, ...)
# are never bound — this module is machine-mangled and will not run as written.
_UpperCAmelCase : List[Any] = datasets.load_iris()
_UpperCAmelCase : Dict = np.array(data["""data"""])
_UpperCAmelCase : int = np.array(data["""target"""])
_UpperCAmelCase : str = data["""target_names"""]
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = train_test_split(X, y)
def __magic_name__( lowerCamelCase, lowerCamelCase):
return np.linalg.norm(np.array(lowerCamelCase) - np.array(lowerCamelCase))
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=5):
__lowerCAmelCase = zip(lowerCamelCase, lowerCamelCase)
# List of distances of all points from the point to be classified
__lowerCAmelCase = []
for data_point in data:
__lowerCAmelCase = euclidean_distance(data_point[0], lowerCamelCase)
distances.append((distance, data_point[1]))
# Choosing 'k' points with the least distances.
__lowerCAmelCase = [i[1] for i in sorted(lowerCamelCase)[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__lowerCAmelCase = Counter(lowerCamelCase).most_common(1)[0][0]
return classes[result]
if __name__ == "__main__":
    # Demo: classify one iris sample.
    # NOTE(review): `classifier`, `X_train`, `y_train` and `classes` are never
    # bound under those names in this mangled module — confirm against the
    # original script before running.
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 9 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_UpperCAmelCase : List[str] = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_UpperCAmelCase : str = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_UpperCAmelCase : Tuple = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ):
if label_map is not None:
for old_id, new_id in label_map.items():
__lowerCAmelCase = new_id
# turn into Numpy arrays
__lowerCAmelCase = np.array(lowerCamelCase)
__lowerCAmelCase = np.array(lowerCamelCase)
if reduce_labels:
__lowerCAmelCase = 2_5_5
__lowerCAmelCase = label - 1
__lowerCAmelCase = 2_5_5
__lowerCAmelCase = label != ignore_index
__lowerCAmelCase = np.not_equal(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = pred_label[mask]
__lowerCAmelCase = np.array(lowerCamelCase)[mask]
__lowerCAmelCase = pred_label[pred_label == label]
__lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0]
__lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0]
__lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0]
__lowerCAmelCase = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def __magic_name__( results, gt_seg_maps, num_labels, ignore_index, label_map = None, reduce_labels = False, ):
    """Accumulate per-class intersection/union areas over a batch of maps.

    Args:
        results: iterable of predicted segmentation maps.
        gt_seg_maps: iterable of ground-truth segmentation maps.
        num_labels / ignore_index / label_map / reduce_labels: see
            `intersect_and_union`.

    Returns:
        Tuple of four float64 np.ndarray of shape (num_labels,):
        (total_area_intersect, total_area_union, total_area_pred_label, total_area_label).

    Fixes: restored the mangled identifiers (duplicate `lowerCamelCase`
    parameters were a SyntaxError) and the garbled dtype `np.floataa`
    (upstream uses np.float64).
    """
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __magic_name__( results, gt_seg_maps, num_labels, ignore_index, nan_to_num = None, label_map = None, reduce_labels = False, ):
    """Compute mean IoU and accuracy metrics over a batch of segmentation maps.

    Args:
        results / gt_seg_maps / num_labels / ignore_index / label_map /
        reduce_labels: see `total_intersect_and_union`.
        nan_to_num: if given, NaNs in every metric are replaced by this value.

    Returns:
        dict with keys "mean_iou", "mean_accuracy", "overall_accuracy" (floats)
        and "per_category_iou", "per_category_accuracy" (arrays of shape
        (num_labels,)).

    Fix: restored the mangled identifiers (duplicate `lowerCamelCase`
    parameters were a SyntaxError; the metric dict keys were never assigned).
    """
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
    """simple docstring"""
    # datasets.Metric wrapper around the functional `mean_iou` implementation above.
    def _snake_case (self ):
        # Declare the metric signature: nested sequences of uint16 segmentation maps.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
                    '''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
                } ) , reference_urls=[
                '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
            ] , )
    def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = False , ):
        # Delegate to the functional implementation.
        # NOTE(review): duplicated `__lowercase` parameters (a SyntaxError) and the
        # unbound `iou_result` indicate machine-mangled identifiers; compare with the
        # upstream mean_iou metric before relying on this.
        __lowerCAmelCase = mean_iou(
            results=__lowercase , gt_seg_maps=__lowercase , num_labels=__lowercase , ignore_index=__lowercase , nan_to_num=__lowercase , label_map=__lowercase , reduce_labels=__lowercase , )
        return iou_result
| 9 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.