"""Benchmarking the library on inference and training in TensorFlow."""

import random
import timeit
from functools import wraps
from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
    Benchmark,
    Memory,
    MemorySummary,
    measure_peak_memory_cpu,
    start_memory_tracing,
    stop_memory_tracing,
)


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
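

# Illustrative usage of the decorator factory above (a sketch, not part of the
# original module; `model` and `input_ids` are placeholders):
#
#     @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#     def forward():
#         return model(input_ids)
#
# With `do_eager_mode=False`, the wrapped call is compiled via `tf.function`;
# with `do_eager_mode=True`, XLA must stay disabled, as enforced above.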

class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional repeats to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat,
                # min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))

import dataclasses  # noqa
import io  # noqa
import itertools  # noqa
import json  # noqa
import os  # noqa
import unittest  # noqa
from copy import deepcopy  # noqa

from parameterized import parameterized  # noqa
from transformers import TrainingArguments, is_torch_available  # noqa
from transformers.deepspeed import is_deepspeed_available  # noqa
from transformers.file_utils import WEIGHTS_NAME  # noqa
from transformers.testing_utils import (  # noqa
    CaptureLogger,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import set_seed  # noqa


set_seed(42)

models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))


@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # all we check for now is that the training process didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, quality_checks=True, fp16=True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
"""Solve Ohm's law for the single unknown among voltage, current and resistance."""
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
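

# Worked example: with voltage=10 V and resistance=5 ohm, the zero argument is
# the one being solved for, so the function returns current = V / R.
#
#     >>> ohms_law(voltage=10, current=0, resistance=5)
#     {'current': 2.0}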
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan (L1) distance between two points in n-dimensional space."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
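

# Worked example for both implementations: |1 - 2| + |1 - 2| = 2.
#
#     >>> manhattan_distance([1, 1], [2, 2])
#     2.0
#     >>> manhattan_distance_one_liner([1, 1], [2, 2])
#     2.0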
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    r"""
    Constructs a MarkupLM feature extractor. This can be used to get a list of nodes and corresponding xpaths from
    HTML strings.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
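
    # Example of the construction above (illustrative values): tags
    # ["html", "body", "div"] with subscripts [0, 0, 2] yield "/html/body/div[2]";
    # a subscript of 0 means the tag is the only sibling with that name, so no
    # index is emitted.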
    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
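

# Minimal usage sketch (hypothetical HTML input, not from the original file):
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#     # encoding["nodes"]  -> [["Hello world"]]
#     # encoding["xpaths"] -> [["/html/body/p"]]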
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_a : str = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : str ,_lowerCamelCase : Dict ,_lowerCamelCase : str=None ) -> List[str]:
# Initialise PyTorch model
_lowerCAmelCase : str = XLNetConfig.from_json_file(_lowerCamelCase )
_lowerCAmelCase : int = finetuning_task.lower() if finetuning_task is not None else """"""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
_lowerCAmelCase : str = finetuning_task
_lowerCAmelCase : Optional[int] = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCAmelCase : Any = XLNetForSequenceClassification(_lowerCamelCase )
elif "squad" in finetuning_task:
_lowerCAmelCase : Optional[int] = finetuning_task
_lowerCAmelCase : List[str] = XLNetForQuestionAnswering(_lowerCamelCase )
else:
_lowerCAmelCase : List[str] = XLNetLMHeadModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Save pytorch-model
_lowerCAmelCase : str = os.path.join(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Any = os.path.join(_lowerCamelCase ,_lowerCamelCase )
print(f"Save PyTorch model to {os.path.abspath(_lowerCamelCase )}" )
torch.save(model.state_dict() ,_lowerCamelCase )
print(f"Save configuration file to {os.path.abspath(_lowerCamelCase )}" )
with open(_lowerCamelCase ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
_a : List[Any] = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
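

# Example invocation (a sketch; the paths and the script filename are placeholders,
# assuming this file is saved as convert_xlnet_original_tf_checkpoint_to_pytorch.py):
#
#     python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./xlnet_model.ckpt \
#         --xlnet_config_file ./xlnet_config.json \
#         --pytorch_dump_folder_path ./xlnet-out \
#         --finetuning_task sts-b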
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler similarity between two strings."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
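

# Worked example: for "hello" vs "world" only "l" is matched within the search
# window, so jaro = (1/5 + 1/5 + 1/1) / 3 = 0.4666..., and with no common prefix
# the Winkler bonus is zero.
#
#     >>> jaro_winkler("hello", "world")
#     0.4666666666666666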
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
lowercase_ = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
lowercase_ = "UperNetConfig"
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a = 0 , _a = False , _a = 1 , ):
super().__init__()
__a = nn.Convad(
in_channels=_a , out_channels=_a , kernel_size=_a , padding=_a , bias=_a , dilation=_a , )
__a = nn.BatchNormad(_a )
__a = nn.ReLU()
def __UpperCAmelCase ( self , _a ):
__a = self.conv(_a )
__a = self.batch_norm(_a )
__a = self.activation(_a )
return output
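

# Shape sketch for the block above (illustrative values): a (1, 64, 32, 32) input
# passed through UperNetConvModule(64, 128, kernel_size=3, padding=1) comes out as
# (1, 128, 32, 32); conv with this padding, batch norm and ReLU all preserve the
# spatial size.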

class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs


class UperNetHead(nn.Module):
    """
    Decode head implementing Unified Perceptual Parsing (UPerNet).
    """

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)

        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output


class UperNetFCNHead(nn.Module):
    """
    Auxiliary fully convolutional (FCN) decode head.
    """

    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output


class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value


UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
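

# Worked examples: every closing bracket must match the most recent unmatched
# opening bracket on the stack.
#
#     >>> is_balanced("([])")
#     True
#     >>> is_balanced("(]")
#     False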
def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
SCREAMING_SNAKE_CASE__ = "http://www.mocksite.com/file1.txt"
SCREAMING_SNAKE_CASE__ = "\"text\": [\"foo\", \"foo\"]"
SCREAMING_SNAKE_CASE__ = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class lowercase :
_SCREAMING_SNAKE_CASE = 200
_SCREAMING_SNAKE_CASE = {'Content-Length': '100'}
_SCREAMING_SNAKE_CASE = {}
def _snake_case ( self , **lowercase ) -> List[str]:
return [bytes(lowercase , """utf-8""" )]
def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
import requests
monkeypatch.setattr(SCREAMING_SNAKE_CASE , """request""" , SCREAMING_SNAKE_CASE )
lowerCAmelCase = URL
if issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = url
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = [url]
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = {"""train""": url}
lowerCAmelCase = """dummy"""
lowerCAmelCase = """downloads"""
lowerCAmelCase = tmp_path
lowerCAmelCase = DownloadConfig(
cache_dir=os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , use_etag=SCREAMING_SNAKE_CASE , )
lowerCAmelCase = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE , download_config=SCREAMING_SNAKE_CASE )
lowerCAmelCase = dl_manager.download(SCREAMING_SNAKE_CASE )
lowerCAmelCase = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = [downloaded_paths]
lowerCAmelCase = [urls]
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
assert "train" in downloaded_paths.keys()
lowerCAmelCase = downloaded_paths.values()
lowerCAmelCase = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowerCAmelCase = Path(SCREAMING_SNAKE_CASE )
lowerCAmelCase = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowerCAmelCase = downloaded_path.read_text()
assert content == CONTENT
lowerCAmelCase = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
lowerCAmelCase = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase = str(SCREAMING_SNAKE_CASE )
if issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = filename
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = [filename]
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = {"""train""": filename}
lowerCAmelCase = """dummy"""
lowerCAmelCase = xz_file.parent
lowerCAmelCase = """extracted"""
lowerCAmelCase = DownloadConfig(
cache_dir=SCREAMING_SNAKE_CASE , use_etag=SCREAMING_SNAKE_CASE , )
lowerCAmelCase = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE , download_config=SCREAMING_SNAKE_CASE )
lowerCAmelCase = dl_manager.extract(SCREAMING_SNAKE_CASE )
lowerCAmelCase = paths
for extracted_paths in [extracted_paths]:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = [extracted_paths]
lowerCAmelCase = [paths]
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
assert "train" in extracted_paths.keys()
lowerCAmelCase = extracted_paths.values()
lowerCAmelCase = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowerCAmelCase = Path(SCREAMING_SNAKE_CASE )
lowerCAmelCase = extracted_path.parts
assert parts[-1] == hash_url_to_filename(SCREAMING_SNAKE_CASE , etag=SCREAMING_SNAKE_CASE )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowerCAmelCase = extracted_path.read_text()
lowerCAmelCase = text_file.read_text()
assert extracted_file_content == expected_file_content
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(SCREAMING_SNAKE_CASE , start=1 ):
lowerCAmelCase = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase = request.getfixturevalue(SCREAMING_SNAKE_CASE )
lowerCAmelCase = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE ) , start=1 ):
_test_jsonl(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase = request.getfixturevalue(SCREAMING_SNAKE_CASE )
lowerCAmelCase = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE ) , start=1 ):
_test_jsonl(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert num_tar == 1
assert num_jsonl == 2
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(SCREAMING_SNAKE_CASE ) , start=1 ):
assert os.path.basename(SCREAMING_SNAKE_CASE ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : str = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 0 |
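# Context sketch (simplified, not the actual transformers implementation): the
# _import_structure dict above maps submodule names to exported symbols, and a
# lazy module resolves an attribute by importing its submodule on first access.
import importlib
import types

class _SimpleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }
    def __getattr__(self, attr):
        # Import the owning submodule only when the attribute is first requested.
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)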
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=1000 , ) -> Optional[int]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs( self ) -> Dict:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config( self ) -> List[Any]:
        '''simple docstring'''
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ) -> Optional[Any]:
        '''simple docstring'''
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ) -> Union[str, Any]:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ) -> List[Any]:
        '''simple docstring'''
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': LiltModel,
            'question-answering': LiltForQuestionAnswering,
            'text-classification': LiltForSequenceClassification,
            'token-classification': LiltForTokenClassification,
            'zero-shot': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
def A ( self : Tuple , _a : Union[str, Any] , _a : int , _a : List[Any] , _a : Optional[int] , _a : int ) -> Optional[Any]:
'''simple docstring'''
return True
def A ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =LiltModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , hidden_size=37 )
def A ( self : Dict ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def A ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_SCREAMING_SNAKE_CASE =type
self.model_tester.create_and_check_model(*_a )
def A ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
def A ( self : List[Any] ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
@slow
def A ( self : Optional[int] ) -> Dict:
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =LiltModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
@slow
class LiltModelIntegrationTest( unittest.TestCase ):
def A ( self : Dict ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(_a )
_SCREAMING_SNAKE_CASE =torch.tensor([[1, 2]] , device=_a )
_SCREAMING_SNAKE_CASE =torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(input_ids=_a , bbox=_a )
_SCREAMING_SNAKE_CASE =torch.Size([1, 2, 768] )
_SCREAMING_SNAKE_CASE =torch.tensor(
[[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=_a , )
self.assertTrue(outputs.last_hidden_state.shape , _a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _a , atol=1e-3 ) )
| 47 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_whisper_fast'] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_whisper'] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_whisper'] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_whisper'] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 0 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple( _UpperCAmelCase ):
    if isinstance(_UpperCAmelCase , collections.abc.Iterable ):
        return _UpperCAmelCase
    return (_UpperCAmelCase, _UpperCAmelCase)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model( self , vision_config , text_config ):
        '''simple docstring'''
        pass
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pass
    def get_pretrained_model_and_inputs( self ):
        '''simple docstring'''
        pass
    def assert_almost_equals( self , a : np.ndarray , b : np.ndarray , tol : float ):
        '''simple docstring'''
        diff = np.abs((a - b)).max()
        self.assertLessEqual(diff , tol , F'Difference between torch and flax is {diff} (>= {tol}).')
    def check_model_from_pretrained_configs( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        '''simple docstring'''
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        '''simple docstring'''
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config)
        kwargs = {'''vision_model''': vision_model, '''text_model''': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        '''simple docstring'''
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config)
        kwargs = {'''vision_model''': vision_model, '''text_model''': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff , 1E-3)
    def check_vision_text_output_attention( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        '''simple docstring'''
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config)
        kwargs = {'''vision_model''': vision_model, '''text_model''': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions) , vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions) , text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def check_pt_flax_equivalence( self , pt_model , fx_model , inputs_dict ):
        '''simple docstring'''
        pt_model.to(torch_device)
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()
        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs) , len(pt_outputs) , '''Output lengths differ between Flax and PyTorch''')
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]):
            self.assert_almost_equals(fx_output , pt_output.numpy() , 4E-2)
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname , from_pt=True)
            fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded) , len(pt_outputs) , '''Output lengths differ between Flax and PyTorch''')
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded , pt_output.numpy() , 4E-2)
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname , from_flax=True)
            pt_model_loaded.to(torch_device)
            pt_model_loaded.eval()
            with torch.no_grad():
                pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
        self.assertEqual(len(fx_outputs) , len(pt_outputs_loaded) , '''Output lengths differ between Flax and PyTorch''')
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output , pt_output_loaded.numpy() , 4E-2)
    def check_equivalence_pt_to_flax( self , vision_config , text_config , inputs_dict ):
        '''simple docstring'''
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model)
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict)
    def check_equivalence_flax_to_pt( self , vision_config , text_config , inputs_dict ):
        '''simple docstring'''
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params)
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict)
    def test_model_from_pretrained_configs( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)
    def test_vision_text_dual_encoder_from_pretrained( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)
    def test_save_load( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)
    def test_vision_text_output_attention( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
    @is_pt_flax_cross_test
    def test_pt_flax_equivalence( self ):
        '''simple docstring'''
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop('''vision_config''')
        text_config = config_inputs_dict.pop('''text_config''')
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config , text_config , inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config , text_config , inputs_dict)
    @slow
    def test_real_model_save_load_from_pretrained( self ):
        '''simple docstring'''
        model_a , inputs = self.get_pretrained_model_and_inputs()
        outputs = model_a(**inputs)
        out_a = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(tmp_dirname)
            model_b = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_b(**inputs)
            out_b = after_outputs[0]
            max_diff = np.amax(np.abs(out_b - out_a))
            self.assertLessEqual(max_diff , 1E-5)
@require_flax
class FlaxViTBertModelTest( VisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        '''simple docstring'''
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        '''simple docstring'''
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest( VisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        '''simple docstring'''
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        '''simple docstring'''
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class VisionTextDualEncoderIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference( self ):
        '''simple docstring'''
        model = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''')
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        inputs = processor(
            text=['''una foto di un gatto''', '''una foto di un cane'''] , images=image , padding=True , return_tensors='''np''')
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1E-3))
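# Context sketch (not part of the test suite): from_vision_text_pretrained pairs
# any vision checkpoint with any text checkpoint into one dual encoder; this is
# the entry point the tests above exercise. Checkpoint names are the tiny test
# models already used in this file.
def _example_build_dual_encoder():
    return FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
        '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=True , text_from_pt=True , )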
| 49 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
) | 6 | 0 |
from scipy.stats import pearsonr
import datasets
_UpperCAmelCase : str = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_UpperCAmelCase : Union[str, Any] = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_UpperCAmelCase : List[Any] = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
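# Standalone illustration of the scipy call the metric wraps; the values come
# from the docstring examples above.
def _example_pearsonr():
    r, p = pearsonr([1, 2, 3, 4, 5] , [10, 9, 2.5, 6, 4] )
    return round(r , 2 ), round(p , 2 )  # (-0.74, 0.15)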
| 50 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class __A( TaskTemplate ):
    task: str = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = "text"
@property
    def column_mapping( self ) -> Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text"} | 6 | 0 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
snake_case_ : Optional[Any] = 3
def primitive_root(p_val: int ) -> int:
    """simple docstring"""
    print('''Generating primitive root of p''' )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key(key_size: int ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """simple docstring"""
    print('''Generating prime p...''' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str , key_size: int ) -> None:
    """simple docstring"""
    if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
        print('''\nWARNING:''' )
        print(
            F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
    with open(F"""{name}_pubkey.txt""" , '''w''' ) as fo:
        fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
    print(F"""Writing private key to file {name}_privkey.txt...""" )
    with open(F"""{name}_privkey.txt""" , '''w''' ) as fo:
        fo.write(F"""{private_key[0]},{private_key[1]}""" )
def main() -> None:
"""simple docstring"""
print('''Making key files...''' )
make_key_files('''elgamal''' , 2048 )
print('''Key files generation successful''' )
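# Illustrative sketch (not part of this module): how the generated keys are used.
# This module's public key is (key_size, e_1, e_2, p) with e_2 the inverse of
# e_1**d mod p, so decryption needs no modular inverse at runtime.
def _example_encrypt(m: int , public_key: tuple[int, int, int, int] ) -> tuple[int, int]:
    _key_size, e_1, e_2, p = public_key
    k = random.randrange(3 , p )  # ephemeral key
    return pow(e_1 , k , p ), (m * pow(e_2 , k , p )) % p
def _example_decrypt(ciphertext: tuple[int, int] , private_key: tuple[int, int] , p: int ) -> int:
    _key_size, d = private_key
    c_1, c_2 = ciphertext
    # c_2 * c_1**d = m * e_1**(-d*k) * e_1**(k*d) = m (mod p)
    return (c_2 * pow(c_1 , d , p )) % p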
if __name__ == "__main__":
main()
| 51 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file( tokenizer_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs ) -> Optional[Any]:
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='''train''' , **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch['''input_ids'''].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch['''labels'''].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='''val''' , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file) | 6 | 0 |
import numpy as np
def power_iteration( input_matrix , vector , error_tol = 1e-12 , max_iterations = 100 , ) -> tuple[float, np.ndarray]:
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
    return lambda_, vector
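# Quick sanity example (illustrative): the dominant eigenvalue of the symmetric
# matrix [[2, 1], [1, 2]] is 3, with eigenvector [1, 1] / sqrt(2).
_example_eig, _example_vec = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]] ) , np.array([1.0, 0.0] ) )
assert abs(_example_eig - 3.0 ) <= 1e-5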
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    real_vector = np.array([41, 4, 20] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1j * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value , eigen_vector = power_iteration(input_matrix , vector )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values , eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 52 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
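# Usage sketch (hedged): IIRFilter.process is assumed to consume one sample at a
# time, as in the accompanying audio_filters.iir_filter module.
def _example_lowpass_usage():
    filt = make_lowpass(1000 , 48000 )
    return [filt.process(sample ) for sample in (0.0, 1.0, 0.5, -0.5)]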
def make_highpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_bandpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_allpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt
def make_peak( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_lowshelf( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_highshelf( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt | 6 | 0 |
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations( sequence: list[int | str] ) -> None:
    """simple docstring"""
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree( sequence: list[int | str] , current_sequence: list[int | str] , index: int , index_used: list[int] , ) -> None:
    """simple docstring"""
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
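# Standard-library cross-check (illustrative): itertools.permutations enumerates
# the same n! orderings that the backtracking above prints.
def _permutations_via_itertools(seq: list[int | str] ) -> list[list[int | str]]:
    from itertools import permutations
    return [list(p) for p in permutations(seq )]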
| 53 |
def neville_interpolate( x_points , y_points , xa ) -> list:
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
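# Worked example (illustrative): the points below lie on the line y = x + 5, so
# Neville's scheme evaluated at x = 5 recovers 10 exactly.
assert neville_interpolate([1, 2, 3, 4, 6] , [6, 7, 8, 9, 11] , 5 )[0] == 10.0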
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ ):
    '''simple docstring'''
    bitmap = 0
    for ch in lowerCAmelCase_:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
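# Equivalent set-based check, shown for comparison: it uses O(n) extra space
# where the bitmap above packs everything into a single integer.
def _all_unique_via_set(input_str: str ) -> bool:
    return len(set(input_str ) ) == len(input_str )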
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray( arr , low , high ) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low , left_high , left_sum = max_subarray(arr , low , mid )
    right_low , right_high , right_sum = max_subarray(arr , mid + 1 , high )
    cross_left , cross_right , cross_sum = max_cross_sum(arr , low , mid , high )
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum( arr , low , mid , high ) -> tuple[int, int, float]:
    left_sum , max_left = float('''-inf''' ) , -1
    right_sum , max_right = float('''-inf''' ) , -1
    summ = 0
    for i in range(mid , low - 1 , -1 ):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1 , high + 1 ):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray( input_size ) -> float:
    arr = [randint(1 , input_size ) for _ in range(input_size )]
    start = time.time()
    max_subarray(arr , 0 , input_size - 1 )
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
    runtimes = [time_max_subarray(input_size ) for input_size in input_sizes]
    print('''No of Inputs\t\tTime Taken''' )
    for input_size, runtime in zip(input_sizes , runtimes ):
        print(input_size , '''\t\t''' , runtime )
    plt.plot(input_sizes , runtimes )
    plt.xlabel('''Number of Inputs''' )
    plt.ylabel('''Time taken in seconds''' )
    plt.show()
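# For contrast with the O(n log n) divide-and-conquer above, Kadane's algorithm
# finds the maximum subarray *sum* (without indices) in a single O(n) pass.
def kadane_max_sum(arr ) -> float:
    best = cur = arr[0]
    for x in arr[1:]:
        cur = max(x , cur + x )
        best = max(best , cur )
    return best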
if __name__ == "__main__":
from doctest import testmod
testmod() | 6 | 0 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve :
"""simple docstring"""
    def __init__( self , list_of_points ):
        """simple docstring"""
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function( self , t ):
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
    def bezier_curve_function( self , t ):
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve( self , step_size = 0.01 ):
        """simple docstring"""
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="blue" , label="Curve of Degree " + str(self.degree ) , )
        plt.scatter(x , y , color="red" , label="Control Points" )
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
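# Worked check (illustrative): for a degree-1 curve the Bernstein basis at
# t = 0.5 is [0.5, 0.5], so the curve's midpoint is the midpoint of the two
# control points.
_curve = BezierCurve([(1, 2), (3, 5)])
assert _curve.basis_function(0.5 ) == [0.5, 0.5]
assert _curve.bezier_curve_function(0.5 ) == (2.0, 3.5)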
| 55 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __A( OnnxPipelineTesterMixin , unittest.TestCase ):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __A( unittest.TestCase ):
@property
    def gpu_provider( self ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self ):
        '''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ) -> Optional[Any]:
        '''simple docstring'''
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A red cat sitting on a park bench'''
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_inference_k_lms( self ) -> Tuple:
        '''simple docstring'''
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A red cat sitting on a park bench'''
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 | 6 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester :
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 8 , max_size=32 * 8 , num_labels=4 , hidden_dim=64 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
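    # Note: mask_labels is a (batch_size, num_labels, min_size, max_size) float tensor of
    # random binary masks and class_labels a (batch_size, num_labels) long tensor -- the
    # label format the universal-segmentation head below trains against.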
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskaformer_instance_segmentation_head_model(self, config, pixel_values, pixel_mask, mask_labels, class_labels):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)

            comm_check_on_output(result)

            self.parent.assertTrue(result.loss is not None)
            self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)], return_tensors="pt", )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 56 |
from math import ceil
def solution(n: int = 1001) -> int:
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
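
# Worked example (illustrative): for a 5x5 spiral, solution(5) runs i = 1, 2 and returns
# 1 + (4*3**2 - 6*2) + (4*5**2 - 6*4) = 1 + 24 + 76 = 101, the known sum of the
# diagonals of a 5x5 number spiral.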
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number') | 6 | 0 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 57 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class | 6 | 0 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
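
# Note that check_uniques is destructive: the first example carrying a given hash removes
# it from `uniques`, so every later duplicate of that hash is filtered out.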
def is_autogenerated(example, scan_width=5):
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
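
# A low characters-per-token ratio means the tokenizer needs unusually many tokens to
# cover the text, a common signal of low-quality or non-natural content; `filter` below
# drops examples whose ratio falls under args.min_token_ratio.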
def preprocess(example):
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")

# Deduplicate hashes
uniques = set(ds.unique("""hash"""))
frac = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
    print(f"""Size of deduplicate dataset: {len(ds_filter)}""")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / """duplicate_clusters.json""", """w""") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / """data"""
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"""file-{file_number+1:012}.json""")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
| 58 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]
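
# Sanity check (the well-known Project Euler 39 result, assumed rather than asserted
# here): solution(1000) should return 840, the perimeter below 1000 that admits the
# most integer right triangles.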
if __name__ == "__main__":
print(F"Perimeter {solution()} has maximum solutions") | 6 | 0 |
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(R"""../image_data/lena.jpg""")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("""gaussian filter with 3x3 mask""", gaussian3x3)
    imshow("""gaussian filter with 5x5 mask""", gaussian5x5)
waitKey()
| 59 |
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental | 6 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 60 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)")
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs) | 6 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n    >>> pipe_prior.to("cuda")\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n    >>> pipe.to("cuda")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save("cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
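
# Worked example (illustrative, assuming the default scale_factor=8 used by the released
# checkpoints): downscale_height_and_width(768, 768) -> (96, 96), the latent spatial size
# that the movq decoder later upsamples by a factor of 8 back to 768x768.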
class KandinskyVaaPipeline(DiffusionPipeline):
    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        super().__init__()

        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
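    # prepare_latents both draws the initial noise and pre-scales it by the scheduler's
    # init_noise_sigma; for the DDPM scheduler used here that factor is 1.0, so the
    # scaling is a no-op, while schedulers with non-unit initial sigma scale accordingly.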
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(F"""cuda:{gpu_id}""")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(F"""cuda:{gpu_id}""")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, negative_image_embeds, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True, ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 61 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1E-4 | 6 | 0 |
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update
    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1
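    # Children of node idx in the implicit heap layout: left(2) == 4 and right(2) == 5;
    # index 1 is the root, which is why the arrays above are sized 4 * size.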
    def build(self, idx: int, left_element: int, right_element: int, a: list) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True
    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 62 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
A : int = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1E-3, msg=F"""{key} not identical""")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1E-3, msg=F"""{key} not identical""")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1E-3, msg=F"""{key} not identical""")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1E-3, msg=F"""{key} not identical""")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model) | 6 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)
    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token
    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 63 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)

models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}

ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]


def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x) for x in param.args))
    return F"""{func.__name__}_{param_based_name}"""


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
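
# e.g. for param.args == ("zero2", "base") the generated sub-test name becomes
# "<test_method_name>_zero2_base" (illustrative rendering of the name_func above).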
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False, )

    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True, )

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 10 , _snake_case = True , _snake_case = True , _snake_case = True , ) -> Any:
'''simple docstring'''
__a = models[model]
__a = self.run_trainer(
stage=_snake_case , model_name=_snake_case , eval_steps=_snake_case , num_train_epochs=1 , distributed=_snake_case , fpaa=_snake_case , )
self.do_checks(_snake_case )
return output_dir
    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
""".split()
        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir
    def get_launcher(self, distributed=False):
        # keep the test on a single node and at most 2 gpus
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split() | 6 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" FNet tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
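    # Illustrative shape of the output above (token strings shown instead of ids
    # for readability; the method itself operates on id lists):
    #   single sequence:   [CLS] A [SEP]
    #   pair of sequences: [CLS] A [SEP] B [SEP]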
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 64 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance) | 6 | 0 |
def validate_initial_digits(credit_card_number: str) -> bool:
    """Check the first digits of the credit card number against known issuer prefixes."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def luhn_validation(credit_card_number: str) -> bool:
    """Validate the credit card number using the Luhn algorithm."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
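# Worked example (illustrative only): for "4111111111111111", the doubled
# digits (every second from the right) contribute 8 + 7 * 2 = 22 and the
# untouched digits contribute 8, so total == 30 and 30 % 10 == 0 -> valid.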
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print validation messages and return whether the credit card number is valid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 65 |
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)
    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
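    # Illustrative call (hypothetical tag/subscript lists, not taken from the
    # original file): construct_xpath(["html", "body", "div"], [0, 0, 2])
    # returns "/html/body/div[2]" — a 0 subscript means the tag had no siblings.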
    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)

            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs | 6 | 0 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
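# Illustrative derivation (follows directly from the parsing above, no extra
# lookup involved): for model_name = "xclip-base-patch16-16-frames" the two
# characters after "patch" give patch_size = 16, and num_frames = 16 is passed
# through to the vision config.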
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
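# Example of the resulting mapping (derived by tracing the replacements above):
#   rename_key("transformer.resblocks.0.ln_1.weight")
#   -> "text_model.encoder.layers.0.layer_norm1.weight"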
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            # chunk the fused in_proj weights/biases into separate q/k/v
            # projections, using the HF X-CLIP parameter naming
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    prefix = f"vision_model.encoder.layers.{layer_num}.message_attn"
                else:
                    prefix = f"vision_model.encoder.layers.{layer_num}.self_attn"
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                prefix = f"mit.encoder.layers.{layer_num}.self_attn"
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                prefix = f"text_model.encoder.layers.{layer_num}.self_attn"

            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original checkpoint's weights into the HF XCLIPModel structure."""
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
# kinetics-400
if model_name == "xclip-base-patch32":
snake_case_ :Union[str, Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
snake_case_ :str = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] )
elif model_name == "xclip-base-patch16":
snake_case_ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
snake_case_ :Any = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] )
elif model_name == "xclip-large-patch14":
snake_case_ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
snake_case_ :Tuple = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
snake_case_ :List[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
snake_case_ :Union[str, Any] = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
snake_case_ :List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
snake_case_ :Dict = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
snake_case_ :Union[str, Any] = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
snake_case_ :str = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
snake_case_ :str = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
snake_case_ :int = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
snake_case_ :Optional[int] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
snake_case_ :Any = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
snake_case_ :Tuple = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
snake_case_ :Union[str, Any] = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(_lowercase, _lowercase, atol=1e-3 )
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 66 |
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
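# Hand-computed example (follows directly from the definition above): for
# "hello" vs "world" only "l" matches within the window, so match_count = 1,
# transpositions = 0, jaro = (1/5 + 1/5 + 1/1) / 3 ≈ 0.4667, and with no
# common prefix the Jaro-Winkler score stays at ≈ 0.4666666666666666.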
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world')) | 6 | 0 |
'''simple docstring'''
def solution() -> int:
    """
    Project Euler problem 9: find the product a * b * c of the Pythagorean
    triplet (a < b < c) for which a + b + c = 1000.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f'{solution() = }')
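# For reference, the unique triplet is (a, b, c) = (200, 375, 425):
# 200**2 + 375**2 == 425**2 and 200 + 375 + 425 == 1000, so solution()
# returns 200 * 375 * 425 == 31875000.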
| 67 |
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])

        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
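# Illustrative behaviour of the stack-based check:
#   is_balanced("{[()]}") -> True   (every closer matches the most recent opener)
#   is_balanced("([)]")   -> False  (")" arrives while "[" is on top of the stack)
#   is_balanced("((")     -> False  (the final len(stack) == 0 test fails)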
def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main() | 6 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(self, vocab_size=250002, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, initializer_factor=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, project_dim=768, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for backward compatibility.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
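# Minimal usage sketch (attribute values are the defaults defined above and
# would change with any overrides; shown only to illustrate the composition):
#   config = AltCLIPConfig(projection_dim=768)
#   config.text_config.vocab_size    # 250002
#   config.vision_config.patch_size  # 32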
| 68 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
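# With the lazy structure above, heavy framework imports are deferred until
# first attribute access, e.g. (usage sketch):
#   from transformers import BlenderbotForConditionalGeneration  # resolved via _LazyModule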
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 6 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        # Return the item located at `_loader_batch_index` within the current batch
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
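    # Unbatching sketch (hypothetical sizes): with loader_batch_size=8 and a
    # model that returns tensors of shape (8, ...), each call to __next__()
    # yields one (1, ...) slice; only every 8th call pulls a fresh batch from
    # the underlying loader.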
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # subiterator None means we haven't started a `preprocess` iterator, so start one
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 69 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 6 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/data2vec-text-base''': '''https://huggingface.co/facebook/data2vec-text-base/resolve/main/config.json''',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = '''data2vec-text'''

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
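    # Illustrative usage (not part of the original file): a default
    # configuration exposes the hyperparameters above as attributes, e.g.
    #   config = Data2VecTextConfig()
    #   config.hidden_size          # 768
    #   config.num_attention_heads  # 12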
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 70 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_whisper_fast'] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_whisper'] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_whisper'] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_whisper'] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 6 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_x_clip'''] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 71 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 6 | 0 |
"""simple docstring"""
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """
    Computes the Schur complement of the symmetric block matrix [[A, B], [B.T, C]],
    i.e. C - B.T @ inv(A) @ B. A precomputed (pseudo-)inverse of A may be supplied
    via `pseudo_inv`.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
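# Illustrative usage sketch (not part of the original file); the matrices below
# are hand-picked so the result is easy to verify by inspection.
def _schur_demo() -> np.ndarray:
    a_demo = np.array([[4.0, 0.0], [0.0, 4.0]])
    b_demo = np.array([[1.0], [1.0]])
    c_demo = np.array([[2.0]])
    # C - B.T @ inv(A) @ B = 2 - (1/4 + 1/4) = 1.5
    return schur_complement(a_demo, b_demo, c_demo)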
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 72 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default='''language-modeling''', metadata={'''include_in_asdict_even_if_is_default''': True})
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''')})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 6 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_time_series_transformer"""] = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 73 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='''train''', **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch['''input_ids'''].ne(pad).sum(1).tolist()
            tgt_lens = batch['''labels'''].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='''val''', **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
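# Example invocation through python-fire (the paths and model name below are
# hypothetical; fire maps keyword flags onto the function parameters):
#   python save_len_file.py t5-small /path/to/dataset_dir --max_source_length=512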
if __name__ == "__main__":
    fire.Fire(save_len_file)
| 6 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):

    deprecated_args = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]

    def __init__(self, **kwargs) -> None:
        # Map deprecated `no_*` flags onto their positive counterparts before the
        # dataclass machinery sees them.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f'{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'
                    f' {positive_arg}={kwargs[positive_arg]}'
                )
        self.tpu_name = kwargs.pop('tpu_name', self.tpu_name)
        self.device_idx = kwargs.pop('device_idx', self.device_idx)
        self.eager_mode = kwargs.pop('eager_mode', self.eager_mode)
        self.use_xla = kwargs.pop('use_xla', self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={'help': 'Name of TPU'},
    )
    device_idx: int = field(
        default=0,
        metadata={'help': 'CPU / GPU device index. Defaults to 0.'},
    )
    eager_mode: bool = field(default=False, metadata={'help': 'Benchmark models in eager model.'})
    use_xla: bool = field(
        default=False,
        metadata={
            'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ['tf'])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ['tf'])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], 'GPU')
                strategy = tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}')
            else:
                tf.config.set_visible_devices([], 'GPU')  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}')
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ['tf'])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ['tf'])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ['tf'])
        return tf.config.list_physical_devices('GPU')

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ['tf'])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 74 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
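# Illustrative usage sketch (not part of the original file): build a 1 kHz
# low-pass filter for 48 kHz audio and feed one sample through it. This
# assumes the `process` method of IIRFilter from the accompanying
# audio_filters.iir_filter module.
def _lowpass_demo() -> float:
    filt = make_lowpass(1000, 48000)
    return filt.process(0.5)  # filtered value of a single input sample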
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order all-pass biquad filter (b and a coefficients mirrored)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peaking EQ biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 6 | 0 |
'''simple docstring'''
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sorts array[start:end] in place using insertion sort."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sifts array[index] down so the subtree rooted at index satisfies the max-heap property."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """Returns the median of the three indexed values; used to pick a pivot."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
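# Illustrative usage (not part of the original file):
#   sort([4, 2, 9, 1]) -> [1, 2, 4, 9]
# Arrays shorter than the size threshold fall straight through to insertion_sort;
# the recursion depth limit switches the algorithm to heap_sort.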
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : Optional[Any] = input("""Enter numbers separated by a comma : """).strip()
a_ : List[Any] = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
| 75 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolates and evaluates a polynomial through the given points at x0 using Neville's method."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
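# Illustrative usage (not part of the original file): the sample points lie on
# the line y = x + 5, so evaluating at x0 = 5 should return 10 as the first
# element (the second element is the full Neville tableau).
def _neville_demo() -> float:
    return neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]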
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 6 | 0 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
CORRECT_DICT = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_CORRECT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
README_CORRECT_FOUR_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
CORRECT_DICT_FOUR_LEVEL = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_EMPTY_YAML = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_EMPTY_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
README_NO_YAML = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_NO_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
README_INCORRECT_YAML = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_INCORRECT_YAML = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
README_MISSING_TEXT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_TEXT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
README_NONE_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
EXPECTED_ERROR_README_NONE_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
README_MISSING_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
README_MISSING_CONTENT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
EXPECTED_ERROR_README_MISSING_CONTENT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
README_MISSING_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
README_MULTIPLE_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
README_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
README_EMPTY = ''
EXPECTED_ERROR_README_EMPTY = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
README_MULTIPLE_SAME_HEADING_1 = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_fails(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_fails(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 76 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Finds the maximum-sum contiguous subarray of arr[low..high] by divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
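# Illustrative usage (not part of the original file): the classic example
#   max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8) -> (3, 6, 6)
# i.e. the subarray [4, -1, 2, 1] at indices 3..6 with sum 6.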
def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float('''-inf'''), -1
    right_sum, max_right = float('''-inf'''), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print('''No of Inputs\t\tTime Taken''')
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, '''\t\t''', runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel('''Number of Inputs''')
    plt.ylabel('''Time taken in seconds''')
    plt.show()
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 6 | 0 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
    parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
    parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
    parser.add_argument(
        '--out-file', '-o', metavar='eval.json', help='Write accuracy metrics to file (default is stdout).')
    parser.add_argument(
        '--na-prob-file', '-n', metavar='na_prob.json', help='Model estimates of probability of no answer.')
    parser.add_argument(
        '--na-prob-thresh', '-t', type=float, default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).')
    parser.add_argument(
        '--out-image-dir', '-p', metavar='out_images', default=None, help='Save precision-recall curves to directory.')
    parser.add_argument('--verbose', '-v', action='store_true')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article['paragraphs']:
            for qa in p['qas']:
                qid_to_has_ans[qa['id']] = bool(qa['answers']['text'])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article['paragraphs']:
            for qa in p['qas']:
                qid = qa['id']
                gold_answers = [t for t in qa['answers']['text'] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['']
                if qid not in preds:
                    print(f'Missing prediction for {qid}')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
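# Illustrative usage (not part of the original script):
#   compute_exact('the cat', 'cat')        -> 1    (articles are stripped)
#   compute_f1('a big cat', 'big cat sat') -> 0.8  (2 shared tokens; precision 2/3, recall 1)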
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores.values()) / total),
                ('f1', 100.0 * sum(f1_scores.values()) / total),
                ('total', total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ('total', total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f'{prefix}_{k}'] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
    plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {'ap': 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, 'pr_exact.png'),
        title='Precision-Recall curve for Exact Match score',
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, 'pr_f1.png'),
        title='Precision-Recall curve for F1 score',
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, 'pr_oracle.png'),
        title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)',
    )
    merge_eval(main_eval, pr_exact, 'pr_exact')
    merge_eval(main_eval, pr_f1, 'pr_f1')
    merge_eval(main_eval, pr_oracle, 'pr_oracle')


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel('Model probability of no-answer')
    plt.ylabel('Proportion of dataset')
    plt.title(f'Histogram of no-answer probability: {name}')
    plt.savefig(os.path.join(image_dir, f'na_prob_hist_{name}.png'))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval['best_exact'] = best_exact
    main_eval['best_exact_thresh'] = exact_thresh
    main_eval['best_f1'] = best_f1
    main_eval['best_f1_thresh'] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json['data']
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, 'HasAns')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, 'NoAns')
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')
    if OPTS.out_file:
        with open(OPTS.out_file, 'w') as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
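# Example invocation (hypothetical file names):
#   python evaluate_v2.py dev-v2.0.json predictions.json --na-prob-file na_probs.json -o eval.json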
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 77 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            'CUDAExecutionProvider',
            {
                'gpu_mem_limit': '15000000000',  # 15GB
                'arena_extend_strategy': 'kSameAsRequested',
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png')
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            'runwayml/stable-diffusion-inpainting',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A red cat sitting on a park bench'

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type='np',
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png')
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-inpainting', subfolder='scheduler', revision='onnx')
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            'runwayml/stable-diffusion-inpainting',
            revision='onnx',
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A red cat sitting on a park bench'

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type='np',
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 6 | 0 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[Any]:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :List[str] , lowercase_ :Optional[Any] , lowercase_ :Tuple , lowercase_ :Any , lowercase_ :Dict , lowercase_ :Tuple ) -> str:
UpperCAmelCase = SqueezeBertModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , lowercase_ )
UpperCAmelCase = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Optional[int] , lowercase_ :str , lowercase_ :Tuple , lowercase_ :List[Any] , lowercase_ :Dict , lowercase_ :Any ) -> int:
UpperCAmelCase = SqueezeBertForMaskedLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self :List[str] , lowercase_ :List[Any] , lowercase_ :Optional[int] , lowercase_ :Union[str, Any] , lowercase_ :Optional[int] , lowercase_ :Dict , lowercase_ :List[str] ) -> Dict:
UpperCAmelCase = SqueezeBertForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(
lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :str , lowercase_ :List[str] , lowercase_ :int , lowercase_ :str , lowercase_ :Optional[int] , lowercase_ :Union[str, Any] ) -> int:
UpperCAmelCase = self.num_labels
UpperCAmelCase = SqueezeBertForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self :List[str] , lowercase_ :List[str] , lowercase_ :Any , lowercase_ :Any , lowercase_ :Dict , lowercase_ :Optional[int] , lowercase_ :Tuple ) -> Any:
UpperCAmelCase = self.num_labels
UpperCAmelCase = SqueezeBertForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self :int , lowercase_ :Dict , lowercase_ :Any , lowercase_ :Tuple , lowercase_ :Optional[Any] , lowercase_ :int , lowercase_ :str ) -> int:
UpperCAmelCase = self.num_choices
UpperCAmelCase = SqueezeBertForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = model(
lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self :Optional[Any] ) -> Tuple:
UpperCAmelCase = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) = config_and_inputs
UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__UpperCamelCase = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = False
def UpperCAmelCase__ ( self :str ) -> Dict:
UpperCAmelCase = SqueezeBertModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowercase_ , dim=37 )
def UpperCAmelCase__ ( self :int ) -> Dict:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self :List[str] ) -> Tuple:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowercase_ )
def UpperCAmelCase__ ( self :Optional[Any] ) -> List[str]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowercase_ )
def UpperCAmelCase__ ( self :List[Any] ) -> Dict:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowercase_ )
def UpperCAmelCase__ ( self :str ) -> Union[str, Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowercase_ )
def UpperCAmelCase__ ( self :List[Any] ) -> Optional[Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowercase_ )
def UpperCAmelCase__ ( self :List[str] ) -> Any:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowercase_ )
@slow
def UpperCAmelCase__ ( self :Dict ) -> List[str]:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = SqueezeBertModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self :Tuple ) -> int:
UpperCAmelCase = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
UpperCAmelCase = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
UpperCAmelCase = model(lowercase_ )[0]
UpperCAmelCase = torch.Size((1, 3) )
self.assertEqual(output.shape , lowercase_ )
UpperCAmelCase = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-4 ) )
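For reference, a minimal sketch of the forward pass the model tester above exercises, built from a tiny from-scratch config so nothing is downloaded (all dimensions here are illustrative):

import torch
from transformers import SqueezeBertConfig, SqueezeBertModel

config = SqueezeBertConfig(
    vocab_size=99, embedding_size=32, hidden_size=32,
    num_hidden_layers=2, num_attention_heads=4, intermediate_size=64,
)
model = SqueezeBertModel(config)
model.eval()
input_ids = torch.randint(0, config.vocab_size, (2, 7))  # (batch_size, seq_length)
with torch.no_grad():
    outputs = model(input_ids)
print(outputs.last_hidden_state.shape)  # torch.Size([2, 7, 32])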
| 78 |
from math import ceil


def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n x n clockwise spiral (n odd)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1  # side length of the i-th ring; its four corners sum to 4 * odd**2 - 6 * even
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number') | 6 | 0 |
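A quick sanity check of the fixed solution against small spirals whose diagonals can be summed by hand:

# 3x3 spiral: diagonals are 1, 3, 5, 7, 9 -> 25
# 5x5 spiral: the outer ring adds 13, 17, 21, 25 -> 101
assert solution(3) == 25
assert solution(5) == 101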
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_jukebox': [
        'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'JukeboxConfig',
        'JukeboxPriorConfig',
        'JukeboxVQVAEConfig',
    ],
    'tokenization_jukebox': ['JukeboxTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_jukebox'] = [
        'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
        'JukeboxModel',
        'JukeboxPreTrainedModel',
        'JukeboxVQVAE',
        'JukeboxPrior',
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
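The `_LazyModule` assignment defers the heavy torch imports until an attribute is first accessed. A simplified stand-in for that mechanism (not the transformers implementation) can be written with PEP 562 module-level `__getattr__`:

import importlib

_import_structure = {'configuration_jukebox': ['JukeboxConfig']}
_module_for_name = {name: mod for mod, names in _import_structure.items() for name in names}

def __getattr__(name):  # called only when `name` is not found the normal way
    if name in _module_for_name:
        module = importlib.import_module('.' + _module_for_name[name], __name__)
        return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')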
| 79 |
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class | 6 | 0 |
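A hedged usage sketch of the processor (it assumes network access and that the public `OFA-Sys/chinese-clip-vit-base-patch16` checkpoint is available):

from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained('OFA-Sys/chinese-clip-vit-base-patch16')
image = Image.new('RGB', (224, 224))
inputs = processor(text=['一只猫'], images=image, return_tensors='pt')
# the text went through the tokenizer, the image through the image processor,
# and `pixel_values` was merged into the tokenizer's BatchEncoding; keys typically include
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']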
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Any = logging.get_logger(__name__)
a__ : int = '▁'
a__ : Optional[int] = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
a__ : Optional[Any] = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
a__ : Union[str, Any] = {
'facebook/s2t-small-librispeech-asr': 1_0_2_4,
}
a__ : Union[str, Any] = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
a__ : str = {'mustc': MUSTC_LANGS}
class lowercase_ ( a__ ):
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = MAX_MODEL_INPUT_SIZES
__UpperCAmelCase = ['input_ids', 'attention_mask']
__UpperCAmelCase = []
def __init__( self , a , a , a="<s>" , a="</s>" , a="<pad>" , a="<unk>" , a=False , a=False , a=None , a=None , a = None , **a , ):
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a , eos_token=a , unk_token=a , pad_token=a , do_upper_case=a , do_lower_case=a , tgt_lang=a , lang_codes=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
UpperCamelCase__ = do_upper_case
UpperCamelCase__ = do_lower_case
UpperCamelCase__ = load_json(a )
UpperCamelCase__ = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ = spm_file
UpperCamelCase__ = load_spm(a , self.sp_model_kwargs )
if lang_codes is not None:
UpperCamelCase__ = lang_codes
UpperCamelCase__ = LANGUAGES[lang_codes]
UpperCamelCase__ = [f'''<lang:{lang}>''' for lang in self.langs]
UpperCamelCase__ = {lang: self.sp_model.PieceToId(f'''<lang:{lang}>''' ) for lang in self.langs}
UpperCamelCase__ = self.lang_tokens
UpperCamelCase__ = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
UpperCamelCase__ = {}
@property
def __a ( self ):
return len(self.encoder )
@property
def __a ( self ):
return self._tgt_lang
@tgt_lang.setter
def __a ( self , a ):
UpperCamelCase__ = new_tgt_lang
self.set_tgt_lang_special_tokens(a )
def __a ( self , a ):
UpperCamelCase__ = self.lang_code_to_id[tgt_lang]
UpperCamelCase__ = [lang_code_id]
def __a ( self , a ):
return self.sp_model.encode(a , out_type=a )
def __a ( self , a ):
return self.encoder.get(a , self.encoder[self.unk_token] )
def __a ( self , a ):
return self.decoder.get(a , self.unk_token )
def __a ( self , a ):
UpperCamelCase__ = []
UpperCamelCase__ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
UpperCamelCase__ = self.sp_model.decode(a )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
UpperCamelCase__ = []
else:
current_sub_tokens.append(a )
UpperCamelCase__ = self.sp_model.decode(a )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def __a ( self , a , a=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def __a ( self , a , a = None , a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
UpperCamelCase__ = [1] * len(self.prefix_tokens )
UpperCamelCase__ = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(a )) + suffix_ones
return prefix_ones + ([0] * len(a )) + ([0] * len(a )) + suffix_ones
def __a ( self ):
UpperCamelCase__ = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self , a ):
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase__ = {}
UpperCamelCase__ = load_spm(self.spm_file , self.sp_model_kwargs )
def __a ( self , a , a = None ):
UpperCamelCase__ = Path(a )
assert save_dir.is_dir(), f'''{save_directory} should be a directory'''
UpperCamelCase__ = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
UpperCamelCase__ = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , a )
if os.path.abspath(self.spm_file ) != os.path.abspath(a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , a )
elif not os.path.isfile(self.spm_file ):
with open(a , "wb" ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(a )
return (str(a ), str(a ))
def _UpperCamelCase ( __A , __A ) -> sentencepiece.SentencePieceProcessor:
'''simple docstring'''
UpperCamelCase__ = sentencepiece.SentencePieceProcessor(**__A )
spm.Load(str(__A ) )
return spm
def _UpperCamelCase ( __A ) -> Union[Dict, List]:
'''simple docstring'''
with open(__A , "r" ) as f:
return json.load(__A )
def _UpperCamelCase ( __A , __A ) -> None:
'''simple docstring'''
with open(__A , "w" ) as f:
json.dump(__A , __A , indent=2 )
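The two JSON helpers above are thin wrappers; a round-trip sketch of what they do (file names here are hypothetical):

import json
import os
import tempfile

vocab = {'<s>': 0, '<pad>': 1, '</s>': 2, '▁hello': 42}
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, 'vocab.json')
    with open(path, 'w') as f:          # save_json
        json.dump(vocab, f, indent=2)
    with open(path, 'r') as f:          # load_json
        assert json.load(f) == vocab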
| 80 |
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for every perimeter up to max_perimeter, how many Pythagorean triples have that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter <= max_perimeter with the largest number of Pythagorean triples."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(F"Perimeter {solution()} has maximum solutions") | 6 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
lowerCamelCase_ : int = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
lowerCamelCase_ : Optional[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
lowerCamelCase_ : Optional[Any] = {
"""allenai/led-base-16384""": 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def _A ( ):
"""simple docstring"""
a =(
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
a =bs[:]
a =0
for b in range(2**8 ):
if b not in bs:
bs.append(lowercase )
cs.append(2**8 + n )
n += 1
a =[chr(lowercase ) for n in cs]
return dict(zip(lowercase , lowercase ) )
def _A ( lowercase ):
"""simple docstring"""
a =set()
a =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
a =char
return pairs
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , __A , __A , __A="replace" , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=False , **__A , ) -> List[Any]:
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , )
with open(__A , encoding='''utf-8''' ) as vocab_handle:
a =json.load(__A )
a ={v: k for k, v in self.encoder.items()}
a =errors # how to handle errors in decoding
a =bytes_to_unicode()
a ={v: k for k, v in self.byte_encoder.items()}
with open(__A , encoding='''utf-8''' ) as merges_handle:
a =merges_handle.read().split('''\n''' )[1:-1]
a =[tuple(merge.split() ) for merge in bpe_merges]
a =dict(zip(__A , range(len(__A ) ) ) )
a ={}
a =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
a =re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self ) -> str:
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self , __A ) -> Union[str, Any]:
if token in self.cache:
return self.cache[token]
a =tuple(__A )
a =get_pairs(__A )
if not pairs:
return token
while True:
a =min(__A , key=lambda __A : self.bpe_ranks.get(__A , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
a , a =bigram
a =[]
a =0
while i < len(__A ):
try:
a =word.index(__A , __A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
a =j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a =tuple(__A )
a =new_word
if len(__A ) == 1:
break
else:
a =get_pairs(__A )
a =''' '''.join(__A )
a =word
return word
def SCREAMING_SNAKE_CASE ( self , __A ) -> Optional[Any]:
a =[]
for token in re.findall(self.pat , __A ):
a =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(''' ''' ) )
return bpe_tokens
def SCREAMING_SNAKE_CASE ( self , __A ) -> List[Any]:
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self , __A ) -> Optional[int]:
return self.decoder.get(__A )
def SCREAMING_SNAKE_CASE ( self , __A ) -> Any:
a =''''''.join(__A )
a =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
a =os.path.join(
__A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
a =os.path.join(
__A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + '''\n''' )
a =0
with open(__A , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
a =token_index
writer.write(''' '''.join(__A ) + '''\n''' )
index += 1
return vocab_file, merge_file
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a =[self.cls_token_id]
a =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]:
a =[self.sep_token_id]
a =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE ( self , __A , __A=False , **__A ) -> str:
a =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
a =''' ''' + text
return (text, kwargs)
def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = PaddingStrategy.DO_NOT_PAD , __A = None , __A = None , ) -> dict:
a =super()._pad(
encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
# Load from model defaults
if return_attention_mask is None:
a ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
a =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
a =len(encoded_inputs['''global_attention_mask'''] ) != len(__A )
if needs_to_be_padded:
a =len(__A ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
a =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
a =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs | 81 |
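The pair-collection step of the BPE merge loop above is easy to isolate; a standalone version of the helper with a small example:

def get_pairs(word):
    """Set of adjacent symbol pairs in a word, which the merge loop ranks via bpe_ranks."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

print(get_pairs(('h', 'e', 'l', 'l', 'o')))
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}  (set order may vary)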
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental | 6 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (('num_inference_steps', 50),)
def snake_case ( self , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**_snake_case )
return config
def snake_case ( self , _snake_case=0 , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("""num_inference_steps""" , _snake_case )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config(**_snake_case )
_lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
_lowerCAmelCase = scheduler_class.from_pretrained(_snake_case )
new_scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[:]
_lowerCAmelCase = scheduler.step_prk(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
_lowerCAmelCase = new_scheduler.step_prk(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_lowerCAmelCase = scheduler.step_plms(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
_lowerCAmelCase = new_scheduler.step_plms(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self , _snake_case=0 , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("""num_inference_steps""" , _snake_case )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
_lowerCAmelCase = scheduler_class.from_pretrained(_snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(_snake_case )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[:]
_lowerCAmelCase = scheduler.step_prk(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
_lowerCAmelCase = new_scheduler.step_prk(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_lowerCAmelCase = scheduler.step_plms(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
_lowerCAmelCase = new_scheduler.step_plms(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case ( self , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(**_snake_case )
_lowerCAmelCase = scheduler_class(**_snake_case )
_lowerCAmelCase = 10
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case )
for i, t in enumerate(scheduler.prk_timesteps ):
_lowerCAmelCase = model(_snake_case , _snake_case )
_lowerCAmelCase = scheduler.step_prk(_snake_case , _snake_case , _snake_case ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_lowerCAmelCase = model(_snake_case , _snake_case )
_lowerCAmelCase = scheduler.step_plms(_snake_case , _snake_case , _snake_case ).prev_sample
return sample
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("""num_inference_steps""" , _snake_case )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_snake_case )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(_snake_case , """set_timesteps""" ):
scheduler.set_timesteps(_snake_case )
elif num_inference_steps is not None and not hasattr(_snake_case , """set_timesteps""" ):
_lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_lowerCAmelCase = dummy_past_residuals[:]
_lowerCAmelCase = scheduler.step_prk(_snake_case , 0 , _snake_case , **_snake_case ).prev_sample
_lowerCAmelCase = scheduler.step_prk(_snake_case , 1 , _snake_case , **_snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_lowerCAmelCase = scheduler.step_plms(_snake_case , 0 , _snake_case , **_snake_case ).prev_sample
_lowerCAmelCase = scheduler.step_plms(_snake_case , 1 , _snake_case , **_snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_snake_case )
def snake_case ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_snake_case )
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(steps_offset=1 )
_lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def snake_case ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case )
def snake_case ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_snake_case )
def snake_case ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case )
def snake_case ( self ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=_snake_case )
def snake_case ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = 27
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_lowerCAmelCase = scheduler.step_prk(_snake_case , _snake_case , _snake_case ).prev_sample
def snake_case ( self ):
"""simple docstring"""
with self.assertRaises(_snake_case ):
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.full_loop()
_lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
_lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.full_loop(prediction_type="""v_prediction""" )
_lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
_lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.full_loop(set_alpha_to_one=_snake_case , beta_start=0.01 )
_lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
_lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.full_loop(set_alpha_to_one=_snake_case , beta_start=0.01 )
_lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
_lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
assert abs(result_mean.item() - 0.2434 ) < 1e-3
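A sketch of the save/load round trip the tests above rely on; `save_config` and `from_pretrained` are the same diffusers APIs the test itself calls:

import tempfile
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule='linear')
with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)
    reloaded = PNDMScheduler.from_pretrained(tmpdirname)
assert reloaded.config.num_train_timesteps == 1000
assert reloaded.config.beta_schedule == 'linear'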
| 82 |
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)'
                )
            tokenize_kwargs['truncation'] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params['return_tensors'] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs) | 6 | 0 |
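A usage sketch of this pipeline via the high-level factory (the checkpoint name is illustrative and requires a download):

from transformers import pipeline

extractor = pipeline('feature-extraction', model='distilbert-base-uncased')
features = extractor('Hello world')
# nested list of shape (1, num_tokens, hidden_size): one vector per token
print(len(features[0]), len(features[0][0]))  # e.g. 4 768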
'''simple docstring'''
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of vertices reachable from start."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    'A': ['B', 'C', 'D'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B', 'D'],
    'E': ['B', 'F'],
    'F': ['C', 'E', 'G'],
    'G': ['F'],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, 'A'))
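For contrast, a recursive formulation of the same traversal; visit order may differ from the iterative version, but the reachable set is identical:

def depth_first_search_recursive(graph, v, explored=None):
    if explored is None:
        explored = set()
    explored.add(v)
    for adj in graph[v]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored

assert depth_first_search_recursive(G, 'A') == depth_first_search(G, 'A')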
| 83 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = 'levit'

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4 | 6 | 0 |
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__UpperCAmelCase = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__UpperCAmelCase = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
__UpperCAmelCase = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
__UpperCAmelCase = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__UpperCAmelCase = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def _snake_case ( lowercase__ : Optional[int] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , lowercase__ )
return [m.group(0 ) for m in matches]
def _snake_case ( ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[str] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowerCAmelCase_ :Optional[Any] = {
config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
lowerCAmelCase_ :List[Any] = collections.defaultdict(lowercase__ )
lowerCAmelCase_ :Dict = collections.defaultdict(lowercase__ )
lowerCAmelCase_ :Union[str, Any] = collections.defaultdict(lowercase__ )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(lowercase__ ):
lowerCAmelCase_ :List[Any] = None
if _re_tf_models.match(lowercase__ ) is not None:
lowerCAmelCase_ :List[str] = tf_models
lowerCAmelCase_ :Dict = _re_tf_models.match(lowercase__ ).groups()[0]
elif _re_flax_models.match(lowercase__ ) is not None:
lowerCAmelCase_ :Tuple = flax_models
lowerCAmelCase_ :List[str] = _re_flax_models.match(lowercase__ ).groups()[0]
elif _re_pt_models.match(lowercase__ ) is not None:
lowerCAmelCase_ :List[Any] = pt_models
lowerCAmelCase_ :str = _re_pt_models.match(lowercase__ ).groups()[0]
if lookup_dict is not None:
while len(lowercase__ ) > 0:
if attr_name in model_prefix_to_model_type:
lowerCAmelCase_ :List[Any] = True
break
# Try again after removing the last word in the name
lowerCAmelCase_ :Optional[int] = """""".join(camel_case_split(lowercase__ )[:-1] )
lowerCAmelCase_ :Optional[Any] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
lowerCAmelCase_ :Dict = list(lowercase__ )
all_models.sort()
lowerCAmelCase_ :str = {"""model_type""": all_models}
lowerCAmelCase_ :Optional[int] = [pt_models[t] for t in all_models]
lowerCAmelCase_ :Dict = [tf_models[t] for t in all_models]
lowerCAmelCase_ :Optional[Any] = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
lowerCAmelCase_ :Any = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
lowerCAmelCase_ :Any = """AutoProcessor"""
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
lowerCAmelCase_ :List[str] = """AutoTokenizer"""
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
lowerCAmelCase_ :List[Any] = """AutoFeatureExtractor"""
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
lowerCAmelCase_ :Union[str, Any] = """AutoTokenizer"""
lowerCAmelCase_ :Union[str, Any] = [processors[t] for t in all_models]
return pd.DataFrame(lowercase__ )
def _snake_case ( lowercase__ : Any ) -> str:
'''simple docstring'''
lowerCAmelCase_ :str = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
lowerCAmelCase_ :Dict = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
lowerCAmelCase_ :Dict = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(lowercase__ , lowercase__ , lowercase__ ):
# The type of pipeline may not exist in this framework
if not hasattr(lowercase__ , lowercase__ ):
continue
# First extract all model_names
lowerCAmelCase_ :Optional[Any] = []
for name in getattr(lowercase__ , lowercase__ ).values():
if isinstance(lowercase__ , lowercase__ ):
model_names.append(lowercase__ )
else:
model_names.extend(list(lowercase__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def _snake_case ( lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = get_frameworks_table()
lowerCAmelCase_ :Optional[int] = Dataset.from_pandas(lowercase__ )
lowerCAmelCase_ :Any = hf_hub_download(
"""huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=lowercase__ )
lowerCAmelCase_ :List[str] = Dataset.from_json(lowercase__ )
lowerCAmelCase_ :int = {
tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
for i in range(len(lowercase__ ) )
}
lowerCAmelCase_ :Dict = update_pipeline_and_auto_class_table(lowercase__ )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
lowerCAmelCase_ :str = sorted(table.keys() )
lowerCAmelCase_ :List[str] = pd.DataFrame(
{
"""model_class""": model_classes,
"""pipeline_tag""": [table[m][0] for m in model_classes],
"""auto_class""": [table[m][1] for m in model_classes],
} )
lowerCAmelCase_ :List[str] = Dataset.from_pandas(lowercase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(lowercase__ , """frameworks.json""" ) )
tags_dataset.to_json(os.path.join(lowercase__ , """pipeline_tags.json""" ) )
if commit_sha is not None:
lowerCAmelCase_ :List[Any] = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
lowerCAmelCase_ :int = """Update"""
upload_folder(
repo_id="""huggingface/transformers-metadata""" , folder_path=lowercase__ , repo_type="""dataset""" , token=lowercase__ , commit_message=lowercase__ , )
def _snake_case ( ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Tuple = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
lowerCAmelCase_ :Optional[int] = transformers_module.pipelines.SUPPORTED_TASKS
lowerCAmelCase_ :int = []
for key in pipeline_tasks:
if key not in in_table:
lowerCAmelCase_ :Any = pipeline_tasks[key]["""pt"""]
if isinstance(lowercase__ , (list, tuple) ):
lowerCAmelCase_ :int = model[0]
lowerCAmelCase_ :List[Any] = model.__name__
if model not in in_table.values():
missing.append(lowercase__ )
if len(lowercase__ ) > 0:
lowerCAmelCase_ :Optional[int] = """, """.join(lowercase__ )
raise ValueError(
"""The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
__UpperCAmelCase = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
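The camel-case splitter above leans entirely on lookarounds; a small demonstration of the same regex on a framework-prefixed class name:

import re

_re_split = re.compile(r'.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')
print([m.group(0) for m in _re_split.finditer('TFBertForMaskedLM')])
# ['TF', 'Bert', 'For', 'Masked', 'LM']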
| 84 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
A : int = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class __A( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]:
'''simple docstring'''
__a = TOKEN
HfFolder.save_token(_snake_case )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_snake_case )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_snake_case , repo_id='''test-model-flax''' , push_to_hub=_snake_case , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_snake_case )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_snake_case , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
def __lowerCAmelCase ( a__ , a__ ) -> str:
__a = True
__a = flatten_dict(modela.params )
__a = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
__a = False
return models_are_equal
@require_flax
class __A( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
        config = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        model = FlaxBertModel(config )
        subfolder = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) )
            with self.assertRaises(OSError ):
                _ = FlaxBertModel.from_pretrained(tmp_dir )
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
        self.assertTrue(check_models_equal(model , model_loaded ) )
    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
        '''simple docstring'''
        config = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        model = FlaxBertModel(config )
        subfolder = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) , max_shard_size='''10KB''' )
            with self.assertRaises(OSError ):
                _ = FlaxBertModel.from_pretrained(tmp_dir )
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
        self.assertTrue(check_models_equal(model , model_loaded ) )
    def SCREAMING_SNAKE_CASE_ ( self ) -> int:
        '''simple docstring'''
        subfolder = '''bert'''
        model_id = '''hf-internal-testing/tiny-random-bert-subfolder'''
        with self.assertRaises(OSError ):
            _ = FlaxBertModel.from_pretrained(model_id )
        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
        self.assertIsNotNone(model )
    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
        '''simple docstring'''
        subfolder = '''bert'''
        model_id = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
        with self.assertRaises(OSError ):
            _ = FlaxBertModel.from_pretrained(model_id )
        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
        self.assertIsNotNone(model )
'''simple docstring'''
def solution( n : int = 1_0_0_0 ):
    '''simple docstring'''
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
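        # Derivation: substituting c = n - a - b into a**2 + b**2 = c**2 gives
        # a**2 + b**2 = n**2 - 2*n*(a + b) + (a + b)**2, which rearranges to
        # b = (n**2 - 2*a*n) / (2*n - 2*a); floor division is used below, so the
        # resulting (a, b, c) is re-checked against a**2 + b**2 == c**2.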
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(F"{solution() = }")
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]
def custom_name_func( func , param_num , param ) -> str:
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('''_'''.join(str(x ) for x in param.args ) )
    return F"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
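# e.g. for param.args == ("zero3", "base") the generated sub-test would be named
# something like "test_fp16_distributed_zero3_base" (illustrative).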
@slow
@require_deepspeed
@require_torch_gpu
class __A( TestCasePlus ):
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_non_distributed( self , stage , model ) -> Any:
        '''simple docstring'''
        self.run_and_check(
            stage=stage , model=model , distributed=False , fpaa=False , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_distributed( self , stage , model ) -> int:
        '''simple docstring'''
        self.run_and_check(
            stage=stage , model=model , distributed=True , fpaa=False , )
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_non_distributed( self , stage , model ) -> str:
        '''simple docstring'''
        self.run_and_check(
            stage=stage , model=model , distributed=False , fpaa=True , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_distributed( self , stage , model ) -> Optional[Any]:
        '''simple docstring'''
        self.run_and_check(
            stage=stage , model=model , distributed=True , fpaa=True , )
    def do_checks( self , output_dir ) -> Tuple:
        '''simple docstring'''
        pass
    def run_and_check( self , stage , model , eval_steps = 10 , distributed = True , fpaa = True , quality_checks = True , ) -> Any:
        '''simple docstring'''
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage , model_name=model_name , eval_steps=eval_steps , num_train_epochs=1 , distributed=distributed , fpaa=fpaa , )
        self.do_checks(output_dir )
        return output_dir
    def run_trainer( self , stage , model_name , eval_steps = 10 , num_train_epochs = 1 , distributed = True , fpaa = True , ) -> Union[str, Any]:
        '''simple docstring'''
        output_dir = self.get_auto_remove_tmp_dir('''./xxx''' , after=False )
        args = F"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__a = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
__a = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
__a = self.get_launcher(_snake_case )
__a = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_snake_case , env=self.get_env() )
return output_dir
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=False ) -> List[str]:
'''simple docstring'''
__a = min(2 , get_gpu_count() ) if distributed else 1
return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split() | 6 | 0 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCamelCase__ = getLogger(__name__)
def eval_data_dir (data_dir , save_dir , model_name , bs = 8 , max_source_length = 1024 , type_path="val" , n_obs=None , fp16=False , task="summarization" , local_rank=None , num_return_sequences=1 , dataset_kwargs = None , prefix="" , **generate_kwargs , ):
    model_name = str(model_name )
    assert local_rank is not None
    torch.distributed.init_process_group(backend='nccl' , rank=local_rank )
    save_dir = Path(save_dir )
    save_path = save_dir.joinpath(F"rank_{local_rank}_output.json" )
    torch.cuda.set_device(local_rank )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model , task ) # update config with task specific params
    num_beams = generate_kwargs.pop('num_beams' , model.config.num_beams ) # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config , 'prefix' , '' ) or ''
    ds = Seq2SeqDataset(
        tokenizer , data_dir , max_source_length , max_target_length=1024 , type_path=type_path , n_obs=n_obs , prefix=prefix , **dataset_kwargs , )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs , distributed=True , add_extra_examples=False , shuffle=True )
    data_loader = DataLoader(ds , sampler=sampler , batch_size=bs , collate_fn=ds.collate_fn )
    results = []
    for batch in tqdm(data_loader ):
        summaries = model.generate(
            input_ids=batch['input_ids'].to(model.device ) , attention_mask=batch['attention_mask'].to(model.device ) , num_return_sequences=num_return_sequences , num_beams=num_beams , **generate_kwargs , )
        preds = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        ids = batch['ids']
        if num_return_sequences > 1:
            preds = chunks(preds , num_return_sequences ) # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds ):
            results.append({'pred': pred, 'id': ids[i].item()} )
    save_json(results , save_path )
    return results, sampler.num_replicas
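# Each rank writes save_dir/rank_{rank}_output.json holding a list of
# {"pred": <decoded text>, "id": <dataset index>} records; rank 0 later
# collects and merges these files (see gather_results_from_each_node below).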
def run_generate():
    parser = argparse.ArgumentParser(
        epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate' )
    parser.add_argument('--data_dir' , type=str , help='like cnn_dm/test.source' )
    parser.add_argument(
        '--model_name' , type=str , help='like facebook/bart-large-cnn,t5-base, etc.' , default='sshleifer/distilbart-xsum-12-3' , )
    parser.add_argument('--save_dir' , type=str , help='where to save' , default='tmp_gen' )
    parser.add_argument('--max_source_length' , type=int , default=None )
    parser.add_argument(
        '--type_path' , type=str , default='test' , help='which subset to evaluate typically train/val/test' )
    parser.add_argument('--task' , type=str , default='summarization' , help='used for task_specific_params + metrics' )
    parser.add_argument('--bs' , type=int , default=8 , required=False , help='batch size' )
    parser.add_argument(
        '--local_rank' , type=int , default=-1 , required=False , help='should be passed by distributed.launch' )
    parser.add_argument(
        '--n_obs' , type=int , default=None , required=False , help='How many observations. Defaults to all.' )
    parser.add_argument(
        '--num_return_sequences' , type=int , default=1 , required=False , help='How many sequences to return' )
    parser.add_argument(
        '--sync_timeout' , type=int , default=600 , required=False , help='How long should master process wait for other processes to finish.' , )
    parser.add_argument('--src_lang' , type=str , default=None , required=False )
    parser.add_argument('--tgt_lang' , type=str , default=None , required=False )
    parser.add_argument(
        '--prefix' , type=str , required=False , default=None , help='will be added to the beginning of src examples' )
    parser.add_argument('--fp16' , action='store_true' )
    parser.add_argument('--debug' , action='store_true' )
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest )
    if generate_kwargs and args.local_rank <= 0:
        print(F"parsed the following generate kwargs: {generate_kwargs}" )
    json_save_dir = Path(args.save_dir + '_tmp' )
    Path(json_save_dir ).mkdir(exist_ok=True ) # this handles locking.
    intermediate_files = list(json_save_dir.glob('rank_*.json' ) )
    if intermediate_files:
        raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs['src_lang'] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs['tgt_lang'] = args.tgt_lang
    Path(args.save_dir ).mkdir(exist_ok=True )
    results, num_replicas = eval_data_dir(
        args.data_dir , json_save_dir , args.model_name , type_path=args.type_path , bs=args.bs , fp16=args.fp16 , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=dataset_kwargs , **generate_kwargs , )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir )
        save_dir.mkdir(exist_ok=True )
        partial_results = gather_results_from_each_node(num_replicas , json_save_dir , args.sync_timeout )
        preds = combine_partial_results(partial_results )
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath('pseudolabel_results.json' )
            print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
            save_json(preds , save_path )
            return
        tgt_file = Path(args.data_dir ).joinpath(args.type_path + '.target' )
        with open(tgt_file ) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds )]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = 'translation' in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = 'bleu' if calc_bleu else 'rouge'
        metrics = score_fn(preds , labels )
        metrics['n_obs'] = len(preds )
        runtime = time.time() - start_time
        metrics['seconds_per_sample'] = round(runtime / metrics['n_obs'] , 4 )
        metrics['n_gpus'] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
        save_json(metrics , metrics_save_path , indent=None )
        print(metrics )
        write_txt_file(preds , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
        if args.debug:
            write_txt_file(labels , save_dir.joinpath(F"{args.type_path}.target" ) )
    else:
        shutil.rmtree(json_save_dir )
def combine_partial_results(partial_results ):
    records = []
    for partial_result in partial_results:
        records.extend(partial_result )
    records = sorted(records , key=lambda x: x["id"] )
    preds = [x['pred'] for x in records]
    return preds
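# e.g. combining [[{"pred": "b", "id": 1}], [{"pred": "a", "id": 0}]] yields
# ["a", "b"]: the per-rank lists are flattened, sorted by id, then reduced to text.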
def gather_results_from_each_node(num_replicas , save_dir , timeout ):
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info('waiting for all nodes to finish' )
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('rank_*.json' ) )
        if len(json_files ) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json , json_files )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError('Rank 0 gave up on waiting for other processes' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
    run_generate()
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __A( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = AutoencoderKL
    main_input_name = '''sample'''
    base_precision = 1E-2
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = 4
__a = 3
__a = (32, 32)
__a = floats_tensor((batch_size, num_channels) + sizes ).to(_snake_case )
return {"sample": image}
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
return (3, 32, 32)
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return (3, 32, 32)
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
__a = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a , __a = self.prepare_init_args_and_inputs_for_common()
__a = self.model_class(**_snake_case )
model.to(_snake_case )
assert not model.is_gradient_checkpointing and model.training
__a = model(**_snake_case ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
__a = torch.randn_like(_snake_case )
__a = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__a = self.model_class(**_snake_case )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_snake_case )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__a = model_a(**_snake_case ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
__a = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
__a = dict(model.named_parameters() )
__a = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
        model , loading_info = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=True )
self.assertIsNotNone(_snake_case )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_snake_case )
__a = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
__a = model.to(_snake_case )
model.eval()
if torch_device == "mps":
__a = torch.manual_seed(0 )
else:
__a = torch.Generator(device=_snake_case ).manual_seed(0 )
__a = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__a = image.to(_snake_case )
with torch.no_grad():
__a = model(_snake_case , sample_posterior=_snake_case , generator=_snake_case ).sample
__a = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__a = torch.tensor(
[
-4.0_078E-01,
-3.8_323E-04,
-1.2_681E-01,
-1.1_462E-01,
2.0_095E-01,
1.0_893E-01,
-8.8_247E-02,
-3.0_361E-01,
-9.8_644E-03,
] )
elif torch_device == "cpu":
__a = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
__a = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(_snake_case , _snake_case , rtol=1E-2 ) )
@slow
class __A( unittest.TestCase ):
    def get_file_format( self , seed , shape ) -> Optional[Any]:
        '''simple docstring'''
        return F"""gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy"""
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_sd_image( self , seed=0 , shape=(4, 3, 512, 512) , fpaa=False ) -> Any:
        '''simple docstring'''
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image
def SCREAMING_SNAKE_CASE_ ( self , _snake_case="CompVis/stable-diffusion-v1-4" , _snake_case=False ) -> Optional[Any]:
'''simple docstring'''
__a = '''fp16''' if fpaa else None
__a = torch.floataa if fpaa else torch.floataa
__a = AutoencoderKL.from_pretrained(
_snake_case , subfolder='''vae''' , torch_dtype=_snake_case , revision=_snake_case , )
model.to(_snake_case ).eval()
return model
    def get_generator( self , seed=0 ) -> Tuple:
        '''simple docstring'''
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion( self , seed , expected_slice , expected_slice_mps ) -> List[Any]:
        '''simple docstring'''
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16( self , seed , expected_slice ) -> Tuple:
        '''simple docstring'''
        model = self.get_sd_vae_model(fpaa=True )
        image = self.get_sd_image(seed , fpaa=True )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode( self , seed , expected_slice , expected_slice_mps ) -> Optional[int]:
        '''simple docstring'''
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        with torch.no_grad():
            sample = model(image ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode( self , seed , expected_slice ) -> Optional[int]:
        '''simple docstring'''
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16( self , seed , expected_slice ) -> Optional[Any]:
        '''simple docstring'''
        model = self.get_sd_vae_model(fpaa=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fpaa=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16( self , seed ) -> Union[str, Any]:
        '''simple docstring'''
        model = self.get_sd_vae_model(fpaa=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fpaa=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample , sample_2 , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0( self , seed ) -> List[str]:
        '''simple docstring'''
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample , sample_2 , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample( self , seed , expected_slice ) -> Optional[int]:
        '''simple docstring'''
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            dist = model.encode(image ).latent_dist
            sample = dist.sample(generator=generator )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        tolerance = 3E-3 if torch_device != '''mps''' else 1E-2
        assert torch_all_close(output_slice , expected_output_slice , atol=tolerance )
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig(PretrainedConfig ):
    model_type = "longformer"
    def __init__( self , attention_window : Union[List[int], int] = 5_12 , sep_token_id : int = 2 , pad_token_id : int = 1 , bos_token_id : int = 0 , eos_token_id : int = 2 , vocab_size : int = 3_05_22 , hidden_size : int = 7_68 , num_hidden_layers : int = 12 , num_attention_heads : int = 12 , intermediate_size : int = 30_72 , hidden_act : str = "gelu" , hidden_dropout_prob : float = 0.1 , attention_probs_dropout_prob : float = 0.1 , max_position_embeddings : int = 5_12 , type_vocab_size : int = 2 , initializer_range : float = 0.02 , layer_norm_eps : float = 1E-12 , onnx_export : bool = False , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig ):
    def __init__( self , config : "PretrainedConfig" , task : str = "default" , patching_specs : "List[PatchingSpec]" = None ):
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs
@property
    def atol_for_validation( self ) -> float:
return 1E-4
@property
    def default_onnx_opset( self ) -> int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def __UpperCamelCase ( self : List[str] , lowercase_ : "PreTrainedTokenizerBase" , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
lowercase__ : Tuple = super().generate_dummy_inputs(
preprocessor=lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowercase__ : Dict = torch.zeros_like(inputs["input_ids"] )
# make every second token global
lowercase__ : str = 1
return inputs
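    # Sketch of the resulting mask for seq_length=6 (single batch row):
    #   inputs["global_attention_mask"] == tensor([[1, 0, 1, 0, 1, 0]])
    # i.e. every second token gets global attention, the rest stay local.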
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
A : str = logging.get_logger(__name__)
class __A( a ):
def __init__( self , **_snake_case ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''bs4'''] )
super().__init__(**_snake_case )
    def xpath_soup( self , element ) -> int:
        '''simple docstring'''
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents: # type: bs4.element.Tag
            siblings = parent.find_all(child.name , recursive=False )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings , 1 ) if s is child ) )
            child = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
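    # e.g. for the second <li> of <html><body><ul><li/><li/></ul></body></html> this
    # returns xpath_tags=['html', 'body', 'ul', 'li'], xpath_subscripts=[0, 0, 0, 2].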
    def get_three_from_single( self , html_string ) -> Optional[int]:
        '''simple docstring'''
        html_code = BeautifulSoup(html_string , '''html.parser''' )
        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []
        for element in html_code.descendants:
            if type(element ) == bs4.element.NavigableString:
                if type(element.parent ) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element ).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag )
                xpath_tags , xpath_subscripts = self.xpath_soup(element )
                stringaxtag_seq.append(xpath_tags )
                stringaxsubs_seq.append(xpath_subscripts )
        if len(all_doc_strings ) != len(stringaxtag_seq ):
            raise ValueError('''Number of doc strings and xtags does not correspond''' )
        if len(all_doc_strings ) != len(stringaxsubs_seq ):
            raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
    def construct_xpath( self , xpath_tags , xpath_subs ) -> Optional[int]:
        '''simple docstring'''
        xpath = ''''''
        for tagname, subs in zip(xpath_tags , xpath_subs ):
            xpath += F"""/{tagname}"""
            if subs != 0:
                xpath += F"""[{subs}]"""
return xpath
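    # e.g. construct_xpath(['html', 'body', 'ul', 'li'], [0, 0, 0, 2]) -> '/html/body/ul/li[2]'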
    def __call__( self , html_strings ) -> BatchFeature:
        '''simple docstring'''
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings , str ):
            valid_strings = True
        elif isinstance(html_strings , (list, tuple) ):
            if len(html_strings ) == 0 or isinstance(html_strings[0] , str ):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                '''HTML strings must be of type `str`, `List[str]` (batch of examples), '''
                F"""but is of type {type(html_strings )}.""" )
        is_batched = bool(isinstance(html_strings , (list, tuple) ) and (isinstance(html_strings[0] , str )) )
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings , stringaxtag_seq , stringaxsubs_seq = self.get_three_from_single(html_string )
            nodes.append(all_doc_strings )
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings , stringaxtag_seq , stringaxsubs_seq ):
                xpath_string = self.construct_xpath(tag_list , sub_list )
                xpath_strings.append(xpath_string )
            xpaths.append(xpath_strings )
        # return as Dict
        data = {'''nodes''': nodes, '''xpaths''': xpaths}
        encoded_inputs = BatchFeature(data=data , tensor_type=None )
        return encoded_inputs
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_funnel_fast'] = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_funnel'] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_funnel'] = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def jaro_winkler( str1 , str2 ) -> float:
    def get_matched_characters(_str1 , _str2 ) -> str:
        matched = []
        limit = min(len(_str1 ) , len(_str2 ) ) // 2
        for i, l in enumerate(_str1 ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_str2 ) ) )
            if l in _str2[left:right]:
                matched.append(l )
                # blank out the matched character so it cannot be matched twice
                _str2 = F"""{_str2[0:_str2.index(l )]} {_str2[_str2.index(l ) + 1:]}"""
        return "".join(matched )
    # matching characters
    matching_1 = get_matched_characters(str1 , str2 )
    matching_2 = get_matched_characters(str2 , str1 )
    match_count = len(matching_1 )
    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1 , matching_2 ) if c1 != c2] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1 )
                + match_count / len(str2 )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4] , str2[:4] ):
        if c1 == c2:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
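# Worked example (follows directly from the definitions above): for 'hello' vs
# 'world' only 'l' matches in both directions, so jaro = 1/3 * (1/5 + 1/5 + 1/1)
# ≈ 0.4667; there is no common prefix, so the Winkler bonus is zero and the
# demo below prints roughly 0.4667.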
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(jaro_winkler('hello', 'world'))
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : Any ):
_a : str = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) ,1 )
self.assertEqual(x.component(2 ) ,3 )
_a : Union[str, Any] = Vector()
def __lowercase ( self : int ):
_a : List[Any] = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(_UpperCAmelCase ) ,'(0,0,0,0,0,1)' )
def __lowercase ( self : Union[str, Any] ):
_a : Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(_UpperCAmelCase ) ,4 )
def __lowercase ( self : Optional[int] ):
_a : Dict = Vector([1, 2] )
_a : Optional[Any] = Vector([1, 2, 3, 4, 5] )
_a : Tuple = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_a : Union[str, Any] = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() ,2.2_36 ,3 )
self.assertAlmostEqual(y.euclidean_length() ,7.4_16 ,3 )
self.assertEqual(z.euclidean_length() ,0 )
self.assertAlmostEqual(w.euclidean_length() ,7.6_16 ,3 )
def __lowercase ( self : Any ):
_a : Dict = Vector([1, 2, 3] )
_a : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) ,2 )
self.assertEqual((x + y).component(1 ) ,3 )
self.assertEqual((x + y).component(2 ) ,4 )
def __lowercase ( self : Tuple ):
_a : str = Vector([1, 2, 3] )
_a : int = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) ,0 )
self.assertEqual((x - y).component(1 ) ,1 )
self.assertEqual((x - y).component(2 ) ,2 )
def __lowercase ( self : Dict ):
_a : Tuple = Vector([1, 2, 3] )
_a : int = Vector([2, -1, 4] ) # for test of dot product
_a : Dict = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) ,'(3.0,6.0,9.0)' )
self.assertEqual((a * b) ,0 )
def __lowercase ( self : List[Any] ):
self.assertEqual(str(zero_vector(10 ) ).count('0' ) ,10 )
def __lowercase ( self : Tuple ):
self.assertEqual(str(unit_basis_vector(3 ,1 ) ) ,'(0,1,0)' )
def __lowercase ( self : Optional[Any] ):
_a : Optional[Any] = Vector([1, 2, 3] )
_a : Any = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 ,_UpperCAmelCase ,_UpperCAmelCase ) ) ,'(3,4,7)' )
def __lowercase ( self : Optional[Any] ):
_a : Union[str, Any] = Vector([1, 0, 0, 0, 0, 0] )
_a : Union[str, Any] = x.copy()
self.assertEqual(str(_UpperCAmelCase ) ,str(_UpperCAmelCase ) )
def __lowercase ( self : Tuple ):
_a : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 ,0 )
x.change_component(1 ,1 )
self.assertEqual(str(_UpperCAmelCase ) ,'(0,1,0)' )
def __lowercase ( self : List[Any] ):
_a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' ,str(_UpperCAmelCase ) )
def __lowercase ( self : Dict ):
_a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
_a : Optional[int] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] ,a.minor(_UpperCAmelCase ,_UpperCAmelCase ) )
def __lowercase ( self : List[str] ):
_a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
_a : List[str] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] ,a.cofactor(_UpperCAmelCase ,_UpperCAmelCase ) )
def __lowercase ( self : List[str] ):
_a : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
self.assertEqual(-5 ,a.determinant() )
def __lowercase ( self : List[str] ):
_a : Union[str, Any] = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ,3 ,3 )
_a : Any = Vector([1, 2, 3] )
self.assertEqual('(14,32,50)' ,str(a * x ) )
self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' ,str(a * 2 ) )
def __lowercase ( self : List[Any] ):
_a : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
a.change_component(0 ,2 ,5 )
self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' ,str(_UpperCAmelCase ) )
def __lowercase ( self : List[str] ):
_a : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
self.assertEqual(7 ,a.component(2 ,1 ) ,0.01 )
def __lowercase ( self : Tuple ):
_a : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
_a : Dict = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] ,3 ,3 )
self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' ,str(a + b ) )
def __lowercase ( self : Dict ):
_a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
_a : int = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] ,3 ,3 )
self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' ,str(a - b ) )
def __lowercase ( self : Dict ):
self.assertEqual(
'|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' ,str(square_zero_matrix(5 ) ) ,)
if __name__ == "__main__":
unittest.main()
def is_balanced( s ) -> bool:
    stack = []
    open_brackets = set({'''(''', '''[''', '''{'''} )
    closed_brackets = set({''')''', ''']''', '''}'''} )
    open_to_closed = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack ) == 0
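# e.g. is_balanced("([]{})") -> True, while is_balanced("([)]") -> False because
# ')' arrives while '[' is still on top of the stack.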
def main() -> None:
    s = input('''Enter sequence of brackets: ''' )
    if is_balanced(s ):
        print(s , '''is balanced''' )
    else:
        print(s , '''is not balanced''' )
if __name__ == "__main__":
    main()
def is_ip_va_address_valid( ip_va_address : str ) -> bool:
    """simple docstring"""
    octets = [int(i ) for i in ip_va_address.split('.' ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 254 for octet in octets )
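# Note: this validator caps octets at 254, so '255.255.255.255' is rejected even
# though it is a syntactically valid IPv4 address. Examples:
#   is_ip_va_address_valid('192.168.0.1') -> True
#   is_ip_va_address_valid('192.168.0.255') -> False (octet above 254)
#   is_ip_va_address_valid('1.2.3') -> False (only three octets)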
if __name__ == "__main__":
__A = input().strip()
__A = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__a ):
# max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase_ : int = mocked_dataloaders # noqa: F811
def training_function(config , args ):
    """simple docstring"""
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
        config['''num_epochs'''] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    metric = evaluate.load('''glue''' , '''mrpc''' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
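    # e.g. with batch_size=64 and MAX_GPU_BATCH_SIZE=16 this performs 4 gradient
    # accumulation steps of 16 samples each, keeping the effective batch size at 64.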
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=1_00 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.split(__a )[-1].split('''.''' )[0]
accelerator.init_trackers(__a , __a )
# Now we train the model
for epoch in range(__a ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
SCREAMING_SNAKE_CASE_ : Tuple = 0
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE_ : List[str] = model(**__a )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
SCREAMING_SNAKE_CASE_ : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = model(**__a )
SCREAMING_SNAKE_CASE_ : str = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__a , references=__a , )
SCREAMING_SNAKE_CASE_ : Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , __a )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(__a ),
'''epoch''': epoch,
} , step=__a , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    parser.add_argument(
        '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
    parser.add_argument(
        '''--project_dir''' , type=str , default='''logs''' , help='''Location on where to store experiment tracking logs and relevant project information''' , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 0 |
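# Note (added): with the `_import_structure` mapping above, `_LazyModule` defers
# the heavy framework imports until first attribute access, so for example
#   from transformers import XLMRobertaModel
# only materializes `modeling_xlm_roberta` (and hence torch) at that point.
# This is a sketch of the intended behaviour, not extra functionality.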
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum obtainable from non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
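

# Illustrative check (added; not part of the original module): picking the
# non-adjacent elements 7 and 6 from [3, 7, 4, 6, 5] gives the best sum, 13.
assert maximum_non_adjacent_sum([3, 7, 4, 6, 5]) == 13
assert maximum_non_adjacent_sum([]) == 0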
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 93 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
) | 6 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 94 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"} | 6 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[int] = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 95 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the per-example input lengths of the train and val splits to their `len_file`s."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file) | 6 | 0 |
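# Example invocation (added; the script name and paths are placeholders):
#   python save_len_file.py t5-small /path/to/data_dir --max_source_length 1024
# `fire.Fire` maps positional and flag arguments onto save_len_file's parameters.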
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.' )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.' )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected | 96 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Create a peaking (bell) biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Create a low-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Create a high-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt | 6 | 0 |
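# Hedged usage sketch (added): assuming the project's IIRFilter exposes a
# per-sample `process()` method as in TheAlgorithms implementation, a 1 kHz
# low-pass at a 48 kHz sample rate could be applied as:
#   filt = make_lowpass(1000, 48000)
#   filtered = [filt.process(s) for s in samples]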
import random
def partition(a: list[int], left_index: int, right_index: int) -> int:
    """Partition a[left_index:right_index] around the pivot a[left_index]."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list[int], left: int, right: int) -> None:
    """Sort a[left:right] in place using quicksort with a randomly chosen pivot."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point
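

# Illustrative check (added; not part of the original script). Note that
# `right` is exclusive, matching the `quick_sort_random(arr, 0, len(arr))`
# call in main() below.
_demo = [5, 3, 8, 1, 9, 2]
quick_sort_random(_demo, 0, len(_demo))
assert _demo == [1, 2, 3, 5, 8, 9]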
def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main() | 97 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate a polynomial at ``x0`` using Neville's method."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
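

# Illustrative check (added): polynomial interpolation reproduces linear data
# exactly, so interpolating y = x + 5 at x = 5 must return 10.0.
assert neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] == 10.0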
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """Return the area of the grid whose rectangle count is closest to ``target``."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the maximum-sum contiguous subarray of arr[low..high] by divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Find the best subarray that crosses the midpoint."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)
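

# Illustrative check (added): on the classic example the maximum subarray is
# arr[3:7] = [4, -1, 2, 1] with sum 6, spanning indices 3..6.
assert max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8) == (3, 6, 6)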
def time_max_subarray(input_size: int) -> float:
    """Time one max_subarray call on a random list of the given size."""
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    """Benchmark max_subarray over growing input sizes and plot the runtimes."""
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod() | 6 | 0 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
lowercase : Union[str, Any] = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 99 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 | 6 | 0 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate per-process waiting times using shortest-remaining-time-first scheduling."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print("Enter how many process you want to analyze")
__magic_name__ = int(input())
__magic_name__ = [0] * no_of_processes
__magic_name__ = [0] * no_of_processes
__magic_name__ = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("Enter the arrival time and burst time for process:--" + str(i + 1))
__magic_name__, __magic_name__ = map(int, input().split())
__magic_name__ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__magic_name__ = burst_time
__magic_name__ = no_of_processes
__magic_name__ = waiting_time
__magic_name__ = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__magic_name__ = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"Process",
"BurstTime",
"ArrivalTime",
"WaitingTime",
"TurnAroundTime",
],
)
# Printing the dataFrame
pd.set_option("display.max_rows", fcfs.shape[0] + 1)
print(fcfs)
| 100 |
from math import ceil
def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n by n spiral grid."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
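

# Illustrative check (added for reference): Project Euler 28 gives 669171001
# for the default 1001 x 1001 spiral.
assert solution() == 669171001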
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number') | 6 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 101 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class | 6 | 0 |
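# Hedged usage sketch (added): with a pretrained checkpoint the processor
# handles both modalities in one call; the checkpoint name is illustrative.
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")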
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = 'nllb-moe'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
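

# Minimal usage sketch (added): instantiating the config with its defaults and
# inspecting the MoE routing knobs.
#   config = NllbMoeConfig()
#   (config.num_experts, config.expert_capacity)  # -> (128, 64)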
| 102 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to max_perimeter, the integer right triangles."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    """Return the perimeter p <= n that admits the most right-triangle solutions."""
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(F"Perimeter {solution()} has maximum solutions") | 6 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 103 |
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental | 6 | 0 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    # `example_yaml_structure` is the module-level validation schema defined earlier in this file
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
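# Illustrative usage sketch (not part of the original test suite): assuming the
# module-level `example_yaml_structure` schema used throughout these tests, a
# README can be parsed and validated in two explicit steps.
def test_readme_usage_sketch():
    readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
    readme.validate()  # raises ValueError if the card structure is invalid
    assert readme.to_dict()["name"] == "root"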
| 104 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)')
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params
    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        model_inputs = self.tokenizer(inputs, return_tensors=self.framework, **tokenize_kwargs)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor: logits or last_hidden_state
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs) | 6 | 0 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 105 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = 'levit'
    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
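# Illustrative usage sketch (not part of the original module); the values shown
# are simply the defaults above, not a claim about a specific released checkpoint:
#   config = LevitConfig()               # hidden_sizes=[128, 256, 384], depths=[4, 4, 4]
#   custom = LevitConfig(image_size=192) # any field can be overridden by keyword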
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4 | 6 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
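# Note on the lazy-import pattern above (illustrative comment, not part of the
# original file): at runtime the module object is swapped for the _LazyModule, so
# e.g. `from transformers.models.rag import RagTokenizer` only imports
# tokenization_rag on first attribute access; under TYPE_CHECKING the real
# imports are used instead so that type checkers see concrete symbols.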
| 106 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
A : int = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-model-flax')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-model-flax-org')
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub('test-model-flax', use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
        # Reset repo
        delete_repo(token=self._token, repo_id='test-model-flax')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id='test-model-flax', push_to_hub=True, use_auth_token=self._token)
            new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
            base_params = flatten_dict(unfreeze(model.params))
            new_params = flatten_dict(unfreeze(new_model.params))
            for key in base_params.keys():
                max_diff = (base_params[key] - new_params[key]).sum().item()
                self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub('valid_org/test-model-flax-org', use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org')
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-model-flax-org')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id='valid_org/test-model-flax-org', push_to_hub=True, use_auth_token=self._token)
            new_model = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org')
            base_params = flatten_dict(unfreeze(model.params))
            new_params = flatten_dict(unfreeze(new_model.params))
            for key in base_params.keys():
                max_diff = (base_params[key] - new_params[key]).sum().item()
                self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        model = FlaxBertModel(config)
        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        model = FlaxBertModel(config)
        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size='10KB')
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-subfolder'
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model) | 6 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
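# Illustrative note (not part of the original file): `preprocess` accepts a PIL
# image, a list of PIL images, or an already-batched tensor, and returns an NCHW
# float tensor scaled to [-1, 1]; e.g. preprocess(pil_image, 512, 512) has shape
# (1, 3, 512, 512).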
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly colinear: fall back to linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
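# Sanity note (illustrative, not in the original file): slerp(0.0, v0, v1)
# returns v0 and slerp(1.0, v0, v1) returns v1 (up to floating point), since
# theta_t = theta_0 * t interpolates the angle between the two vectors; nearly
# colinear inputs fall back to plain linear interpolation via DOT_THRESHOLD.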
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform, )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def freeze_vae(self):
        set_requires_grad(self.vae, False)
    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)
    def freeze_unet(self):
        set_requires_grad(self.unet, False)
    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(image)}""")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
@torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents, timestep)
        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"""scheduler type {type(self.scheduler)} not supported""")
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)
        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
        grads = -torch.autograd.grad(loss, latents)[0]
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
    def __call__( self , style_image: Union[torch.FloatTensor, PIL.Image.Image] , content_image: Union[torch.FloatTensor, PIL.Image.Image] , style_prompt: Optional[str] = None , content_prompt: Optional[str] = None , height: Optional[int] = 512 , width: Optional[int] = 512 , noise_strength: float = 0.6 , num_inference_steps: Optional[int] = 50 , guidance_scale: Optional[float] = 7.5 , batch_size: Optional[int] = 1 , eta: float = 0.0 , clip_guidance_scale: Optional[float] = 100 , generator: Optional[torch.Generator] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , slerp_latent_style_strength: float = 0.8 , slerp_prompt_style_strength: float = 0.1 , slerp_clip_image_style_strength: float = 0.1 , ) -> Dict:
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(__lowerCamelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(__lowerCamelCase , torch.Generator ) and batch_size > 1:
a = [generator] + [None] * (batch_size - 1)
a = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
a = [x[0] for x in coca_is_none if x[1]]
a = ", ".join(__lowerCamelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__lowerCamelCase ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
a = self.get_image_description(__lowerCamelCase )
if style_prompt is None:
if len(__lowerCamelCase ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
a = self.get_image_description(__lowerCamelCase )
# get prompt text embeddings for content and style
a = self.tokenizer(
__lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=__lowerCamelCase , return_tensors="pt" , )
a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
a = self.tokenizer(
__lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=__lowerCamelCase , return_tensors="pt" , )
a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
a = slerp(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# duplicate text embeddings for each generation per prompt
a = text_embeddings.repeat_interleave(__lowerCamelCase , dim=0 )
# set timesteps
a = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
a = {}
if accepts_offset:
a = 1
self.scheduler.set_timesteps(__lowerCamelCase , **__lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
a , a = self.get_timesteps(__lowerCamelCase , __lowerCamelCase , self.device )
a = timesteps[:1].repeat(__lowerCamelCase )
# Preprocess image
a = preprocess(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
a = self.prepare_latents(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , text_embeddings.dtype , self.device , __lowerCamelCase )
a = preprocess(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
a = self.prepare_latents(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , text_embeddings.dtype , self.device , __lowerCamelCase )
a = slerp(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if clip_guidance_scale > 0:
a = self.get_clip_image_embeddings(__lowerCamelCase , __lowerCamelCase )
a = self.get_clip_image_embeddings(__lowerCamelCase , __lowerCamelCase )
a = slerp(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
a = content_text_input.input_ids.shape[-1]
a = self.tokenizer([""] , padding="max_length" , max_length=__lowerCamelCase , return_tensors="pt" )
a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
a = uncond_embeddings.repeat_interleave(__lowerCamelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
a = torch.randn(__lowerCamelCase , generator=__lowerCamelCase , device="cpu" , dtype=__lowerCamelCase ).to(
self.device )
else:
a = torch.randn(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
a = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
a = {}
if accepts_eta:
a = eta
# check if the scheduler accepts generator
a = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
a = generator
with self.progress_bar(total=__lowerCamelCase ):
for i, t in enumerate(__lowerCamelCase ):
# expand the latents if we are doing classifier free guidance
a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase )
# predict the noise residual
a = self.unet(__lowerCamelCase , __lowerCamelCase , encoder_hidden_states=__lowerCamelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
a , a = noise_pred.chunk(2 )
a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
a , a = self.cond_fn(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
# compute the previous noisy sample x_t -> x_t-1
a = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
a = 1 / 0.18_215 * latents
a = self.vae.decode(__lowerCamelCase ).sample
a = (image / 2 + 0.5).clamp(0 , 1 )
a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
a = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__lowerCamelCase , nsfw_content_detected=__lowerCamelCase )
| 107 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]
def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
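# For example (illustrative, assuming the `params` list defined below): the
# generated sub-test names look like `test_fp32_non_distributed_zero2_base`,
# i.e. `<test_name>_<stage>_<model_key>`.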
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False, )
    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False, )
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True, )
    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True, )
    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass
    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16, )
        self.do_checks(output_dir)
        return output_dir
    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir('./xxx', after=True)
        args = f"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
        if fp16:
            args.extend(['--fp16'])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        return output_dir
    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split() | 6 | 0 |
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement tanh via its logistic form: tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
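# Illustrative check (not part of the original file): the closed form above is
# algebraically identical to numpy's built-in tanh, since
# tanh(x) = (1 - e^(-2x)) / (1 + e^(-2x)) = 2 / (1 + e^(-2x)) - 1.
def _check_against_numpy() -> bool:
    xs = np.linspace(-5.0, 5.0, 101)
    return bool(np.allclose(tangent_hyperbolic(xs), np.tanh(xs)))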
if __name__ == "__main__":
import doctest
doctest.testmod()
| 108 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = 'sample'
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 32, 32)
    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass
    def test_training(self):
        pass
    @unittest.skipIf(torch_device == 'mps', 'Gradient checkpointing skipped on MPS')
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()
        assert model_2.is_gradient_checkpointing and model_2.training
        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy')
        model = model.to(torch_device)
        model.eval()
        if torch_device == 'mps':
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)
        image = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == 'mps':
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ] )
        elif torch_device == 'cpu':
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image
    def get_sd_vae_model(self, model_id='CompVis/stable-diffusion-v1-4', fp16=False):
        revision = 'fp16' if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id, subfolder='vae', torch_dtype=torch_dtype, revision=revision, )
        model.to(torch_device).eval()
        return model
    def get_generator(self, seed=0):
        if torch_device == 'mps':
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        with torch.no_grad():
            sample = model(image).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3e-3 if torch_device != 'mps' else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance) | 6 | 0 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Calculate the great-circle distance between two points on the Earth, in meters."""
    # account for the Earth's flattening by converting to reduced latitudes
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
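# Illustrative usage (coordinates are example inputs, not from the original file):
# haversine_distance(37.774856, -122.424227, 40.713019, -74.012647) gives the
# San Francisco -> New York distance, roughly 4.1e6 meters.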
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ['bs4'])
        super().__init__(**kwargs)
    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, 'html.parser')
        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []
        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag)
                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)
        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError('Number of doc strings and xtags does not correspond')
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError('Number of doc strings and xsubs does not correspond')
        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        # e.g. tags ["html", "body", "div"] with subscripts [0, 0, 1] -> "/html/body/div[1]"
        xpath = ''
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                'HTML strings must be of type `str`, `List[str]` (batch of examples), '
                f"but is of type {type(html_strings)}.")
        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)
        # return as Dict
        data = {'nodes': nodes, 'xpaths': xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs | 6 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowercase__ = 1_92
lowercase__ = 7_68
lowercase__ = 12
lowercase__ = 3
lowercase__ = [8_00, 13_33]
lowercase__ = False
elif yolos_name == "yolos_s_dWr":
lowercase__ = 3_30
lowercase__ = 14
lowercase__ = 6
lowercase__ = 13_20
elif "yolos_s" in yolos_name:
lowercase__ = 3_84
lowercase__ = 15_36
lowercase__ = 12
lowercase__ = 6
elif "yolos_b" in yolos_name:
lowercase__ = [8_00, 13_44]
lowercase__ = 91
lowercase__ = '''huggingface/label-files'''
lowercase__ = '''coco-detection-id2label.json'''
lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
return config
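# timm checkpoints store attention query/key/value as a single fused qkv
# projection per block; split it into the separate q/k/v weights and biases
# expected by the Hugging Face implementation.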
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
lowercase__ = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[: config.hidden_size, :]
lowercase__ = in_proj_bias[: config.hidden_size]
lowercase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ = in_proj_weight[-config.hidden_size :, :]
lowercase__ = in_proj_bias[-config.hidden_size :]
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if "backbone" in name:
lowercase__ = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
lowercase__ = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
lowercase__ = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
lowercase__ = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
lowercase__ = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowercase__ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
lowercase__ = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
lowercase__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowercase__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowercase__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowercase__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowercase__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowercase__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
lowercase__ = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
lowercase__ = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
lowercase__ = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase__ = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "qkv" in key:
lowercase__ = key.split('''.''' )
lowercase__ = int(key_split[2] )
lowercase__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowercase__ = val[:dim, :]
lowercase__ = val[
dim : dim * 2, :
]
lowercase__ = val[-dim:, :]
else:
lowercase__ = val[:dim]
lowercase__ = val[dim : dim * 2]
lowercase__ = val[-dim:]
else:
lowercase__ = val
return orig_state_dict
def _a ( ):
"""simple docstring"""
lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
lowercase__ = get_yolos_config(SCREAMING_SNAKE_CASE )
# load original state_dict
lowercase__ = torch.load(SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''model''']
# load 🤗 model
lowercase__ = YolosForObjectDetection(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ = convert_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by YolosImageProcessor
lowercase__ = 8_00 if yolos_name != '''yolos_ti''' else 5_12
lowercase__ = YolosImageProcessor(format='''coco_detection''' , size=SCREAMING_SNAKE_CASE )
lowercase__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowercase__ = model(**SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ = outputs.logits, outputs.pred_boxes
lowercase__ , lowercase__ = None, None
if yolos_name == "yolos_ti":
lowercase__ = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
lowercase__ = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
lowercase__ = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
lowercase__ = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
lowercase__ = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
lowercase__ = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
lowercase__ = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
lowercase__ = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
lowercase__ = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
lowercase__ = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(f'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(f'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
lowercase__ = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
lowercase__ = model_mapping[yolos_name]
image_processor.push_to_hub(SCREAMING_SNAKE_CASE , organization='''hustvl''' )
model.push_to_hub(SCREAMING_SNAKE_CASE , organization='''hustvl''' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCAmelCase = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 110 |
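# Jaro-Winkler similarity: the Jaro score averages the matched-character
# fractions of both strings with the non-transposed fraction of matches,
# then adds a prefix bonus of 0.1 per shared leading character (up to 4).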
def __lowerCAmelCase ( a__ , a__ ) -> float:
def get_matched_characters(a__ , a__ ) -> str:
__a = []
__a = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
__a = int(max(0 , i - limit ) )
__a = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(a__ )
__a = F"""{_stra[0:_stra.index(a__ )]} {_stra[_stra.index(a__ ) + 1:]}"""
return "".join(a__ )
# matching characters
__a = get_matched_characters(a__ , a__ )
__a = get_matched_characters(a__ , a__ )
__a = len(a__ )
# transposition
__a = (
len([(ca, ca) for ca, ca in zip(a__ , a__ ) if ca != ca] ) // 2
)
if not match_count:
__a = 0.0
else:
__a = (
1
/ 3
* (
match_count / len(a__ )
+ match_count / len(a__ )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
__a = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world')) | 6 | 0 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a ( __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :Tuple = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(a__ , a__ )
def a ( __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :List[Any] = emb.weight.shape
UpperCamelCase__ :List[Any] = nn.Linear(a__ , a__ , bias=a__ )
UpperCamelCase__ :Union[str, Any] = emb.weight.data
return lin_layer
def a ( __a , __a=None ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = {}
for old_key in state_dict.keys():
UpperCamelCase__ :List[Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
UpperCamelCase__ :Optional[int] = key.replace('''moe_layer.experts.0''' , f'''ffn.experts.expert_{expert_idx}''' )
else:
UpperCamelCase__ :Any = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
if "gate" in key:
UpperCamelCase__ :int = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
if "fc2" and "experts" not in key:
UpperCamelCase__ :Any = key.replace('''.fc2.''' , '''.ffn.fc2.''' )
if "fc1" and "experts" not in key:
UpperCamelCase__ :Any = key.replace('''.fc1.''' , '''.ffn.fc1.''' )
if ".encoder_attn." in key:
UpperCamelCase__ :List[str] = key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
UpperCamelCase__ :List[str] = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
UpperCamelCase__ :Any = key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
UpperCamelCase__ :Tuple = state_dict[old_key]
return new_dict
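# Shard the NLLB-MoE checkpoint on the fly: each expert rank file becomes its
# own shard, the shared (non-expert) weights go into the final shard, and a
# weight-map index from parameter name to shard file is built when more than
# one shard results.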
def a ( __a , __a , __a , __a , __a = WEIGHTS_NAME ) -> Any:
'''simple docstring'''
UpperCamelCase__ :str = []
UpperCamelCase__ :Tuple = 0
os.makedirs(a__ , exist_ok=a__ )
for expert in range(a__ ):
UpperCamelCase__ :int = switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(a__ ):
UpperCamelCase__ :List[str] = torch.load(a__ )['''model''']
remove_ignore_keys_(a__ )
UpperCamelCase__ :List[str] = rename_fairseq_keys(a__ , a__ )
UpperCamelCase__ :Dict = os.path.join(
a__ , weights_name.replace('''.bin''' , f'''-{len(a__ )+1:05d}-of-???.bin''' ) )
torch.save(a__ , a__ )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(a__ )[0]].dtype )
# Add the last block
UpperCamelCase__ :int = os.path.join(a__ , weights_name.replace('''.bin''' , f'''-{len(a__ )+1:05d}-of-???.bin''' ) )
UpperCamelCase__ :int = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(a__ )
UpperCamelCase__ :Union[str, Any] = rename_fairseq_keys(a__ , a__ )
UpperCamelCase__ :int = shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(a__ ) == 1:
UpperCamelCase__ :List[str] = os.path.join(a__ , a__ )
torch.save(a__ , a__ )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(a__ , a__ )
# Otherwise, let's build the index
UpperCamelCase__ :str = {}
for idx, shard in enumerate(a__ ):
UpperCamelCase__ :str = weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-{len(a__ ):05d}.bin''' )
UpperCamelCase__ :List[Any] = os.path.join(a__ , weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(a__ , os.path.join(a__ , a__ ) )
for key in shard:
UpperCamelCase__ :int = shard_file
# Add the metadata
UpperCamelCase__ :Optional[int] = {'''total_size''': total_size}
UpperCamelCase__ :List[Any] = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(a__ , a__ ) , '''w''' , encoding='''utf-8''' ) as f:
UpperCamelCase__ :Tuple = json.dumps(a__ , indent=2 , sort_keys=a__ ) + '''\n'''
f.write(a__ )
return metadata, index
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
__snake_case = parser.parse_args()
__snake_case = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__snake_case = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__snake_case = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path) | 97 |
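# Stack-based balanced-bracket check: push each opener, pop on every closer
# and verify the pair matches; the sequence is balanced iff the stack ends empty.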
def __lowerCAmelCase ( a__ ) -> str:
__a = []
__a = set({'''(''', '''[''', '''{'''} )
__a = set({''')''', ''']''', '''}'''} )
__a = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}
for i in range(len(a__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(a__ ) == 0 or (len(a__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(a__ ) == 0
def __lowerCAmelCase ( ) -> Dict:
__a = input('''Enter sequence of brackets: ''' )
if is_balanced(a__ ):
print(a__ , '''is balanced''' )
else:
print(a__ , '''is not balanced''' )
if __name__ == "__main__":
main() | 6 | 0 |
import math
import unittest
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> bool:
'''simple docstring'''
assert isinstance(a__, a__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(a__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class __A ( unittest.TestCase ):
def lowercase__ ( self : List[Any] ):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def lowercase__ ( self : Optional[Any] ):
with self.assertRaises(_snake_case ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 138 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : str = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 0 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( A ):
UpperCamelCase = (CMStochasticIterativeScheduler,)
UpperCamelCase = 1_0
def _lowerCamelCase ( self : int , **A : int) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = {
'num_train_timesteps': 2_01,
'sigma_min': 0.0_0_2,
'sigma_max': 8_0.0,
}
config.update(**_snake_case)
return config
def _lowerCamelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCAmelCase = 10
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = self.scheduler_classes[0](**_snake_case)
scheduler.set_timesteps(_snake_case)
_UpperCAmelCase = scheduler.timesteps[0]
_UpperCAmelCase = scheduler.timesteps[1]
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
_UpperCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _lowerCamelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_snake_case)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_snake_case)
def _lowerCamelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_snake_case)
_UpperCAmelCase = 1
scheduler.set_timesteps(_snake_case)
_UpperCAmelCase = scheduler.timesteps
_UpperCAmelCase = torch.manual_seed(0)
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_snake_case):
# 1. scale model input
_UpperCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case)
# 2. predict noise residual
_UpperCAmelCase = model(_snake_case , _snake_case)
# 3. predict previous sample x_t-1
_UpperCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case).prev_sample
_UpperCAmelCase = pred_prev_sample
_UpperCAmelCase = torch.sum(torch.abs(_snake_case))
_UpperCAmelCase = torch.mean(torch.abs(_snake_case))
assert abs(result_sum.item() - 1_9_2.7_6_1_4) < 1E-2
assert abs(result_mean.item() - 0.2_5_1_0) < 1E-3
def _lowerCamelCase ( self : str) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_snake_case)
_UpperCAmelCase = [1_06, 0]
scheduler.set_timesteps(timesteps=_snake_case)
_UpperCAmelCase = scheduler.timesteps
_UpperCAmelCase = torch.manual_seed(0)
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
_UpperCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case)
# 2. predict noise residual
_UpperCAmelCase = model(_snake_case , _snake_case)
# 3. predict previous sample x_t-1
_UpperCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case).prev_sample
_UpperCAmelCase = pred_prev_sample
_UpperCAmelCase = torch.sum(torch.abs(_snake_case))
_UpperCAmelCase = torch.mean(torch.abs(_snake_case))
assert abs(result_sum.item() - 3_4_7.6_3_5_7) < 1E-2
assert abs(result_mean.item() - 0.4_5_2_7) < 1E-3
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_snake_case)
_UpperCAmelCase = [39, 30, 12, 15, 0]
with self.assertRaises(_snake_case , msg='`timesteps` must be in descending order.'):
scheduler.set_timesteps(timesteps=_snake_case)
def _lowerCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_snake_case)
_UpperCAmelCase = [39, 30, 12, 1, 0]
_UpperCAmelCase = len(_snake_case)
with self.assertRaises(_snake_case , msg='Can only pass one of `num_inference_steps` or `timesteps`.'):
scheduler.set_timesteps(num_inference_steps=_snake_case , timesteps=_snake_case)
def _lowerCamelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_snake_case)
_UpperCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _snake_case , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=_snake_case)
| 339 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : Dict = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 0 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
__lowerCAmelCase = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
__lowerCAmelCase = F"""{src_lang}-{tgt_lang}"""
__lowerCAmelCase = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original model (and this ported version) doesn't seem to handle inputs with repeated sub-phrases well; [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
os.makedirs(a__, exist_ok=a__)
__lowerCAmelCase = os.path.join(a__, '''README.md''')
print(F"""Generating {path}""")
with open(a__, '''w''', encoding='''utf-8''') as f:
f.write(a__)
# make sure we are under the root of the project
_UpperCAmelCase : int = Path(__file__).resolve().parent.parent.parent
_UpperCAmelCase : Optional[Any] = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_UpperCAmelCase : Optional[Any] = model_name.split("""-""")
_UpperCAmelCase : Union[str, Any] = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 174 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : Optional[int] = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 0 |
"""simple docstring"""
from math import sqrt
def a_ ( _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(a__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
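# Scan upward with is_prime, handling 2 and 3 first and then testing only odd
# candidates, until the nth prime is reached.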
def a_ ( _lowerCAmelCase : Optional[Any] = 1_0001 ):
'''simple docstring'''
lowercase__ : Any = 0
lowercase__ : Union[str, Any] = 1
while count != nth and number < 3:
number += 1
if is_prime(a__ ):
count += 1
while count != nth:
number += 2
if is_prime(a__ ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''')
| 77 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
) | 6 | 0 |
import unittest
import numpy as np
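# Schur complement of the 2x2 block matrix [[A, B], [B^T, C]] with respect to
# A: S = C - B^T A^{-1} B. The tests below rely on the determinant identity
# det([[A, B], [B^T, C]]) = det(A) * det(S).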
def __UpperCAmelCase ( a_ , a_ , a_ , a_ = None , ):
snake_case_ = np.shape(a__)
snake_case_ = np.shape(a__)
snake_case_ = np.shape(a__)
if shape_a[0] != shape_b[0]:
snake_case_ = (
'Expected the same number of rows for A and B. '
f'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(a__)
if shape_b[1] != shape_c[1]:
snake_case_ = (
'Expected the same number of columns for B and C. '
f'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(a__)
snake_case_ = pseudo_inv
if a_inv is None:
try:
snake_case_ = np.linalg.inv(a__)
except np.linalg.LinAlgError:
raise ValueError(
'Input matrix A is not invertible. Cannot compute Schur complement.')
return mat_c - mat_b.T @ a_inv @ mat_b
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> None:
snake_case_ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
snake_case_ = np.array([[0, 3], [3, 0], [2, 3]] )
snake_case_ = np.array([[2, 1], [6, 3]] )
snake_case_ = schur_complement(_snake_case , _snake_case , _snake_case )
snake_case_ = np.block([[a, b], [b.T, c]] )
snake_case_ = np.linalg.det(_snake_case )
snake_case_ = np.linalg.det(_snake_case )
snake_case_ = np.linalg.det(_snake_case )
self.assertAlmostEqual(_snake_case , det_a * det_s )
def _UpperCamelCase ( self ) -> None:
snake_case_ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
snake_case_ = np.array([[0, 3], [3, 0], [2, 3]] )
snake_case_ = np.array([[2, 1], [6, 3]] )
with self.assertRaises(_snake_case ):
schur_complement(_snake_case , _snake_case , _snake_case )
def _UpperCamelCase ( self ) -> None:
snake_case_ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
snake_case_ = np.array([[0, 3], [3, 0], [2, 3]] )
snake_case_ = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(_snake_case ):
schur_complement(_snake_case , _snake_case , _snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 178 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=a )
class __A( a ):
snake_case_ = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
snake_case_ = Features({'''text''': Value('''string''' )} )
snake_case_ = Features({} )
snake_case_ = "text"
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text"} | 6 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : Optional[int] = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
snake_case_ : Tuple = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
snake_case_ : int = '</w>'
snake_case_ : Dict = '@@ '
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any:
UpperCAmelCase_ : Any = set()
UpperCAmelCase_ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase_ : int = char
return pairs
# Speech2Text2 has no max input length
snake_case_ : Optional[Any] = {'facebook/s2t-wav2vec2-large-en-de': 10_24}
class __a (lowerCamelCase ):
__a : List[Any] = VOCAB_FILES_NAMES
__a : Dict = PRETRAINED_VOCAB_FILES_MAP
__a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Tuple = ["input_ids", "attention_mask"]
def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any]="<s>" , __magic_name__ : Union[str, Any]="<pad>" , __magic_name__ : Union[str, Any]="</s>" , __magic_name__ : Tuple="<unk>" , __magic_name__ : str=False , __magic_name__ : int=None , **__magic_name__ : int , ) -> List[Any]:
"""simple docstring"""
super().__init__(
unk_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , pad_token=_snake_case , do_lower_case=_snake_case , **_snake_case , )
UpperCAmelCase_ : List[str] = do_lower_case
with open(_snake_case , encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase_ : Optional[int] = json.load(_snake_case )
UpperCAmelCase_ : List[Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
            logger.info(F"""No merges file provided. {self.__class__.__name__} can only be used for decoding.""" )
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Any = None
else:
with open(_snake_case , encoding='''utf-8''' ) as merges_handle:
UpperCAmelCase_ : List[Any] = merges_handle.read().split('''\n''' )[:-1]
UpperCAmelCase_ : str = [tuple(merge.split()[:2] ) for merge in merges]
UpperCAmelCase_ : List[Any] = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
UpperCAmelCase_ : Union[str, Any] = {}
@property
def UpperCAmelCase__ ( self : Dict ) -> int:
"""simple docstring"""
return len(self.decoder )
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase__ ( self : str , __magic_name__ : List[Any] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : int = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ : Tuple = get_pairs(_snake_case )
if not pairs:
return token
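        # Greedily apply the lowest-ranked (highest-priority) merge among the
        # current symbol pairs until no known merge remains.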
while True:
UpperCAmelCase_ : Tuple = min(_snake_case , key=lambda __magic_name__ : self.bpe_ranks.get(_snake_case , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = bigram
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Union[str, Any] = 0
while i < len(_snake_case ):
try:
UpperCAmelCase_ : Tuple = word.index(_snake_case , _snake_case )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_ : int = j
if word[i] == first and i < len(_snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ : Optional[Any] = tuple(_snake_case )
UpperCAmelCase_ : Union[str, Any] = new_word
if len(_snake_case ) == 1:
break
else:
UpperCAmelCase_ : List[Any] = get_pairs(_snake_case )
UpperCAmelCase_ : str = ''' '''.join(_snake_case )
if word == "\n " + BPE_TOKEN_MERGES:
UpperCAmelCase_ : Optional[Any] = '''\n''' + BPE_TOKEN_MERGES
if word.endswith(_snake_case ):
UpperCAmelCase_ : List[Any] = word.replace(_snake_case , '''''' )
UpperCAmelCase_ : Dict = word.replace(''' ''' , _snake_case )
UpperCAmelCase_ : str = word
return word
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
'''This tokenizer was instantiated without a `merges.txt` file, so'''
''' that it can only be used for decoding, not for encoding.'''
                ''' Make sure to provide a `merges.txt` file at instantiation to enable '''
'''encoding.''' )
if self.do_lower_case:
UpperCAmelCase_ : List[str] = text.lower()
UpperCAmelCase_ : List[Any] = text.split()
UpperCAmelCase_ : Any = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_snake_case ).split(''' ''' ) ) )
return split_tokens
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any] ) -> int:
"""simple docstring"""
return self.encoder.get(_snake_case , self.encoder.get(self.unk_token ) )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = self.decoder.get(_snake_case , self.unk_token )
return result
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Optional[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = ''' '''.join(_snake_case )
# make sure @@ tokens are concatenated
UpperCAmelCase_ : int = ''''''.join(string.split(_snake_case ) )
return string
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Dict = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ : str = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : Dict = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_snake_case , ensure_ascii=_snake_case ) + '''\n''' )
UpperCAmelCase_ : Dict = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __magic_name__ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
UpperCAmelCase_ : List[str] = token_index
writer.write(''' '''.join(_snake_case ) + '''\n''' )
index += 1
return (vocab_file, merges_file)
| 125 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
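# Pre-compute per-example sequence lengths for the train and validation sets
# and pickle them to each dataset's len_file so length-grouped batch samplers
# can reuse them.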
def __lowerCAmelCase ( a__ , a__ , a__=1024 , a__=1024 , a__=False , **a__ ) -> Optional[Any]:
__a = AutoTokenizer.from_pretrained(a__ )
__a = SeqaSeqDataset(a__ , a__ , a__ , a__ , type_path='''train''' , **a__ )
__a = tok.pad_token_id
def get_lens(a__ ):
__a = tqdm(
DataLoader(a__ , batch_size=512 , num_workers=8 , shuffle=a__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
__a = []
for batch in dl:
__a = batch['''input_ids'''].ne(a__ ).sum(1 ).tolist()
__a = batch['''labels'''].ne(a__ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(a__ , a__ ):
max_lens.append(max(a__ , a__ ) )
else:
max_lens.extend(a__ )
return max_lens
__a = get_lens(a__ )
__a = SeqaSeqDataset(a__ , a__ , a__ , a__ , type_path='''val''' , **a__ )
__a = get_lens(a__ )
pickle_save(a__ , train_ds.len_file )
pickle_save(a__ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file) | 6 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ):
lowercase__ = CTRLTokenizer
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
_UpperCamelCase = dict(zip(_snake_case , range(len(_snake_case))))
_UpperCamelCase = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
_UpperCamelCase = {'''unk_token''': '''<unk>'''}
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(_snake_case) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(_snake_case))
def UpperCAmelCase ( self , **__a) -> List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **_snake_case)
def UpperCAmelCase ( self , __a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = '''adapt react readapt apt'''
_UpperCamelCase = '''adapt react readapt apt'''
return input_text, output_text
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
_UpperCamelCase = '''adapt react readapt apt'''
_UpperCamelCase = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
_UpperCamelCase = tokenizer.tokenize(_snake_case)
self.assertListEqual(_snake_case , _snake_case)
_UpperCamelCase = tokens + [tokenizer.unk_token]
_UpperCamelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case) , _snake_case)
| 194 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
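# Second-order (biquad) IIR filter designers following the Audio EQ Cookbook:
# each function derives coefficients from omega = tau * frequency / samplerate
# and alpha = sin(omega) / (2 * q_factor); the shelf and peak variants also
# use big_a = 10 ** (gain_db / 40).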
def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter:
__a = tau * frequency / samplerate
__a = sin(a__ )
__a = cos(a__ )
__a = _sin / (2 * q_factor)
__a = (1 - _cos) / 2
__a = 1 - _cos
__a = 1 + alpha
__a = -2 * _cos
__a = 1 - alpha
__a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter:
__a = tau * frequency / samplerate
__a = sin(a__ )
__a = cos(a__ )
__a = _sin / (2 * q_factor)
__a = (1 + _cos) / 2
__a = -1 - _cos
__a = 1 + alpha
__a = -2 * _cos
__a = 1 - alpha
__a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter:
__a = tau * frequency / samplerate
__a = sin(a__ )
__a = cos(a__ )
__a = _sin / (2 * q_factor)
__a = _sin / 2
__a = 0
__a = -ba
__a = 1 + alpha
__a = -2 * _cos
__a = 1 - alpha
__a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter:
__a = tau * frequency / samplerate
__a = sin(a__ )
__a = cos(a__ )
__a = _sin / (2 * q_factor)
__a = 1 - alpha
__a = -2 * _cos
__a = 1 + alpha
__a = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter:
__a = tau * frequency / samplerate
__a = sin(a__ )
__a = cos(a__ )
__a = _sin / (2 * q_factor)
__a = 10 ** (gain_db / 40)
__a = 1 + alpha * big_a
__a = -2 * _cos
__a = 1 - alpha * big_a
__a = 1 + alpha / big_a
__a = -2 * _cos
__a = 1 - alpha / big_a
__a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter:
__a = tau * frequency / samplerate
__a = sin(a__ )
__a = cos(a__ )
__a = _sin / (2 * q_factor)
__a = 10 ** (gain_db / 40)
__a = (big_a + 1) - (big_a - 1) * _cos
__a = (big_a + 1) + (big_a - 1) * _cos
__a = (big_a - 1) - (big_a + 1) * _cos
__a = (big_a - 1) + (big_a + 1) * _cos
__a = 2 * sqrt(a__ ) * alpha
__a = big_a * (pmc + aaa)
__a = 2 * big_a * mpc
__a = big_a * (pmc - aaa)
__a = ppmc + aaa
__a = -2 * pmpc
__a = ppmc - aaa
__a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter:
__a = tau * frequency / samplerate
__a = sin(a__ )
__a = cos(a__ )
__a = _sin / (2 * q_factor)
__a = 10 ** (gain_db / 40)
__a = (big_a + 1) - (big_a - 1) * _cos
__a = (big_a + 1) + (big_a - 1) * _cos
__a = (big_a - 1) - (big_a + 1) * _cos
__a = (big_a - 1) + (big_a + 1) * _cos
__a = 2 * sqrt(a__ ) * alpha
__a = big_a * (ppmc + aaa)
__a = -2 * big_a * pmpc
__a = big_a * (ppmc - aaa)
__a = pmc + aaa
__a = 2 * mpc
__a = pmc - aaa
__a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt | 6 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : str = 'table-transformer'
__lowerCamelCase : Tuple = ['past_key_values']
__lowerCamelCase : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__(self , A=True , A=None , A=3 , A=100 , A=6 , A=2_048 , A=8 , A=6 , A=2_048 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=256 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> List[str]:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
_a = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_snake_case , _snake_case ):
_a = backbone_config.get('''model_type''' )
_a = CONFIG_MAPPING[backbone_model_type]
_a = config_class.from_dict(_snake_case )
# set timm attributes to None
_a , _a , _a = None, None, None
_a = use_timm_backbone
_a = backbone_config
_a = num_channels
_a = num_queries
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = init_xavier_std
_a = encoder_layerdrop
_a = decoder_layerdrop
_a = encoder_layers
_a = auxiliary_loss
_a = position_embedding_type
_a = backbone
_a = use_pretrained_backbone
_a = dilation
# Hungarian matcher
_a = class_cost
_a = bbox_cost
_a = giou_cost
# Loss coefficients
_a = mask_loss_coefficient
_a = dice_loss_coefficient
_a = bbox_loss_coefficient
_a = giou_loss_coefficient
_a = eos_coefficient
super().__init__(is_encoder_decoder=_snake_case , **_snake_case )
@property
def a__ (self ) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def a__ (self ) -> int:
"""simple docstring"""
return self.d_model
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Dict = version.parse('1.11' )
@property
def a__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def a__ (self ) -> float:
"""simple docstring"""
return 1E-5
@property
def a__ (self ) -> int:
"""simple docstring"""
return 12
def neville_interpolate(x_points: list, y_points: list, xa: int) -> list:
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
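# Usage example for the Neville interpolation above (points on the line
# y = x + 5, evaluated at x = 5; the first return value is the interpolated
# value, the second is the full tableau):
#
#     neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]  # -> 10.0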
if __name__ == "__main__":
import doctest
    doctest.testmod()
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Tuple):
self.assertEqual(len(_snake_case) ,len(_snake_case))
for a, b in zip(_snake_case ,_snake_case):
self.assertAlmostEqual(_snake_case ,_snake_case ,delta=_snake_case)
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : List[Any] = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0])])
accumulator([tf.constant([-2.0, 1.0])])
accumulator([tf.constant([-1.0, 2.0])])
with self.assertRaises(_snake_case):
accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
self.assertEqual(accumulator.step ,3)
self.assertEqual(len(accumulator.gradients) ,1)
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() ,[-2.0, 5.0] ,tol=1E-2)
accumulator.reset()
self.assertEqual(accumulator.step ,0)
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() ,[0.0, 0.0] ,tol=1E-2)
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : List[str] = None
ops.enable_eager_execution_internal()
__lowerCamelCase : Any = tf.config.list_physical_devices('CPU')
if len(_snake_case) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] ,[tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()])
__lowerCamelCase : List[str] = tf.config.list_logical_devices(device_type='CPU')
__lowerCamelCase : Any = tf.distribute.MirroredStrategy(devices=devices[:2])
with strategy.scope():
__lowerCamelCase : Dict = GradientAccumulator()
__lowerCamelCase : Tuple = tf.Variable([4.0, 3.0])
__lowerCamelCase , __lowerCamelCase : Optional[Any] = create_optimizer(5E-5 ,1_0 ,5)
__lowerCamelCase : str = tf.Variable([0.0, 0.0] ,trainable=_snake_case)
def accumulate_on_replica(SCREAMING_SNAKE_CASE__ : Optional[int]):
accumulator([gradient])
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients ,[variable])))
@tf.function
def accumulate(SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]):
with strategy.scope():
__lowerCamelCase : Any = strategy.experimental_local_results(_snake_case)
local_variables[0].assign(_snake_case)
local_variables[1].assign(_snake_case)
strategy.run(_snake_case ,args=(gradient_placeholder,))
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_snake_case)
def _check_local_values(SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]):
__lowerCamelCase : Tuple = strategy.experimental_local_results(accumulator._gradients[0])
self.assertListAlmostEqual(values[0].value() ,_snake_case ,tol=1E-2)
self.assertListAlmostEqual(values[1].value() ,_snake_case ,tol=1E-2)
accumulate([1.0, 2.0] ,[-1.0, 1.0])
accumulate([3.0, -1.0] ,[-1.0, -1.0])
accumulate([-2.0, 2.0] ,[3.0, -2.0])
self.assertEqual(accumulator.step ,3)
_check_local_values([2.0, 3.0] ,[1.0, -2.0])
apply_grad()
self.assertListAlmostEqual(variable.value() ,[4.0, 3.0] ,tol=1E-2)
accumulator.reset()
self.assertEqual(accumulator.step ,0)
_check_local_values([0.0, 0.0] ,[0.0, 0.0])
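# Hedged, framework-free sketch of the accumulation pattern exercised by the
# tests above: sum per-step gradients and track the step count until reset.
# The class name is mine; the real GradientAccumulator additionally handles
# TF variables and replica contexts.
class _SimpleAccumulator:
    def __init__(self):
        self.step = 0
        self.gradients = None

    def __call__(self, grads):
        if self.gradients is None:
            self.gradients = [float(g) for g in grads]
        else:
            self.gradients = [a + float(g) for a, g in zip(self.gradients, grads)]
        self.step += 1

    def reset(self):
        self.step = 0
        self.gradients = None

# acc = _SimpleAccumulator(); acc([1.0, 2.0]); acc([-2.0, 1.0]); acc([-1.0, 2.0])
# acc.step == 3 and acc.gradients == [-2.0, 5.0]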
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(
    arr: Sequence[float], low: int, high: int
) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(
    arr: Sequence[float], low: int, mid: int, high: int
) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    # descriptive name chosen here; the original identifier was obfuscated
    input_sizes = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
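# Usage example for the divide-and-conquer routine above (the classic
# maximum-subarray test case; indices 3..6 hold the best subarray [4, -1, 2, 1]):
#
#     max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8)  # -> (3, 6, 6)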
if __name__ == "__main__":
from doctest import testmod
    testmod()
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__: Union[str, Any] = logging.get_logger(__name__)
A__: List[str] = {
'google/vit-base-patch16-224': 'https://huggingface.co/vit-base-patch16-224/resolve/main/config.json',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = """vit"""
def __init__( self: Optional[int] , __lowerCamelCase: Any=768 , __lowerCamelCase: int=12 , __lowerCamelCase: List[Any]=12 , __lowerCamelCase: int=3072 , __lowerCamelCase: List[Any]="gelu" , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: str=0.02 , __lowerCamelCase: Optional[int]=1e-12 , __lowerCamelCase: int=224 , __lowerCamelCase: Tuple=16 , __lowerCamelCase: List[Any]=3 , __lowerCamelCase: str=True , __lowerCamelCase: Dict=16 , **__lowerCamelCase: List[str] , ):
'''simple docstring'''
super().__init__(**_snake_case )
UpperCamelCase__: Any = hidden_size
UpperCamelCase__: str = num_hidden_layers
UpperCamelCase__: List[Any] = num_attention_heads
UpperCamelCase__: Dict = intermediate_size
UpperCamelCase__: List[Any] = hidden_act
UpperCamelCase__: Tuple = hidden_dropout_prob
UpperCamelCase__: List[str] = attention_probs_dropout_prob
UpperCamelCase__: Optional[int] = initializer_range
UpperCamelCase__: Tuple = layer_norm_eps
UpperCamelCase__: List[Any] = image_size
UpperCamelCase__: Union[str, Any] = patch_size
UpperCamelCase__: Dict = num_channels
UpperCamelCase__: int = qkv_bias
UpperCamelCase__: List[Any] = encoder_stride
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = version.parse("""1.11""")
@property
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
return 1e-4
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __A( a , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __A( unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = ort.SessionOptions()
__a = False
return options
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__a = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_snake_case )
__a = '''A red cat sitting on a park bench'''
__a = np.random.RandomState(0 )
__a = pipe(
prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=10 , generator=_snake_case , output_type='''np''' , )
__a = output.images
__a = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__a = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__a = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__a = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_snake_case , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_snake_case )
__a = '''A red cat sitting on a park bench'''
__a = np.random.RandomState(0 )
__a = pipe(
prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=20 , generator=_snake_case , output_type='''np''' , )
__a = output.images
__a = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__a = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def test_sorted(self):
        '''simple docstring'''
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, '''max_weight must greater than zero.''')

    def test_negative_weight_value(self):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, '''Weight can not be negative.''')

    def test_negative_profit_value(self):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, '''Profit can not be negative.''')

    def test_null_max_weight(self):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, '''max_weight must greater than zero.''')

    def test_unequal_list_length(self):
        '''simple docstring'''
        self.assertRaisesRegex(
            ValueError, '''The length of profit and weight must be same.''')
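# Hedged sketch (an assumption, for illustration only): a minimal fractional
# greedy knapsack consistent with the expectations above. The real
# implementation lives in knapsack.greedy_knapsack; this stand-in only shows
# the greedy idea -- sort by profit/weight ratio, then fill, splitting the
# last item if needed.
def _greedy_calc_profit(profit, weight, max_weight):
    if max_weight <= 0:
        raise ValueError('max_weight must greater than zero.')
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, remaining = 0.0, max_weight
    for p, w in items:
        take = min(w, remaining)
        total += p * take / w
        remaining -= take
    return total

# _greedy_calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210.0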
if __name__ == "__main__":
    unittest.main()
from math import ceil
def solution(n: int = 1001) -> int:
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
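# Cross-check (hedged, my own derivation, not part of the original file): for
# odd n the spiral-diagonal sum has the closed form below, e.g. 101 for n = 5
# and 669171001 for the default n = 1001.
def _solution_closed_form(n: int = 1001) -> int:
    return (4 * n**3 + 3 * n**2 + 8 * n - 9) // 6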
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
A : List[Any] = int(sys.argv[1])
print(solution(n))
except ValueError:
        print('Invalid entry - please enter a number')
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase : Optional[int] = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
f"{test_file} instead." )
lowerCAmelCase : str = components[-1]
if not test_fn.endswith('py' ):
raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead." )
if not test_fn.startswith('test_modeling_' ):
raise ValueError(
f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." )
lowerCAmelCase : int = components[:-1] + [test_fn.replace('.py', '' )]
lowerCAmelCase : Optional[Any] = '.'.join(a__ )
return test_module_path
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = get_module_path(a__ )
lowerCAmelCase : int = importlib.import_module(a__ )
return test_module
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : Tuple = []
lowerCAmelCase : Optional[Any] = get_test_module(a__ )
for attr in dir(a__ ):
if attr.endswith('ModelTester' ):
tester_classes.append(getattr(a__, a__ ) )
# sort with class names
return sorted(a__, key=lambda _UpperCAmelCase : x.__name__ )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase : str = []
lowerCAmelCase : Optional[int] = get_test_module(a__ )
for attr in dir(a__ ):
lowerCAmelCase : Optional[int] = getattr(a__, a__ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
lowerCAmelCase : Any = getattr(a__, 'all_model_classes', [] )
if len(a__ ) > 0:
test_classes.append(a__ )
# sort with class names
return sorted(a__, key=lambda _UpperCAmelCase : x.__name__ )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : Any = get_test_classes(a__ )
lowerCAmelCase : List[str] = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(a__, key=lambda _UpperCAmelCase : x.__name__ )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase : int = test_class()
if hasattr(a__, 'setUp' ):
test.setUp()
lowerCAmelCase : Tuple = None
if hasattr(a__, 'model_tester' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
lowerCAmelCase : Optional[Any] = test.model_tester.__class__
return model_tester
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase : int = get_test_classes(a__ )
lowerCAmelCase : Union[str, Any] = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(a__ )
# sort with class names
return sorted(a__, key=lambda _UpperCAmelCase : x.__name__ )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : Dict = get_test_classes_for_model(a__, a__ )
lowerCAmelCase : Tuple = []
for test_class in test_classes:
lowerCAmelCase : Optional[Any] = get_model_tester_from_test_class(a__ )
if tester_class is not None:
tester_classes.append(a__ )
# sort with class names
return sorted(a__, key=lambda _UpperCAmelCase : x.__name__ )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase : List[str] = get_test_classes(a__ )
lowerCAmelCase : str = {test_class: get_model_tester_from_test_class(a__ ) for test_class in test_classes}
return test_tester_mapping
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : Dict = get_model_classes(a__ )
lowerCAmelCase : Tuple = {
model_class: get_test_classes_for_model(a__, a__ ) for model_class in model_classes
}
return model_test_mapping
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : int = get_model_classes(a__ )
lowerCAmelCase : Optional[int] = {
model_class: get_tester_classes_for_model(a__, a__ ) for model_class in model_classes
}
return model_to_tester_mapping
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(a__, a__ ):
return o
elif isinstance(a__, a__ ):
return o.__name__
elif isinstance(a__, (list, tuple) ):
return [to_json(a__ ) for x in o]
elif isinstance(a__, a__ ):
return {to_json(a__ ): to_json(a__ ) for k, v in o.items()}
else:
return o
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A( a ):
snake_case_ = ['''image_processor''', '''tokenizer''']
snake_case_ = '''ChineseCLIPImageProcessor'''
snake_case_ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> Tuple:
'''simple docstring'''
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _snake_case , )
__a = kwargs.pop('''feature_extractor''' )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case , _snake_case )
__a = self.image_processor
def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Optional[Any]:
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__a = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
if images is not None:
__a = self.image_processor(_snake_case , return_tensors=_snake_case , **_snake_case )
if text is not None and images is not None:
__a = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) , tensor_type=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> Dict:
'''simple docstring'''
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
__a = self.tokenizer.model_input_names
__a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _snake_case , )
        return self.image_processor_class
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = AutoencoderKL
UpperCamelCase = '''sample'''
UpperCamelCase = 1e-2
@property
def _lowerCamelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes).to(_snake_case)
return {"sample": image}
@property
def _lowerCamelCase ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
return (3, 32, 32)
@property
def _lowerCamelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
return (3, 32, 32)
def _lowerCamelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS')
def _lowerCamelCase ( self : Tuple) -> int:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.prepare_init_args_and_inputs_for_common()
_UpperCAmelCase = self.model_class(**_snake_case)
model.to(_snake_case)
assert not model.is_gradient_checkpointing and model.training
_UpperCAmelCase = model(**_snake_case).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
_UpperCAmelCase = torch.randn_like(_snake_case)
_UpperCAmelCase = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_UpperCAmelCase = self.model_class(**_snake_case)
# clone model
model_a.load_state_dict(model.state_dict())
model_a.to(_snake_case)
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_UpperCAmelCase = model_a(**_snake_case).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
_UpperCAmelCase = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5)
_UpperCAmelCase = dict(model.named_parameters())
_UpperCAmelCase = dict(model_a.named_parameters())
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5))
def _lowerCamelCase ( self : str) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=_snake_case)
self.assertIsNotNone(_snake_case)
self.assertEqual(len(loading_info['missing_keys']) , 0)
model.to(_snake_case)
_UpperCAmelCase = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def _lowerCamelCase ( self : Any) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy')
_UpperCAmelCase = model.to(_snake_case)
model.eval()
if torch_device == "mps":
_UpperCAmelCase = torch.manual_seed(0)
else:
_UpperCAmelCase = torch.Generator(device=_snake_case).manual_seed(0)
_UpperCAmelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0) , )
_UpperCAmelCase = image.to(_snake_case)
with torch.no_grad():
_UpperCAmelCase = model(_snake_case , sample_posterior=_snake_case , generator=_snake_case).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
_UpperCAmelCase = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
])
elif torch_device == "cpu":
_UpperCAmelCase = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6])
else:
_UpperCAmelCase = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5])
self.assertTrue(torch_all_close(_snake_case , _snake_case , rtol=1E-2))
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : str , A : Any , A : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
return F"gaussian_noise_s={seed}_shape={'_'.join([str(_snake_case) for s in shape])}.npy"
def _lowerCamelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Union[str, Any] , A : Tuple=0 , A : str=(4, 3, 5_12, 5_12) , A : Optional[int]=False) -> Any:
"""simple docstring"""
_UpperCAmelCase = torch.floataa if fpaa else torch.floataa
_UpperCAmelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(_snake_case , _snake_case))).to(_snake_case).to(_snake_case)
return image
def _lowerCamelCase ( self : List[str] , A : Any="CompVis/stable-diffusion-v1-4" , A : List[Any]=False) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = 'fp16' if fpaa else None
_UpperCAmelCase = torch.floataa if fpaa else torch.floataa
_UpperCAmelCase = AutoencoderKL.from_pretrained(
_snake_case , subfolder='vae' , torch_dtype=_snake_case , revision=_snake_case , )
model.to(_snake_case).eval()
return model
def _lowerCamelCase ( self : str , A : int=0) -> Tuple:
"""simple docstring"""
if torch_device == "mps":
return torch.manual_seed(_snake_case)
return torch.Generator(device=_snake_case).manual_seed(_snake_case)
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
])
def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : int , A : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.get_sd_vae_model()
_UpperCAmelCase = self.get_sd_image(_snake_case)
_UpperCAmelCase = self.get_generator(_snake_case)
with torch.no_grad():
_UpperCAmelCase = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case).sample
assert sample.shape == image.shape
_UpperCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_UpperCAmelCase = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice)
assert torch_all_close(_snake_case , _snake_case , atol=3E-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
])
@require_torch_gpu
def _lowerCamelCase ( self : int , A : Union[str, Any] , A : Optional[Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.get_sd_vae_model(fpaa=_snake_case)
_UpperCAmelCase = self.get_sd_image(_snake_case , fpaa=_snake_case)
_UpperCAmelCase = self.get_generator(_snake_case)
with torch.no_grad():
_UpperCAmelCase = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case).sample
assert sample.shape == image.shape
_UpperCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_UpperCAmelCase = torch.tensor(_snake_case)
assert torch_all_close(_snake_case , _snake_case , atol=1E-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
])
def _lowerCamelCase ( self : int , A : Tuple , A : List[str] , A : Any) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_sd_vae_model()
_UpperCAmelCase = self.get_sd_image(_snake_case)
with torch.no_grad():
_UpperCAmelCase = model(_snake_case).sample
assert sample.shape == image.shape
_UpperCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_UpperCAmelCase = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice)
assert torch_all_close(_snake_case , _snake_case , atol=3E-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
])
@require_torch_gpu
def _lowerCamelCase ( self : Tuple , A : Optional[Any] , A : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_sd_vae_model()
_UpperCAmelCase = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64))
with torch.no_grad():
_UpperCAmelCase = model.decode(_snake_case).sample
assert list(sample.shape) == [3, 3, 5_12, 5_12]
_UpperCAmelCase = sample[-1, -2:, :2, -2:].flatten().cpu()
_UpperCAmelCase = torch.tensor(_snake_case)
assert torch_all_close(_snake_case , _snake_case , atol=1E-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
])
@require_torch_gpu
def _lowerCamelCase ( self : Tuple , A : str , A : int) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.get_sd_vae_model(fpaa=_snake_case)
_UpperCAmelCase = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case)
with torch.no_grad():
_UpperCAmelCase = model.decode(_snake_case).sample
assert list(sample.shape) == [3, 3, 5_12, 5_12]
_UpperCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_UpperCAmelCase = torch.tensor(_snake_case)
assert torch_all_close(_snake_case , _snake_case , atol=5E-3)
@parameterized.expand([(13,), (16,), (27,)])
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.')
def _lowerCamelCase ( self : str , A : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.get_sd_vae_model(fpaa=_snake_case)
_UpperCAmelCase = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case)
with torch.no_grad():
_UpperCAmelCase = model.decode(_snake_case).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_UpperCAmelCase = model.decode(_snake_case).sample
assert list(sample.shape) == [3, 3, 5_12, 5_12]
assert torch_all_close(_snake_case , _snake_case , atol=1E-1)
@parameterized.expand([(13,), (16,), (37,)])
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.')
def _lowerCamelCase ( self : Tuple , A : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.get_sd_vae_model()
_UpperCAmelCase = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64))
with torch.no_grad():
_UpperCAmelCase = model.decode(_snake_case).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_UpperCAmelCase = model.decode(_snake_case).sample
assert list(sample.shape) == [3, 3, 5_12, 5_12]
assert torch_all_close(_snake_case , _snake_case , atol=1E-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
])
def _lowerCamelCase ( self : Optional[int] , A : int , A : List[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_sd_vae_model()
_UpperCAmelCase = self.get_sd_image(_snake_case)
_UpperCAmelCase = self.get_generator(_snake_case)
with torch.no_grad():
_UpperCAmelCase = model.encode(_snake_case).latent_dist
_UpperCAmelCase = dist.sample(generator=_snake_case)
assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
_UpperCAmelCase = sample[0, -1, -3:, -3:].flatten().cpu()
_UpperCAmelCase = torch.tensor(_snake_case)
_UpperCAmelCase = 3E-3 if torch_device != 'mps' else 1E-2
assert torch_all_close(_snake_case , _snake_case , atol=_snake_case)
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]
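# Hedged sanity check (my own helper, not upstream): brute-force count of
# integer right triangles for a single perimeter. The problem statement quotes
# p = 120 with exactly 3 solutions, and solution(1000) should return 840.
def _count_right_triangles(p: int) -> int:
    count = 0
    for a in range(1, p // 3 + 1):
        for b in range(a, (p - a) // 2 + 1):
            c = p - a - b
            if a * a + b * b == c * c:
                count += 1
    return count

# _count_right_triangles(120) == 3  # (20, 48, 52), (24, 45, 51), (30, 40, 50)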
if __name__ == "__main__":
    print(F"Perimeter {solution()} has maximum solutions")
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase=1_0_2_4, lowerCamelCase=1_0_2_4, lowerCamelCase=False, **lowerCamelCase):
__lowerCAmelCase = AutoTokenizer.from_pretrained(a__)
__lowerCAmelCase = SeqaSeqDataset(a__, a__, a__, a__, type_path='''train''', **a__)
__lowerCAmelCase = tok.pad_token_id
def get_lens(lowerCamelCase):
__lowerCAmelCase = tqdm(
DataLoader(a__, batch_size=5_1_2, num_workers=8, shuffle=a__, collate_fn=ds.collate_fn), desc=str(ds.len_file), )
__lowerCAmelCase = []
for batch in dl:
__lowerCAmelCase = batch['''input_ids'''].ne(a__).sum(1).tolist()
__lowerCAmelCase = batch['''labels'''].ne(a__).sum(1).tolist()
if consider_target:
for src, tgt in zip(a__, a__):
max_lens.append(max(a__, a__))
else:
max_lens.extend(a__)
return max_lens
__lowerCAmelCase = get_lens(a__)
__lowerCAmelCase = SeqaSeqDataset(a__, a__, a__, a__, type_path='''val''', **a__)
__lowerCAmelCase = get_lens(a__)
pickle_save(a__, train_ds.len_file)
pickle_save(a__, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
# flake8: noqa
# Lint as: python3
A : Optional[Any] = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCamelCase : List[Any] = logging.get_logger(__name__)
_UpperCamelCase : Tuple = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class UpperCAmelCase_ ( _a , _a):
lowerCamelCase__ : Any = "resnet"
lowerCamelCase__ : Union[str, Any] = ["basic", "bottleneck"]
def __init__( self , a=3 , a=6_4 , a=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , a=[3, 4, 6, 3] , a="bottleneck" , a="relu" , a=False , a=None , a=None , **a , ) -> Optional[Any]:
super().__init__(**_snake_case )
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
lowercase__ : int = num_channels
lowercase__ : str = embedding_size
lowercase__ : Union[str, Any] = hidden_sizes
lowercase__ : List[Any] = depths
lowercase__ : int = layer_type
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : Dict = downsample_in_first_stage
lowercase__ : int = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(_snake_case ) + 1 )]
lowercase__ , lowercase__ : Any = get_aligned_output_features_output_indices(
out_features=_snake_case , out_indices=_snake_case , stage_names=self.stage_names )
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : int = version.parse("1.11")
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _UpperCAmelCase ( self ) -> float:
return 1e-3
from typing import Dict
from .base import GenericTensor, Pipeline
class __A( a ):
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Optional[Any]:
'''simple docstring'''
if tokenize_kwargs is None:
__a = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
__a = truncation
__a = tokenize_kwargs
__a = {}
if return_tensors is not None:
__a = return_tensors
return preprocess_params, {}, postprocess_params
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , **_snake_case ) -> Dict[str, GenericTensor]:
'''simple docstring'''
__a = self.framework
__a = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = self.model(**_snake_case )
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=False ) -> Optional[int]:
'''simple docstring'''
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self , *_snake_case , **_snake_case ) -> Any:
'''simple docstring'''
        return super().__call__(*_snake_case , **_snake_case )
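# Hedged usage sketch: upstream this class is FeatureExtractionPipeline and is
# normally reached through the pipeline factory; illustrative only:
#
#     from transformers import pipeline
#     extractor = pipeline('feature-extraction', model='bert-base-uncased')
#     features = extractor('hello world')  # nested list [batch][token][hidden]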
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def __UpperCAmelCase ( a_ , a_ , a_):
# Initialise PyTorch model
snake_case_ = AlbertConfig.from_json_file(a__)
print(f'''Building PyTorch model from configuration: {config}''')
snake_case_ = AlbertForPreTraining(a__)
# Load weights from tf checkpoint
load_tf_weights_in_albert(a__ , a__ , a__)
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''')
torch.save(model.state_dict() , a__)
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : Optional[int] = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __A( a ):
snake_case_ = '''levit'''
def __init__( self , _snake_case=224 , _snake_case=3 , _snake_case=3 , _snake_case=2 , _snake_case=1 , _snake_case=16 , _snake_case=[128, 256, 384] , _snake_case=[4, 8, 12] , _snake_case=[4, 4, 4] , _snake_case=[16, 16, 16] , _snake_case=0 , _snake_case=[2, 2, 2] , _snake_case=[2, 2, 2] , _snake_case=0.02 , **_snake_case , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_snake_case )
__a = image_size
__a = num_channels
__a = kernel_size
__a = stride
__a = padding
__a = hidden_sizes
__a = num_attention_heads
__a = depths
__a = key_dim
__a = drop_path_rate
__a = patch_size
__a = attention_ratio
__a = mlp_ratio
__a = initializer_range
__a = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __A( a ):
snake_case_ = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> float:
'''simple docstring'''
        return 1E-4
'''simple docstring'''
def solution(n: int = 1000) -> int:
    fa, fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
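# Usage example: solution(3) == 12 (144 is the first 3-digit Fibonacci
# number), and the default solution(1000) answers Project Euler 25 with 4782.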
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
A : int = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class __A( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]:
'''simple docstring'''
__a = TOKEN
HfFolder.save_token(_snake_case )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_snake_case )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_snake_case , repo_id='''test-model-flax''' , push_to_hub=_snake_case , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_snake_case )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_snake_case , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
def check_models_equal(model_a, model_b) -> bool:
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params)
    flat_params_b = flatten_dict(model_b.params)
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class __A( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_snake_case )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_snake_case , _snake_case ) )
with self.assertRaises(_snake_case ):
__a = FlaxBertModel.from_pretrained(_snake_case )
__a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
self.assertTrue(check_models_equal(_snake_case , _snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_snake_case )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_snake_case , _snake_case ) , max_shard_size='''10KB''' )
with self.assertRaises(_snake_case ):
__a = FlaxBertModel.from_pretrained(_snake_case )
__a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
self.assertTrue(check_models_equal(_snake_case , _snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_snake_case ):
__a = FlaxBertModel.from_pretrained(_snake_case )
__a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
self.assertIsNotNone(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_snake_case ):
__a = FlaxBertModel.from_pretrained(_snake_case )
__a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
        self.assertIsNotNone(_snake_case )
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCamelCase__ ( __snake_case, __snake_case ) -> str | Literal[False]:
"""simple docstring"""
_UpperCamelCase = list(a__ )
_UpperCamelCase = list(a__ )
_UpperCamelCase = 0
for i in range(len(a__ ) ):
if lista[i] != lista[i]:
count += 1
_UpperCamelCase = '''_'''
if count > 1:
return False
else:
return "".join(a__ )
def lowerCamelCase__ ( __snake_case ) -> list[str]:
"""simple docstring"""
_UpperCamelCase = []
while True:
_UpperCamelCase = ['''$'''] * len(a__ )
_UpperCamelCase = []
for i in range(len(a__ ) ):
for j in range(i + 1, len(a__ ) ):
_UpperCamelCase = compare_string(binary[i], binary[j] )
if k is False:
_UpperCamelCase = '''*'''
_UpperCamelCase = '''*'''
temp.append('''X''' )
for i in range(len(a__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(a__ ) == 0:
return pi
_UpperCamelCase = list(set(a__ ) )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> list[str]:
"""simple docstring"""
_UpperCamelCase = []
for minterm in minterms:
_UpperCamelCase = ''''''
for _ in range(a__ ):
_UpperCamelCase = str(minterm % 2 ) + string
minterm //= 2
temp.append(a__ )
return temp
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> bool:
"""simple docstring"""
_UpperCamelCase = list(a__ )
_UpperCamelCase = list(a__ )
_UpperCamelCase = 0
for i in range(len(a__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def lowerCamelCase__ ( __snake_case, __snake_case ) -> list[str]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = [0] * len(a__ )
for i in range(len(chart[0] ) ):
_UpperCamelCase = 0
_UpperCamelCase = -1
for j in range(len(a__ ) ):
if chart[j][i] == 1:
count += 1
_UpperCamelCase = j
if count == 1:
_UpperCamelCase = 1
for i in range(len(a__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(a__ ) ):
_UpperCamelCase = 0
temp.append(prime_implicants[i] )
while True:
_UpperCamelCase = 0
_UpperCamelCase = -1
_UpperCamelCase = 0
for i in range(len(a__ ) ):
_UpperCamelCase = chart[i].count(1 )
if count_n > max_n:
_UpperCamelCase = count_n
_UpperCamelCase = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(a__ ) ):
_UpperCamelCase = 0
def lowerCamelCase__ ( __snake_case, __snake_case ) -> list[list[int]]:
"""simple docstring"""
_UpperCamelCase = [[0 for x in range(len(a__ ) )] for x in range(len(a__ ) )]
for i in range(len(a__ ) ):
_UpperCamelCase = prime_implicants[i].count('''_''' )
for j in range(len(a__ ) ):
if is_for_table(prime_implicants[i], binary[j], a__ ):
_UpperCamelCase = 1
return chart
def lowerCamelCase__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase = int(input('''Enter the no. of variables\n''' ) )
_UpperCamelCase = [
float(a__ )
for x in input(
'''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
]
_UpperCamelCase = decimal_to_binary(a__, a__ )
_UpperCamelCase = check(a__ )
print('''Prime Implicants are:''' )
print(a__ )
_UpperCamelCase = prime_implicant_chart(a__, a__ )
_UpperCamelCase = selection(a__, a__ )
print('''Essential Prime Implicants are:''' )
print(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
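# Hedged sketch of the core Quine-McCluskey merge step exercised above by
# compare_string, rewritten with readable names: two implicants combine only
# when they differ in exactly one bit, which becomes '_'.
def _merge_implicants(a: str, b: str):
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diff) != 1:
        return None  # not combinable
    i = diff[0]
    return a[:i] + '_' + a[i + 1:]

# _merge_implicants('000', '010') == '0_0'
# _merge_implicants('000', '011') is None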
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
A : Optional[Any] = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
A : List[str] = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
A : Optional[int] = 'zero2'
A : str = 'zero3'
A : Tuple = [ZEROa, ZEROa]
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Tuple:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
__a = parameterized.to_safe_name('''_'''.join(str(a__ ) for x in param.args ) )
return F"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
A : Union[str, Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __A( a ):
@parameterized.expand(_snake_case , name_func=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Any:
'''simple docstring'''
self.run_and_check(
stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )
@require_torch_multi_gpu
@parameterized.expand(_snake_case , name_func=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> int:
'''simple docstring'''
self.run_and_check(
stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )
@parameterized.expand(_snake_case , name_func=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> str:
'''simple docstring'''
self.run_and_check(
stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )
@require_torch_multi_gpu
@parameterized.expand(_snake_case , name_func=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]:
'''simple docstring'''
self.run_and_check(
stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 10 , _snake_case = True , _snake_case = True , _snake_case = True , ) -> Any:
'''simple docstring'''
__a = models[model]
__a = self.run_trainer(
stage=_snake_case , model_name=_snake_case , eval_steps=_snake_case , num_train_epochs=1 , distributed=_snake_case , fpaa=_snake_case , )
self.do_checks(_snake_case )
return output_dir
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 10 , _snake_case = 1 , _snake_case = True , _snake_case = True , ) -> Union[str, Any]:
'''simple docstring'''
__a = self.get_auto_remove_tmp_dir('''./xxx''' , after=_snake_case )
__a = F"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(_snake_case )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__a = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
__a = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
__a = self.get_launcher(_snake_case )
__a = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_snake_case , env=self.get_env() )
return output_dir
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=False ) -> List[str]:
'''simple docstring'''
__a = min(2 , get_gpu_count() ) if distributed else 1
        return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCAmelCase (__A , __A = True , __A = math.inf , __A = -math.inf , __A = math.inf , __A = -math.inf , __A = False , __A = 100 , __A = 0.01 , __A = 1 , ):
"""simple docstring"""
_a = False
_a = search_prob
_a = start_temperate
_a = []
_a = 0
_a = None
while not search_end:
_a = current_state.score()
if best_state is None or current_score > best_state.score():
_a = current_state
scores.append(a__)
iterations += 1
_a = None
_a = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_a = random.randint(0 , len(a__) - 1) # picking a random neighbor
_a = neighbors.pop(a__)
_a = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_a = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_a = picked_neighbor
else:
_a = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_a = picked_neighbor
_a = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_a = True
else:
_a = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(a__) , a__)
plt.xlabel('''Iterations''')
plt.ylabel('''Function values''')
plt.show()
return best_state
if __name__ == "__main__":
def lowerCAmelCase (__A , __A):
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowercase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowercase_ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowercase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowercase_ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCAmelCase (__A , __A):
"""simple docstring"""
return (3 * x**2) - (6 * y)
lowercase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowercase_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F"""{local_min.score()}"""
)
lowercase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowercase_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F"""{local_min.score()}"""
)
| 211 |
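# A tiny standalone sketch of the Metropolis acceptance rule used by the
# simulated annealing loop above: always accept improvements, and accept a
# worsening move with probability e^(change / T). Values are made up.
import math
import random

def accept_move(change: float, temperature: float) -> bool:
    if change > 0:  # improvement: always accept
        return True
    return random.random() < math.e ** (change / temperature)

# At high T nearly everything is accepted; as T -> 0 only improvements are:
# e^(-1 / 10) ~= 0.90, while e^(-1 / 0.1) ~= 4.5e-5.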
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __A( a , a , unittest.TestCase ):
snake_case_ = AutoencoderKL
snake_case_ = '''sample'''
snake_case_ = 1E-2
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = 4
__a = 3
__a = (32, 32)
__a = floats_tensor((batch_size, num_channels) + sizes ).to(_snake_case )
return {"sample": image}
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
return (3, 32, 32)
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return (3, 32, 32)
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
__a = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a , __a = self.prepare_init_args_and_inputs_for_common()
__a = self.model_class(**_snake_case )
model.to(_snake_case )
assert not model.is_gradient_checkpointing and model.training
__a = model(**_snake_case ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
__a = torch.randn_like(_snake_case )
__a = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__a = self.model_class(**_snake_case )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_snake_case )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__a = model_a(**_snake_case ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
__a = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
__a = dict(model.named_parameters() )
__a = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a , __a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_snake_case )
__a = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
__a = model.to(_snake_case )
model.eval()
if torch_device == "mps":
__a = torch.manual_seed(0 )
else:
__a = torch.Generator(device=_snake_case ).manual_seed(0 )
__a = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__a = image.to(_snake_case )
with torch.no_grad():
__a = model(_snake_case , sample_posterior=_snake_case , generator=_snake_case ).sample
__a = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__a = torch.tensor(
[
-4.0_078E-01,
-3.8_323E-04,
-1.2_681E-01,
-1.1_462E-01,
2.0_095E-01,
1.0_893E-01,
-8.8_247E-02,
-3.0_361E-01,
-9.8_644E-03,
] )
elif torch_device == "cpu":
__a = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
__a = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(_snake_case , _snake_case , rtol=1E-2 ) )
@slow
class __A( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]:
'''simple docstring'''
return F"""gaussian_noise_s={seed}_shape={'_'.join([str(_snake_case ) for s in shape] )}.npy"""
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 , _snake_case=(4, 3, 512, 512) , _snake_case=False ) -> Any:
'''simple docstring'''
__a = torch.floataa if fpaa else torch.floataa
__a = torch.from_numpy(load_hf_numpy(self.get_file_format(_snake_case , _snake_case ) ) ).to(_snake_case ).to(_snake_case )
return image
def SCREAMING_SNAKE_CASE_ ( self , _snake_case="CompVis/stable-diffusion-v1-4" , _snake_case=False ) -> Optional[Any]:
'''simple docstring'''
__a = '''fp16''' if fpaa else None
__a = torch.floataa if fpaa else torch.floataa
__a = AutoencoderKL.from_pretrained(
_snake_case , subfolder='''vae''' , torch_dtype=_snake_case , revision=_snake_case , )
model.to(_snake_case ).eval()
return model
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 ) -> Tuple:
'''simple docstring'''
if torch_device == "mps":
return torch.manual_seed(_snake_case )
return torch.Generator(device=_snake_case ).manual_seed(_snake_case )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> List[Any]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case )
__a = self.get_generator(_snake_case )
with torch.no_grad():
__a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample
assert sample.shape == image.shape
__a = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(_snake_case , _snake_case , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Tuple:
'''simple docstring'''
__a = self.get_sd_vae_model(fpaa=_snake_case )
__a = self.get_sd_image(_snake_case , fpaa=_snake_case )
__a = self.get_generator(_snake_case )
with torch.no_grad():
__a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample
assert sample.shape == image.shape
__a = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__a = torch.tensor(_snake_case )
assert torch_all_close(_snake_case , _snake_case , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case )
with torch.no_grad():
__a = model(_snake_case ).sample
assert sample.shape == image.shape
__a = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(_snake_case , _snake_case , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) )
with torch.no_grad():
__a = model.decode(_snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__a = sample[-1, -2:, :2, -2:].flatten().cpu()
__a = torch.tensor(_snake_case )
assert torch_all_close(_snake_case , _snake_case , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = self.get_sd_vae_model(fpaa=_snake_case )
__a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case )
with torch.no_grad():
__a = model.decode(_snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__a = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__a = torch.tensor(_snake_case )
assert torch_all_close(_snake_case , _snake_case , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Union[str, Any]:
'''simple docstring'''
__a = self.get_sd_vae_model(fpaa=_snake_case )
__a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case )
with torch.no_grad():
__a = model.decode(_snake_case ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__a = model.decode(_snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_snake_case , _snake_case , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) )
with torch.no_grad():
__a = model.decode(_snake_case ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__a = model.decode(_snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_snake_case , _snake_case , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case )
__a = self.get_generator(_snake_case )
with torch.no_grad():
__a = model.encode(_snake_case ).latent_dist
__a = dist.sample(generator=_snake_case )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__a = sample[0, -1, -3:, -3:].flatten().cpu()
__a = torch.tensor(_snake_case )
__a = 3E-3 if torch_device != '''mps''' else 1E-2
assert torch_all_close(_snake_case , _snake_case , atol=_snake_case ) | 6 | 0 |
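# The latent-shape rule asserted in the encoder test above, in isolation: the
# Stable Diffusion VAE maps (B, 3, H, W) images to (B, 4, H/8, W/8) latents,
# i.e. an 8x spatial downsample with 4 latent channels.
def latent_shape(image_shape):
    b, _, h, w = image_shape
    return [b, 4, h // 8, w // 8]

assert latent_shape((4, 3, 512, 512)) == [4, 4, 64, 64]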
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
__lowerCamelCase : Optional[Any] = multiprocessing.Manager()
__lowerCamelCase : str = manager.list()
__lowerCamelCase : str = multiprocessing.Process(target=a__ , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('timed out' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
__lowerCamelCase : Optional[Any] = shutil.rmtree
__lowerCamelCase : Union[str, Any] = os.rmdir
__lowerCamelCase : Optional[int] = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
__lowerCamelCase : Tuple = {}
with swallow_io():
with time_limit(a__ ):
exec(a__ , a__ )
result.append('passed' )
except TimeoutException:
result.append('timed out' )
except BaseException as e:
result.append(F"failed: {e}" )
# Needed for cleaning up.
__lowerCamelCase : int = rmtree
__lowerCamelCase : Optional[Any] = rmdir
__lowerCamelCase : str = chdir
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
def signal_handler(lowerCamelCase__ , lowerCamelCase__ ):
raise TimeoutException('Timed out!' )
signal.setitimer(signal.ITIMER_REAL , a__ )
signal.signal(signal.SIGALRM , a__ )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
__lowerCamelCase : str = WriteOnlyStringIO()
with contextlib.redirect_stdout(a__ ):
with contextlib.redirect_stderr(a__ ):
with redirect_stdin(a__ ):
yield
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
with tempfile.TemporaryDirectory() as dirname:
with chdir(a__ ):
yield dirname
class A_ ( SCREAMING_SNAKE_CASE ):
pass
class A_ ( io.StringIO ):
def lowerCAmelCase ( self : Any ,*SCREAMING_SNAKE_CASE__ : Optional[Any] ,**SCREAMING_SNAKE_CASE__ : List[str]):
raise OSError
def lowerCAmelCase ( self : List[str] ,*SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : str):
raise OSError
def lowerCAmelCase ( self : int ,*SCREAMING_SNAKE_CASE__ : List[str] ,**SCREAMING_SNAKE_CASE__ : int):
raise OSError
def lowerCAmelCase ( self : Dict ,*SCREAMING_SNAKE_CASE__ : Optional[Any] ,**SCREAMING_SNAKE_CASE__ : Optional[Any]):
return False
class A_ ( contextlib._RedirectStream ): # type: ignore
_UpperCAmelCase : Optional[Any] = '''stdin'''
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[Any]:
if root == ".":
yield
return
__lowerCamelCase : Optional[Any] = os.getcwd()
os.chdir(a__ )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(a__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__=None ) -> Union[str, Any]:
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
__lowerCamelCase : Optional[Any] = None
__lowerCamelCase : List[str] = None
import os
__lowerCamelCase : str = '1'
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Dict = None
__lowerCamelCase : str = None
__lowerCamelCase : Any = None
__lowerCamelCase : Any = None
__lowerCamelCase : Optional[Any] = None
__lowerCamelCase : Any = None
__lowerCamelCase : str = None
__lowerCamelCase : str = None
__lowerCamelCase : List[str] = None
__lowerCamelCase : List[str] = None
__lowerCamelCase : Dict = None
__lowerCamelCase : int = None
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Union[str, Any] = None
__lowerCamelCase : int = None
__lowerCamelCase : int = None
__lowerCamelCase : Tuple = None
__lowerCamelCase : List[str] = None
__lowerCamelCase : List[Any] = None
__lowerCamelCase : Dict = None
__lowerCamelCase : List[Any] = None
__lowerCamelCase : List[str] = None
__lowerCamelCase : List[Any] = None
__lowerCamelCase : List[Any] = None
import shutil
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : List[Any] = None
import subprocess
__lowerCamelCase : Any = None # type: ignore
__lowerCamelCase : Optional[Any] = None
import sys
__lowerCamelCase : str = None
__lowerCamelCase : Dict = None
__lowerCamelCase : List[str] = None
__lowerCamelCase : int = None
__lowerCamelCase : Optional[Any] = None
| 73 |
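# A minimal sketch of the signal-based time limit used above. It relies on
# SIGALRM / setitimer, so it is POSIX-only and must run on the main thread.
import contextlib
import signal

class TimeoutSketch(Exception):
    pass

@contextlib.contextmanager
def time_limit_sketch(seconds: float):
    def handler(signum, frame):
        raise TimeoutSketch("timed out")
    signal.signal(signal.SIGALRM, handler)
    signal.setitimer(signal.ITIMER_REAL, seconds)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)

# Usage: the loop below would raise TimeoutSketch after roughly one second.
# with time_limit_sketch(1.0):
#     while True:
#         pass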
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
A : str = logging.get_logger(__name__)
class __A( a ):
def __init__( self , **_snake_case ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''bs4'''] )
super().__init__(**_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
__a = []
__a = []
__a = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
__a = parent.find_all(child.name , recursive=_snake_case )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_snake_case ) else next(i for i, s in enumerate(_snake_case , 1 ) if s is child ) )
__a = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = BeautifulSoup(_snake_case , '''html.parser''' )
__a = []
__a = []
__a = []
for element in html_code.descendants:
if type(_snake_case ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
__a = html.unescape(_snake_case ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_snake_case )
__a , __a = self.xpath_soup(_snake_case )
stringaxtag_seq.append(_snake_case )
stringaxsubs_seq.append(_snake_case )
if len(_snake_case ) != len(_snake_case ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(_snake_case ) != len(_snake_case ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = ''''''
for tagname, subs in zip(_snake_case , _snake_case ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self , _snake_case ) -> BatchFeature:
'''simple docstring'''
__a = False
# Check that strings has a valid type
if isinstance(_snake_case , _snake_case ):
__a = True
elif isinstance(_snake_case , (list, tuple) ):
if len(_snake_case ) == 0 or isinstance(html_strings[0] , _snake_case ):
__a = True
if not valid_strings:
raise ValueError(
                '''HTML strings must be of type `str`, `List[str]` (batch of examples), '''
F"""but is of type {type(_snake_case )}.""" )
__a = bool(isinstance(_snake_case , (list, tuple) ) and (isinstance(html_strings[0] , _snake_case )) )
if not is_batched:
__a = [html_strings]
# Get nodes + xpaths
__a = []
__a = []
for html_string in html_strings:
__a , __a , __a = self.get_three_from_single(_snake_case )
nodes.append(_snake_case )
__a = []
for node, tag_list, sub_list in zip(_snake_case , _snake_case , _snake_case ):
__a = self.construct_xpath(_snake_case , _snake_case )
xpath_strings.append(_snake_case )
xpaths.append(_snake_case )
# return as Dict
__a = {'''nodes''': nodes, '''xpaths''': xpaths}
__a = BatchFeature(data=_snake_case , tensor_type=_snake_case )
return encoded_inputs | 6 | 0 |
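# A small standalone demo of the xpath construction implemented above: walk up
# from a node, recording each tag name and its 1-based index among same-named
# siblings (0 means "only child", so the subscript is omitted). Requires
# beautifulsoup4; the HTML snippet is made up.
from bs4 import BeautifulSoup

def xpath_of(element) -> str:
    parts = []
    child = element if element.name else element.parent
    for parent in child.parents:
        siblings = parent.find_all(child.name, recursive=False)
        index = 0 if len(siblings) == 1 else next(
            i for i, s in enumerate(siblings, 1) if s is child
        )
        parts.append((child.name, index))
        child = parent
    xpath = ""
    for tag, sub in reversed(parts):
        xpath += f"/{tag}" + (f"[{sub}]" if sub else "")
    return xpath

soup = BeautifulSoup("<html><body><p>a</p><p>b</p></body></html>", "html.parser")
print(xpath_of(soup.find_all("p")[1]))  # /html/body/p[2]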
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
A__: Dict = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__: Any = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
A__: int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 149 |
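# A dependency-free sketch of the lazy-import pattern that _LazyModule
# implements, using module-level __getattr__ (PEP 562, Python 3.7+). This
# belongs in a package __init__.py; the attribute -> submodule mapping below
# mirrors the import structure above, but the exact layout is an assumption.
import importlib

_LAZY_ATTRS = {
    "TrOCRConfig": ".configuration_trocr",
    "TrOCRProcessor": ".processing_trocr",
}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")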
def __lowerCAmelCase ( a__ , a__ ) -> float:
def get_matched_characters(a__ , a__ ) -> str:
__a = []
__a = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
__a = int(max(0 , i - limit ) )
__a = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(a__ )
__a = F"""{_stra[0:_stra.index(a__ )]} {_stra[_stra.index(a__ ) + 1:]}"""
return "".join(a__ )
# matching characters
__a = get_matched_characters(a__ , a__ )
__a = get_matched_characters(a__ , a__ )
__a = len(a__ )
# transposition
__a = (
len([(ca, ca) for ca, ca in zip(a__ , a__ ) if ca != ca] ) // 2
)
if not match_count:
__a = 0.0
else:
__a = (
1
/ 3
* (
match_count / len(a__ )
+ match_count / len(a__ )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
__a = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world')) | 6 | 0 |
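# A worked example of the formula above for the classic pair
# ("martha", "marhta"): m = 6 matching characters, t = 1 transposition, so
# jaro = (6/6 + 6/6 + (6 - 1)/6) / 3 = 17/18 ~= 0.9444; the common prefix is
# "mar" (length 3), giving jaro_winkler = 0.9444 + 0.1 * 3 * (1 - 0.9444) ~= 0.9611.
jaro_example = (6 / 6 + 6 / 6 + (6 - 1) / 6) / 3
assert abs(jaro_example + 0.1 * 3 * (1 - jaro_example) - 0.9611111111111111) < 1e-9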
'''simple docstring'''
import random
def a ( __a ) -> bool:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = num - 1
UpperCamelCase__ :Union[str, Any] = 0
while s % 2 == 0:
UpperCamelCase__ :Dict = s // 2
t += 1
for _ in range(5 ):
UpperCamelCase__ :Optional[int] = random.randrange(2 , num - 1 )
UpperCamelCase__ :Union[str, Any] = pow(a__ , a__ , a__ )
if v != 1:
UpperCamelCase__ :int = 0
while v != (num - 1):
if i == t - 1:
return False
else:
UpperCamelCase__ :Any = i + 1
UpperCamelCase__ :Optional[int] = (v**2) % num
return True
def a ( __a ) -> bool:
'''simple docstring'''
if num < 2:
return False
    UpperCamelCase__ :str = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
        31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113,
        127, 131, 137, 139, 149, 151, 157, 163, 167, 173,
        179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
        233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
        283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
        353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
        419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541,
        547, 557, 563, 569, 571, 577, 587, 593, 599, 601,
        607, 613, 617, 619, 631, 641, 643, 647, 653, 659,
        661, 673, 677, 683, 691, 701, 709, 719, 727, 733,
        739, 743, 751, 757, 761, 769, 773, 787, 797, 809,
        811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
        877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(a__ )
def a ( __a = 1024 ) -> int:
'''simple docstring'''
while True:
UpperCamelCase__ :Dict = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(a__ ):
return num
if __name__ == "__main__":
__snake_case = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num))) | 97 |
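# A compact deterministic variant of the Miller-Rabin test above. For n below
# 3,317,044,064,679,887,385,961,981 the first twelve primes are known to be a
# sufficient witness set (Sorenson & Webster, 2015), so no random bases are
# needed; this is a sketch, not a drop-in replacement for the code above.
def miller_rabin_deterministic(n: int) -> bool:
    if n < 2:
        return False
    witnesses = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37)
    for p in witnesses:
        if n % p == 0:
            return n == p
    d, r = n - 1, 0
    while d % 2 == 0:
        d, r = d // 2, r + 1
    for a in witnesses:
        x = pow(a, d, n)
        if x in (1, n - 1):
            continue
        for _ in range(r - 1):
            x = x * x % n
            if x == n - 1:
                break
        else:
            return False
    return True

assert miller_rabin_deterministic(2_147_483_647)      # Mersenne prime 2**31 - 1
assert not miller_rabin_deterministic(3_215_031_751)  # strong pseudoprime to bases 2, 3, 5, 7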
def __lowerCAmelCase ( a__ ) -> str:
__a = []
__a = set({'''(''', '''[''', '''{'''} )
__a = set({''')''', ''']''', '''}'''} )
__a = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}
for i in range(len(a__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(a__ ) == 0 or (len(a__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(a__ ) == 0
def __lowerCAmelCase ( ) -> Dict:
__a = input('''Enter sequence of brackets: ''' )
if is_balanced(a__ ):
print(a__ , '''is balanced''' )
else:
print(a__ , '''is not balanced''' )
if __name__ == "__main__":
main() | 6 | 0 |
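# A self-contained restatement of the stack-based matcher above, with a few
# quick checks; the logic is the same, only the names differ.
def is_balanced_sketch(s: str) -> bool:
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in s:
        if ch in "([{":
            stack.append(ch)
        elif ch in pairs and (not stack or stack.pop() != pairs[ch]):
            return False
    return not stack

assert is_balanced_sketch("([]{})")
assert not is_balanced_sketch("([)]")  # interleaved pairs are rejected
assert not is_balanced_sketch("(")     # unclosed opener leaves the stack non-empty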
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if gpta_config_file == "":
lowerCAmelCase : Tuple = GPTaConfig()
else:
lowerCAmelCase : Tuple = GPTaConfig.from_json_file(a__ )
lowerCAmelCase : List[Any] = GPTaModel(a__ )
# Load weights from numpy
load_tf_weights_in_gpta(a__, a__, a__ )
# Save pytorch-model
lowerCAmelCase : Union[str, Any] = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
lowerCAmelCase : List[Any] = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(f"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict(), a__ )
print(f"Save configuration file to {pytorch_config_dump_path}" )
with open(a__, 'w', encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
__A : Tuple = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 138 |
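# Example invocation of the conversion script above; the script name and all
# paths are placeholders. Given a TensorFlow GPT-2 checkpoint, the script
# writes pytorch_model.bin (WEIGHTS_NAME) and config.json (CONFIG_NAME) into
# the output folder.
#
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_ckpt \
#       --pytorch_dump_folder_path /path/to/out \
#       --gpt2_config_file /path/to/config.json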
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : str = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''image_processor''', '''tokenizer''']
UpperCamelCase = '''BlipImageProcessor'''
UpperCamelCase = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : str , A : int , A : List[str]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = False
super().__init__(_snake_case , _snake_case)
_UpperCAmelCase = self.image_processor
def __call__( self : Any , A : Any = None , A : Optional[int] = None , A : List[Any] = True , A : Union[str, Any] = False , A : Union[str, Any] = None , A : Optional[Any] = None , A : int = 0 , A : Tuple = None , A : Union[str, Any] = None , A : Optional[int] = False , A : Dict = False , A : List[str] = False , A : int = False , A : Dict = False , A : Optional[Any] = True , A : List[str] = None , **A : Optional[Any] , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.')
# Get only text
if images is None:
_UpperCAmelCase = self.tokenizer
_UpperCAmelCase = self.tokenizer(
text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_token_type_ids=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , )
return text_encoding
# add pixel_values
_UpperCAmelCase = self.image_processor(_snake_case , return_tensors=_snake_case)
if text is not None:
_UpperCAmelCase = self.tokenizer(
text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_token_type_ids=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , )
else:
_UpperCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(_snake_case)
return encoding_image_processor
def _lowerCamelCase ( self : Optional[Any] , *A : Any , **A : int) -> int:
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case , **_snake_case)
def _lowerCamelCase ( self : List[Any] , *A : Union[str, Any] , **A : str) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.decode(*_snake_case , **_snake_case)
@property
def _lowerCamelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 339 |
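# A standalone mock of the dispatch logic in __call__ above: text-only input
# goes straight to the tokenizer, while images go through the image processor
# and any text encoding is merged in. No models or downloads involved.
class ToyProcessor:
    def __init__(self, image_processor, tokenizer):
        self.image_processor = image_processor
        self.tokenizer = tokenizer

    def __call__(self, images=None, text=None):
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        if images is None:
            return self.tokenizer(text)
        encoding = self.image_processor(images)
        if text is not None:
            encoding.update(self.tokenizer(text))
        return encoding

toy = ToyProcessor(lambda img: {"pixel_values": [img]}, lambda t: {"input_ids": [len(t)]})
assert toy(images="IMG", text="hi") == {"pixel_values": ["IMG"], "input_ids": [2]}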
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : Dict = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 0 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def __magic_name__( lowerCamelCase):
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''', set())
@pytest.fixture
def __magic_name__( lowerCamelCase):
class a__ :
"""simple docstring"""
def __init__(self , __lowercase ):
__lowerCAmelCase = metric_id
class a__ :
"""simple docstring"""
__UpperCamelCase : Dict = [MetricMock(__A ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def _snake_case (self ):
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''', HfhMock())
@pytest.mark.parametrize(
'''func, args''', [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))])
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase):
if "tmp_path" in args:
__lowerCAmelCase = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args)
with pytest.warns(a__, match='''https://huggingface.co/docs/evaluate'''):
func(*a__)
| 174 |
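# The core assertion pattern used in the test above, in isolation:
# pytest.warns checks both the warning class and, via match=, its message.
# FutureWarning is an assumption here; the test above patches the real
# deprecation machinery instead.
import warnings
import pytest

def deprecated_call():
    warnings.warn("see https://huggingface.co/docs/evaluate", FutureWarning)

def test_emits_deprecation():
    with pytest.warns(FutureWarning, match="evaluate"):
        deprecated_call()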
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : Optional[int] = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ : Optional[int] = 4_2
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : Optional[int] = None
def a_ ( ):
'''simple docstring'''
lowercase__ : Any = Node(1 )
lowercase__ : int = Node(2 )
lowercase__ : Optional[int] = Node(3 )
lowercase__ : Dict = Node(4 )
lowercase__ : Optional[int] = Node(5 )
return tree
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a_ ( _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a_ ( _lowerCAmelCase : List[str] ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a_ ( _lowerCAmelCase : List[Any] ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : List[Any] = []
if root is None:
return output
lowercase__ : Any = deque([root] )
while process_queue:
lowercase__ : List[Any] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : Any = []
def populate_output(_lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(a__ , a__ )
return output
def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Dict = []
def populate_output(_lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(a__ , a__ )
return output
def a_ ( _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
if root is None:
return []
lowercase__ : Union[str, Any] = []
lowercase__ : List[str] = 0
lowercase__ : List[Any] = height(a__ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(a__ , a__ ) )
lowercase__ : Optional[Any] = 1
else:
output.append(get_nodes_from_right_to_left(a__ , a__ ) )
lowercase__ : Tuple = 0
return output
def a_ ( ): # Main function for testing.
'''simple docstring'''
lowercase__ : int = make_tree()
print(f"""In-order Traversal: {inorder(a__ )}""" )
print(f"""Pre-order Traversal: {preorder(a__ )}""" )
print(f"""Post-order Traversal: {postorder(a__ )}""" , '\n' )
print(f"""Height of Tree: {height(a__ )}""" , '\n' )
print('Complete Level Order Traversal: ' )
print(level_order(a__ ) , '\n' )
print('Level-wise order Traversal: ' )
for level in range(1 , height(a__ ) + 1 ):
print(f"""Level {level}:""" , get_nodes_from_left_to_right(a__ , level=a__ ) )
print('\nZigZag order Traversal: ' )
print(zigzag(a__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 77 |
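# An alternative single-pass zigzag sketch using one deque, instead of
# re-walking the tree once per level as zigzag() above does. It duck-types on
# .data/.left/.right, matching how the traversals above use their nodes.
from collections import deque

def zigzag_single_pass(root):
    if root is None:
        return []
    output, queue, left_to_right = [], deque([root]), True
    while queue:
        level = [node.data for node in queue]  # queue holds exactly one level
        output.append(level if left_to_right else level[::-1])
        for _ in range(len(queue)):
            node = queue.popleft()
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
        left_to_right = not left_to_right
    return output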
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
) | 6 | 0 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , a=2 , a=3 , a=64 , a=None ) -> Union[str, Any]:
snake_case_ = np.random.default_rng(_snake_case )
snake_case_ = length
snake_case_ = rng.normal(size=(length,) ).astype(np.floataa )
snake_case_ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ) -> Tuple:
return self.length
def __getitem__( self , a ) -> Union[str, Any]:
return {"x": self.x[i], "y": self.y[i]}
class UpperCamelCase_ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , a=0 , a=0 , a=False ) -> Any:
super().__init__()
snake_case_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
snake_case_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
snake_case_ = True
def _UpperCamelCase ( self , a=None ) -> Tuple:
if self.first_batch:
print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
snake_case_ = False
return x * self.a[0] + self.b[0]
class UpperCamelCase_ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , a=0 , a=0 , a=False ) -> Dict:
super().__init__()
snake_case_ = torch.nn.Parameter(torch.tensor(_snake_case ).float() )
snake_case_ = torch.nn.Parameter(torch.tensor(_snake_case ).float() )
snake_case_ = True
def _UpperCamelCase ( self , a=None ) -> List[Any]:
if self.first_batch:
print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
snake_case_ = False
return x * self.a + self.b
def __UpperCAmelCase ( a_ , a_ = 16):
from datasets import load_dataset
from transformers import AutoTokenizer
snake_case_ = AutoTokenizer.from_pretrained('bert-base-cased')
snake_case_ = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
snake_case_ = load_dataset('csv' , data_files=a__)
snake_case_ = datasets['train'].unique('label')
snake_case_ = {v: i for i, v in enumerate(a__)}
def tokenize_function(a_):
# max_length=None => use the model max length (it's actually the default)
snake_case_ = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=a__ , max_length=a__ , padding='max_length')
if "label" in examples:
snake_case_ = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case_ = datasets.map(
a__ , batched=a__ , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(a_):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(a__ , padding='max_length' , max_length=1_28 , return_tensors='pt')
return tokenizer.pad(a__ , padding='longest' , return_tensors='pt')
# Instantiate dataloaders.
snake_case_ = DataLoader(tokenized_datasets['train'] , shuffle=a__ , collate_fn=a__ , batch_size=2)
snake_case_ = DataLoader(tokenized_datasets['validation'] , shuffle=a__ , collate_fn=a__ , batch_size=1)
return train_dataloader, eval_dataloader
| 178 |
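# A minimal sketch of how a dataset/model pair like the fixtures above is
# typically exercised: fit y = a*x + b with plain PyTorch (no Accelerate).
# Hyperparameters are arbitrary.
import torch
from torch.utils.data import DataLoader, Dataset

class TinyRegressionDataset(Dataset):
    def __init__(self, a=2.0, b=3.0, length=64, seed=0):
        g = torch.Generator().manual_seed(seed)
        self.x = torch.randn(length, generator=g)
        self.y = a * self.x + b + 0.1 * torch.randn(length, generator=g)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}

model = torch.nn.Linear(1, 1)
loader = DataLoader(TinyRegressionDataset(), batch_size=16, shuffle=True)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
for _ in range(50):
    for batch in loader:
        opt.zero_grad()
        pred = model(batch["x"].unsqueeze(1)).squeeze(1)
        loss = torch.nn.functional.mse_loss(pred, batch["y"])
        loss.backward()
        opt.step()
# model.weight should approach a = 2.0 and model.bias should approach b = 3.0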
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=a )
class __A( a ):
snake_case_ = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
snake_case_ = Features({'''text''': Value('''string''' )} )
snake_case_ = Features({} )
snake_case_ = "text"
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text"} | 6 | 0 |
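# A stripped-down illustration of the frozen-dataclass task-template pattern
# above, with no datasets dependency; the field and property shapes mirror the
# class it annotates, but the names are illustrative.
from dataclasses import dataclass, field
from typing import ClassVar, Dict

@dataclass(frozen=True)
class TextTaskSketch:
    task: str = field(default="language-modeling")
    text_column: ClassVar[str] = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}

assert TextTaskSketch().column_mapping == {"text": "text"}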
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
snake_case_ : Optional[Any] = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class __a (unittest.TestCase ):
def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : List[str]=7 , __magic_name__ : List[str]=3 , __magic_name__ : Dict=18 , __magic_name__ : Union[str, Any]=30 , __magic_name__ : int=4_00 , __magic_name__ : Any=None , __magic_name__ : str=True , __magic_name__ : int=True , __magic_name__ : Tuple=None , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : str = size if size is not None else {'''height''': 20, '''width''': 20}
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : str = batch_size
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : List[str] = image_size
UpperCAmelCase_ : Optional[int] = min_resolution
UpperCAmelCase_ : str = max_resolution
UpperCAmelCase_ : int = size
UpperCAmelCase_ : Optional[int] = do_normalize
UpperCAmelCase_ : str = do_convert_rgb
UpperCAmelCase_ : int = [5_12, 10_24, 20_48, 40_96]
UpperCAmelCase_ : Any = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def UpperCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def UpperCAmelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
UpperCAmelCase_ : List[Any] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class __a (lowerCamelCase , unittest.TestCase ):
__a : Optional[int] = PixaStructImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = PixaStructImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , '''do_normalize''' ) )
self.assertTrue(hasattr(_snake_case , '''do_convert_rgb''' ) )
def UpperCAmelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase_ : Union[str, Any] = 20_48
UpperCAmelCase_ : Union[str, Any] = image_processor(_snake_case , return_tensors='''pt''' , max_patches=_snake_case )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_6_0_6 ) , atol=1E-3 , rtol=1E-3 ) )
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , Image.Image )
# Test not batched input
UpperCAmelCase_ : Tuple = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ : List[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : Optional[Any] = image_processor(
_snake_case , return_tensors='''pt''' , max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # In VQA mode the processor renders the header text onto the image,
            # so calling it without `header_text` must raise an error.
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
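# Worked example of the hidden-dimension formula used in the tests above
# (illustrative numbers, not read from the tester's configuration): with
# 16x16 patches and 3 channels, each flattened patch has
# 16 * 16 * 3 + 2 = 770 entries -- the raw pixel values plus the patch's
# row and column indices.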
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images (4-channel in this tester)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # The processor converts RGBA input to RGB, so one channel is dropped:
        # num_channels - 1 = 3 channels end up in the flattened patches.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            # Count non-padding tokens per example on the source and target side.
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
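# A minimal invocation sketch. python-fire maps the function's arguments to
# CLI flags, so something like the following should work (the tokenizer name
# and data directory below are placeholders, not values from this script):
#
#     python save_len_file.py t5-small /path/to/data --consider_target=True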
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForAudioClassification",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForAudioClassification",
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
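# Effect of the lazy module above (an illustrative note, not part of the
# original file): importing the package only loads the lightweight entries in
# `_import_structure`; backend-specific submodules are imported on first
# attribute access, e.g.
#
#     from transformers.models.whisper import WhisperConfig  # cheap
#     from transformers.models.whisper import WhisperModel   # triggers the torch import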
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
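# The coefficient formulas below follow Robert Bristow-Johnson's "Audio EQ
# Cookbook"; each constructor returns a second-order (biquad) IIRFilter.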
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peaking (bell) biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)

    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)

    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
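# A minimal usage sketch (assuming `IIRFilter.process(sample)` from the
# accompanying `audio_filters.iir_filter` module, which filters one sample at
# a time): build a 1 kHz low-pass for 48 kHz audio and run a few samples.
if __name__ == "__main__":
    lowpass = make_lowpass(1000, 48000)
    samples = [0.0, 0.5, 1.0, 0.5, 0.0]
    print([lowpass.process(sample) for sample in samples])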