Dataset schema (column types and value ranges as reported for this split):

    column                    type     range
    code                      string   lengths 87 - 55.2k
    code_codestyle            int64    0 - 349
    style_context             string   lengths 135 - 49.1k
    style_context_codestyle   int64    0 - 349
    label                     int64    0 - 1
code:

def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit, uppercase hexadecimal representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 6
style_context:

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
style_context_codestyle: 6
label: 1
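A quick round-trip check of the Base16 helpers in this record's code cell (hypothetical usage; the function names follow the reconstruction above, not the dataset's obfuscated identifiers):

encoded = base16_encode(b"Hello!")
print(encoded)                              # 48656C6C6F21
assert base16_decode(encoded) == b"Hello!"  # decoding inverts encoding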
code:

from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
code_codestyle: 6
style_context:

import tempfile
import unittest

import numpy as np

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8


@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal


@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
style_context_codestyle: 6
label: 1
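A small worked example for the quadratic-roots helper in this record's code cell (hypothetical usage with the reconstructed names):

# x^2 - 3x + 2 = (x - 1)(x - 2): real roots, returned as floats
print(quadratic_roots(a=1, b=-3, c=2))  # (2.0, 1.0)
# x^2 + 1 = 0: complex roots, returned as complex numbers
print(quadratic_roots(a=1, b=0, c=1))   # (1j, -1j)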
code:

import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
code_codestyle: 6
style_context:

# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))

import dataclasses  # noqa
import io  # noqa
import itertools  # noqa
import json  # noqa
import os  # noqa
import unittest  # noqa
from copy import deepcopy  # noqa

from parameterized import parameterized  # noqa
from transformers import TrainingArguments, is_torch_available  # noqa
from transformers.deepspeed import is_deepspeed_available  # noqa
from transformers.file_utils import WEIGHTS_NAME  # noqa
from transformers.testing_utils import (  # noqa
    CaptureLogger,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import set_seed  # noqa


set_seed(42)

models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))


@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
style_context_codestyle: 6
label: 1
code:

def count_inversions_bf(arr):
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    sorted_p, inversions_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    sorted_arr, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)

    num_inversions = inversions_p + inversions_q + cross_inversions
    return sorted_arr, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
code_codestyle: 6
style_context:

import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
style_context_codestyle: 6
label: 1
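A brief check that the two inversion counters in this record's code cell agree (hypothetical usage with the reconstructed names):

arr = [3, 1, 2]
# inversions: (3, 1) and (3, 2)
assert count_inversions_bf(arr) == 2
_, n = count_inversions_recursive(arr)
assert n == 2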
code:

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts,
        src_lang="en_XX",
        tgt_texts=None,
        tgt_lang="ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        # Reset the special tokens to the source lang setting: no prefix, suffix = [eos, src_lang_code].
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        # Reset the special tokens to the target lang setting: no prefix, suffix = [eos, tgt_lang_code].
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
code_codestyle: 6
style_context:

import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
style_context_codestyle: 6
label: 1
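A short illustration of the MarkupLM feature extractor in this record's style_context cell (hypothetical usage; requires the `bs4` backend to be installed):

feature_extractor = MarkupLMFeatureExtractor()
html_string = "<html><body><h1>Title</h1><p>Hello world</p></body></html>"
encoding = feature_extractor(html_string)
print(encoding["nodes"])   # [['Title', 'Hello world']]
print(encoding["xpaths"])  # [['/html/body/h1', '/html/body/p']]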
code:

from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
code_codestyle: 6
style_context:

def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
style_context_codestyle: 6
label: 1
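A quick illustration of the Jaro-Winkler similarity in this record's style_context cell (hypothetical usage; identical strings score 1.0, fully dissimilar strings 0.0):

print(jaro_winkler("hello", "hello"))  # 1.0
print(jaro_winkler("abcd", "wxyz"))    # 0.0 (no matching characters)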
code:

import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
code_codestyle: 6
style_context:

def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
style_context_codestyle: 6
label: 1
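A few sanity checks for the bracket-matching helper in this record's style_context cell (hypothetical usage with the reconstructed name):

assert is_balanced("([]{})")
assert not is_balanced("([)]")  # crossed pair
assert not is_balanced("(")     # unclosed bracket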
code:

from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
code_codestyle: 6
style_context:

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 6
label: 1
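A couple of sanity checks for the low-level Quine-McCluskey helpers in this record's code cell (hypothetical usage; integer minterms are shown here, while the interactive `main` casts its inputs to float):

print(compare_string("0010", "0110"))   # '0_10' (differ in one bit, so they merge)
print(compare_string("0110", "1101"))   # False  (differ in more than one bit)
print(decimal_to_binary(3, [0, 1, 5]))  # ['000', '001', '101']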
code:

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
6
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A : Dict = { 'configuration_xlm_roberta': [ 'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMRobertaConfig', 'XLMRobertaOnnxConfig', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Union[str, Any] = ['XLMRobertaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = ['XLMRobertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[Any] = [ 'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMRobertaForCausalLM', 'XLMRobertaForMaskedLM', 'XLMRobertaForMultipleChoice', 'XLMRobertaForQuestionAnswering', 'XLMRobertaForSequenceClassification', 'XLMRobertaForTokenClassification', 'XLMRobertaModel', 'XLMRobertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = [ 'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLMRobertaForCausalLM', 'TFXLMRobertaForMaskedLM', 'TFXLMRobertaForMultipleChoice', 'TFXLMRobertaForQuestionAnswering', 'TFXLMRobertaForSequenceClassification', 'TFXLMRobertaForTokenClassification', 'TFXLMRobertaModel', 'TFXLMRobertaPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = [ 'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'FlaxXLMRobertaForMaskedLM', 'FlaxXLMRobertaForCausalLM', 'FlaxXLMRobertaForMultipleChoice', 'FlaxXLMRobertaForQuestionAnswering', 'FlaxXLMRobertaForSequenceClassification', 'FlaxXLMRobertaForTokenClassification', 'FlaxXLMRobertaModel', 'FlaxXLMRobertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
1
import math import unittest def __lowerCAmelCase ( a__ ) -> bool: assert isinstance(a__ , a__ ) and ( number >= 0 ), "'number' must be an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes return False # All prime numbers greater than 3 are of the form 6k +/- 1 for i in range(5 , int(math.sqrt(a__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(11 ) ) self.assertTrue(is_prime(13 ) ) self.assertTrue(is_prime(17 ) ) self.assertTrue(is_prime(19 ) ) self.assertTrue(is_prime(23 ) ) self.assertTrue(is_prime(29 ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' with self.assertRaises(_snake_case ): is_prime(-19 ) self.assertFalse( is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , ) self.assertFalse( is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , ) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
6
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A : Optional[int] = { 'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'], 'feature_extraction_whisper': ['WhisperFeatureExtractor'], 'processing_whisper': ['WhisperProcessor'], 'tokenization_whisper': ['WhisperTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = ['WhisperTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : str = [ 'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel', 'WhisperForAudioClassification', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = [ 'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = [ 'FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
1
from __future__ import annotations A : Optional[int] = [] def __lowerCAmelCase ( a__ , a__ , a__ ) -> bool: for i in range(len(a__ ) ): if board[row][i] == 1: return False for i in range(len(a__ ) ): if board[i][column] == 1: return False for i, j in zip(range(a__ , -1 , -1 ) , range(a__ , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(a__ , -1 , -1 ) , range(a__ , len(a__ ) ) ): if board[i][j] == 1: return False return True def __lowerCAmelCase ( a__ , a__ ) -> bool: if row >= len(a__ ): solution.append(a__ ) printboard(a__ ) print() return True for i in range(len(a__ ) ): if is_safe(a__ , a__ , a__ ): __a = 1 solve(a__ , row + 1 ) __a = 0 return False def __lowerCAmelCase ( a__ ) -> None: for i in range(len(a__ ) ): for j in range(len(a__ ) ): if board[i][j] == 1: print('''Q''' , end=''' ''' ) else: print('''.''' , end=''' ''' ) print() # n=int(input("The no. of queens")) A : int = 8 A : Optional[int] = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('The total no. of solutions are :', len(solution))
6
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( 'stable diffusion controlnet', '0.22.0', 'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.', standard_warn=False, stacklevel=3, )
6
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available A : str = { 'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = [ 'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST', 'GPTNeoForCausalLM', 'GPTNeoForQuestionAnswering', 'GPTNeoForSequenceClassification', 'GPTNeoForTokenClassification', 'GPTNeoModel', 'GPTNeoPreTrainedModel', 'load_tf_weights_in_gpt_neo', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = [ 'FlaxGPTNeoForCausalLM', 'FlaxGPTNeoModel', 'FlaxGPTNeoPreTrainedModel', ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=a ) class __A( a ): snake_case_ = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) snake_case_ = Features({'''text''': Value('''string''' )} ) snake_case_ = Features({} ) snake_case_ = "text" @property def SCREAMING_SNAKE_CASE_ ( self ) -> Dict[str, str]: '''simple docstring''' return {self.text_column: "text"}
6
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : List[str] = { 'google/vit-base-patch16-224': 'https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json', # See all ViT models at https://huggingface.co/models?filter=vit } class __A( a ): snake_case_ = '''vit''' def __init__( self , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3_072 , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=224 , _snake_case=16 , _snake_case=3 , _snake_case=True , _snake_case=16 , **_snake_case , ) -> Optional[int]: '''simple docstring''' super().__init__(**_snake_case ) __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = initializer_range __a = layer_norm_eps __a = image_size __a = patch_size __a = num_channels __a = qkv_bias __a = encoder_stride class __A( a ): snake_case_ = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> float: '''simple docstring''' return 1E-4
6
import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def __lowerCAmelCase ( a__ , a__ , a__=1024 , a__=1024 , a__=False , **a__ ) -> Optional[Any]: __a = AutoTokenizer.from_pretrained(a__ ) __a = SeqaSeqDataset(a__ , a__ , a__ , a__ , type_path='''train''' , **a__ ) __a = tok.pad_token_id def get_lens(a__ ): __a = tqdm( DataLoader(a__ , batch_size=512 , num_workers=8 , shuffle=a__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , ) __a = [] for batch in dl: __a = batch['''input_ids'''].ne(a__ ).sum(1 ).tolist() __a = batch['''labels'''].ne(a__ ).sum(1 ).tolist() if consider_target: for src, tgt in zip(a__ , a__ ): max_lens.append(max(a__ , a__ ) ) else: max_lens.extend(a__ ) return max_lens __a = get_lens(a__ ) __a = SeqaSeqDataset(a__ , a__ , a__ , a__ , type_path='''val''' , **a__ ) __a = get_lens(a__ ) pickle_save(a__ , train_ds.len_file ) pickle_save(a__ , val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
6
1
from __future__ import annotations import time from collections.abc import Sequence from random import randint from matplotlib import pyplot as plt def __lowerCAmelCase ( a__ , a__ , a__ ) -> tuple[int | None, int | None, float]: if not arr: return None, None, 0 if low == high: return low, high, arr[low] __a = (low + high) // 2 __a , __a , __a = max_subarray(a__ , a__ , a__ ) __a , __a , __a = max_subarray(a__ , mid + 1 , a__ ) __a , __a , __a = max_cross_sum(a__ , a__ , a__ , a__ ) if left_sum >= right_sum and left_sum >= cross_sum: return left_low, left_high, left_sum elif right_sum >= left_sum and right_sum >= cross_sum: return right_low, right_high, right_sum return cross_left, cross_right, cross_sum def __lowerCAmelCase ( a__ , a__ , a__ , a__ ) -> tuple[int, int, float]: __a , __a = float('''-inf''' ), -1 __a , __a = float('''-inf''' ), -1 __a = 0 for i in range(a__ , low - 1 , -1 ): summ += arr[i] if summ > left_sum: __a = summ __a = i __a = 0 for i in range(mid + 1 , high + 1 ): summ += arr[i] if summ > right_sum: __a = summ __a = i return max_left, max_right, (left_sum + right_sum) def __lowerCAmelCase ( a__ ) -> float: __a = [randint(1 , a__ ) for _ in range(a__ )] __a = time.time() max_subarray(a__ , 0 , input_size - 1 ) __a = time.time() return end - start def __lowerCAmelCase ( ) -> None: __a = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000] __a = [time_max_subarray(a__ ) for input_size in input_sizes] print('''No of Inputs\t\tTime Taken''' ) for input_size, runtime in zip(a__ , a__ ): print(a__ , '''\t\t''' , a__ ) plt.plot(a__ , a__ ) plt.xlabel('''Number of Inputs''' ) plt.ylabel('''Time taken in seconds''' ) plt.show() if __name__ == "__main__": from doctest import testmod testmod()
6
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = (1 - _cos) / 2 __a = 1 - _cos __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = (1 + _cos) / 2 __a = -1 - _cos __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = _sin / 2 __a = 0 __a = -ba __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 1 - alpha __a = -2 * _cos __a = 1 + alpha __a = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = 1 + alpha * big_a __a = -2 * _cos __a = 1 - alpha * big_a __a = 1 + alpha / big_a __a = -2 * _cos __a = 1 - alpha / big_a __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = (big_a + 1) - (big_a - 1) * _cos __a = (big_a + 1) + (big_a - 1) * _cos __a = (big_a - 1) - (big_a + 1) * _cos __a = (big_a - 1) + (big_a + 1) * _cos __a = 2 * sqrt(a__ ) * alpha __a = big_a * (pmc + aaa) __a = 2 * big_a * mpc __a = big_a * (pmc - aaa) __a = ppmc + aaa __a = -2 * pmpc __a = ppmc - aaa __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = (big_a + 1) - (big_a - 1) * _cos __a = (big_a + 1) + (big_a - 1) * _cos __a = (big_a - 1) - (big_a + 1) * _cos __a = (big_a - 1) + (big_a + 1) * _cos __a = 2 * sqrt(a__ ) * alpha __a = big_a * (ppmc + aaa) __a = -2 * big_a * pmpc __a = big_a * (ppmc - aaa) __a = pmc + aaa __a = 2 * mpc __a = pmc - aaa __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
6
1
from ..utils import DummyObject, requires_backends class __A( metaclass=a ): snake_case_ = ['''speech'''] def __init__( self , *_snake_case , **_snake_case ) -> Any: '''simple docstring''' requires_backends(self , ['''speech'''] ) class __A( metaclass=a ): snake_case_ = ['''speech'''] def __init__( self , *_snake_case , **_snake_case ) -> Dict: '''simple docstring''' requires_backends(self , ['''speech'''] )
6
def __lowerCAmelCase ( a__ , a__ , a__ ) -> list: __a = len(a__ ) __a = [[0] * n for i in range(a__ )] for i in range(a__ ): __a = y_points[i] for i in range(2 , a__ ): for j in range(a__ , a__ ): __a = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
6
1
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging A : int = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt'] A : Dict = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('0.9.0'): raise Exception('requires fairseq >= 0.9.0') logging.set_verbosity_info() A : Dict = logging.get_logger(__name__) A : Optional[Any] = ' Hello world! cécé herlolip' A : str = [ ('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'), ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'), ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'), ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'), ] def __lowerCAmelCase ( a__ ) -> Optional[Any]: __a = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', ] for k in ignore_keys: state_dict.pop(a__ , a__ ) def __lowerCAmelCase ( a__ , a__ , a__ ) -> Optional[Any]: __a = dct.pop(a__ ) __a = val def __lowerCAmelCase ( a__ ) -> Optional[int]: __a = torch.load(a__ , map_location='''cpu''' ) __a = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval() hub_interface.model.load_state_dict(sd['''model'''] ) return hub_interface def __lowerCAmelCase ( a__ ) -> Optional[int]: __a , __a = emb.weight.shape __a = nn.Linear(a__ , a__ , bias=a__ ) __a = emb.weight.data return lin_layer @torch.no_grad() def __lowerCAmelCase ( a__ , a__ , a__=None ) -> List[str]: if not os.path.exists(a__ ): __a = torch.hub.load('''pytorch/fairseq''' , a__ ).eval() else: __a = load_xsum_checkpoint(a__ ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: __a = checkpoint_path.replace('''.''' , '''-''' ) __a = BartConfig.from_pretrained(a__ ) __a = bart.encode(a__ ).unsqueeze(0 ) __a = BartTokenizer.from_pretrained(a__ ).encode(a__ , return_tensors='''pt''' ).unsqueeze(0 ) if not torch.eq(a__ , a__ ).all(): raise ValueError( F"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}""" ) if checkpoint_path == "bart.large.mnli": __a = bart.state_dict() remove_ignore_keys_(a__ ) __a = state_dict['''model.decoder.embed_tokens.weight'''] for src, dest in mnli_rename_keys: rename_key(a__ , a__ , a__ ) __a = BartForSequenceClassification(a__ ).eval() model.load_state_dict(a__ ) __a = bart.predict('''mnli''' , a__ , return_logits=a__ ) __a = model(a__ )[0] # logits else: # no classification heads to worry about __a = bart.model.state_dict() remove_ignore_keys_(a__ ) __a = state_dict['''decoder.embed_tokens.weight'''] __a = bart.extract_features(a__ ) if hf_checkpoint_name == "facebook/bart-large": __a = BartModel(a__ ).eval() model.load_state_dict(a__ ) __a = model(a__ ).model[0] else: __a = BartForConditionalGeneration(a__ ).eval() # an existing summarization ckpt model.model.load_state_dict(a__ ) if hasattr(a__ , '''lm_head''' ): __a = make_linear_from_emb(model.model.shared ) __a = model.model(a__ )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""" ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' ) Path(a__ ).mkdir(exist_ok=a__ ) model.save_pretrained(a__ ) if __name__ == "__main__": A : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum' ) A : Dict = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
6
from __future__ import annotations import time from collections.abc import Sequence from random import randint from matplotlib import pyplot as plt def __lowerCAmelCase ( a__ , a__ , a__ ) -> tuple[int | None, int | None, float]: if not arr: return None, None, 0 if low == high: return low, high, arr[low] __a = (low + high) // 2 __a , __a , __a = max_subarray(a__ , a__ , a__ ) __a , __a , __a = max_subarray(a__ , mid + 1 , a__ ) __a , __a , __a = max_cross_sum(a__ , a__ , a__ , a__ ) if left_sum >= right_sum and left_sum >= cross_sum: return left_low, left_high, left_sum elif right_sum >= left_sum and right_sum >= cross_sum: return right_low, right_high, right_sum return cross_left, cross_right, cross_sum def __lowerCAmelCase ( a__ , a__ , a__ , a__ ) -> tuple[int, int, float]: __a , __a = float('''-inf''' ), -1 __a , __a = float('''-inf''' ), -1 __a = 0 for i in range(a__ , low - 1 , -1 ): summ += arr[i] if summ > left_sum: __a = summ __a = i __a = 0 for i in range(mid + 1 , high + 1 ): summ += arr[i] if summ > right_sum: __a = summ __a = i return max_left, max_right, (left_sum + right_sum) def __lowerCAmelCase ( a__ ) -> float: __a = [randint(1 , a__ ) for _ in range(a__ )] __a = time.time() max_subarray(a__ , 0 , input_size - 1 ) __a = time.time() return end - start def __lowerCAmelCase ( ) -> None: __a = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000] __a = [time_max_subarray(a__ ) for input_size in input_sizes] print('''No of Inputs\t\tTime Taken''' ) for input_size, runtime in zip(a__ , a__ ): print(a__ , '''\t\t''' , a__ ) plt.plot(a__ , a__ ) plt.xlabel('''Number of Inputs''' ) plt.ylabel('''Time taken in seconds''' ) plt.show() if __name__ == "__main__": from doctest import testmod testmod()
6
1
def __lowerCAmelCase ( a__ , a__ , a__ ) -> list: __a = len(a__ ) __a = [[0] * n for i in range(a__ )] for i in range(a__ ): __a = y_points[i] for i in range(2 , a__ ): for j in range(a__ , a__ ): __a = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
6
import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __A( a , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class __A( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = ort.SessionOptions() __a = False return options def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __a = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_snake_case ) __a = '''A red cat sitting on a park bench''' __a = np.random.RandomState(0 ) __a = pipe( prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=10 , generator=_snake_case , output_type='''np''' , ) __a = output.images __a = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) __a = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __a = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) __a = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_snake_case , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_snake_case ) __a = '''A red cat sitting on a park bench''' __a = np.random.RandomState(0 ) __a = pipe( prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=20 , generator=_snake_case , output_type='''np''' , ) __a = output.images __a = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) __a = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
6
1
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' super().tearDown() gc.collect() def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a , __a = FlaxControlNetModel.from_pretrained( '''lllyasviel/sd-controlnet-canny''' , from_pt=_snake_case , dtype=jnp.bfloataa ) __a , __a = FlaxStableDiffusionControlNetPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , controlnet=_snake_case , from_pt=_snake_case , dtype=jnp.bfloataa ) __a = controlnet_params __a = '''bird''' __a = jax.device_count() __a = pipe.prepare_text_inputs([prompts] * num_samples ) __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ) __a = pipe.prepare_image_inputs([canny_image] * num_samples ) __a = jax.random.PRNGKey(0 ) __a = jax.random.split(_snake_case , jax.device_count() ) __a = replicate(_snake_case ) __a = shard(_snake_case ) __a = shard(_snake_case ) __a = pipe( prompt_ids=_snake_case , image=_snake_case , params=_snake_case , prng_seed=_snake_case , num_inference_steps=50 , jit=_snake_case , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) __a = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __a = images[0, 253:256, 253:256, -1] __a = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __a = jnp.array( [0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] ) print(F"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a , __a = FlaxControlNetModel.from_pretrained( '''lllyasviel/sd-controlnet-openpose''' , from_pt=_snake_case , dtype=jnp.bfloataa ) __a , __a = FlaxStableDiffusionControlNetPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , controlnet=_snake_case , from_pt=_snake_case , dtype=jnp.bfloataa ) __a = controlnet_params __a = '''Chef in the kitchen''' __a = jax.device_count() __a = pipe.prepare_text_inputs([prompts] * num_samples ) __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' ) __a = pipe.prepare_image_inputs([pose_image] * num_samples ) __a = jax.random.PRNGKey(0 ) __a = jax.random.split(_snake_case , jax.device_count() ) __a = replicate(_snake_case ) __a = shard(_snake_case ) __a = shard(_snake_case ) __a = pipe( prompt_ids=_snake_case , image=_snake_case , params=_snake_case , prng_seed=_snake_case , num_inference_steps=50 , jit=_snake_case , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) __a = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __a = images[0, 253:256, 253:256, -1] __a = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __a = jnp.array( [[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] ) print(F"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
6
from math import ceil def __lowerCAmelCase ( a__ = 1001 ) -> int: __a = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): __a = 2 * i + 1 __a = 2 * i __a = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: A : List[Any] = int(sys.argv[1]) print(solution(n)) except ValueError: print('Invalid entry - please enter a number')
6
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A : Optional[int] = { 'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'], 'feature_extraction_whisper': ['WhisperFeatureExtractor'], 'processing_whisper': ['WhisperProcessor'], 'tokenization_whisper': ['WhisperTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = ['WhisperTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : str = [ 'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel', 'WhisperForAudioClassification', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = [ 'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = [ 'FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __A( a ): snake_case_ = ['''image_processor''', '''tokenizer'''] snake_case_ = '''ChineseCLIPImageProcessor''' snake_case_ = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> Tuple: '''simple docstring''' __a = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _snake_case , ) __a = kwargs.pop('''feature_extractor''' ) __a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(_snake_case , _snake_case ) __a = self.image_processor def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Optional[Any]: '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __a = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case ) if images is not None: __a = self.image_processor(_snake_case , return_tensors=_snake_case , **_snake_case ) if text is not None and images is not None: __a = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_snake_case ) , tensor_type=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> str: '''simple docstring''' return self.tokenizer.batch_decode(*_snake_case , **_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> Dict: '''simple docstring''' return self.tokenizer.decode(*_snake_case , **_snake_case ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = self.tokenizer.model_input_names __a = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _snake_case , ) return self.image_processor_class
6
1
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup A : str = logging.get_logger(__name__) class __A( a ): def __init__( self , **_snake_case ) -> List[Any]: '''simple docstring''' requires_backends(self , ['''bs4'''] ) super().__init__(**_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int: '''simple docstring''' __a = [] __a = [] __a = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __a = parent.find_all(child.name , recursive=_snake_case ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_snake_case ) else next(i for i, s in enumerate(_snake_case , 1 ) if s is child ) ) __a = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]: '''simple docstring''' __a = BeautifulSoup(_snake_case , '''html.parser''' ) __a = [] __a = [] __a = [] for element in html_code.descendants: if type(_snake_case ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __a = html.unescape(_snake_case ).strip() if not text_in_this_tag: continue all_doc_strings.append(_snake_case ) __a , __a = self.xpath_soup(_snake_case ) stringaxtag_seq.append(_snake_case ) stringaxsubs_seq.append(_snake_case ) if len(_snake_case ) != len(_snake_case ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(_snake_case ) != len(_snake_case ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = '''''' for tagname, subs in zip(_snake_case , _snake_case ): xpath += F"""/{tagname}""" if subs != 0: xpath += F"""[{subs}]""" return xpath def __call__( self , _snake_case ) -> BatchFeature: '''simple docstring''' __a = False # Check that strings has a valid type if isinstance(_snake_case , _snake_case ): __a = True elif isinstance(_snake_case , (list, tuple) ): if len(_snake_case ) == 0 or isinstance(html_strings[0] , _snake_case ): __a = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' F"""but is of type {type(_snake_case )}.""" ) __a = bool(isinstance(_snake_case , (list, tuple) ) and (isinstance(html_strings[0] , _snake_case )) ) if not is_batched: __a = [html_strings] # Get nodes + xpaths __a = [] __a = [] for html_string in html_strings: __a , __a , __a = self.get_three_from_single(_snake_case ) nodes.append(_snake_case ) __a = [] for node, tag_list, sub_list in zip(_snake_case , _snake_case , _snake_case ): __a = self.construct_xpath(_snake_case , _snake_case ) xpath_strings.append(_snake_case ) xpaths.append(_snake_case ) # return as Dict __a = {'''nodes''': nodes, '''xpaths''': xpaths} __a = BatchFeature(data=_snake_case , tensor_type=_snake_case ) return encoded_inputs
6
from __future__ import annotations import typing from collections import Counter def __lowerCAmelCase ( a__ ) -> typing.Counter[int]: __a = Counter() for base in range(1 , max_perimeter + 1 ): for perpendicular in range(a__ , max_perimeter + 1 ): __a = (base * base + perpendicular * perpendicular) ** 0.5 if hypotenuse == int(a__ ): __a = int(base + perpendicular + hypotenuse ) if perimeter > max_perimeter: continue triplets[perimeter] += 1 return triplets def __lowerCAmelCase ( a__ = 1000 ) -> int: __a = pythagorean_triple(a__ ) return triplets.most_common(1 )[0][0] if __name__ == "__main__": print(F"Perimeter {solution()} has maximum solutions")
6
1
import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __A: @staticmethod def SCREAMING_SNAKE_CASE_ ( *_snake_case , **_snake_case ) -> Tuple: '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class __A( unittest.TestCase ): snake_case_ = MODEL_FOR_OBJECT_DETECTION_MAPPING def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> List[str]: '''simple docstring''' __a = ObjectDetectionPipeline(model=_snake_case , image_processor=_snake_case ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 ) self.assertGreater(len(_snake_case ) , 0 ) for detected_object in outputs: self.assertEqual( _snake_case , { '''score''': ANY(_snake_case ), '''label''': ANY(_snake_case ), '''box''': {'''xmin''': ANY(_snake_case ), '''ymin''': ANY(_snake_case ), '''xmax''': ANY(_snake_case ), '''ymax''': ANY(_snake_case )}, } , ) import datasets __a = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) __a = [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] __a = object_detector(_snake_case , threshold=0.0 ) self.assertEqual(len(_snake_case ) , len(_snake_case ) ) for outputs in batch_outputs: self.assertGreater(len(_snake_case ) , 0 ) for detected_object in outputs: self.assertEqual( _snake_case , { '''score''': ANY(_snake_case ), '''label''': ANY(_snake_case ), '''box''': {'''xmin''': ANY(_snake_case ), '''ymin''': ANY(_snake_case ), '''xmax''': ANY(_snake_case ), '''ymax''': ANY(_snake_case )}, } , ) @require_tf @unittest.skip('''Object detection not implemented in TF''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' pass @require_torch def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = '''hf-internal-testing/tiny-detr-mobilenetsv3''' __a = AutoModelForObjectDetection.from_pretrained(_snake_case ) __a = AutoFeatureExtractor.from_pretrained(_snake_case ) __a = ObjectDetectionPipeline(model=_snake_case , feature_extractor=_snake_case ) __a = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, ] , ) __a = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] , threshold=0.0 , ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, ], [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, ], ] , ) @require_torch @slow def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = '''facebook/detr-resnet-50''' __a = AutoModelForObjectDetection.from_pretrained(_snake_case ) __a = AutoFeatureExtractor.from_pretrained(_snake_case ) __a = ObjectDetectionPipeline(model=_snake_case , feature_extractor=_snake_case ) __a = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ] , ) __a = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], ] , ) @require_torch @slow def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = '''facebook/detr-resnet-50''' __a = pipeline('''object-detection''' , model=_snake_case ) __a = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ] , ) __a = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], ] , ) @require_torch @slow def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = 0.9985 __a = '''facebook/detr-resnet-50''' __a = pipeline('''object-detection''' , model=_snake_case ) __a = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=_snake_case ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ] , ) @require_torch @require_pytesseract @slow def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = '''Narsil/layoutlmv3-finetuned-funsd''' __a = 0.9993 __a = pipeline('''object-detection''' , model=_snake_case , threshold=_snake_case ) __a = object_detector( '''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ {'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}}, {'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}}, ] , )
6
# flake8: noqa # Lint as: python3 A : Optional[Any] = [ 'VerificationMode', 'Version', 'disable_progress_bar', 'enable_progress_bar', 'is_progress_bar_enabled', 'experimental', ] from .info_utils import VerificationMode from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled from .version import Version from .experimental import experimental
6
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A : List[str] = { 'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'], 'processing_git': ['GitProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Union[str, Any] = [ 'GIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GitForCausalLM', 'GitModel', 'GitPreTrainedModel', 'GitVisionModel', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys A : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
from typing import Dict from .base import GenericTensor, Pipeline class __A( a ): def SCREAMING_SNAKE_CASE_ ( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Optional[Any]: '''simple docstring''' if tokenize_kwargs is None: __a = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' ) __a = truncation __a = tokenize_kwargs __a = {} if return_tensors is not None: __a = return_tensors return preprocess_params, {}, postprocess_params def SCREAMING_SNAKE_CASE_ ( self , _snake_case , **_snake_case ) -> Dict[str, GenericTensor]: '''simple docstring''' __a = self.framework __a = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = self.model(**_snake_case ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=False ) -> Optional[int]: '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self , *_snake_case , **_snake_case ) -> Any: '''simple docstring''' return super().__call__(*_snake_case , **_snake_case )
6
1
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.

In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]

On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering', remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
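# --- Usage sketch (hedged) ---
# Calling the same tool outside the test harness; the expected answer mirrors
# the assertions above.
from transformers import load_tool

qa = load_tool('text-question-answering')
qa.setup()
print(qa(TEXT, 'What did Hugging Face do in April 2021?'))
# -> 'launched the BigScience Research Workshop'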
6
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : List[str] = logging.get_logger(__name__) A : Optional[int] = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class __A( a ): snake_case_ = '''levit''' def __init__( self , _snake_case=224 , _snake_case=3 , _snake_case=3 , _snake_case=2 , _snake_case=1 , _snake_case=16 , _snake_case=[128, 256, 384] , _snake_case=[4, 8, 12] , _snake_case=[4, 4, 4] , _snake_case=[16, 16, 16] , _snake_case=0 , _snake_case=[2, 2, 2] , _snake_case=[2, 2, 2] , _snake_case=0.02 , **_snake_case , ) -> Optional[Any]: '''simple docstring''' super().__init__(**_snake_case ) __a = image_size __a = num_channels __a = kernel_size __a = stride __a = padding __a = hidden_sizes __a = num_attention_heads __a = depths __a = key_dim __a = drop_path_rate __a = patch_size __a = attention_ratio __a = mlp_ratio __a = initializer_range __a = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class __A( a ): snake_case_ = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> float: '''simple docstring''' return 1E-4
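# --- Usage sketch (hedged) ---
# Instantiating the configuration above with its defaults; `LevitConfig` is the
# public name this class is registered under in transformers.
from transformers import LevitConfig

config = LevitConfig()
print(config.image_size, config.hidden_sizes, config.depths)
# 224 [128, 256, 384] [4, 4, 4]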
6
1
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer A : List[Any] = logging.get_logger(__name__) A : int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} A : Dict = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } A : Any = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } A : Tuple = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } A : Optional[Any] = { 'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2, 'facebook/dpr-ctx_encoder-multiset-base': 5_1_2, } A : Tuple = { 'facebook/dpr-question_encoder-single-nq-base': 5_1_2, 'facebook/dpr-question_encoder-multiset-base': 5_1_2, } A : List[Any] = { 'facebook/dpr-reader-single-nq-base': 5_1_2, 'facebook/dpr-reader-multiset-base': 5_1_2, } A : Optional[int] = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } A : str = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } A : Union[str, Any] = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class __A( a ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP snake_case_ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION snake_case_ = DPRContextEncoderTokenizer class __A( a ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP snake_case_ = 
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION snake_case_ = DPRQuestionEncoderTokenizer A : Optional[Any] = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) A : Optional[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) A : Tuple = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(a ) class __A: def __call__( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = False , _snake_case = False , _snake_case = None , _snake_case = None , _snake_case = None , **_snake_case , ) -> BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( _snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , return_tensors=_snake_case , return_attention_mask=_snake_case , **_snake_case , ) elif titles is None or texts is None: __a = titles if texts is None else texts return super().__call__( _snake_case , _snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , return_tensors=_snake_case , return_attention_mask=_snake_case , **_snake_case , ) __a = titles if not isinstance(_snake_case , _snake_case ) else [titles] __a = texts if not isinstance(_snake_case , _snake_case ) else [texts] __a = len(_snake_case ) __a = questions if not isinstance(_snake_case , _snake_case ) else [questions] * n_passages assert len(_snake_case ) == len( _snake_case ), F"""There should be as many titles than texts but got {len(_snake_case )} titles and {len(_snake_case )} texts.""" __a = super().__call__(_snake_case , _snake_case , padding=_snake_case , truncation=_snake_case )['''input_ids'''] __a = super().__call__(_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case )['''input_ids'''] __a = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(_snake_case , _snake_case ) ] } if return_attention_mask is not False: __a = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __a = attention_mask return self.pad(_snake_case , padding=_snake_case , max_length=_snake_case , 
return_tensors=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 16 , _snake_case = 64 , _snake_case = 4 , ) -> List[DPRSpanPrediction]: '''simple docstring''' __a = reader_input['''input_ids'''] __a , __a , __a = reader_output[:3] __a = len(_snake_case ) __a = sorted(range(_snake_case ) , reverse=_snake_case , key=relevance_logits.__getitem__ ) __a = [] for doc_id in sorted_docs: __a = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __a = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __a = sequence_ids.index(self.pad_token_id ) else: __a = len(_snake_case ) __a = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_snake_case , top_spans=_snake_case , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_snake_case , start_index=_snake_case , end_index=_snake_case , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(_snake_case ) >= num_spans: break return nbest_spans_predictions[:num_spans] def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , ) -> List[DPRSpanPrediction]: '''simple docstring''' __a = [] for start_index, start_score in enumerate(_snake_case ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __a = sorted(_snake_case , key=lambda _snake_case : x[1] , reverse=_snake_case ) __a = [] for (start_index, end_index), score in scores: assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]""" __a = end_index - start_index + 1 assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(_snake_case ) == top_spans: break return chosen_span_intervals @add_end_docstrings(a ) class __A( a , a ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = READER_PRETRAINED_VOCAB_FILES_MAP snake_case_ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = READER_PRETRAINED_INIT_CONFIGURATION snake_case_ = ['''input_ids''', '''attention_mask'''] snake_case_ = DPRReaderTokenizer
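# --- Usage sketch (hedged) ---
# Encoding one question against one passage with the reader tokenizer above;
# input_ids follows the documented (n_passages, sequence_length) layout.
from transformers import DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained('facebook/dpr-reader-single-nq-base')
encoded = tokenizer(
    questions=['What is love?'],
    titles=['Haddaway'],
    texts=["'What Is Love' is a 1993 song recorded by Haddaway."],
    return_tensors='pt',
)
print(encoded['input_ids'].shape)  # torch.Size([1, seq_len])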
6
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel A : int = '0.12' # assumed parallelism: 8 @require_flax @is_staging_test class __A( unittest.TestCase ): @classmethod def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]: '''simple docstring''' __a = TOKEN HfFolder.save_token(_snake_case ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]: '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) __a = FlaxBertModel(_snake_case ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) __a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" ) __a = flatten_dict(unfreeze(model.params ) ) __a = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __a = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_snake_case , repo_id='''test-model-flax''' , push_to_hub=_snake_case , use_auth_token=self._token ) __a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" ) __a = flatten_dict(unfreeze(model.params ) ) __a = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __a = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) __a = FlaxBertModel(_snake_case ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) __a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) __a = flatten_dict(unfreeze(model.params ) ) __a = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __a = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( _snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_snake_case , use_auth_token=self._token ) __a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) __a = flatten_dict(unfreeze(model.params ) ) __a = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __a = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" ) def __lowerCAmelCase ( a__ , a__ ) -> str: __a = True __a = 
flatten_dict(modela.params ) __a = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4: __a = False return models_are_equal @require_flax class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) __a = FlaxBertModel(_snake_case ) __a = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_snake_case , _snake_case ) ) with self.assertRaises(_snake_case ): __a = FlaxBertModel.from_pretrained(_snake_case ) __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case ) self.assertTrue(check_models_equal(_snake_case , _snake_case ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) __a = FlaxBertModel(_snake_case ) __a = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_snake_case , _snake_case ) , max_shard_size='''10KB''' ) with self.assertRaises(_snake_case ): __a = FlaxBertModel.from_pretrained(_snake_case ) __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case ) self.assertTrue(check_models_equal(_snake_case , _snake_case ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a = '''bert''' __a = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(_snake_case ): __a = FlaxBertModel.from_pretrained(_snake_case ) __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case ) self.assertIsNotNone(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = '''bert''' __a = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(_snake_case ): __a = FlaxBertModel.from_pretrained(_snake_case ) __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case ) self.assertIsNotNone(_snake_case )
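# --- Usage sketch (hedged) ---
# The subfolder round trip the tests above exercise, on a tiny random model.
import os
import tempfile

from transformers import BertConfig, FlaxBertModel

config = BertConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = FlaxBertModel(config)
with tempfile.TemporaryDirectory() as tmp_dir:
    model.save_pretrained(os.path.join(tmp_dir, 'bert'))
    reloaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder='bert')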
6
1
from typing import Any class __A: def __init__( self , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = data __a = None def __repr__( self ) -> str: '''simple docstring''' return F"""Node({self.data})""" class __A: def __init__( self ) -> Dict: '''simple docstring''' __a = None def __iter__( self ) -> Any: '''simple docstring''' __a = self.head while node: yield node.data __a = node.next def __len__( self ) -> int: '''simple docstring''' return sum(1 for _ in self ) def __repr__( self ) -> str: '''simple docstring''' return "->".join([str(_snake_case ) for item in self] ) def __getitem__( self , _snake_case ) -> Any: '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , _snake_case , _snake_case ) -> None: '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) __a = self.head for _ in range(_snake_case ): __a = current.next __a = data def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> None: '''simple docstring''' self.insert_nth(len(self ) , _snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> None: '''simple docstring''' self.insert_nth(0 , _snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> None: '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError('''list index out of range''' ) __a = Node(_snake_case ) if self.head is None: __a = new_node elif index == 0: __a = self.head # link new_node to head __a = new_node else: __a = self.head for _ in range(index - 1 ): __a = temp.next __a = temp.next __a = new_node def SCREAMING_SNAKE_CASE_ ( self ) -> None: # print every node data '''simple docstring''' print(self ) def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' return self.delete_nth(0 ) def SCREAMING_SNAKE_CASE_ ( self ) -> Any: # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case = 0 ) -> Any: '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('''List index out of range.''' ) __a = self.head # default first node if index == 0: __a = self.head.next else: __a = self.head for _ in range(index - 1 ): __a = temp.next __a = temp.next __a = temp.next.next return delete_node.data def SCREAMING_SNAKE_CASE_ ( self ) -> bool: '''simple docstring''' return self.head is None def SCREAMING_SNAKE_CASE_ ( self ) -> None: '''simple docstring''' __a = None __a = self.head while current: # Store the current node's next node. __a = current.next # Make the current node's next point backwards __a = prev # Make the previous node be the current node __a = current # Make the current node the next node (to progress iteration) __a = next_node # Return prev in order to put the head at the end __a = prev def __lowerCAmelCase ( ) -> None: __a = LinkedList() assert linked_list.is_empty() is True assert str(a__ ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
for i in range(10 ): assert len(a__ ) == i linked_list.insert_nth(a__ , i + 1 ) assert str(a__ ) == "->".join(str(a__ ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(a__ ) == "->".join(str(a__ ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(a__ ) == 9 assert str(a__ ) == "->".join(str(a__ ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): __a = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(a__ ) == "->".join(str(a__ ) for i in range(-8 , 1 ) ) def __lowerCAmelCase ( ) -> None: __a = [ -9, 100, Node(7734_5112 ), '''dlrow olleH''', 7, 5555, 0, -192.55_555, '''Hello, world!''', 77.9, Node(10 ), None, None, 12.20, ] __a = LinkedList() for i in test_input: linked_list.insert_tail(a__ ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(a__ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head __a = linked_list.delete_head() assert result == -9 assert ( str(a__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail __a = linked_list.delete_tail() assert result == 12.2 assert ( str(a__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list __a = linked_list.delete_nth(10 ) assert result is None assert ( str(a__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('''Hello again, world!''' ) ) assert ( str(a__ ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(a__ ) assert ( str(a__ ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(a__ ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def __lowerCAmelCase ( ) -> List[Any]: from doctest import testmod testmod() __a = LinkedList() linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() ) linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() ) linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() print('''\nDelete head''' ) linked_list.delete_head() print('''Delete tail''' ) linked_list.delete_tail() print('''\nPrint list:''' ) linked_list.print_list() print('''\nReverse linked list''' ) linked_list.reverse() print('''\nPrint list:''' ) linked_list.print_list() print('''\nString representation of linked list:''' ) print(a__ ) print('''\nReading/changing Node data using indexing:''' ) print(F"""Element at Position 1: {linked_list[1]}""" ) __a = input('''Enter New Value: ''' ).strip() print('''New list:''' ) print(a__ ) print(F"""length of linked_list is : {len(a__ )}""" ) if 
__name__ == "__main__": main()
6
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path A : Optional[Any] = Path(__file__).resolve().parents[3] / 'src' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(4_2) A : List[str] = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'} A : Optional[int] = 'zero2' A : str = 'zero3' A : Tuple = [ZEROa, ZEROa] def __lowerCAmelCase ( a__ , a__ , a__ ) -> Tuple: # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param __a = parameterized.to_safe_name('''_'''.join(str(a__ ) for x in param.args ) ) return F"""{func.__name__}_{param_based_name}""" # Cartesian-product of zero stages with models to test A : Union[str, Any] = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class __A( a ): @parameterized.expand(_snake_case , name_func=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Any: '''simple docstring''' self.run_and_check( stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , ) @require_torch_multi_gpu @parameterized.expand(_snake_case , name_func=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> int: '''simple docstring''' self.run_and_check( stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , ) @parameterized.expand(_snake_case , name_func=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> str: '''simple docstring''' self.run_and_check( stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , ) @require_torch_multi_gpu @parameterized.expand(_snake_case , name_func=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' self.run_and_check( stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 10 , _snake_case = True , _snake_case = True , _snake_case = True , ) -> Any: '''simple docstring''' __a = models[model] __a = self.run_trainer( stage=_snake_case , model_name=_snake_case , eval_steps=_snake_case , num_train_epochs=1 , distributed=_snake_case , fpaa=_snake_case , ) self.do_checks(_snake_case ) return output_dir def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 10 , _snake_case = 1 , _snake_case = True , _snake_case = True , ) -> Union[str, Any]: '''simple docstring''' __a = self.get_auto_remove_tmp_dir('''./xxx''' , after=_snake_case ) __a = F""" --model_name_or_path {model_name} 
--dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(_snake_case )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none """.split() if fpaa: args.extend(['''--fp16'''] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __a = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split() __a = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""] __a = self.get_launcher(_snake_case ) __a = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(_snake_case , env=self.get_env() ) return output_dir def SCREAMING_SNAKE_CASE_ ( self , _snake_case=False ) -> List[str]: '''simple docstring''' __a = min(2 , get_gpu_count() ) if distributed else 1 return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
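# --- Illustration (hedged) ---
# Roughly the command run_trainer() assembles before execute_subprocess_async:
# launcher + example script + training args + the stage-specific --deepspeed
# config; the config path below is an assumed location.
launcher = 'deepspeed --num_nodes 1 --num_gpus 2'.split()
script = ['examples/research_projects/wav2vec2/run_asr.py']  # path is indicative
train_args = ['--model_name_or_path', 'patrickvonplaten/wav2vec2_tiny_random', '--fp16']
ds_args = ['--deepspeed', 'tests/deepspeed/ds_config_wav2vec2_zero2.json']
print(' '.join(launcher + script + train_args + ds_args))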
6
1
import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() A : List[str] = logging.get_logger(__name__) A : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/' A : int = { 'jukebox-1b-lyrics': [ '5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '1b_lyrics/prior_level_2.pth.tar', ], 'jukebox-5b-lyrics': [ '5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '5b_lyrics/prior_level_2.pth.tar', ], } def __lowerCAmelCase ( a__ ) -> int: if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10: __a = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' ) elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10: __a = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' ) elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10: __a = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' ) elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10: __a = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' ) if "conditioner_blocks.0." in key: __a = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' ) if "prime_prior" in key: __a = key.replace('''prime_prior''' , '''encoder''' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: __a = key.replace('''.emb.''' , '''.''' ) if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('''.k''' , '''.codebook''' ) if "y_emb." in key: return key.replace('''y_emb.''' , '''metadata_embedding.''' ) if "x_emb.emb." in key: __a = key.replace('''0.x_emb.emb''' , '''embed_tokens''' ) if "prime_state_ln" in key: return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' ) if ".ln" in key: return key.replace('''.ln''' , '''.layer_norm''' ) if "_ln" in key: return key.replace('''_ln''' , '''_layer_norm''' ) if "prime_state_proj" in key: return key.replace('''prime_state_proj''' , '''encoder.proj_in''' ) if "prime_x_out" in key: return key.replace('''prime_x_out''' , '''encoder.lm_head''' ) if "prior.x_out" in key: return key.replace('''x_out''' , '''fc_proj_out''' ) if "x_emb" in key: return key.replace('''x_emb''' , '''embed_tokens''' ) return key def __lowerCAmelCase ( a__ , a__ , a__ , a__ ) -> Dict: __a = {} import re __a = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) __a = re.compile( R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) __a = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) __a = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) __a = re.compile( R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) __a = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) __a = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' ) __a = re.compile( R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) __a = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(a__ ): __a = re_encoder_block_conv_in.match(a__ ) __a = regex_match.groups() __a = 
int(groups[2] ) * 2 + int(groups[3] ) __a = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}""" __a = re_encoder_block_conv_in.sub(a__ , a__ ) elif re_encoder_block_resnet.fullmatch(a__ ): __a = re_encoder_block_resnet.match(a__ ) __a = regex_match.groups() __a = int(groups[2] ) * 2 + int(groups[3] ) __a = {'''1''': 1, '''3''': 2}[groups[-2]] __a = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.""" __a = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}""" __a = prefix + resnet_block __a = re_encoder_block_resnet.sub(a__ , a__ ) elif re_encoder_block_proj_out.fullmatch(a__ ): __a = re_encoder_block_proj_out.match(a__ ) __a = regex_match.groups() __a = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}""" __a = re_encoder_block_proj_out.sub(a__ , a__ ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(a__ ): __a = re_decoder_block_conv_out.match(a__ ) __a = regex_match.groups() __a = int(groups[2] ) * 2 + int(groups[3] ) - 2 __a = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}""" __a = re_decoder_block_conv_out.sub(a__ , a__ ) elif re_decoder_block_resnet.fullmatch(a__ ): __a = re_decoder_block_resnet.match(a__ ) __a = regex_match.groups() __a = int(groups[2] ) * 2 + int(groups[3] ) - 2 __a = {'''1''': 1, '''3''': 2}[groups[-2]] __a = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.""" __a = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}""" __a = prefix + resnet_block __a = re_decoder_block_resnet.sub(a__ , a__ ) elif re_decoder_block_proj_in.fullmatch(a__ ): __a = re_decoder_block_proj_in.match(a__ ) __a = regex_match.groups() __a = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}""" __a = re_decoder_block_proj_in.sub(a__ , a__ ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(a__ ): __a = re_prior_cond_conv_out.match(a__ ) __a = regex_match.groups() __a = int(groups[1] ) * 2 + int(groups[2] ) - 2 __a = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}""" __a = re_prior_cond_conv_out.sub(a__ , a__ ) elif re_prior_cond_resnet.fullmatch(a__ ): __a = re_prior_cond_resnet.match(a__ ) __a = regex_match.groups() __a = int(groups[1] ) * 2 + int(groups[2] ) - 2 __a = {'''1''': 1, '''3''': 2}[groups[-2]] __a = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.""" __a = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}""" __a = prefix + resnet_block __a = re_prior_cond_resnet.sub(a__ , a__ ) elif re_prior_cond_proj_in.fullmatch(a__ ): __a = re_prior_cond_proj_in.match(a__ ) __a = regex_match.groups() __a = F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}""" __a = re_prior_cond_proj_in.sub(a__ , a__ ) # keep original key else: __a = original_key __a = replace_key(a__ ) if F"""{key_prefix}.{key}""" not in model_state_dict or key is None: print(F"""failed converting {original_key} to {key}, does not match""" ) # handle missmatched shape elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape: __a = model_state_dict[F"""{key_prefix}.{key}"""] print(F"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" ) __a = original_key __a = original_key __a = value return new_dict @torch.no_grad() def __lowerCAmelCase ( a__=None , a__=None ) -> List[Any]: for file in MODEL_MAPPING[model_name]: if not 
os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ): __a = requests.get(F"""{PREFIX}{file}""" , allow_redirects=a__ ) os.makedirs(F"""{pytorch_dump_folder_path}/""" , exist_ok=a__ ) open(F"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , '''wb''' ).write(r.content ) __a = MODEL_MAPPING[model_name.split('''/''' )[-1]] __a = JukeboxConfig.from_pretrained(a__ ) __a = JukeboxModel(a__ ) __a = [] __a = {} for i, dict_name in enumerate(a__ ): __a = torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['''model'''] __a = {} for k in old_dic.keys(): if k.endswith('''.b''' ): __a = old_dic[k] elif k.endswith('''.w''' ): __a = old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: __a = old_dic[k] else: __a = old_dic[k] __a = '''vqvae''' if i == 0 else F"""priors.{3 - i}""" __a = fix_jukebox_keys(a__ , model.state_dict() , a__ , a__ ) weight_dict.append(a__ ) __a = weight_dict.pop(0 ) model.vqvae.load_state_dict(a__ ) for i in range(len(a__ ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(a__ ).mkdir(exist_ok=a__ ) with open(F"""{pytorch_dump_folder_path}/mapping.json""" , '''w''' ) as txtfile: json.dump(a__ , a__ ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(a__ ) return weight_dict if __name__ == "__main__": A : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='jukebox-5b-lyrics', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='jukebox-5b-lyrics-converted', type=str, help='Path to the output PyTorch model directory.', ) A : List[Any] = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
6
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __A( a , a , unittest.TestCase ): snake_case_ = AutoencoderKL snake_case_ = '''sample''' snake_case_ = 1E-2 @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = 4 __a = 3 __a = (32, 32) __a = floats_tensor((batch_size, num_channels) + sizes ).to(_snake_case ) return {"sample": image} @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' return (3, 32, 32) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' return (3, 32, 32) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = { '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } __a = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' pass @unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a , __a = self.prepare_init_args_and_inputs_for_common() __a = self.model_class(**_snake_case ) model.to(_snake_case ) assert not model.is_gradient_checkpointing and model.training __a = model(**_snake_case ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __a = torch.randn_like(_snake_case ) __a = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __a = self.model_class(**_snake_case ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(_snake_case ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __a = model_a(**_snake_case ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __a = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) __a = dict(model.named_parameters() ) __a = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a , __a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_snake_case ) __a = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' ) __a = model.to(_snake_case ) model.eval() if torch_device == "mps": __a = torch.manual_seed(0 ) else: __a = torch.Generator(device=_snake_case ).manual_seed(0 ) __a = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __a = image.to(_snake_case ) with torch.no_grad(): __a = model(_snake_case , sample_posterior=_snake_case , generator=_snake_case ).sample __a = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": __a = torch.tensor( [ -4.0_078E-01, -3.8_323E-04, -1.2_681E-01, -1.1_462E-01, 2.0_095E-01, 1.0_893E-01, -8.8_247E-02, -3.0_361E-01, -9.8_644E-03, ] ) elif torch_device == "cpu": __a = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: __a = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(_snake_case , _snake_case , rtol=1E-2 ) ) @slow class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={'_'.join([str(_snake_case ) for s in shape] )}.npy""" def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 , _snake_case=(4, 3, 512, 512) , _snake_case=False ) -> Any: '''simple docstring''' __a = torch.floataa if fpaa else torch.floataa __a = torch.from_numpy(load_hf_numpy(self.get_file_format(_snake_case , _snake_case ) ) ).to(_snake_case ).to(_snake_case ) return image def SCREAMING_SNAKE_CASE_ ( self , _snake_case="CompVis/stable-diffusion-v1-4" , _snake_case=False ) -> Optional[Any]: '''simple docstring''' __a = '''fp16''' if fpaa else None __a = torch.floataa if fpaa else torch.floataa __a = AutoencoderKL.from_pretrained( _snake_case , subfolder='''vae''' , torch_dtype=_snake_case , revision=_snake_case , ) model.to(_snake_case ).eval() return model def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 ) -> Tuple: '''simple docstring''' if torch_device == "mps": return torch.manual_seed(_snake_case ) return torch.Generator(device=_snake_case ).manual_seed(_snake_case ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 
0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> List[Any]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case ) __a = self.get_generator(_snake_case ) with torch.no_grad(): __a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample assert sample.shape == image.shape __a = sample[-1, -2:, -2:, :2].flatten().float().cpu() __a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(_snake_case , _snake_case , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Tuple: '''simple docstring''' __a = self.get_sd_vae_model(fpaa=_snake_case ) __a = self.get_sd_image(_snake_case , fpaa=_snake_case ) __a = self.get_generator(_snake_case ) with torch.no_grad(): __a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample assert sample.shape == image.shape __a = sample[-1, -2:, :2, -2:].flatten().float().cpu() __a = torch.tensor(_snake_case ) assert torch_all_close(_snake_case , _snake_case , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case ) with torch.no_grad(): __a = model(_snake_case ).sample assert sample.shape == image.shape __a = sample[-1, -2:, -2:, :2].flatten().float().cpu() __a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(_snake_case , _snake_case , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) ) with torch.no_grad(): __a = model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] __a = sample[-1, -2:, :2, -2:].flatten().cpu() __a = torch.tensor(_snake_case ) assert torch_all_close(_snake_case , _snake_case , atol=1E-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = self.get_sd_vae_model(fpaa=_snake_case ) __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case ) with torch.no_grad(): __a = 
model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] __a = sample[-1, -2:, :2, -2:].flatten().float().cpu() __a = torch.tensor(_snake_case ) assert torch_all_close(_snake_case , _snake_case , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Union[str, Any]: '''simple docstring''' __a = self.get_sd_vae_model(fpaa=_snake_case ) __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case ) with torch.no_grad(): __a = model.decode(_snake_case ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __a = model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_snake_case , _snake_case , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) ) with torch.no_grad(): __a = model.decode(_snake_case ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __a = model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_snake_case , _snake_case , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case ) __a = self.get_generator(_snake_case ) with torch.no_grad(): __a = model.encode(_snake_case ).latent_dist __a = dist.sample(generator=_snake_case ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __a = sample[0, -1, -3:, -3:].flatten().cpu() __a = torch.tensor(_snake_case ) __a = 3E-3 if torch_device != '''mps''' else 1E-2 assert torch_all_close(_snake_case , _snake_case , atol=_snake_case )
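# --- Usage sketch (hedged) ---
# A round trip through the VAE the tests above load via get_sd_vae_model().
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained('CompVis/stable-diffusion-v1-4', subfolder='vae').eval()
image = torch.randn(1, 3, 512, 512)
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()  # (1, 4, 64, 64): 8x spatial downsample
    reconstruction = vae.decode(latents).sample       # back to (1, 3, 512, 512)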
6
1
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image; this offset assumes an odd-sized (e.g. 3x3) kernel
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert('RGB')
    pil_img.save('result_dilation.png')
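# --- Worked example ---
# Dilating a single foreground pixel with the cross-shaped structuring element
# grows it into a plus sign; output values assume the padding offset used above.
import numpy as np

img = np.zeros((3, 3), dtype=int)
img[1, 1] = 1
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
print(dilation(img, cross))
# [[0 1 0]
#  [1 1 1]
#  [0 1 0]]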
6
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup A : str = logging.get_logger(__name__) class __A( a ): def __init__( self , **_snake_case ) -> List[Any]: '''simple docstring''' requires_backends(self , ['''bs4'''] ) super().__init__(**_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int: '''simple docstring''' __a = [] __a = [] __a = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __a = parent.find_all(child.name , recursive=_snake_case ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_snake_case ) else next(i for i, s in enumerate(_snake_case , 1 ) if s is child ) ) __a = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]: '''simple docstring''' __a = BeautifulSoup(_snake_case , '''html.parser''' ) __a = [] __a = [] __a = [] for element in html_code.descendants: if type(_snake_case ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __a = html.unescape(_snake_case ).strip() if not text_in_this_tag: continue all_doc_strings.append(_snake_case ) __a , __a = self.xpath_soup(_snake_case ) stringaxtag_seq.append(_snake_case ) stringaxsubs_seq.append(_snake_case ) if len(_snake_case ) != len(_snake_case ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(_snake_case ) != len(_snake_case ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = '''''' for tagname, subs in zip(_snake_case , _snake_case ): xpath += F"""/{tagname}""" if subs != 0: xpath += F"""[{subs}]""" return xpath def __call__( self , _snake_case ) -> BatchFeature: '''simple docstring''' __a = False # Check that strings has a valid type if isinstance(_snake_case , _snake_case ): __a = True elif isinstance(_snake_case , (list, tuple) ): if len(_snake_case ) == 0 or isinstance(html_strings[0] , _snake_case ): __a = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' F"""but is of type {type(_snake_case )}.""" ) __a = bool(isinstance(_snake_case , (list, tuple) ) and (isinstance(html_strings[0] , _snake_case )) ) if not is_batched: __a = [html_strings] # Get nodes + xpaths __a = [] __a = [] for html_string in html_strings: __a , __a , __a = self.get_three_from_single(_snake_case ) nodes.append(_snake_case ) __a = [] for node, tag_list, sub_list in zip(_snake_case , _snake_case , _snake_case ): __a = self.construct_xpath(_snake_case , _snake_case ) xpath_strings.append(_snake_case ) xpaths.append(_snake_case ) # return as Dict __a = {'''nodes''': nodes, '''xpaths''': xpaths} __a = BatchFeature(data=_snake_case , tensor_type=_snake_case ) return encoded_inputs
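# --- Usage sketch (hedged) ---
# Extracting nodes and xpaths from raw HTML; `MarkupLMFeatureExtractor` is the
# public transformers name this class corresponds to.
from transformers import MarkupLMFeatureExtractor

html_string = '<html><body><h1>Title</h1><p>Hello world</p></body></html>'
feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor(html_string)
print(encoding['nodes'])   # [['Title', 'Hello world']]
print(encoding['xpaths'])  # [['/html/body/h1', '/html/body/p']]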
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo A : List[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' A : Any = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' A : Optional[int] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A( datasets.Metric ): def SCREAMING_SNAKE_CASE_ ( self ) -> MetricInfo: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ), } ) , ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 1 , _snake_case = 4 , ) -> Dict[str, float]: '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_snake_case , hypotheses=_snake_case , min_len=_snake_case , max_len=_snake_case ) }
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler string similarity: Jaro score plus a bonus for a common prefix."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
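A few spot checks of the function above:

# Identical strings score 1.0; the Winkler term rewards shared prefixes.
assert jaro_winkler("hello", "hello") == 1.0
print(jaro_winkler("martha", "marhta"))  # classic transposition example, ~0.9611
print(jaro_winkler("hello", "world"))    # dissimilar strings score low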
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __A: def __init__( self , _snake_case , _snake_case=13 , _snake_case=64 , _snake_case=2 , _snake_case=3 , _snake_case=True , _snake_case=True , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=10 , _snake_case=0.02 , _snake_case=[1, 16, 4, 4] , _snake_case=None , ) -> List[str]: '''simple docstring''' __a = parent __a = batch_size __a = image_size __a = patch_size __a = num_channels __a = is_training __a = use_labels __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = type_sequence_label_size __a = initializer_range __a = scope __a = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size __a = (self.image_size // 32) ** 2 __a = num_patches + 1 def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a = None if self.use_labels: __a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [4, 8, 16, 32], '''num_groups''': 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_snake_case , ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> List[str]: '''simple docstring''' __a = ViTHybridModel(config=_snake_case ) model.to(_snake_case ) model.eval() __a = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , 
_snake_case ) -> Optional[int]: '''simple docstring''' __a = self.type_sequence_label_size __a = ViTHybridForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() __a = model(_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a = config_and_inputs __a = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __A( a , a , unittest.TestCase ): snake_case_ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () snake_case_ = ( {'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification} if is_torch_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a = ViTHybridModelTester(self ) __a = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(_snake_case ) __a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a = [*signature.parameters.keys()] __a = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() __a = _config_zero_init(_snake_case ) for model_class in self.all_model_classes: __a = model_class(config=_snake_case ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": __a = [F"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @slow def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a = ViTHybridModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def 
__lowerCAmelCase ( ) -> Optional[int]: __a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __A( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( _snake_case ) __a = self.default_image_processor __a = prepare_img() __a = image_processor(images=_snake_case , return_tensors='''pt''' ).to(_snake_case ) # forward pass with torch.no_grad(): __a = model(**_snake_case ) # verify the logits __a = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , _snake_case ) __a = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) ) @slow @require_accelerate def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' ) __a = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' ) __a = prepare_img() __a = image_processor(images=_snake_case , return_tensors='''pt''' ) __a = model(**_snake_case ) __a = outputs.logits # model predicts one of the 1000 ImageNet classes __a = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''' )
def is_balanced(s: str) -> bool:
    """Return True if every bracket in ``s`` is closed in the right order."""
    stack = []
    open_brackets = set("([{")
    closed_brackets = set(")]}")
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for symbol in s:
        if symbol in open_brackets:
            stack.append(symbol)
        elif symbol in closed_brackets and (
            len(stack) == 0 or open_to_closed[stack.pop()] != symbol
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
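Quick examples of the checker above:

assert is_balanced("([]{})")
assert not is_balanced("([)]")  # interleaved pairs are rejected
assert not is_balanced("(")     # an unclosed opener leaves the stack non-empty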
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
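A short usage sketch for the config above; the model class name is assumed from the public transformers API:

from transformers import DecisionTransformerConfig, DecisionTransformerModel  # assumed upstream names

config = DecisionTransformerConfig(state_dim=11, act_dim=3, max_ep_len=1000)
model = DecisionTransformerModel(config)
# attribute_map aliases max_position_embeddings to n_positions
print(config.n_head, config.max_position_embeddings)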
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A : str = { 'configuration_blenderbot': [ 'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotConfig', 'BlenderbotOnnxConfig', ], 'tokenization_blenderbot': ['BlenderbotTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = ['BlenderbotTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = [ 'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotForCausalLM', 'BlenderbotForConditionalGeneration', 'BlenderbotModel', 'BlenderbotPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Dict = [ 'TFBlenderbotForConditionalGeneration', 'TFBlenderbotModel', 'TFBlenderbotPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Dict = [ 'FlaxBlenderbotForConditionalGeneration', 'FlaxBlenderbotModel', 'FlaxBlenderbotPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
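The module above wires its public names through transformers' _LazyModule so that the heavy PyTorch/TensorFlow/Flax submodules are only imported on demand. A hypothetical, stripped-down illustration of that pattern (not the actual _LazyModule implementation):

import importlib


class LazyModule:
    """Defer importing submodules until one of their attributes is first accessed."""

    def __init__(self, package: str, import_structure: dict):
        self._package = package
        # invert {submodule: [attr, ...]} into {attr: submodule}
        self._attr_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr: str):
        submodule = self._attr_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self._package!r} has no attribute {attr!r}")
        # the real import only happens here, on first access
        module = importlib.import_module(f"{self._package}.{submodule}")
        return getattr(module, attr)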
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A : Dict = { 'configuration_xlm_roberta': [ 'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMRobertaConfig', 'XLMRobertaOnnxConfig', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Union[str, Any] = ['XLMRobertaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = ['XLMRobertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[Any] = [ 'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMRobertaForCausalLM', 'XLMRobertaForMaskedLM', 'XLMRobertaForMultipleChoice', 'XLMRobertaForQuestionAnswering', 'XLMRobertaForSequenceClassification', 'XLMRobertaForTokenClassification', 'XLMRobertaModel', 'XLMRobertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = [ 'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLMRobertaForCausalLM', 'TFXLMRobertaForMaskedLM', 'TFXLMRobertaForMultipleChoice', 'TFXLMRobertaForQuestionAnswering', 'TFXLMRobertaForSequenceClassification', 'TFXLMRobertaForTokenClassification', 'TFXLMRobertaModel', 'TFXLMRobertaPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = [ 'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'FlaxXLMRobertaForMaskedLM', 'FlaxXLMRobertaForCausalLM', 'FlaxXLMRobertaForMultipleChoice', 'FlaxXLMRobertaForQuestionAnswering', 'FlaxXLMRobertaForSequenceClassification', 'FlaxXLMRobertaForTokenClassification', 'FlaxXLMRobertaModel', 'FlaxXLMRobertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from 
.modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while len(str(f2)) < n:
        f1, f2 = f2, f1 + f2
        index += 1
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
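Small sanity checks for the function above:

# The Fibonacci sequence runs 1, 1, 2, 3, 5, 8, 13, ..., so the first
# two-digit term (13) is at index 7 and the first three-digit term (144)
# is at index 12.
assert solution(2) == 7
assert solution(3) == 12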
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A : Optional[int] = { 'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'], 'feature_extraction_whisper': ['WhisperFeatureExtractor'], 'processing_whisper': ['WhisperProcessor'], 'tokenization_whisper': ['WhisperTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = ['WhisperTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : str = [ 'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel', 'WhisperForAudioClassification', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = [ 'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = [ 'FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
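The converter can also be driven directly from Python; the paths below are placeholders, not from the source:

# Hypothetical paths; convert_tf_checkpoint_to_pytorch is defined above.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/t5/model.ckpt",  # placeholder
    config_file="/path/to/t5/config.json",        # placeholder
    pytorch_dump_path="/path/to/output",          # placeholder
)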
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( 'stable diffusion controlnet', '0.22.0', 'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.', standard_warn=False, stacklevel=3, )
from collections import defaultdict
from math import gcd


def solution(limit: int = 1500000) -> int:
    """Count perimeters <= limit that admit exactly one integral right triangle."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
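Why the loop works: Euclid's formula generates every primitive Pythagorean triple as a = m^2 - n^2, b = 2mn, c = m^2 + n^2 for coprime m > n of opposite parity, giving perimeter 2m(m + n); non-primitive triples are counted by stepping through multiples of that perimeter. A quick check at a small limit:

# Up to 48 every achievable perimeter admits exactly one right triangle:
# 12 (3,4,5), 24, 30 (5,12,13), 36, 40 (8,15,17) and 48.
assert solution(48) == 6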
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
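A hedged usage sketch; upstream this dataclass is the datasets library's LanguageModeling task template (import path assumed):

from datasets.tasks import LanguageModeling  # assumed upstream location

template = LanguageModeling(text_column="content")
print(template.column_mapping)  # {'content': 'text'}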
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: A : Optional[Any] = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class __A( unittest.TestCase ): def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=400 , _snake_case=None , _snake_case=True , _snake_case=True , _snake_case=None , ) -> Tuple: '''simple docstring''' __a = size if size is not None else {'''height''': 20, '''width''': 20} __a = parent __a = batch_size __a = num_channels __a = image_size __a = min_resolution __a = max_resolution __a = size __a = do_normalize __a = do_convert_rgb __a = [512, 1_024, 2_048, 4_096] __a = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16} def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg''' __a = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ).convert('''RGB''' ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class __A( a , unittest.TestCase ): snake_case_ = PixaStructImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = PixaStructImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_snake_case , '''do_normalize''' ) ) self.assertTrue(hasattr(_snake_case , '''do_convert_rgb''' ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = self.image_processor_tester.prepare_dummy_image() __a = self.image_processing_class(**self.image_processor_dict ) __a = 2_048 __a = image_processor(_snake_case , return_tensors='''pt''' , max_patches=_snake_case ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , Image.Image ) # Test not batched input __a = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input __a = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_snake_case ).flattened_patches self.assertEqual( encoded_images.shape , 
(1, max_patch, expected_hidden_dim) , ) # Test batched __a = image_processor( _snake_case , return_tensors='''pt''' , max_patches=_snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , Image.Image ) # Test not batched input __a = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 __a = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(_snake_case ): __a = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_snake_case ).flattened_patches __a = '''Hello''' __a = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_snake_case , header_text=_snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __a = image_processor( _snake_case , return_tensors='''pt''' , max_patches=_snake_case , header_text=_snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , np.ndarray ) __a = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input __a = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __a = image_processor( _snake_case , return_tensors='''pt''' , max_patches=_snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , torch.Tensor ) # Test not batched input __a = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input __a = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __a = image_processor( _snake_case , return_tensors='''pt''' , max_patches=_snake_case ).flattened_patches self.assertEqual( 
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class __A( a , unittest.TestCase ): snake_case_ = PixaStructImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = PixaStructImageProcessingTester(self , num_channels=4 ) __a = 3 @property def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_snake_case , '''do_normalize''' ) ) self.assertTrue(hasattr(_snake_case , '''do_convert_rgb''' ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , Image.Image ) # Test not batched input __a = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input __a = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __a = image_processor( _snake_case , return_tensors='''pt''' , max_patches=_snake_case ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save per-example token lengths so the datasets can batch dynamically."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
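Because fire exposes the function as a CLI, it can also be called directly; the tokenizer name and data directory below are placeholders, and the example assumes the script's local utils module is importable:

# Hypothetical invocation; both arguments are placeholders.
save_len_file("t5-small", "/path/to/data_dir", consider_target=True)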
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class __A( a ): snake_case_ = (CMStochasticIterativeScheduler,) snake_case_ = 1_0 def SCREAMING_SNAKE_CASE_ ( self , **_snake_case ) -> List[str]: '''simple docstring''' __a = { '''num_train_timesteps''': 201, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } config.update(**_snake_case ) return config def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = 10 __a = self.get_scheduler_config() __a = self.scheduler_classes[0](**_snake_case ) scheduler.set_timesteps(_snake_case ) __a = scheduler.timesteps[0] __a = scheduler.timesteps[1] __a = self.dummy_sample __a = 0.1 * sample __a = scheduler.step(_snake_case , _snake_case , _snake_case ).prev_sample __a = scheduler.step(_snake_case , _snake_case , _snake_case ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**_snake_case ) __a = 1 scheduler.set_timesteps(_snake_case ) __a = scheduler.timesteps __a = torch.manual_seed(0 ) __a = self.dummy_model() __a = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(_snake_case ): # 1. scale model input __a = scheduler.scale_model_input(_snake_case , _snake_case ) # 2. predict noise residual __a = model(_snake_case , _snake_case ) # 3. predict previous sample x_t-1 __a = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample __a = pred_prev_sample __a = torch.sum(torch.abs(_snake_case ) ) __a = torch.mean(torch.abs(_snake_case ) ) assert abs(result_sum.item() - 192.7614 ) < 1E-2 assert abs(result_mean.item() - 0.2510 ) < 1E-3 def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**_snake_case ) __a = [106, 0] scheduler.set_timesteps(timesteps=_snake_case ) __a = scheduler.timesteps __a = torch.manual_seed(0 ) __a = self.dummy_model() __a = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input __a = scheduler.scale_model_input(_snake_case , _snake_case ) # 2. predict noise residual __a = model(_snake_case , _snake_case ) # 3. 
predict previous sample x_t-1 __a = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample __a = pred_prev_sample __a = torch.sum(torch.abs(_snake_case ) ) __a = torch.mean(torch.abs(_snake_case ) ) assert abs(result_sum.item() - 347.6357 ) < 1E-2 assert abs(result_mean.item() - 0.4527 ) < 1E-3 def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**_snake_case ) __a = [39, 30, 12, 15, 0] with self.assertRaises(_snake_case , msg='''`timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**_snake_case ) __a = [39, 30, 12, 1, 0] __a = len(_snake_case ) with self.assertRaises(_snake_case , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=_snake_case , timesteps=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**_snake_case ) __a = [scheduler.config.num_train_timesteps] with self.assertRaises( _snake_case , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=_snake_case )
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = (1 - _cos) / 2 __a = 1 - _cos __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = (1 + _cos) / 2 __a = -1 - _cos __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = _sin / 2 __a = 0 __a = -ba __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 1 - alpha __a = -2 * _cos __a = 1 + alpha __a = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = 1 + alpha * big_a __a = -2 * _cos __a = 1 - alpha * big_a __a = 1 + alpha / big_a __a = -2 * _cos __a = 1 - alpha / big_a __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = (big_a + 1) - (big_a - 1) * _cos __a = (big_a + 1) + (big_a - 1) * _cos __a = (big_a - 1) - (big_a + 1) * _cos __a = (big_a - 1) + (big_a + 1) * _cos __a = 2 * sqrt(a__ ) * alpha __a = big_a * (pmc + aaa) __a = 2 * big_a * mpc __a = big_a * (pmc - aaa) __a = ppmc + aaa __a = -2 * pmpc __a = ppmc - aaa __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = (big_a + 1) - (big_a - 1) * _cos __a = (big_a + 1) + (big_a - 1) * _cos __a = (big_a - 1) - (big_a + 1) * _cos __a = (big_a - 1) + (big_a + 1) * _cos __a = 2 * sqrt(a__ ) * alpha __a = big_a * (ppmc + aaa) __a = -2 * big_a * pmpc __a = big_a * (ppmc - aaa) __a = pmc + aaa __a = 2 * mpc __a = pmc - aaa __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
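Upstream (TheAlgorithms audio_filters) these biquad designers are named make_lowpass, make_highpass, make_bandpass, make_allpass, make_peak, make_lowshelf and make_highshelf, and the returned IIRFilter exposes a per-sample process() method. A hedged usage sketch under those assumed names:

from math import sin, tau

samplerate = 48_000
filt = make_lowpass(1_000, samplerate)  # assumed upstream name of the first designer above

# Push a 5 kHz sine through the 1 kHz low-pass: well above cutoff, the
# settled output amplitude should be far below 1.0.
out = [filt.process(sin(tau * 5_000 * n / samplerate)) for n in range(480)]
print(max(abs(sample) for sample in out[240:]))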
import datasets from .evaluate import evaluate A : str = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n' A : Any = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n' A : str = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A( datasets.Metric ): def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': { '''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ), }, '''references''': { '''id''': datasets.Value('''string''' ), '''answers''': datasets.features.Sequence( { '''text''': datasets.Value('''string''' ), '''answer_start''': datasets.Value('''int32''' ), } ), }, } ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , 
_snake_case ) -> Optional[Any]: '''simple docstring''' __a = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions} __a = [ { '''paragraphs''': [ { '''qas''': [ { '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']], '''id''': ref['''id'''], } for ref in references ] } ] } ] __a = evaluate(dataset=_snake_case , predictions=_snake_case ) return score
6
def __lowerCAmelCase ( a__ , a__ , a__ ) -> list: __a = len(a__ ) __a = [[0] * n for i in range(a__ )] for i in range(a__ ): __a = y_points[i] for i in range(2 , a__ ): for j in range(a__ , a__ ): __a = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
6
1
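# A hedged usage sketch for the interpolation routine above, assuming it keeps
# its upstream name and behavior as TheAlgorithms' neville_interpolate: the
# sample points lie on the line y = x + 5, so interpolating at x = 5 should
# yield 10.0 as the first element of the returned list (the full q table is the
# second element).
value, table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
print(value)  # 10.0 -- q[n - 1][n - 1], the interpolated value at x = 5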
from typing import Dict from .base import GenericTensor, Pipeline class __A( a ): def SCREAMING_SNAKE_CASE_ ( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Optional[Any]: '''simple docstring''' if tokenize_kwargs is None: __a = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' ) __a = truncation __a = tokenize_kwargs __a = {} if return_tensors is not None: __a = return_tensors return preprocess_params, {}, postprocess_params def SCREAMING_SNAKE_CASE_ ( self , _snake_case , **_snake_case ) -> Dict[str, GenericTensor]: '''simple docstring''' __a = self.framework __a = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = self.model(**_snake_case ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=False ) -> Optional[int]: '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self , *_snake_case , **_snake_case ) -> Any: '''simple docstring''' return super().__call__(*_snake_case , **_snake_case )
6
from __future__ import annotations import time from collections.abc import Sequence from random import randint from matplotlib import pyplot as plt def __lowerCAmelCase ( a__ , a__ , a__ ) -> tuple[int | None, int | None, float]: if not arr: return None, None, 0 if low == high: return low, high, arr[low] __a = (low + high) // 2 __a , __a , __a = max_subarray(a__ , a__ , a__ ) __a , __a , __a = max_subarray(a__ , mid + 1 , a__ ) __a , __a , __a = max_cross_sum(a__ , a__ , a__ , a__ ) if left_sum >= right_sum and left_sum >= cross_sum: return left_low, left_high, left_sum elif right_sum >= left_sum and right_sum >= cross_sum: return right_low, right_high, right_sum return cross_left, cross_right, cross_sum def __lowerCAmelCase ( a__ , a__ , a__ , a__ ) -> tuple[int, int, float]: __a , __a = float('''-inf''' ), -1 __a , __a = float('''-inf''' ), -1 __a = 0 for i in range(a__ , low - 1 , -1 ): summ += arr[i] if summ > left_sum: __a = summ __a = i __a = 0 for i in range(mid + 1 , high + 1 ): summ += arr[i] if summ > right_sum: __a = summ __a = i return max_left, max_right, (left_sum + right_sum) def __lowerCAmelCase ( a__ ) -> float: __a = [randint(1 , a__ ) for _ in range(a__ )] __a = time.time() max_subarray(a__ , 0 , input_size - 1 ) __a = time.time() return end - start def __lowerCAmelCase ( ) -> None: __a = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000] __a = [time_max_subarray(a__ ) for input_size in input_sizes] print('''No of Inputs\t\tTime Taken''' ) for input_size, runtime in zip(a__ , a__ ): print(a__ , '''\t\t''' , a__ ) plt.plot(a__ , a__ ) plt.xlabel('''Number of Inputs''' ) plt.ylabel('''Time taken in seconds''' ) plt.show() if __name__ == "__main__": from doctest import testmod testmod()
6
1
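# A small worked example for the divide-and-conquer maximum-subarray routine
# above, assuming its upstream name max_subarray: the classic test array has
# its best subarray [4, -1, 2, 1], with sum 6, at indices 3..6.
arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
low, high, best = max_subarray(arr, 0, len(arr) - 1)
print(low, high, best)  # 3 6 6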
import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging A : Optional[int] = logging.get_logger(__name__) A : Dict = {'vocab_file': 'vocab.txt'} A : Union[str, Any] = { 'vocab_file': { 'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt', 'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt', }, } A : int = { 'facebook/esm2_t6_8M_UR50D': 1_0_2_4, 'facebook/esm2_t12_35M_UR50D': 1_0_2_4, } def __lowerCAmelCase ( a__ ) -> Dict: with open(a__ , '''r''' ) as f: __a = f.read().splitlines() return [l.strip() for l in lines] class __A( a ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ['''input_ids''', '''attention_mask'''] def __init__( self , _snake_case , _snake_case="<unk>" , _snake_case="<cls>" , _snake_case="<pad>" , _snake_case="<mask>" , _snake_case="<eos>" , **_snake_case , ) -> str: '''simple docstring''' super().__init__(**_snake_case ) __a = load_vocab_file(_snake_case ) __a = dict(enumerate(self.all_tokens ) ) __a = {tok: ind for ind, tok in enumerate(self.all_tokens )} __a = unk_token __a = cls_token __a = pad_token __a = mask_token __a = eos_token __a = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> str: '''simple docstring''' return self._id_to_token.get(_snake_case , self.unk_token ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int: '''simple docstring''' return self._token_to_id.get(_snake_case , self._token_to_id.get(self.unk_token ) ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , **_snake_case ) -> Optional[int]: '''simple docstring''' return text.split() def SCREAMING_SNAKE_CASE_ ( self , _snake_case=False ) -> str: '''simple docstring''' return len(self._id_to_token ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' return {token: i for i, token in enumerate(self.all_tokens )} def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int: '''simple docstring''' return self._token_to_id.get(_snake_case , self._token_to_id.get(self.unk_token ) ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> str: '''simple docstring''' return self._id_to_token.get(_snake_case , self.unk_token ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> List[int]: '''simple docstring''' __a = [self.cls_token_id] __a = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None , _snake_case = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] __a = [1] + ([0] * len(_snake_case )) + [1] if token_ids_a is not None: mask += [0] * len(_snake_case ) + [1] return mask def SCREAMING_SNAKE_CASE_ ( 
self , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = os.path.join(_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' ) with open(_snake_case , '''w''' ) as f: f.write('''\n'''.join(self.all_tokens ) ) return (vocab_file,) @property def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' return self.get_vocab_size(with_added_tokens=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = False ) -> int: '''simple docstring''' return super()._add_tokens(_snake_case , special_tokens=_snake_case )
6
import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __A( a , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class __A( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = ort.SessionOptions() __a = False return options def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __a = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_snake_case ) __a = '''A red cat sitting on a park bench''' __a = np.random.RandomState(0 ) __a = pipe( prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=10 , generator=_snake_case , output_type='''np''' , ) __a = output.images __a = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) __a = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __a = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) __a = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_snake_case , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_snake_case ) __a = '''A red cat sitting on a park bench''' __a = np.random.RandomState(0 ) __a = pipe( prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=20 , generator=_snake_case , output_type='''np''' , ) __a = output.images __a = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) __a = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
6
1
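# A sketch of the special-token layout built by the ESM tokenizer earlier in
# this row (the ids below are illustrative placeholders, not real vocabulary
# entries): a single sequence becomes <cls> a <eos>, and a pair becomes
# <cls> a <eos> b <eos>, since the ESM vocabulary has no separate <sep> token.
cls_id, eos_id = 0, 2                                   # assumed ids, for illustration only
ids_a, ids_b = [5, 6, 7], [8, 9]
single = [cls_id] + ids_a + [eos_id]                    # <cls> a <eos>
pair = [cls_id] + ids_a + [eos_id] + ids_b + [eos_id]   # <cls> a <eos> b <eos>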
A : Dict = 2_5_6 # Modulus to hash a string A : Tuple = 1_0_0_0_0_0_3 def __lowerCAmelCase ( a__ , a__ ) -> bool: __a = len(a__ ) __a = len(a__ ) if p_len > t_len: return False __a = 0 __a = 0 __a = 1 # Calculating the hash of pattern and substring of text for i in range(a__ ): __a = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus __a = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue __a = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash __a = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def __lowerCAmelCase ( ) -> None: __a = '''abc1abc12''' __a = '''alskfjaldsabc1abc1abc12k23adsfabcabc''' __a = '''alskfjaldsk23adsfabcabc''' assert rabin_karp(a__ , a__ ) and not rabin_karp(a__ , a__ ) # Test 2) __a = '''ABABX''' __a = '''ABABZABABYABABX''' assert rabin_karp(a__ , a__ ) # Test 3) __a = '''AAAB''' __a = '''ABAAAAAB''' assert rabin_karp(a__ , a__ ) # Test 4) __a = '''abcdabcy''' __a = '''abcxabcdabxabcdabcdabcy''' assert rabin_karp(a__ , a__ ) # Test 5) __a = '''Lü''' __a = '''Lüsai''' assert rabin_karp(a__ , a__ ) __a = '''Lue''' assert not rabin_karp(a__ , a__ ) print('''Success.''' ) if __name__ == "__main__": test_rabin_karp()
6
from math import ceil def __lowerCAmelCase ( a__ = 1001 ) -> int: __a = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): __a = 2 * i + 1 __a = 2 * i __a = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: A : List[Any] = int(sys.argv[1]) print(solution(n)) except ValueError: print('Invalid entry - please enter a number')
6
1
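# A hedged sketch of the O(1) rolling-hash update the Rabin-Karp search in this
# row relies on (the helper name roll is illustrative, not from the row): drop
# the leading character's contribution, shift the window, append the next one.
alphabet_size, modulus = 256, 1_000_003
def roll(window_hash: int, old: str, new: str, power: int) -> int:
    # power is alphabet_size ** (window_len - 1) % modulus, precomputed once
    return ((window_hash - ord(old) * power) * alphabet_size + ord(new)) % modulus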
import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __A( a , unittest.TestCase ): snake_case_ = KandinskyVaaPriorPipeline snake_case_ = ['''prompt'''] snake_case_ = ['''prompt''', '''negative_prompt'''] snake_case_ = [ '''num_images_per_prompt''', '''generator''', '''num_inference_steps''', '''latents''', '''negative_prompt''', '''guidance_scale''', '''output_type''', '''return_dict''', ] snake_case_ = False @property def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' return 32 @property def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' return 32 @property def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' return self.time_input_dim @property def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' return self.time_input_dim * 4 @property def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' return 100 @property def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' torch.manual_seed(0 ) __a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModelWithProjection(_snake_case ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) __a = { '''num_attention_heads''': 2, '''attention_head_dim''': 12, '''embedding_dim''': self.text_embedder_hidden_size, '''num_layers''': 1, } __a = PriorTransformer(**_snake_case ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __a = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) __a = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __a = CLIPVisionModelWithProjection(_snake_case ) return model @property def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = CLIPImageProcessor( crop_size=224 , do_center_crop=_snake_case , do_normalize=_snake_case , do_resize=_snake_case , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , ) return image_processor def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = self.dummy_prior __a = self.dummy_image_encoder __a = self.dummy_text_encoder __a = self.dummy_tokenizer __a = self.dummy_image_processor __a = UnCLIPScheduler( variance_type='''fixed_small_log''' , 
prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=_snake_case , clip_sample_range=10.0 , ) __a = { '''prior''': prior, '''image_encoder''': image_encoder, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''scheduler''': scheduler, '''image_processor''': image_processor, } return components def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=0 ) -> Dict: '''simple docstring''' if str(_snake_case ).startswith('''mps''' ): __a = torch.manual_seed(_snake_case ) else: __a = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) __a = { '''prompt''': '''horse''', '''generator''': generator, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = '''cpu''' __a = self.get_dummy_components() __a = self.pipeline_class(**_snake_case ) __a = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) __a = pipe(**self.get_dummy_inputs(_snake_case ) ) __a = output.image_embeds __a = pipe( **self.get_dummy_inputs(_snake_case ) , return_dict=_snake_case , )[0] __a = image[0, -10:] __a = image_from_tuple[0, -10:] assert image.shape == (1, 32) __a = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = torch_device == '''cpu''' __a = True __a = False self._test_inference_batch_single_identical( test_max_difference=_snake_case , relax_max_difference=_snake_case , test_mean_pixel_difference=_snake_case , ) @skip_mps def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = torch_device == '''cpu''' __a = False self._test_attention_slicing_forward_pass( test_max_difference=_snake_case , test_mean_pixel_difference=_snake_case , )
6
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __A( a ): snake_case_ = ['''image_processor''', '''tokenizer'''] snake_case_ = '''ChineseCLIPImageProcessor''' snake_case_ = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> Tuple: '''simple docstring''' __a = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _snake_case , ) __a = kwargs.pop('''feature_extractor''' ) __a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(_snake_case , _snake_case ) __a = self.image_processor def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Optional[Any]: '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __a = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case ) if images is not None: __a = self.image_processor(_snake_case , return_tensors=_snake_case , **_snake_case ) if text is not None and images is not None: __a = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_snake_case ) , tensor_type=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> str: '''simple docstring''' return self.tokenizer.batch_decode(*_snake_case , **_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> Dict: '''simple docstring''' return self.tokenizer.decode(*_snake_case , **_snake_case ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = self.tokenizer.model_input_names __a = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _snake_case , ) return self.image_processor_class
6
1
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 A : Union[str, Any] = { 'return_dict': False, 'output_hidden_states': True, 'output_attentions': True, 'torchscript': True, 'torch_dtype': 'float16', 'use_bfloat16': True, 'tf_legacy_loss': True, 'pruned_heads': {'a': 1}, 'tie_word_embeddings': False, 'is_decoder': True, 'cross_attention_hidden_size': 1_2_8, 'add_cross_attention': True, 'tie_encoder_decoder': True, 'max_length': 5_0, 'min_length': 3, 'do_sample': True, 'early_stopping': True, 'num_beams': 3, 'num_beam_groups': 3, 'diversity_penalty': 0.5, 'temperature': 2.0, 'top_k': 1_0, 'top_p': 0.7, 'typical_p': 0.2, 'repetition_penalty': 0.8, 'length_penalty': 0.8, 'no_repeat_ngram_size': 5, 'encoder_no_repeat_ngram_size': 5, 'bad_words_ids': [1, 2, 3], 'num_return_sequences': 3, 'chunk_size_feed_forward': 5, 'output_scores': True, 'return_dict_in_generate': True, 'forced_bos_token_id': 2, 'forced_eos_token_id': 3, 'remove_invalid_values': True, 'architectures': ['BertModel'], 'finetuning_task': 'translation', 'id2label': {0: 'label'}, 'label2id': {'label': '0'}, 'tokenizer_class': 'BertTokenizerFast', 'prefix': 'prefix', 'bos_token_id': 6, 'pad_token_id': 7, 'eos_token_id': 8, 'sep_token_id': 9, 'decoder_start_token_id': 1_0, 'exponential_decay_length_penalty': (5, 1.01), 'suppress_tokens': [0, 1], 'begin_suppress_tokens': 2, 'task_specific_params': {'translation': 'some_params'}, 'problem_type': 'regression', } @is_staging_test class __A( unittest.TestCase ): @classmethod def SCREAMING_SNAKE_CASE_ ( cls ) -> Optional[int]: '''simple docstring''' __a = TOKEN HfFolder.save_token(_snake_case ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls ) -> Any: '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-config''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-config''' ) except HTTPError: pass def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('''test-config''' , use_auth_token=self._token ) __a = BertConfig.from_pretrained(F"""{USER}/test-config""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-config''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_snake_case , repo_id='''test-config''' , push_to_hub=_snake_case , use_auth_token=self._token ) __a = BertConfig.from_pretrained(F"""{USER}/test-config""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , 
num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token ) __a = BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( _snake_case , repo_id='''valid_org/test-config-org''' , push_to_hub=_snake_case , use_auth_token=self._token ) __a = BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' CustomConfig.register_for_auto_class() __a = CustomConfig(attribute=42 ) config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} ) __a = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=_snake_case ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' ) self.assertEqual(new_config.attribute , 42 ) class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated __a = c.n_embd + 1 # int __a = c.resid_pdrop + 1.0 # float __a = not c.scale_attn_weights # bool __a = c.summary_type + '''foo''' # str c.update_from_string( F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" ) self.assertEqual(_snake_case , c.n_embd , '''mismatch for key: n_embd''' ) self.assertEqual(_snake_case , c.resid_pdrop , '''mismatch for key: resid_pdrop''' ) self.assertEqual(_snake_case , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' ) self.assertEqual(_snake_case , c.summary_type , '''mismatch for key: summary_type''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = PretrainedConfig() __a = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to add in config_common_kwargs above. 
self.assertListEqual( _snake_case , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] ) __a = [key for key, value in config_common_kwargs.items() if value == getattr(_snake_case , _snake_case )] if len(_snake_case ) > 0: raise ValueError( '''The following keys are set with the default values in''' ''' `test_configuration_common.config_common_kwargs` pick another value for them:''' F""" {', '.join(_snake_case )}.""" ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' with self.assertRaises(_snake_case ): # config is in subfolder, the following should not work without specifying the subfolder __a = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' ) __a = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' ) self.assertIsNotNone(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = mock.Mock() __a = 500 __a = {} __a = HTTPError __a = {} # Download this model to make sure it's in the cache. __a = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_snake_case ) as mock_head: __a = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # This checks that we did call the fake head request mock_head.assert_called() def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = BertConfig.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = AutoConfig.from_pretrained('''bert-base-cased''' ) __a = ['''config.4.0.0.json'''] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(_snake_case ) __a = 2 json.dump(configuration.to_dict() , open(os.path.join(_snake_case , '''config.4.0.0.json''' ) , '''w''' ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 __a = AutoConfig.from_pretrained(_snake_case ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 __a = ['''config.42.0.0.json'''] __a = 768 configuration.save_pretrained(_snake_case ) shutil.move(os.path.join(_snake_case , '''config.4.0.0.json''' ) , os.path.join(_snake_case , '''config.42.0.0.json''' ) ) __a = AutoConfig.from_pretrained(_snake_case ) self.assertEqual(new_configuration.hidden_size , 768 ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = '''hf-internal-testing/test-two-configs''' import transformers as new_transformers __a = '''v4.0.0''' __a , __a = new_transformers.models.auto.AutoConfig.from_pretrained( _snake_case , return_unused_kwargs=_snake_case ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` is not kept in the kwargs by mistake. self.assertDictEqual(_snake_case , {} ) # Testing an older version by monkey-patching the version in the module where it's used. import transformers as old_transformers __a = '''v3.0.0''' __a = old_transformers.models.auto.AutoConfig.from_pretrained(_snake_case ) self.assertEqual(old_configuration.hidden_size , 768 )
6
from __future__ import annotations import typing from collections import Counter def __lowerCAmelCase ( a__ ) -> typing.Counter[int]: __a = Counter() for base in range(1 , max_perimeter + 1 ): for perpendicular in range(a__ , max_perimeter + 1 ): __a = (base * base + perpendicular * perpendicular) ** 0.5 if hypotenuse == int(a__ ): __a = int(base + perpendicular + hypotenuse ) if perimeter > max_perimeter: continue triplets[perimeter] += 1 return triplets def __lowerCAmelCase ( a__ = 1000 ) -> int: __a = pythagorean_triple(a__ ) return triplets.most_common(1 )[0][0] if __name__ == "__main__": print(F"Perimeter {solution()} has maximum solutions")
6
1
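# A quick sanity check for the Project Euler 39 solver above, assuming its
# helper and entry point keep their upstream names pythagorean_triple and
# solution: perimeter 120 admits three right triangles, and 840 is the
# published answer for the limit of 1000.
print(pythagorean_triple(200)[120])  # 3 -- (30, 40, 50), (20, 48, 52), (24, 45, 51)
print(solution(1000))                # 840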
import sys A : List[str] = ( '73167176531330624919225119674426574742355349194934' '96983520312774506326239578318016984801869478851843' '85861560789112949495459501737958331952853208805511' '12540698747158523863050715693290963295227443043557' '66896648950445244523161731856403098711121722383113' '62229893423380308135336276614282806444486645238749' '30358907296290491560440772390713810515859307960866' '70172427121883998797908792274921901699720888093776' '65727333001053367881220235421809751254540594752243' '52584907711670556013604839586446706324415722155397' '53697817977846174064955149290862569321978468622482' '83972241375657056057490261407972968652414535100474' '82166370484403199890008895243450658541227588666881' '16427171479924442928230863465674813919123162824586' '17866458359124566529476545682848912883142607690042' '24219022671055626321111109370544217506941658960408' '07198403850962455444362981230987879927244284909188' '84580156166097919133875499200524063689912560717606' '05886116467109405077541002256983155200055935729725' '71636269561882670428252483600823257530420752963450' ) def __lowerCAmelCase ( a__ ) -> int: __a = 1 for digit in s: product *= int(a__ ) return product def __lowerCAmelCase ( a__ = N ) -> int: __a = -sys.maxsize - 1 __a = n[:13] __a = 13 while cur_index < len(a__ ) - 13: if int(n[cur_index] ) >= int(substr[0] ): __a = substr[1:] + n[cur_index] cur_index += 1 else: __a = max(a__ , str_eval(a__ ) ) __a = n[cur_index : cur_index + 13] cur_index += 13 return largest_product if __name__ == "__main__": print(F"{solution() = }")
6
# flake8: noqa # Lint as: python3 A : Optional[Any] = [ 'VerificationMode', 'Version', 'disable_progress_bar', 'enable_progress_bar', 'is_progress_bar_enabled', 'experimental', ] from .info_utils import VerificationMode from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled from .version import Version from .experimental import experimental
6
1
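# A minimal check of the digit-product helper used by the Project Euler 8
# solver above; the row's own solver calls it str_eval, so that name is
# assumed here.
print(str_eval("123"))  # 6, i.e. 1 * 2 * 3
print(str_eval("978"))  # 504, the product 9 * 7 * 8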
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING A : List[Any] = logging.get_logger(__name__) A : Union[str, Any] = { 'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class __A( a ): snake_case_ = '''deformable_detr''' snake_case_ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self , _snake_case=True , _snake_case=None , _snake_case=3 , _snake_case=300 , _snake_case=1_024 , _snake_case=6 , _snake_case=1_024 , _snake_case=8 , _snake_case=6 , _snake_case=1_024 , _snake_case=8 , _snake_case=0.0 , _snake_case=True , _snake_case="relu" , _snake_case=256 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1.0 , _snake_case=True , _snake_case=False , _snake_case="sine" , _snake_case="resnet50" , _snake_case=True , _snake_case=False , _snake_case=4 , _snake_case=4 , _snake_case=4 , _snake_case=False , _snake_case=300 , _snake_case=False , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=1 , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=0.1 , _snake_case=0.25 , _snake_case=False , **_snake_case , ) -> Any: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) __a = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_snake_case , _snake_case ): __a = backbone_config.get('''model_type''' ) __a = CONFIG_MAPPING[backbone_model_type] __a = config_class.from_dict(_snake_case ) __a = use_timm_backbone __a = backbone_config __a = num_channels __a = num_queries __a = max_position_embeddings __a = d_model __a = encoder_ffn_dim __a = encoder_layers __a = encoder_attention_heads __a = decoder_ffn_dim __a = decoder_layers __a = decoder_attention_heads __a = dropout __a = attention_dropout __a = activation_dropout __a = activation_function __a = init_std __a = init_xavier_std __a = encoder_layerdrop __a = auxiliary_loss __a = position_embedding_type __a = backbone __a = use_pretrained_backbone __a = dilation # deformable attributes __a = num_feature_levels __a = encoder_n_points __a = decoder_n_points __a = two_stage __a = two_stage_num_proposals __a = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher __a = class_cost __a = bbox_cost __a = giou_cost # Loss coefficients __a = mask_loss_coefficient __a = dice_loss_coefficient __a = bbox_loss_coefficient __a = giou_loss_coefficient __a = eos_coefficient __a = focal_alpha __a = disable_custom_kernels super().__init__(is_encoder_decoder=_snake_case , **_snake_case ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' return self.d_model def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: __a = self.backbone_config.to_dict() __a = self.__class__.model_type return output
6
from typing import Dict from .base import GenericTensor, Pipeline class __A( a ): def SCREAMING_SNAKE_CASE_ ( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Optional[Any]: '''simple docstring''' if tokenize_kwargs is None: __a = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' ) __a = truncation __a = tokenize_kwargs __a = {} if return_tensors is not None: __a = return_tensors return preprocess_params, {}, postprocess_params def SCREAMING_SNAKE_CASE_ ( self , _snake_case , **_snake_case ) -> Dict[str, GenericTensor]: '''simple docstring''' __a = self.framework __a = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = self.model(**_snake_case ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=False ) -> Optional[int]: '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self , *_snake_case , **_snake_case ) -> Any: '''simple docstring''' return super().__call__(*_snake_case , **_snake_case )
6
1
def __lowerCAmelCase ( a__ ) -> bool: return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6''') ) def __lowerCAmelCase ( a__ ) -> bool: __a = credit_card_number __a = 0 __a = len(a__ ) - 2 for i in range(a__ , -1 , -2 ): # double the value of every second digit __a = int(cc_number[i] ) digit *= 2 # If doubling of a number results in a two digit number # i.e greater than 9(e.g., 6 × 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single digit number. if digit > 9: digit %= 10 digit += 1 __a = cc_number[:i] + str(a__ ) + cc_number[i + 1 :] total += digit # Sum up the remaining digits for i in range(len(a__ ) - 1 , -1 , -2 ): total += int(cc_number[i] ) return total % 10 == 0 def __lowerCAmelCase ( a__ ) -> bool: __a = F"""{credit_card_number} is an invalid credit card number because""" if not credit_card_number.isdigit(): print(F"""{error_message} it has nonnumerical characters.""" ) return False if not 13 <= len(a__ ) <= 16: print(F"""{error_message} of its length.""" ) return False if not validate_initial_digits(a__ ): print(F"""{error_message} of its first two digits.""" ) return False if not luhn_validation(a__ ): print(F"""{error_message} it fails the Luhn check.""" ) return False print(F"""{credit_card_number} is a valid credit card number.""" ) return True if __name__ == "__main__": import doctest doctest.testmod() validate_credit_card_number('4111111111111111') validate_credit_card_number('32323')
6
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : List[str] = logging.get_logger(__name__) A : Optional[int] = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class __A( a ): snake_case_ = '''levit''' def __init__( self , _snake_case=224 , _snake_case=3 , _snake_case=3 , _snake_case=2 , _snake_case=1 , _snake_case=16 , _snake_case=[128, 256, 384] , _snake_case=[4, 8, 12] , _snake_case=[4, 4, 4] , _snake_case=[16, 16, 16] , _snake_case=0 , _snake_case=[2, 2, 2] , _snake_case=[2, 2, 2] , _snake_case=0.02 , **_snake_case , ) -> Optional[Any]: '''simple docstring''' super().__init__(**_snake_case ) __a = image_size __a = num_channels __a = kernel_size __a = stride __a = padding __a = hidden_sizes __a = num_attention_heads __a = depths __a = key_dim __a = drop_path_rate __a = patch_size __a = attention_ratio __a = mlp_ratio __a = initializer_range __a = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class __A( a ): snake_case_ = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> float: '''simple docstring''' return 1E-4
6
1
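# A self-contained Luhn pass mirroring the validator above (a sketch; the
# helper name luhn_total is assumed, not from the row). Doubling every second
# digit from the right and folding two-digit products back to one digit gives
# a total of 30 for "4111111111111111"; 30 is divisible by 10, so the number
# validates.
def luhn_total(number: str) -> int:
    total = 0
    for i, ch in enumerate(reversed(number)):
        digit = int(ch)
        if i % 2 == 1:       # every second digit from the right
            digit *= 2
            if digit > 9:    # e.g. 12 -> 1 + 2 = 3, same as subtracting 9
                digit -= 9
        total += digit
    return total
print(luhn_total("4111111111111111") % 10 == 0)  # True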
import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class __A( unittest.TestCase ): def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=4 , ) -> Tuple: '''simple docstring''' __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_attention_mask __a = use_token_type_ids __a = use_labels __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = type_vocab_size __a = type_sequence_label_size __a = initializer_range __a = num_choices def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = None if self.use_attention_mask: __a = random_attention_mask([self.batch_size, self.seq_length] ) __a = None if self.use_token_type_ids: __a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a , __a = config_and_inputs __a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a , __a = config_and_inputs __a = True __a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __A( a , unittest.TestCase ): snake_case_ = True snake_case_ = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = FlaxBertModelTester(self ) @slow def 
SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a = FlaxBertModel.from_pretrained('''bert-base-cased''' ) __a = model(np.ones((1, 1) ) ) self.assertIsNotNone(_snake_case )
6
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel A : int = '0.12' # assumed parallelism: 8 @require_flax @is_staging_test class __A( unittest.TestCase ): @classmethod def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]: '''simple docstring''' __a = TOKEN HfFolder.save_token(_snake_case ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]: '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) __a = FlaxBertModel(_snake_case ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) __a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" ) __a = flatten_dict(unfreeze(model.params ) ) __a = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __a = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_snake_case , repo_id='''test-model-flax''' , push_to_hub=_snake_case , use_auth_token=self._token ) __a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" ) __a = flatten_dict(unfreeze(model.params ) ) __a = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __a = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) __a = FlaxBertModel(_snake_case ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) __a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) __a = flatten_dict(unfreeze(model.params ) ) __a = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __a = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( _snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_snake_case , use_auth_token=self._token ) __a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) __a = flatten_dict(unfreeze(model.params ) ) __a = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __a = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" ) def __lowerCAmelCase ( a__ , a__ ) -> str: __a = True __a = 
flatten_dict(modela.params ) __a = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4: __a = False return models_are_equal @require_flax class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) __a = FlaxBertModel(_snake_case ) __a = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_snake_case , _snake_case ) ) with self.assertRaises(_snake_case ): __a = FlaxBertModel.from_pretrained(_snake_case ) __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case ) self.assertTrue(check_models_equal(_snake_case , _snake_case ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) __a = FlaxBertModel(_snake_case ) __a = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_snake_case , _snake_case ) , max_shard_size='''10KB''' ) with self.assertRaises(_snake_case ): __a = FlaxBertModel.from_pretrained(_snake_case ) __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case ) self.assertTrue(check_models_equal(_snake_case , _snake_case ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a = '''bert''' __a = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(_snake_case ): __a = FlaxBertModel.from_pretrained(_snake_case ) __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case ) self.assertIsNotNone(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = '''bert''' __a = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(_snake_case ): __a = FlaxBertModel.from_pretrained(_snake_case ) __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case ) self.assertIsNotNone(_snake_case )
6
1
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES A : Dict = logging.get_logger(__name__) A : List[str] = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) A : List[Any] = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) A : Union[str, Any] = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) A : Optional[int] = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) A : Optional[Any] = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) A : Any = OrderedDict( [ ('vision-encoder-decoder', 
'FlaxVisionEncoderDecoderModel'), ] ) A : Optional[Any] = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) A : List[Any] = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) A : Optional[int] = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) A : Optional[int] = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) A : Optional[Any] = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) A : Union[str, Any] = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) A : Tuple = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) A : List[Any] = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) A : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) A : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) A : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) A : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) A : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) A : List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) A : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) A : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) A : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) A : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) A : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) A : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) A : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) A : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class __A( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_MAPPING A : Union[str, Any] = auto_class_update(FlaxAutoModel) class __A( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_PRETRAINING_MAPPING A : List[str] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class __A( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING A : Dict = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class __A( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_MASKED_LM_MAPPING A : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class __A( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING A : List[Any] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class __A( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING A : Optional[int] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class __A( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING A : Union[str, Any] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class __A( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING A : int = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class __A( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING A : Union[str, Any] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class __A( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING A : Union[str, Any] = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction' ) class __A( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING A : Tuple = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class __A( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING A : str = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class __A( _BaseAutoModelClass ): snake_case_ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING A : str = 
auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
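The mappings above are consumed only through the auto classes that `auto_class_update` finalizes. A minimal usage sketch, not part of this module: it assumes `transformers` is installed with its Flax extras, and the checkpoint name is purely illustrative.

from transformers import AutoTokenizer, FlaxAutoModelForSequenceClassification

# The auto class reads the checkpoint's config, looks up its model_type in the
# lazy mapping, and instantiates the matching Flax head class.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-uncased")

inputs = tokenizer("dispatch happens on config.model_type", return_tensors="np")
logits = model(**inputs).logits  # shape: (batch_size, num_labels)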
6
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


A : Optional[Any] = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))

import dataclasses  # noqa
import io  # noqa
import itertools  # noqa
import json  # noqa
import os  # noqa
import unittest  # noqa
from copy import deepcopy  # noqa

from parameterized import parameterized  # noqa

from transformers import TrainingArguments, is_torch_available  # noqa
from transformers.deepspeed import is_deepspeed_available  # noqa
from transformers.file_utils import WEIGHTS_NAME  # noqa
from transformers.testing_utils import (  # noqa
    CaptureLogger,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import set_seed  # noqa


set_seed(4_2)

A : List[str] = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}

A : Optional[int] = 'zero2'
A : str = 'zero3'

A : Tuple = [ZEROa, ZEROa]


def __lowerCAmelCase ( a__ , a__ , a__ ) -> Tuple:
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    __a = parameterized.to_safe_name('''_'''.join(str(x ) for x in param.args ) )
    return F"""{func.__name__}_{param_based_name}"""


# Cartesian-product of zero stages with models to test
A : Union[str, Any] = list(itertools.product(stages, models.keys()))


@slow
@require_deepspeed
@require_torch_gpu
class __A( a ):
    @parameterized.expand(_snake_case , name_func=_snake_case )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Any:
        '''simple docstring'''
        self.run_and_check(
            stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )

    @require_torch_multi_gpu
    @parameterized.expand(_snake_case , name_func=_snake_case )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> int:
        '''simple docstring'''
        self.run_and_check(
            stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )

    @parameterized.expand(_snake_case , name_func=_snake_case )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> str:
        '''simple docstring'''
        self.run_and_check(
            stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )

    @require_torch_multi_gpu
    @parameterized.expand(_snake_case , name_func=_snake_case )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]:
        '''simple docstring'''
        self.run_and_check(
            stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Tuple:
        '''simple docstring'''
        pass

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 10 , _snake_case = True , _snake_case = True , _snake_case = True , ) -> Any:
        '''simple docstring'''
        __a = models[model]
        __a = self.run_trainer(
            stage=_snake_case , model_name=_snake_case , eval_steps=_snake_case , num_train_epochs=1 , distributed=_snake_case , fpaa=_snake_case , )
        self.do_checks(_snake_case )
        return output_dir

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 10 , _snake_case = 1 , _snake_case = True , _snake_case = True , ) -> Union[str, Any]:
        '''simple docstring'''
        __a = self.get_auto_remove_tmp_dir('''./xxx''' , after=_snake_case )
        __a = F"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(_snake_case )}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
        """.split()

        if fpaa:
            args.extend(['''--fp16'''] )

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        __a = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
        __a = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
        __a = self.get_launcher(_snake_case )

        __a = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(_snake_case , env=self.get_env() )

        return output_dir

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case=False ) -> List[str]:
        '''simple docstring'''
        __a = min(2 , get_gpu_count() ) if distributed else 1
        return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
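As a rough sketch of the command the helpers above assemble, with the GPU count and file paths chosen purely for illustration:

num_gpus = 2  # illustrative; the test uses min(2, get_gpu_count())
launcher = f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
script = ["examples/research_projects/wav2vec2/run_asr.py"]  # hypothetical path
args = ["--model_name_or_path", "patrickvonplaten/wav2vec2_tiny_random", "--fp16"]
ds_args = ["--deepspeed", "tests/deepspeed/ds_config_wav2vec2_zero2.json"]  # hypothetical path

# the test runs this full command asynchronously via execute_subprocess_async
cmd = launcher + script + args + ds_args
print(" ".join(cmd))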
6
1
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


A : Dict = logging.get_logger(__name__)


class __A( a ):
    snake_case_ = '''encoder-decoder'''
    snake_case_ = True

    def __init__( self , **_snake_case ) -> str:
        '''simple docstring'''
        super().__init__(**_snake_case )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        __a = kwargs.pop('''encoder''' )
        __a = encoder_config.pop('''model_type''' )
        __a = kwargs.pop('''decoder''' )
        __a = decoder_config.pop('''model_type''' )

        from ..auto.configuration_auto import AutoConfig

        __a = AutoConfig.for_model(_snake_case , **_snake_case )
        __a = AutoConfig.for_model(_snake_case , **_snake_case )
        __a = True

    @classmethod
    def SCREAMING_SNAKE_CASE_ ( cls , _snake_case , _snake_case , **_snake_case ) -> PretrainedConfig:
        '''simple docstring'''
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        __a = True
        __a = True

        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_snake_case )

    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
        '''simple docstring'''
        __a = copy.deepcopy(self.__dict__ )
        __a = self.encoder.to_dict()
        __a = self.decoder.to_dict()
        __a = self.__class__.model_type
        return output
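A minimal sketch of how this composite config (published as `EncoderDecoderConfig` in transformers) is typically built and serialized; the tiny BertConfig values are illustrative.

from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37)
decoder_config = BertConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37)

# from_encoder_decoder_configs flips the decoder flags before composing
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention

# to_dict() nests the two sub-configs and records the composite model_type
serialized = config.to_dict()
assert serialized["model_type"] == "encoder-decoder"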
6
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __A( a , a , unittest.TestCase ): snake_case_ = AutoencoderKL snake_case_ = '''sample''' snake_case_ = 1E-2 @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = 4 __a = 3 __a = (32, 32) __a = floats_tensor((batch_size, num_channels) + sizes ).to(_snake_case ) return {"sample": image} @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' return (3, 32, 32) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' return (3, 32, 32) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = { '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } __a = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' pass @unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a , __a = self.prepare_init_args_and_inputs_for_common() __a = self.model_class(**_snake_case ) model.to(_snake_case ) assert not model.is_gradient_checkpointing and model.training __a = model(**_snake_case ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __a = torch.randn_like(_snake_case ) __a = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __a = self.model_class(**_snake_case ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(_snake_case ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __a = model_a(**_snake_case ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __a = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) __a = dict(model.named_parameters() ) __a = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a , __a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_snake_case ) __a = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' ) __a = model.to(_snake_case ) model.eval() if torch_device == "mps": __a = torch.manual_seed(0 ) else: __a = torch.Generator(device=_snake_case ).manual_seed(0 ) __a = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __a = image.to(_snake_case ) with torch.no_grad(): __a = model(_snake_case , sample_posterior=_snake_case , generator=_snake_case ).sample __a = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": __a = torch.tensor( [ -4.0_078E-01, -3.8_323E-04, -1.2_681E-01, -1.1_462E-01, 2.0_095E-01, 1.0_893E-01, -8.8_247E-02, -3.0_361E-01, -9.8_644E-03, ] ) elif torch_device == "cpu": __a = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: __a = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(_snake_case , _snake_case , rtol=1E-2 ) ) @slow class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={'_'.join([str(_snake_case ) for s in shape] )}.npy""" def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 , _snake_case=(4, 3, 512, 512) , _snake_case=False ) -> Any: '''simple docstring''' __a = torch.floataa if fpaa else torch.floataa __a = torch.from_numpy(load_hf_numpy(self.get_file_format(_snake_case , _snake_case ) ) ).to(_snake_case ).to(_snake_case ) return image def SCREAMING_SNAKE_CASE_ ( self , _snake_case="CompVis/stable-diffusion-v1-4" , _snake_case=False ) -> Optional[Any]: '''simple docstring''' __a = '''fp16''' if fpaa else None __a = torch.floataa if fpaa else torch.floataa __a = AutoencoderKL.from_pretrained( _snake_case , subfolder='''vae''' , torch_dtype=_snake_case , revision=_snake_case , ) model.to(_snake_case ).eval() return model def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 ) -> Tuple: '''simple docstring''' if torch_device == "mps": return torch.manual_seed(_snake_case ) return torch.Generator(device=_snake_case ).manual_seed(_snake_case ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 
0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> List[Any]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case ) __a = self.get_generator(_snake_case ) with torch.no_grad(): __a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample assert sample.shape == image.shape __a = sample[-1, -2:, -2:, :2].flatten().float().cpu() __a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(_snake_case , _snake_case , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Tuple: '''simple docstring''' __a = self.get_sd_vae_model(fpaa=_snake_case ) __a = self.get_sd_image(_snake_case , fpaa=_snake_case ) __a = self.get_generator(_snake_case ) with torch.no_grad(): __a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample assert sample.shape == image.shape __a = sample[-1, -2:, :2, -2:].flatten().float().cpu() __a = torch.tensor(_snake_case ) assert torch_all_close(_snake_case , _snake_case , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case ) with torch.no_grad(): __a = model(_snake_case ).sample assert sample.shape == image.shape __a = sample[-1, -2:, -2:, :2].flatten().float().cpu() __a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(_snake_case , _snake_case , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) ) with torch.no_grad(): __a = model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] __a = sample[-1, -2:, :2, -2:].flatten().cpu() __a = torch.tensor(_snake_case ) assert torch_all_close(_snake_case , _snake_case , atol=1E-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = self.get_sd_vae_model(fpaa=_snake_case ) __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case ) with torch.no_grad(): __a = 
model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] __a = sample[-1, -2:, :2, -2:].flatten().float().cpu() __a = torch.tensor(_snake_case ) assert torch_all_close(_snake_case , _snake_case , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Union[str, Any]: '''simple docstring''' __a = self.get_sd_vae_model(fpaa=_snake_case ) __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case ) with torch.no_grad(): __a = model.decode(_snake_case ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __a = model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_snake_case , _snake_case , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) ) with torch.no_grad(): __a = model.decode(_snake_case ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __a = model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_snake_case , _snake_case , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case ) __a = self.get_generator(_snake_case ) with torch.no_grad(): __a = model.encode(_snake_case ).latent_dist __a = dist.sample(generator=_snake_case ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __a = sample[0, -1, -3:, -3:].flatten().cpu() __a = torch.tensor(_snake_case ) __a = 3E-3 if torch_device != '''mps''' else 1E-2 assert torch_all_close(_snake_case , _snake_case , atol=_snake_case )
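For orientation, a standalone encode/decode round trip with the same toy AutoencoderKL configuration the unit tests above construct; this is a sketch, not part of the test suite.

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL(
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    block_out_channels=[32, 64],
    latent_channels=4,
)
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    posterior = vae.encode(x).latent_dist  # diagonal Gaussian over latents
    z = posterior.sample(generator=torch.manual_seed(0))
    recon = vae.decode(z).sample  # back to image space
assert recon.shape == x.shape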
6
1
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class __A( a ): snake_case_ = 42 snake_case_ = jnp.floataa snake_case_ = True def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' super().setup() __a = nn.Dense(5 , dtype=self.dtype ) def __call__( self , *_snake_case , **_snake_case ) -> str: '''simple docstring''' __a = super().__call__(*_snake_case , **_snake_case ) __a = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class __A( a ): snake_case_ = FlaxBigBirdForNaturalQuestionsModule def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ , a__ ) -> Dict: def cross_entropy(a__ , a__ , a__=None ): __a = logits.shape[-1] __a = (labels[..., None] == jnp.arange(a__ )[None]).astype('''f4''' ) __a = jax.nn.log_softmax(a__ , axis=-1 ) __a = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: __a = reduction(a__ ) return loss __a = partial(a__ , reduction=jnp.mean ) __a = cross_entropy(a__ , a__ ) __a = cross_entropy(a__ , a__ ) __a = cross_entropy(a__ , a__ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class __A: snake_case_ = "google/bigbird-roberta-base" snake_case_ = 3_0_0_0 snake_case_ = 1_0_5_0_0 snake_case_ = 1_2_8 snake_case_ = 3 snake_case_ = 1 snake_case_ = 5 # tx_args snake_case_ = 3E-5 snake_case_ = 0.0 snake_case_ = 2_0_0_0_0 snake_case_ = 0.0_095 snake_case_ = "bigbird-roberta-natural-questions" snake_case_ = "training-expt" snake_case_ = "data/nq-training.jsonl" snake_case_ = "data/nq-validation.jsonl" def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' os.makedirs(self.base_dir , exist_ok=_snake_case ) __a = os.path.join(self.base_dir , self.save_dir ) __a = self.batch_size_per_device * jax.device_count() @dataclass class __A: snake_case_ = 42 snake_case_ = 4_0_9_6 # no dynamic padding on TPUs def __call__( self , _snake_case ) -> int: '''simple docstring''' __a = self.collate_fn(_snake_case ) __a = jax.tree_util.tree_map(_snake_case , _snake_case ) return batch def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int: '''simple docstring''' __a , __a = self.fetch_inputs(features['''input_ids'''] ) __a = { '''input_ids''': jnp.array(_snake_case , dtype=jnp.intaa ), '''attention_mask''': jnp.array(_snake_case , dtype=jnp.intaa ), '''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa ), '''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ), '''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ), } return batch def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Tuple: '''simple docstring''' __a = [self._fetch_inputs(_snake_case ) for ids in input_ids] return zip(*_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]: '''simple docstring''' __a = [1 for _ in range(len(_snake_case ) )] while len(_snake_case ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def __lowerCAmelCase ( a__ , a__ , a__=None ) -> List[str]: if seed is not None: __a = 
dataset.shuffle(seed=a__ ) for i in range(len(a__ ) // batch_size ): __a = dataset[i * batch_size : (i + 1) * batch_size] yield dict(a__ ) @partial(jax.pmap , axis_name='''batch''' ) def __lowerCAmelCase ( a__ , a__ , **a__ ) -> Any: def loss_fn(a__ ): __a = model_inputs.pop('''start_labels''' ) __a = model_inputs.pop('''end_labels''' ) __a = model_inputs.pop('''pooled_labels''' ) __a = state.apply_fn(**a__ , params=a__ , dropout_rng=a__ , train=a__ ) __a , __a , __a = outputs return state.loss_fn( a__ , a__ , a__ , a__ , a__ , a__ , ) __a , __a = jax.random.split(a__ ) __a = jax.value_and_grad(a__ ) __a , __a = grad_fn(state.params ) __a = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' ) __a = jax.lax.pmean(a__ , '''batch''' ) __a = state.apply_gradients(grads=a__ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name='''batch''' ) def __lowerCAmelCase ( a__ , **a__ ) -> str: __a = model_inputs.pop('''start_labels''' ) __a = model_inputs.pop('''end_labels''' ) __a = model_inputs.pop('''pooled_labels''' ) __a = state.apply_fn(**a__ , params=state.params , train=a__ ) __a , __a , __a = outputs __a = state.loss_fn(a__ , a__ , a__ , a__ , a__ , a__ ) __a = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' ) return metrics class __A( train_state.TrainState ): snake_case_ = struct.field(pytree_node=a ) @dataclass class __A: snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = None def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case=None ) -> Union[str, Any]: '''simple docstring''' __a = model.params __a = TrainState.create( apply_fn=model.__call__ , params=_snake_case , tx=_snake_case , loss_fn=_snake_case , ) if ckpt_dir is not None: __a , __a , __a , __a , __a = restore_checkpoint(_snake_case , _snake_case ) __a = { '''lr''': args.lr, '''init_lr''': args.init_lr, '''warmup_steps''': args.warmup_steps, '''num_train_steps''': num_train_steps, '''weight_decay''': args.weight_decay, } __a , __a = build_tx(**_snake_case ) __a = train_state.TrainState( step=_snake_case , apply_fn=model.__call__ , params=_snake_case , tx=_snake_case , opt_state=_snake_case , ) __a = args __a = data_collator __a = lr __a = params __a = jax_utils.replicate(_snake_case ) return state def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Tuple: '''simple docstring''' __a = self.args __a = len(_snake_case ) // args.batch_size __a = jax.random.PRNGKey(0 ) __a = jax.random.split(_snake_case , jax.device_count() ) for epoch in range(args.max_epochs ): __a = jnp.array(0 , dtype=jnp.floataa ) __a = get_batched_dataset(_snake_case , args.batch_size , seed=_snake_case ) __a = 0 for batch in tqdm(_snake_case , total=_snake_case , desc=F"""Running EPOCH-{epoch}""" ): __a = self.data_collator(_snake_case ) __a , __a , __a = self.train_step_fn(_snake_case , _snake_case , **_snake_case ) running_loss += jax_utils.unreplicate(metrics['''loss'''] ) i += 1 if i % args.logging_steps == 0: __a = jax_utils.unreplicate(state.step ) __a = running_loss.item() / i __a = self.scheduler_fn(state_step - 1 ) __a = self.evaluate(_snake_case , _snake_case ) __a = { '''step''': state_step.item(), '''eval_loss''': eval_loss.item(), '''tr_loss''': tr_loss, '''lr''': lr.item(), } tqdm.write(str(_snake_case ) ) self.logger.log(_snake_case , commit=_snake_case ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self 
, _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = get_batched_dataset(_snake_case , self.args.batch_size ) __a = len(_snake_case ) // self.args.batch_size __a = jnp.array(0 , dtype=jnp.floataa ) __a = 0 for batch in tqdm(_snake_case , total=_snake_case , desc='''Evaluating ... ''' ): __a = self.data_collator(_snake_case ) __a = self.val_step_fn(_snake_case , **_snake_case ) running_loss += jax_utils.unreplicate(metrics['''loss'''] ) i += 1 return running_loss / i def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> List[str]: '''simple docstring''' __a = jax_utils.unreplicate(_snake_case ) print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=''' ... ''' ) self.model_save_fn(_snake_case , params=state.params ) with open(os.path.join(_snake_case , '''opt_state.msgpack''' ) , '''wb''' ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(_snake_case , '''args.joblib''' ) ) joblib.dump(self.data_collator , os.path.join(_snake_case , '''data_collator.joblib''' ) ) with open(os.path.join(_snake_case , '''training_state.json''' ) , '''w''' ) as f: json.dump({'''step''': state.step.item()} , _snake_case ) print('''DONE''' ) def __lowerCAmelCase ( a__ , a__ ) -> List[Any]: print(F"""RESTORING CHECKPOINT FROM {save_dir}""" , end=''' ... ''' ) with open(os.path.join(a__ , '''flax_model.msgpack''' ) , '''rb''' ) as f: __a = from_bytes(state.params , f.read() ) with open(os.path.join(a__ , '''opt_state.msgpack''' ) , '''rb''' ) as f: __a = from_bytes(state.opt_state , f.read() ) __a = joblib.load(os.path.join(a__ , '''args.joblib''' ) ) __a = joblib.load(os.path.join(a__ , '''data_collator.joblib''' ) ) with open(os.path.join(a__ , '''training_state.json''' ) , '''r''' ) as f: __a = json.load(a__ ) __a = training_state['''step'''] print('''DONE''' ) return params, opt_state, step, args, data_collator def __lowerCAmelCase ( a__ , a__ , a__ , a__ ) -> Optional[Any]: __a = num_train_steps - warmup_steps __a = optax.linear_schedule(init_value=a__ , end_value=a__ , transition_steps=a__ ) __a = optax.linear_schedule(init_value=a__ , end_value=1e-7 , transition_steps=a__ ) __a = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> str: def weight_decay_mask(a__ ): __a = traverse_util.flatten_dict(a__ ) __a = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()} return traverse_util.unflatten_dict(a__ ) __a = scheduler_fn(a__ , a__ , a__ , a__ ) __a = optax.adamw(learning_rate=a__ , weight_decay=a__ , mask=a__ ) return tx, lr
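The three-way loss above averages start-, end- and pooled-category terms, each a one-hot cross-entropy. A standalone sketch of that building block:

import jax
import jax.numpy as jnp

def cross_entropy(logits, labels, reduction=jnp.mean):
    # one-hot encode the integer labels against the class axis
    num_classes = logits.shape[-1]
    one_hot = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
    log_probs = jax.nn.log_softmax(logits, axis=-1)
    loss = -jnp.sum(one_hot * log_probs, axis=-1)
    return reduction(loss)

logits = jnp.array([[2.0, 0.5, -1.0], [0.1, 0.2, 3.0]])
labels = jnp.array([0, 2])
print(cross_entropy(logits, labels))  # small, since both rows agree with their labels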
6
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends


if is_bsa_available():
    import bsa
    from bsa import BeautifulSoup


A : str = logging.get_logger(__name__)


class __A( a ):
    def __init__( self , **_snake_case ) -> List[Any]:
        '''simple docstring'''
        requires_backends(self , ['''bs4'''] )
        super().__init__(**_snake_case )

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
        '''simple docstring'''
        __a = []
        __a = []
        __a = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            __a = parent.find_all(child.name , recursive=_snake_case )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(_snake_case ) else next(i for i, s in enumerate(_snake_case , 1 ) if s is child ) )
            __a = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]:
        '''simple docstring'''
        __a = BeautifulSoup(_snake_case , '''html.parser''' )

        __a = []
        __a = []
        __a = []
        for element in html_code.descendants:
            if type(_snake_case ) == bsa.element.NavigableString:
                if type(element.parent ) != bsa.element.Tag:
                    continue

                __a = html.unescape(_snake_case ).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(_snake_case )

                __a , __a = self.xpath_soup(_snake_case )
                stringaxtag_seq.append(_snake_case )
                stringaxsubs_seq.append(_snake_case )

        if len(_snake_case ) != len(_snake_case ):
            raise ValueError('''Number of doc strings and xtags does not correspond''' )
        if len(_snake_case ) != len(_snake_case ):
            raise ValueError('''Number of doc strings and xsubs does not correspond''' )

        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]:
        '''simple docstring'''
        __a = ''''''
        for tagname, subs in zip(_snake_case , _snake_case ):
            xpath += F"""/{tagname}"""
            if subs != 0:
                xpath += F"""[{subs}]"""
        return xpath

    def __call__( self , _snake_case ) -> BatchFeature:
        '''simple docstring'''
        __a = False

        # Check that strings has a valid type
        if isinstance(_snake_case , _snake_case ):
            __a = True
        elif isinstance(_snake_case , (list, tuple) ):
            if len(_snake_case ) == 0 or isinstance(html_strings[0] , _snake_case ):
                __a = True

        if not valid_strings:
            raise ValueError(
                '''HTML strings must be of type `str`, `List[str]` (batch of examples), '''
                F"""but is of type {type(_snake_case )}.""" )

        __a = bool(isinstance(_snake_case , (list, tuple) ) and (isinstance(html_strings[0] , _snake_case )) )

        if not is_batched:
            __a = [html_strings]

        # Get nodes + xpaths
        __a = []
        __a = []
        for html_string in html_strings:
            __a , __a , __a = self.get_three_from_single(_snake_case )
            nodes.append(_snake_case )
            __a = []
            for node, tag_list, sub_list in zip(_snake_case , _snake_case , _snake_case ):
                __a = self.construct_xpath(_snake_case , _snake_case )
                xpath_strings.append(_snake_case )
            xpaths.append(_snake_case )

        # return as Dict
        __a = {'''nodes''': nodes, '''xpaths''': xpaths}
        __a = BatchFeature(data=_snake_case , tensor_type=_snake_case )

        return encoded_inputs
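A hedged usage sketch for the extractor above; the public class name `MarkupLMFeatureExtractor` is assumed from the transformers library, and `beautifulsoup4` must be installed.

from transformers import MarkupLMFeatureExtractor

html_string = "<html><body><h1>Title</h1><p>Some text.</p></body></html>"
feature_extractor = MarkupLMFeatureExtractor()

# returns every visible text node plus the xpath reconstructed from its parents
encoding = feature_extractor(html_string)
print(encoding["nodes"])   # [['Title', 'Some text.']]
print(encoding["xpaths"])  # [['/html/body/h1', '/html/body/p']]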
6
1
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def __lowerCAmelCase ( a__ ) -> Dict:
    return 1 / (1 + np.exp(-z ))


def __lowerCAmelCase ( a__ , a__ ) -> Tuple:
    return (-y * np.log(a__ ) - (1 - y) * np.log(1 - h )).mean()


def __lowerCAmelCase ( a__ , a__ , a__ ) -> List[Any]:
    __a = np.dot(a__ , a__ )
    return np.sum(y * scores - np.log(1 + np.exp(a__ ) ) )


def __lowerCAmelCase ( a__ , a__ , a__ , a__=7_0000 ) -> Tuple:
    __a = np.zeros(x.shape[1] )

    for iterations in range(a__ ):
        __a = np.dot(a__ , a__ )
        __a = sigmoid_function(a__ )
        __a = np.dot(x.T , h - y ) / y.size
        __a = theta - alpha * gradient  # updating the weights
        __a = np.dot(a__ , a__ )
        __a = sigmoid_function(a__ )
        __a = cost_function(a__ , a__ )
        if iterations % 100 == 0:
            print(F"""loss: {j} \t""" )  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    A : List[Any] = datasets.load_iris()
    A : Any = iris.data[:, :2]
    A : int = (iris.target != 0) * 1
    A : Dict = 0.1
    A : str = logistic_reg(alpha, x, y, max_iterations=7_0_0_0_0)
    print('theta: ', theta)  # printing the theta i.e. our weights vector

    def __lowerCAmelCase ( a__ ) -> Union[str, Any]:
        return sigmoid_function(
            np.dot(a__ , a__ )
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(1_0, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    ((A) , (A)) : Tuple = (x[:, 0].min(), x[:, 0].max())
    ((A) , (A)) : int = (x[:, 1].min(), x[:, 1].max())
    ((A) , (A)) : List[str] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
    A : int = np.c_[xxa.ravel(), xxa.ravel()]
    A : Any = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
    plt.legend()
    plt.show()
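A quick numeric sanity check of the sigmoid and binary cross-entropy used above; a sketch with illustrative values, separate from the iris demo.

import numpy as np

z = np.array([-2.0, 0.0, 2.0])
h = 1 / (1 + np.exp(-z))
print(h.round(3))  # [0.119 0.5   0.881], symmetric around 0.5

y = np.array([0.0, 1.0, 1.0])
# cross-entropy is small where h agrees with y and largest at h = 0.5
print((-y * np.log(h) - (1 - y) * np.log(1 - h)).mean().round(3))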
6
def __lowerCAmelCase ( a__ , a__ ) -> float:
    def get_matched_characters(a__ , a__ ) -> str:
        __a = []
        __a = min(len(_stra ) , len(_stra ) ) // 2
        for i, l in enumerate(_stra ):
            __a = int(max(0 , i - limit ) )
            __a = int(min(i + limit + 1 , len(_stra ) ) )
            if l in _stra[left:right]:
                matched.append(a__ )
                __a = F"""{_stra[0:_stra.index(a__ )]} {_stra[_stra.index(a__ ) + 1:]}"""
        return "".join(a__ )

    # matching characters
    __a = get_matched_characters(a__ , a__ )
    __a = get_matched_characters(a__ , a__ )
    __a = len(a__ )

    # transposition
    __a = (
        len([(ca, ca) for ca, ca in zip(a__ , a__ ) if ca != ca] ) // 2
    )

    if not match_count:
        __a = 0.0
    else:
        __a = (
            1
            / 3
            * (
                match_count / len(a__ )
                + match_count / len(a__ )
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    __a = 0
    for ca, ca in zip(stra[:4] , stra[:4] ):
        if ca == ca:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler('hello', 'world'))
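Worked example for the final print: matching 'hello' against 'world' with window limit min(5, 5) // 2 = 2 yields a single matched character ('l'; the two 'o's sit more than two positions apart), so match_count = 1 and transpositions = 0. That gives jaro = (1/5 + 1/5 + 1/1) / 3 ≈ 0.4667, and since the strings share no common prefix the Winkler bonus is zero, so the script prints 0.4666666666666666.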
6
1
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed A : List[str] = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"{bindir}/../../examples/pytorch/translation"): from run_translation import main # noqa set_seed(4_2) A : Any = 'sshleifer/student_marian_en_ro_6_1' A : int = 'sshleifer/tiny-mbart' @require_torch class __A( a ): def SCREAMING_SNAKE_CASE_ ( self , _snake_case=False , _snake_case=None , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , ) -> Any: '''simple docstring''' __a = self.run_trainer( eval_steps=1 , max_len=12 , model_name=_snake_case , num_train_epochs=1 , distributed=_snake_case , extra_args_str=_snake_case , predict_with_generate=_snake_case , do_train=_snake_case , do_eval=_snake_case , do_predict=_snake_case , ) __a = TrainerState.load_from_json(os.path.join(_snake_case , '''trainer_state.json''' ) ).log_history if not do_eval: return __a = [log for log in logs if '''eval_loss''' in log.keys()] __a = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats __a = eval_metrics[-1] assert isinstance(last_step_stats['''eval_bleu'''] , _snake_case ) assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' self.run_seqaseq_quick() @require_torch_multi_gpu def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' self.run_seqaseq_quick(distributed=_snake_case ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' self.run_seqaseq_quick(distributed=_snake_case ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' self.run_seqaseq_quick(distributed=_snake_case , extra_args_str='''--sharded_ddp simple''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' self.run_seqaseq_quick(distributed=_snake_case , extra_args_str='''--sharded_ddp simple --fp16''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' self.run_seqaseq_quick(distributed=_snake_case , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=_snake_case ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' self.run_seqaseq_quick( distributed=_snake_case , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=_snake_case ) @require_apex @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' 
self.run_seqaseq_quick(distributed=_snake_case , extra_args_str='''--fp16 --fp16_backend=apex''' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=_snake_case , extra_args_str='''--fp16 --fp16_backend=apex''' ) @parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Any: '''simple docstring''' __a = { # test with the default log_level - should be info and thus log info once '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1}, # test with high log_level and log_level_replica - should be quiet on all processes '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0}, } __a = experiments[experiment_id] __a = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False} __a = '''Running training''' with CaptureStderr() as cl: self.run_seqaseq_quick(**_snake_case , extra_args_str=data['''extra_args_str'''] ) __a = len(re.findall(_snake_case , cl.err ) ) self.assertEqual(_snake_case , data['''n_matches'''] ) @slow def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = self.run_trainer( eval_steps=2 , max_len=128 , model_name=_snake_case , learning_rate=3E-4 , num_train_epochs=10 , distributed=_snake_case , ) # Check metrics __a = TrainerState.load_from_json(os.path.join(_snake_case , '''trainer_state.json''' ) ).log_history __a = [log for log in logs if '''eval_loss''' in log.keys()] __a = eval_metrics[0] __a = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['''eval_bleu'''] , _snake_case ) # test if do_predict saves generations and metrics __a = os.listdir(_snake_case ) __a = {os.path.basename(_snake_case ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' from transformers.training_args import OptimizerNames def train_and_return_metrics(_snake_case ) -> Tuple[int, float]: __a = '''--skip_memory_metrics 0''' __a = self.run_trainer( max_len=128 , model_name=_snake_case , learning_rate=3E-4 , num_train_epochs=1 , optim=_snake_case , distributed=_snake_case , extra_args_str=_snake_case , do_eval=_snake_case , do_predict=_snake_case , n_gpus_to_use=1 , ) # Check metrics __a = TrainerState.load_from_json(Path(_snake_case , '''trainer_state.json''' ) ).log_history __a = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 ) __a = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 ) __a = logs[0]['''train_loss'''] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss __a , __a , __a = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) __a , __a , __a = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) __a = gpu_alloc_mem_orig - gpu_alloc_mem_bnb __a = gpu_peak_mem_orig + gpu_alloc_mem_orig 
__a = gpu_peak_mem_bnb + gpu_alloc_mem_bnb __a = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings __a = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( _snake_case , _snake_case , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got''' F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and""" F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , ) self.assertGreater( _snake_case , _snake_case , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got''' F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and""" F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , ) self.assertEqual( _snake_case , _snake_case , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case = 3E-3 , _snake_case = "adafactor" , _snake_case = False , _snake_case = None , _snake_case = 0 , _snake_case = True , _snake_case = True , _snake_case = True , _snake_case = True , _snake_case = None , ) -> Optional[int]: '''simple docstring''' __a = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro''' __a = self.get_auto_remove_tmp_dir() __a = F""" --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(_snake_case )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(_snake_case )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX """.split() __a = F""" --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(_snake_case )} """.split() __a = ''' --do_predict '''.split() __a = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"""--optim {optim}""".split() if extra_args_str is not None: args 
+= extra_args_str.split() if distributed: if n_gpus_to_use is None: __a = get_gpu_count() __a = get_torch_dist_unique_port() __a = F""" -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py """.split() __a = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(_snake_case , env=self.get_env() ) else: __a = ['''run_translation.py'''] + args with patch.object(_snake_case , '''argv''' , _snake_case ): main() return output_dir
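A sketch of the distributed invocation the helper above assembles when distributed=True; the GPU count and port are illustrative stand-ins for get_gpu_count() and get_torch_dist_unique_port().

import sys

n_gpus_to_use, master_port = 2, 29500  # illustrative values
distributed_args = f"""
    -m torch.distributed.run
    --nproc_per_node={n_gpus_to_use}
    --master_port={master_port}
    examples/pytorch/translation/run_translation.py
""".split()

# executed via execute_subprocess_async with the test environment
cmd = [sys.executable] + distributed_args + ["--model_name_or_path", "sshleifer/student_marian_en_ro_6_1"]
print(" ".join(cmd))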
6
def __lowerCAmelCase ( a__ ) -> str:
    __a = []
    __a = set({'''(''', '''[''', '''{'''} )
    __a = set({''')''', ''']''', '''}'''} )
    __a = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}

    for i in range(len(a__ ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(a__ ) == 0 or (len(a__ ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(a__ ) == 0


def __lowerCAmelCase ( ) -> Dict:
    __a = input('''Enter sequence of brackets: ''' )
    if is_balanced(a__ ):
        print(a__ , '''is balanced''' )
    else:
        print(a__ , '''is not balanced''' )


if __name__ == "__main__":
    main()
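A few sanity checks for the matcher (a sketch; it assumes the function is bound as is_balanced, which is the name main() above calls):

assert is_balanced("{[()]}")      # properly nested
assert not is_balanced("{[(])}")  # ']' arrives while '(' is on top of the stack
assert is_balanced("")            # empty input leaves the stack empty
assert not is_balanced("(((")     # unmatched openers remain on the stack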
6
1
import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def __lowerCAmelCase ( a__ ) -> str: __a = int(a__ ) __a , __a , __a = t // 3600, (t // 60) % 60, t % 60 return F"""{h}:{m:02d}:{s:02d}""" if h != 0 else F"""{m:02d}:{s:02d}""" def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__=300 ) -> Any: # docstyle-ignore return F""" <div> {prefix} <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress> {label} </div> """ def __lowerCAmelCase ( a__ ) -> Any: __a = '''<table border="1" class="dataframe">\n''' html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += F""" <th>{i}</th>\n""" html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: __a = F"""{elt:.6f}""" if isinstance(a__ , a__ ) else str(a__ ) html_code += F""" <td>{elt}</td>\n""" html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class __A: snake_case_ = 5 snake_case_ = 0.2 def __init__( self , _snake_case , _snake_case = None , _snake_case = True , _snake_case = None , _snake_case = 300 , ) -> Any: '''simple docstring''' __a = total __a = '''''' if prefix is None else prefix __a = leave __a = parent __a = width __a = None __a = None __a = None def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = False , _snake_case = None ) -> Optional[Any]: '''simple docstring''' __a = value if comment is not None: __a = comment if self.last_value is None: __a = __a = time.time() __a = __a = value __a = __a = None __a = self.warmup __a = 1 self.update_bar(_snake_case ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 __a = time.time() __a = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. if value > self.start_value: __a = self.elapsed_time / (value - self.start_value) else: __a = None if value >= self.total: __a = self.total __a = None if not self.leave: self.close() elif self.average_time_per_item is not None: __a = self.average_time_per_item * (self.total - value) self.update_bar(_snake_case ) __a = value __a = current_time if self.average_time_per_item is None: __a = 1 else: __a = max(int(self.update_every / self.average_time_per_item ) , 1 ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=None ) -> Any: '''simple docstring''' __a = ''' ''' * (len(str(self.total ) ) - len(str(_snake_case ) )) + str(_snake_case ) if self.elapsed_time is None: __a = F"""[{spaced_value}/{self.total} : < :""" elif self.predicted_remaining is None: __a = F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}""" else: __a = ( F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <""" F""" {format_time(self.predicted_remaining )}""" ) self.label += F""", {1/self.average_time_per_item:.2f} it/s""" self.label += "]" if self.comment is None or len(self.comment ) == 0 else F""", {self.comment}]""" self.display() def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. 
self.parent.display() return if self.output is None: __a = disp.display(disp.HTML(self.html_code ) , display_id=_snake_case ) else: self.output.update(disp.HTML(self.html_code ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' if self.parent is None and self.output is not None: self.output.update(disp.HTML('''''' ) ) class __A( a ): def __init__( self , _snake_case , _snake_case=None ) -> Union[str, Any]: '''simple docstring''' super().__init__(_snake_case ) __a = None if column_names is None else [column_names] __a = None def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: __a = disp.display(disp.HTML(self.html_code ) , display_id=_snake_case ) else: self.output.update(disp.HTML(self.html_code ) ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Dict: '''simple docstring''' if self.inner_table is None: __a = [list(values.keys() ), list(values.values() )] else: __a = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(_snake_case ) __a = columns self.inner_table.append([values[c] for c in columns] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=None , _snake_case=300 ) -> Union[str, Any]: '''simple docstring''' __a = NotebookProgressBar(_snake_case , prefix=_snake_case , parent=self , width=_snake_case ) return self.child_bar def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = None self.display() class __A( a ): def __init__( self ) -> Optional[int]: '''simple docstring''' __a = None __a = None __a = False def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , **_snake_case ) -> Union[str, Any]: '''simple docstring''' __a = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step''' __a = 0 __a = 0 __a = [self.first_column] + ['''Training Loss'''] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append('''Validation Loss''' ) __a = NotebookTrainingTracker(state.max_steps , _snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , **_snake_case ) -> Union[str, Any]: '''simple docstring''' __a = int(state.epoch ) if int(state.epoch ) == state.epoch else F"""{state.epoch:.2f}""" self.training_tracker.update( state.global_step + 1 , comment=F"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , ) __a = False def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ) -> Optional[int]: '''simple docstring''' if not has_length(_snake_case ): return if self.prediction_bar is None: if self.training_tracker is not None: __a = self.training_tracker.add_child(len(_snake_case ) ) else: __a = NotebookProgressBar(len(_snake_case ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , **_snake_case ) -> Union[str, Any]: '''simple docstring''' if self.prediction_bar is not None: self.prediction_bar.close() __a = None def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case=None , 
**_snake_case ) -> int: '''simple docstring''' if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: __a = {'''Training Loss''': logs['''loss''']} # First column is necessarily Step sine we're not in epoch eval strategy __a = state.global_step self.training_tracker.write_line(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ) -> Tuple: '''simple docstring''' if self.training_tracker is not None: __a = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''} for log in reversed(state.log_history ): if "loss" in log: __a = log['''loss'''] break if self.first_column == "Epoch": __a = int(state.epoch ) else: __a = state.global_step __a = '''eval''' for k in metrics: if k.endswith('''_loss''' ): __a = re.sub(r'''\_loss$''' , '''''' , _snake_case ) __a = metrics.pop('''total_flos''' , _snake_case ) __a = metrics.pop('''epoch''' , _snake_case ) __a = metrics.pop(F"""{metric_key_prefix}_runtime""" , _snake_case ) __a = metrics.pop(F"""{metric_key_prefix}_samples_per_second""" , _snake_case ) __a = metrics.pop(F"""{metric_key_prefix}_steps_per_second""" , _snake_case ) __a = metrics.pop(F"""{metric_key_prefix}_jit_compilation_time""" , _snake_case ) for k, v in metrics.items(): if k == F"""{metric_key_prefix}_loss""": __a = v else: __a = k.split('''_''' ) __a = ''' '''.join([part.capitalize() for part in splits[1:]] ) __a = v self.training_tracker.write_line(_snake_case ) self.training_tracker.remove_child() __a = None # Evaluation takes a long time so we should force the next update. __a = True def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , **_snake_case ) -> Tuple: '''simple docstring''' self.training_tracker.update( state.global_step , comment=F"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=_snake_case ) __a = None
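Tiny sketch exercising the time formatter defined at the top of this file (assuming it is bound as format_time, the name the progress-bar code above calls):

print(format_time(3675))  # '1:01:15', hours shown when nonzero
print(format_time(75))    # '01:15', the hour field is dropped when zero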
6
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A : str = { 'configuration_blenderbot': [ 'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotConfig', 'BlenderbotOnnxConfig', ], 'tokenization_blenderbot': ['BlenderbotTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = ['BlenderbotTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = [ 'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotForCausalLM', 'BlenderbotForConditionalGeneration', 'BlenderbotModel', 'BlenderbotPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Dict = [ 'TFBlenderbotForConditionalGeneration', 'TFBlenderbotModel', 'TFBlenderbotPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Dict = [ 'FlaxBlenderbotForConditionalGeneration', 'FlaxBlenderbotModel', 'FlaxBlenderbotPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
1
from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __A( a ): def __init__( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM __a = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=_snake_case , scheduler=_snake_case ) @torch.no_grad() def __call__( self , _snake_case = 1 , _snake_case = None , _snake_case = 0.0 , _snake_case = 50 , _snake_case = None , _snake_case = "pil" , _snake_case = True , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' if isinstance(self.unet.config.sample_size , _snake_case ): __a = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: __a = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(_snake_case , _snake_case ) and len(_snake_case ) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(_snake_case )}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) __a = randn_tensor(_snake_case , generator=_snake_case , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(_snake_case ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output __a = self.unet(_snake_case , _snake_case ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 __a = self.scheduler.step( _snake_case , _snake_case , _snake_case , eta=_snake_case , use_clipped_model_output=_snake_case , generator=_snake_case ).prev_sample __a = (image / 2 + 0.5).clamp(0 , 1 ) __a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __a = self.numpy_to_pil(_snake_case ) if not return_dict: return (image,) return ImagePipelineOutput(images=_snake_case )
6
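The pipeline above delegates the actual denoising math to DDIMScheduler.step(). For reference, a minimal self-contained NumPy sketch of that update rule (Song et al., 2020) follows; the function name and the toy alpha-bar values are assumptions for illustration, not the diffusers API.

import numpy as np

def ddim_step(x_t, eps, alpha_bar_t, alpha_bar_prev, eta=0.0, rng=None):
    """One reverse step x_t -> x_{t-1}; eta=0 gives the deterministic DDIM update."""
    # Predicted clean sample from the model's noise estimate
    x0_pred = (x_t - np.sqrt(1.0 - alpha_bar_t) * eps) / np.sqrt(alpha_bar_t)
    # Stochasticity knob: eta in [0, 1] interpolates DDIM -> DDPM variance
    sigma = eta * np.sqrt((1.0 - alpha_bar_prev) / (1.0 - alpha_bar_t)) * np.sqrt(
        1.0 - alpha_bar_t / alpha_bar_prev
    )
    noise = rng.standard_normal(x_t.shape) if (rng is not None and eta > 0) else 0.0
    return (
        np.sqrt(alpha_bar_prev) * x0_pred
        + np.sqrt(1.0 - alpha_bar_prev - sigma**2) * eps
        + sigma * noise
    )

rng = np.random.default_rng(0)
x_t = rng.standard_normal((4,))
eps = rng.standard_normal((4,))
print(ddim_step(x_t, eps, alpha_bar_t=0.5, alpha_bar_prev=0.8, eta=0.0))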
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A : Dict = { 'configuration_xlm_roberta': [ 'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMRobertaConfig', 'XLMRobertaOnnxConfig', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Union[str, Any] = ['XLMRobertaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = ['XLMRobertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[Any] = [ 'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMRobertaForCausalLM', 'XLMRobertaForMaskedLM', 'XLMRobertaForMultipleChoice', 'XLMRobertaForQuestionAnswering', 'XLMRobertaForSequenceClassification', 'XLMRobertaForTokenClassification', 'XLMRobertaModel', 'XLMRobertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = [ 'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLMRobertaForCausalLM', 'TFXLMRobertaForMaskedLM', 'TFXLMRobertaForMultipleChoice', 'TFXLMRobertaForQuestionAnswering', 'TFXLMRobertaForSequenceClassification', 'TFXLMRobertaForTokenClassification', 'TFXLMRobertaModel', 'TFXLMRobertaPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = [ 'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'FlaxXLMRobertaForMaskedLM', 'FlaxXLMRobertaForCausalLM', 'FlaxXLMRobertaForMultipleChoice', 'FlaxXLMRobertaForQuestionAnswering', 'FlaxXLMRobertaForSequenceClassification', 'FlaxXLMRobertaForTokenClassification', 'FlaxXLMRobertaModel', 'FlaxXLMRobertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from 
.modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
1
import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor A : Dict = logging.get_logger(__name__) class __A( a ): def __init__( self , *_snake_case , **_snake_case ) -> None: '''simple docstring''' warnings.warn( '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use BeitImageProcessor instead.''' , _snake_case , ) super().__init__(*_snake_case , **_snake_case )
6
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A : Optional[int] = { 'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'], 'feature_extraction_whisper': ['WhisperFeatureExtractor'], 'processing_whisper': ['WhisperProcessor'], 'tokenization_whisper': ['WhisperTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = ['WhisperTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : str = [ 'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel', 'WhisperForAudioClassification', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = [ 'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = [ 'FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
1
class __A: def __init__( self , _snake_case = "" , _snake_case = False ) -> None: '''simple docstring''' __a = {} # A node will be a leaf if the tree contains its word __a = is_leaf __a = prefix def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> tuple[str, str, str]: '''simple docstring''' __a = 0 for q, w in zip(self.prefix , _snake_case ): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> None: '''simple docstring''' for word in words: self.insert(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> None: '''simple docstring''' if self.prefix == word: __a = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: __a = RadixNode(prefix=_snake_case , is_leaf=_snake_case ) else: __a = self.nodes[word[0]] __a , __a , __a = incoming_node.match( _snake_case ) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(_snake_case ) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: __a = remaining_prefix __a = self.nodes[matching_string[0]] __a = RadixNode(_snake_case , _snake_case ) __a = aux_node if remaining_word == "": __a = True else: self.nodes[matching_string[0]].insert(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> bool: '''simple docstring''' __a = self.nodes.get(word[0] , _snake_case ) if not incoming_node: return False else: __a , __a , __a = incoming_node.match( _snake_case ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> bool: '''simple docstring''' __a = self.nodes.get(word[0] , _snake_case ) if not incoming_node: return False else: __a , __a , __a = incoming_node.match( _snake_case ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(_snake_case ) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes ) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes ) == 1 and not self.is_leaf: __a = list(self.nodes.values() )[0] __a = merging_node.is_leaf self.prefix += merging_node.prefix __a = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif len(incoming_node.nodes ) > 1: __a = False # If there is 1 edge, we merge it with its child else: __a = list(incoming_node.nodes.values() )[0] __a = merging_node.is_leaf incoming_node.prefix += merging_node.prefix __a = merging_node.nodes return True def SCREAMING_SNAKE_CASE_ ( self , _snake_case = 0 ) -> None: '''simple docstring''' if self.prefix != "": print('''-''' * height , self.prefix , ''' (leaf)''' if self.is_leaf else '''''' ) for value in self.nodes.values(): value.print_tree(height + 1 ) def __lowerCAmelCase ( ) -> bool: 
__a = '''banana bananas bandana band apple all beast'''.split() __a = RadixNode() root.insert_many(a__ ) assert all(root.find(a__ ) for word in words ) assert not root.find('''bandanas''' ) assert not root.find('''apps''' ) root.delete('''all''' ) assert not root.find('''all''' ) root.delete('''banana''' ) assert not root.find('''banana''' ) assert root.find('''bananas''' ) return True def __lowerCAmelCase ( ) -> None: assert test_trie() def __lowerCAmelCase ( ) -> None: __a = RadixNode() __a = '''banana bananas bandanas bandana band apple all beast'''.split() root.insert_many(a__ ) print('''Words:''' , a__ ) print('''Tree:''' ) root.print_tree() if __name__ == "__main__": main()
6
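The operation at the heart of the radix tree above is the prefix split performed by its match method: find the longest common prefix of the node label and the incoming word, plus the two remainders. A minimal self-contained sketch (function name assumed):

def split_prefix(prefix: str, word: str) -> tuple[str, str, str]:
    # Walk both strings until they diverge
    x = 0
    while x < min(len(prefix), len(word)) and prefix[x] == word[x]:
        x += 1
    return prefix[:x], prefix[x:], word[x:]

# "band" vs "banana" share "ban"; the node would split into "ban" -> {"d", "ana"}.
print(split_prefix("band", "banana"))  # ('ban', 'd', 'ana')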
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( 'stable diffusion controlnet', '0.22.0', 'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.', standard_warn=False, stacklevel=3, )
6
1
from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> np.ndarray: __a = cva.getAffineTransform(a__ , a__ ) return cva.warpAffine(a__ , a__ , (rows, cols) ) if __name__ == "__main__": # read original image A : List[Any] = cva.imread( str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg') ) # turn image in gray scale value A : Dict = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape A , A : List[Any] = gray_img.shape # set different points to rotate image A : str = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa) A : Tuple = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa) A : Union[str, Any] = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa) A : Any = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa) # add all rotated images in a list A : Optional[int] = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations A : Optional[int] = plt.figure(1) A : Tuple = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3'] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray') plt.title(titles[i]) plt.axis('off') plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
6
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=a ) class __A( a ): snake_case_ = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) snake_case_ = Features({'''text''': Value('''string''' )} ) snake_case_ = Features({} ) snake_case_ = "text" @property def SCREAMING_SNAKE_CASE_ ( self ) -> Dict[str, str]: '''simple docstring''' return {self.text_column: "text"}
6
1
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> int: '''simple docstring''' self.assertEqual(len(_snake_case ) , len(_snake_case ) ) for a, b in zip(_snake_case , _snake_case ): self.assertAlmostEqual(_snake_case , _snake_case , delta=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(_snake_case ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = None ops.enable_eager_execution_internal() __a = tf.config.list_physical_devices('''CPU''' ) if len(_snake_case ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) __a = tf.config.list_logical_devices(device_type='''CPU''' ) __a = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): __a = GradientAccumulator() __a = tf.Variable([4.0, 3.0] ) __a , __a = create_optimizer(5E-5 , 10 , 5 ) __a = tf.Variable([0.0, 0.0] , trainable=_snake_case ) def accumulate_on_replica(_snake_case ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(_snake_case , _snake_case ): with strategy.scope(): __a = strategy.experimental_local_results(_snake_case ) local_variables[0].assign(_snake_case ) local_variables[1].assign(_snake_case ) strategy.run(_snake_case , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(_snake_case ) def _check_local_values(_snake_case , _snake_case ): __a = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , _snake_case , tol=1E-2 ) self.assertListAlmostEqual(values[1].value() , _snake_case , tol=1E-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
6
import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def __lowerCAmelCase ( a__ , a__ , a__=1024 , a__=1024 , a__=False , **a__ ) -> Optional[Any]: __a = AutoTokenizer.from_pretrained(a__ ) __a = SeqaSeqDataset(a__ , a__ , a__ , a__ , type_path='''train''' , **a__ ) __a = tok.pad_token_id def get_lens(a__ ): __a = tqdm( DataLoader(a__ , batch_size=512 , num_workers=8 , shuffle=a__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , ) __a = [] for batch in dl: __a = batch['''input_ids'''].ne(a__ ).sum(1 ).tolist() __a = batch['''labels'''].ne(a__ ).sum(1 ).tolist() if consider_target: for src, tgt in zip(a__ , a__ ): max_lens.append(max(a__ , a__ ) ) else: max_lens.extend(a__ ) return max_lens __a = get_lens(a__ ) __a = SeqaSeqDataset(a__ , a__ , a__ , a__ , type_path='''val''' , **a__ ) __a = get_lens(a__ ) pickle_save(a__ , train_ds.len_file ) pickle_save(a__ , val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
6
1
import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('.') def __lowerCAmelCase ( a__ ) -> Optional[Any]: __a = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got ''' F"""{test_file} instead.""" ) __a = components[-1] if not test_fn.endswith('''py''' ): raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" ) if not test_fn.startswith('''test_modeling_''' ): raise ValueError( F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" ) __a = components[:-1] + [test_fn.replace('''.py''' , '''''' )] __a = '''.'''.join(a__ ) return test_module_path def __lowerCAmelCase ( a__ ) -> List[str]: __a = get_module_path(a__ ) __a = importlib.import_module(a__ ) return test_module def __lowerCAmelCase ( a__ ) -> List[str]: __a = [] __a = get_test_module(a__ ) for attr in dir(a__ ): if attr.endswith('''ModelTester''' ): tester_classes.append(getattr(a__ , a__ ) ) # sort with class names return sorted(a__ , key=lambda a__ : x.__name__ ) def __lowerCAmelCase ( a__ ) -> Optional[Any]: __a = [] __a = get_test_module(a__ ) for attr in dir(a__ ): __a = getattr(a__ , a__ ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). __a = getattr(a__ , '''all_model_classes''' , [] ) if len(a__ ) > 0: test_classes.append(a__ ) # sort with class names return sorted(a__ , key=lambda a__ : x.__name__ ) def __lowerCAmelCase ( a__ ) -> Optional[int]: __a = get_test_classes(a__ ) __a = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(a__ , key=lambda a__ : x.__name__ ) def __lowerCAmelCase ( a__ ) -> List[Any]: __a = test_class() if hasattr(a__ , '''setUp''' ): test.setUp() __a = None if hasattr(a__ , '''model_tester''' ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: __a = test.model_tester.__class__ return model_tester def __lowerCAmelCase ( a__ , a__ ) -> Optional[Any]: __a = get_test_classes(a__ ) __a = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(a__ ) # sort with class names return sorted(a__ , key=lambda a__ : x.__name__ ) def __lowerCAmelCase ( a__ , a__ ) -> Optional[int]: __a = get_test_classes_for_model(a__ , a__ ) __a = [] for test_class in test_classes: __a = get_model_tester_from_test_class(a__ ) if tester_class is not None: tester_classes.append(a__ ) # sort with class names return sorted(a__ , key=lambda a__ : x.__name__ ) def __lowerCAmelCase ( a__ ) -> Optional[Any]: __a = get_test_classes(a__ ) __a = {test_class: get_model_tester_from_test_class(a__ ) for test_class in test_classes} return test_tester_mapping def __lowerCAmelCase ( a__ ) -> List[str]: __a = get_model_classes(a__ ) __a = { model_class: get_test_classes_for_model(a__ , a__ ) for model_class in model_classes } return model_test_mapping def __lowerCAmelCase ( a__ ) -> Union[str, Any]: __a = get_model_classes(a__ ) __a = { model_class: get_tester_classes_for_model(a__ , a__ ) for model_class in model_classes } return model_to_tester_mapping def __lowerCAmelCase ( a__ ) -> Union[str, Any]: if isinstance(a__ , a__ ): return o elif isinstance(a__ , a__ ): return o.__name__ elif isinstance(a__ , (list, tuple) ): return [to_json(a__ ) for x in o] elif isinstance(a__ , a__ ): return {to_json(a__ ): to_json(a__ ) for k, v in o.items()} else: return o
6
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = (1 - _cos) / 2 __a = 1 - _cos __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = (1 + _cos) / 2 __a = -1 - _cos __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = _sin / 2 __a = 0 __a = -ba __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 1 - alpha __a = -2 * _cos __a = 1 + alpha __a = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = 1 + alpha * big_a __a = -2 * _cos __a = 1 - alpha * big_a __a = 1 + alpha / big_a __a = -2 * _cos __a = 1 - alpha / big_a __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = (big_a + 1) - (big_a - 1) * _cos __a = (big_a + 1) + (big_a - 1) * _cos __a = (big_a - 1) - (big_a + 1) * _cos __a = (big_a - 1) + (big_a + 1) * _cos __a = 2 * sqrt(a__ ) * alpha __a = big_a * (pmc + aaa) __a = 2 * big_a * mpc __a = big_a * (pmc - aaa) __a = ppmc + aaa __a = -2 * pmpc __a = ppmc - aaa __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = (big_a + 1) - (big_a - 1) * _cos __a = (big_a + 1) + (big_a - 1) * _cos __a = (big_a - 1) - (big_a + 1) * _cos __a = (big_a - 1) + (big_a + 1) * _cos __a = 2 * sqrt(a__ ) * alpha __a = big_a * (ppmc + aaa) __a = -2 * big_a * pmpc __a = big_a * (ppmc - aaa) __a = pmc + aaa __a = 2 * mpc __a = pmc - aaa __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
6
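All the filters above follow the same Audio EQ Cookbook pattern: derive five biquad coefficients, then run one second-order difference equation. A self-contained sketch for the low-pass case (helper names are assumptions, not the module above):

import math

def lowpass_coeffs(frequency, samplerate, q=1 / math.sqrt(2)):
    # RBJ cookbook low-pass: b = [(1-cos w0)/2, 1-cos w0, (1-cos w0)/2]
    w0 = math.tau * frequency / samplerate
    alpha = math.sin(w0) / (2 * q)
    b = [(1 - math.cos(w0)) / 2, 1 - math.cos(w0), (1 - math.cos(w0)) / 2]
    a = [1 + alpha, -2 * math.cos(w0), 1 - alpha]
    return b, a

def biquad(samples, b, a):
    # Direct form I: y[n] = (b0 x[n] + b1 x[n-1] + b2 x[n-2] - a1 y[n-1] - a2 y[n-2]) / a0
    y1 = y2 = x1 = x2 = 0.0
    out = []
    for x in samples:
        y = (b[0] * x + b[1] * x1 + b[2] * x2 - a[1] * y1 - a[2] * y2) / a[0]
        x2, x1, y2, y1 = x1, x, y1, y
        out.append(y)
    return out

b, a = lowpass_coeffs(1_000, 48_000)
print(biquad([1.0, 0.0, 0.0, 0.0], b, a)[:3])  # head of the impulse response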
1
import math import sys import cva import numpy as np def __lowerCAmelCase ( a__ , a__ ) -> np.ndarray: # For applying gaussian function for each element in matrix. __a = math.sqrt(a__ ) __a = 1 / (sigma * math.sqrt(2 * math.pi )) return cons * np.exp(-((img / sigma) ** 2) * 0.5 ) def __lowerCAmelCase ( a__ , a__ , a__ , a__ ) -> np.ndarray: __a = kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def __lowerCAmelCase ( a__ , a__ ) -> np.ndarray: # Creates a gaussian kernel of given dimension. __a = np.zeros((kernel_size, kernel_size) ) for i in range(0 , a__ ): for j in range(0 , a__ ): __a = math.sqrt( abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 ) return vec_gaussian(a__ , a__ ) def __lowerCAmelCase ( a__ , a__ , a__ , a__ , ) -> np.ndarray: __a = np.zeros(img.shape ) __a = get_gauss_kernel(a__ , a__ ) __a , __a = img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2 ): for j in range(kernel_size // 2 , size_y - kernel_size // 2 ): __a = get_slice(a__ , a__ , a__ , a__ ) __a = img_s - img_s[kernel_size // 2, kernel_size // 2] __a = vec_gaussian(a__ , a__ ) __a = np.multiply(a__ , a__ ) __a = np.multiply(a__ , a__ ) __a = np.sum(a__ ) / np.sum(a__ ) __a = val return imga def __lowerCAmelCase ( a__ ) -> tuple: __a = args[1] if args[1:] else '''../image_data/lena.jpg''' __a = float(args[2] ) if args[2:] else 1.0 __a = float(args[3] ) if args[3:] else 1.0 if args[4:]: __a = int(args[4] ) __a = kernel_size + abs(kernel_size % 2 - 1 ) else: __a = 5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": A , A , A , A : Any = parse_args(sys.argv) A : Any = cva.imread(filename, 0) cva.imshow('input image', img) A : str = img / 2_5_5 A : Dict = out.astype('float32') A : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) A : Union[str, Any] = out * 2_5_5 A : Any = np.uinta(out) cva.imshow('output image', out) cva.waitKey(0) cva.destroyAllWindows()
6
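The bilateral loop above multiplies a fixed spatial Gaussian by a per-patch intensity Gaussian and normalizes. A minimal sketch of that weight computation for one patch (names and toy values are assumptions):

import numpy as np

def bilateral_weights(patch, sigma_s, sigma_i):
    """Combined spatial/intensity weights for one kernel-sized patch."""
    k = patch.shape[0]
    ax = np.arange(k) - k // 2
    xx, yy = np.meshgrid(ax, ax)
    spatial = np.exp(-(xx**2 + yy**2) / (2 * sigma_s**2))
    # Intensity term compares every pixel to the patch center
    diff = patch - patch[k // 2, k // 2]
    intensity = np.exp(-(diff**2) / (2 * sigma_i**2))
    w = spatial * intensity
    return w / w.sum()

patch = np.array([[0.1, 0.1, 0.9], [0.1, 0.2, 0.9], [0.1, 0.1, 0.9]])
w = bilateral_weights(patch, sigma_s=1.0, sigma_i=0.1)
print((w * patch).sum())  # ~0.14: the 0.9 edge column contributes almost nothing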
def __lowerCAmelCase ( a__ , a__ , a__ ) -> list: __a = len(a__ ) __a = [[0] * n for i in range(a__ )] for i in range(a__ ): __a = y_points[i] for i in range(2 , a__ ): for j in range(a__ , a__ ): __a = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
6
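The interpolation sample above keeps the Neville tableau in columns starting at index 1 (the recursion begins at column 2). An equivalent, self-contained 0-based sketch (names assumed):

def neville(x_points, y_points, x0):
    n = len(x_points)
    q = [[0.0] * n for _ in range(n)]
    for i in range(n):
        q[i][0] = float(y_points[i])  # column 0 holds the raw y values
    for i in range(1, n):
        for j in range(i, n):
            # Each column blends two lower-degree interpolants at x0
            q[j][i] = (
                (x0 - x_points[j - i]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i])
    return q[n - 1][n - 1]

# Three points of y = x^2 reproduce the parabola exactly at x = 2.5.
print(neville([1, 2, 3], [1, 4, 9], 2.5))  # 6.25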
1
import heapq as hq import math from collections.abc import Iterator class __A: def __init__( self , _snake_case ) -> List[Any]: '''simple docstring''' __a = str(id_ ) __a = None __a = None __a = [] __a = {} # {vertex:distance} def __lt__( self , _snake_case ) -> List[Any]: '''simple docstring''' return self.key < other.key def __repr__( self ) -> Optional[int]: '''simple docstring''' return self.id def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int: '''simple docstring''' self.neighbors.append(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = weight def __lowerCAmelCase ( a__ , a__ , a__ , a__ ) -> Tuple: # add the neighbors: graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , a__ ) graph[b - 1].add_edge(graph[a - 1] , a__ ) def __lowerCAmelCase ( a__ , a__ ) -> list: __a = [] for u in graph: __a = math.inf __a = None __a = 0 __a = graph[:] while q: __a = min(a__ ) q.remove(a__ ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): __a = u __a = u.edges[v.id] for i in range(1 , len(a__ ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def __lowerCAmelCase ( a__ , a__ ) -> Iterator[tuple]: for u in graph: __a = math.inf __a = None __a = 0 __a = list(a__ ) hq.heapify(a__ ) while h: __a = hq.heappop(a__ ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): __a = u __a = u.edges[v.id] hq.heapify(a__ ) for i in range(1 , len(a__ ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def __lowerCAmelCase ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
6
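The two Prim variants above are written against the Vertex class; here is a hedged, self-contained heap-based sketch over a plain adjacency dict (the graph shape and all names are assumptions):

import heapq

def prim_mst(graph, start):
    """graph: {node: [(weight, neighbor), ...]}; returns the MST edge list."""
    visited = {start}
    edges = [(w, start, v) for w, v in graph[start]]
    heapq.heapify(edges)
    mst = []
    while edges and len(visited) < len(graph):
        w, u, v = heapq.heappop(edges)
        if v in visited:
            continue  # lazily discard stale heap entries
        visited.add(v)
        mst.append((u, v, w))
        for w2, nxt in graph[v]:
            if nxt not in visited:
                heapq.heappush(edges, (w2, v, nxt))
    return mst

g = {
    "a": [(1, "b"), (4, "c")],
    "b": [(1, "a"), (2, "c"), (6, "d")],
    "c": [(4, "a"), (2, "b"), (3, "d")],
    "d": [(6, "b"), (3, "c")],
}
print(prim_mst(g, "a"))  # [('a', 'b', 1), ('b', 'c', 2), ('c', 'd', 3)]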
from __future__ import annotations import time from collections.abc import Sequence from random import randint from matplotlib import pyplot as plt def __lowerCAmelCase ( a__ , a__ , a__ ) -> tuple[int | None, int | None, float]: if not arr: return None, None, 0 if low == high: return low, high, arr[low] __a = (low + high) // 2 __a , __a , __a = max_subarray(a__ , a__ , a__ ) __a , __a , __a = max_subarray(a__ , mid + 1 , a__ ) __a , __a , __a = max_cross_sum(a__ , a__ , a__ , a__ ) if left_sum >= right_sum and left_sum >= cross_sum: return left_low, left_high, left_sum elif right_sum >= left_sum and right_sum >= cross_sum: return right_low, right_high, right_sum return cross_left, cross_right, cross_sum def __lowerCAmelCase ( a__ , a__ , a__ , a__ ) -> tuple[int, int, float]: __a , __a = float('''-inf''' ), -1 __a , __a = float('''-inf''' ), -1 __a = 0 for i in range(a__ , low - 1 , -1 ): summ += arr[i] if summ > left_sum: __a = summ __a = i __a = 0 for i in range(mid + 1 , high + 1 ): summ += arr[i] if summ > right_sum: __a = summ __a = i return max_left, max_right, (left_sum + right_sum) def __lowerCAmelCase ( a__ ) -> float: __a = [randint(1 , a__ ) for _ in range(a__ )] __a = time.time() max_subarray(a__ , 0 , input_size - 1 ) __a = time.time() return end - start def __lowerCAmelCase ( ) -> None: __a = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000] __a = [time_max_subarray(a__ ) for input_size in input_sizes] print('''No of Inputs\t\tTime Taken''' ) for input_size, runtime in zip(a__ , a__ ): print(a__ , '''\t\t''' , a__ ) plt.plot(a__ , a__ ) plt.xlabel('''Number of Inputs''' ) plt.ylabel('''Time taken in seconds''' ) plt.show() if __name__ == "__main__": from doctest import testmod testmod()
6
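The divide-and-conquer routine above runs in O(n log n); a linear-time Kadane pass makes a handy cross-check on the maximum sum (a sketch, names assumed):

def kadane(arr):
    # cur: best subarray sum ending at the current element
    best = cur = arr[0]
    for x in arr[1:]:
        cur = max(x, cur + x)
        best = max(best, cur)
    return best

# Classic example: the best subarray is [4, -1, 2, 1] with sum 6,
# matching what max_subarray reports for indices 3..6.
print(kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6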
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer A : Any = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} A : Union[str, Any] = { 'vocab_file': { 'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt', }, 'tokenizer_file': { 'unc-nlp/lxmert-base-uncased': ( 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json' ), }, } A : Any = { 'unc-nlp/lxmert-base-uncased': 5_1_2, } A : str = { 'unc-nlp/lxmert-base-uncased': {'do_lower_case': True}, } class __A( a ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_INIT_CONFIGURATION snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = LxmertTokenizer def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ) -> List[Any]: '''simple docstring''' super().__init__( _snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , ) __a = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _snake_case ) != do_lower_case or normalizer_state.get('''strip_accents''' , _snake_case ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _snake_case ) != tokenize_chinese_chars ): __a = getattr(_snake_case , normalizer_state.pop('''type''' ) ) __a = do_lower_case __a = strip_accents __a = tokenize_chinese_chars __a = normalizer_class(**_snake_case ) __a = do_lower_case def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=None ) -> str: '''simple docstring''' __a = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> List[int]: '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> Tuple[str]: '''simple docstring''' __a = self._tokenizer.model.save(_snake_case , name=_snake_case ) return tuple(_snake_case )
6
import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __A( a , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class __A( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = ort.SessionOptions() __a = False return options def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __a = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_snake_case ) __a = '''A red cat sitting on a park bench''' __a = np.random.RandomState(0 ) __a = pipe( prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=10 , generator=_snake_case , output_type='''np''' , ) __a = output.images __a = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) __a = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __a = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) __a = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_snake_case , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_snake_case ) __a = '''A red cat sitting on a park bench''' __a = np.random.RandomState(0 ) __a = pipe( prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=20 , generator=_snake_case , output_type='''np''' , ) __a = output.images __a = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) __a = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
6
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A : Any = { 'configuration_altclip': [ 'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig', ], 'processing_altclip': ['AltCLIPProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : str = [ 'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'AltCLIPPreTrainedModel', 'AltCLIPModel', 'AltCLIPTextModel', 'AltCLIPVisionModel', ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
from math import ceil def __lowerCAmelCase ( a__ = 1001 ) -> int: __a = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): __a = 2 * i + 1 __a = 2 * i __a = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: A : List[Any] = int(sys.argv[1]) print(solution(n)) except ValueError: print('Invalid entry - please enter a number')
6
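The closed form above adds 4*(2i+1)^2 - 6*(2i) per spiral ring. As a sanity check, a direct corner-walking sketch (function name assumed) reproduces the known values; the 5x5 diagonals are 1, 3, 5, 7, 9, 13, 17, 21, 25:

def spiral_diagonal_sum(n: int) -> int:
    total, value = 1, 1
    for side in range(3, n + 1, 2):  # rings with side length 3, 5, ...
        for _ in range(4):           # four corners per ring, each side-1 apart
            value += side - 1
            total += value
    return total

print(spiral_diagonal_sum(5))     # 101
print(spiral_diagonal_sum(1001))  # 669171001 (Project Euler 28)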
1
def __lowerCAmelCase ( a__ ) -> int: # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection __a = len(a__ ) __a = max(a__ ) __a = min(a__ ) # create the counting array __a = coll_max + 1 - coll_min __a = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , a__ ): __a = counting_arr[i] + counting_arr[i - 1] # create the output collection __a = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , a__ ) ): __a = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def __lowerCAmelCase ( a__ ) -> List[str]: return "".join([chr(a__ ) for i in counting_sort([ord(a__ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt" A : List[Any] = input('Enter numbers separated by a comma:\n').strip() A : Dict = [int(item) for item in user_input.split(',')] print(counting_sort(unsorted))
6
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __A( a ): snake_case_ = ['''image_processor''', '''tokenizer'''] snake_case_ = '''ChineseCLIPImageProcessor''' snake_case_ = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> Tuple: '''simple docstring''' __a = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _snake_case , ) __a = kwargs.pop('''feature_extractor''' ) __a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(_snake_case , _snake_case ) __a = self.image_processor def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Optional[Any]: '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __a = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case ) if images is not None: __a = self.image_processor(_snake_case , return_tensors=_snake_case , **_snake_case ) if text is not None and images is not None: __a = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_snake_case ) , tensor_type=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> str: '''simple docstring''' return self.tokenizer.batch_decode(*_snake_case , **_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> Dict: '''simple docstring''' return self.tokenizer.decode(*_snake_case , **_snake_case ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = self.tokenizer.model_input_names __a = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _snake_case , ) return self.image_processor_class
6
1
import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants A : Any = Mapping[str, np.ndarray] A : Any = Mapping[str, Any] # Is a nested dict. A : Union[str, Any] = 0.01 @dataclasses.dataclass(frozen=a ) class __A: snake_case_ = 42 # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. snake_case_ = 42 # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. snake_case_ = 42 # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. snake_case_ = 42 # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. snake_case_ = 42 # [num_res, num_atom_type] # Chain indices for multi-chain predictions snake_case_ = None # Optional remark about the protein. Included as a comment in output PDB # files snake_case_ = None # Templates used to generate this protein (prediction-only) snake_case_ = None # Chain corresponding to each parent snake_case_ = None def __lowerCAmelCase ( a__ ) -> Protein: __a = R'''(\[[A-Z]+\]\n)''' __a = [tag.strip() for tag in re.split(a__ , a__ ) if len(a__ ) > 0] __a = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] ) __a = ["N", "CA", "C"] __a = None __a = None __a = None for g in groups: if "[PRIMARY]" == g[0]: __a = g[1][0].strip() for i in range(len(a__ ) ): if seq[i] not in residue_constants.restypes: __a = '''X''' # FIXME: strings are immutable __a = np.array( [residue_constants.restype_order.get(a__ , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: __a = [] for axis in range(3 ): tertiary.append(list(map(a__ , g[1][axis].split() ) ) ) __a = np.array(a__ ) __a = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(a__ ): __a = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: __a = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) ) __a = np.zeros( ( len(a__ ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(a__ ): __a = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=a__ , atom_mask=a__ , aatype=a__ , residue_index=np.arange(len(a__ ) ) , b_factors=a__ , ) def __lowerCAmelCase ( a__ , a__ = 0 ) -> List[str]: __a = [] __a = prot.remark if remark is not None: pdb_headers.append(F"""REMARK {remark}""" ) __a = prot.parents __a = prot.parents_chain_index if parents is not None and parents_chain_index is not None: __a = [p for i, p in zip(a__ , a__ ) if i == chain_id] if parents is None or len(a__ ) == 0: __a = ['''N/A'''] pdb_headers.append(F"""PARENT {' '.join(a__ )}""" ) return pdb_headers def __lowerCAmelCase ( a__ , a__ ) -> str: __a = [] __a = pdb_str.split('''\n''' ) __a = prot.remark if remark is not None: out_pdb_lines.append(F"""REMARK {remark}""" ) __a = 42 if prot.parents is not None and len(prot.parents ) > 0: __a = [] if prot.parents_chain_index is not None: __a = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(a__ ) , [] ) parent_dict[str(a__ )].append(a__ ) __a = max([int(a__ ) for chain_idx in parent_dict] ) for i in range(max_idx + 
1 ): __a = parent_dict.get(str(a__ ) , ['''N/A'''] ) parents_per_chain.append(a__ ) else: parents_per_chain.append(list(prot.parents ) ) else: __a = [['''N/A''']] def make_parent_line(a__ ) -> str: return F"""PARENT {' '.join(a__ )}""" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) __a = 0 for i, l in enumerate(a__ ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(a__ ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(a__ ): __a = parents_per_chain[chain_counter] else: __a = ['''N/A'''] out_pdb_lines.append(make_parent_line(a__ ) ) return "\n".join(a__ ) def __lowerCAmelCase ( a__ ) -> str: __a = residue_constants.restypes + ['''X'''] def res_atoa(a__ ) -> str: return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' ) __a = residue_constants.atom_types __a = [] __a = prot.atom_mask __a = prot.aatype __a = prot.atom_positions __a = prot.residue_index.astype(np.intaa ) __a = prot.b_factors __a = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('''Invalid aatypes.''' ) __a = get_pdb_headers(a__ ) if len(a__ ) > 0: pdb_lines.extend(a__ ) __a = aatype.shape[0] __a = 1 __a = 0 __a = string.ascii_uppercase __a = None # Add all atom sites. for i in range(a__ ): __a = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(a__ , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue __a = '''ATOM''' __a = atom_name if len(a__ ) == 4 else F""" {atom_name}""" __a = '''''' __a = '''''' __a = 1.00 __a = atom_name[0] # Protein supports only C, N, O, S, this works. __a = '''''' __a = '''A''' if chain_index is not None: __a = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! __a = ( F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" F"""{res_name_a:>3} {chain_tag:>1}""" F"""{residue_index[i]:>4}{insertion_code:>1} """ F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" F"""{occupancy:>6.2f}{b_factor:>6.2f} """ F"""{element:>2}{charge:>2}""" ) pdb_lines.append(a__ ) atom_index += 1 __a = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: __a = True __a = chain_index[i + 1] if should_terminate: # Close the chain. __a = '''TER''' __a = ( F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(a__ ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(a__ , a__ ) ) pdb_lines.append('''END''' ) pdb_lines.append('''''' ) return "\n".join(a__ ) def __lowerCAmelCase ( a__ ) -> np.ndarray: return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def __lowerCAmelCase ( a__ , a__ , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , ) -> Protein: return Protein( aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=a__ , remark=a__ , parents=a__ , parents_chain_index=a__ , )
6
from __future__ import annotations import typing from collections import Counter def __lowerCAmelCase ( a__ ) -> typing.Counter[int]: __a = Counter() for base in range(1 , max_perimeter + 1 ): for perpendicular in range(a__ , max_perimeter + 1 ): __a = (base * base + perpendicular * perpendicular) ** 0.5 if hypotenuse == int(a__ ): __a = int(base + perpendicular + hypotenuse ) if perimeter > max_perimeter: continue triplets[perimeter] += 1 return triplets def __lowerCAmelCase ( a__ = 1000 ) -> int: __a = pythagorean_triple(a__ ) return triplets.most_common(1 )[0][0] if __name__ == "__main__": print(F"Perimeter {solution()} has maximum solutions")
6
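The counter above tallies right triangles per perimeter. A direct, self-contained check of one perimeter (names assumed); p = 120 admits three integer right triangles:

def triangles_with_perimeter(p: int) -> list[tuple[int, int, int]]:
    out = []
    for a in range(1, p // 3):
        # b <= c keeps each triangle counted once
        for b in range(a, (p - a) // 2 + 1):
            c = p - a - b
            if a * a + b * b == c * c:
                out.append((a, b, c))
    return out

print(triangles_with_perimeter(120))  # [(20, 48, 52), (24, 45, 51), (30, 40, 50)]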
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class __A( unittest.TestCase ): def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=400 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=[0.4814_5466, 0.457_8275, 0.4082_1073] , _snake_case=[0.2686_2954, 0.2613_0258, 0.2757_7711] , _snake_case=True , ) -> Any: '''simple docstring''' __a = size if size is not None else {'''height''': 224, '''width''': 224} __a = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __a = parent __a = batch_size __a = num_channels __a = image_size __a = min_resolution __a = max_resolution __a = do_resize __a = size __a = do_center_crop __a = crop_size __a = do_normalize __a = image_mean __a = image_std __a = do_convert_rgb def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def SCREAMING_SNAKE_CASE_ ( self , _snake_case=False , _snake_case=False , _snake_case=False ) -> Union[str, Any]: '''simple docstring''' assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __a = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: __a = [] for i in range(self.batch_size ): __a , __a = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __a = [Image.fromarray(np.moveaxis(_snake_case , 0 , -1 ) ) for x in image_inputs] if torchify: __a = [torch.from_numpy(_snake_case ) for x in image_inputs] return image_inputs @require_torch @require_vision class __A( a , unittest.TestCase ): snake_case_ = ChineseCLIPImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = ChineseCLIPImageProcessingTester(self , do_center_crop=_snake_case ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_snake_case , '''do_resize''' ) ) self.assertTrue(hasattr(_snake_case , '''size''' ) ) self.assertTrue(hasattr(_snake_case , '''do_center_crop''' ) ) self.assertTrue(hasattr(_snake_case , '''center_crop''' ) ) self.assertTrue(hasattr(_snake_case , '''do_normalize''' ) ) self.assertTrue(hasattr(_snake_case , '''image_mean''' ) ) self.assertTrue(hasattr(_snake_case , '''image_std''' ) ) self.assertTrue(hasattr(_snake_case , '''do_convert_rgb''' ) ) def SCREAMING_SNAKE_CASE_ 
( self ) -> Union[str, Any]: '''simple docstring''' __a = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a = self.image_processor_tester.prepare_inputs(equal_resolution=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , Image.Image ) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(_snake_case , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a = self.image_processor_tester.prepare_inputs(equal_resolution=_snake_case , numpify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , np.ndarray ) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(_snake_case , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a = self.image_processor_tester.prepare_inputs(equal_resolution=_snake_case , torchify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , torch.Tensor ) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(_snake_case , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) @require_torch @require_vision class __A( a 
, unittest.TestCase ): snake_case_ = ChineseCLIPImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_snake_case ) __a = 3 @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_snake_case , '''do_resize''' ) ) self.assertTrue(hasattr(_snake_case , '''size''' ) ) self.assertTrue(hasattr(_snake_case , '''do_center_crop''' ) ) self.assertTrue(hasattr(_snake_case , '''center_crop''' ) ) self.assertTrue(hasattr(_snake_case , '''do_normalize''' ) ) self.assertTrue(hasattr(_snake_case , '''image_mean''' ) ) self.assertTrue(hasattr(_snake_case , '''image_std''' ) ) self.assertTrue(hasattr(_snake_case , '''do_convert_rgb''' ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a = self.image_processor_tester.prepare_inputs(equal_resolution=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , Image.Image ) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __a = image_processing(_snake_case , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
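# --- Hedged usage sketch (not part of the original tests) -------------------
# The four-channel test above leans on `do_convert_rgb=True`: inputs such as
# RGBA images are collapsed to 3 channels before resize/crop/normalize, which
# is why only 3 output channels are expected even though the tester feeds 4.
if __name__ == "__main__":
    from transformers import ChineseCLIPImageProcessor

    processor = ChineseCLIPImageProcessor(do_convert_rgb=True)
    rgba = Image.fromarray(np.zeros((32, 32, 4), dtype=np.uint8), mode="RGBA")
    pixel_values = processor(rgba, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # (1, 3, H, W) after the processor's resize/crop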
6
# flake8: noqa
# Lint as: python3
__all__ = [
    'VerificationMode',
    'Version',
    'disable_progress_bar',
    'enable_progress_bar',
    'is_progress_bar_enabled',
    'experimental',
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
6
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer A : Optional[int] = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast A : Dict = TaTokenizerFast A : Union[str, Any] = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : str = [ 'MT5EncoderModel', 'MT5ForConditionalGeneration', 'MT5ForQuestionAnswering', 'MT5Model', 'MT5PreTrainedModel', 'MT5Stack', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[int] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model'] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys A : Any = _LazyModule( __name__, globals()['__file__'], _import_structure, extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast}, module_spec=__spec__, )
6
from typing import Dict from .base import GenericTensor, Pipeline class __A( a ): def SCREAMING_SNAKE_CASE_ ( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Optional[Any]: '''simple docstring''' if tokenize_kwargs is None: __a = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' ) __a = truncation __a = tokenize_kwargs __a = {} if return_tensors is not None: __a = return_tensors return preprocess_params, {}, postprocess_params def SCREAMING_SNAKE_CASE_ ( self , _snake_case , **_snake_case ) -> Dict[str, GenericTensor]: '''simple docstring''' __a = self.framework __a = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = self.model(**_snake_case ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=False ) -> Optional[int]: '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self , *_snake_case , **_snake_case ) -> Any: '''simple docstring''' return super().__call__(*_snake_case , **_snake_case )
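# --- Hedged usage sketch (not part of the original file) --------------------
# The pipeline above tokenizes text, runs the bare model, and returns its
# first output tensor (the hidden states). The checkpoint below is only an
# illustrative choice; any encoder model works.
if __name__ == "__main__":
    from transformers import pipeline

    extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
    features = extractor("Transformers is great!")
    # `features` is a nested list shaped [batch, sequence_length, hidden_size].
    print(len(features[0]), len(features[0][0]))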
6
1
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __A( unittest.TestCase ): def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=4 , ) -> Optional[int]: '''simple docstring''' __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_attention_mask __a = use_token_type_ids __a = use_labels __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = type_vocab_size __a = type_sequence_label_size __a = initializer_range __a = num_choices def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = None if self.use_attention_mask: __a = random_attention_mask([self.batch_size, self.seq_length] ) __a = None if self.use_token_type_ids: __a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a , __a = config_and_inputs __a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a , __a = config_and_inputs __a = True __a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __A( a , unittest.TestCase ): snake_case_ = True snake_case_ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = FlaxRobertaModelTester(self ) @slow def SCREAMING_SNAKE_CASE_ ( self ) -> Any: 
'''simple docstring''' for model_class_name in self.all_model_classes: __a = model_class_name.from_pretrained('''roberta-base''' , from_pt=_snake_case ) __a = model(np.ones((1, 1) ) ) self.assertIsNotNone(_snake_case )
6
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : List[str] = logging.get_logger(__name__) A : Optional[int] = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class __A( a ): snake_case_ = '''levit''' def __init__( self , _snake_case=224 , _snake_case=3 , _snake_case=3 , _snake_case=2 , _snake_case=1 , _snake_case=16 , _snake_case=[128, 256, 384] , _snake_case=[4, 8, 12] , _snake_case=[4, 4, 4] , _snake_case=[16, 16, 16] , _snake_case=0 , _snake_case=[2, 2, 2] , _snake_case=[2, 2, 2] , _snake_case=0.02 , **_snake_case , ) -> Optional[Any]: '''simple docstring''' super().__init__(**_snake_case ) __a = image_size __a = num_channels __a = kernel_size __a = stride __a = padding __a = hidden_sizes __a = num_attention_heads __a = depths __a = key_dim __a = drop_path_rate __a = patch_size __a = attention_ratio __a = mlp_ratio __a = initializer_range __a = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class __A( a ): snake_case_ = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> float: '''simple docstring''' return 1E-4
6
1
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)
    # Append the "answers" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '0'

    if (
        input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
        == "y"
    ):
        n = int(input('Enter the number of denominations you want to add: ').strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input('Enter the change you want to make in Indian Currency: ').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('Enter the change you want to make: ').strip()

    if int(value) == 0 or int(value) < 0:
        print('The total value cannot be zero or negative.')
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=' ')
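# --- Hedged example (not part of the original file) -------------------------
# The routine is greedy: it always takes the largest denomination that still
# fits. That is optimal for canonical coin systems such as the Indian set in
# the driver, though not for arbitrary denomination sets.
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "43") == [20, 20, 2, 1]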
6
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel A : int = '0.12' # assumed parallelism: 8 @require_flax @is_staging_test class __A( unittest.TestCase ): @classmethod def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]: '''simple docstring''' __a = TOKEN HfFolder.save_token(_snake_case ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]: '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) __a = FlaxBertModel(_snake_case ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) __a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" ) __a = flatten_dict(unfreeze(model.params ) ) __a = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __a = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_snake_case , repo_id='''test-model-flax''' , push_to_hub=_snake_case , use_auth_token=self._token ) __a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" ) __a = flatten_dict(unfreeze(model.params ) ) __a = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __a = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) __a = FlaxBertModel(_snake_case ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) __a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) __a = flatten_dict(unfreeze(model.params ) ) __a = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __a = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( _snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_snake_case , use_auth_token=self._token ) __a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) __a = flatten_dict(unfreeze(model.params ) ) __a = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __a = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" ) def __lowerCAmelCase ( a__ , a__ ) -> str: __a = True __a = 
flatten_dict(modela.params ) __a = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4: __a = False return models_are_equal @require_flax class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) __a = FlaxBertModel(_snake_case ) __a = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_snake_case , _snake_case ) ) with self.assertRaises(_snake_case ): __a = FlaxBertModel.from_pretrained(_snake_case ) __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case ) self.assertTrue(check_models_equal(_snake_case , _snake_case ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) __a = FlaxBertModel(_snake_case ) __a = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_snake_case , _snake_case ) , max_shard_size='''10KB''' ) with self.assertRaises(_snake_case ): __a = FlaxBertModel.from_pretrained(_snake_case ) __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case ) self.assertTrue(check_models_equal(_snake_case , _snake_case ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a = '''bert''' __a = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(_snake_case ): __a = FlaxBertModel.from_pretrained(_snake_case ) __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case ) self.assertIsNotNone(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = '''bert''' __a = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(_snake_case ): __a = FlaxBertModel.from_pretrained(_snake_case ) __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case ) self.assertIsNotNone(_snake_case )
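# --- Hedged usage sketch (not part of the original tests) -------------------
# The sharded test relies on `save_pretrained(..., max_shard_size=...)`:
# weights above the threshold are split into numbered shard files plus an
# index that `from_pretrained` uses to reassemble them transparently, e.g.:
#
# with tempfile.TemporaryDirectory() as tmp_dir:
#     model.save_pretrained(tmp_dir, max_shard_size="10KB")
#     reloaded = FlaxBertModel.from_pretrained(tmp_dir)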
6
1
import random


def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(('Prime number:', num))
    print(('is_prime_low_num:', is_prime_low_num(num)))
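# --- Hedged sanity check (not part of the original file) --------------------
# Rabin-Miller is probabilistic: with 5 random bases, a composite passes with
# probability at most (1/4) ** 5. Small deterministic spot checks:
assert is_prime_low_num(97) and is_prime_low_num(104729)  # 104729 is the 10000th prime
assert not is_prime_low_num(100) and not is_prime_low_num(561)  # 561 = 3 * 11 * 17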
6
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path A : Optional[Any] = Path(__file__).resolve().parents[3] / 'src' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(4_2) A : List[str] = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'} A : Optional[int] = 'zero2' A : str = 'zero3' A : Tuple = [ZEROa, ZEROa] def __lowerCAmelCase ( a__ , a__ , a__ ) -> Tuple: # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param __a = parameterized.to_safe_name('''_'''.join(str(a__ ) for x in param.args ) ) return F"""{func.__name__}_{param_based_name}""" # Cartesian-product of zero stages with models to test A : Union[str, Any] = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class __A( a ): @parameterized.expand(_snake_case , name_func=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Any: '''simple docstring''' self.run_and_check( stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , ) @require_torch_multi_gpu @parameterized.expand(_snake_case , name_func=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> int: '''simple docstring''' self.run_and_check( stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , ) @parameterized.expand(_snake_case , name_func=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> str: '''simple docstring''' self.run_and_check( stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , ) @require_torch_multi_gpu @parameterized.expand(_snake_case , name_func=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' self.run_and_check( stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 10 , _snake_case = True , _snake_case = True , _snake_case = True , ) -> Any: '''simple docstring''' __a = models[model] __a = self.run_trainer( stage=_snake_case , model_name=_snake_case , eval_steps=_snake_case , num_train_epochs=1 , distributed=_snake_case , fpaa=_snake_case , ) self.do_checks(_snake_case ) return output_dir def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 10 , _snake_case = 1 , _snake_case = True , _snake_case = True , ) -> Union[str, Any]: '''simple docstring''' __a = self.get_auto_remove_tmp_dir('''./xxx''' , after=_snake_case ) __a = F""" --model_name_or_path {model_name} 
--dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(_snake_case )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none """.split() if fpaa: args.extend(['''--fp16'''] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __a = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split() __a = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""] __a = self.get_launcher(_snake_case ) __a = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(_snake_case , env=self.get_env() ) return output_dir def SCREAMING_SNAKE_CASE_ ( self , _snake_case=False ) -> List[str]: '''simple docstring''' __a = min(2 , get_gpu_count() ) if distributed else 1 return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
6
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A : int = logging.get_logger(__name__) def __lowerCAmelCase ( a__ ) -> List[Any]: __a = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: __a = [144, 192, 240] __a = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: __a = [96, 120, 144] __a = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: __a = [64, 80, 96] __a = [16, 16, 24, 48, 64, 80, 320] __a = 0.05 __a = 2.0 if mobilevit_name.startswith('''deeplabv3_''' ): __a = 512 __a = 16 __a = 21 __a = '''pascal-voc-id2label.json''' else: __a = 1000 __a = '''imagenet-1k-id2label.json''' __a = '''huggingface/label-files''' __a = json.load(open(hf_hub_download(a__ , a__ , repo_type='''dataset''' ) , '''r''' ) ) __a = {int(a__ ): v for k, v in idalabel.items()} __a = idalabel __a = {v: k for k, v in idalabel.items()} return config def __lowerCAmelCase ( a__ , a__=False ) -> List[Any]: for i in range(1 , 6 ): if F"""layer_{i}.""" in name: __a = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: __a = name.replace('''conv_1.''' , '''conv_stem.''' ) if ".block." in name: __a = name.replace('''.block.''' , '''.''' ) if "exp_1x1" in name: __a = name.replace('''exp_1x1''' , '''expand_1x1''' ) if "red_1x1" in name: __a = name.replace('''red_1x1''' , '''reduce_1x1''' ) if ".local_rep.conv_3x3." in name: __a = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' ) if ".local_rep.conv_1x1." in name: __a = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' ) if ".norm." in name: __a = name.replace('''.norm.''' , '''.normalization.''' ) if ".conv." in name: __a = name.replace('''.conv.''' , '''.convolution.''' ) if ".conv_proj." in name: __a = name.replace('''.conv_proj.''' , '''.conv_projection.''' ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: __a = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: __a = name.replace(F""".{i}.{j}.""" , F""".{i}.""" ) if "expand_1x1" in name: __a = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' ) if "conv_3x3" in name: __a = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' ) if "reduce_1x1" in name: __a = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' ) for i in range(2 , 5 ): if F""".global_rep.{i}.weight""" in name: __a = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' ) if F""".global_rep.{i}.bias""" in name: __a = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' ) if ".global_rep." in name: __a = name.replace('''.global_rep.''' , '''.transformer.''' ) if ".pre_norm_mha.0." in name: __a = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' ) if ".pre_norm_mha.1.out_proj." in name: __a = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' ) if ".pre_norm_ffn.0." in name: __a = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' ) if ".pre_norm_ffn.1." in name: __a = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' ) if ".pre_norm_ffn.4." 
in name: __a = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' ) if ".transformer." in name: __a = name.replace('''.transformer.''' , '''.transformer.layer.''' ) if ".aspp_layer." in name: __a = name.replace('''.aspp_layer.''' , '''.''' ) if ".aspp_pool." in name: __a = name.replace('''.aspp_pool.''' , '''.''' ) if "seg_head." in name: __a = name.replace('''seg_head.''' , '''segmentation_head.''' ) if "segmentation_head.classifier.classifier." in name: __a = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' ) if "classifier.fc." in name: __a = name.replace('''classifier.fc.''' , '''classifier.''' ) elif (not base_model) and ("segmentation_head." not in name): __a = '''mobilevit.''' + name return name def __lowerCAmelCase ( a__ , a__ , a__=False ) -> Union[str, Any]: if base_model: __a = '''''' else: __a = '''mobilevit.''' for key in orig_state_dict.copy().keys(): __a = orig_state_dict.pop(a__ ) if key[:8] == "encoder.": __a = key[8:] if "qkv" in key: __a = key.split('''.''' ) __a = int(key_split[0][6:] ) - 1 __a = int(key_split[3] ) __a = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" ) __a = layer.transformer.layer[transformer_num].attention.attention.all_head_size __a = ( F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.""" ) if "weight" in key: __a = val[:dim, :] __a = val[dim : dim * 2, :] __a = val[-dim:, :] else: __a = val[:dim] __a = val[dim : dim * 2] __a = val[-dim:] else: __a = val return orig_state_dict def __lowerCAmelCase ( ) -> List[Any]: __a = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __a = Image.open(requests.get(a__ , stream=a__ ).raw ) return im @torch.no_grad() def __lowerCAmelCase ( a__ , a__ , a__ , a__=False ) -> Dict: __a = get_mobilevit_config(a__ ) # load original state_dict __a = torch.load(a__ , map_location='''cpu''' ) # load 🤗 model if mobilevit_name.startswith('''deeplabv3_''' ): __a = MobileViTForSemanticSegmentation(a__ ).eval() else: __a = MobileViTForImageClassification(a__ ).eval() __a = convert_state_dict(a__ , a__ ) model.load_state_dict(a__ ) # Check outputs on an image, prepared by MobileViTImageProcessor __a = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) __a = image_processor(images=prepare_img() , return_tensors='''pt''' ) __a = model(**a__ ) __a = outputs.logits if mobilevit_name.startswith('''deeplabv3_''' ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": __a = torch.tensor( [ [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]], [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]], [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": __a = torch.tensor( [ [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]], [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]], [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": __a = torch.tensor( [ [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]], [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]], [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, 
-2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]], ] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3, :3, :3] , a__ , atol=1e-4 ) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": __a = torch.tensor([-0.9_866, 0.2_392, -1.1_241] ) elif mobilevit_name == "mobilevit_xs": __a = torch.tensor([-2.4_761, -0.9_399, -1.9_587] ) elif mobilevit_name == "mobilevit_xxs": __a = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3] , a__ , atol=1e-4 ) Path(a__ ).mkdir(exist_ok=a__ ) print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(a__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a__ ) if push_to_hub: __a = { '''mobilevit_s''': '''mobilevit-small''', '''mobilevit_xs''': '''mobilevit-x-small''', '''mobilevit_xxs''': '''mobilevit-xx-small''', '''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''', '''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''', '''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''', } print('''Pushing to the hub...''' ) __a = model_mapping[mobilevit_name] image_processor.push_to_hub(a__ , organization='''apple''' ) model.push_to_hub(a__ , organization='''apple''' ) if __name__ == "__main__": A : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--mobilevit_name', default='mobilevit_s', type=str, help=( 'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',' ' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.' ), ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) A : Optional[int] = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
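# --- Hedged usage sketch (not part of the original script) ------------------
# Typical invocation of the converter above; the paths are placeholders, not
# values prescribed by this file:
#
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small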
6
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __A( a , a , unittest.TestCase ): snake_case_ = AutoencoderKL snake_case_ = '''sample''' snake_case_ = 1E-2 @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = 4 __a = 3 __a = (32, 32) __a = floats_tensor((batch_size, num_channels) + sizes ).to(_snake_case ) return {"sample": image} @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' return (3, 32, 32) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' return (3, 32, 32) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = { '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } __a = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' pass @unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a , __a = self.prepare_init_args_and_inputs_for_common() __a = self.model_class(**_snake_case ) model.to(_snake_case ) assert not model.is_gradient_checkpointing and model.training __a = model(**_snake_case ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __a = torch.randn_like(_snake_case ) __a = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __a = self.model_class(**_snake_case ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(_snake_case ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __a = model_a(**_snake_case ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __a = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) __a = dict(model.named_parameters() ) __a = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a , __a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_snake_case ) __a = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' ) __a = model.to(_snake_case ) model.eval() if torch_device == "mps": __a = torch.manual_seed(0 ) else: __a = torch.Generator(device=_snake_case ).manual_seed(0 ) __a = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __a = image.to(_snake_case ) with torch.no_grad(): __a = model(_snake_case , sample_posterior=_snake_case , generator=_snake_case ).sample __a = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": __a = torch.tensor( [ -4.0_078E-01, -3.8_323E-04, -1.2_681E-01, -1.1_462E-01, 2.0_095E-01, 1.0_893E-01, -8.8_247E-02, -3.0_361E-01, -9.8_644E-03, ] ) elif torch_device == "cpu": __a = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: __a = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(_snake_case , _snake_case , rtol=1E-2 ) ) @slow class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={'_'.join([str(_snake_case ) for s in shape] )}.npy""" def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 , _snake_case=(4, 3, 512, 512) , _snake_case=False ) -> Any: '''simple docstring''' __a = torch.floataa if fpaa else torch.floataa __a = torch.from_numpy(load_hf_numpy(self.get_file_format(_snake_case , _snake_case ) ) ).to(_snake_case ).to(_snake_case ) return image def SCREAMING_SNAKE_CASE_ ( self , _snake_case="CompVis/stable-diffusion-v1-4" , _snake_case=False ) -> Optional[Any]: '''simple docstring''' __a = '''fp16''' if fpaa else None __a = torch.floataa if fpaa else torch.floataa __a = AutoencoderKL.from_pretrained( _snake_case , subfolder='''vae''' , torch_dtype=_snake_case , revision=_snake_case , ) model.to(_snake_case ).eval() return model def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 ) -> Tuple: '''simple docstring''' if torch_device == "mps": return torch.manual_seed(_snake_case ) return torch.Generator(device=_snake_case ).manual_seed(_snake_case ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 
0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> List[Any]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case ) __a = self.get_generator(_snake_case ) with torch.no_grad(): __a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample assert sample.shape == image.shape __a = sample[-1, -2:, -2:, :2].flatten().float().cpu() __a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(_snake_case , _snake_case , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Tuple: '''simple docstring''' __a = self.get_sd_vae_model(fpaa=_snake_case ) __a = self.get_sd_image(_snake_case , fpaa=_snake_case ) __a = self.get_generator(_snake_case ) with torch.no_grad(): __a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample assert sample.shape == image.shape __a = sample[-1, -2:, :2, -2:].flatten().float().cpu() __a = torch.tensor(_snake_case ) assert torch_all_close(_snake_case , _snake_case , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case ) with torch.no_grad(): __a = model(_snake_case ).sample assert sample.shape == image.shape __a = sample[-1, -2:, -2:, :2].flatten().float().cpu() __a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(_snake_case , _snake_case , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) ) with torch.no_grad(): __a = model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] __a = sample[-1, -2:, :2, -2:].flatten().cpu() __a = torch.tensor(_snake_case ) assert torch_all_close(_snake_case , _snake_case , atol=1E-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = self.get_sd_vae_model(fpaa=_snake_case ) __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case ) with torch.no_grad(): __a = 
model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] __a = sample[-1, -2:, :2, -2:].flatten().float().cpu() __a = torch.tensor(_snake_case ) assert torch_all_close(_snake_case , _snake_case , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Union[str, Any]: '''simple docstring''' __a = self.get_sd_vae_model(fpaa=_snake_case ) __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case ) with torch.no_grad(): __a = model.decode(_snake_case ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __a = model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_snake_case , _snake_case , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) ) with torch.no_grad(): __a = model.decode(_snake_case ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __a = model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_snake_case , _snake_case , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case ) __a = self.get_generator(_snake_case ) with torch.no_grad(): __a = model.encode(_snake_case ).latent_dist __a = dist.sample(generator=_snake_case ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __a = sample[0, -1, -3:, -3:].flatten().cpu() __a = torch.tensor(_snake_case ) __a = 3E-3 if torch_device != '''mps''' else 1E-2 assert torch_all_close(_snake_case , _snake_case , atol=_snake_case )
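# --- Hedged usage sketch (not part of the original tests) -------------------
# The slow tests above exercise the SD VAE's encode/decode pair; the basic
# round trip they rely on looks like this (checkpoint as used in the tests):
#
# import torch
# from diffusers import AutoencoderKL
#
# vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
# image = torch.randn(1, 3, 512, 512)
# latents = vae.encode(image).latent_dist.sample()  # (1, 4, 64, 64): 8x spatial downsample
# recon = vae.decode(latents).sample                # (1, 3, 512, 512)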
6
1
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared


# There are 2 Chains made:
# One ends with 89, with the chain member 58 being the one which, when declared first,
# gives the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain of 1 ends with 1
CHAINS[57] = False  # the chain of 58 ends with 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # n and 10 * n have the same digit-square sum, so they share a chain end.
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    # False marks chains that arrive at 89.
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
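# --- Hedged example (not part of the original file) -------------------------
# Chains are built by repeatedly summing squared digits:
# 44 -> 32 -> 13 -> 10 -> 1 (ends in 1), while 85 -> 89 enters the 89-cycle.
assert next_number(44) == 32 and next_number(32) == 13
assert next_number(85) == 89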
6
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup A : str = logging.get_logger(__name__) class __A( a ): def __init__( self , **_snake_case ) -> List[Any]: '''simple docstring''' requires_backends(self , ['''bs4'''] ) super().__init__(**_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int: '''simple docstring''' __a = [] __a = [] __a = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __a = parent.find_all(child.name , recursive=_snake_case ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_snake_case ) else next(i for i, s in enumerate(_snake_case , 1 ) if s is child ) ) __a = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]: '''simple docstring''' __a = BeautifulSoup(_snake_case , '''html.parser''' ) __a = [] __a = [] __a = [] for element in html_code.descendants: if type(_snake_case ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __a = html.unescape(_snake_case ).strip() if not text_in_this_tag: continue all_doc_strings.append(_snake_case ) __a , __a = self.xpath_soup(_snake_case ) stringaxtag_seq.append(_snake_case ) stringaxsubs_seq.append(_snake_case ) if len(_snake_case ) != len(_snake_case ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(_snake_case ) != len(_snake_case ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = '''''' for tagname, subs in zip(_snake_case , _snake_case ): xpath += F"""/{tagname}""" if subs != 0: xpath += F"""[{subs}]""" return xpath def __call__( self , _snake_case ) -> BatchFeature: '''simple docstring''' __a = False # Check that strings has a valid type if isinstance(_snake_case , _snake_case ): __a = True elif isinstance(_snake_case , (list, tuple) ): if len(_snake_case ) == 0 or isinstance(html_strings[0] , _snake_case ): __a = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' F"""but is of type {type(_snake_case )}.""" ) __a = bool(isinstance(_snake_case , (list, tuple) ) and (isinstance(html_strings[0] , _snake_case )) ) if not is_batched: __a = [html_strings] # Get nodes + xpaths __a = [] __a = [] for html_string in html_strings: __a , __a , __a = self.get_three_from_single(_snake_case ) nodes.append(_snake_case ) __a = [] for node, tag_list, sub_list in zip(_snake_case , _snake_case , _snake_case ): __a = self.construct_xpath(_snake_case , _snake_case ) xpath_strings.append(_snake_case ) xpaths.append(_snake_case ) # return as Dict __a = {'''nodes''': nodes, '''xpaths''': xpaths} __a = BatchFeature(data=_snake_case , tensor_type=_snake_case ) return encoded_inputs
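# --- Hedged usage sketch (not part of the original file) --------------------
# The extractor walks an HTML tree with BeautifulSoup and emits every text
# node together with its XPath. Assuming the class above is exposed as
# transformers' MarkupLMFeatureExtractor (requires bs4):
if __name__ == "__main__":
    from transformers import MarkupLMFeatureExtractor

    feature_extractor = MarkupLMFeatureExtractor()
    encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
    print(encoding["nodes"])   # [['Hello world']]
    print(encoding["xpaths"])  # [['/html/body/p']]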
6
1
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new('RGB', (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save('out.gif', save_all=True, append_images=images[1:])
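# --- Hedged example (not part of the original file) -------------------------
# A blinker oscillates with period 2: the vertical bar turns horizontal after
# one generation and returns after two.
assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert new_generation(new_generation(BLINKER)) == BLINKER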
6
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler('hello', 'world'))
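# --- Hedged example (not part of the original file) -------------------------
# Jaro-Winkler adds a prefix bonus of 0.1 per shared leading character (up to
# 4) on top of the Jaro similarity, so identical strings score 1.0 while
# "hello"/"world" only match on the 'l' and score about 0.467.
assert jaro_winkler("hello", "hello") == 1.0
assert round(jaro_winkler("hello", "world"), 3) == 0.467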
6
1
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.

For more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).

The abbreviation FSMT stands for FairSeqMachineTranslation.

All four models are available:

* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded)  # {texts[tgt_lang]}
```

#### Limitations and bias

- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)

## Training data

Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).

## Eval results

pair   | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}

The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:

- model ensemble, therefore the best performing checkpoint was ported (`model4.pt`).
- re-ranking

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```

note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.

## Data Sources

- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)

### BibTeX entry and citation info

```bibtex
@inproceedings{{...,
  year={{2020}},
  title={{Facebook FAIR's WMT19 News Translation Task Submission}},
  author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
  booktitle={{Proc. of WMT}},
}}
```

## TODO

- port model ensemble (fairseq uses 4 model checkpoints)

"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
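# --- Usage sketch (not part of the original script) ---
# Running the script as-is regenerates all four cards under model_cards/facebook/.
# A single card can also be written directly; the output directory below is a
# hypothetical example path:
#
#   write_model_card("/tmp/model_cards/facebook/wmt19-en-ru", src_lang="en", tgt_lang="ru")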
6
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
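# --- Quick checks (not part of the original file), assuming is_balanced above ---
assert is_balanced("([{}])")      # properly nested pairs
assert is_balanced("a(b)c")       # non-bracket characters are ignored
assert not is_balanced("([)]")    # interleaved pairs are rejected
assert not is_balanced("(")       # an unclosed bracket leaves the stack non-empty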
6
1
import re import string import numpy as np import datasets A : Union[str, Any] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' A : int = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' A : List[str] = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A( datasets.Metric ): def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , reference_urls=[] , ) def 
SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case=None , _snake_case=False , _snake_case=False , _snake_case=False , ) -> List[Any]: '''simple docstring''' if regexes_to_ignore is not None: for s in regexes_to_ignore: __a = np.array([re.sub(_snake_case , '''''' , _snake_case ) for x in predictions] ) __a = np.array([re.sub(_snake_case , '''''' , _snake_case ) for x in references] ) else: __a = np.asarray(_snake_case ) __a = np.asarray(_snake_case ) if ignore_case: __a = np.char.lower(_snake_case ) __a = np.char.lower(_snake_case ) if ignore_punctuation: __a = string.punctuation.maketrans('''''' , '''''' , string.punctuation ) __a = np.char.translate(_snake_case , table=_snake_case ) __a = np.char.translate(_snake_case , table=_snake_case ) if ignore_numbers: __a = string.digits.maketrans('''''' , '''''' , string.digits ) __a = np.char.translate(_snake_case , table=_snake_case ) __a = np.char.translate(_snake_case , table=_snake_case ) __a = predictions == references return {"exact_match": np.mean(_snake_case ) * 100}
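# --- Usage sketch (not part of the original file) ---
# Exercising the metric through `datasets`, as in the docstring examples above.
# With case and punctuation ignored, only "theater" and "YELLING"/"yelling"
# match, so the rate is 50.0. (In newer releases this metric lives in the
# separate `evaluate` library; the call below follows the docstring.)
import datasets

exact_match = datasets.load_metric("exact_match")
refs = ["the cat", "theater", "YELLING", "agent007"]
preds = ["cat?", "theater", "yelling", "agent"]
results = exact_match.compute(
    references=refs, predictions=preds, ignore_case=True, ignore_punctuation=True
)
print(round(results["exact_match"], 1))  # 50.0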
6
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A : str = { 'configuration_blenderbot': [ 'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotConfig', 'BlenderbotOnnxConfig', ], 'tokenization_blenderbot': ['BlenderbotTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = ['BlenderbotTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = [ 'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotForCausalLM', 'BlenderbotForConditionalGeneration', 'BlenderbotModel', 'BlenderbotPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Dict = [ 'TFBlenderbotForConditionalGeneration', 'TFBlenderbotModel', 'TFBlenderbotPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Dict = [ 'FlaxBlenderbotForConditionalGeneration', 'FlaxBlenderbotModel', 'FlaxBlenderbotPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
1
from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def __lowerCAmelCase ( ) -> Dict: __a = { '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''], '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''], '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7], } __a = Dataset.from_dict(a__ ) return dataset class __A( a ): def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = get_dataset() __a = make_duplicate_clusters(_snake_case , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = get_dataset() __a , __a = deduplicate_dataset(_snake_case ) self.assertEqual(len(_snake_case ) , 2 ) print(_snake_case ) self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 ) self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , _snake_case )
6
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A : Dict = { 'configuration_xlm_roberta': [ 'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMRobertaConfig', 'XLMRobertaOnnxConfig', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Union[str, Any] = ['XLMRobertaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = ['XLMRobertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[Any] = [ 'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMRobertaForCausalLM', 'XLMRobertaForMaskedLM', 'XLMRobertaForMultipleChoice', 'XLMRobertaForQuestionAnswering', 'XLMRobertaForSequenceClassification', 'XLMRobertaForTokenClassification', 'XLMRobertaModel', 'XLMRobertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = [ 'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLMRobertaForCausalLM', 'TFXLMRobertaForMaskedLM', 'TFXLMRobertaForMultipleChoice', 'TFXLMRobertaForQuestionAnswering', 'TFXLMRobertaForSequenceClassification', 'TFXLMRobertaForTokenClassification', 'TFXLMRobertaModel', 'TFXLMRobertaPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = [ 'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'FlaxXLMRobertaForMaskedLM', 'FlaxXLMRobertaForCausalLM', 'FlaxXLMRobertaForMultipleChoice', 'FlaxXLMRobertaForQuestionAnswering', 'FlaxXLMRobertaForSequenceClassification', 'FlaxXLMRobertaForTokenClassification', 'FlaxXLMRobertaModel', 'FlaxXLMRobertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from 
.modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
1
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = tempfile.mkdtemp() # fmt: off __a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on __a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __a = { '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } __a = os.path.join(self.tmpdirname , _snake_case ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_snake_case , _snake_case ) def SCREAMING_SNAKE_CASE_ ( self , **_snake_case ) -> Any: '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , **_snake_case ) -> Optional[int]: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __a = [Image.fromarray(np.moveaxis(_snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a = self.get_tokenizer() __a = self.get_image_processor() __a = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case ) processor.save_pretrained(self.tmpdirname ) __a = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __a = self.get_image_processor(do_normalize=_snake_case , padding_value=1.0 ) __a = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_snake_case , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , 
image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case ) __a = self.prepare_image_inputs() __a = image_processor(_snake_case , return_tensors='''np''' ) __a = processor(images=_snake_case , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case ) __a = '''lower newer''' __a = processor(text=_snake_case ) __a = tokenizer(_snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case ) __a = '''lower newer''' __a = self.prepare_image_inputs() __a = processor(text=_snake_case , images=_snake_case ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(_snake_case ): processor() def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case ) __a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __a = processor.batch_decode(_snake_case ) __a = tokenizer.batch_decode(_snake_case ) self.assertListEqual(_snake_case , _snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case ) __a = '''lower newer''' __a = self.prepare_image_inputs() __a = processor(text=_snake_case , images=_snake_case ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
6
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A : Optional[int] = { 'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'], 'feature_extraction_whisper': ['WhisperFeatureExtractor'], 'processing_whisper': ['WhisperProcessor'], 'tokenization_whisper': ['WhisperTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = ['WhisperTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : str = [ 'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel', 'WhisperForAudioClassification', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = [ 'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = [ 'FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class __A( a ): snake_case_ = '''facebook/bart-large-mnli''' snake_case_ = ( '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which ''' '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. ''' '''It returns the most likely label in the list of provided `labels` for the input text.''' ) snake_case_ = '''text_classifier''' snake_case_ = AutoTokenizer snake_case_ = AutoModelForSequenceClassification snake_case_ = ['''text''', ['''text''']] snake_case_ = ['''text'''] def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' super().setup() __a = self.model.config __a = -1 for idx, label in config.idalabel.items(): if label.lower().startswith('''entail''' ): __a = int(_snake_case ) if self.entailment_id == -1: raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Tuple: '''simple docstring''' __a = labels return self.pre_processor( [text] * len(_snake_case ) , [F"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> str: '''simple docstring''' __a = outputs.logits __a = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
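# --- Usage sketch (not part of the original file) ---
# The tool wraps zero-shot NLI classification: every candidate label is scored
# by the entailment logit for the hypothesis "This example is <label>", and the
# best-scoring label is returned. A hedged example, assuming the class is
# exported as TextClassificationTool in transformers.tools (the input text and
# label set below are arbitrary):
#
#   from transformers.tools import TextClassificationTool
#
#   classifier = TextClassificationTool()
#   print(classifier("This new restaurant is fantastic!", ["positive", "negative"]))
#   # expected: "positive"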
6
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works

from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
6
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer A : Any = logging.get_logger(__name__) A : Tuple = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} A : List[Any] = { 'vocab_file': { 'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt', 'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt', 'junnyu/roformer_chinese_char_small': ( 'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt' ), 'junnyu/roformer_chinese_char_base': ( 'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt' ), 'junnyu/roformer_small_discriminator': ( 'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt' ), 'junnyu/roformer_small_generator': ( 'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt' ), } } A : str = { 'junnyu/roformer_chinese_small': 1_5_3_6, 'junnyu/roformer_chinese_base': 1_5_3_6, 'junnyu/roformer_chinese_char_small': 5_1_2, 'junnyu/roformer_chinese_char_base': 5_1_2, 'junnyu/roformer_small_discriminator': 1_2_8, 'junnyu/roformer_small_generator': 1_2_8, } A : Tuple = { 'junnyu/roformer_chinese_small': {'do_lower_case': True}, 'junnyu/roformer_chinese_base': {'do_lower_case': True}, 'junnyu/roformer_chinese_char_small': {'do_lower_case': True}, 'junnyu/roformer_chinese_char_base': {'do_lower_case': True}, 'junnyu/roformer_small_discriminator': {'do_lower_case': True}, 'junnyu/roformer_small_generator': {'do_lower_case': True}, } class __A( a ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = PRETRAINED_INIT_CONFIGURATION snake_case_ = RoFormerTokenizer def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ) -> List[Any]: '''simple docstring''' super().__init__( _snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , ) __a = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get('''lowercase''' , _snake_case ) != do_lower_case or pre_tok_state.get('''strip_accents''' , _snake_case ) != strip_accents ): __a = getattr(_snake_case , pre_tok_state.pop('''type''' ) ) __a = do_lower_case __a = strip_accents __a = pre_tok_class(**_snake_case ) __a = do_lower_case def __getstate__( self ) -> Tuple: '''simple docstring''' __a = self.__dict__.copy() __a = BertPreTokenizer() return state def __setstate__( self , _snake_case ) -> Any: '''simple docstring''' __a = d __a = self.__dict__['''_tokenizer'''].get_vocab() __a = PreTokenizer.custom(JiebaPreTokenizer(_snake_case ) ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=None ) -> List[str]: '''simple docstring''' __a = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += 
token_ids_a + [self.sep_token_id] return output def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> List[int]: '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> Tuple[str]: '''simple docstring''' __a = self._tokenizer.model.save(_snake_case , name=_snake_case ) return tuple(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=None , _snake_case=None , _snake_case=False , **_snake_case , ) -> Any: '''simple docstring''' __a = BertPreTokenizer() return super().save_pretrained(_snake_case , _snake_case , _snake_case , _snake_case , **_snake_case )
6
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=a ) class __A( a ): snake_case_ = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) snake_case_ = Features({'''text''': Value('''string''' )} ) snake_case_ = Features({} ) snake_case_ = "text" @property def SCREAMING_SNAKE_CASE_ ( self ) -> Dict[str, str]: '''simple docstring''' return {self.text_column: "text"}
6
1
"""
Project Euler Problem 205: Peter rolls nine 4-sided dice, Colin rolls six
6-sided dice; find the probability, rounded to seven decimal places, that
Peter's total beats Colin's.
"""
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Frequency of each possible total when rolling `dice_number` dice."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
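# --- Worked example (not part of the original file), assuming the functions above ---
# For two 2-sided dice the totals 2, 3, 4 occur with frequencies 1, 2, 1
# (the list is indexed by total, so positions 0 and 1 stay zero):
assert total_frequency_distribution(sides_number=2, dice_number=2) == [0, 0, 1, 2, 1]
# Project Euler 205 answer, rounded to seven digits:
print(solution())  # 0.5731441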
6
import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def __lowerCAmelCase ( a__ , a__ , a__=1024 , a__=1024 , a__=False , **a__ ) -> Optional[Any]: __a = AutoTokenizer.from_pretrained(a__ ) __a = SeqaSeqDataset(a__ , a__ , a__ , a__ , type_path='''train''' , **a__ ) __a = tok.pad_token_id def get_lens(a__ ): __a = tqdm( DataLoader(a__ , batch_size=512 , num_workers=8 , shuffle=a__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , ) __a = [] for batch in dl: __a = batch['''input_ids'''].ne(a__ ).sum(1 ).tolist() __a = batch['''labels'''].ne(a__ ).sum(1 ).tolist() if consider_target: for src, tgt in zip(a__ , a__ ): max_lens.append(max(a__ , a__ ) ) else: max_lens.extend(a__ ) return max_lens __a = get_lens(a__ ) __a = SeqaSeqDataset(a__ , a__ , a__ , a__ , type_path='''val''' , **a__ ) __a = get_lens(a__ ) pickle_save(a__ , train_ds.len_file ) pickle_save(a__ , val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
6
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A : Any = { 'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'], 'tokenization_xlm': ['XLMTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Dict = [ 'XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMForMultipleChoice', 'XLMForQuestionAnswering', 'XLMForQuestionAnsweringSimple', 'XLMForSequenceClassification', 'XLMForTokenClassification', 'XLMModel', 'XLMPreTrainedModel', 'XLMWithLMHeadModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Any = [ 'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLMForMultipleChoice', 'TFXLMForQuestionAnsweringSimple', 'TFXLMForSequenceClassification', 'TFXLMForTokenClassification', 'TFXLMMainLayer', 'TFXLMModel', 'TFXLMPreTrainedModel', 'TFXLMWithLMHeadModel', ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys A : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = (1 - _cos) / 2 __a = 1 - _cos __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = (1 + _cos) / 2 __a = -1 - _cos __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = _sin / 2 __a = 0 __a = -ba __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 1 - alpha __a = -2 * _cos __a = 1 + alpha __a = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = 1 + alpha * big_a __a = -2 * _cos __a = 1 - alpha * big_a __a = 1 + alpha / big_a __a = -2 * _cos __a = 1 - alpha / big_a __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = (big_a + 1) - (big_a - 1) * _cos __a = (big_a + 1) + (big_a - 1) * _cos __a = (big_a - 1) - (big_a + 1) * _cos __a = (big_a - 1) + (big_a + 1) * _cos __a = 2 * sqrt(a__ ) * alpha __a = big_a * (pmc + aaa) __a = 2 * big_a * mpc __a = big_a * (pmc - aaa) __a = ppmc + aaa __a = -2 * pmpc __a = ppmc - aaa __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = (big_a + 1) - (big_a - 1) * _cos __a = (big_a + 1) + (big_a - 1) * _cos __a = (big_a - 1) - (big_a + 1) * _cos __a = (big_a - 1) + (big_a + 1) * _cos __a = 2 * sqrt(a__ ) * alpha __a = big_a * (ppmc + aaa) __a = -2 * big_a * pmpc __a = big_a * (ppmc - aaa) __a = pmc + aaa __a = 2 * mpc __a = pmc - aaa __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
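# --- Usage sketch (not part of the original file) ---
# Each factory above maps (frequency, sample rate, Q[, gain]) to biquad
# coefficients per the Audio EQ Cookbook and loads them into a 2nd-order
# IIRFilter. A hedged example, assuming the first factory is named
# make_lowpass (as in the reference implementation this file mirrors) and
# that IIRFilter exposes a per-sample process() method:
#
#   filt = make_lowpass(frequency=1000, samplerate=48000)
#   filtered = [filt.process(sample) for sample in (0.0, 1.0, 0.5, -0.5)]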
6
1
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class __A( a ): snake_case_ = (DDPMScheduler,) def SCREAMING_SNAKE_CASE_ ( self , **_snake_case ) -> Any: '''simple docstring''' __a = { '''num_train_timesteps''': 1_000, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**_snake_case ) return config def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' self.check_over_configs(thresholding=_snake_case ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**_snake_case ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**_snake_case ) __a = len(_snake_case ) __a = self.dummy_model() __a = self.dummy_sample_deter __a = torch.manual_seed(0 ) for t in reversed(range(_snake_case ) ): # 1. predict noise residual __a = model(_snake_case , _snake_case ) # 2. 
predict previous mean of sample x_t-1 __a = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __a = pred_prev_sample __a = torch.sum(torch.abs(_snake_case ) ) __a = torch.mean(torch.abs(_snake_case ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config(prediction_type='''v_prediction''' ) __a = scheduler_class(**_snake_case ) __a = len(_snake_case ) __a = self.dummy_model() __a = self.dummy_sample_deter __a = torch.manual_seed(0 ) for t in reversed(range(_snake_case ) ): # 1. predict noise residual __a = model(_snake_case , _snake_case ) # 2. predict previous mean of sample x_t-1 __a = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __a = pred_prev_sample __a = torch.sum(torch.abs(_snake_case ) ) __a = torch.mean(torch.abs(_snake_case ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**_snake_case ) __a = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_snake_case ) __a = scheduler.timesteps for i, timestep in enumerate(_snake_case ): if i == len(_snake_case ) - 1: __a = -1 else: __a = timesteps[i + 1] __a = scheduler.previous_timestep(_snake_case ) __a = prev_t.item() self.assertEqual(_snake_case , _snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**_snake_case ) __a = [100, 87, 50, 51, 0] with self.assertRaises(_snake_case , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**_snake_case ) __a = [100, 87, 50, 1, 0] __a = len(_snake_case ) with self.assertRaises(_snake_case , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=_snake_case , timesteps=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**_snake_case ) __a = [scheduler.config.num_train_timesteps] with self.assertRaises( _snake_case , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=_snake_case )
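# --- Usage sketch (not part of the original test file) ---
# The denoising loop the tests above exercise, in miniature: a (stub) model
# predicts the noise residual and DDPMScheduler.step moves the sample one
# timestep closer to t=0. The zero-returning stub stands in for a real UNet.
import torch
from diffusers import DDPMScheduler


def unet_stub(sample, timestep):
    # placeholder for a real noise-prediction network
    return torch.zeros_like(sample)


scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = unet_stub(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample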
6
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate a polynomial at x0 using Neville's method.
    Returns the interpolated value and the full table of intermediate values.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
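# --- Worked example (not part of the original file), assuming neville_interpolate above ---
# The points below lie on the line y = x + 5, so evaluating at x = 5 must give
# 10 exactly; the second return value is the full Neville table.
value, table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
print(value)  # 10.0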
6
1
import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __A( a , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class __A( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = ort.SessionOptions() __a = False return options def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __a = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_snake_case ) __a = '''A red cat sitting on a park bench''' __a = np.random.RandomState(0 ) __a = pipe( prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=10 , generator=_snake_case , output_type='''np''' , ) __a = output.images __a = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) __a = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __a = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) __a = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_snake_case , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_snake_case ) __a = '''A red cat sitting on a park bench''' __a = np.random.RandomState(0 ) __a = pipe( prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=20 , generator=_snake_case , output_type='''np''' , ) __a = output.images __a = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) __a = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
6
from __future__ import annotations

import time
from collections.abc import Sequence
from random import randint

from matplotlib import pyplot as plt


def max_subarray(
    arr: Sequence[float], low: int, high: int
) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(
    arr: Sequence[float], low: int, mid: int, high: int
) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
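# --- Worked example (not part of the original file), assuming max_subarray above ---
# The classic CLRS instance: the maximum subarray is arr[3:7] = [4, -1, 2, 1]
# with sum 6, so the function returns its bounds and total.
arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(max_subarray(arr, 0, len(arr) - 1))  # (3, 6, 6)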
6
1
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = (1 - _cos) / 2 __a = 1 - _cos __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = (1 + _cos) / 2 __a = -1 - _cos __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = _sin / 2 __a = 0 __a = -ba __a = 1 + alpha __a = -2 * _cos __a = 1 - alpha __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ = 1 / sqrt(2 ) ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 1 - alpha __a = -2 * _cos __a = 1 + alpha __a = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = 1 + alpha * big_a __a = -2 * _cos __a = 1 - alpha * big_a __a = 1 + alpha / big_a __a = -2 * _cos __a = 1 - alpha / big_a __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = (big_a + 1) - (big_a - 1) * _cos __a = (big_a + 1) + (big_a - 1) * _cos __a = (big_a - 1) - (big_a + 1) * _cos __a = (big_a - 1) + (big_a + 1) * _cos __a = 2 * sqrt(a__ ) * alpha __a = big_a * (pmc + aaa) __a = 2 * big_a * mpc __a = big_a * (pmc - aaa) __a = ppmc + aaa __a = -2 * pmpc __a = ppmc - aaa __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ) -> IIRFilter: __a = tau * frequency / samplerate __a = sin(a__ ) __a = cos(a__ ) __a = _sin / (2 * q_factor) __a = 10 ** (gain_db / 40) __a = (big_a + 1) - (big_a - 1) * _cos __a = (big_a + 1) + (big_a - 1) * _cos __a = (big_a - 1) - (big_a + 1) * _cos __a = (big_a - 1) + (big_a + 1) * _cos __a = 2 * sqrt(a__ ) * alpha __a = big_a * (ppmc + aaa) __a = -2 * big_a * pmpc __a = big_a * (ppmc - aaa) __a = pmc + aaa __a = 2 * mpc __a = pmc - aaa __a = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
6
import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __A( a , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class __A( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = ort.SessionOptions() __a = False return options def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __a = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_snake_case ) __a = '''A red cat sitting on a park bench''' __a = np.random.RandomState(0 ) __a = pipe( prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=10 , generator=_snake_case , output_type='''np''' , ) __a = output.images __a = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) __a = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __a = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) __a = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_snake_case , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_snake_case ) __a = '''A red cat sitting on a park bench''' __a = np.random.RandomState(0 ) __a = pipe( prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=20 , generator=_snake_case , output_type='''np''' , ) __a = output.images __a = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) __a = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
6
1
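The biquad factories in the snippet above lose their distinct names to the dataset's identifier mangling (every function becomes __lowerCAmelCase, every local collapses to __a), so only the last definition would survive if the file were executed. A minimal usage sketch against the un-mangled upstream audio_filters API; the names make_lowpass, a_coeffs, and b_coeffs are assumptions taken from the original package, not from this record:

# Sketch only: assumes the original (pre-mangling) audio_filters package API.
from audio_filters.butterworth_filter import make_lowpass  # assumed upstream name

filt = make_lowpass(frequency=1_000, samplerate=48_000)  # 2nd-order low-pass biquad
print(filt.a_coeffs, filt.b_coeffs)  # feedback / feedforward coefficients (assumed attrs)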
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


A : Optional[Any] = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))

import dataclasses  # noqa
import io  # noqa
import itertools  # noqa
import json  # noqa
import os  # noqa
import unittest  # noqa
from copy import deepcopy  # noqa

from parameterized import parameterized  # noqa

from transformers import TrainingArguments, is_torch_available  # noqa
from transformers.deepspeed import is_deepspeed_available  # noqa
from transformers.file_utils import WEIGHTS_NAME  # noqa
from transformers.testing_utils import (  # noqa
    CaptureLogger,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import set_seed  # noqa


set_seed(4_2)

A : List[str] = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}

A : Optional[int] = 'zero2'
A : str = 'zero3'
A : Tuple = [ZEROa, ZEROa]


def __lowerCAmelCase ( a__ , a__ , a__ ) -> Tuple:
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    __a = parameterized.to_safe_name('''_'''.join(str(a__ ) for x in param.args ) )
    return F"""{func.__name__}_{param_based_name}"""


# Cartesian-product of zero stages with models to test
A : Union[str, Any] = list(itertools.product(stages, models.keys()))


@slow
@require_deepspeed
@require_torch_gpu
class __A( a ):
    @parameterized.expand(_snake_case , name_func=_snake_case )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Any:
        '''simple docstring'''
        self.run_and_check(
            stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )

    @require_torch_multi_gpu
    @parameterized.expand(_snake_case , name_func=_snake_case )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> int:
        '''simple docstring'''
        self.run_and_check(
            stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )

    @parameterized.expand(_snake_case , name_func=_snake_case )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> str:
        '''simple docstring'''
        self.run_and_check(
            stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )

    @require_torch_multi_gpu
    @parameterized.expand(_snake_case , name_func=_snake_case )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]:
        '''simple docstring'''
        self.run_and_check(
            stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Tuple:
        '''simple docstring'''
        pass

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 10 , _snake_case = True , _snake_case = True , _snake_case = True , ) -> Any:
        '''simple docstring'''
        __a = models[model]
        __a = self.run_trainer(
            stage=_snake_case , model_name=_snake_case , eval_steps=_snake_case , num_train_epochs=1 , distributed=_snake_case , fpaa=_snake_case , )
        self.do_checks(_snake_case )
        return output_dir

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 10 , _snake_case = 1 , _snake_case = True , _snake_case = True , ) -> Union[str, Any]:
        '''simple docstring'''
        __a = self.get_auto_remove_tmp_dir('''./xxx''' , after=_snake_case )
        __a = F"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(_snake_case )}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fpaa:
            args.extend(['''--fp16'''] )

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        __a = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
        __a = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
        __a = self.get_launcher(_snake_case )

        __a = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(_snake_case , env=self.get_env() )

        return output_dir

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case=False ) -> List[str]:
        '''simple docstring'''
        __a = min(2 , get_gpu_count() ) if distributed else 1
        return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
6
from math import ceil


def __lowerCAmelCase ( a__ = 1001 ) -> int:
    __a = 1

    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        __a = 2 * i + 1
        __a = 2 * i
        __a = total + 4 * odd**2 - 6 * even

    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            A : List[Any] = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number')
6
1
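The diagonal-sum formula in the spiral snippet above (total += 4*(2i+1)**2 - 6*(2i) per ring) is easy to sanity-check by hand; a small un-mangled restatement, with the helper name chosen here purely for illustration:

from math import ceil

def spiral_diagonal_sum(n: int) -> int:  # hypothetical name, restates the snippet's loop
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        total += 4 * (2 * i + 1) ** 2 - 6 * (2 * i)
    return total

# 5x5 spiral diagonals: 1 + (3+5+7+9) + (13+17+21+25) = 101
assert spiral_diagonal_sum(5) == 101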
import argparse
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
A : Tuple = 'src/diffusers'

# Matches is_xxx_available()
A : Any = re.compile(R'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
A : Optional[int] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')

A : List[Any] = '\n{0} = None\n'

A : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'

A : Union[str, Any] = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'


def __lowerCAmelCase ( a__ ) -> Dict:
    __a = _re_backend.findall(a__ )
    if len(a__ ) == 0:
        return None
    return "_and_".join(a__ )


def __lowerCAmelCase ( ) -> Dict:
    with open(os.path.join(a__ , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        __a = f.readlines()

    # Get to the point we do the actual imports for type checking
    __a = 0
    __a = {}
    # Go through the end of the file
    while line_index < len(a__ ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        __a = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('''else:''' ):
                line_index += 1
            line_index += 1
            __a = []
            # Until we unindent, add backend objects to the list
            while line_index < len(a__ ) and len(lines[line_index] ) > 1:
                __a = lines[line_index]
                __a = _re_single_line_import.search(a__ )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1

            if len(a__ ) > 0:
                __a = objects
        else:
            line_index += 1

    return backend_specific_objects


def __lowerCAmelCase ( a__ , a__ ) -> Tuple:
    if name.isupper():
        return DUMMY_CONSTANT.format(a__ )
    elif name.islower():
        return DUMMY_FUNCTION.format(a__ , a__ )
    else:
        return DUMMY_CLASS.format(a__ , a__ )


def __lowerCAmelCase ( a__=None ) -> Dict:
    if backend_specific_objects is None:
        __a = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    __a = {}

    for backend, objects in backend_specific_objects.items():
        __a = '''[''' + ''', '''.join(F"""\"{b}\"""" for b in backend.split('''_and_''' ) ) + ''']'''
        __a = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(a__ , a__ ) for o in objects] )
        __a = dummy_file

    return dummy_files


def __lowerCAmelCase ( a__=False ) -> Union[str, Any]:
    __a = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    __a = {'''torch''': '''pt'''}

    # Locate actual dummy modules and read their content.
    __a = os.path.join(a__ , '''utils''' )
    __a = {
        backend: os.path.join(a__ , F"""dummy_{short_names.get(a__ , a__ )}_objects.py""" )
        for backend in dummy_files.keys()
    }

    __a = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(a__ ):
            with open(a__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                __a = f.read()
        else:
            __a = ''''''

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F"""Updating diffusers.utils.dummy_{short_names.get(a__ , a__ )}_objects.py as the main """
                    '''__init__ has new objects.''' )
                with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    '''The main __init__ has objects that are not present in '''
                    F"""diffusers.utils.dummy_{short_names.get(a__ , a__ )}_objects.py. Run `make fix-copies` """
                    '''to fix this.''' )


if __name__ == "__main__":
    A : List[str] = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    A : List[Any] = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
6
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class __A( a ):
    snake_case_ = ['''image_processor''', '''tokenizer''']
    snake_case_ = '''ChineseCLIPImageProcessor'''
    snake_case_ = ('''BertTokenizer''', '''BertTokenizerFast''')

    def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> Tuple:
        '''simple docstring'''
        __a = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , _snake_case , )
            __a = kwargs.pop('''feature_extractor''' )

        __a = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        super().__init__(_snake_case , _snake_case )
        __a = self.image_processor

    def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Optional[Any]:
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )

        if text is not None:
            __a = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )

        if images is not None:
            __a = self.image_processor(_snake_case , return_tensors=_snake_case , **_snake_case )

        if text is not None and images is not None:
            __a = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**_snake_case ) , tensor_type=_snake_case )

    def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> str:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*_snake_case , **_snake_case )

    def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> Dict:
        '''simple docstring'''
        return self.tokenizer.decode(*_snake_case , **_snake_case )

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
        '''simple docstring'''
        __a = self.tokenizer.model_input_names
        __a = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _snake_case , )
        return self.image_processor_class
6
1
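The processor class above is transformers' ChineseCLIPProcessor; a short, hedged usage sketch (the checkpoint name is illustrative, and the blank image stands in for real input):

from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
inputs = processor(text=["一只猫"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
print(inputs.keys())  # token ids plus pixel_values, merged as in __call__ above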
from __future__ import annotations

import math


def __lowerCAmelCase ( a__ ) -> list[int]:
    if num <= 0:
        __a = F"""{num}: Invalid input, please enter a positive integer."""
        raise ValueError(a__ )

    __a = [True] * (num + 1)
    __a = []
    __a = 2
    __a = int(math.sqrt(a__ ) )

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(a__ )

            # Set multiples of start be False
            for i in range(start * start , num + 1 , a__ ):
                if sieve[i] is True:
                    __a = False
        start += 1

    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(a__ )

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input('Enter a positive integer: ').strip())))
6
from __future__ import annotations

import typing
from collections import Counter


def __lowerCAmelCase ( a__ ) -> typing.Counter[int]:
    __a = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(a__ , max_perimeter + 1 ):
            __a = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(a__ ):
                __a = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def __lowerCAmelCase ( a__ = 1000 ) -> int:
    __a = pythagorean_triple(a__ )
    return triplets.most_common(1 )[0][0]


if __name__ == "__main__":
    print(F"Perimeter {solution()} has maximum solutions")
6
1
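A quick check of the sieve in the record above, restated under an illustrative name since the snippet's def is mangled to __lowerCAmelCase while its __main__ guard still calls prime_sieve:

import math

def prime_sieve(num: int) -> list[int]:  # illustrative restatement of the snippet's logic
    sieve = [True] * (num + 1)
    primes, start, end = [], 2, int(math.sqrt(num))
    while start <= end:
        if sieve[start]:
            primes.append(start)
            for i in range(start * start, num + 1, start):
                sieve[i] = False  # mark multiples of each found prime
        start += 1
    primes.extend(j for j in range(end + 1, num + 1) if sieve[j])
    return primes

assert prime_sieve(25) == [2, 3, 5, 7, 11, 13, 17, 19, 23]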
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


A : Tuple = logging.get_logger(__name__)

A : str = {
    'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
    'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
    'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
    'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class __A( a ):
    snake_case_ = '''mobilenet_v2'''

    def __init__( self , _snake_case=3 , _snake_case=224 , _snake_case=1.0 , _snake_case=8 , _snake_case=8 , _snake_case=6 , _snake_case=32 , _snake_case=True , _snake_case=True , _snake_case="relu6" , _snake_case=True , _snake_case=0.8 , _snake_case=0.02 , _snake_case=0.001 , _snake_case=255 , **_snake_case , ) -> Optional[Any]:
        '''simple docstring'''
        super().__init__(**_snake_case )

        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''' )

        __a = num_channels
        __a = image_size
        __a = depth_multiplier
        __a = depth_divisible_by
        __a = min_depth
        __a = expand_ratio
        __a = output_stride
        __a = first_layer_is_expansion
        __a = finegrained_output
        __a = hidden_act
        __a = tf_padding
        __a = classifier_dropout_prob
        __a = initializer_range
        __a = layer_norm_eps
        __a = semantic_loss_ignore_index


class __A( a ):
    snake_case_ = version.parse('''1.11''' )

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict([('''pixel_values''', {0: '''batch'''})] )

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([('''logits''', {0: '''batch'''})] )
        else:
            return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> float:
        '''simple docstring'''
        return 1E-4
6
# flake8: noqa
# Lint as: python3

A : Optional[Any] = [
    'VerificationMode',
    'Version',
    'disable_progress_bar',
    'enable_progress_bar',
    'is_progress_bar_enabled',
    'experimental',
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
6
1
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


A : str = logging.get_logger(__name__)

A : Tuple = {
    'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class __A( a ):
    snake_case_ = '''marian'''
    snake_case_ = ['''past_key_values''']
    snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__( self , _snake_case=58_101 , _snake_case=None , _snake_case=1_024 , _snake_case=12 , _snake_case=4_096 , _snake_case=16 , _snake_case=12 , _snake_case=4_096 , _snake_case=16 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=True , _snake_case=True , _snake_case="gelu" , _snake_case=1_024 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=58_100 , _snake_case=False , _snake_case=58_100 , _snake_case=0 , _snake_case=0 , _snake_case=True , **_snake_case , ) -> List[str]:
        '''simple docstring'''
        __a = vocab_size
        __a = decoder_vocab_size or vocab_size
        __a = max_position_embeddings
        __a = d_model
        __a = encoder_ffn_dim
        __a = encoder_layers
        __a = encoder_attention_heads
        __a = decoder_ffn_dim
        __a = decoder_layers
        __a = decoder_attention_heads
        __a = dropout
        __a = attention_dropout
        __a = activation_dropout
        __a = activation_function
        __a = init_std
        __a = encoder_layerdrop
        __a = decoder_layerdrop
        __a = use_cache
        __a = encoder_layers
        __a = scale_embedding  # scale factor will be sqrt(d_model) if True
        __a = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , decoder_start_token_id=_snake_case , forced_eos_token_id=_snake_case , **_snake_case , )


class __A( a ):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            __a = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )

            if self.use_past:
                __a = {0: '''batch'''}
                __a = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                __a = {0: '''batch''', 1: '''decoder_sequence'''}
                __a = {0: '''batch''', 1: '''decoder_sequence'''}

            if self.use_past:
                self.fill_with_past_key_values_(_snake_case , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            __a = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                __a , __a = self.num_layers
                for i in range(_snake_case ):
                    __a = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    __a = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            __a = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            __a = super().outputs
        else:
            __a = super(_snake_case , self ).outputs
            if self.use_past:
                __a , __a = self.num_layers
                for i in range(_snake_case ):
                    __a = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    __a = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        __a = self._generate_dummy_inputs_for_encoder_and_decoder(
            _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )

        # Generate decoder inputs
        __a = seq_length if not self.use_past else 1
        __a = self._generate_dummy_inputs_for_encoder_and_decoder(
            _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
        __a = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        __a = dict(**_snake_case , **_snake_case )

        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            __a , __a = common_inputs['''input_ids'''].shape
            __a = common_inputs['''decoder_input_ids'''].shape[1]
            __a , __a = self.num_attention_heads
            __a = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            __a = decoder_seq_length + 3
            __a = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            __a = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(_snake_case , _snake_case )] , dim=1 )

            __a = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            __a , __a = self.num_layers
            __a = min(_snake_case , _snake_case )
            __a = max(_snake_case , _snake_case ) - min_num_layers
            __a = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''

            for _ in range(_snake_case ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(_snake_case ),
                        torch.zeros(_snake_case ),
                        torch.zeros(_snake_case ),
                        torch.zeros(_snake_case ),
                    ) )
            # TODO: test this.
            __a = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(_snake_case , _snake_case ):
                common_inputs["past_key_values"].append((torch.zeros(_snake_case ), torch.zeros(_snake_case )) )
        return common_inputs

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        __a = self._generate_dummy_inputs_for_encoder_and_decoder(
            _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )

        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            __a , __a = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            __a = seqlen + 2
            __a , __a = self.num_layers
            __a , __a = self.num_attention_heads
            __a = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            __a = common_inputs['''attention_mask'''].dtype
            __a = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(_snake_case , _snake_case , dtype=_snake_case )] , dim=1 )
            __a = [
                (torch.zeros(_snake_case ), torch.zeros(_snake_case )) for _ in range(_snake_case )
            ]
        return common_inputs

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        __a = compute_effective_axis_dimension(
            _snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        __a = tokenizer.num_special_tokens_to_add(_snake_case )
        __a = compute_effective_axis_dimension(
            _snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_snake_case )

        # Generate dummy inputs according to compute batch and sequence
        __a = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        __a = dict(tokenizer(_snake_case , return_tensors=_snake_case ) )
        return common_inputs

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            __a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                _snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case )
        else:
            __a = self._generate_dummy_inputs_for_causal_lm(
                _snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case )
        return common_inputs

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case ) -> Union[str, Any]:
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            __a = super()._flatten_past_key_values_(_snake_case , _snake_case , _snake_case , _snake_case )
        else:
            __a = super(_snake_case , self )._flatten_past_key_values_(
                _snake_case , _snake_case , _snake_case , _snake_case )

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> float:
        '''simple docstring'''
        return 1E-4
6
from typing import Dict

from .base import GenericTensor, Pipeline


class __A( a ):
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Optional[Any]:
        '''simple docstring'''
        if tokenize_kwargs is None:
            __a = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
            __a = truncation

        __a = tokenize_kwargs

        __a = {}
        if return_tensors is not None:
            __a = return_tensors

        return preprocess_params, {}, postprocess_params

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , **_snake_case ) -> Dict[str, GenericTensor]:
        '''simple docstring'''
        __a = self.framework
        __a = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
        return model_inputs

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[Any]:
        '''simple docstring'''
        __a = self.model(**_snake_case )
        return model_outputs

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=False ) -> Optional[int]:
        '''simple docstring'''
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__( self , *_snake_case , **_snake_case ) -> Any:
        '''simple docstring'''
        return super().__call__(*_snake_case , **_snake_case )
6
1
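The pipeline class in the record above is transformers' feature-extraction pipeline; a short usage sketch (the model choice here is illustrative, any encoder checkpoint works):

from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("This is a test.")  # nested lists: [batch][token][hidden_dim]
print(len(features[0]), len(features[0][0]))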
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


A : Tuple = logging.get_logger(__name__)


class __A( a ):
    def __init__( self , *_snake_case , **_snake_case ) -> None:
        '''simple docstring'''
        warnings.warn(
            '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PoolFormerImageProcessor instead.''' , _snake_case , )
        super().__init__(*_snake_case , **_snake_case )
6
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


A : List[str] = logging.get_logger(__name__)

A : Optional[int] = {
    'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class __A( a ):
    snake_case_ = '''levit'''

    def __init__( self , _snake_case=224 , _snake_case=3 , _snake_case=3 , _snake_case=2 , _snake_case=1 , _snake_case=16 , _snake_case=[128, 256, 384] , _snake_case=[4, 8, 12] , _snake_case=[4, 4, 4] , _snake_case=[16, 16, 16] , _snake_case=0 , _snake_case=[2, 2, 2] , _snake_case=[2, 2, 2] , _snake_case=0.02 , **_snake_case , ) -> Optional[Any]:
        '''simple docstring'''
        super().__init__(**_snake_case )
        __a = image_size
        __a = num_channels
        __a = kernel_size
        __a = stride
        __a = padding
        __a = hidden_sizes
        __a = num_attention_heads
        __a = depths
        __a = key_dim
        __a = drop_path_rate
        __a = patch_size
        __a = attention_ratio
        __a = mlp_ratio
        __a = initializer_range
        __a = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class __A( a ):
    snake_case_ = version.parse('''1.11''' )

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> float:
        '''simple docstring'''
        return 1E-4
6
1
import argparse
import os
import sys
from unittest.mock import patch

import pytorch_lightning as pl
import timeout_decorator
import torch

from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json


A : List[str] = 'sshleifer/mar_enro_6_3_student'


class __A( a ):
    def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
        '''simple docstring'''
        super().setUp()

        __a = cached_path(
            '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=_snake_case , )
        __a = F"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""

    @slow
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
        '''simple docstring'''
        MarianMTModel.from_pretrained(_snake_case )

    @slow
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
        '''simple docstring'''
        __a = {
            '''$MAX_LEN''': 64,
            '''$BS''': 64,
            '''$GAS''': 1,
            '''$ENRO_DIR''': self.data_dir,
            '''facebook/mbart-large-cc25''': MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            '''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
            '''--num_train_epochs 6''': '''--num_train_epochs 1''',
        }

        # Clean up bash script
        __a = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
        __a = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
        for k, v in env_vars_to_replace.items():
            __a = bash_script.replace(_snake_case , str(_snake_case ) )
        __a = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        __a = F"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        __a = ['''finetune.py'''] + bash_script.split() + args
        with patch.object(_snake_case , '''argv''' , _snake_case ):
            __a = argparse.ArgumentParser()
            __a = pl.Trainer.add_argparse_args(_snake_case )
            __a = SummarizationModule.add_model_specific_args(_snake_case , os.getcwd() )
            __a = parser.parse_args()
            __a = main(_snake_case )

            # Check metrics
            __a = load_json(model.metrics_save_path )
            __a = metrics['''val'''][0]
            __a = metrics['''val'''][-1]
            self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
            assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , _snake_case )

            self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
            # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
            self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )

            # test learning requirements:

            # 1. BLEU improves over the course of training by more than 2 pts
            self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )

            # 2. BLEU finishes above 17
            self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )

            # 3. test BLEU and val BLEU within ~1.1 pt.
            self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )

            # check lightning ckpt can be loaded and has a reasonable statedict
            __a = os.listdir(_snake_case )
            __a = [x for x in contents if x.endswith('''.ckpt''' )][0]
            __a = os.path.join(args.output_dir , _snake_case )
            __a = torch.load(_snake_case , map_location='''cpu''' )
            __a = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
            assert expected_key in ckpt["state_dict"]
            assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa

            # TODO: turn on args.do_predict when PL bug fixed.
            if args.do_predict:
                __a = {os.path.basename(_snake_case ) for p in contents}
                assert "test_generations.txt" in contents
                assert "test_results.txt" in contents
                # assert len(metrics["val"]) == desired_n_evals
                assert len(metrics['''test'''] ) == 1


class __A( a ):
    @timeout_decorator.timeout(600 )
    @slow
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
        '''simple docstring'''
        __a = F"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
        __a = {
            '''--fp16_opt_level=O1''': '''''',
            '''$MAX_LEN''': 128,
            '''$BS''': 16,
            '''$GAS''': 1,
            '''$ENRO_DIR''': data_dir,
            '''$m''': '''sshleifer/student_marian_en_ro_6_1''',
            '''val_check_interval=0.25''': '''val_check_interval=1.0''',
        }

        # Clean up bash script
        __a = (
            (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
        )
        __a = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
        __a = bash_script.replace('''--fp16 ''' , ''' ''' )

        for k, v in env_vars_to_replace.items():
            __a = bash_script.replace(_snake_case , str(_snake_case ) )
        __a = self.get_auto_remove_tmp_dir()
        __a = bash_script.replace('''--fp16''' , '''''' )
        __a = 6
        __a = (
            ['''distillation.py''']
            + bash_script.split()
            + [
                F"""--output_dir={output_dir}""",
                '''--gpus=1''',
                '''--learning_rate=1e-3''',
                F"""--num_train_epochs={epochs}""",
                '''--warmup_steps=10''',
                '''--val_check_interval=1.0''',
                '''--do_predict''',
            ]
        )
        with patch.object(_snake_case , '''argv''' , _snake_case ):
            __a = argparse.ArgumentParser()
            __a = pl.Trainer.add_argparse_args(_snake_case )
            __a = SummarizationDistiller.add_model_specific_args(_snake_case , os.getcwd() )
            __a = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            __a = distill_main(_snake_case )

            # Check metrics
            __a = load_json(model.metrics_save_path )
            __a = metrics['''val'''][0]
            __a = metrics['''val'''][-1]
            assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

            assert last_step_stats["val_avg_gen_time"] >= 0.01

            assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
            assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
            assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , _snake_case )

            # check lightning ckpt can be loaded and has a reasonable statedict
            __a = os.listdir(_snake_case )
            __a = [x for x in contents if x.endswith('''.ckpt''' )][0]
            __a = os.path.join(args.output_dir , _snake_case )
            __a = torch.load(_snake_case , map_location='''cpu''' )
            __a = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
            assert expected_key in ckpt["state_dict"]
            assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa

            # TODO: turn on args.do_predict when PL bug fixed.
            if args.do_predict:
                __a = {os.path.basename(_snake_case ) for p in contents}
                assert "test_generations.txt" in contents
                assert "test_results.txt" in contents
                # assert len(metrics["val"]) == desired_n_evals
                assert len(metrics['''test'''] ) == 1
6
import tempfile
import unittest

import numpy as np

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

A : int = '0.12'  # assumed parallelism: 8


@require_flax
@is_staging_test
class __A( unittest.TestCase ):
    @classmethod
    def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]:
        '''simple docstring'''
        __a = TOKEN
        HfFolder.save_token(_snake_case )

    @classmethod
    def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]:
        '''simple docstring'''
        try:
            delete_repo(token=cls._token , repo_id='''test-model-flax''' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
        except HTTPError:
            pass

    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
        '''simple docstring'''
        __a = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        __a = FlaxBertModel(_snake_case )
        model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )

        __a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )

        __a = flatten_dict(unfreeze(model.params ) )
        __a = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            __a = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )

        # Reset repo
        delete_repo(token=self._token , repo_id='''test-model-flax''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(_snake_case , repo_id='''test-model-flax''' , push_to_hub=_snake_case , use_auth_token=self._token )

        __a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )

        __a = flatten_dict(unfreeze(model.params ) )
        __a = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            __a = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )

    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
        '''simple docstring'''
        __a = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        __a = FlaxBertModel(_snake_case )
        model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )

        __a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )

        __a = flatten_dict(unfreeze(model.params ) )
        __a = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            __a = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )

        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                _snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_snake_case , use_auth_token=self._token )

        __a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )

        __a = flatten_dict(unfreeze(model.params ) )
        __a = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            __a = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )


def __lowerCAmelCase ( a__ , a__ ) -> str:
    __a = True
    __a = flatten_dict(modela.params )
    __a = flatten_dict(modela.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
            __a = False
    return models_are_equal


@require_flax
class __A( unittest.TestCase ):
    def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
        '''simple docstring'''
        __a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        __a = FlaxBertModel(_snake_case )
        __a = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(_snake_case , _snake_case ) )
            with self.assertRaises(_snake_case ):
                __a = FlaxBertModel.from_pretrained(_snake_case )

            __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )

        self.assertTrue(check_models_equal(_snake_case , _snake_case ) )

    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
        '''simple docstring'''
        __a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        __a = FlaxBertModel(_snake_case )
        __a = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(_snake_case , _snake_case ) , max_shard_size='''10KB''' )
            with self.assertRaises(_snake_case ):
                __a = FlaxBertModel.from_pretrained(_snake_case )

            __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )

        self.assertTrue(check_models_equal(_snake_case , _snake_case ) )

    def SCREAMING_SNAKE_CASE_ ( self ) -> int:
        '''simple docstring'''
        __a = '''bert'''
        __a = '''hf-internal-testing/tiny-random-bert-subfolder'''
        with self.assertRaises(_snake_case ):
            __a = FlaxBertModel.from_pretrained(_snake_case )

        __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )

        self.assertIsNotNone(_snake_case )

    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
        '''simple docstring'''
        __a = '''bert'''
        __a = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
        with self.assertRaises(_snake_case ):
            __a = FlaxBertModel.from_pretrained(_snake_case )

        __a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )

        self.assertIsNotNone(_snake_case )
6
1
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class __A( a , a , unittest.TestCase ):
    snake_case_ = AutoencoderKL
    snake_case_ = '''sample'''
    snake_case_ = 1E-2

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
        '''simple docstring'''
        __a = 4
        __a = 3
        __a = (32, 32)

        __a = floats_tensor((batch_size, num_channels) + sizes ).to(_snake_case )

        return {"sample": image}

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
        '''simple docstring'''
        return (3, 32, 32)

    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
        '''simple docstring'''
        return (3, 32, 32)

    def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
        '''simple docstring'''
        __a = {
            '''block_out_channels''': [32, 64],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        __a = self.dummy_input
        return init_dict, inputs_dict

    def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
        '''simple docstring'''
        pass

    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
        '''simple docstring'''
        pass

    @unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
    def SCREAMING_SNAKE_CASE_ ( self ) -> int:
        '''simple docstring'''
        __a , __a = self.prepare_init_args_and_inputs_for_common()

        __a = self.model_class(**_snake_case )
        model.to(_snake_case )

        assert not model.is_gradient_checkpointing and model.training

        __a = model(**_snake_case ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        __a = torch.randn_like(_snake_case )
        __a = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        __a = self.model_class(**_snake_case )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(_snake_case )
        model_a.enable_gradient_checkpointing()

        assert model_a.is_gradient_checkpointing and model_a.training

        __a = model_a(**_snake_case ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        __a = (out_a - labels).mean()
        loss_a.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )
        __a = dict(model.named_parameters() )
        __a = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )

    def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
        '''simple docstring'''
        __a , __a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=_snake_case )
        self.assertIsNotNone(_snake_case )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )

        model.to(_snake_case )
        __a = model(**self.dummy_input )

        assert image is not None, "Make sure output is not None"

    def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
        '''simple docstring'''
        __a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
        __a = model.to(_snake_case )
        model.eval()

        if torch_device == "mps":
            __a = torch.manual_seed(0 )
        else:
            __a = torch.Generator(device=_snake_case ).manual_seed(0 )

        __a = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        __a = image.to(_snake_case )
        with torch.no_grad():
            __a = model(_snake_case , sample_posterior=_snake_case , generator=_snake_case ).sample

        __a = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            __a = torch.tensor(
                [
                    -4.0_078E-01,
                    -3.8_323E-04,
                    -1.2_681E-01,
                    -1.1_462E-01,
                    2.0_095E-01,
                    1.0_893E-01,
                    -8.8_247E-02,
                    -3.0_361E-01,
                    -9.8_644E-03,
                ] )
        elif torch_device == "cpu":
            __a = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            __a = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )

        self.assertTrue(torch_all_close(_snake_case , _snake_case , rtol=1E-2 ) )


@slow
class __A( unittest.TestCase ):
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]:
        '''simple docstring'''
        return F"""gaussian_noise_s={seed}_shape={'_'.join([str(_snake_case ) for s in shape] )}.npy"""

    def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 , _snake_case=(4, 3, 512, 512) , _snake_case=False ) -> Any:
        '''simple docstring'''
        __a = torch.floataa if fpaa else torch.floataa
        __a = torch.from_numpy(load_hf_numpy(self.get_file_format(_snake_case , _snake_case ) ) ).to(_snake_case ).to(_snake_case )
        return image

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case="CompVis/stable-diffusion-v1-4" , _snake_case=False ) -> Optional[Any]:
        '''simple docstring'''
        __a = '''fp16''' if fpaa else None
        __a = torch.floataa if fpaa else torch.floataa
        __a = AutoencoderKL.from_pretrained(
            _snake_case , subfolder='''vae''' , torch_dtype=_snake_case , revision=_snake_case , )
        model.to(_snake_case ).eval()
        return model

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 ) -> Tuple:
        '''simple docstring'''
        if torch_device == "mps":
            return torch.manual_seed(_snake_case )
        return torch.Generator(device=_snake_case ).manual_seed(_snake_case )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> List[Any]:
        '''simple docstring'''
        __a = self.get_sd_vae_model()
        __a = self.get_sd_image(_snake_case )
        __a = self.get_generator(_snake_case )

        with torch.no_grad():
            __a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample

        assert sample.shape == image.shape

        __a = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        __a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )

        assert torch_all_close(_snake_case , _snake_case , atol=3E-3 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ] )
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Tuple:
        '''simple docstring'''
        __a = self.get_sd_vae_model(fpaa=_snake_case )
        __a = self.get_sd_image(_snake_case , fpaa=_snake_case )
        __a = self.get_generator(_snake_case )

        with torch.no_grad():
            __a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample

        assert sample.shape == image.shape

        __a = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        __a = torch.tensor(_snake_case )

        assert torch_all_close(_snake_case , _snake_case , atol=1E-2 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[int]:
        '''simple docstring'''
        __a = self.get_sd_vae_model()
        __a = self.get_sd_image(_snake_case )

        with torch.no_grad():
            __a = model(_snake_case ).sample

        assert sample.shape == image.shape

        __a = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        __a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )

        assert torch_all_close(_snake_case , _snake_case , atol=3E-3 )

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ] )
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]:
        '''simple docstring'''
        __a = self.get_sd_vae_model()
        __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) )

        with torch.no_grad():
            __a = model.decode(_snake_case ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        __a = sample[-1, -2:, :2, -2:].flatten().cpu()
        __a = torch.tensor(_snake_case )

        assert torch_all_close(_snake_case , _snake_case , atol=1E-3 )

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ] )
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]:
        '''simple docstring'''
        __a = self.get_sd_vae_model(fpaa=_snake_case )
        __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case )

        with torch.no_grad():
            __a = model.decode(_snake_case ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        __a = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        __a = torch.tensor(_snake_case )

        assert torch_all_close(_snake_case , _snake_case , atol=5E-3 )

    @parameterized.expand([(13,), (16,), (27,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Union[str, Any]:
        '''simple docstring'''
        __a = self.get_sd_vae_model(fpaa=_snake_case )
        __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case )

        with torch.no_grad():
            __a = model.decode(_snake_case ).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            __a = model.decode(_snake_case ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        assert torch_all_close(_snake_case , _snake_case , atol=1E-1 )

    @parameterized.expand([(13,), (16,), (37,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]:
        '''simple docstring'''
        __a = self.get_sd_vae_model()
        __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) )

        with torch.no_grad():
            __a = model.decode(_snake_case ).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            __a = model.decode(_snake_case ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        assert torch_all_close(_snake_case , _snake_case , atol=1E-2 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ] )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]:
        '''simple docstring'''
        __a = self.get_sd_vae_model()
        __a = self.get_sd_image(_snake_case )
        __a = self.get_generator(_snake_case )

        with torch.no_grad():
            __a = model.encode(_snake_case ).latent_dist
            __a = dist.sample(generator=_snake_case )

        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        __a = sample[0, -1, -3:, -3:].flatten().cpu()
        __a = torch.tensor(_snake_case )

        __a = 3E-3 if torch_device != '''mps''' else 1E-2

        assert torch_all_close(_snake_case , _snake_case , atol=_snake_case )
6
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


A : Optional[Any] = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))

import dataclasses  # noqa
import io  # noqa
import itertools  # noqa
import json  # noqa
import os  # noqa
import unittest  # noqa
from copy import deepcopy  # noqa

from parameterized import parameterized  # noqa

from transformers import TrainingArguments, is_torch_available  # noqa
from transformers.deepspeed import is_deepspeed_available  # noqa
from transformers.file_utils import WEIGHTS_NAME  # noqa
from transformers.testing_utils import (  # noqa
    CaptureLogger,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import set_seed  # noqa


set_seed(4_2)

A : List[str] = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}

A : Optional[int] = 'zero2'
A : str = 'zero3'
A : Tuple = [ZEROa, ZEROa]


def __lowerCAmelCase ( a__ , a__ , a__ ) -> Tuple:
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    __a = parameterized.to_safe_name('''_'''.join(str(a__ ) for x in param.args ) )
    return F"""{func.__name__}_{param_based_name}"""


# Cartesian-product of zero stages with models to test
A : Union[str, Any] = list(itertools.product(stages, models.keys()))


@slow
@require_deepspeed
@require_torch_gpu
class __A( a ):
    @parameterized.expand(_snake_case , name_func=_snake_case )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Any:
        '''simple docstring'''
        self.run_and_check(
            stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )

    @require_torch_multi_gpu
    @parameterized.expand(_snake_case , name_func=_snake_case )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> int:
        '''simple docstring'''
        self.run_and_check(
            stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )

    @parameterized.expand(_snake_case , name_func=_snake_case )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> str:
        '''simple docstring'''
        self.run_and_check(
            stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )

    @require_torch_multi_gpu
    @parameterized.expand(_snake_case , name_func=_snake_case )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]:
        '''simple docstring'''
        self.run_and_check(
            stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Tuple:
        '''simple docstring'''
        pass

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 10 , _snake_case = True , _snake_case = True , _snake_case = True , ) -> Any:
        '''simple docstring'''
        __a = models[model]
        __a = self.run_trainer(
            stage=_snake_case , model_name=_snake_case , eval_steps=_snake_case , num_train_epochs=1 , distributed=_snake_case , fpaa=_snake_case , )
        self.do_checks(_snake_case )
        return output_dir

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 10 , _snake_case = 1 , _snake_case = True , _snake_case = True , ) -> Union[str, Any]:
        '''simple docstring'''
        __a = self.get_auto_remove_tmp_dir('''./xxx''' , after=_snake_case )
        __a = F"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(_snake_case )}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fpaa:
            args.extend(['''--fp16'''] )

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        __a = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
        __a = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
        __a = self.get_launcher(_snake_case )

        __a = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(_snake_case , env=self.get_env() )

        return output_dir

    def SCREAMING_SNAKE_CASE_ ( self , _snake_case=False ) -> List[str]:
        '''simple docstring'''
        __a = min(2 , get_gpu_count() ) if distributed else 1
        return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
6
1
import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def __lowerCAmelCase ( a__ ) -> Any: monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() ) @pytest.fixture def __lowerCAmelCase ( a__ ) -> List[str]: class __A: def __init__( self , _snake_case ) -> Dict: '''simple docstring''' __a = metric_id class __A: snake_case_ = [MetricMock(a ) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']] def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' return self._metrics monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() ) @pytest.mark.parametrize( '''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] ) def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> Optional[int]: if "tmp_path" in args: __a = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args ) with pytest.warns(a__ , match='''https://huggingface.co/docs/evaluate''' ): func(*a__ )
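# Minimal sketch of the stubbing pattern used above: datasets.inspect resolves metric ids
# through its module-level `huggingface_hub` attribute, so swapping that attribute for a
# fake keeps the test offline. `_FakeHub` and `_FakeMetric` are illustrative names, not
# part of the library.
class _FakeMetric:
    def __init__(self, metric_id):
        self.id = metric_id


class _FakeHub:
    def list_metrics(self):
        return [_FakeMetric("accuracy"), _FakeMetric("mse")]


def test_metric_ids_offline(monkeypatch):
    import datasets.inspect
    monkeypatch.setattr("datasets.inspect.huggingface_hub", _FakeHub())
    # any code path that lists metrics now resolves against the stub, no network needed
    assert [m.id for m in datasets.inspect.huggingface_hub.list_metrics()] == ["accuracy", "mse"]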
6
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __A( a , a , unittest.TestCase ): snake_case_ = AutoencoderKL snake_case_ = '''sample''' snake_case_ = 1E-2 @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = 4 __a = 3 __a = (32, 32) __a = floats_tensor((batch_size, num_channels) + sizes ).to(_snake_case ) return {"sample": image} @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' return (3, 32, 32) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' return (3, 32, 32) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = { '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } __a = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' pass @unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a , __a = self.prepare_init_args_and_inputs_for_common() __a = self.model_class(**_snake_case ) model.to(_snake_case ) assert not model.is_gradient_checkpointing and model.training __a = model(**_snake_case ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __a = torch.randn_like(_snake_case ) __a = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __a = self.model_class(**_snake_case ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(_snake_case ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __a = model_a(**_snake_case ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __a = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) __a = dict(model.named_parameters() ) __a = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a , __a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_snake_case ) __a = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' ) __a = model.to(_snake_case ) model.eval() if torch_device == "mps": __a = torch.manual_seed(0 ) else: __a = torch.Generator(device=_snake_case ).manual_seed(0 ) __a = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __a = image.to(_snake_case ) with torch.no_grad(): __a = model(_snake_case , sample_posterior=_snake_case , generator=_snake_case ).sample __a = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": __a = torch.tensor( [ -4.0_078E-01, -3.8_323E-04, -1.2_681E-01, -1.1_462E-01, 2.0_095E-01, 1.0_893E-01, -8.8_247E-02, -3.0_361E-01, -9.8_644E-03, ] ) elif torch_device == "cpu": __a = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: __a = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(_snake_case , _snake_case , rtol=1E-2 ) ) @slow class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={'_'.join([str(_snake_case ) for s in shape] )}.npy""" def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 , _snake_case=(4, 3, 512, 512) , _snake_case=False ) -> Any: '''simple docstring''' __a = torch.floataa if fpaa else torch.floataa __a = torch.from_numpy(load_hf_numpy(self.get_file_format(_snake_case , _snake_case ) ) ).to(_snake_case ).to(_snake_case ) return image def SCREAMING_SNAKE_CASE_ ( self , _snake_case="CompVis/stable-diffusion-v1-4" , _snake_case=False ) -> Optional[Any]: '''simple docstring''' __a = '''fp16''' if fpaa else None __a = torch.floataa if fpaa else torch.floataa __a = AutoencoderKL.from_pretrained( _snake_case , subfolder='''vae''' , torch_dtype=_snake_case , revision=_snake_case , ) model.to(_snake_case ).eval() return model def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 ) -> Tuple: '''simple docstring''' if torch_device == "mps": return torch.manual_seed(_snake_case ) return torch.Generator(device=_snake_case ).manual_seed(_snake_case ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 
0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> List[Any]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case ) __a = self.get_generator(_snake_case ) with torch.no_grad(): __a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample assert sample.shape == image.shape __a = sample[-1, -2:, -2:, :2].flatten().float().cpu() __a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(_snake_case , _snake_case , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Tuple: '''simple docstring''' __a = self.get_sd_vae_model(fpaa=_snake_case ) __a = self.get_sd_image(_snake_case , fpaa=_snake_case ) __a = self.get_generator(_snake_case ) with torch.no_grad(): __a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample assert sample.shape == image.shape __a = sample[-1, -2:, :2, -2:].flatten().float().cpu() __a = torch.tensor(_snake_case ) assert torch_all_close(_snake_case , _snake_case , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case ) with torch.no_grad(): __a = model(_snake_case ).sample assert sample.shape == image.shape __a = sample[-1, -2:, -2:, :2].flatten().float().cpu() __a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(_snake_case , _snake_case , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) ) with torch.no_grad(): __a = model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] __a = sample[-1, -2:, :2, -2:].flatten().cpu() __a = torch.tensor(_snake_case ) assert torch_all_close(_snake_case , _snake_case , atol=1E-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = self.get_sd_vae_model(fpaa=_snake_case ) __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case ) with torch.no_grad(): __a = 
model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] __a = sample[-1, -2:, :2, -2:].flatten().float().cpu() __a = torch.tensor(_snake_case ) assert torch_all_close(_snake_case , _snake_case , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Union[str, Any]: '''simple docstring''' __a = self.get_sd_vae_model(fpaa=_snake_case ) __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case ) with torch.no_grad(): __a = model.decode(_snake_case ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __a = model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_snake_case , _snake_case , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) ) with torch.no_grad(): __a = model.decode(_snake_case ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __a = model.decode(_snake_case ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_snake_case , _snake_case , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = self.get_sd_vae_model() __a = self.get_sd_image(_snake_case ) __a = self.get_generator(_snake_case ) with torch.no_grad(): __a = model.encode(_snake_case ).latent_dist __a = dist.sample(generator=_snake_case ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __a = sample[0, -1, -3:, -3:].flatten().cpu() __a = torch.tensor(_snake_case ) __a = 3E-3 if torch_device != '''mps''' else 1E-2 assert torch_all_close(_snake_case , _snake_case , atol=_snake_case )
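# Hedged round-trip sketch for the VAE exercised above, reusing the same tiny test
# checkpoint. Output shapes depend on the dummy config, so they are printed rather than
# asserted here.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy").eval()
image = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample(generator=torch.manual_seed(0))
    reconstruction = vae.decode(latents).sample
print(latents.shape, reconstruction.shape)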
6
1
from sklearn.metrics import recall_score import datasets A : Optional[Any] = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n' A : Optional[Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n' A : Tuple = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A( datasets.Metric ): def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ), '''references''': datasets.Sequence(datasets.Value('''int32''' ) ), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case=None , _snake_case=1 , _snake_case="binary" , _snake_case=None , _snake_case="warn" , ) -> Any: '''simple docstring''' __a = recall_score( _snake_case , _snake_case , labels=_snake_case , pos_label=_snake_case , average=_snake_case , sample_weight=_snake_case , zero_division=_snake_case , ) return {"recall": float(_snake_case ) if score.size == 1 else score}
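# Quick sanity check mirroring the docstring examples above (assumes the metric script is
# loadable via `datasets.load_metric`). One true positive and one false negative -> 0.5.
import datasets

recall = datasets.load_metric("recall")
print(recall.compute(references=[0, 1, 1], predictions=[0, 0, 1]))  # {'recall': 0.5}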
6
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup A : str = logging.get_logger(__name__) class __A( a ): def __init__( self , **_snake_case ) -> List[Any]: '''simple docstring''' requires_backends(self , ['''bs4'''] ) super().__init__(**_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int: '''simple docstring''' __a = [] __a = [] __a = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __a = parent.find_all(child.name , recursive=_snake_case ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_snake_case ) else next(i for i, s in enumerate(_snake_case , 1 ) if s is child ) ) __a = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]: '''simple docstring''' __a = BeautifulSoup(_snake_case , '''html.parser''' ) __a = [] __a = [] __a = [] for element in html_code.descendants: if type(_snake_case ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __a = html.unescape(_snake_case ).strip() if not text_in_this_tag: continue all_doc_strings.append(_snake_case ) __a , __a = self.xpath_soup(_snake_case ) stringaxtag_seq.append(_snake_case ) stringaxsubs_seq.append(_snake_case ) if len(_snake_case ) != len(_snake_case ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(_snake_case ) != len(_snake_case ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = '''''' for tagname, subs in zip(_snake_case , _snake_case ): xpath += F"""/{tagname}""" if subs != 0: xpath += F"""[{subs}]""" return xpath def __call__( self , _snake_case ) -> BatchFeature: '''simple docstring''' __a = False # Check that strings has a valid type if isinstance(_snake_case , _snake_case ): __a = True elif isinstance(_snake_case , (list, tuple) ): if len(_snake_case ) == 0 or isinstance(html_strings[0] , _snake_case ): __a = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' F"""but is of type {type(_snake_case )}.""" ) __a = bool(isinstance(_snake_case , (list, tuple) ) and (isinstance(html_strings[0] , _snake_case )) ) if not is_batched: __a = [html_strings] # Get nodes + xpaths __a = [] __a = [] for html_string in html_strings: __a , __a , __a = self.get_three_from_single(_snake_case ) nodes.append(_snake_case ) __a = [] for node, tag_list, sub_list in zip(_snake_case , _snake_case , _snake_case ): __a = self.construct_xpath(_snake_case , _snake_case ) xpath_strings.append(_snake_case ) xpaths.append(_snake_case ) # return as Dict __a = {'''nodes''': nodes, '''xpaths''': xpaths} __a = BatchFeature(data=_snake_case , tensor_type=_snake_case ) return encoded_inputs
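# Hedged usage sketch for the HTML feature extractor above (upstream name:
# MarkupLMFeatureExtractor; this dump renames the class to __A). Requires bs4 installed.
html_string = "<html><body><div><p>Hello</p><p>World</p></div></body></html>"
feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor(html_string)
print(encoding["nodes"])   # [['Hello', 'World']]
print(encoding["xpaths"])  # [['/html/body/div/p[1]', '/html/body/div/p[2]']]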
6
1
from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class __A( a ): snake_case_ = None snake_case_ = None snake_case_ = None snake_case_ = None class __A( a ): def __init__( self , _snake_case=1 , _snake_case=0 , _snake_case=2 , _snake_case=512 , _snake_case="cls" , _snake_case=False , _snake_case=True , **_snake_case , ) -> Dict: '''simple docstring''' super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case ) __a = project_dim __a = pooler_fn __a = learn_encoder __a = use_attention_mask class __A( a ): snake_case_ = [r'''pooler''', r'''logit_scale'''] snake_case_ = [r'''position_ids''', r'''predictions.decoder.bias'''] snake_case_ = '''roberta''' snake_case_ = RobertaSeriesConfig def __init__( self , _snake_case ) -> Tuple: '''simple docstring''' super().__init__(_snake_case ) __a = XLMRobertaModel(_snake_case ) __a = nn.Linear(config.hidden_size , config.project_dim ) __a = getattr(_snake_case , '''has_pre_transformation''' , _snake_case ) if self.has_pre_transformation: __a = nn.Linear(config.hidden_size , config.project_dim ) __a = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def SCREAMING_SNAKE_CASE_ ( self , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , ) -> int: '''simple docstring''' __a = return_dict if return_dict is not None else self.config.use_return_dict __a = self.base_model( input_ids=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , position_ids=_snake_case , head_mask=_snake_case , inputs_embeds=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_attentions=_snake_case , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=_snake_case , ) if self.has_pre_transformation: __a = outputs['''hidden_states'''][-2] __a = self.pre_LN(_snake_case ) __a = self.transformation_pre(_snake_case ) return TransformationModelOutput( projection_state=_snake_case , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: __a = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=_snake_case , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
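# Minimal sketch: instantiating the wrapper above from a tiny config. The upstream names
# are RobertaSeriesConfig / RobertaSeriesModelWithTransformation (both appear as __A in
# this dump); the dimensions are illustrative, not a released checkpoint.
import torch

config = RobertaSeriesConfig(
    vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=2,
    intermediate_size=37, project_dim=16,
)
model = RobertaSeriesModelWithTransformation(config).eval()
with torch.no_grad():
    out = model(input_ids=torch.tensor([[5, 6, 7]]), attention_mask=torch.ones(1, 3, dtype=torch.long))
print(out.projection_state.shape)  # torch.Size([1, 3, 16])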
6
def __lowerCAmelCase ( a__ , a__ ) -> float:
    def get_matched_characters(a__ , a__ ) -> str:
        __a = []
        __a = min(len(_stra ) , len(_stra ) ) // 2
        for i, l in enumerate(_stra ):
            __a = int(max(0 , i - limit ) )
            __a = int(min(i + limit + 1 , len(_stra ) ) )
            if l in _stra[left:right]:
                matched.append(a__ )
                __a = F"""{_stra[0:_stra.index(a__ )]} {_stra[_stra.index(a__ ) + 1:]}"""
        return "".join(a__ )

    # matching characters
    __a = get_matched_characters(a__ , a__ )
    __a = get_matched_characters(a__ , a__ )
    __a = len(a__ )

    # transposition
    __a = (
        len([(ca, ca) for ca, ca in zip(a__ , a__ ) if ca != ca] ) // 2
    )

    if not match_count:
        __a = 0.0
    else:
        __a = (
            1
            / 3
            * (
                match_count / len(a__ )
                + match_count / len(a__ )
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    __a = 0
    for ca, ca in zip(stra[:4] , stra[:4] ):
        if ca == ca:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler('hello', 'world'))
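# Worked example for the metric above, independent of the obfuscated identifiers:
# jaro_winkler("MARTHA", "MARHTA"):
#   matches m = 6, transpositions t = 1 (the swapped "TH"/"HT" pair)
#   jaro = (m/|s1| + m/|s2| + (m - t)/m) / 3 = (1 + 1 + 5/6) / 3 ~= 0.9444
#   common prefix l = 3 ("MAR"), scaling p = 0.1
#   jaro_winkler = jaro + l * p * (1 - jaro) ~= 0.9444 + 0.3 * 0.0556 ~= 0.9611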
6
1
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A : Tuple = logging.get_logger(__name__) A : Dict = { 'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json', 'Salesforce/blip-vqa-capfit-large': ( 'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json' ), 'Salesforce/blip-image-captioning-base': ( 'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json' ), 'Salesforce/blip-image-captioning-large': ( 'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json' ), 'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json', 'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json', 'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json', 'Salesforce/blip-itm-large-flikr': ( 'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json' ), } class __A( a ): snake_case_ = '''blip_text_model''' def __init__( self , _snake_case=30_524 , _snake_case=768 , _snake_case=768 , _snake_case=3_072 , _snake_case=768 , _snake_case=12 , _snake_case=8 , _snake_case=512 , _snake_case="gelu" , _snake_case=1E-12 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=30_522 , _snake_case=2 , _snake_case=0 , _snake_case=102 , _snake_case=True , _snake_case=True , **_snake_case , ) -> Optional[int]: '''simple docstring''' super().__init__( pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , sep_token_id=_snake_case , **_snake_case , ) __a = vocab_size __a = hidden_size __a = encoder_hidden_size __a = intermediate_size __a = projection_dim __a = hidden_dropout_prob __a = num_hidden_layers __a = num_attention_heads __a = max_position_embeddings __a = layer_norm_eps __a = hidden_act __a = initializer_range __a = attention_probs_dropout_prob __a = is_decoder __a = use_cache @classmethod def SCREAMING_SNAKE_CASE_ ( cls , _snake_case , **_snake_case ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(_snake_case ) __a , __a = cls.get_config_dict(_snake_case , **_snake_case ) # get the text config dict if we are loading from BlipConfig if config_dict.get('''model_type''' ) == "blip": __a = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_snake_case , **_snake_case ) class __A( a ): snake_case_ = '''blip_vision_model''' def __init__( self , _snake_case=768 , _snake_case=3_072 , _snake_case=512 , _snake_case=12 , _snake_case=12 , _snake_case=384 , _snake_case=16 , _snake_case="gelu" , _snake_case=1E-5 , _snake_case=0.0 , _snake_case=1E-10 , **_snake_case , ) -> Optional[Any]: '''simple docstring''' super().__init__(**_snake_case ) __a = hidden_size __a = intermediate_size __a = projection_dim __a = num_hidden_layers __a = num_attention_heads __a = patch_size __a = image_size __a = initializer_range __a = attention_dropout __a = layer_norm_eps __a = hidden_act @classmethod def SCREAMING_SNAKE_CASE_ ( cls , _snake_case , **_snake_case ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(_snake_case ) __a , __a = cls.get_config_dict(_snake_case , **_snake_case ) # get the vision config dict if we are loading from BlipConfig if config_dict.get('''model_type''' ) == "blip": __a = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_snake_case , **_snake_case ) class __A( a ): snake_case_ = '''blip''' snake_case_ = True def __init__( self , _snake_case=None , _snake_case=None , _snake_case=512 , _snake_case=2.6592 , _snake_case=256 , **_snake_case , ) -> Optional[int]: '''simple docstring''' super().__init__(**_snake_case ) if text_config is None: __a = {} logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' ) if vision_config is None: __a = {} logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' ) __a = BlipTextConfig(**_snake_case ) __a = BlipVisionConfig(**_snake_case ) __a = self.vision_config.hidden_size __a = projection_dim __a = logit_scale_init_value __a = 1.0 __a = 0.02 __a = image_text_hidden_size @classmethod def SCREAMING_SNAKE_CASE_ ( cls , _snake_case , _snake_case , **_snake_case ) -> str: '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = copy.deepcopy(self.__dict__ ) __a = self.text_config.to_dict() __a = self.vision_config.to_dict() __a = self.__class__.model_type return output
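# Hedged sketch: composing the two sub-configs above into the joint config. Upstream
# names are BlipTextConfig / BlipVisionConfig / BlipConfig (all three appear as __A in
# this dump); the sizes are tiny illustrative values, not a released checkpoint.
text_config = BlipTextConfig(vocab_size=1000, hidden_size=64, encoder_hidden_size=64,
                             num_hidden_layers=2, num_attention_heads=4)
vision_config = BlipVisionConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4,
                                 image_size=96, patch_size=16)
config = BlipConfig.from_text_vision_configs(text_config, vision_config, projection_dim=32)
print(config.projection_dim)  # 32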
6
def __lowerCAmelCase ( a__ ) -> str:
    __a = []
    __a = set({'''(''', '''[''', '''{'''} )
    __a = set({''')''', ''']''', '''}'''} )
    __a = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}
    for i in range(len(a__ ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(a__ ) == 0 or (len(a__ ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(a__ ) == 0


def __lowerCAmelCase ( ) -> Dict:
    __a = input('''Enter sequence of brackets: ''' )
    if is_balanced(a__ ):
        print(a__ , '''is balanced''' )
    else:
        print(a__ , '''is not balanced''' )


if __name__ == "__main__":
    main()
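# Quick checks for the matcher above (upstream name is_balanced, as the main() call shows):
# is_balanced("([]{})")  -> True
# is_balanced("([)]")    -> False  (a closer arrives out of order)
# is_balanced("((")      -> False  (unmatched openers remain on the stack)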
6
1
from collections import deque from math import floor from random import random from time import time class __A: def __init__( self ) -> List[Any]: '''simple docstring''' __a = {} def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case=1 ) -> Optional[Any]: '''simple docstring''' if self.graph.get(_snake_case ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: __a = [[w, v]] if not self.graph.get(_snake_case ): __a = [] def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' return list(self.graph ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' if self.graph.get(_snake_case ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case=-2 , _snake_case=-1 ) -> Any: '''simple docstring''' if s == d: return [] __a = [] __a = [] if s == -2: __a = list(self.graph )[0] stack.append(_snake_case ) visited.append(_snake_case ) __a = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(_snake_case ) return visited else: stack.append(node[1] ) visited.append(node[1] ) __a = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(_snake_case ) != 0: __a = stack[len(_snake_case ) - 1] else: __a = ss # check if se have reached the starting point if len(_snake_case ) == 0: return visited def SCREAMING_SNAKE_CASE_ ( self , _snake_case=-1 ) -> List[Any]: '''simple docstring''' if c == -1: __a = floor(random() * 10_000 ) + 10 for i in range(_snake_case ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): __a = floor(random() * c ) + 1 if n != i: self.add_pair(_snake_case , _snake_case , 1 ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case=-2 ) -> Optional[Any]: '''simple docstring''' __a = deque() __a = [] if s == -2: __a = list(self.graph )[0] d.append(_snake_case ) visited.append(_snake_case ) while d: __a = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Tuple: '''simple docstring''' __a = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> str: '''simple docstring''' return len(self.graph[u] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case=-2 ) -> Union[str, Any]: '''simple docstring''' __a = [] __a = [] if s == -2: __a = list(self.graph )[0] stack.append(_snake_case ) visited.append(_snake_case ) __a = s __a = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __a = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(_snake_case ) != 0: __a = stack[len(_snake_case ) - 1] else: __a = ss # check if se have reached the starting point if len(_snake_case ) == 0: return sorted_nodes def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = [] __a = [] __a = list(self.graph )[0] stack.append(_snake_case ) visited.append(_snake_case ) __a = -2 __a = [] __a = s __a = False __a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __a = s for node in 
self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __a = len(_snake_case ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __a = node[1] break # check if all the children are visited if s == ss: stack.pop() __a = True if len(_snake_case ) != 0: __a = stack[len(_snake_case ) - 1] else: __a = False indirect_parents.append(_snake_case ) __a = s __a = ss # check if se have reached the starting point if len(_snake_case ) == 0: return list(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = [] __a = [] __a = list(self.graph )[0] stack.append(_snake_case ) visited.append(_snake_case ) __a = -2 __a = [] __a = s __a = False __a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __a = len(_snake_case ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __a = node[1] break # check if all the children are visited if s == ss: stack.pop() __a = True if len(_snake_case ) != 0: __a = stack[len(_snake_case ) - 1] else: __a = False indirect_parents.append(_snake_case ) __a = s __a = ss # check if se have reached the starting point if len(_snake_case ) == 0: return False def SCREAMING_SNAKE_CASE_ ( self , _snake_case=-2 , _snake_case=-1 ) -> str: '''simple docstring''' __a = time() self.dfs(_snake_case , _snake_case ) __a = time() return end - begin def SCREAMING_SNAKE_CASE_ ( self , _snake_case=-2 ) -> Union[str, Any]: '''simple docstring''' __a = time() self.bfs(_snake_case ) __a = time() return end - begin class __A: def __init__( self ) -> int: '''simple docstring''' __a = {} def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case=1 ) -> Optional[int]: '''simple docstring''' if self.graph.get(_snake_case ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist __a = [[w, v]] # add the other way if self.graph.get(_snake_case ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist __a = [[w, u]] def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> int: '''simple docstring''' if self.graph.get(_snake_case ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(_snake_case ) # the other way round if self.graph.get(_snake_case ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case=-2 , _snake_case=-1 ) -> Union[str, Any]: '''simple docstring''' if s == d: return [] __a = [] __a = [] if s == -2: __a = list(self.graph )[0] stack.append(_snake_case ) visited.append(_snake_case ) __a = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(_snake_case ) return visited else: stack.append(node[1] ) visited.append(node[1] ) __a = node[1] break # check if all the children are 
visited if s == ss: stack.pop() if len(_snake_case ) != 0: __a = stack[len(_snake_case ) - 1] else: __a = ss # check if se have reached the starting point if len(_snake_case ) == 0: return visited def SCREAMING_SNAKE_CASE_ ( self , _snake_case=-1 ) -> List[Any]: '''simple docstring''' if c == -1: __a = floor(random() * 10_000 ) + 10 for i in range(_snake_case ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): __a = floor(random() * c ) + 1 if n != i: self.add_pair(_snake_case , _snake_case , 1 ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case=-2 ) -> str: '''simple docstring''' __a = deque() __a = [] if s == -2: __a = list(self.graph )[0] d.append(_snake_case ) visited.append(_snake_case ) while d: __a = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]: '''simple docstring''' return len(self.graph[u] ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = [] __a = [] __a = list(self.graph )[0] stack.append(_snake_case ) visited.append(_snake_case ) __a = -2 __a = [] __a = s __a = False __a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __a = len(_snake_case ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __a = node[1] break # check if all the children are visited if s == ss: stack.pop() __a = True if len(_snake_case ) != 0: __a = stack[len(_snake_case ) - 1] else: __a = False indirect_parents.append(_snake_case ) __a = s __a = ss # check if se have reached the starting point if len(_snake_case ) == 0: return list(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = [] __a = [] __a = list(self.graph )[0] stack.append(_snake_case ) visited.append(_snake_case ) __a = -2 __a = [] __a = s __a = False __a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __a = len(_snake_case ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __a = node[1] break # check if all the children are visited if s == ss: stack.pop() __a = True if len(_snake_case ) != 0: __a = stack[len(_snake_case ) - 1] else: __a = False indirect_parents.append(_snake_case ) __a = s __a = ss # check if se have reached the starting point if len(_snake_case ) == 0: return False def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' return list(self.graph ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case=-2 , _snake_case=-1 ) -> int: '''simple docstring''' __a = time() self.dfs(_snake_case , _snake_case ) __a = time() return end - begin def SCREAMING_SNAKE_CASE_ ( self , _snake_case=-2 ) -> Tuple: '''simple docstring''' __a = time() self.bfs(_snake_case ) __a = time() return end - begin
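# Hedged usage sketch for the directed graph class above (the first __A definition; the
# upstream names are DirectedGraph with add_pair/all_nodes/dfs/bfs, which this dump
# obfuscates). Node labels and the default edge weight of 1 are illustrative.
g = DirectedGraph()
g.add_pair(1, 2)
g.add_pair(2, 3)
g.add_pair(3, 1)
print(g.all_nodes())  # [1, 2, 3]
print(g.dfs(1, 3))    # depth-first path from 1 to 3: [1, 2, 3]
print(g.bfs(1))       # breadth-first order from 1: [1, 2, 3]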
6
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A : str = { 'configuration_blenderbot': [ 'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotConfig', 'BlenderbotOnnxConfig', ], 'tokenization_blenderbot': ['BlenderbotTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = ['BlenderbotTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = [ 'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotForCausalLM', 'BlenderbotForConditionalGeneration', 'BlenderbotModel', 'BlenderbotPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Dict = [ 'TFBlenderbotForConditionalGeneration', 'TFBlenderbotModel', 'TFBlenderbotPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Dict = [ 'FlaxBlenderbotForConditionalGeneration', 'FlaxBlenderbotModel', 'FlaxBlenderbotPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
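# A minimal sketch of the lazy-import pattern behind _LazyModule above: heavy submodules
# are imported only when one of their exported names is first accessed. This is a
# simplification of transformers.utils._LazyModule, not its real implementation.
import importlib
import types


class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                # trigger the real import on first attribute access
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")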
6
1
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class __A( unittest.TestCase ): def __init__( self , _snake_case , _snake_case = True , _snake_case = None , _snake_case = 32 , _snake_case = True , _snake_case = 1 / 255 , _snake_case = True , _snake_case = True , _snake_case = [0.4814_5466, 0.457_8275, 0.4082_1073] , _snake_case = [0.2686_2954, 0.2613_0258, 0.2757_7711] , _snake_case = True , _snake_case=7 , _snake_case=30 , _snake_case=400 , _snake_case=3 , ) -> Optional[Any]: '''simple docstring''' __a = parent __a = do_resize __a = size if size is not None else {'''shortest_edge''': 288} __a = size_divisor __a = do_rescale __a = rescale_factor __a = do_normalize __a = do_center_crop __a = image_mean __a = image_std __a = do_pad __a = batch_size __a = num_channels __a = min_resolution __a = max_resolution def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=False ) -> str: '''simple docstring''' if not batched: __a = self.size['''shortest_edge'''] __a = image_inputs[0] if isinstance(_snake_case , Image.Image ): __a , __a = image.size else: __a , __a = image.shape[1], image.shape[2] __a = size / min(_snake_case , _snake_case ) if h < w: __a , __a = size, scale * w else: __a , __a = scale * h, size __a = int((1_333 / 800) * size ) if max(_snake_case , _snake_case ) > max_size: __a = max_size / max(_snake_case , _snake_case ) __a = newh * scale __a = neww * scale __a , __a = int(newh + 0.5 ), int(neww + 0.5 ) __a , __a = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: __a = [] for image in image_inputs: __a , __a = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __a = max(_snake_case , key=lambda _snake_case : item[0] )[0] __a = max(_snake_case , key=lambda _snake_case : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __A( a , unittest.TestCase ): snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = BridgeTowerImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_snake_case , '''image_mean''' ) ) self.assertTrue(hasattr(_snake_case , '''image_std''' ) ) self.assertTrue(hasattr(_snake_case , '''do_normalize''' ) ) self.assertTrue(hasattr(_snake_case , '''do_resize''' ) ) self.assertTrue(hasattr(_snake_case , '''size''' ) ) self.assertTrue(hasattr(_snake_case , '''size_divisor''' ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' 
__a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , Image.Image ) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __a , __a = self.image_processor_tester.get_expected_values(_snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a = image_processing(_snake_case , return_tensors='''pt''' ).pixel_values __a , __a = self.image_processor_tester.get_expected_values(_snake_case , batched=_snake_case ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , np.ndarray ) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __a , __a = self.image_processor_tester.get_expected_values(_snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a = image_processing(_snake_case , return_tensors='''pt''' ).pixel_values __a , __a = self.image_processor_tester.get_expected_values(_snake_case , batched=_snake_case ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , torch.Tensor ) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __a , __a = self.image_processor_tester.get_expected_values(_snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a = image_processing(_snake_case , return_tensors='''pt''' ).pixel_values __a , __a = self.image_processor_tester.get_expected_values(_snake_case , batched=_snake_case ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
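# Worked example of the expected-size math above: a 480x640 image with
# shortest_edge=288 and size_divisor=32.
#   scale = 288 / min(480, 640) = 0.6 -> (newh, neww) = (288, 384)
#   max_size = int(1333 / 800 * 288) = 479; max(288, 384) <= 479, so no second rescale
#   snap to the divisor: (288 // 32 * 32, 384 // 32 * 32) = (288, 384)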
6
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A : Dict = { 'configuration_xlm_roberta': [ 'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMRobertaConfig', 'XLMRobertaOnnxConfig', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Union[str, Any] = ['XLMRobertaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = ['XLMRobertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[Any] = [ 'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMRobertaForCausalLM', 'XLMRobertaForMaskedLM', 'XLMRobertaForMultipleChoice', 'XLMRobertaForQuestionAnswering', 'XLMRobertaForSequenceClassification', 'XLMRobertaForTokenClassification', 'XLMRobertaModel', 'XLMRobertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = [ 'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLMRobertaForCausalLM', 'TFXLMRobertaForMaskedLM', 'TFXLMRobertaForMultipleChoice', 'TFXLMRobertaForQuestionAnswering', 'TFXLMRobertaForSequenceClassification', 'TFXLMRobertaForTokenClassification', 'TFXLMRobertaModel', 'TFXLMRobertaPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = [ 'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'FlaxXLMRobertaForMaskedLM', 'FlaxXLMRobertaForCausalLM', 'FlaxXLMRobertaForMultipleChoice', 'FlaxXLMRobertaForQuestionAnswering', 'FlaxXLMRobertaForSequenceClassification', 'FlaxXLMRobertaForTokenClassification', 'FlaxXLMRobertaModel', 'FlaxXLMRobertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from 
.modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
1
import enum
import shutil
import sys

A , A : Dict = shutil.get_terminal_size()
A : str = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}


class __A( enum.Enum ):
    snake_case_ = 0
    snake_case_ = 1


def __lowerCAmelCase ( a__ , a__="" ) -> Optional[int]:
    sys.stdout.write(str(a__ ) + end )
    sys.stdout.flush()


def __lowerCAmelCase ( a__ , a__ , a__="" ) -> Dict:
    forceWrite(F"""\u001b[{color}m{content}\u001b[0m""" , a__ )


def __lowerCAmelCase ( ) -> Any:
    forceWrite('''\r''' )


def __lowerCAmelCase ( a__ , a__ ) -> Any:
    forceWrite(F"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" )


def __lowerCAmelCase ( ) -> int:
    forceWrite(''' ''' * TERMINAL_WIDTH )
    reset_cursor()


def __lowerCAmelCase ( ) -> Dict:
    reset_cursor()
    forceWrite('''-''' * TERMINAL_WIDTH )
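# Tiny demo of the helpers above (raw ANSI escapes; assumes a VT100-capable terminal).
# forceWrite and reset_cursor are grounded in the call sites shown; move_cursor and
# clear_line are the assumed upstream names for the remaining two functions.
forceWrite("rendering...")
reset_cursor()          # '\r': the next write overwrites this line
clear_line()            # blank the line with spaces, then return to column 0
forceWrite("done")
move_cursor(1, "UP")    # emits '\033[1A' to move the cursor one line up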
6
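# --- Illustrative aside (not part of the original sources) ---
# A standalone sketch of how the cursor helpers above combine into a one-line
# progress display. `force_write` mirrors the file's `forceWrite`; the loop,
# step count, and sleep interval are invented for the demo.
import sys
import time


def force_write(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


for step in range(1, 6):
    force_write("\r")              # like reset_cursor(): back to column 0
    force_write(f"step {step}/5")  # overwrite the previous status in place
    time.sleep(0.2)
force_write("\n")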
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]

if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
6
1
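# --- Illustrative aside (not part of the original sources) ---
# Hedged usage sketch for the lazily exported Whisper classes above. The
# checkpoint id "openai/whisper-tiny" is the public Hub repo; downloading it
# at runtime is an assumption of this demo.
import numpy as np

from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")

audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio, sampling_rate=16000, return_tensors="pt")
generated_ids = model.generate(input_features=inputs.input_features)
print(processor.batch_decode(generated_ids, skip_special_tokens=True))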
import json
import os
from typing import Optional

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer


logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    # Expected ndarray rank for each component of a voice preset.
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not"
                    " exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to"
                    " the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}`"
                    f" does not exist, no preloaded voice preset will be used - Make sure to provide correct paths"
                    f" to the {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
6
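# --- Illustrative aside (not part of the original sources) ---
# Hedged usage sketch for the processor above. "suno/bark-small" and the
# "v2/en_speaker_6" preset follow the public Bark checkpoints' conventions,
# but their availability at runtime is an assumption of this demo.
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")

# Text is padded to max_length=256 by default; the preset rides along under
# "history_prompt" for the model's semantic/coarse/fine stages.
print(inputs["input_ids"].shape)
print(list(inputs["history_prompt"].keys()))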
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
6
1
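# --- Illustrative aside (not part of the original sources) ---
# Generic sketch of the re-export-plus-warning pattern the deprecated
# controlnet module above uses. This `deprecate` helper is a minimal stand-in
# written for the demo, not diffusers' actual `deprecate` utility.
import warnings


def deprecate(feature, removal_version, message, standard_warn=True, stacklevel=2):
    # With standard_warn=True a boilerplate prefix is added; False emits the
    # message verbatim (mirroring the call style seen above).
    prefix = f"`{feature}` is deprecated and will be removed in version {removal_version}. " if standard_warn else ""
    warnings.warn(prefix + message, FutureWarning, stacklevel=stacklevel)


deprecate(
    "old import path",
    "0.22.0",
    "Import the pipeline from its new location instead.",
    standard_warn=False,
    stacklevel=3,
)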
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-local generators, so fall back to the
        # global CPU generator there.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # float16 needs a looser tolerance here.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
6
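# --- Illustrative aside (not part of the original sources) ---
# Standalone sketch of the device-dependent generator seeding used by
# `get_dummy_inputs` above: MPS lacks device-local generators, so the test
# falls back to the global CPU generator there. Requires torch.
import torch


def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # global CPU generator fallback
    return torch.Generator(device=device).manual_seed(seed)


print(make_generator("cpu").initial_seed())  # 0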
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` keeps its default in serialized output so the template stays identifiable.
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
6
1
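# --- Illustrative aside (not part of the original sources) ---
# Sketch of what `column_mapping` above is for: renaming a dataset's columns
# to the schema a task expects. The in-memory dataset here is invented for the
# demo; `Dataset.from_dict` and `rename_columns` are real datasets APIs.
from datasets import Dataset

column_mapping = {"content": "text"}  # e.g. LanguageModeling(text_column="content").column_mapping

ds = Dataset.from_dict({"content": ["hello world", "lazy modules everywhere"]})
ds = ds.rename_columns(column_mapping)
print(ds.column_names)  # ['text']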